source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
analysis_subprocess.py | #####################################################################
# #
# /analysis_subprocess.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program lyse, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
    str = unicode
import labscript_utils.excepthook

# Connect to the parent lyse process before importing anything heavy, so the
# communication queues and the kill lock are available module-wide:
from labscript_utils.ls_zprocess import ProcessTree
process_tree = ProcessTree.connect_to_parent()
to_parent = process_tree.to_parent
from_parent = process_tree.from_parent
kill_lock = process_tree.kill_lock

import sys
import os
import threading
import traceback
import time

from qtutils.qt import QtCore, QtGui, QtWidgets, QT_ENV, PYQT5
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils.qt.QtCore import pyqtSlot as Slot

import matplotlib
# Select the matplotlib backend matching the Qt binding in use. This must run
# before any pyplot/pylab import:
if QT_ENV == PYQT5:
    matplotlib.use("QT5Agg")
else:
    matplotlib.use("QT4Agg")

import lyse
from lyse import LYSE_DIR
# Tell lyse that it is being imported from within a lyse analysis subprocess:
lyse.spinning_top = True
import lyse.figure_manager
# Patch matplotlib's figure management so lyse can track the user's figures:
lyse.figure_manager.install()

if QT_ENV == PYQT5:
    from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
else:
    from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar

import pylab
import labscript_utils.h5_lock, h5py

from qtutils import inmain, inmain_later, inmain_decorator, UiLoader, inthread, DisconnectContextManager
import qtutils.icons
from labscript_utils.modulewatcher import ModuleWatcher
class _DeprecationDict(dict):
    """Dictionary that spouts deprecation warnings when you try to access some
    keys."""
    def __init__(self, *args, **kwargs):
        # Maps key -> warning text. To be added to after the deprecated items
        # are added to the dict.
        self.deprecation_messages = {}
        dict.__init__(self, *args, **kwargs)

    def __getitem__(self, key):
        if key in self.deprecation_messages:
            import warnings
            import linecache
            # DeprecationWarnings are ignored by default. Clear the filter so
            # they are not:
            previous_warning_filters = warnings.filters[:]
            try:
                warnings.resetwarnings()
                # Hacky stuff to get it to work from within execfile() with
                # correct line data:
                linecache.clearcache()
                caller = sys._getframe(1)
                globals = caller.f_globals
                lineno = caller.f_lineno
                module = globals['__name__']
                filename = globals.get('__file__')
                fnl = filename.lower()
                if fnl.endswith((".pyc", ".pyo")):
                    # Point the warning at the source file, not the bytecode:
                    filename = filename[:-1]
                message = self.deprecation_messages[key]
                # warn_explicit attributes the warning to the user's script
                # (file/line/module) rather than to this file:
                warnings.warn_explicit(message, DeprecationWarning, filename, lineno, module)
            finally:
                # Restore the warnings filter:
                warnings.filters[:] = previous_warning_filters
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        if key in self.deprecation_messages:
            # No longer deprecated if the user puts something in place of the
            # originally deprecated item:
            del self.deprecation_messages[key]
        return dict.__setitem__(self, key, value)
def set_win_appusermodel(window_id):
    """Register the given native window with the Windows taskbar under lyse's
    application id, so pinning and relaunching from the taskbar work."""
    from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
    icon = os.path.join(LYSE_DIR, 'lyse.ico')
    # Prefer the windowed interpreter (pythonw.exe) for the relaunch command:
    interpreter = sys.executable.lower()
    if not interpreter.endswith('w.exe'):
        interpreter = interpreter.replace('.exe', 'w.exe')
    relaunch = interpreter + ' ' + os.path.join(LYSE_DIR, '__main__.py')
    set_appusermodel(window_id, appids['lyse'], icon, relaunch, app_descriptions['lyse'])
class PlotWindowCloseEvent(QtGui.QCloseEvent):
    """A QCloseEvent carrying a flag saying whether the receiving PlotWindow
    must really close (force=True) or may merely hide itself."""
    def __init__(self, force, *args, **kwargs):
        super(PlotWindowCloseEvent, self).__init__(*args, **kwargs)
        # Whether the window should actually close rather than just hide:
        self.force = force
class PlotWindow(QtWidgets.QWidget):
    """Top-level widget hosting a Plot. A normal close (title-bar X) only
    hides the window; a PlotWindowCloseEvent with force=True really closes it."""
    # A signal for when the window manager has created a new window for this widget:
    newWindow = Signal(int)
    close_signal = Signal()

    def __init__(self, plot, *args, **kwargs):
        # The Plot instance that owns this window:
        self.__plot = plot
        QtWidgets.QWidget.__init__(self, *args, **kwargs)

    def event(self, event):
        result = QtWidgets.QWidget.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            # Re-emit with the new native window id so platform-specific
            # setup (e.g. Windows appusermodel) can be applied:
            self.newWindow.emit(self.effectiveWinId())
        return result

    def closeEvent(self, event):
        self.hide()
        if isinstance(event, PlotWindowCloseEvent) and event.force:
            # Forced close: let the Plot release toolbar state first.
            self.__plot.on_close()
            event.accept()
        else:
            # Ordinary close request: keep the window alive, just hidden.
            event.ignore()
class Plot(object):
    """A window wrapping a single matplotlib figure produced by a user's
    analysis routine, with a navigation toolbar, axis locking, and
    copy-to-clipboard support. May be subclassed by user code via
    lyse.register_plot_class()."""

    def __init__(self, figure, identifier, filepath):
        self.identifier = identifier
        loader = UiLoader()
        self.ui = loader.load(os.path.join(LYSE_DIR, 'plot_window.ui'), PlotWindow(self))

        # Tell Windows how to handle our windows in the taskbar, making pinning work properly and stuff:
        if os.name == 'nt':
            self.ui.newWindow.connect(set_win_appusermodel)

        self.set_window_title(identifier, filepath)

        self.figure = figure
        self.canvas = figure.canvas
        self.navigation_toolbar = NavigationToolbar(self.canvas, self.ui)

        # Toolbar action toggling whether axis limits survive re-analysis:
        self.lock_action = self.navigation_toolbar.addAction(
            QtGui.QIcon(':qtutils/fugue/lock-unlock'),
            'Lock axes', self.on_lock_axes_triggered)
        self.lock_action.setCheckable(True)
        self.lock_action.setToolTip('Lock axes')

        # Toolbar action copying the figure image to the clipboard (Ctrl+C):
        self.copy_to_clipboard_action = self.navigation_toolbar.addAction(
            QtGui.QIcon(':qtutils/fugue/clipboard--arrow'),
            'Copy to clipboard', self.on_copy_to_clipboard_triggered)
        self.copy_to_clipboard_action.setToolTip('Copy to clipboard')
        self.copy_to_clipboard_action.setShortcut(QtGui.QKeySequence.Copy)

        self.ui.verticalLayout_canvas.addWidget(self.canvas)
        self.ui.verticalLayout_navigation_toolbar.addWidget(self.navigation_toolbar)

        # Whether to restore saved axis limits after each analysis run:
        self.lock_axes = False
        # Saved limits: {axis_index: (xlim, ylim)}, or None if never saved.
        self.axis_limits = None

        self.update_window_size()
        self.ui.show()

    def on_lock_axes_triggered(self):
        # Sync the lock state and icon with the checkable action:
        if self.lock_action.isChecked():
            self.lock_axes = True
            self.lock_action.setIcon(QtGui.QIcon(':qtutils/fugue/lock'))
        else:
            self.lock_axes = False
            self.lock_action.setIcon(QtGui.QIcon(':qtutils/fugue/lock-unlock'))

    def on_copy_to_clipboard_triggered(self):
        lyse.figure_to_clipboard(self.figure)

    @inmain_decorator()
    def save_axis_limits(self):
        """Record the current x/y limits of every axis (runs in the GUI thread)."""
        axis_limits = {}
        for i, ax in enumerate(self.figure.axes):
            # Save the limits of the axes to restore them afterward:
            axis_limits[i] = ax.get_xlim(), ax.get_ylim()
        self.axis_limits = axis_limits

    @inmain_decorator()
    def clear(self):
        """Clear the figure (runs in the GUI thread)."""
        self.figure.clear()

    @inmain_decorator()
    def restore_axis_limits(self):
        """Re-apply previously saved limits to each axis, skipping axes that
        have no saved entry (runs in the GUI thread)."""
        for i, ax in enumerate(self.figure.axes):
            try:
                xlim, ylim = self.axis_limits[i]
                ax.set_xlim(xlim)
                ax.set_ylim(ylim)
            except KeyError:
                continue

    @inmain_decorator()
    def set_window_title(self, identifier, filepath):
        self.ui.setWindowTitle(str(identifier) + ' - ' + os.path.basename(filepath))

    @inmain_decorator()
    def update_window_size(self):
        """Resize the canvas to match the figure's size in pixels and shrink
        the window to fit (runs in the GUI thread)."""
        l, w = self.figure.get_size_inches()
        dpi = self.figure.get_dpi()
        self.canvas.resize(int(l*dpi),int(w*dpi))
        self.ui.adjustSize()

    @inmain_decorator()
    def draw(self):
        self.canvas.draw()

    def show(self):
        self.ui.show()

    @property
    def is_shown(self):
        return self.ui.isVisible()

    def analysis_complete(self, figure_in_use):
        """To be overridden by subclasses.
        Called as part of the post analysis plot actions"""
        pass

    def get_window_state(self):
        """Called when the Plot window is about to be closed due to a change in
        registered Plot window class.

        Can be overridden by subclasses if custom information should be saved
        (although bear in mind that you will be passing the information from the
        previous Plot subclass which might not be what you want unless the old
        and new classes have a common ancestor, or the change in Plot class is
        triggered by a reload of the module containing your Plot subclass).

        Returns a dictionary of information on the window state.

        If you have overridden this method, please call the base method first
        and then update the returned dictionary with your additional
        information before returning it from your method.
        """
        return {
            'window_geometry': self.ui.saveGeometry(),
            'axis_lock_state': self.lock_axes,
            'axis_limits': self.axis_limits,
        }

    def restore_window_state(self, state):
        """Called when the Plot window is recreated due to a change in
        registered Plot window class.

        Can be overridden by subclasses if custom information should be
        restored (although bear in mind that you will get the information from
        the previous Plot subclass which might not be what you want unless the
        old and new classes have a common ancestor, or the change in Plot class
        is triggered by a reload of the module containing your Plot subclass).

        If overriding, please call the parent method in addition to your new
        code.

        Arguments:
            state: A dictionary of information to restore
        """
        geometry = state.get('window_geometry', None)
        if geometry is not None:
            self.ui.restoreGeometry(geometry)
        axis_limits = state.get('axis_limits', None)
        axis_lock_state = state.get('axis_lock_state', None)
        if axis_lock_state is not None:
            if axis_lock_state:
                # assumes the default state is False for new windows
                self.lock_action.trigger()
        if axis_limits is not None:
            self.axis_limits = axis_limits
            self.restore_axis_limits()

    def on_close(self):
        """Called when the window is closed.

        Note that this only happens if the Plot window class has changed.
        Clicking the "X" button in the window title bar has been overridden to
        hide the window instead of closing it."""
        # release selected toolbar action as selecting an action acquires a lock
        # that is associated with the figure canvas (which is reused in the new
        # plot window) and this must be released before closing the window or else
        # it is held forever
        self.navigation_toolbar.pan()
        self.navigation_toolbar.zoom()
        self.navigation_toolbar.pan()
        self.navigation_toolbar.pan()
class AnalysisWorker(object):
    """Executes one user analysis script on request from the parent lyse
    process, managing the plot windows the script produces."""

    def __init__(self, filepath, to_parent, from_parent):
        # Queues for communicating with the parent lyse process:
        self.to_parent = to_parent
        self.from_parent = from_parent
        self.filepath = filepath

        # Filepath as a unicode string on py3 and a bytestring on py2,
        # so that the right string type can be passed to functions that
        # require the 'native' string type for that python version. On
        # Python 2, encode it with the filesystem encoding.
        if PY2:
            self.filepath_native_string = self.filepath.encode(sys.getfilesystemencoding())
        else:
            self.filepath_native_string = self.filepath

        # Add user script directory to the pythonpath:
        sys.path.insert(0, os.path.dirname(self.filepath_native_string))

        # Plot objects, keyed by matplotlib Figure object:
        self.plots = {}

        # An object with a method to unload user modules if any have
        # changed on disk:
        self.modulewatcher = ModuleWatcher()

        # Start the thread that listens for instructions from the
        # parent process:
        self.mainloop_thread = threading.Thread(target=self.mainloop)
        self.mainloop_thread.daemon = True
        self.mainloop_thread.start()

    def mainloop(self):
        """Background loop: receive 'analyse'/'quit' tasks from the parent and
        report results back."""
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        while True:
            task, data = self.from_parent.get()
            with kill_lock:
                if task == 'quit':
                    inmain(qapplication.quit)
                elif task == 'analyse':
                    path = data
                    success = self.do_analysis(path)
                    if success:
                        # Honour lyse.delay_results_return(): wait until the
                        # script signals its results are ready.
                        if lyse._delay_flag:
                            lyse.delay_event.wait()
                        self.to_parent.put(['done', lyse._updated_data])
                    else:
                        self.to_parent.put(['error', lyse._updated_data])
                else:
                    self.to_parent.put(['error','invalid task %s'%str(task)])

    @inmain_decorator()
    def do_analysis(self, path):
        """Run the user's script against the shot file at ``path`` (or in
        multi-shot mode when path is None). Returns True on success, False if
        the script raised. Runs in the GUI thread."""
        now = time.strftime('[%x %X]')
        if path is not None:
            print('%s %s %s ' %(now, os.path.basename(self.filepath), os.path.basename(path)))
        else:
            print('%s %s' %(now, os.path.basename(self.filepath)))
        self.pre_analysis_plot_actions()
        # The namespace the routine will run in:
        sandbox = _DeprecationDict(path=path,
                                   __name__='__main__',
                                   __file__= os.path.basename(self.filepath_native_string))
        # path global variable is deprecated:
        deprecation_message = ("use of 'path' global variable is deprecated and will be removed " +
                               "in a future version of lyse. Please use lyse.path, which defaults " +
                               "to sys.argv[1] when scripts are run stand-alone.")
        sandbox.deprecation_messages['path'] = deprecation_message
        # Use lyse.path instead:
        lyse.path = path
        lyse.plots = self.plots
        lyse.Plot = Plot
        lyse._updated_data = {}
        lyse._delay_flag = False
        lyse.delay_event.clear()
        # Save the current working directory before changing it to the
        # location of the user's script:
        cwd = os.getcwd()
        os.chdir(os.path.dirname(self.filepath))
        # Do not let the modulewatcher unload any modules whilst we're working:
        try:
            with self.modulewatcher.lock:
                # Actually run the user's analysis!
                with open(self.filepath) as f:
                    code = compile(f.read(), os.path.basename(self.filepath_native_string),
                                   'exec', dont_inherit=True)
                    exec(code, sandbox)
        except:
            # Drop the traceback frame for our exec call above, so the user
            # sees only their own code in the traceback:
            traceback_lines = traceback.format_exception(*sys.exc_info())
            del traceback_lines[1]
            # Avoiding a list comprehension here so as to avoid this
            # python bug in earlier versions of 2.7 (fixed in 2.7.9):
            # https://bugs.python.org/issue21591
            message = ''
            for line in traceback_lines:
                if PY2:
                    # errors='replace' is for Windows filenames present in the
                    # traceback that are not UTF8. They will not display
                    # correctly, but that's the best we can do - the traceback
                    # may contain code from the file in a different encoding,
                    # so we could have a mixed encoding string. This is only
                    # a problem for Python 2.
                    line = line.decode('utf8', errors='replace')
                message += line
            sys.stderr.write(message)
            return False
        else:
            return True
        finally:
            os.chdir(cwd)
            print('')
            self.post_analysis_plot_actions()

    def pre_analysis_plot_actions(self):
        """Reset figure bookkeeping and clear existing plots, saving their
        axis limits so they can be restored after the run."""
        lyse.figure_manager.figuremanager.reset()
        for plot in self.plots.values():
            plot.save_axis_limits()
            plot.clear()

    def post_analysis_plot_actions(self):
        """Create/update/recreate plot windows for the figures the user's
        script produced during this run."""
        # reset the current figure to figure 1:
        lyse.figure_manager.figuremanager.set_first_figure_current()
        # Introspect the figures that were produced:
        for identifier, fig in lyse.figure_manager.figuremanager.figs.items():
            window_state = None
            if not fig.axes:
                # Try and clear the figure if it is not in use
                try:
                    plot = self.plots[fig]
                    plot.set_window_title("Empty", self.filepath)
                    plot.draw()
                    plot.analysis_complete(figure_in_use=False)
                except KeyError:
                    pass
                # Skip the rest of the loop regardless of whether we managed to clear
                # the unused figure or not!
                continue
            try:
                plot = self.plots[fig]
                # Get the Plot subclass registered for this plot identifier if it exists
                cls = lyse.get_plot_class(identifier)
                # If no plot was registered, use the base class
                if cls is None: cls = Plot
                # if plot instance does not match the expected identifier,
                # or the identifier in use with this plot has changed,
                # we need to close and reopen it!
                if type(plot) != cls or plot.identifier != identifier:
                    window_state = plot.get_window_state()
                    # Create a custom CloseEvent to force close the plot window
                    event = PlotWindowCloseEvent(True)
                    QtCore.QCoreApplication.instance().postEvent(plot.ui, event)
                    # Delete the plot
                    del self.plots[fig]
                    # force raise the keyerror exception to recreate the window
                    self.plots[fig]
            except KeyError:
                # If we don't already have this figure, make a window
                # to put it in:
                plot = self.new_figure(fig, identifier)
                # restore window state/geometry if it was saved
                if window_state is not None:
                    plot.restore_window_state(window_state)
            else:
                if not plot.is_shown:
                    plot.show()
                    plot.update_window_size()
                plot.set_window_title(identifier, self.filepath)
                if plot.lock_axes:
                    plot.restore_axis_limits()
            plot.draw()
            plot.analysis_complete(figure_in_use=True)

    def new_figure(self, fig, identifier):
        """Instantiate a Plot (or the user's registered Plot subclass) for the
        given figure, falling back to the base class on failure."""
        try:
            # Get custom class for this plot if it is registered
            cls = lyse.get_plot_class(identifier)
            # If no plot was registered, use the base class
            if cls is None: cls = Plot
            # if cls is not a subclass of Plot, then raise an Exception
            if not issubclass(cls, Plot):
                raise RuntimeError('The specified class must be a subclass of lyse.Plot')
            # Instantiate the plot
            self.plots[fig] = cls(fig, identifier, self.filepath)
        except Exception:
            traceback_lines = traceback.format_exception(*sys.exc_info())
            del traceback_lines[1]
            # Avoiding a list comprehension here so as to avoid this
            # python bug in earlier versions of 2.7 (fixed in 2.7.9):
            # https://bugs.python.org/issue21591
            message = """Failed to instantiate custom class for plot "{identifier}".
                Perhaps lyse.register_plot_class() was called incorrectly from your
                script? The exception raised was:
                """.format(identifier=identifier)
            message = lyse.dedent(message)
            for line in traceback_lines:
                if PY2:
                    # errors='replace' is for Windows filenames present in the
                    # traceback that are not UTF8. They will not display
                    # correctly, but that's the best we can do - the traceback
                    # may contain code from the file in a different encoding,
                    # so we could have a mixed encoding string. This is only
                    # a problem for Python 2.
                    line = line.decode('utf8', errors='replace')
                message += line
            message += '\n'
            message += 'Due to this error, we used the default lyse.Plot class instead.\n'
            sys.stderr.write(message)
            # instantiate plot using original Base class so that we always get a plot
            self.plots[fig] = Plot(fig, identifier, self.filepath)
        return self.plots[fig]

    def reset_figs(self):
        pass
if __name__ == '__main__':
    # The first message from the parent is the path of the analysis script:
    filepath = from_parent.get()

    # Set a meaningful client id for zlock
    process_tree.zlock_client.set_process_name('lyse-'+os.path.basename(filepath))

    qapplication = QtWidgets.QApplication(sys.argv)
    # The worker's mainloop thread services requests; the Qt event loop
    # blocks here until a 'quit' task calls qapplication.quit():
    worker = AnalysisWorker(filepath, to_parent, from_parent)
    qapplication.exec_()
|
sim_double_pendulum.py | # Copyright (c) 2018, The SenseAct Authors.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import gym
import time
from senseact.rtrl_base_env import RTRLBaseEnv
from senseact.devices.sim_double_pendulum.gym_simulator import GymSimulator
from senseact.devices.sim_double_pendulum.cart_communicator import CartCommunicator
from senseact.devices.sim_double_pendulum.rod_communicator import RodCommunicator
from multiprocessing import Value, Process
class DoubleInvertedPendulumEnv(RTRLBaseEnv, gym.core.Env):
    """Double Inverted Pendulum Environment implemented in mujoco(DoubleInvertedPendulumEnv).

    This task simulates real world communications of the DoubleInvertedPendulumEnv-v2 by running
    the simulator (mujoco), the actuators, the sensors, in separate processes asynchronously.
    The sensors of the cartpole system are split into 2: the cart and the rod (2 links).
    The cart has a single actuator and 2 observations while the rod has a total of 9 observations.
    """
    def __init__(self,
                 agent_dt=0.01,
                 sensor_dt=[0.001, 0.001],
                 gym_dt=0.001,
                 is_render=False,
                 **kwargs
                 ):
        """Inits DoubleInvertedPendulumEnv class with task and communication specific parameters.

        Args:
            agent_dt: Time step length associated with agent cycle.
            sensor_dt: List of time steps associated with each sensor cycle.
            gym_dt: Time step length associated with gym environment cycle.
                This should match the dt increment set in openai gym.
            is_render: Whether the gym simulator renders the scene.
            **kwargs: Keyword arguments
        """
        # NOTE(review): sensor_dt is a mutable default argument. It is only
        # read here, never mutated, so this is currently harmless, but a
        # None-sentinel default would be safer.
        self.agent_dt = agent_dt
        self.gym_dt = gym_dt
        self.sensor_dt = sensor_dt
        self.is_render = is_render

        # Register a variant of InvertedDoublePendulum with a finer dt:
        from gym.envs.registration import register
        register(
            id='SimDoublePendulum-v0',
            entry_point='senseact.envs.sim_double_pendulum:InvertedDoublePendulumEnvSenseAct',
            max_episode_steps=1000,
            reward_threshold=9100.0,
        )
        self.env = gym.make('SimDoublePendulum-v0') #Same as DoubleInvertedPendulumEnv-v2 but with dt=0.001s
        self.episode_steps = 0
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        # Shared-memory double holding the latest reward, written from the
        # sensation callback:
        self._reward_ = Value('d', 0.0)
        self.observations = np.zeros(self.env.observation_space.shape[0])
        # Episode length corresponds to 2 seconds of agent time:
        self.episode_length_step = int(2 / self.agent_dt)

        #start openai gym simulator in a separate process (simulates real word interactions)
        self._start_simulator_()

        #setup a communicator for each sensor/actuator
        # ------------------------------------------
        # CartCommunicator: reads x_cart position, velocity and sends x actuator commands
        # RodCommunicator: reads position and velocity of rod angles theta1 and theta2
        # Note that the buffer type is used to pass variables from gym environment to RL task.
        # On real robot, this buffer will be replaced by the device communication protocol.
        # ------------------------------------------
        communicator_setups = {
            'CartCommunicator': {
                'Communicator': CartCommunicator,
                'kwargs': {
                    'simulator': self.simulator,
                }
            },
            'RodCommunicator': {
                'Communicator': RodCommunicator,
                'kwargs': {
                    'simulator': self.simulator,
                }
            },
        }

        super(DoubleInvertedPendulumEnv, self).__init__(
            communicator_setups=communicator_setups,
            action_dim=1,
            observation_dim=self.observation_space.shape[0],
            dt=agent_dt,
            **kwargs
        )

    def _reset_(self):
        """Resets the environment episode."""
        #get last command sent to robot
        last_action, _, _ = self._actuator_comms['CartCommunicator'].actuator_buffer.read()
        #build new action vector [last_action[0][0], flag], where flag = 1 means reset mujoco environment
        action = np.array([last_action[0][0], 1])
        #write reset command to actuator_buffer
        self._actuator_comms['CartCommunicator'].actuator_buffer.write(action)
        # Give the simulator process time to act on the reset flag:
        time.sleep(.01)

    def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):
        """Creates and saves an observation vector based on sensory data.

        For the DoubleInvertedPendulumEnv environment the observation vector is a concatenation of:
            - cart position: x
            - link angle: [sin(theta1), sin(theta2)]
            - link angle: [cos(theta1), cos(theta2)]
            - velocities: [dx dtheta1 dtheta2]

        Args:
            name: a string specifying the name of a communicator that
                received given sensory data.
            sensor_window: a list of latest sensory observations stored in
                communicator sensor buffer. the length of list is defined by
                obs_history parameter.
            timestamp_window: a list of latest timestamp values stored in
                communicator buffer.
            index_window: a list of latest sensor index values stored in
                communicator buffer.

        Returns:
            A numpy array containing concatenated [observation, reward, done]
            vector.
        """
        if name == 'CartCommunicator':
            # Slots 0 and 5 hold cart position and cart velocity:
            self.observations[[0, 5]] = sensor_window[0]
            # update local environment with most recent cart observations
            self.env.env.data.set_joint_qpos('slider', self.observations[0])
            self.env.env.data.set_joint_qvel('slider', self.observations[5])
        elif name == 'RodCommunicator':
            # Rod observations fill the sin/cos angle and angular velocity slots:
            self.observations[[1, 3, 6, 2, 4, 7]] = sensor_window[0]
            # update local environment with most recent rod observations
            self.env.env.data.set_joint_qpos('hinge', np.arcsin(self.observations[1]))
            self.env.env.data.set_joint_qvel('hinge', self.observations[6])
            self.env.env.data.set_joint_qpos('hinge2', np.arcsin(self.observations[2]))
            self.env.env.data.set_joint_qvel('hinge2', self.observations[7])
        # NOTE(review): _compute_reward() returns a 1-element numpy array that
        # is assigned to a ctypes double Value — relies on implicit scalar
        # conversion; confirm with the numpy version in use.
        self._reward_.value = self._compute_reward()
        #set state of local environment with most recent observations
        self.env.env.set_state(self.env.env.data.qpos, self.env.env.data.qvel)
        x, _, y = self.env.env.data.site_xpos[0]
        #check if episode is done
        done = bool(y <= 1)
        return np.concatenate(
            (
                self.observations,
                np.array([self._reward_.value]),
                np.array([done])
            )
        )

    def _compute_actuation_(self, action, timestamp, index):
        """Creates and sends actuation packets to the communicator.

        Computes actuation commands based on agent's action and
        control type and writes actuation packets to the
        communicators' actuation buffers.

        Args:
            action: a float containing the action value
            timestamp: a float containing action timestamp
            index: an integer containing action index
        """
        self._actuation_packet_['CartCommunicator'][0] = action
        self._actuation_packet_['CartCommunicator'][-1] = 0  #flag=0 means do not reset

    def _compute_reward(self):
        """Computes reward at a given timestamp.

        The reward is defined as in
        <rllab/rllab/envs/mujoco/inverted_double_pendulum_env.py>.
        The reward is computed using the latest observations updates.

        Returns:
            An array containing the scalar reward
        """
        x, _, y = self.env.env.data.site_xpos[0]
        dist_penalty = 0.01 * x ** 2 + (y - 2) ** 2
        v1, v2 = self.env.env.data.qvel[1:3]
        vel_penalty = 1e-3 * v1 ** 2 + 5e-3 * v2 ** 2
        alive_bonus = 10
        reward = float(alive_bonus - dist_penalty - vel_penalty)
        # Scale by the ratio of agent to simulator time steps:
        reward *= (self.agent_dt / self.gym_dt)
        return np.array([reward])

    def _check_done(self, env_done):
        """Checks whether the episode is over.

        Args:
            env_done: a bool specifying whether the episode should be ended.

        Returns:
            A bool specifying whether the episode is over.
        """
        #update episode length
        self.episode_steps += 1
        if self.episode_steps >= self.episode_length_step or env_done:
            done = True
        else:
            done = False
        return done

    def reset(self, blocking=True):
        """Resets environment and updates episode_steps.

        Returns:
            Array of observations
        """
        ret = super(DoubleInvertedPendulumEnv, self).reset(blocking=blocking)
        self.episode_steps = 0
        return ret

    def close(self):
        """Closes all manager threads and communicator processes.
        Overwrites rtrl_env close method.
        """
        for name, comm in self._all_comms.items():
            comm.terminate()
            comm.join()
        # Terminate the simulator's sensor post-processing subprocesses:
        for process in self.simulator.sensor_pp:
            process.terminate()
            process.join()
        # Terminate the simulator process itself:
        self.pp.terminate()
        self.pp.join()
        super(DoubleInvertedPendulumEnv, self).close()

    def _start_simulator_(self):
        """Starts gym simulator as in independent process that simulates the real world running in real-time."""
        #Define Simulator object
        self.simulator = GymSimulator(self.env,
                                      gym_dt=self.gym_dt,
                                      sensor_dt=self.sensor_dt,
                                      is_render=self.is_render,
                                      )
        #start simulator as a separate process
        self.pp = Process(target=self.simulator.run_simulator, args=())
        self.pp.start()
|
app.py | from src.kafka_module.kf_service import process_vision_ocr_kf, vision_ocr_request_worker
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_error
from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS
from src import routes
import config
import threading
import time
from src.utilities.app_context import LOG_WITHOUT_CONTEXT
# The Flask application serving the vision-OCR routes:
merge_app = Flask(__name__)
def start_kafka():
    """Launch the vision-OCR kafka consumer and request worker, each on its
    own background thread. Errors during thread startup are logged, not raised."""
    try:
        consumer = threading.Thread(target=process_vision_ocr_kf, name='vision_ocr-consumer-thread')
        consumer.start()
        log_info("multithread Kafka running on multithread", LOG_WITHOUT_CONTEXT)

        worker = threading.Thread(target=vision_ocr_request_worker, name='vision_ocr-worker-thread')
        worker.start()
        log_info("Starting vision_ocr_request_worker", LOG_WITHOUT_CONTEXT)
    except Exception as e:
        log_error("threading ERROR WHILE RUNNING CUSTOM THREADS ", LOG_WITHOUT_CONTEXT, e)
# Allow cross-origin requests on the API endpoints when configured:
if config.ENABLE_CORS:
    cors = CORS(merge_app, resources={r"/api/*": {"origins": "*"}})

# Register every Blueprint exported by src.routes under the API prefix:
for blueprint in vars(routes).values():
    if isinstance(blueprint, Blueprint):
        merge_app.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)

if __name__ == "__main__":
    # Start the kafka threads, then serve the Flask app:
    start_kafka()
    print(merge_app.url_map)
    merge_app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)
|
vnlbank.py | # encoding: utf-8
import urllib
import hashlib
import requests
from Queue import Queue, Empty
from threading import Thread
from time import sleep
API_ROOT ="https://api.lbank.info/v1/"
FUNCTION_TICKER = ('ticker.do', 'get')
FUNCTION_DEPTH = ('depth.do', 'get')
FUNCTION_TRADES = ('trades.do', 'get')
FUNCTION_KLINE = ('kline.do', 'get')
FUNCTION_USERINFO = ('user_info.do', 'post')
FUNCTION_CREATEORDER = ('create_order.do', 'post')
FUNCTION_CANCELORDER = ('cancel_order.do', 'post')
FUNCTION_ORDERSINFO = ('orders_info.do', 'post')
FUNCTION_ORDERSINFOHISTORY = ('orders_info_history.do', 'post')
#----------------------------------------------------------------------
def signature(params, secretKey):
    """Build the request signature (Python 2).

    Sorts the parameters by key, appends the secret key, urlencodes the
    result and returns the hex MD5 digest expected by the LBank API.
    """
    params = sorted(params.iteritems(), key=lambda d:d[0], reverse=False)
    params.append(('secret_key', secretKey))
    message = urllib.urlencode(params)
    m = hashlib.md5()
    m.update(message)
    m.digest()
    sig=m.hexdigest()
    return sig
########################################################################
class LbankApi(object):
""""""
DEBUG = True
#----------------------------------------------------------------------
def __init__(self):
    """Constructor"""
    self.apiKey = ''
    self.secretKey = ''

    self.interval = 1                  # wait between consecutive requests (seconds)
    self.active = False                # whether the API worker is running
    self.reqID = 0                     # request sequence number
    self.reqQueue = Queue()            # queue of pending requests
    self.reqThread = Thread(target=self.processQueue)  # request worker thread
#----------------------------------------------------------------------
def init(self, apiKey, secretKey, interval):
    """Store credentials and rate-limit interval, then start the worker thread."""
    self.apiKey = apiKey
    self.secretKey = secretKey
    self.interval = interval

    self.active = True
    self.reqThread.start()
#----------------------------------------------------------------------
def exit(self):
    """Stop the worker loop and wait for the worker thread to finish."""
    self.active = False

    if self.reqThread.isAlive():
        self.reqThread.join()
#----------------------------------------------------------------------
def processRequest(self, req):
    """Send one queued request to the REST API.

    Returns the decoded JSON dict on HTTP 200, otherwise None.
    """
    # Unpack endpoint, HTTP method and parameters:
    api, method = req['function']
    params = req['params']

    url = API_ROOT + api

    # Add the mandatory api_key field:
    params['api_key'] = self.apiKey

    # Sign the request:
    sign = signature(params, self.secretKey)
    params['sign'] = sign

    # Send the request:
    payload = urllib.urlencode(params)
    r = requests.request(method, url, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
#----------------------------------------------------------------------
def processQueue(self):
    """Worker loop: pop requests off the queue until exit() clears self.active."""
    while self.active:
        try:
            # Block for at most one second waiting for a request:
            req = self.reqQueue.get(block=True, timeout=1)
            callback = req['callback']
            reqID = req['reqID']

            data = self.processRequest(req)

            # Request failed at the HTTP level:
            if data is None:
                error = u'请求失败'
                self.onError(error, req, reqID)
            elif 'error_code' in data:
                # The API reported an application-level error code:
                error = u'请求出错,错误代码:%s' % data['error_code']
                self.onError(error, req, reqID)
            # Request succeeded:
            else:
                if self.DEBUG:
                    print callback.__name__
                callback(data, req, reqID)

            # Rate limiting: wait before the next request
            sleep(self.interval)

        except Empty:
            pass
#----------------------------------------------------------------------
def sendRequest(self, function, params, callback):
    """Queue a request for asynchronous processing; returns its request id."""
    # Increment the request id:
    self.reqID += 1

    # Build the request dict and enqueue it:
    req = {}
    req['function'] = function
    req['params'] = params
    req['callback'] = callback
    req['reqID'] = self.reqID
    self.reqQueue.put(req)

    # Return the request id so the caller can correlate the callback:
    return self.reqID
#----------------------------------------------------------------------
def onError(self, error, req, reqID):
    """Error push: print the error, the originating request and its id."""
    print error, req, reqID
###############################################
# 行情接口
###############################################
#----------------------------------------------------------------------
def getTicker(self, symbol):
    """Query the market ticker for a symbol (async; result via onGetTicker)."""
    return self.sendRequest(FUNCTION_TICKER, {'symbol': symbol}, self.onGetTicker)
# ----------------------------------------------------------------------
def getDepth(self, symbol, size, merge):
    """Query order-book depth (async; result delivered via onGetDepth).

    Args:
        symbol: trading pair symbol, e.g. 'eth_btc'.
        size: number of price levels to return.
        merge: depth aggregation (price-merging) precision.
    """
    function = FUNCTION_DEPTH
    # Bug fix: the parameter name was misspelled 'mege', so the merge
    # setting was never transmitted to the LBank depth.do endpoint.
    params = {
        'symbol': symbol,
        'size': size,
        'merge': merge
    }
    callback = self.onGetDepth
    return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getTrades(self, symbol, size, time):
    """Query recent trade history (async; result via onGetTrades)."""
    query = {'symbol': symbol, 'size': size, 'time': time}
    return self.sendRequest(FUNCTION_TRADES, query, self.onGetTrades)
# ----------------------------------------------------------------------
def getKline(self, symbol, size, type_, time):
    """Query candlestick (K-line) data (async; result via onGetKline)."""
    query = {
        'symbol': symbol,
        'size': size,
        'type': type_,
        'time': time
    }
    return self.sendRequest(FUNCTION_KLINE, query, self.onGetKline)
#----------------------------------------------------------------------
def onGetTicker(self, data, req, reqID):
    """Ticker query callback: print the response data and request id."""
    print data, reqID
# ----------------------------------------------------------------------
def onGetDepth(self, data, req, reqID):
    """Depth query callback: print the response data and request id."""
    print data, reqID
# ----------------------------------------------------------------------
def onGetTrades(self, data, req, reqID):
    """Trade-history query callback: print the response data and request id."""
    print data, reqID
# ----------------------------------------------------------------------
def onGetKline(self, data, req, reqID):
    """K-line query callback: print the response data and request id."""
    print data, reqID
###############################################
# 交易接口
###############################################
# ----------------------------------------------------------------------
def getUserInfo(self):
    """Query account information (balances)."""
    return self.sendRequest(FUNCTION_USERINFO, {}, self.onGetUserInfo)
# ----------------------------------------------------------------------
def createOrder(self, symbol, type_, price, amount):
    """Submit a new order."""
    req = {
        'symbol': symbol,
        'type': type_,
        'price': price,
        'amount': amount,
    }
    return self.sendRequest(FUNCTION_CREATEORDER, req, self.onCreateOrder)
# ----------------------------------------------------------------------
def cancelOrder(self, symbol, orderId):
    """Cancel an existing order."""
    req = {'symbol': symbol, 'order_id': orderId}
    return self.sendRequest(FUNCTION_CANCELORDER, req, self.onCancelOrder)
# ----------------------------------------------------------------------
def getOrdersInfo(self, symbol, orderId):
    """Query the state of a working order."""
    req = {'symbol': symbol, 'order_id': orderId}
    return self.sendRequest(FUNCTION_ORDERSINFO, req, self.onGetOrdersInfo)
# ----------------------------------------------------------------------
def getOrdersInfoHistory(self, symbol, status, currentPage, pageLength):
    """Query historical orders, paginated.

    (The original docstring said "cancel order"; this method actually
    queries order history.)
    """
    req = {
        'symbol': symbol,
        'status': status,
        'current_page': currentPage,
        'page_length': pageLength,
    }
    return self.sendRequest(FUNCTION_ORDERSINFOHISTORY, req, self.onGetOrdersInfoHistory)
# ----------------------------------------------------------------------
def onGetUserInfo(self, data, req, reqID):
    """Callback for getUserInfo (account info query)."""
    print data, reqID

# ----------------------------------------------------------------------
def onCreateOrder(self, data, req, reqID):
    """Callback for createOrder (order submission result)."""
    print data, reqID

# ----------------------------------------------------------------------
def onCancelOrder(self, data, req, reqID):
    """Callback for cancelOrder (cancellation result)."""
    print data, reqID

# ----------------------------------------------------------------------
def onGetOrdersInfo(self, data, req, reqID):
    """Callback for getOrdersInfo (working-order query)."""
    print data, reqID

# ----------------------------------------------------------------------
def onGetOrdersInfoHistory(self, data, req, reqID):
    """Callback for getOrdersInfoHistory (order-history query).
    NOTE(review): original docstring said 'cancel' -- this is the
    history-query callback."""
    print data, reqID
|
D_1_Recalculate_new_levenshtein.py | import pandas as pd
import numpy as np
import math
import Levenshtein
from tqdm import tqdm
from multiprocessing import Process
def recalculate_levenshtein_distance(log_process, process_id, OUTPUT_PATH):
    """Recompute the Levenshtein distance between each row's text_field and
    text_field_prev, writing it to a new 'new_lev_dist' column, then save the
    frame to OUTPUT_PATH + str(process_id) + '.csv'.

    Several sentence-boundary special cases temporarily replace an empty or
    stale field with a single space (so Levenshtein.distance never sees a NaN
    or a whole previous sentence), compute the distance, then restore ''.

    NOTE(review): intended to run once per worker process on a contiguous
    row-slice of the full log; assumes columns 'ts_id', 'text_field',
    'text_field_prev' and 'key' -- confirm against the caller (Run_D_1).
    """
    # Sentinel so any row the loop somehow skips is recognizable afterwards.
    log_process['new_lev_dist'] = -7
    for row in range(log_process.shape[0]):
        ## Very first row
        if (row == 0) and (len(str(log_process.loc[log_process.index[row], 'text_field_prev'])) > 5) and (log_process.loc[log_process.index[row], 'text_field'] == log_process.loc[log_process.index[row], 'key']):
            log_process.loc[log_process.index[row], 'text_field_prev'] = ' '
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'text_field_prev'] = ''
            continue
        ## Beginning of new sentences where text_field_prev contains sentence from previous row
        if (row > 0) and (log_process.loc[log_process.index[row], 'ts_id'] != log_process.loc[log_process.index[row-1], 'ts_id']) and (log_process.loc[log_process.index[row-1], 'text_field'] == log_process.loc[log_process.index[row], 'text_field_prev']):
            log_process.loc[log_process.index[row], 'text_field_prev'] = ' '
            #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'text_field_prev'] = ''
            continue
        ## Beginning of new sentences where text_field contains one letter and text_field_prev contains a whole sentence, also ts_ids are different between row and row-1
        if (row > 0) and (log_process.loc[log_process.index[row], 'ts_id'] != log_process.loc[log_process.index[row-1], 'ts_id']) and (len(str(log_process.loc[log_process.index[row], 'text_field'])) == 1) and (len(str(log_process.loc[log_process.index[row], 'text_field_prev'])) > 5):
            log_process.loc[log_process.index[row], 'text_field_prev'] = ' '
            #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'text_field_prev'] = ''
            continue
        ## Beginning of new sentences where text_field contains one letter and text_field_prev contains a whole sentence, also ts_ids are different between row and row-1 and text_field is identical to key
        if (row > 0) and (log_process.loc[log_process.index[row], 'ts_id'] != log_process.loc[log_process.index[row-1], 'ts_id']) and (str(log_process.loc[log_process.index[row], 'text_field']) == str(log_process.loc[log_process.index[row], 'key'])) and (len(str(log_process.loc[log_process.index[row], 'text_field_prev'])) > 5):
            log_process.loc[log_process.index[row], 'text_field_prev'] = ' '
            #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'text_field_prev'] = ''
            continue
        ## Beginning of new sentences with other sentences than the one from the previous row
        ## (could be due to removal of certain invalid sentences which contain cryptic letters)
        if (len(str(log_process.loc[log_process.index[row],'text_field'])) == 1) and (str(log_process.loc[log_process.index[row],'text_field_prev']) == ' ') and (len(str(log_process.loc[log_process.index[row],'key'])) == 1) and (log_process.loc[log_process.index[row],'key'] != ' '):
            log_process.loc[log_process.index[row], 'text_field_prev'] = ' '
            #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row],'text_field_prev'] = ''
            continue
        ## Use of backspace until the whole sentence is erased led to false new_lev_dist calculations
        ## because of nan value of empty text_field_prev or text_field
        ## We solve this by temporarily changing the empty field ('') to a space (' '), recalculate the lev_dist and then emptying the field again
        if (len(str(log_process.loc[log_process.index[row], 'text_field_prev'])) == 1) and (log_process.loc[log_process.index[row],'key'] == '_'):
            log_process.loc[log_process.index[row], 'text_field'] = ' '
            #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'text_field'] = ''
            continue
        ## Row right after a full backspace-erase: text_field restarts with the typed key.
        if (row > 0) and (log_process.loc[log_process.index[row],'text_field'] == log_process.loc[log_process.index[row], 'key']) and (log_process.loc[log_process.index[row-1],'key'] == '_'):
            log_process.loc[log_process.index[row], 'text_field_prev'] = ' '
            #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log.loc[log_process.index[row], 'text_field']), str(log.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
            log_process.loc[log_process.index[row], 'text_field_prev'] = ''
            continue
        ## Default case: plain distance between current and previous field.
        #log_process.loc[log_process.index[row], 'new_lev_dist'] = stringdist.levenshtein(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
        log_process.loc[log_process.index[row], 'new_lev_dist'] = Levenshtein.distance(str(log_process.loc[log_process.index[row], 'text_field']), str(log_process.loc[log_process.index[row], 'text_field_prev']))
    # Save files
    # print('Saving files...')
    log_process.to_csv(OUTPUT_PATH + str(process_id) + '.csv', index=False)
def Run_D_1(LOG_ID):
    """Recalculate Levenshtein distances for one log, fanned out over 40 worker
    processes; each worker writes its own output CSV slice."""
    print(' ----- D1 -----------------------------------------------------------\n')
    global INPUT_PATH
    global OUTPUT_PATH
    INPUT_PATH = r'C:\\Users\\eu_el\\Desktop\\VB_Schnittstelle\\Dataset\\Data\\Log' + str(LOG_ID) + '\\log_' + str(LOG_ID) + '_valid_post_processed_new_decrypted.csv'
    OUTPUT_PATH = r'C:\\Users\\eu_el\\Desktop\\VB_Schnittstelle\\Dataset\\Data\\Log' + str(LOG_ID) + '\\New_Levenshtein\\log_' + str(LOG_ID) + '_valid_post_processed_new_decrypted_new_levenshtein_after_process_'
    frame = pd.read_csv(INPUT_PATH)

    # Split the frame into one contiguous row-slice per worker process.
    worker_count = 40
    chunk = math.ceil(frame.shape[0] / worker_count)
    workers = []
    logs = {}
    for idx in range(worker_count):
        lo = idx * chunk
        # The last worker absorbs the remainder rows.
        hi = frame.shape[0] if idx + 1 == worker_count else lo + chunk
        logs[idx] = pd.DataFrame(frame[lo:hi], columns=frame.columns)
        workers.append(Process(target=recalculate_levenshtein_distance,
                               args=[logs[idx], idx, OUTPUT_PATH]))

    for w in tqdm(workers):
        w.start()
    for w in workers:
        w.join()
    print('\nD1 DONE\n')
popen.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import copy
import logging
import os
import subprocess
import threading
import time
from .constants import PIPE, STDOUT, DEVNULL
from .exceptions import TimeoutExpired
logger = logging.getLogger(__name__)
class Popen:
'''
It wraps multiple subprocess.popen and provides I/F like subprocess.Popen.
'''
polling_interval = 0.1
'''
Parameters
----------
popen_args_list
The list of pipechildren.PopenArgs
stderr
Specify One of pipechildren.DEVNULL, pipechildren.STDOUT, or file-like object
'''
def __init__(self, popen_args_list, stdin=None, stdout=None, stderr=None, universal_newlines=None, encoding=None, errors=None, text=None, _debug_communicate_io=False):
    """Build the pipeline: one subprocess.Popen per entry in popen_args_list,
    each process's stdout connected to the next one's stdin."""
    # Any of these flags switches the whole pipeline to text mode.
    self.text = universal_newlines or encoding or errors or text
    self.encoding = encoding
    self.popen_args_list = popen_args_list
    self.processes = []           # subprocess.Popen objects, upstream first
    self.stdin = None
    self.stdout = None
    self.stderr = None
    self.stderr_write_end = None  # write end of the merged-stderr pipe
    self.outs = None              # accumulated stdout (set by communicate)
    self.errs = None              # accumulated stderr (set by communicate)
    self.pids = []
    self.returncodes = []
    self._debug_communicate_io = _debug_communicate_io
    self._communicate_called = False
    self._workers = {
        "stderr_drainer": None,
        "close_stderr_write_end_worker": None,
        "waiter": None,
        "stdin_worker": None,
        "stdout_worker": None,
        "stderr_worker": None
    }
    self._stop_workers = False

    '''
    Call popen with each popen_args and connect stdout -> stdin
    between subprocesses.
    '''
    # previous stdout goes into current stdin
    prev_out = stdin
    for i in range(len(self.popen_args_list)):
        pa = self.popen_args_list[i]
        if i == len(self.popen_args_list) - 1:
            # Last
            _stdout = stdout
        else:
            _stdout = subprocess.PIPE
        # Per-process stderr override falls back to the pipeline-wide one.
        _stderr = pa.stderr if pa.stderr else stderr
        p = subprocess.Popen(stdout=_stdout,
                             stdin=prev_out,
                             stderr=_stderr,
                             text=self.text,
                             encoding=self.encoding,
                             **pa.popen_kwargs)
        setattr(p, "name", pa.name)
        logger.info(f"Popening({pa.fullname})")
        if i > 0:
            """
            piped stdout/stdin is connected between subprocesses and used in
            forked sub-processes. We should release them not to prevent pipe close.
            """
            self.processes[-1].stdout.close()
            self.processes[-1].stdout = None
        self.processes.append(p)
        self.pids.append(p.pid)
        prev_out = p.stdout
    #self._start_pipe_closer()

    # Expose the pipeline's endpoints only when the caller asked for PIPE.
    if stdin is PIPE:
        self.stdin = self.processes[0].stdin
    else:
        self.stdin = None
    if stdout is PIPE:
        self.stdout = self.processes[-1].stdout
    else:
        self.stdout = None
    if stderr is PIPE:
        logger.debug("stderr is PIPE")
        if len(self.processes) == 1:
            # Single child: expose its stderr directly.
            self.stderr = self.processes[0].stderr
        else:
            # Several children: merge all their stderr streams into one os.pipe.
            r, w = os.pipe()
            if self.text:
                self.stderr = os.fdopen(r, 'r')
                self.stderr_write_end = os.fdopen(w, 'w')
            else:
                self.stderr = os.fdopen(r, 'rb')
                self.stderr_write_end = os.fdopen(w, 'wb')
            self._start_stderr_drainer()
    else:
        self.stderr = None
        self.stderr_write_end = stderr
        if stderr:
            self._start_stderr_drainer()
@staticmethod
def _work_text_drainer(_self, name, reader, data_writer):
    '''
    Generic thread worker: read text lines from <reader> and pass each to
    the callback data_writer(line).

    NOTE: It is a STATIC method, called like self._work_text_drainer(self, ...)
    so the Popen instance is passed explicitly as _self.

    data_writer() gets one line as its argument and must return
    False once the writer is no longer available.
    '''
    logger.debug(f"_work_text_drainer {name} started")
    while (not _self._stop_workers):
        line = reader.readline()
        if not line:
            break  # EOF on the reader
        if _self._debug_communicate_io:
            logger.debug(f"{name} -> {line}")
        if not data_writer(line):
            break  # downstream writer is closed
    logger.debug(f"_work_text_drainer {name} finished.")
@staticmethod
def _work_binary_drainer(_self, name, reader, data_writer):
    '''
    Generic thread worker: read binary chunks (4 KiB) from <reader> and pass
    each to the callback data_writer(data).

    NOTE: It is a STATIC method, called like self._work_binary_drainer(self, ...)
    so the Popen instance is passed explicitly as _self.

    data_writer() gets binary data as its argument and must return
    False once the writer is no longer available.
    '''
    logger.debug(f"_work_binary_drainer {name} started")
    while (not _self._stop_workers):
        data = reader.read(4096)
        if not data:
            break  # EOF on the reader
        if _self._debug_communicate_io:
            logger.debug(f"{name} -> {data}")
        if not data_writer(data):
            logger.debug(f"{name} -> EOF")
            break
    logger.debug(f"_work_binary_drainer {name} finished.")
def _start_stderr_drainer(self):
    '''
    Drain stderr from all sub-processes and funnel it into the single
    merged stderr pipe (self.stderr_write_end).

    Fixes:
    - The per-process drainer lambdas captured the loop variables
      ``name``/``p`` by reference (late binding); a thread scheduled
      after the loop advanced could drain the wrong process's stderr.
      They are now bound as lambda default arguments.
    - The close-worker thread was named with the stale loop variable
      ``name``; it now gets its own name.
    - The close-worker busy-waited at 100% CPU; it now sleeps
      ``polling_interval`` between liveness checks.
    '''
    stderr_drainer = []

    def stderr_write_end_writer(data):
        # Returns False once the write end is closed so drainers stop.
        if self.stderr_write_end.closed:
            return False
        self.stderr_write_end.write(data)
        return True

    work = self._work_text_drainer if self.text else self._work_binary_drainer
    for p in self.processes:
        name = f"{p.name}_stderr_drainer"
        # Bind loop variables as defaults to avoid late-binding capture.
        drainer = lambda _work=work, _name=name, _reader=p.stderr: _work(
            self, _name, _reader, stderr_write_end_writer)
        t = threading.Thread(name=name, target=drainer)
        t.start()
        stderr_drainer.append(t)
    self._workers["stderr_drainer"] = stderr_drainer

    if self.stderr:
        # We need a close worker, otherwise the reader never sees EOF.
        def work_close_stderr_write_end():
            logger.debug(f"work_close_stderr_write_end started")
            drainers = self._workers["stderr_drainer"]
            while not self._stop_workers:
                if not any(t.is_alive() for t in drainers):
                    break
                time.sleep(self.polling_interval)  # avoid busy-wait
            logger.debug(f"work_close_stderr_write_end finished")
            self.stderr_write_end.close()

        close_stderr_write_end_worker = threading.Thread(
            target=work_close_stderr_write_end,
            name="close_stderr_write_end_worker")
        close_stderr_write_end_worker.start()
        self._workers["close_stderr_write_end_worker"] = close_stderr_write_end_worker
def __enter__(self):
    # Context-manager entry: the pipeline object itself is the resource.
    return self
def __exit__(self, exc_type, exc_value, traceback):
    # To support "with pipechildren.Popen() as p:"
    #
    # Bug fix: __exit__ previously took no exception arguments, so leaving
    # a `with` block always raised TypeError -- the context-manager
    # protocol calls __exit__(exc_type, exc_value, traceback).
    self.wait()
    return False  # never suppress exceptions raised inside the with-body
def poll(self):
'''
Check if child process has terminated. Set and return returncode list attribute. Otherwise, returns None.
Returns
----------
returncode
list of returncode of subprocesses.
'''
self.returncodes = [p.poll() for p in self.processes]
if None in self.returncodes:
return None
return self.returncodes
def wait(self, timeout=None):
    '''
    Wait for child processes to terminate. Set and return returncode attribute.
    If the process does not terminate after timeout seconds,
    raise a TimeoutExpired exception.
    It is safe to catch this exception and retry the wait.
    Returns
    ----------
    returncodes
        list of returncodes of subprocesses.
    '''
    logger.debug("wait started")

    # One waiter thread per child so the timeout budget applies to each
    # process concurrently instead of being consumed sequentially.
    def work_wait(name, p, timeout):
        logger.debug(f"waiter {name} started")
        ret = None
        try:
            ret = p.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            logger.debug(f"waiter {name} timed out.")
        else:
            logger.debug(f"waiter {name} finished")
        return ret

    waiter = []
    for p in self.processes:
        name = f"{p.name}_waiter"
        # NOTE(review): this lambda captures name/p late; the thread is
        # started before the loop advances so it usually reads the right
        # values, but default-arg binding would be safer -- confirm.
        t = threading.Thread(
            target=lambda: work_wait(name, p, timeout),
            name=name)
        t.start()
        waiter.append(t)
    self._workers["waiter"] = waiter
    for t in waiter:
        t.join()
    self._workers["waiter"] = None
    # poll() returns None while any child is still running -> timed out.
    returncodes = self.poll()
    if returncodes is None:
        raise TimeoutExpired(self.popen_args_list, timeout, stdout=self.outs, stderr=self.errs)
    logger.debug("wait finished")
    return returncodes
def _time_left_sec(self, timeout_at):
if timeout_at:
time_left_sec = (timeout_at - datetime.now()).total_seconds()
if time_left_sec < 0:
return 0
else:
return time_left_sec
return None
def get_timeout_at(self, timeout):
return datetime.now() + timedelta(seconds=timeout)
def _start_communicate_pipes(self, input=None):
    '''
    Start threads below. It's called only once when communicate is called first time.
    - Thread1: write <input> to stdin if stdin is PIPE and <input> is given.
    - Thread2: read stdout to outs if stdout is PIPE
    - Thread3: read stderr to errs if stderr is PIPE

    Bug fix: the default used to be ``input=input`` -- i.e. the *builtin*
    ``input`` function, which is always truthy -- instead of ``None``
    meaning "no data". ``None`` is the correct default (and what
    communicate() passes explicitly anyway).
    '''
    logger.debug("_start_communicate_pipes called")

    def work_stdin(input=None):
        '''
        Thread worker: write <input> to stdin in 4 KiB slices, then close it.
        '''
        logger.debug("stdin_worker started")
        start = 0
        step = 4096
        end = start + step
        while not self._stop_workers and not self.stdin.closed:
            if len(input) > end:
                if self._debug_communicate_io:
                    logger.debug(f"->stdin {input[start:end]}")
                self.stdin.write(input[start:end])
            else:
                # Last (possibly short) slice.
                if self._debug_communicate_io:
                    logger.debug(f"->stdin {input[start:]}")
                self.stdin.write(input[start:])
                break
            start += step
            end += step
        self.stdin.close()
        logger.debug("stdin_worker finished")

    def add_to_outs_writer(data):
        '''
        Writer used by stdout drainer thread
        '''
        self.outs += data
        return True

    def add_to_errs_writer(data):
        '''
        Writer used by stderr drainer thread
        '''
        self.errs += data
        return True

    if input and self.stdin:
        stdin_worker = threading.Thread(
            target=lambda: work_stdin(input=input),
            name="stdin_worker")
        stdin_worker.start()
        self._workers["stdin_worker"] = stdin_worker
    elif self.stdin:
        # No input to feed: close stdin so the first child sees EOF.
        self.stdin.close()

    if self.stdout:
        if self.text:
            self.outs = ''
            drainer = lambda: self._work_text_drainer(self,
                                                      'stdout_drainer',
                                                      self.stdout,
                                                      add_to_outs_writer)
        else:
            self.outs = b''
            drainer = lambda: self._work_binary_drainer(self,
                                                        'stdout_drainer',
                                                        self.stdout,
                                                        add_to_outs_writer)
        stdout_worker = threading.Thread(
            target=drainer,
            name="stdout_worker")
        stdout_worker.start()
        self._workers["stdout_worker"] = stdout_worker

    if self.stderr:
        if self.text:
            self.errs = ''
            drainer = lambda: self._work_text_drainer(self,
                                                      'stderr_drainer',
                                                      self.stderr,
                                                      add_to_errs_writer)
        else:
            self.errs = b''
            drainer = lambda: self._work_binary_drainer(self,
                                                        'stderr_drainer',
                                                        self.stderr,
                                                        add_to_errs_writer)
        stderr_worker = threading.Thread(
            target=drainer,
            name="stderr_worker")
        stderr_worker.start()
        self._workers["stderr_worker"] = stderr_worker
def communicate(self, input=None, timeout=None):
    '''
    Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
    Wait for process to terminate. The optional input argument should be data to be sent
    to the upper stream child process, or None, if no data should be sent to the child.
    If streams were opened in text mode, input must be a string. Otherwise, it must be bytes.
    Returns
    ----------
    stdout_data
        stdout of down most process
    stderr_data
        stderr of whole process if pipechildren.PIPE is specified.
        The data will be strings if streams were opened in text mode; otherwise, bytes.
    '''
    logger.debug("communicate called")
    if len(self.processes) == 1:
        # In this case, just call subprocess.communicate
        self.outs, self.errs = self.processes[0].communicate(input=input, timeout=timeout)
        return self.outs, self.errs

    # Spawn the I/O worker threads only on the first call; later calls
    # just re-join the existing workers.
    firsttime = True
    if self._communicate_called:
        firsttime = False
    self._communicate_called = True
    if firsttime:
        self._start_communicate_pipes(input=input)

    timeout_at = None
    if timeout:
        timeout_at = self.get_timeout_at(timeout)

    self.wait(timeout=timeout)
    # If self.wait() timed out, it raises to the caller out of this method.

    # If we reach here, all processes have finished.
    # Close stdin first then wait for the end of output workers.
    if self.stdin:
        self.stdin.close()

    # Join each worker with whatever is left of the overall deadline.
    timedout = False
    if self._workers["stdin_worker"]:
        timeout_left = self._time_left_sec(timeout_at)
        self._workers["stdin_worker"].join(timeout=timeout_left)
        timedout = self._workers["stdin_worker"].is_alive()
    if self._workers["stdout_worker"] and not timedout:
        timeout_left = self._time_left_sec(timeout_at)
        self._workers["stdout_worker"].join(timeout=timeout_left)
        timedout = self._workers["stdout_worker"].is_alive()
    if self._workers["stderr_worker"] and not timedout:
        timeout_left = self._time_left_sec(timeout_at)
        self._workers["stderr_worker"].join(timeout=timeout_left)
        if not timedout:
            timedout = self._workers["stderr_worker"].is_alive()
    if timedout:
        raise TimeoutExpired(self.popen_args_list, timeout, stdout=self.outs, stderr=self.errs)

    # Guard all workers from running just in case.
    self._stop_workers = True

    # Close up pipes
    if self.stdout:
        self.stdout.close()
    if self.stderr:
        self.stderr.close()
    for p in self.processes:
        if p.stderr:
            p.stderr.close()
    return self.outs, self.errs
def kill(self, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].kill()
else:
for p in self.processes:
p.kill()
def terminate(self, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].terminate()
else:
for p in self.processes:
p.terminate()
def send_signal(self, signal, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].send_signal(signal)
else:
for p in self.processes:
p.send_signal(signal)
|
utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import warnings
import time
import threading
import logging
from bisect import bisect_left
from synphot import SpectralElement
from scipy.interpolate import interp1d
from decimal import Decimal
import astropy.units as u
# Get around logging warnings of missing packages
logging.getLogger('isochrones').setLevel(logging.ERROR)
from isochrones.interp import DFInterpolator
# convert apparent to absolute mag
def abs_mag(app_mag, plx):
    """Absolute magnitude M = m + 5*(1 + log10(plx))."""
    return app_mag + 5 * (1 + np.log10(plx))

# convert apparent mag error to absolute mag error
def abs_mag_error(e_app_mag, plx, e_plx):
    """Propagated uncertainty of the absolute magnitude."""
    plx_term = (5 * e_plx) / (plx * np.log(10))
    return np.sqrt(plx_term**2 + e_app_mag**2)

# convert absolute to apparent mag
def app_mag(abs_mag, plx):
    """Apparent magnitude m = M - 5*(1 + log10(plx)) (inverse of abs_mag)."""
    return abs_mag - 5 * (1 + np.log10(plx))

# convert absolute mag error to apparent mag error
def app_mag_error(e_abs_mag, plx, e_plx):
    """Propagated uncertainty of the apparent magnitude (same form as abs_mag_error)."""
    plx_term = (5 * e_plx) / (plx * np.log(10))
    return np.sqrt(plx_term**2 + e_abs_mag**2)
# grab filter profiles and setup for synphot extinction calculation
def filter_profiles_setup(dirpath):
    """Load bandpass transmission curves from `dirpath` and return a dict
    mapping band key -> synphot.SpectralElement."""
    def load(relpath, unit):
        # All profile files live under `dirpath`; `unit` is the file's wavelength unit.
        return SpectralElement.from_file(dirpath + relpath, wave_unit=unit)
    return {
        # 2MASS
        # https://old.ipac.caltech.edu/2mass/releases/allsky/doc/sec6_4a.html
        '2mass_jmag': load(r'/2MASS.J.dat', u.um),
        '2mass_hmag': load(r'/2MASS.H.dat', u.um),
        '2mass_kmag': load(r'/2MASS.Ks.dat', u.um),
        # Gaia
        # https://www.cosmos.esa.int/web/gaia/edr3-passbands
        'gaia_gmag': load(r'/GAIA_EDR3.G.dat', u.nm),
        'gaia_bpmag': load(r'/GAIA_EDR3.Gbp.dat', u.nm),
        'gaia_rpmag': load(r'/GAIA_EDR3.Grp.dat', u.nm),
        # SDSS
        # https://www.sdss.org/instruments/camera/#Filters
        'sdss_gmag': load(r'/SDSS.g.dat', u.AA),
        'sdss_rmag': load(r'/SDSS.r.dat', u.AA),
        'sdss_imag': load(r'/SDSS.i.dat', u.AA),
        'sdss_zmag': load(r'/SDSS.z.dat', u.AA),
        # Johnson
        # http://svo2.cab.inta-csic.es/svo/theory/
        'johnson_bmag': load(r'/Generic_Johnson.B.dat', u.AA),
        'johnson_vmag': load(r'/Generic_Johnson.V.dat', u.AA),
        # Cousins
        # http://svo2.cab.inta-csic.es/svo/theory/
        'cousins_rmag': load(r'/Generic_Cousins.R.dat', u.AA),
        'cousins_imag': load(r'/Generic_Cousins.I.dat', u.AA),
        # TYCHO
        # http://svo2.cab.inta-csic.es/svo/theory/
        'tycho_bmag': load(r'/TYCHO_TYCHO.B.dat', u.AA),
        'tycho_vmag': load(r'/TYCHO_TYCHO.V.dat', u.AA),
        # Hipparcos
        # http://svo2.cab.inta-csic.es/svo/theory/
        'hipparcos_hpmag': load(r'/Hipparcos_Hipparcos.Hp.dat', u.AA),
        # PS1
        # https://ipp.ifa.hawaii.edu/ps1.filters/
        'ps_gmag': load(r'/PS1.g.dat', u.nm),
        'ps_rmag': load(r'/PS1.r.dat', u.nm),
        'ps_imag': load(r'/PS1.i.dat', u.nm),
        'ps_zmag': load(r'/PS1.z.dat', u.nm),
        'ps_ymag': load(r'/PS1.y.dat', u.nm),
    }
# DataFrame holding the wavelengths and zeropoints for all bands
def flux_meta():
    """Return a DataFrame indexed by band name (sorted) with columns
    'wavelength' (effective wavelength), 'VEGA_zeropoint' and 'AB_zeropoint'
    (zeropoint fluxes) for each photometric band.

    NOTE(review): the PS1 keys here use the 'ps1_' prefix while
    filter_profiles_setup() registers them as 'ps_gmag' etc. -- confirm
    callers look up the right prefix.
    """
    bands = [
        'johnson_bmag',
        'johnson_vmag',
        'cousins_rmag',
        'cousins_imag',
        '2mass_jmag',
        '2mass_hmag',
        '2mass_kmag',
        'gaia_gmag',
        'gaia_bpmag',
        'gaia_rpmag',
        'sdss_gmag',
        'sdss_rmag',
        'sdss_imag',
        'sdss_zmag',
        'tycho_bmag',
        'tycho_vmag',
        'hipparcos_hpmag',
        'ps1_gmag',
        'ps1_imag',
        'ps1_omag',
        'ps1_rmag',
        'ps1_wmag',
        'ps1_ymag',
        'ps1_zmag',
    ]
    bands.sort()
    df = pd.DataFrame(index=bands)
    # Using effective wavelengths
    # http://svo2.cab.inta-csic.es/theory/fps/
    df.loc['johnson_bmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 4378.1, 6.293e-9, 5.679e-9
    df.loc['johnson_vmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 5466.1, 3.575e-9, 3.64326e-9
    # http://svo2.cab.inta-csic.es/theory/fps/
    df.loc['cousins_rmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 6357.96, 2.24563e-9, 2.69285e-9
    df.loc['cousins_imag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 7829.17, 1.20234e-9, 1.77589e-9
    # http://svo2.cab.inta-csic.es/theory/fps/
    df.loc['2mass_jmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 12350, 3.143e-10, 7.21192e-10
    df.loc['2mass_hmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 16620, 1.144e-10, 4.05446e-10
    df.loc['2mass_kmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 21590, 4.306e-11, 2.35016e-10
    # http://svo2.cab.inta-csic.es/theory/fps/ # they have GAIA DR3
    df.loc['gaia_gmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 5822.39, 2.50386e-9, 3.17259e-9
    df.loc['gaia_bpmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 5035.75, 4.07852e-9, 4.27793e-9
    df.loc['gaia_rpmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 7619.96, 1.26902e-9, 1.83971e-9
    # http://svo2.cab.inta-csic.es/theory/fps/ # no effective wavelength from sloan
    df.loc['sdss_gmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 4671.78, 5.45476e-9, 4.98749e-9
    df.loc['sdss_rmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 6141.12, 2.49767e-9, 2.88637e-9
    df.loc['sdss_imag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 7457.89, 1.38589e-9, 1.95711e-9
    df.loc['sdss_zmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 8922.78, 8.38585e-10, 1.36725e-9
    # http://svo2.cab.inta-csic.es/theory/fps/
    df.loc['tycho_bmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 4280.0, 6.5091e-9, 6.14866e-9
    df.loc['tycho_vmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 5340.0, 3.98353e-9, 3.95858e-9
    # http://svo2.cab.inta-csic.es/theory/fps/
    df.loc['hipparcos_hpmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 4897.85, 4.39107e-9, 4.5377e-9
    # http://svo2.cab.inta-csic.es/theory/fps/
    df.loc['ps1_gmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 4810.88, 5.04261e-9, 4.70324e-9
    df.loc['ps1_imag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 7503.68, 1.37212e-9, 1.9333e-9
    df.loc['ps1_omag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 6439.35, 1.88196e-9, 2.6252e-9
    df.loc['ps1_rmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 6156.36, 2.48016e-9, 2.8721e-9
    df.loc['ps1_wmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 5985.87, 2.45753e-9, 3.03803e-9
    df.loc['ps1_ymag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 9613.45, 7.14837e-10, 1.17784e-9
    df.loc['ps1_zmag', ['wavelength', 'VEGA_zeropoint', 'AB_zeropoint']] = 8668.56, 9.06849e-10, 1.44861e-9
    return df
# convert apparent mag to flux and optionally calculate error
def mag_to_flux(app_mag, zp, e_app_mag=None):
    """Flux f = zp * 10**(-m/2.5); returns (f, error) when e_app_mag is given,
    otherwise just f."""
    flux = zp * 10**(-app_mag/2.5)
    if e_app_mag is None:
        return flux
    flux_error = np.abs(-np.log(10) * flux * e_app_mag / 2.5)
    return flux, flux_error
# calculate the log probability of a point on a normal distribution
def log_normal(x, mu, sigma):
    """Log-density of N(mu, sigma) evaluated at x; returns 0.0 when mu or
    sigma is NaN (treated as 'no constraint')."""
    if np.isnan(mu) or np.isnan(sigma):
        return 0.0
    z = (x - mu) / sigma
    return -np.log(np.sqrt(2 * np.pi) * sigma) - 0.5 * z * z
# DataFrame of parameter labels used for plotting
# if `zero_extinction=True` (Av=0), drops Av from index
def plot_labels(zero_extinction=False):
    """Return a DataFrame of plotting labels indexed by parameter name.

    Columns: 'label' (plain text), 'fancy_label' (LaTeX, with units) and
    'fancy_label_unitless' (LaTeX, no units). If zero_extinction is True
    the 'Av' row is dropped.

    Fixes: removed a stray ')' at the end of the density unitless label,
    and switched the LaTeX values to raw strings (the old non-raw
    strings contained invalid escapes like '\\,' that trigger
    SyntaxWarning on modern Python); runtime values are otherwise
    unchanged.
    """
    params = ['age',
              'mass',
              'Av',
              'f',
              'radius',
              'Teff',
              'density'
              ]
    df = pd.DataFrame(index=params)
    cols = ['label', 'fancy_label', 'fancy_label_unitless']
    df.loc['age', cols] = 'age (Myr)', r'$\tau_{\star} \, \mathrm{(Myr)}$', r'$\tau_{\star}$'
    df.loc['mass', cols] = 'mass (M_Sun)', r'$M_{\star} \, (\mathrm{M}_{\odot})$', r'$M_{\star}$'
    df.loc['Av', cols] = 'Av [mag]', r'$A_V \ \mathrm{[mag]}$', r'$A_V$'
    df.loc['f', cols] = 'f [mag]', r'$f \ \mathrm{[mag]}$', r'$f$'
    df.loc['radius', cols] = 'radius (R_Sun)', r'$R_{\star} \, (\mathrm{R}_{\odot})$', r'$R_{\star}$'
    df.loc['Teff', cols] = 'Teff (K)', r'$T_{eff} \, \mathrm{(K)}$', r'$T_{eff}$'
    # was '$\rho_{\star}$)' -- trailing ')' was a typo
    df.loc['density', cols] = 'density (M_Sun/R_Sun^3)', r'$\rho_{\star} \, (\mathrm{M}_{\odot}/{R_{\odot}}^3)$', r'$\rho_{\star}$'
    if zero_extinction:
        df.drop(index='Av', inplace=True)
    return df
# calculate the standard deviation of the residual
def residualSE(x, x_meas, N, ndim):
return np.sqrt(
np.sum((x_meas - x)**2) / (N - ndim)
)
# residual
def residual(a, b):
return a - b
# calculate the square root of sigma**2 given two different errors
def sigma(sigma_a, sigma_b):
    """Combine two uncertainties in quadrature."""
    total_variance = sigma_a ** 2 + sigma_b ** 2
    return np.sqrt(total_variance)
# fractional residual
def frac_res(a, b):
    """Residual of ``a`` relative to ``b``, normalized by ``b``."""
    numerator = a - b
    return numerator / b
# fractional residual in units of sigma
def frac_res_sigma(a, sigma_a, b, sigma_b):
    """Residual ``a - b`` expressed in units of the combined uncertainty."""
    combined_error = sigma(sigma_a, sigma_b)
    return (a - b) / combined_error
# fractional difference
def frac_diff(a, b):
    """Difference of ``a`` and ``b``, normalized by ``a``."""
    numerator = a - b
    return numerator / a
# fractional residual error
def frac_res_error(a, sigma_a, b, sigma_b):
    """Propagated uncertainty of the fractional residual ``(a - b) / b``."""
    term_a = (sigma_a / b) ** 2
    term_b = (a * sigma_b / b ** 2) ** 2
    return np.sqrt(term_a + term_b)
# fractional difference error
def frac_diff_error(a, sigma_a, b, sigma_b):
    """Propagated uncertainty of the fractional difference ``(a - b) / a``."""
    term_a = ((b * sigma_a) / a ** 2) ** 2
    term_b = (sigma_b / a) ** 2
    return np.sqrt(term_a + term_b)
# load isochrone table
def load_isochrone(filename):
    """Read an isochrone CSV into a DataFrame indexed by (age, mass).

    The index columns are parsed with ``Decimal`` (so grid lookups are
    exact) while all remaining columns are stored as float32.
    """
    # suppress FutureWarning about element-wise comparison
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        table = pd.read_csv(
            filename,
            index_col=('age', 'mass'),
            dtype=np.float32,
            converters={'age': Decimal, 'mass': Decimal},
        )
    return table
# use DFInterpolator to interpolate the isochrone model grid
def interpolate_true(idx, grid, agelist=None, masslist=None):
    """True interpolation of the model grid at ``idx = (age, mass)``.

    ``grid`` may be a DataFrame or a path to a pickled one.  ``agelist``
    and ``masslist`` are unused; they exist only so all interpolate_*
    helpers share one signature.
    """
    age, mass = idx
    if type(grid) is str:
        # a pickle path was given instead of an in-memory grid
        grid = pd.read_pickle(grid)
    row_index = pd.MultiIndex.from_product([[age], [mass]], names=('age', 'mass'))
    result = pd.DataFrame(columns=grid.columns, index=row_index, dtype=float)
    interpolator = DFInterpolator(grid)
    result[result.columns] = interpolator([age, mass])
    del grid
    return result
# getting list index out of range for nearest masses
# "interpolate" by finding the nearest point in the isochrone model grid
def interpolate_nearest(idx, grid, agelist=None, masslist=None):
    """Nearest-neighbour "interpolation": return the grid row closest to
    ``idx = (age, mass)`` as a one-row DataFrame indexed by the requested
    (age, mass).  Rows are NaN-filled when the request falls off the grid.

    ``grid`` may be a DataFrame or a path to a pickled one; ``agelist`` /
    ``masslist`` can be passed in to avoid recomputing the index levels
    on every call.
    """
    age, mass = idx
    # accept a pickle path in place of an in-memory grid
    if type(grid) is str:
        grid = pd.read_pickle(grid)
    # one-row output frame indexed by the *requested* coordinates
    df = pd.DataFrame(columns=grid.columns, index=pd.MultiIndex.from_product([[age], [mass]], names=('age', 'mass')), dtype=float)
    if agelist is None:
        agelist = grid.index.get_level_values('age').drop_duplicates()
    try:
        # bracket the requested age with its two neighbouring grid ages
        # NOTE(review): when age sorts before the first grid age,
        # bisect_left returns 0 and the `- 1` wraps to the *last* grid
        # age rather than raising IndexError — confirm this is intended.
        nearest_ages = [agelist[bisect_left(agelist, age) - 1], agelist[bisect_left(agelist, age)]]
    except IndexError:
        # age is past the end of the grid: return the all-NaN row
        df[df.columns] = np.nan
        if 'grid' in locals():
            del grid
        return df
    else:
        # pick whichever bracketing age is closer to the request
        closest_age = nearest_ages[np.argmin(np.diff([nearest_ages[0], age, nearest_ages[1]]))]
        if masslist is None:
            masslist = grid.loc[closest_age].index.get_level_values('mass')  # won't be duplicates in mass for a given age
        try:
            nearest_masses = [masslist[bisect_left(masslist, mass) - 1], masslist[bisect_left(masslist, mass)]]
        except IndexError:
            df[df.columns] = np.nan
        else:
            closest_mass = nearest_masses[np.argmin(np.diff([nearest_masses[0], mass, nearest_masses[1]]))]
            try:
                df[df.columns] = grid.loc[(closest_age, closest_mass)].values
            except KeyError:
                df[df.columns] = np.nan
        finally:
            # free the (potentially large) grid before returning.
            # NOTE(review): `return` inside `finally` swallows any
            # in-flight exception from the try/else above.
            if 'grid' in locals():
                del grid
            return df
# use nearest neighbor in age, then use `scipy.interpolate.interp1d` to interpolate mass
def interpolate_hybrid(idx, grid, agelist=None, masslist=None):
    """Hybrid interpolation: snap to the nearest grid age, then linearly
    interpolate along mass with ``scipy.interpolate.interp1d``.

    Returns a one-row DataFrame indexed by the requested (age, mass),
    NaN-filled when the request lies outside the grid.  ``masslist`` is
    unused; it exists for signature parity with the other interpolators.
    """
    age, mass = idx
    # accept a pickle path in place of an in-memory grid
    if type(grid) is str:
        grid = pd.read_pickle(grid)
    df = pd.DataFrame(columns=grid.columns, index=pd.MultiIndex.from_product([[age], [mass]], names=('age', 'mass')), dtype=float)
    if agelist is None:
        agelist = grid.index.get_level_values('age').drop_duplicates()
    try:
        # bracket the requested age with its two neighbouring grid ages
        # NOTE(review): as in interpolate_nearest, an age before the
        # first grid age wraps via index -1 instead of raising — confirm.
        nearest_ages = [agelist[bisect_left(agelist, age) - 1], agelist[bisect_left(agelist, age)]]
    except IndexError:
        df[df.columns] = np.nan
        if 'grid' in locals():
            del grid
        return df
    else:
        # pick whichever bracketing age is closer, then interpolate the
        # entire row block along the mass axis
        closest_age = nearest_ages[np.argmin(np.diff([nearest_ages[0], age, nearest_ages[1]]))]
        mass_df = grid.loc[closest_age]
        f = interp1d(mass_df.index.values, mass_df.values, kind='linear', axis=0, bounds_error=False, fill_value=np.nan, assume_sorted=False)
        try:
            df[df.columns] = f(mass)
        except KeyError:
            df[df.columns] = np.nan
        finally:
            # free the (potentially large) grid before returning.
            # NOTE(review): `return` in `finally` swallows in-flight
            # exceptions from the try above.
            if 'grid' in locals():
                del grid
            return df
# ``with WaitingAnimation(): ...`` to print a dynamic loading "..."
# combination of stackoverflow comments from
# https://stackoverflow.com/questions/4995733/how-to-create-a-spinning-command-line-cursor
# https://stackoverflow.com/questions/44606005/print-text-loading-with-dots-going-forward-and-backward-in-python-shell
class WaitingAnimation():
    """Context manager that prints ``text`` followed by an animated train
    of up to ``n`` dots every ``delay`` seconds while the body runs.
    """
    def __init__(self, text='', n=3, delay=0.5):
        self.text = text        # message printed before the dots
        self.delay = delay      # seconds between animation frames
        self.busy = False       # flag polled by the worker thread
        self.n = n              # maximum number of dots
        self._thread = None     # worker thread handle, set in __enter__

    def task(self):
        """Worker loop: redraw the text and dots until ``busy`` is cleared."""
        ndots = 1
        print('')
        while self.busy:
            print('\r' + self.text, ndots*'.', end="", flush=True)
            time.sleep(self.delay)
            if ndots == self.n:
                # erase the dots and restart the train
                print('\b \b'*ndots, end="")
                ndots = 0
            else:
                ndots += 1

    def __enter__(self):
        self.busy = True
        # Fixed: keep a handle on the worker so __exit__ can join it.
        # daemon=True ensures a crashed caller can never hang the process
        # on this cosmetic thread.
        self._thread = threading.Thread(target=self.task, daemon=True)
        self._thread.start()
        return self

    def __exit__(self, exception, value, traceback):
        self.busy = False
        # Fixed: join the worker instead of sleeping one delay period, so
        # no animation frames can be printed after the context has exited.
        if self._thread is not None:
            self._thread.join()
            self._thread = None
        if exception is not None:
            return False
|
manager.py | import collections
import copy
import logging
import queue
import sys
import tempfile
import threading
from . import checker
from . import mutator
from . import options
from . import parser
from . import semantics
# Immutable record describing one candidate simplification, produced by the
# producer thread and consumed by the checker (consumer) threads.
Candidate = collections.namedtuple('Candidate', ['counter', 'simplification', 'exprs'])
"""Represents a simplification candidate.
:code:`counter` contains the number of the simplified node in the pre-order iteration of :meth:`semantics.iterate_nodes`.
:code:`exprs` contains the simplified input.
:code:`simplification` contains the name of the applied mutator.
"""
class Manager:
    """Manages the asynchronous generation and checking of mutated inputs.
    One thread runs the :meth:`producer` method that fills a :class:`queue.Queue`
    while as many threads as given by the :code:`--max-threads` options run and evaluate the candidates from the queue.
    The :meth:`simplify` methods starts all threads and terminates them as soon as one valid simplication has been found.
    """
    def __init__(self):
        self.q = queue.Queue(maxsize = 20)
        self.stop_operation = False       # signals all threads to terminate
        self.finished_generation = False  # producer has exhausted all mutations
        self.result = None                # first accepted Candidate, if any
        self.result_lock = threading.Lock()

    def __empty_queue(self):
        """Empty the queue so a blocked producer can make progress and exit."""
        try:
            while not self.q.empty():
                self.q.get(timeout = 0.1)
                self.q.task_done()
        except queue.Empty:
            pass

    def producer(self, original, skip):
        """Produces new mutated variants of the given input."""
        original_size = semantics.node_count(original)
        gen = mutator.MutationGenerator(skip)
        for candidate in gen.generate_mutations(original):
            # In aggressive mode, only consider candidates that shrink the
            # input by at least the configured fraction.
            if options.args().mode_aggressive:
                if semantics.node_count(candidate[2]) > original_size * (1 - options.args().aggressiveness):
                    continue
            if options.args().mode_reduction_only:
                if semantics.node_count(candidate[2]) >= original_size:
                    continue
            # deepcopy so later mutations cannot alias this candidate's exprs
            self.q.put(Candidate(candidate[0], candidate[1], copy.deepcopy(candidate[2])))
            if self.stop_operation:
                break
        self.finished_generation = True

    def consumer(self):
        """Takes candidates from the queue and checks whether their output matches the reference result."""
        while not self.stop_operation:
            try:
                candidate = self.q.get(timeout = 0.25)
                self.q.task_done()
                # Fixed: `res` was unbound (NameError) when the temp-file
                # block raised FileNotFoundError before checker.execute ran.
                res = None
                try:
                    with tempfile.NamedTemporaryFile('w', suffix = '.smt2') as tmp:
                        tmp.write(parser.render_smtlib(candidate.exprs))
                        tmp.flush()
                        res = checker.execute(options.args().cmd, tmp.name)
                except FileNotFoundError:
                    logging.info('Removing the temporary file failed.')
                if res is not None and checker.matches_reference(res):
                    # First matching candidate wins; tell everyone to stop.
                    with self.result_lock:
                        if self.result is None:
                            self.stop_operation = True
                            self.result = candidate
            except queue.Empty:
                if self.finished_generation:
                    break
        # Drain the queue so a producer blocked on a full queue can exit.
        self.__empty_queue()

    def simplify(self, original, skip = 0):
        """Starts one producer thread and multiple consumer thread and then waits for a valid simplification.

        Returns the accepted :class:`Candidate`, or None if no candidate
        matched the reference result.
        """
        assert self.q.empty()
        # Reset all shared state for a fresh round.
        self.q = queue.Queue(maxsize = 20)
        self.stop_operation = False
        self.finished_generation = False
        self.result = None
        try:
            threads = [
                threading.Thread(target = self.producer, name = 'producer', args = (original, skip))
            ] + [
                threading.Thread(target = self.consumer, name = 'consumer-{}'.format(i + 1))
                for i in range(options.args().max_threads)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.__empty_queue()
        except KeyboardInterrupt:
            # Ask all threads to wind down, then re-raise for the caller.
            sys.stdout.write('\n')
            logging.warning('Stopping all computations.')
            self.stop_operation = True
            self.__empty_queue()
            raise
        sys.stdout.write('\n')
        return self.result
|
pool_scheduler.py | import paho.mqtt.client as mqtt
from threading import Thread
import time
import datetime
# NOTE(review): `global` at module scope is a no-op; kept only to signal
# that `current_temperature` is module-level state written by on_message().
global current_temperature
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: log the result code and (re)subscribe.

    Subscribing here means the subscription is restored automatically
    whenever the client reconnects.
    """
    message = "Connected with result code " + str(rc)
    print(message)
    client.subscribe("POOL/temp")
def on_message(client, userdata, msg):
    """MQTT message callback: cache the latest pool temperature.

    Updates module-level ``current_temperature`` whenever a reading
    arrives on the ``POOL/temp`` topic.
    """
    global current_temperature
    print(msg.topic+" "+str(msg.payload)+"\n")
    if msg.topic == "POOL/temp":
        # print() calls keep this module importable under Python 3 (the
        # originals were Python-2 print statements) and match on_connect.
        print("\treceived temperature update ")
        # Fixed: float(str(msg.payload)) breaks on Python 3, where the
        # payload is bytes (str(b'25.5') == "b'25.5'"); float() accepts
        # bytes and str directly on both Python versions.
        current_temperature = float(msg.payload)
        print(current_temperature)
def poolPumpScheduler(mqttClient):
    """Poll once per second and log the current time (runs forever).

    NOTE(review): the actual pump-scheduling decision is not implemented
    yet — the loop only prints a heartbeat and the wall-clock time.
    ``mqttClient`` is accepted for future publish calls.
    """
    while True:
        # Fixed: Python-2 print statements were a SyntaxError under
        # Python 3; print() calls work on both versions.
        print("checking to run pump")
        time.sleep(1)
        now = datetime.datetime.now().strftime('%H:%M')
        print(now)
def main():
    """Wire up the MQTT client and start the pump-scheduler thread."""
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect("localhost")
    # Scheduler runs in the background; the MQTT network loop owns this thread.
    scheduler_thread = Thread(target=poolPumpScheduler, args=(client,))
    scheduler_thread.start()
    client.loop_forever()
# Run the scheduler only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import namedtuple, OrderedDict
import os
import pickle
import platform
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')

# Were we compiled --with-pydebug or with #define Py_DEBUG?
# (sys.gettotalrefcount only exists in debug builds.)
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
    """some doc"""
    # Plain function wrapped by _testcapi.instancemethod below; the tests
    # compare its __doc__ and attributes, so the docstring stays as-is.
    return self
class InstanceMethod:
    # Attributes are C-level instancemethod descriptors wrapping builtins
    # and a Python function; exercised by CAPITest.test_instancemethod.
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Tests of assorted helpers exported by the _testcapi C module."""

    def test_instancemethod(self):
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)

        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")

    def test_no_FatalError_infinite_loop(self):
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                         b'Fatal Python error:'
                         b' PyThreadState_Get: no current thread'))

    def test_memoryview_from_NULL_pointer(self):
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)

    def test_exc_info(self):
        # Round-trip the C-level exception state via _testcapi.set_exc_info
        # and verify sys.exc_info() tracks it.
        raised_exception = ValueError("5")
        new_exc = TypeError("TEST")
        try:
            raise raised_exception
        except ValueError as e:
            tb = e.__traceback__
            orig_sys_exc_info = sys.exc_info()
            orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
            new_sys_exc_info = sys.exc_info()
            new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
            reset_sys_exc_info = sys.exc_info()

            self.assertEqual(orig_exc_info[1], e)

            self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
            self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
            self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
        else:
            self.assertTrue(False)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        class Z(object):
            def __len__(self):
                return 1

        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)

    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
                         "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
                         "docstring_with_invalid_signature($module, /, boo)\n"
                         "\n"
                         "This docstring has an invalid signature."
                         )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
                         "docstring_with_invalid_signature2($module, /, boo)\n"
                         "\n"
                         "--\n"
                         "\n"
                         "This docstring also has an invalid signature."
                         )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
                         "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
                         "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
                         "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
                         "($module, /, parameter)")

    def test_c_type_with_matrix_multiplication(self):
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        o @= m1
        self.assertEqual(o, ("matmul", 42, m1))

    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned NULL '
                             br'without setting an error\n'
                             br'SystemError: <built-in function '
                             br'return_null_without_error> returned NULL '
                             br'without setting an error\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br' File .*", line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an error')

    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned a '
                             br'result with an error set\n'
                             br'ValueError\n'
                             br'\n'
                             br'The above exception was the direct cause '
                             br'of the following exception:\n'
                             br'\n'
                             br'SystemError: <built-in '
                             br'function return_result_with_error> '
                             br'returned a result with an error set\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br' File .*, line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an error set')

    def test_buildvalue_N(self):
        _testcapi.test_buildvalue_N()

    def test_set_nomemory(self):
        code = """if 1:
            import _testcapi

            class C(): pass

            # The first loop tests both functions and that remove_mem_hooks()
            # can be called twice in a row. The second loop checks a call to
            # set_nomemory() after a call to remove_mem_hooks(). The third
            # loop checks the start and stop arguments of set_nomemory().
            for outer_cnt in range(1, 4):
                start = 10 * outer_cnt
                for j in range(100):
                    if j == 0:
                        if outer_cnt != 3:
                            _testcapi.set_nomemory(start)
                        else:
                            _testcapi.set_nomemory(start, start + 1)
                    try:
                        C()
                    except MemoryError as e:
                        if outer_cnt != 3:
                            _testcapi.remove_mem_hooks()
                        print('MemoryError', outer_cnt, j)
                        _testcapi.remove_mem_hooks()
                        break
        """
        rc, out, err = assert_python_ok('-c', code)
        self.assertIn(b'MemoryError 1 10', out)
        self.assertIn(b'MemoryError 2 20', out)
        self.assertIn(b'MemoryError 3 30', out)

    def test_mapping_keys_values_items(self):
        class Mapping1(dict):
            def keys(self):
                return list(super().keys())
            def values(self):
                return list(super().values())
            def items(self):
                return list(super().items())
        class Mapping2(dict):
            def keys(self):
                return tuple(super().keys())
            def values(self):
                return tuple(super().values())
            def items(self):
                return tuple(super().items())
        dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}

        for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
                        dict_obj, OrderedDict(dict_obj),
                        Mapping1(dict_obj), Mapping2(dict_obj)]:
            self.assertListEqual(_testcapi.get_mapping_keys(mapping),
                                 list(mapping.keys()))
            self.assertListEqual(_testcapi.get_mapping_values(mapping),
                                 list(mapping.values()))
            self.assertListEqual(_testcapi.get_mapping_items(mapping),
                                 list(mapping.items()))

    def test_mapping_keys_values_items_bad_arg(self):
        self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)

        class BadMapping:
            def keys(self):
                return None
            def values(self):
                return None
            def items(self):
                return None
        bad_mapping = BadMapping()
        self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
class TestPendingCalls(unittest.TestCase):
    """Exercise Py_AddPendingCall via _testcapi._pending_threadfunc."""

    def pendingcalls_submit(self, l, n):
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)

        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;

    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks. Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            for i in range(1000):
                a = i*i
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))

    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()

        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with support.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)

    def pendingcalls_thread(self, context):
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            # Last thread to finish signals the waiter via the event.
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
            if nFinished == context.nThreads:
                context.event.set()

    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once. It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Check that a subinterpreter gets its own module/builtins state."""

    def test_subinterps(self):
        import builtins
        r, w = os.pipe()
        # The subinterpreter pickles the ids of its sys.modules/builtins
        # through the pipe so we can compare them with ours.
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
    def test(self):
        # Regression check for PyArg_ParseTuple format handling (bpo-6012).
        self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTests(unittest.TestCase):
    """Drive the Programs/_testembed helper binary (embedded interpreter)."""

    def setUp(self):
        here = os.path.abspath(__file__)
        basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
        exename = "_testembed"
        if sys.platform.startswith("win"):
            ext = ("_d" if "_d" in sys.executable else "") + ".exe"
            exename += ext
            exepath = os.path.dirname(sys.executable)
        else:
            exepath = os.path.join(basepath, "Programs")
        self.test_exe = exe = os.path.join(exepath, exename)
        if not os.path.exists(exe):
            self.skipTest("%r doesn't exist" % exe)
        # This is needed otherwise we get a fatal error:
        # "Py_Initialize: Unable to get the locale encoding
        # LookupError: no codec search functions registered: can't find encoding"
        self.oldcwd = os.getcwd()
        os.chdir(basepath)

    def tearDown(self):
        os.chdir(self.oldcwd)

    def run_embedded_interpreter(self, *args, env=None):
        """Runs a test in the embedded interpreter"""
        cmd = [self.test_exe]
        cmd.extend(args)
        if env is not None and sys.platform == 'win32':
            # Windows requires at least the SYSTEMROOT environment variable to
            # start Python.
            env = env.copy()
            env['SYSTEMROOT'] = os.environ['SYSTEMROOT']

        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True,
                             env=env)
        (out, err) = p.communicate()
        self.assertEqual(p.returncode, 0,
                         "bad returncode %d, stderr is %r" %
                         (p.returncode, err))
        return out, err

    def run_repeated_init_and_subinterpreters(self):
        # Generator: yields one list of Interp records per init/fini pass.
        out, err = self.run_embedded_interpreter("repeated_init_and_subinterpreters")
        self.assertEqual(err, "")

        # The output from _testembed looks like this:
        # --- Pass 0 ---
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # interp 1 <0x1d4f690>, thread state <0x1d35350>: id(modules) = 139650431165784
        # interp 2 <0x1d5a690>, thread state <0x1d99ed0>: id(modules) = 139650413140368
        # interp 3 <0x1d4f690>, thread state <0x1dc3340>: id(modules) = 139650412862200
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # --- Pass 1 ---
        # ...

        interp_pat = (r"^interp (\d+) <(0x[\dA-F]+)>, "
                      r"thread state <(0x[\dA-F]+)>: "
                      r"id\(modules\) = ([\d]+)$")
        Interp = namedtuple("Interp", "id interp tstate modules")

        numloops = 0
        current_run = []
        for line in out.splitlines():
            if line == "--- Pass {} ---".format(numloops):
                self.assertEqual(len(current_run), 0)
                if support.verbose:
                    print(line)
                numloops += 1
                continue

            self.assertLess(len(current_run), 5)
            match = re.match(interp_pat, line)
            if match is None:
                self.assertRegex(line, interp_pat)

            # Parse the line from the loop. The first line is the main
            # interpreter and the 3 afterward are subinterpreters.
            interp = Interp(*match.groups())
            if support.verbose:
                print(interp)
            self.assertTrue(interp.interp)
            self.assertTrue(interp.tstate)
            self.assertTrue(interp.modules)
            current_run.append(interp)

            # The last line in the loop should be the same as the first.
            if len(current_run) == 5:
                main = current_run[0]
                self.assertEqual(interp, main)
                yield current_run
                current_run = []

    def test_subinterps_main(self):
        for run in self.run_repeated_init_and_subinterpreters():
            main = run[0]
            self.assertEqual(main.id, '0')

    def test_subinterps_different_ids(self):
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            mainid = int(main.id)
            for i, sub in enumerate(subs):
                self.assertEqual(sub.id, str(mainid + i + 1))

    def test_subinterps_distinct_state(self):
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            if '0x0' in main:
                # XXX Fix on Windows (and other platforms): something
                # is going on with the pointers in Programs/_testembed.c.
                # interp.interp is 0x0 and interp.modules is the same
                # between interpreters.
                raise unittest.SkipTest('platform prints pointers as 0x0')
            for sub in subs:
                # A new subinterpreter may have the same
                # PyInterpreterState pointer as a previous one if
                # the earlier one has already been destroyed. So
                # we compare with the main interpreter. The same
                # applies to tstate.
                self.assertNotEqual(sub.interp, main.interp)
                self.assertNotEqual(sub.tstate, main.tstate)
                self.assertNotEqual(sub.modules, main.modules)

    def test_forced_io_encoding(self):
        # Checks forced configuration of embedded interpreter IO streams
        env = dict(os.environ, PYTHONIOENCODING="utf-8:surrogateescape")
        out, err = self.run_embedded_interpreter("forced_io_encoding", env=env)
        if support.verbose > 1:
            print()
            print(out)
            print(err)
        expected_stream_encoding = "utf-8"
        expected_errors = "surrogateescape"
        expected_output = '\n'.join([
            "--- Use defaults ---",
            "Expected encoding: default",
            "Expected errors: default",
            "stdin: {in_encoding}:{errors}",
            "stdout: {out_encoding}:{errors}",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set errors only ---",
            "Expected encoding: default",
            "Expected errors: ignore",
            "stdin: {in_encoding}:ignore",
            "stdout: {out_encoding}:ignore",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set encoding only ---",
            "Expected encoding: latin-1",
            "Expected errors: default",
            "stdin: latin-1:{errors}",
            "stdout: latin-1:{errors}",
            "stderr: latin-1:backslashreplace",
            "--- Set encoding and errors ---",
            "Expected encoding: latin-1",
            "Expected errors: replace",
            "stdin: latin-1:replace",
            "stdout: latin-1:replace",
            "stderr: latin-1:backslashreplace"])
        expected_output = expected_output.format(
            in_encoding=expected_stream_encoding,
            out_encoding=expected_stream_encoding,
            errors=expected_errors)
        # This is useful if we ever trip over odd platform behaviour
        self.maxDiff = None
        self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):

    def test_skipitem(self):
        """
        If this test failed, you probably added a new "format unit"
        in Python/getargs.c, but neglected to update our poor friend
        skipitem() in the same file. (If so, shame on you!)

        With a few exceptions**, this function brute-force tests all
        printable ASCII*** characters (32 to 126 inclusive) as format units,
        checking to see that PyArg_ParseTupleAndKeywords() return consistent
        errors both when the unit is attempted to be used and when it is
        skipped. If the format unit doesn't exist, we'll get one of two
        specific error messages (one for used, one for skipped); if it does
        exist we *won't* get that error--we'll get either no error or some
        other error. If we get the specific "does not exist" error for one
        test and not for the other, there's a mismatch, and the test fails.

        ** Some format units have special funny semantics and it would
           be difficult to accommodate them here. Since these are all
           well-established and properly skipped in skipitem() we can
           get away with not testing them--this test is really intended
           to catch *new* format units.

        *** Python C source files must be ASCII. Therefore it's impossible
            to have non-ASCII format units.
        """
        empty_tuple = ()
        tuple_1 = (0,)
        dict_b = {'b':1}
        keywords = ["a", "b"]

        for i in range(32, 127):
            c = chr(i)

            # skip parentheses, the error reporting is inconsistent about them
            # skip 'e', it's always a two-character code
            # skip '|' and '$', they don't represent arguments anyway
            if c in '()e|$':
                continue

            # test the format unit when not skipped
            format = c + "i"
            try:
                _testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
                                                   format, keywords)
                when_not_skipped = False
            except SystemError as e:
                s = "argument 1 (impossible<bad format char>)"
                when_not_skipped = (str(e) == s)
            except TypeError:
                when_not_skipped = False

            # test the format unit when skipped
            optional_format = "|" + format
            try:
                _testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
                                                   optional_format, keywords)
                when_skipped = False
            except SystemError as e:
                s = "impossible<bad format char>: '{}'".format(format)
                when_skipped = (str(e) == s)

            message = ("test_skipitem_parity: "
                       "detected mismatch between convertsimple and skipitem "
                       "for format unit '{}' ({}), not skipped {}, skipped {}".format(
                           c, i, when_skipped, when_not_skipped))
            self.assertIs(when_skipped, when_not_skipped, message)

    def test_parse_tuple_and_keywords(self):
        # Test handling errors in the parse_tuple_and_keywords helper itself
        self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
                          (), {}, 42, [])
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', 42)
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', [''] * 42)
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', [42])

    def test_bad_use(self):
        # Test handling invalid format and keywords in
        # PyArg_ParseTupleAndKeywords()
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '||O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1, 2), {}, '|O|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1}, '$$O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1, 'b': 2}, '$O$O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1}, '$|O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1, 'b': 2}, '$O|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '|OO', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '|$O', [''])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '|OO', ['a', ''])

    def test_positional_only(self):
        parse = _testcapi.parse_tuple_and_keywords

        parse((1, 2, 3), {}, 'OOO', ['', '', 'a'])
        parse((1, 2), {'a': 3}, 'OOO', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes at least 2 positional arguments \(1 given\)'):
            parse((1,), {'a': 3}, 'OOO', ['', '', 'a'])
        parse((1,), {}, 'O|OO', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes at least 1 positional arguments \(0 given\)'):
            parse((), {}, 'O|OO', ['', '', 'a'])
        parse((1, 2), {'a': 3}, 'OO$O', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes exactly 2 positional arguments \(1 given\)'):
            parse((1,), {'a': 3}, 'OO$O', ['', '', 'a'])
        parse((1,), {}, 'O|O$O', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes at least 1 positional arguments \(0 given\)'):
            parse((), {}, 'O|O$O', ['', '', 'a'])
        with self.assertRaisesRegex(SystemError, r'Empty parameter name after \$'):
            parse((1,), {}, 'O|$OO', ['', '', 'a'])
        with self.assertRaisesRegex(SystemError, 'Empty keyword'):
            parse((1,), {}, 'O|OO', ['', 'a', ''])
class TestThreadState(unittest.TestCase):

    @support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []

            def callback():
                idents.append(threading.get_ident())

            _testcapi._test_thread_state(callback)
            # NOTE(review): presumably keeps extra references to callback
            # alive during the C call — confirm against _testcapi source.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")

        # Run once on this thread, then once on a fresh thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    """Drive every C-level self-test that the _testcapi module exposes."""

    def test__testcapi(self):
        # Collect the embedded test names first, then run each inside a
        # subTest so a single failure does not mask the remainder.
        embedded = [attr for attr in dir(_testcapi)
                    if attr.startswith('test_')]
        for name in embedded:
            with self.subTest("internal", name=name):
                getattr(_testcapi, name)()
class PyMemDebugTests(unittest.TestCase):
    """Validate the PyMem debug hooks selected via PYTHONMALLOC.

    Each test runs a crashing snippet in a subprocess and matches the
    debug allocator's fatal-error report on stderr.
    """

    # Allocator family under test; subclasses override this.
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'

    def check(self, code):
        """Run *code* in a subprocess expected to abort; return its stderr."""
        with support.SuppressCrashReport():
            out = assert_python_failure('-c', code,
                                       PYTHONMALLOC=self.PYTHONMALLOC)
            stderr = out.err
        return stderr.decode('ascii', 'replace')

    def test_buffer_overflow(self):
        # Writing past the end of a PyMem block must be detected via the
        # trailing FORBIDDENBYTE pad bytes.
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r" 16 bytes originally requested\n"
                 r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r" at tail\+0: 0x78 \*\*\* OUCH\n"
                 r" at tail\+1: 0xfb\n"
                 r" at tail\+2: 0xfb\n"
                 r" .*\n"
                 r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r" Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Fatal Python error: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)

    def test_api_misuse(self):
        # Allocating with one API ('m') and freeing with another ('r')
        # must be reported as a fatal ID mismatch.
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r" 16 bytes originally requested\n"
                 r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r" Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)

    def check_malloc_without_gil(self, code):
        # Shared assertion: calling an allocator without the GIL is fatal.
        out = self.check(code)
        expected = ('Fatal Python error: Python memory allocator called '
                    'without holding the GIL')
        self.assertIn(expected, out)

    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)
# Variants of PyMemDebugTests, one per allocator family.

class PyMemMallocDebugTests(PyMemDebugTests):
    # Raw libc malloc with debug hooks.
    PYTHONMALLOC = 'malloc_debug'


@unittest.skipUnless(sysconfig.get_config_var('WITH_PYMALLOC') == 1,
                     'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    # pymalloc arena allocator with debug hooks.
    PYTHONMALLOC = 'pymalloc_debug'


@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    PYTHONMALLOC = ''
# Allow invoking this test module directly.
if __name__ == "__main__":
    unittest.main()
|
start.py | import os
import subprocess
from multiprocessing import current_process
from multiprocessing.context import Process
class SubprocessService:
    """Helpers for launching the project's applications as child interpreters."""

    @staticmethod
    def start(python_executable, application_name, path: str):
        """Run ``python_executable path`` and block until it exits.

        Logs the application name and the process genealogy first.
        """
        print(f"Application : {application_name}")
        print(f"Process Name : {current_process().name}")
        print(f"Pid : {os.getpid()}")
        print(f"Parent Pid : {os.getppid()}")
        # BUG FIX: pass an argument list instead of a formatted command
        # string so paths containing spaces work and no shell-style
        # splitting of the arguments is involved.
        subprocess.call([python_executable, path])

    @staticmethod
    def start_process(python_executable, application_name, path):
        """Launch ``python_executable path`` without waiting.

        application_name is accepted for interface compatibility (used by
        callers for labeling); returns the ``subprocess.Popen`` handle.
        """
        return subprocess.Popen([python_executable, path])
# Entry point: launch the API, scheduler and process applications as
# children, then wait for the API process before declaring completion.
if __name__ == '__main__':
    # Interpreter used for the children; override via PYTHON_EXECUTABLE.
    python_executable = os.getenv("PYTHON_EXECUTABLE", "python")
    # <this file's directory>/src holds the three applications.
    root_directory = os.path.join(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)))), 'src')
    # Only the API process handle is kept; the other two run unattended.
    api_subprocess = SubprocessService.start_process(python_executable=python_executable, application_name="Pdi-Api", path=os.path.join(root_directory, "api", "app.py"))
    SubprocessService.start_process(python_executable=python_executable, application_name="Pdi-Scheduler", path=os.path.join(root_directory, "scheduler", "app.py"))
    SubprocessService.start_process(python_executable=python_executable, application_name="Pdi-Process", path=os.path.join(root_directory, "process", "app.py"))
    api_subprocess.wait()
    print("finished")
test_base.py | #!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
# from __future__ import unicode_literals
import copy
import os
import psutil
import random
import re
import signal
import subprocess
import sys
import time
import threading
import unittest
import utils
import pexpect
# While this path can be variable, in practice is lives statically.
OSQUERY_DEPENDENCIES = "/usr/local/osquery"
sys.path = [OSQUERY_DEPENDENCIES + "/lib/python2.7/site-packages"] + sys.path
try:
from pexpect.replwrap import REPLWrapper
except ImportError as e:
print("Could not import pexpect.replwrap: %s" % (str(e)))
print(" Need pexpect version 3.3, installed version: %s" % (
str(pexpect.__version__)))
print(" pexpect location: %s" % (str(pexpect.__file__)))
exit(1)
try:
import argparse
except ImportError:
print("Cannot import argparse: pip install argparse?")
exit(1)
try:
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
except ImportError as e:
print("Cannot import thrift: pip install thrift?")
print(str(e))
exit(1)
'''Defaults that should be used in integration tests.'''
# Directory containing this script.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Per-user scratch directory holding configs, sockets, pidfiles and DBs.
CONFIG_DIR = "/tmp/osquery-tests-python%d/" % (os.getuid())
# Common path prefix for all test artifacts below.
CONFIG_NAME = CONFIG_DIR + "tests"
# Baseline osqueryd options; tests deep-copy and specialize this dict.
DEFAULT_CONFIG = {
    "options": {
        "database_path": "%s.db" % CONFIG_NAME,
        "pidfile": "%s.pid" % CONFIG_NAME,
        "config_path": "%s.conf" % CONFIG_NAME,
        "extensions_socket": "%s.em" % CONFIG_NAME,
        "extensions_interval": "1",
        "extensions_timeout": "0",
        "watchdog_level": "3",
        "disable_logging": "true",
        "disable_events": "true",
        "force": "true",
    },
    "schedule": {},
}

'''Expect CONFIG to be set during Tester.main() to a python dict.'''
CONFIG = None

'''Expect ARGS to contain the argparsed namespace.'''
ARGS = None
class OsqueryUnknownException(Exception):
    """Raised when the osqueryi shell produces output we cannot parse."""
class OsqueryException(Exception):
    """Raised when the osqueryi shell reports an error for a query."""
class OsqueryWrapper(REPLWrapper):
    '''A pexpect wrapper intended for interacting with the osqueryi REPL'''
    PROMPT = u'osquery> '
    CONTINUATION_PROMPT = u'    ...> '
    ERROR_PREFIX = 'Error:'

    def __init__(self, command='../osqueryi', args=None, env=None):
        '''Spawn osqueryi with the global CONFIG options plus *args*.

        args: extra --flag=value options merged over the defaults.
        env: environment for the spawned process.
        '''
        global CONFIG_NAME, CONFIG
        # BUG FIX: the defaults were mutable dicts ({}), shared between
        # every instance constructed without explicit arguments.
        args = {} if args is None else args
        env = {} if env is None else env
        options = copy.deepcopy(CONFIG)["options"]
        for option in args.keys():
            options[option] = args[option]
        # Randomize the database path so concurrent shells do not collide.
        options["database_path"] += str(random.randint(1000, 9999))
        # BUG FIX: dict.iteritems() is Python-2-only; items() works on both.
        command = command + " " + " ".join(["--%s=%s" % (k, v) for
                                            k, v in options.items()])
        proc = pexpect.spawn(command, env=env)
        super(OsqueryWrapper, self).__init__(
            proc,
            self.PROMPT,
            None,
            continuation_prompt=self.CONTINUATION_PROMPT)

    def run_query(self, query):
        '''Run a query, returning the results as a list of dictionaries

        When unknown output is encountered, OsqueryUnknownException is thrown.
        When osqueryi returns an error, OsqueryException is thrown.
        '''
        query = query + ';'  # Extra semicolon causes no harm
        result = self.run_command(query)
        # On Mac, the query appears first in the string. Remove it if so.
        result = re.sub(re.escape(query), '', result).strip()
        result_lines = result.splitlines()
        if len(result_lines) < 2:
            # BUG FIX: result_lines[0] raised IndexError on empty output.
            raise OsqueryUnknownException(
                'Unexpected output:\n %s' % (
                    result_lines[0] if result_lines else result))
        if result_lines[1].startswith(self.ERROR_PREFIX):
            raise OsqueryException(result_lines[1])

        # Skip any leading lines that are not part of the ASCII table
        # (table rows start with '+').
        noise = 0
        for l in result_lines:
            if len(l) == 0 or l[0] != '+':
                # This is not a result line
                noise += 1
            elif l[0] == '+':
                break
        for l in range(noise):
            result_lines.pop(0)
        try:
            # Row 1 is the header; rows 3..-1 are data (2 and -1 are rules).
            header = result_lines[1]
            columns = re.findall('[^ |]+', header)
            rows = []
            for line in result_lines[3:-1]:
                values = re.findall('[^ |]+', line)
                rows.append(
                    dict((col, val) for col, val in zip(columns, values)))
            return rows
        except Exception:
            # BUG FIX: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit).
            raise OsqueryUnknownException(
                'Unexpected output:\n %s' % result_lines)
class ProcRunner(object):
    '''A helper class to open a subprocess and perform testing actions.

    The subprocess is opened in a new thread and state is tracked using
    this class wrapper.
    '''

    def __init__(self, name, path, _args=[], interval=0.02, silent=False):
        # NOTE(review): mutable default `_args=[]` — only read here, but
        # callers must not mutate the stored list.
        self.started = False
        self.proc = None
        self.name = name
        self.path = path
        self.args = _args
        self.interval = interval
        self.silent = silent
        self.retcode = -1
        # Spawn and monitor the child from a daemon thread so the
        # constructor returns immediately.
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()

    def run(self):
        """Thread body: spawn the child, then poll it until it exits."""
        pid = 0
        try:
            if self.silent:
                self.proc = subprocess.Popen(
                    [self.path] + self.args,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            else:
                self.proc = subprocess.Popen([self.path] + self.args)
            pid = self.proc.pid
            self.started = True
        except Exception as e:
            print(utils.red("Process start failed:") + " %s" % self.name)
            print(str(e))
            sys.exit(1)
        try:
            # Poll until exit, then record the return code and drop the
            # handle; kill() may tear self.proc down concurrently, hence
            # the enclosing try.
            while self.proc.poll() is None:
                self.started = True
                time.sleep(self.interval)
            self.started = True
            self.retcode = -1 if self.proc is None else self.proc.poll()
            self.proc = None
        except Exception as e:
            return

    def requireStarted(self, timeout=2):
        """Block (up to *timeout* seconds) until the child has spawned."""
        delay = 0
        while delay < timeout:
            if self.started is True:
                break
            time.sleep(self.interval * 10)
            delay += self.interval * 10

    def getChildren(self, timeout=1):
        '''Get the child pids.'''
        self.requireStarted()
        if not self.proc:
            return []
        try:
            proc = psutil.Process(pid=self.proc.pid)
            # Wait for the process to fork at least one child.
            delay = 0
            while len(proc.children()) == 0:
                if delay > timeout:
                    return []
                time.sleep(self.interval)
                delay += self.interval
            return [p.pid for p in proc.children()]
        except Exception:
            # Best-effort: the process may have exited meanwhile.
            pass
        return []

    @property
    def code(self):
        # Return code recorded by run(); -1 while still running.
        self.requireStarted()
        return self.retcode

    @property
    def pid(self):
        # Child pid, or None once it has exited.
        self.requireStarted()
        return self.proc.pid if self.proc is not None else None

    def kill(self, children=False):
        """SIGKILL the child (and optionally its children), best-effort."""
        self.requireStarted()
        if children:
            for child in self.getChildren():
                try:
                    os.kill(child, 9)
                except Exception:
                    pass
        if self.proc:
            try:
                os.kill(self.pid, 9)
            except Exception:
                pass
        self.proc = None

    def isAlive(self, timeout=3):
        '''Check if the process is alive.'''
        self.requireStarted()
        # Give the monitor thread time to publish self.proc.
        delay = 0
        while self.proc is None:
            if delay > timeout:
                break
            time.sleep(self.interval)
            delay += self.interval
        if self.proc is None:
            return False
        return self.proc.poll() is None

    def isDead(self, pid, timeout=5):
        '''Check if the process was killed.

        This is different than `isAlive` in that the timeout is an expectation
        that the process will die before the timeout, `isAlive`'s timeout is
        an expectation that the process will be scheduled before the timeout.
        '''
        self.requireStarted()
        try:
            proc = psutil.Process(pid=pid)
        except psutil.NoSuchProcess as e:
            return True
        delay = 0
        while delay < timeout:
            if not proc.is_running():
                return True
            time.sleep(self.interval)
            delay += self.interval
        return False
class ProcessGenerator(object):
    '''Helper methods to patch into a unittest'''
    # Every spawned ProcRunner is recorded here so tearDown can reap it.
    # NOTE: class-level list, deliberately shared across test instances.
    generators = []

    def setUp(self):
        utils.reset_dir(CONFIG_DIR)

    def _run_daemon(self, options=None, silent=False, options_only=None,
                    overwrite=None):
        '''Spawn an osquery daemon process.

        options: flags merged into the config AND passed on the command line.
        options_only: flags written to the config file only (not the CLI).
        overwrite: top-level config sections replaced wholesale.
        '''
        global ARGS, CONFIG_NAME, CONFIG
        # BUG FIX: the defaults were mutable dicts ({}), shared between
        # every call that omitted them.
        options = {} if options is None else options
        options_only = {} if options_only is None else options_only
        overwrite = {} if overwrite is None else overwrite
        config = copy.deepcopy(CONFIG)
        # Randomize paths so parallel daemons do not collide.
        config["options"]["database_path"] += str(random.randint(1000, 9999))
        config["options"][
            "extensions_socket"] += str(random.randint(1000, 9999))
        for option in options.keys():
            config["options"][option] = options[option]
        # Flags are computed BEFORE options_only is merged: those entries
        # reach the daemon via the config file only.
        flags = ["--%s=%s" % (k, v) for k, v in config["options"].items()]
        for option in options_only.keys():
            config["options"][option] = options_only[option]
        for key in overwrite:
            config[key] = overwrite[key]
        utils.write_config(config)
        binary = os.path.join(ARGS.build, "osquery", "osqueryd")

        daemon = ProcRunner("daemon", binary, flags, silent=silent)
        daemon.options = config["options"]
        self.generators.append(daemon)
        return daemon

    def _run_extension(self, timeout=0, path=None, silent=False):
        '''Spawn an osquery extension (example_extension).'''
        global ARGS, CONFIG
        config = copy.deepcopy(CONFIG)
        config["options"][
            "extensions_socket"] += str(random.randint(1000, 9999))
        binary = os.path.join(ARGS.build, "osquery", "example_extension.ext")
        if path is not None:
            config["options"]["extensions_socket"] = path
        extension = ProcRunner("extension",
                               binary,
                               [
                                   "--socket=%s" % config["options"][
                                       "extensions_socket"],
                                   "--verbose" if not silent else "",
                                   "--timeout=%d" % timeout,
                                   "--interval=%d" % 0,
                               ],
                               silent=silent)
        self.generators.append(extension)
        extension.options = config["options"]
        return extension

    def tearDown(self):
        '''When the unit test stops, clean up child-generated processes.

        Iterate through the generated daemons and extensions, and kill -9 them.
        Unittest should stop processes they generate, but on failure the
        tearDown method will cleanup.
        '''
        for generator in self.generators:
            if generator.pid is not None:
                try:
                    os.kill(generator.pid, signal.SIGKILL)
                except Exception:
                    # Already exited; nothing to do.
                    pass
class EXClient(object):
    '''An osquery Thrift/extensions python client generator.'''

    # The instance transport object.
    transport = None
    # The client class's reference to run-time discovered manager.
    _manager = None
    # The client class's reference to run-time discovered client.
    _client = None

    def __init__(self, path=None, uuid=None):
        '''Create an extensions client to a UNIX path and optional UUID.'''
        global CONFIG
        if path is None:
            path = CONFIG["options"]["extensions_socket"]
        self.path = path
        if uuid:
            self.path += ".%s" % str(uuid)
        transport = TSocket.TSocket(unix_socket=self.path)
        transport = TTransport.TBufferedTransport(transport)
        self.protocol = TBinaryProtocol.TBinaryProtocol(transport)
        self.transport = transport

    @classmethod
    def setUp(cls, manager, client):
        '''Set the manager and client modules to generate clients from.'''
        cls._manager = manager
        cls._client = client

    def close(self):
        '''Close the transport if one was created.'''
        if self.transport:
            self.transport.close()

    def try_open(self, timeout=0.1, interval=0.01):
        '''Try to open, on success, close the UNIX domain socket.'''
        did_open = self.open(timeout, interval)
        if did_open:
            self.close()
        return did_open

    def open(self, timeout=0.1, interval=0.01):
        '''Attempt to open the UNIX domain socket, retrying until *timeout*.'''
        delay = 0
        while delay < timeout:
            try:
                self.transport.open()
                return True
            except Exception:
                # Socket not ready yet; retry after *interval*.
                pass
            delay += interval
            time.sleep(interval)
        return False

    def getEM(self):
        '''Return an extension manager (osquery core) client.'''
        if self._manager is None:
            # BUG FIX: was `raise(Exception, "...")`, which raises a tuple —
            # a TypeError on Python 3 — instead of the intended exception.
            raise Exception("The EXClient must be 'setUp' with a manager")
        return self._manager.Client(self.protocol)

    def getEX(self):
        '''Return an extension (osquery extension) client.'''
        if self._client is None:
            raise Exception("The EXClient must be 'setUp' with a client")
        return self._client.Client(self.protocol)
class Autoloader(object):
    '''Helper class to write a module or extension autoload file.'''

    def __init__(self, autoloads=None):
        '''Write *autoloads* (a list of paths, one per line) to a randomly
        named file under CONFIG_DIR.'''
        global CONFIG_DIR
        # BUG FIX: the default was a mutable list ([]) shared between calls.
        autoloads = [] if autoloads is None else autoloads
        self.path = CONFIG_DIR + "ext.load" + str(random.randint(1000, 9999))
        with open(self.path, "w") as fh:
            fh.write("\n".join(autoloads))

    def __del__(self):
        # Best-effort cleanup; also tolerates a partially-constructed
        # instance (self.path missing).
        try:
            os.unlink(self.path)
        except Exception:
            pass
class TimeoutRunner(object):
    '''Run a command, killing it if it exceeds *timeout_sec* seconds.

    The child's stdout/stderr (possibly truncated by the kill) are
    captured on the instance as self.stdout / self.stderr.
    '''

    def __init__(self, cmd=None, timeout_sec=1):
        # BUG FIX: the default was a mutable list ([]) shared between calls.
        cmd = [] if cmd is None else cmd
        self.stdout = None
        self.stderr = None
        self.proc = subprocess.Popen(cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
        # Watchdog: kill the child if communicate() has not finished in time.
        # (Bound method used directly; the lambda wrapper was redundant.)
        timer = threading.Timer(timeout_sec, self.proc.kill)
        timer.start()
        self.stdout, self.stderr = self.proc.communicate()
        timer.cancel()
def flaky(gen):
    '''Decorator: retry a test method up to 3 times before failing.

    Returns True as soon as one attempt succeeds. Otherwise prints every
    captured failure and re-raises the first one.
    '''
    def wrapper(this):
        # BUG FIX: this list used to live in flaky()'s scope, so failures
        # accumulated across *every* invocation of the wrapped test and
        # stale exceptions were re-raised by later, unrelated calls.
        exceptions = []

        def attempt():
            try:
                gen(this)
                return True
            except Exception as e:
                import traceback
                _, _, tb = sys.exc_info()
                exceptions.append(e)
                # Show the deepest frame of the failure.
                print(traceback.format_tb(tb)[-1])
                return False

        for _ in range(3):
            if attempt():
                return True
        for i, exc in enumerate(exceptions, start=1):
            # BUG FIX: was str(exc[0]); exceptions are not indexable on
            # Python 3 (TypeError while reporting the real failure).
            print("Test (attempt %d) %s::%s failed: %s" % (
                i,
                this.__class__.__name__,
                gen.__name__, str(exc)))
        if len(exceptions) > 0:
            raise exceptions[0]
        return False
    return wrapper
class Tester(object):
    """Parse CLI arguments, prepare the global CONFIG/CONFIG_DIR state and
    hand control to unittest."""

    def __init__(self):
        global ARGS, CONFIG, CONFIG_DIR
        parser = argparse.ArgumentParser(description=(
            "osquery python integration testing."
        ))
        parser.add_argument(
            "--config", metavar="FILE", default=None,
            help="Use special options from a config."
        )
        parser.add_argument(
            "--verbose", default=False, action="store_true",
            help="Run daemons and extensions with --verbose"
        )

        # Directory structure options
        parser.add_argument(
            "--build", metavar="PATH", default=".",
            help="Path to osquery build (./build/<sys>/)."
        )
        ARGS = parser.parse_args()

        if not os.path.exists(ARGS.build):
            print("Cannot find --build: %s" % ARGS.build)
            print("You must first run: make")
            exit(1)

        # Write config
        random.seed(time.time())
        utils.reset_dir(CONFIG_DIR)
        # NOTE(review): read_config is not defined in this module's visible
        # code — presumably supplied by utils or a sibling import; confirm.
        CONFIG = read_config(ARGS.config) if ARGS.config else DEFAULT_CONFIG

    def run(self):
        # Become a process-group leader so spawned children can be
        # signalled as a group on cleanup.
        os.setpgrp()
        unittest_args = [sys.argv[0]]
        if ARGS.verbose:
            unittest_args += ["-v"]
        unittest.main(argv=unittest_args)
def expect(functional, expected, interval=0.01, timeout=4):
    """Repeatedly call *functional* until its result has length *expected*.

    Returns the matching result, or None on timeout. If *functional*
    raises (including returning something without a length), the
    exception is printed and None is returned immediately.
    """
    elapsed = 0
    outcome = None
    while outcome is None or len(outcome) != expected:
        try:
            outcome = functional()
            if len(outcome) == expected:
                break
        except Exception as err:
            print("Expect exception (%s): %s not %s" % (
                str(err), str(functional), expected))
            return None
        if elapsed >= timeout:
            return None
        time.sleep(interval)
        elapsed += interval
    return outcome
class QueryTester(ProcessGenerator, unittest.TestCase):
    """Base test case: runs a daemon and executes example queries against
    it through the extensions (Thrift) API."""

    def setUp(self):
        self.binary = os.path.join(ARGS.build, "osquery", "osqueryi")
        self.daemon = self._run_daemon({
            # The set of queries will hammer the daemon process.
            "disable_watchdog": True,
            # Enable the 'hidden' flag "registry_exceptions" to prevent
            # catching.
            "registry_exceptions": True,
        })
        self.assertTrue(self.daemon.isAlive())

        # The sets of example tests will use the extensions APIs.
        self.client = EXClient(self.daemon.options["extensions_socket"])
        expectTrue(self.client.try_open)
        self.assertTrue(self.client.open())
        self.em = self.client.getEM()

    def tearDown(self):
        self.client.close()
        self.daemon.kill()

    def _execute(self, query):
        """Run *query* via the extension manager; return the response rows.

        Raises: whatever the Thrift call raised, after logging it.
        """
        try:
            result = self.em.query(query)
            self.assertEqual(result.status.code, 0)
            return result.response
        except Exception as e:
            print("General exception executing query: %s (%s)" % (
                utils.lightred(query), str(e)))
            raise e

    def _execute_set(self, queries):
        """Run each query in *queries*, timing it and highlighting any run
        that exceeds two seconds."""
        for example in queries:
            start_time = time.time()
            result = self._execute(example)
            end_time = time.time()
            duration_ms = int((end_time - start_time) * 1000)
            if duration_ms > 2000:
                # Query took longer than 2 seconds.
                duration_ms = utils.lightred(duration_ms)
            print("Query (%sms): %s, rows: %d" % (
                duration_ms, example, len(result)))
def expectTrue(functional, interval=0.01, timeout=8):
    """Poll *functional* until it returns a truthy value.

    Returns True on success, False once *timeout* seconds have elapsed.
    """
    elapsed = 0
    while elapsed < timeout:
        if functional():
            return True
        time.sleep(interval)
        elapsed += interval
    return False
def assertPermissions():
    # Refuse to run when the working tree is owned by a different user:
    # loading modules/extensions from such a tree would be unsafe.
    stat_info = os.stat('.')
    if stat_info.st_uid != os.getuid():
        print(utils.lightred("Will not load modules/extensions in tests."))
        print(utils.lightred("Repository owner (%d) executer (%d) mismatch" % (
            stat_info.st_uid, os.getuid())))
        exit(1)
def getTestDirectory(base):
    # Create (or reset) a randomized scratch directory under *base*.
    path = os.path.join(base, "test-dir" + str(random.randint(1000, 9999)))
    utils.reset_dir(path)
    return path
def loadThriftFromBuild(build_dir):
    '''Find and import the thrift-generated python interface.'''
    thrift_path = build_dir + "/generated/gen-py"
    try:
        # Make the generated modules importable, then register them with
        # EXClient so tests can build extension-manager clients.
        sys.path = [thrift_path, thrift_path + "/osquery"] + sys.path
        from osquery import ExtensionManager, Extension
        EXClient.setUp(ExtensionManager, Extension)
    except ImportError as e:
        print("Cannot import osquery thrift API from %s" % (thrift_path))
        print("Exception: %s" % (str(e)))
        print("You must first run: make")
        exit(1)
|
test_core.py | from datetime import timedelta
from functools import partial
import itertools
import json
import operator
from operator import add
import os
from time import time, sleep
import sys
import pytest
from tornado import gen
from tornado.queues import Queue
from tornado.ioloop import IOLoop
import streamz as sz
from streamz import Stream, RefCounter
from streamz.sources import sink_to_file, PeriodicCallback
from streamz.utils_test import (inc, double, gen_test, tmpfile, captured_logger, # noqa: F401
clean, await_for, metadata) # noqa: F401
from distributed.utils_test import loop # noqa: F401
# --- Synchronous Stream node tests: map/scan/filter/partition/windows -------

def test_basic():
    # Two map branches off one source; scan accumulates one of them.
    source = Stream()
    b1 = source.map(inc)
    b2 = source.map(double)
    c = b1.scan(add)
    Lc = c.sink_to_list()
    Lb = b2.sink_to_list()
    for i in range(4):
        source.emit(i)
    assert Lc == [1, 3, 6, 10]
    assert Lb == [0, 2, 4, 6]


def test_no_output():
    # A fully synchronous emit has no future to return.
    source = Stream()
    assert source.emit(1) is None


def test_scan():
    # returns_state=True: f yields (new_state, emitted_value).
    source = Stream()

    def f(acc, i):
        acc = acc + i
        return acc, acc

    L = source.scan(f, returns_state=True).sink_to_list()
    for i in range(3):
        source.emit(i)
    assert L == [0, 1, 3]


def test_kwargs():
    # Extra keyword arguments are forwarded to the scan function.
    source = Stream()

    def f(acc, x, y=None):
        acc = acc + x + y
        return acc

    L = source.scan(f, y=10).sink_to_list()
    for i in range(3):
        source.emit(i)
    assert L == [0, 11, 23]


def test_filter():
    source = Stream()
    L = source.filter(lambda x: x % 2 == 0).sink_to_list()
    for i in range(10):
        source.emit(i)
    assert L == [0, 2, 4, 6, 8]


def test_filter_args():
    # Positional args after the predicate are forwarded to it.
    source = Stream()
    L = source.filter(lambda x, n: x % n == 0, 2).sink_to_list()
    for i in range(10):
        source.emit(i)
    assert L == [0, 2, 4, 6, 8]


def test_filter_kwargs():
    # Keyword args are forwarded to the predicate as well.
    source = Stream()
    L = source.filter(lambda x, n=1: x % n == 0, n=2).sink_to_list()
    for i in range(10):
        source.emit(i)
    assert L == [0, 2, 4, 6, 8]


def test_filter_none():
    # filter(None) keeps only truthy elements (like builtins.filter).
    source = Stream()
    L = source.filter(None).sink_to_list()
    for i in range(10):
        source.emit(i % 3)
    assert L == [1, 2, 1, 2, 1, 2]


def test_map():
    def add(x=0, y=0):
        return x + y

    source = Stream()
    L = source.map(add, y=10).sink_to_list()
    source.emit(1)
    assert L[0] == 11


def test_map_args():
    source = Stream()
    L = source.map(operator.add, 10).sink_to_list()
    source.emit(1)
    assert L == [11]


def test_starmap():
    # starmap unpacks each emitted tuple into the function's arguments.
    def add(x=0, y=0):
        return x + y

    source = Stream()
    L = source.starmap(add).sink_to_list()
    source.emit((1, 10))
    assert L[0] == 11


def test_remove():
    # remove() is the complement of filter().
    source = Stream()
    L = source.remove(lambda x: x % 2 == 0).sink_to_list()
    for i in range(10):
        source.emit(i)
    assert L == [1, 3, 5, 7, 9]


def test_partition():
    source = Stream()
    L = source.partition(2).sink_to_list()
    for i in range(10):
        source.emit(i)
    assert L == [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]


def test_sliding_window():
    source = Stream()
    L = source.sliding_window(2).sink_to_list()
    for i in range(10):
        source.emit(i)
    # With return_partial (default) the first, shorter window is emitted too.
    assert L == [(0, ), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
                 (5, 6), (6, 7), (7, 8), (8, 9)]

    L = source.sliding_window(2, return_partial=False).sink_to_list()
    for i in range(10):
        source.emit(i)
    assert L == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
                 (5, 6), (6, 7), (7, 8), (8, 9)]


def test_sliding_window_ref_counts():
    # A value's metadata ref is retained while it sits in the window and
    # released once the window slides past it.
    source = Stream()
    _ = source.sliding_window(2)

    r_prev = RefCounter()
    source.emit(-2)
    source.emit(-1, metadata=[{'ref': r_prev}])
    for i in range(10):
        r = RefCounter()
        assert r_prev.count == 1
        source.emit(i, metadata=[{'ref': r}])
        assert r_prev.count == 0
        assert r.count == 1
        r_prev = r


def test_sliding_window_metadata():
    # Each emitted window carries the metadata of its current members.
    source = Stream()
    L = metadata(source.sliding_window(2)).sink_to_list()
    source.emit(0)
    source.emit(1, metadata=[{'v': 1}])
    source.emit(2, metadata=[{'v': 2}])
    source.emit(3, metadata=[{'v': 3}])
    assert L == [
        [{'v': 1}],  # First emit, because 0 has no metadata
        [{'v': 1}, {'v': 2}],  # Second emit
        [{'v': 2}, {'v': 3}]  # Third emit
    ]
# --- Asynchronous tests: backpressure and time-based windows ----------------

@gen_test()
def test_backpressure():
    # A slow consumer (0.1s per item) must slow down the emitting side.
    q = Queue(maxsize=2)

    source = Stream(asynchronous=True)
    source.map(inc).scan(add, start=0).sink(q.put)

    @gen.coroutine
    def read_from_q():
        while True:
            yield q.get()
            yield gen.sleep(0.1)

    IOLoop.current().add_callback(read_from_q)

    start = time()
    for i in range(5):
        yield source.emit(i)
    end = time()

    assert end - start >= 0.2


@gen_test()
def test_timed_window():
    # Values are batched into lists flushed every `interval` seconds.
    source = Stream(asynchronous=True)
    a = source.timed_window(0.01)
    assert a.loop is IOLoop.current()
    L = a.sink_to_list()

    for i in range(10):
        yield source.emit(i)
        yield gen.sleep(0.004)

    yield gen.sleep(a.interval)
    assert L
    assert sum(L, []) == list(range(10))
    assert all(len(x) <= 3 for x in L)
    assert any(len(x) >= 2 for x in L)

    yield gen.sleep(0.1)
    # Idle intervals flush empty batches.
    assert not L[-1]


@gen_test()
def test_timed_window_ref_counts():
    # Refs are held until the batch containing the value is flushed.
    source = Stream()
    _ = source.timed_window(0.01)

    ref1 = RefCounter()
    source.emit(1, metadata=[{'ref': ref1}])
    assert ref1.count == 1
    yield gen.sleep(0.05)

    ref2 = RefCounter()
    source.emit(2, metadata=[{'ref': ref2}])
    assert ref1.count == 0
    assert ref2.count == 1


@gen_test()
def test_timed_window_metadata():
    # Each flushed batch carries the union of its members' metadata.
    source = Stream()
    L = metadata(source.timed_window(0.01)).sink_to_list()
    source.emit(0)
    source.emit(1, metadata=[{'v': 1}])
    yield gen.sleep(0.1)
    source.emit(2, metadata=[{'v': 2}])
    source.emit(3, metadata=[{'v': 3}])
    yield gen.sleep(0.1)

    assert L == [
        [{'v': 1}],  # first emit because 0 has no metadata
        [{'v': 2}, {'v': 3}]  # second emit
    ]


def test_timed_window_timedelta(clean):  # noqa: F811
    # Pandas-style interval strings are converted to seconds.
    pytest.importorskip('pandas')
    source = Stream(asynchronous=True)
    a = source.timed_window('10ms')
    assert a.interval == 0.010


@gen_test()
def test_timed_window_backpressure():
    # A slow sink behind a timed window still throttles the emitter.
    q = Queue(maxsize=1)

    source = Stream(asynchronous=True)
    source.timed_window(0.01).sink(q.put)

    @gen.coroutine
    def read_from_q():
        while True:
            yield q.get()
            yield gen.sleep(0.1)

    IOLoop.current().add_callback(read_from_q)

    start = time()
    for i in range(5):
        yield source.emit(i)
        yield gen.sleep(0.01)
    stop = time()

    assert stop - start > 0.2
# --- Sinks, periodic sources, rate limiting and buffering -------------------

def test_sink_to_file():
    # sink_to_file writes one line per emitted value.
    with tmpfile() as fn:
        source = Stream()
        with sink_to_file(fn, source) as f:
            source.emit('a')
            source.emit('b')

        with open(fn) as f:
            data = f.read()

    assert data == 'a\nb\n'


def test_sink_with_args_and_kwargs():
    # Extra positional/keyword args are forwarded to the sink callable.
    L = dict()

    def mycustomsink(elem, key, prefix=""):
        key = prefix + key
        if key not in L:
            L[key] = list()
        L[key].append(elem)

    s = Stream()
    s.sink(mycustomsink, "cat", "super")

    s.emit(1)
    s.emit(2)
    assert L['supercat'] == [1, 2]


@gen_test()
def test_counter():
    # A PeriodicCallback source emits its callback's results on a timer.
    counter = itertools.count()
    source = PeriodicCallback(lambda: next(counter), 0.001, asynchronous=True)
    L = source.sink_to_list()
    yield gen.sleep(0.05)

    assert L


@gen_test()
def test_rate_limit():
    # rate_limit enforces a minimum spacing between emits.
    source = Stream(asynchronous=True)
    L = source.rate_limit(0.05).sink_to_list()

    start = time()
    for i in range(5):
        yield source.emit(i)
    stop = time()
    assert stop - start > 0.2
    assert len(L) == 5


@gen_test()
def test_delay():
    # delay holds each element for the given period before forwarding it.
    source = Stream(asynchronous=True)
    L = source.delay(0.02).sink_to_list()

    for i in range(5):
        yield source.emit(i)

    assert not L

    yield gen.sleep(0.04)

    assert len(L) < 5

    yield gen.sleep(0.1)

    assert len(L) == 5


@gen_test()
def test_delay_ref_counts():
    # Refs are held while elements wait in the delay queue.
    source = Stream(asynchronous=True)
    _ = source.delay(0.01)

    refs = []
    for i in range(5):
        r = RefCounter()
        refs.append(r)
        source.emit(i, metadata=[{'ref': r}])
    assert all(r.count == 1 for r in refs)

    yield gen.sleep(0.05)
    assert all(r.count == 0 for r in refs)


@gen_test()
def test_buffer():
    # A buffer absorbs bursts up to its size without applying backpressure.
    source = Stream(asynchronous=True)
    L = source.map(inc).buffer(10).map(inc).rate_limit(0.05).sink_to_list()

    start = time()
    for i in range(10):
        yield source.emit(i)
    stop = time()

    assert stop - start < 0.01
    assert not L

    start = time()
    for i in range(5):
        yield source.emit(i)
    stop = time()

    assert L
    assert stop - start > 0.04


@gen_test()
def test_buffer_ref_counts():
    # Refs are held while elements sit in the buffer.
    source = Stream(asynchronous=True)
    _ = source.buffer(5)

    refs = []
    for i in range(5):
        r = RefCounter()
        refs.append(r)
        source.emit(i, metadata=[{'ref': r}])
    assert all(r.count == 1 for r in refs)

    yield gen.sleep(0.05)
    assert all(r.count == 0 for r in refs)
def test_zip():
a = Stream()
b = Stream()
c = sz.zip(a, b)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
b.emit('b')
assert L == [(1, 'a'), (2, 'b')]
d = Stream()
# test zip from the object itself
# zip 3 streams together
e = a.zip(b, d)
L2 = e.sink_to_list()
a.emit(1)
b.emit(2)
d.emit(3)
assert L2 == [(1, 2, 3)]
def test_zip_literals():
a = Stream()
b = Stream()
c = sz.zip(a, 123, b)
L = c.sink_to_list()
a.emit(1)
b.emit(2)
assert L == [(1, 123, 2)]
a.emit(4)
b.emit(5)
assert L == [(1, 123, 2),
(4, 123, 5)]
def test_zip_same():
a = Stream()
b = a.zip(a)
L = b.sink_to_list()
a.emit(1)
a.emit(2)
assert L == [(1, 1), (2, 2)]
def test_combine_latest():
a = Stream()
b = Stream()
c = a.combine_latest(b)
d = a.combine_latest(b, emit_on=[a, b])
L = c.sink_to_list()
L2 = d.sink_to_list()
a.emit(1)
a.emit(2)
b.emit('a')
a.emit(3)
b.emit('b')
assert L == [(2, 'a'), (3, 'a'), (3, 'b')]
assert L2 == [(2, 'a'), (3, 'a'), (3, 'b')]
def test_combine_latest_emit_on():
a = Stream()
b = Stream()
c = a.combine_latest(b, emit_on=a)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_emit_on_stream():
a = Stream()
b = Stream()
c = a.combine_latest(b, emit_on=0)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_ref_counts():
a = Stream()
b = Stream()
_ = a.combine_latest(b)
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
# The new value kicks out the old value
ref2 = RefCounter()
a.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
# The value on stream a is still retained and the value on stream b is new
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref2.count == 1
assert ref3.count == 1
def test_combine_latest_metadata():
a = Stream()
b = Stream()
L = metadata(a.combine_latest(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
b.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # first emit when 2 is introduced
[{'v': 1}], # 3 has no metadata but it replaces the value on 'b'
[{'v': 1}, {'v': 4}] # 4 replaces the value without metadata on 'b'
]
@gen_test()
def test_zip_timeout():
a = Stream(asynchronous=True)
b = Stream(asynchronous=True)
c = sz.zip(a, b, maxsize=2)
L = c.sink_to_list()
a.emit(1)
a.emit(2)
future = a.emit(3)
with pytest.raises(gen.TimeoutError):
yield gen.with_timeout(timedelta(seconds=0.01), future)
b.emit('a')
yield future
assert L == [(1, 'a')]
def test_zip_ref_counts():
a = Stream()
b = Stream()
_ = a.zip(b)
# The first value in a becomes buffered
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
# The second value in a also becomes buffered
ref2 = RefCounter()
a.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 1
assert ref2.count == 1
# All emitted values are removed from the buffer
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref1.count == 0
assert ref2.count == 1 # still in the buffer
assert ref3.count == 0
def test_zip_metadata():
a = Stream()
b = Stream()
L = metadata(a.zip(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
a.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # first emit when 2 is introduced
[{'v': 4}] # second emit when 4 is introduced, and 3 has no metadata
]
def test_frequencies():
    """frequencies() maintains a running count per distinct element."""
    src = Stream()
    counts = src.frequencies().sink_to_list()
    for item in ('a', 'b', 'a'):
        src.emit(item)
    assert counts[-1] == {'a': 2, 'b': 1}
def test_flatten():
    """flatten() expands each emitted iterable into individual events."""
    src = Stream()
    out = src.flatten().sink_to_list()
    for chunk in ([1, 2, 3], [4, 5], [6, 7, 8]):
        src.emit(chunk)
    assert out == [1, 2, 3, 4, 5, 6, 7, 8]
def test_unique():
    """unique() suppresses values that have already been seen."""
    src = Stream()
    seen = src.unique().sink_to_list()
    for value in (1, 2, 1):
        src.emit(value)
    assert seen == [1, 2]
def test_unique_key():
    """unique(key=...) deduplicates on the key; maxsize=1 keeps only the most
    recent key in history, so an older key can pass through again."""
    source = Stream()
    L = source.unique(key=lambda x: x % 2, maxsize=1).sink_to_list()
    source.emit(1)
    source.emit(2)
    source.emit(4)  # even key already in the size-1 history: dropped
    source.emit(6)  # likewise dropped
    source.emit(3)  # odd key was evicted from history, so 3 passes
    assert L == [1, 2, 3]


def test_unique_metadata():
    """Metadata of each value that passes unique() is forwarded downstream."""
    source = Stream()
    L = metadata(source.unique()).flatten().sink_to_list()
    for i in range(5):
        source.emit(i, metadata=[{'v': i}])

    assert L == [{'v': i} for i in range(5)]
def test_unique_history():
    """A bounded history (maxsize=2) forgets old values, letting them re-emit;
    hashable=False must behave identically for hashable inputs."""
    source = Stream()
    s = source.unique(maxsize=2)
    s2 = source.unique(maxsize=2, hashable=False)
    L = s.sink_to_list()
    L2 = s2.sink_to_list()

    source.emit(1)
    source.emit(2)
    source.emit(1)
    source.emit(2)
    source.emit(1)
    source.emit(2)
    assert L == [1, 2]
    assert L == L2

    source.emit(3)
    source.emit(2)
    assert L == [1, 2, 3]
    assert L == L2

    source.emit(1)
    assert L == [1, 2, 3, 1]
    assert L == L2

    # update 2 position
    source.emit(2)
    # knock out 1
    source.emit(3)
    # update 2 position
    source.emit(2)
    assert L == [1, 2, 3, 1, 3]
    assert L == L2


def test_unique_history_dict():
    """hashable=False lets unhashable values (dicts) go through unique()."""
    source = Stream()
    s = source.unique(maxsize=2, hashable=False)
    L = s.sink_to_list()

    a = {'hi': 'world'}
    b = {'hi': 'bar'}
    c = {'foo': 'bar'}

    source.emit(a)
    source.emit(b)
    source.emit(a)
    source.emit(b)
    source.emit(a)
    source.emit(b)
    assert L == [a, b]

    source.emit(c)
    source.emit(b)
    assert L == [a, b, c]

    source.emit(a)
    assert L == [a, b, c, a]
def test_union():
    """union() merges several streams; every event passes through in order."""
    a, b, c = Stream(), Stream(), Stream()
    merged = a.union(b, c).sink_to_list()

    expected = []
    for stream, value in ((a, 1), (b, 2), (a, 3), (c, 4)):
        stream.emit(value)
        expected.append(value)
        assert merged == expected
def test_pluck():
    """pluck(i) extracts element i from each event; missing index raises."""
    a = Stream()
    L = a.pluck(1).sink_to_list()
    a.emit([1, 2, 3])
    assert L == [2]
    a.emit([4, 5, 6, 7, 8, 9])
    assert L == [2, 5]
    with pytest.raises(IndexError):
        a.emit([1])


def test_pluck_list():
    """pluck([i, j]) extracts a tuple of elements from each event."""
    a = Stream()
    L = a.pluck([0, 2]).sink_to_list()

    a.emit([1, 2, 3])
    assert L == [(1, 3)]
    a.emit([4, 5, 6, 7, 8, 9])
    assert L == [(1, 3), (4, 6)]
    with pytest.raises(IndexError):
        a.emit([1])


def test_collect():
    """collect() buffers events and emits them as a tuple on flush()."""
    source1 = Stream()
    source2 = Stream()
    collector = source1.collect()
    L = collector.sink_to_list()
    source2.sink(collector.flush)

    source1.emit(1)
    source1.emit(2)
    assert L == []

    source2.emit('anything')  # flushes collector
    assert L == [(1, 2)]

    source2.emit('anything')
    assert L == [(1, 2), ()]

    source1.emit(3)
    assert L == [(1, 2), ()]

    source2.emit('anything')
    assert L == [(1, 2), (), (3,)]


def test_collect_ref_counts():
    """Buffered values keep one reference each; flush releases them all."""
    source = Stream()
    collector = source.collect()

    refs = []
    for i in range(10):
        r = RefCounter()
        refs.append(r)
        source.emit(i, metadata=[{'ref': r}])

    assert all(r.count == 1 for r in refs)

    collector.flush()
    assert all(r.count == 0 for r in refs)


def test_collect_metadata():
    """flush() emits the concatenated metadata of all buffered values."""
    source = Stream()
    collector = source.collect()
    L = metadata(collector).sink_to_list()

    source.emit(0)
    source.emit(1, metadata=[{'v': 1}])
    source.emit(2, metadata=[{'v': 2}])
    collector.flush()
    source.emit(3, metadata=[{'v': 3}])
    source.emit(4, metadata=[{'v': 4}])
    collector.flush()

    assert L == [
        [{'v': 1}, {'v': 2}],  # Flush 0-2, but 0 has no metadata
        [{'v': 3}, {'v': 4}]  # Flush the rest
    ]
def test_map_str():
    """The repr of a map node names the mapped function."""
    def add(x=0, y=0):
        return x + y

    source = Stream()
    node = source.map(add, y=10)
    assert str(node) == '<map: add>'
def test_filter_str():
    """The repr of a filter node names the predicate function."""
    def iseven(x):
        return x % 2 == 0

    source = Stream()
    node = source.filter(iseven)
    assert str(node) == '<filter: iseven>'
def test_timed_window_str(clean):  # noqa: F811
    """The repr of a timed_window node shows its interval."""
    source = Stream()
    s = source.timed_window(.05)
    assert str(s) == '<timed_window: 0.05>'


def test_partition_str():
    """The repr of a partition node shows its chunk size."""
    source = Stream()
    s = source.partition(2)
    assert str(s) == '<partition: 2>'


def test_partition_ref_counts():
    """partition(2) retains a reference to the first element of each pair
    until the pair is emitted."""
    source = Stream()
    _ = source.partition(2)

    for i in range(10):
        r = RefCounter()
        source.emit(i, metadata=[{'ref': r}])
        if i % 2 == 0:
            assert r.count == 1  # buffered, waiting for its partner
        else:
            assert r.count == 0  # pair emitted, reference released


def test_partition_metadata():
    """Each emitted partition carries the metadata of its members."""
    source = Stream()
    L = metadata(source.partition(2)).sink_to_list()

    source.emit(0)
    source.emit(1, metadata=[{'v': 1}])
    source.emit(2, metadata=[{'v': 2}])
    source.emit(3, metadata=[{'v': 3}])
    assert L == [
        [{'v': 1}],  # first emit when 1 is introduced. 0 has no metadata
        [{'v': 2}, {'v': 3}]  # second emit
    ]
def test_stream_name_str():
    """A custom stream_name is included in the stream's repr."""
    named = Stream(stream_name='this is not a stream')
    assert str(named) == '<this is not a stream; Stream>'
def test_zip_latest():
    """zip_latest is lossless on the first stream, lossy on the rest;
    contrast with combine_latest(emit_on=a)."""
    a = Stream()
    b = Stream()
    c = a.zip_latest(b)
    d = a.combine_latest(b, emit_on=a)
    L = c.sink_to_list()
    L2 = d.sink_to_list()

    a.emit(1)
    a.emit(2)
    b.emit('a')
    b.emit('b')
    a.emit(3)

    assert L == [(1, 'a'), (2, 'a'), (3, 'b')]
    assert L2 == [(3, 'b')]


def test_zip_latest_reverse():
    """zip_latest pairs buffered lossless values once the lossy side arrives."""
    a = Stream()
    b = Stream()
    c = a.zip_latest(b)
    L = c.sink_to_list()

    b.emit('a')
    a.emit(1)
    a.emit(2)
    a.emit(3)
    b.emit('b')
    a.emit(4)

    assert L == [(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')]


def test_triple_zip_latest():
    """zip_latest with three streams: only the first stream is lossless."""
    from streamz.core import Stream
    s1 = Stream()
    s2 = Stream()
    s3 = Stream()
    s_simple = s1.zip_latest(s2, s3)
    L_simple = s_simple.sink_to_list()

    s1.emit(1)
    s2.emit('I')
    s2.emit("II")
    s1.emit(2)
    s2.emit("III")
    s3.emit('a')
    s3.emit('b')
    s1.emit(3)

    assert L_simple == [(1, 'III', 'a'), (2, 'III', 'a'), (3, 'III', 'b')]


def test_zip_latest_ref_counts():
    """Lossless values are retained only until all upstreams have a value;
    lossy values are retained until replaced."""
    a = Stream()
    b = Stream()
    _ = a.zip_latest(b)

    ref1 = RefCounter()
    a.emit(1, metadata=[{'ref': ref1}])
    assert ref1.count == 1  # Retained until stream b has a value

    # The lossless stream is never retained if all upstreams have a value
    ref2 = RefCounter()
    b.emit(2, metadata=[{'ref': ref2}])
    assert ref1.count == 0
    assert ref2.count == 1

    # Kick out the stream b value and verify it has zero references
    ref3 = RefCounter()
    b.emit(3, metadata=[{'ref': ref3}])
    assert ref2.count == 0
    assert ref3.count == 1

    # Verify the lossless value is not retained, but the lossy value is
    ref4 = RefCounter()
    a.emit(3, metadata=[{'ref': ref4}])
    assert ref3.count == 1
    assert ref4.count == 0


def test_zip_latest_metadata():
    """Emitted tuples carry metadata from both the new and retained values."""
    a = Stream()
    b = Stream()
    L = metadata(a.zip_latest(b)).sink_to_list()

    a.emit(1, metadata=[{'v': 1}])
    b.emit(2, metadata=[{'v': 2}])
    a.emit(3)
    b.emit(4, metadata=[{'v': 4}])
    assert L == [
        [{'v': 1}, {'v': 2}],  # the first emit when 2 is introduced
        [{'v': 2}]  # 3 has no metadata
    ]
def test_connect():
    """connect() attaches an upstream to an existing downstream pipeline."""
    source_downstream = Stream()
    # connect assumes this default behaviour
    # of stream initialization
    assert not source_downstream.downstreams
    assert source_downstream.upstreams == [None]

    # initialize the second stream to connect to
    source_upstream = Stream()

    sout = source_downstream.map(lambda x: x + 1)
    L = list()
    sout = sout.map(L.append)
    source_upstream.connect(source_downstream)
    source_upstream.emit(2)
    source_upstream.emit(4)

    assert L == [3, 5]


def test_multi_connect():
    """connect() also works when the downstream is a union of streams."""
    source0 = Stream()
    source1 = Stream()
    source_downstream = source0.union(source1)
    # connect assumes this default behaviour
    # of stream initialization
    assert not source_downstream.downstreams

    # initialize the second stream to connect to
    source_upstream = Stream()

    sout = source_downstream.map(lambda x: x + 1)
    L = list()
    sout = sout.map(L.append)
    source_upstream.connect(source_downstream)
    source_upstream.emit(2)
    source_upstream.emit(4)

    assert L == [3, 5]


def test_disconnect():
    """disconnect() stops events flowing to the detached downstream."""
    source = Stream()

    upstream = Stream()
    L = upstream.sink_to_list()

    source.emit(1)
    assert L == []

    source.connect(upstream)
    source.emit(2)
    source.emit(3)
    assert L == [2, 3]

    source.disconnect(upstream)
    source.emit(4)
    assert L == [2, 3]


def test_gc():
    """A downstream with no external references is garbage-collected and
    detached from its source."""
    source = Stream()

    L = []
    a = source.map(L.append)

    source.emit(1)
    assert L == [1]

    del a
    import gc; gc.collect()
    # detachment may be asynchronous; poll briefly
    start = time()
    while source.downstreams:
        sleep(0.01)
        assert time() < start + 1

    source.emit(2)
    assert L == [1]
@gen_test()
def test_from_file():
    """from_textfile tails a file: existing lines first, then appended ones."""
    with tmpfile() as fn:
        with open(fn, 'wt') as f:
            f.write('{"x": 1, "y": 2}\n')
            f.write('{"x": 2, "y": 2}\n')
            f.write('{"x": 3, "y": 2}\n')
            f.flush()

            source = Stream.from_textfile(fn, poll_interval=0.010,
                                          asynchronous=True, start=False)
            L = source.map(json.loads).pluck('x').sink_to_list()

            assert L == []

            source.start()

            yield await_for(lambda: len(L) == 3, timeout=5)

            assert L == [1, 2, 3]

            f.write('{"x": 4, "y": 2}\n')
            f.write('{"x": 5, "y": 2}\n')
            f.flush()

            start = time()
            while L != [1, 2, 3, 4, 5]:
                yield gen.sleep(0.01)
                assert time() < start + 2  # reads within 2s


@gen_test()
def test_from_file_end():
    """from_end=True skips pre-existing content and emits only new lines."""
    with tmpfile() as fn:
        with open(fn, 'wt') as f:
            f.write('data1\n')
            f.flush()
            source = Stream.from_textfile(fn, poll_interval=0.010,
                                          start=False, from_end=True)
            out = source.sink_to_list()
            source.start()
            assert out == []
            yield await_for(lambda: source.started, 2, period=0.02)

            f.write('data2\n')
            f.flush()
            yield await_for(lambda: out == ['data2\n'], timeout=5, period=0.1)


@gen_test()
def test_filenames():
    """Stream.filenames emits existing files first, then newly created ones."""
    with tmpfile() as fn:
        os.mkdir(fn)
        with open(os.path.join(fn, 'a'), 'w'):
            pass
        with open(os.path.join(fn, 'b'), 'w'):
            pass

        source = Stream.filenames(fn, asynchronous=True)
        L = source.sink_to_list()
        source.start()

        while len(L) < 2:
            yield gen.sleep(0.01)

        assert L == [os.path.join(fn, x) for x in ['a', 'b']]

        with open(os.path.join(fn, 'c'), 'w'):
            pass

        while len(L) < 3:
            yield gen.sleep(0.01)

        assert L == [os.path.join(fn, x) for x in ['a', 'b', 'c']]
def test_docstrings():
    """Core node docstrings and names survive the register_api wrapping,
    both on the class and on instances."""
    for obj in (Stream, Stream()):
        assert 'every element' in obj.map.__doc__
        assert obj.map.__name__ == 'map'
        assert 'predicate' in obj.filter.__doc__
        assert obj.filter.__name__ == 'filter'
def test_subclass():
    """register_api on a subclass adds nodes to it without polluting Stream."""
    class NewStream(Stream):
        pass

    @NewStream.register_api()
    class foo(NewStream):
        pass

    assert hasattr(NewStream, 'map')
    assert hasattr(NewStream(), 'map')
    assert hasattr(NewStream, 'foo')
    assert hasattr(NewStream(), 'foo')
    assert not hasattr(Stream, 'foo')
    assert not hasattr(Stream(), 'foo')
@gen_test()
def test_latest():
    """latest() drops intermediate values while the downstream is busy."""
    source = Stream(asynchronous=True)

    L = []

    @gen.coroutine
    def slow_write(x):
        yield gen.sleep(0.050)
        L.append(x)

    s = source.map(inc).latest().map(slow_write)  # noqa: F841

    source.emit(1)
    yield gen.sleep(0.010)
    source.emit(2)
    source.emit(3)  # supersedes 2 while slow_write is still busy with 1

    start = time()
    while len(L) < 2:
        yield gen.sleep(0.01)
        assert time() < start + 3
    assert L == [2, 4]

    yield gen.sleep(0.060)
    assert L == [2, 4]


def test_latest_ref_counts():
    """latest() retains only the most recent value's reference."""
    source = Stream()
    _ = source.latest()

    ref1 = RefCounter()
    source.emit(1, metadata=[{'ref': ref1}])
    assert ref1.count == 1

    ref2 = RefCounter()
    source.emit(2, metadata=[{'ref': ref2}])
    assert ref1.count == 0
    assert ref2.count == 1
def test_destroy():
    """destroy() unhooks a node from its upstream so no further events flow."""
    source = Stream()
    node = source.map(inc)
    received = node.sink_to_list()

    source.emit(1)
    assert received == [2]

    node.destroy()
    assert not list(source.downstreams)
    assert not node.upstreams

    source.emit(2)
    assert received == [2]
def dont_test_stream_kwargs(clean):  # noqa: F811
    """Test the good and bad kwargs for the stream.

    Currently just stream_name.

    Disabled (``dont_`` prefix).  Bug fix: the original ran ``del sout``
    again after the ``pytest.raises(TypeError)`` block, but the failing
    call never rebinds ``sout`` (it was deleted just above), so the first
    loop iteration would die with NameError.  The redundant ``del`` is
    removed.
    """
    test_name = "some test name"
    sin = Stream(stream_name=test_name)
    sin2 = Stream()

    assert sin.name == test_name
    # when not defined, should be None
    assert sin2.name is None

    # add new core methods here, initialized
    # these should be functions, use partial to partially initialize them
    # (if they require more arguments)
    streams = [
        # some filter kwargs, so we comment them out
        partial(sin.map, lambda x: x),
        partial(sin.accumulate, lambda x1, x2: x1),
        partial(sin.filter, lambda x: True),
        partial(sin.partition, 2),
        partial(sin.sliding_window, 2),
        partial(sin.timed_window, .01),
        partial(sin.rate_limit, .01),
        partial(sin.delay, .02),
        partial(sin.buffer, 2),
        partial(sin.zip, sin2),
        partial(sin.combine_latest, sin2),
        sin.frequencies,
        sin.flatten,
        sin.unique,
        sin.union,
        partial(sin.pluck, 0),
        sin.collect,
    ]

    good_kwargs = dict(stream_name=test_name)
    bad_kwargs = dict(foo="bar")
    for s in streams:
        # try good kwargs
        sout = s(**good_kwargs)
        assert sout.name == test_name
        del sout

        with pytest.raises(TypeError):
            sout = s(**bad_kwargs)
        # The TypeError fires before assignment, so ``sout`` stays deleted
        # here; emitting must not resurrect the removed node.
        sin.emit(1)
        # need a second emit for accumulate
        sin.emit(1)

    # verify that sout is properly deleted each time by emitting once into sin
    # and not getting TypeError
    # garbage collect and then try
    import gc
    gc.collect()
    sin.emit(1)
@pytest.fixture
def thread(loop):  # noqa: F811
    """Run the tornado IOLoop in a daemon thread and wait until it is live."""
    from threading import Thread, Event
    thread = Thread(target=loop.start)
    thread.daemon = True
    thread.start()

    event = Event()
    loop.add_callback(event.set)
    event.wait()  # a callback ran, so the loop is accepting work

    return thread


def test_percolate_loop_information(clean):  # noqa: F811
    """A time-based node propagates its IOLoop back to its source."""
    source = Stream()
    assert not source.loop
    s = source.timed_window(0.5)
    assert source.loop is s.loop


def test_separate_thread_without_time(loop, thread):  # noqa: F811
    """Synchronous emits work when the loop runs in another thread."""
    assert thread.is_alive()
    source = Stream(loop=loop)
    L = source.map(inc).sink_to_list()

    for i in range(10):
        source.emit(i)
        assert L[-1] == i + 1


def test_separate_thread_with_time(clean):  # noqa: F811
    """A synchronous emit blocks until the async sink coroutine completes."""
    L = []

    @gen.coroutine
    def slow_write(x):
        yield gen.sleep(0.1)
        L.append(x)

    source = Stream(asynchronous=False)
    source.map(inc).sink(slow_write)

    start = time()
    source.emit(1)
    stop = time()

    assert stop - start > 0.1
    assert L == [2]
def test_execution_order():
    """Downstream execution order is deterministic but depends on the order
    in which the downstream nodes were created."""
    L = []
    for i in range(5):
        s = Stream()
        b = s.pluck(1)
        a = s.pluck(0)
        li = a.combine_latest(b, emit_on=a).sink_to_list()
        z = [(1, 'red'), (2, 'blue'), (3, 'green')]
        for zz in z:
            s.emit(zz)
        L.append((li, ))
    for ll in L:
        assert ll == L[0]

    L2 = []
    for i in range(5):
        s = Stream()
        a = s.pluck(0)
        b = s.pluck(1)
        li = a.combine_latest(b, emit_on=a).sink_to_list()
        z = [(1, 'red'), (2, 'blue'), (3, 'green')]
        for zz in z:
            s.emit(zz)
        L2.append((li,))
    for ll, ll2 in zip(L, L2):
        assert ll2 == L2[0]
        # creation order (b-before-a vs a-before-b) changes the result
        assert ll != ll2
@gen_test()
def test_map_errors_log():
    """Errors raised inside an async pipeline are logged, not raised."""
    a = Stream(asynchronous=True)
    b = a.delay(0.001).map(lambda x: 1 / x)  # noqa: F841
    with captured_logger('streamz') as logger:
        a._emit(0)
        yield gen.sleep(0.1)

        out = logger.getvalue()
        assert 'ZeroDivisionError' in out


def test_map_errors_raises():
    """Errors in a synchronous pipeline propagate to the emitter."""
    a = Stream()
    b = a.map(lambda x: 1 / x)  # noqa: F841
    with pytest.raises(ZeroDivisionError):
        a.emit(0)


@gen_test()
def test_accumulate_errors_log():
    """accumulate errors in an async pipeline are logged, not raised."""
    a = Stream(asynchronous=True)
    b = a.delay(0.001).accumulate(lambda x, y: x / y, with_state=True)  # noqa: F841
    with captured_logger('streamz') as logger:
        a._emit(1)
        a._emit(0)
        yield gen.sleep(0.1)

        out = logger.getvalue()
        assert 'ZeroDivisionError' in out


def test_accumulate_errors_raises():
    """accumulate errors in a synchronous pipeline propagate to the emitter."""
    a = Stream()
    b = a.accumulate(lambda x, y: x / y, with_state=True)  # noqa: F841
    with pytest.raises(ZeroDivisionError):
        a.emit(1)
        a.emit(0)


@gen_test()
def test_sync_in_event_loop():
    """A sync stream used from within a running loop gets its own IOLoop."""
    a = Stream()
    assert not a.asynchronous
    L = a.timed_window(0.01).sink_to_list()

    sleep(0.05)
    assert L
    assert a.loop
    assert a.loop is not IOLoop.current()


def test_share_common_ioloop(clean):  # noqa: F811
    """Independent sync streams share one background IOLoop."""
    a = Stream()
    b = Stream()
    aa = a.timed_window(0.01)
    bb = b.timed_window(0.01)
    assert aa.loop is bb.loop
@pytest.mark.parametrize('data', [
    # [(start, end, step), expected events out of emitting 0..5]
    [[], [0, 1, 2, 3, 4, 5]],
    [[None, None, None], [0, 1, 2, 3, 4, 5]],
    [[1, None, None], [1, 2, 3, 4, 5]],
    [[None, 4, None], [0, 1, 2, 3]],
    [[None, 4, 2], [0, 2]],
    [[3, 1, None], []]
])
def test_slice(data):
    """slice(start, end, step) selects events by their sequence number."""
    pars, expected = data
    a = Stream()
    b = a.slice(*pars)
    out = b.sink_to_list()
    for i in range(6):
        a.emit(i)
    assert out == expected


def test_slice_err():
    """A negative end is rejected."""
    a = Stream()
    with pytest.raises(ValueError):
        a.slice(end=-1)


def test_start():
    """start() on a pipeline reaches the source's start() hook."""
    flag = []

    class MySource(Stream):
        def start(self):
            flag.append(True)

    s = MySource().map(inc)
    s.start()
    assert flag == [True]
def test_connect_zip():
    """connect() adds a stream as a new upstream of an existing zip node."""
    a = Stream()
    b = Stream()
    c = Stream()
    x = a.zip(b)
    L = x.sink_to_list()
    c.connect(x)
    a.emit(1)
    b.emit(1)
    assert not L  # zip now waits for all three upstreams
    c.emit(1)
    assert L == [(1, 1, 1)]


def test_disconnect_zip():
    """disconnect() removes a stream from a zip node's upstreams."""
    a = Stream()
    b = Stream()
    c = Stream()
    x = a.zip(b, c)
    L = x.sink_to_list()
    b.disconnect(x)
    a.emit(1)
    b.emit(1)
    assert not L  # b is detached; zip still waits on c
    c.emit(1)
    assert L == [(1, 1)]


def test_connect_combine_latest():
    """connect() adds a stream as a new slot of a combine_latest node."""
    a = Stream()
    b = Stream()
    c = Stream()
    x = a.combine_latest(b, emit_on=a)
    L = x.sink_to_list()
    c.connect(x)
    b.emit(1)
    c.emit(1)
    a.emit(1)
    assert L == [(1, 1, 1)]


def test_connect_discombine_latest():
    """disconnect() removes a slot from a combine_latest node."""
    a = Stream()
    b = Stream()
    c = Stream()
    x = a.combine_latest(b, c, emit_on=a)
    L = x.sink_to_list()
    c.disconnect(x)
    b.emit(1)
    c.emit(1)
    a.emit(1)
    assert L == [(1, 1)]
# Pull in async/await-based tests only on Python versions that support them.
if sys.version_info >= (3, 5):
    from streamz.tests.py3_test_core import *  # noqa


# Regression checks: buffer() must be constructible after these nodes.
def test_buffer_after_partition():
    Stream().partition(1).buffer(1)


def test_buffer_after_timed_window():
    Stream().timed_window(1).buffer(1)


def test_buffer_after_sliding_window():
    Stream().sliding_window(1).buffer(1)
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
# _posixsubprocess is POSIX-only; tests that need it skip when absent.
try:
    import _posixsubprocess
except ImportError:
    _posixsubprocess = None

# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')

# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
# NOTE: the docstring below is asserted verbatim by
# CAPITest.test_instancemethod -- do not change it.
def testfunction(self):
    """some doc"""
    return self
class InstanceMethod:
    # Plain functions wrapped by _testcapi.instancemethod: they should bind
    # like instance methods while preserving __doc__ and attributes.
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Tests exercised through the _testcapi C extension (Python C-API)."""

    def test_instancemethod(self):
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)

        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")

    def test_no_FatalError_infinite_loop(self):
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                        b'Fatal Python error:'
                        b' PyThreadState_Get: no current thread'))

    def test_memoryview_from_NULL_pointer(self):
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)

    def test_exc_info(self):
        raised_exception = ValueError("5")
        new_exc = TypeError("TEST")
        try:
            raise raised_exception
        except ValueError as e:
            tb = e.__traceback__
            orig_sys_exc_info = sys.exc_info()
            orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
            new_sys_exc_info = sys.exc_info()
            new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
            reset_sys_exc_info = sys.exc_info()

            self.assertEqual(orig_exc_info[1], e)

            self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
            self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
            self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
        else:
            self.assertTrue(False)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        class Z(object):
            def __len__(self):
                return 1

        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)

    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
            "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
            "docstring_with_invalid_signature($module, /, boo)\n"
            "\n"
            "This docstring has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
            "docstring_with_invalid_signature2($module, /, boo)\n"
            "\n"
            "--\n"
            "\n"
            "This docstring also has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
            "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
            "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
            "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
            "($module, /, parameter)")

    def test_c_type_with_matrix_multiplication(self):
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        o @= m1
        # int has no __imatmul__, so @= falls back to __rmatmul__
        self.assertEqual(o, ("matmul", 42, m1))

    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned NULL '
                             br'without setting an error\n'
                             br'SystemError: <built-in function '
                             br'return_null_without_error> returned NULL '
                             br'without setting an error\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*", line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an error')

    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned a '
                             br'result with an error set\n'
                             br'ValueError\n'
                             br'\n'
                             br'The above exception was the direct cause '
                             br'of the following exception:\n'
                             br'\n'
                             br'SystemError: <built-in '
                             br'function return_result_with_error> '
                             br'returned a result with an error set\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*, line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an error set')

    def test_buildvalue_N(self):
        _testcapi.test_buildvalue_N()

    def test_set_nomemory(self):
        code = """if 1:
            import _testcapi
            class C(): pass

            # The first loop tests both functions and that remove_mem_hooks()
            # can be called twice in a row. The second loop checks a call to
            # set_nomemory() after a call to remove_mem_hooks(). The third
            # loop checks the start and stop arguments of set_nomemory().
            for outer_cnt in range(1, 4):
                start = 10 * outer_cnt
                for j in range(100):
                    if j == 0:
                        if outer_cnt != 3:
                            _testcapi.set_nomemory(start)
                        else:
                            _testcapi.set_nomemory(start, start + 1)
                    try:
                        C()
                    except MemoryError as e:
                        if outer_cnt != 3:
                            _testcapi.remove_mem_hooks()
                        print('MemoryError', outer_cnt, j)
                        _testcapi.remove_mem_hooks()
                        break
        """
        rc, out, err = assert_python_ok('-c', code)
        self.assertIn(b'MemoryError 1 10', out)
        self.assertIn(b'MemoryError 2 20', out)
        self.assertIn(b'MemoryError 3 30', out)

    def test_mapping_keys_values_items(self):
        class Mapping1(dict):
            def keys(self):
                return list(super().keys())
            def values(self):
                return list(super().values())
            def items(self):
                return list(super().items())
        class Mapping2(dict):
            def keys(self):
                return tuple(super().keys())
            def values(self):
                return tuple(super().values())
            def items(self):
                return tuple(super().items())
        dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}

        for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
                        dict_obj, OrderedDict(dict_obj),
                        Mapping1(dict_obj), Mapping2(dict_obj)]:
            self.assertListEqual(_testcapi.get_mapping_keys(mapping),
                                 list(mapping.keys()))
            self.assertListEqual(_testcapi.get_mapping_values(mapping),
                                 list(mapping.values()))
            self.assertListEqual(_testcapi.get_mapping_items(mapping),
                                 list(mapping.items()))

    def test_mapping_keys_values_items_bad_arg(self):
        self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)

        class BadMapping:
            def keys(self):
                return None
            def values(self):
                return None
            def items(self):
                return None
        bad_mapping = BadMapping()
        self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)

    @unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
                         'need _testcapi.negative_refcount')
    def test_negative_refcount(self):
        # bpo-35059: Check that Py_DECREF() reports the correct filename
        # when calling _Py_NegativeRefcount() to abort Python.
        code = textwrap.dedent("""
            import _testcapi
            from test import support

            with support.SuppressCrashReport():
                _testcapi.negative_refcount()
        """)
        rc, out, err = assert_python_failure('-c', code)
        self.assertRegex(err,
                         br'_testcapimodule\.c:[0-9]+: '
                         br'_Py_NegativeRefcount: Assertion ".*" failed; '
                         br'object has negative ref count')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Check that a subinterpreter gets its own sys.modules and builtins."""

    def test_subinterps(self):
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            # the ids written by the subinterpreter must differ from ours
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))


class TestThreadState(unittest.TestCase):
    """Thread-state tests driven via _testcapi._test_thread_state."""

    @support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []

            def callback():
                idents.append(threading.get_ident())

            _testcapi._test_thread_state(callback)
            a = b = callback
            time.sleep(1)

            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")

        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    # Dynamically adopt every test_* callable exported by _testcapi
    # (except *_code variants) as a test method of this class.
    locals().update((name, getattr(_testcapi, name))
                    for name in dir(_testcapi)
                    if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
    """Tests for the PYTHONMALLOC debug hooks' crash diagnostics."""

    # Allocator name passed via the PYTHONMALLOC env var; subclasses override.
    PYTHONMALLOC = 'debug'
    # Pointer as rendered by the debug hooks, with or without the '0x' prefix:
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'

    def check(self, code):
        """Run `code` in a subprocess expected to abort; return decoded stderr."""
        # The child process is supposed to crash, so suppress any OS
        # crash-report dialogs that would hang the test.
        with support.SuppressCrashReport():
            out = assert_python_failure('-c', code,
                                        PYTHONMALLOC=self.PYTHONMALLOC)
        stderr = out.err
        return stderr.decode('ascii', 'replace')

    def test_buffer_overflow(self):
        # Writing past the end of an allocation must be reported with the
        # full diagnostic dump below when the block is freed.
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r" 16 bytes originally requested\n"
                 r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r" at tail\+0: 0x78 \*\*\* OUCH\n"
                 r" at tail\+1: 0xfb\n"
                 r" at tail\+2: 0xfb\n"
                 r" .*\n"
                 r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r" Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)

    def test_api_misuse(self):
        # Allocating with PyMem (API 'm') and freeing with PyObject (API 'r')
        # must be detected as an API mismatch.
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r" 16 bytes originally requested\n"
                 r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r" Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)

    def check_malloc_without_gil(self, code):
        # Shared helper: `code` must die with the "called without the GIL"
        # fatal error.
        out = self.check(code)
        expected = ('Fatal Python error: Python memory allocator called '
                    'without holding the GIL')
        self.assertIn(expected, out)

    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
    # Same diagnostics tests, but with the raw libc malloc allocator plus
    # debug hooks (PYTHONMALLOC=malloc_debug).
    PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    # Same diagnostics tests, but with the pymalloc allocator plus debug
    # hooks; only meaningful when CPython was built with pymalloc.
    PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    # (debug builds enable the debug hooks by default, hence the empty value).
    PYTHONMALLOC = ''
# Standard unittest entry point when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
|
notifications.py | import json
from threading import Thread
from pywebpush import webpush, WebPushException
from flask import current_app
from syllabin.models import User, Group, Subscription, Announcement
from syllabin.components import db
class FlaskThread(Thread):
    # Thread subclass that captures the current Flask application at
    # construction time and runs its target inside that app's application
    # context, so thread code can use current_app / the db session just like
    # request handlers do.

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Must be constructed while an app context is active;
        # _get_current_object() unwraps the current_app proxy to the real
        # application object so it can be carried across threads.
        self.app = current_app._get_current_object()

    def run(self):
        # Re-enter the captured app's context in this thread before running
        # the target.
        with self.app.app_context():
            super().run()
def notify_group(text, group_name=None):
    """Fire-and-forget: push `text` to a group (or to everyone when
    `group_name` is None) from a daemonized background thread."""
    worker = FlaskThread(
        target=notify_group_thread,
        kwargs={"text": text, "group_name": group_name},
    )
    worker.daemon = True
    worker.start()
def notify_group_thread(text, group_name):
    """Persist an announcement and web-push `text` to subscribers.

    Runs inside a FlaskThread (app context available). When `group_name`
    is None the announcement is global and all subscriptions are notified;
    otherwise only subscriptions of that group are notified.
    """
    if group_name is None:
        subscriptions = Subscription.query.all()
        announcement = Announcement(message=text)
    else:
        group = Group.query.filter_by(name=group_name).first()
        if group is None:
            # Unknown group: nothing to announce or notify. (Previously this
            # crashed the worker thread with AttributeError on `group.id`.)
            return
        subscriptions = Subscription.query.filter_by(group_id=group.id).all()
        announcement = Announcement(message=text, group_id=group.id)
    db.session.add(announcement)
    db.session.commit()
    for subscription in subscriptions:
        if not subscription.subscription_info:
            continue
        try:
            webpush(
                subscription_info=json.loads(subscription.subscription_info),
                data=text,
                vapid_private_key=current_app.config['SYLLABIN_PUSH_PRIVATE_KEY'],
                vapid_claims={
                    "sub": "mailto:ilya@mzp.icu",
                },
            )
        except WebPushException as ex:
            # Fix: the original passed "{}" placeholders to print() without
            # calling .format(), so the message was never interpolated.
            print("I'm sorry, Dave, but I can't do that: {}".format(repr(ex)))
            # Mozilla returns additional information in the body of the response.
            extra = None
            if ex.response is not None:
                try:
                    # .json() raises ValueError when the body isn't JSON;
                    # call it once instead of twice.
                    extra = ex.response.json()
                except ValueError:
                    extra = None
            if extra:
                # Fix: .json() returns a dict, so use key access; attribute
                # access (extra.code) raised AttributeError.
                print("Remote service replied with a {}:{}, {}".format(
                    extra.get("code"),
                    extra.get("errno"),
                    extra.get("message"),
                ))
|
check_mem_usage.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Memory usage check."""
import time
from threading import Thread
from typing import List, Tuple, Union
import click
from aea.protocols.base import Message
from aea.registries.resources import Resources
from aea.skills.base import Handler
from benchmark.checks.utils import SyncedGeneratorConnection # noqa: I100
from benchmark.checks.utils import (
get_mem_usage_in_mb,
make_agent,
make_envelope,
make_skill,
multi_run,
print_results,
wait_for_condition,
)
from packages.fetchai.protocols.default.message import DefaultMessage
class TestHandler(Handler):
    """Dummy handler to handle messages."""

    # Only messages of the default protocol are routed to this handler.
    SUPPORTED_PROTOCOL = DefaultMessage.protocol_id

    def setup(self) -> None:
        """Noop setup."""

    def teardown(self) -> None:
        """Noop teardown."""

    def handle(self, message: Message) -> None:
        """Handle incoming message.

        Echoes a reply back to the sender so envelopes keep circulating for
        the duration of the benchmark.
        """
        self.context.outbox.put(make_envelope(message.to, message.sender))
def run(duration: int, runtime_mode: str) -> List[Tuple[str, Union[int, float]]]:
    """Check memory usage.

    Starts an echo agent, lets envelopes flow for `duration` seconds, then
    samples memory and returns (metric name, value) pairs.
    """
    # pylint: disable=import-outside-toplevel,unused-import
    # import manually due to some lazy imports in decision_maker
    import aea.decision_maker.default  # noqa: F401

    connection = SyncedGeneratorConnection.make()
    resources = Resources()
    resources.add_connection(connection)
    agent = make_agent(runtime_mode=runtime_mode, resources=resources)
    agent.resources.add_skill(make_skill(agent, handlers={"test": TestHandler}))
    # Run the agent in a daemon thread so a wedged agent cannot keep the
    # benchmark process alive.
    t = Thread(target=agent.start, daemon=True)
    t.start()
    wait_for_condition(lambda: agent.is_running, timeout=5)
    # Traffic flows only during the measured window; disable before sampling
    # memory so the reading is stable.
    connection.enable()
    time.sleep(duration)
    connection.disable()
    mem_usage = get_mem_usage_in_mb()
    agent.stop()
    t.join(5)
    rate = connection.count_in / duration
    return [
        ("envelopes received", connection.count_in),
        ("envelopes sent", connection.count_out),
        ("rate (envelopes/second)", rate),
        ("mem usage (Mb)", mem_usage),
    ]
@click.command()
@click.option("--duration", default=3, help="Run time in seconds.")
@click.option(
    "--runtime_mode", default="async", help="Runtime mode: async or threaded."
)
@click.option("--number_of_runs", default=10, help="How many times run test.")
def main(duration: int, runtime_mode: str, number_of_runs: int) -> None:
    """Run test."""
    click.echo("Start test with options:")
    click.echo(f"* Duration: {duration} seconds")
    click.echo(f"* Runtime mode: {runtime_mode}")
    click.echo(f"* Number of runs: {number_of_runs}")
    # multi_run repeats `run` number_of_runs times and aggregates the
    # (name, value) pairs; print_results renders them.
    print_results(multi_run(int(number_of_runs), run, (duration, runtime_mode),))
# Script entry point; click injects the option values at call time.
if __name__ == "__main__":
    main()  # pylint: disable=no-value-for-parameter
|
program.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for TensorBoard command line program.
This is a lightweight module for bringing up a TensorBoard HTTP server
or emulating the `tensorboard` shell command.
Those wishing to create custom builds of TensorBoard can use this module
by swapping out `tensorboard.main` with the custom definition that
modifies the set of plugins and static assets.
This module does not depend on first-party plugins or the default web
server assets. Those are defined in `tensorboard.default`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import argparse
import atexit
from collections import defaultdict
import errno
import inspect
import logging
import os
import signal
import socket
import sys
import threading
import time
from absl import flags as absl_flags
from absl.flags import argparse_flags
import absl.logging
import six
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from tensorboard import manager
from tensorboard import version
from tensorboard.backend import application
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
def setup_environment():
    """Makes recommended modifications to the environment.

    This function changes global state in the Python process. Calling
    this function is a good idea, but it can't appropriately be called
    from library routines.
    """
    # Quiet absl's default INFO chatter.
    absl.logging.set_verbosity(absl.logging.WARNING)

    # The default is HTTP/1.0 for some strange reason. If we don't use
    # HTTP/1.1 then a new TCP socket and Python thread is created for
    # each HTTP request. The tradeoff is we must always specify the
    # Content-Length header, or do chunked encoding for streaming.
    serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
def get_default_assets_zip_provider():
    """Opens stock TensorBoard web assets collection.

    Returns:
      Returns function that returns a newly opened file handle to zip file
      containing static assets for stock TensorBoard, or None if webfiles.zip
      could not be found. The value the callback returns must be closed. The
      paths inside the zip file are considered absolute paths on the web server.
    """
    # NOTE: sys._getframe(1) resolves the *caller's* module file, so
    # webfiles.zip is looked up next to whichever module invoked this
    # function. Do not wrap this call in another helper, or the frame depth
    # will be wrong.
    path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))),
                        'webfiles.zip')
    if not os.path.exists(path):
        logger.warning('webfiles.zip static assets not found: %s', path)
        return None
    return lambda: open(path, 'rb')
class TensorBoard(object):
    """Class for running TensorBoard.

    Fields:
      plugin_loaders: Set from plugins passed to constructor.
      assets_zip_provider: Set by constructor.
      server_class: Set by constructor.
      flags: An argparse.Namespace set by the configure() method.
      cache_key: As `manager.cache_key`; set by the configure() method.
    """

    def __init__(self,
                 plugins=None,
                 assets_zip_provider=None,
                 server_class=None):
        """Creates new instance.

        Args:
          plugins: A list of TensorBoard plugins to load, as TBPlugin classes or
            TBLoader instances or classes. If not specified, defaults to
            first-party plugins.
          assets_zip_provider: Delegates to TBContext or uses default if None.
          server_class: An optional factory for a `TensorBoardServer` to use
            for serving the TensorBoard WSGI app. If provided, its callable
            signature should match that of `TensorBoardServer.__init__`.
        """
        if plugins is None:
            # Deferred import: tensorboard.default pulls in all first-party
            # plugins, which custom builds may not want or have.
            from tensorboard import default
            plugins = default.get_plugins()
        if assets_zip_provider is None:
            assets_zip_provider = get_default_assets_zip_provider()
        if server_class is None:
            server_class = create_port_scanning_werkzeug_server
        self.plugin_loaders = [application.make_plugin_loader(p) for p in plugins]
        self.assets_zip_provider = assets_zip_provider
        self.server_class = server_class
        self.flags = None

    def configure(self, argv=('',), **kwargs):
        """Configures TensorBoard behavior via flags.

        This method will populate the "flags" property with an argparse.Namespace
        representing flag values parsed from the provided argv list, overridden by
        explicit flags from remaining keyword arguments.

        Args:
          argv: Can be set to CLI args equivalent to sys.argv; the first arg is
            taken to be the name of the path being executed.
          kwargs: Additional arguments will override what was parsed from
            argv. They must be passed as Python data structures, e.g.
            `foo=1` rather than `foo="1"`.

        Returns:
          Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
          for absl.app.run() compatibility.

        Raises:
          ValueError: If flag values are invalid.
        """
        parser = argparse_flags.ArgumentParser(
            prog='tensorboard',
            description=('TensorBoard is a suite of web applications for '
                         'inspecting and understanding your TensorFlow runs '
                         'and graphs. https://github.com/tensorflow/tensorboard '))
        # Each plugin loader may contribute its own flags to the parser.
        for loader in self.plugin_loaders:
            loader.define_flags(parser)
        arg0 = argv[0] if argv else ''
        flags = parser.parse_args(argv[1:])  # Strip binary name from argv.
        self.cache_key = manager.cache_key(
            working_directory=os.getcwd(),
            arguments=argv[1:],
            configure_kwargs=kwargs,
        )
        if arg0:
            # Only expose main module Abseil flags as TensorBoard native flags.
            # This is the same logic Abseil's ArgumentParser uses for determining
            # which Abseil flags to include in the short helpstring.
            for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
                if hasattr(flags, flag.name):
                    raise ValueError('Conflicting Abseil flag: %s' % flag.name)
                setattr(flags, flag.name, flag.value)
        for k, v in kwargs.items():
            if not hasattr(flags, k):
                raise ValueError('Unknown TensorBoard flag: %s' % k)
            setattr(flags, k, v)
        # Give each plugin loader a chance to validate/normalize flag values.
        for loader in self.plugin_loaders:
            loader.fix_flags(flags)
        self.flags = flags
        return [arg0]

    def main(self, ignored_argv=('',)):
        """Blocking main function for TensorBoard.

        This method is called by `tensorboard.main.run_main`, which is the
        standard entrypoint for the tensorboard command line program. The
        configure() method must be called first.

        Args:
          ignored_argv: Do not pass. Required for Abseil compatibility.

        Returns:
          Process exit code, i.e. 0 if successful or non-zero on failure. In
          practice, an exception will most likely be raised instead of
          returning non-zero.

        :rtype: int
        """
        self._install_signal_handler(signal.SIGTERM, "SIGTERM")
        if self.flags.inspect:
            # --inspect short-circuits: dump event file info and exit.
            logger.info('Not bringing up TensorBoard, but inspecting event files.')
            event_file = os.path.expanduser(self.flags.event_file)
            efi.inspect(self.flags.logdir, event_file, self.flags.tag)
            return 0
        if self.flags.version_tb:
            print(version.VERSION)
            return 0
        try:
            server = self._make_server()
            server.print_serving_message()
            self._register_info(server)
            server.serve_forever()
            return 0
        except TensorBoardServerException as e:
            logger.error(e.msg)
            sys.stderr.write('ERROR: %s\n' % e.msg)
            sys.stderr.flush()
            return -1

    def launch(self):
        """Python API for launching TensorBoard.

        This method is the same as main() except it launches TensorBoard in
        a separate permanent thread. The configure() method must be called
        first.

        Returns:
          The URL of the TensorBoard web server.

        :rtype: str
        """
        # Make it easy to run TensorBoard inside other programs, e.g. Colab.
        server = self._make_server()
        # Daemon thread: the embedded server must never keep the host
        # program alive on its own.
        thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
        thread.daemon = True
        thread.start()
        return server.get_url()

    def _register_info(self, server):
        """Write a TensorBoardInfo file and arrange for its cleanup.

        Args:
          server: The result of `self._make_server()`.
        """
        server_url = urllib.parse.urlparse(server.get_url())
        info = manager.TensorBoardInfo(
            version=version.VERSION,
            start_time=int(time.time()),
            port=server_url.port,
            pid=os.getpid(),
            path_prefix=self.flags.path_prefix,
            logdir=self.flags.logdir or self.flags.logdir_spec,
            db=self.flags.db,
            cache_key=self.cache_key,
        )
        # Register cleanup before writing so the info file is removed even if
        # the process exits shortly after this point.
        atexit.register(manager.remove_info_file)
        manager.write_info_file(info)

    def _install_signal_handler(self, signal_number, signal_name):
        """Set a signal handler to gracefully exit on the given signal.

        When this process receives the given signal, it will run `atexit`
        handlers and then exit with `0`.

        Args:
          signal_number: The numeric code for the signal to handle, like
            `signal.SIGTERM`.
          signal_name: The human-readable signal name.
        """
        old_signal_handler = None  # set below

        def handler(handled_signal_number, frame):
            # In case we catch this signal again while running atexit
            # handlers, take the hint and actually die.
            signal.signal(signal_number, signal.SIG_DFL)
            sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
            # The main thread is the only non-daemon thread, so it suffices to
            # exit hence.
            if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
                # Chain to any previously installed custom handler first.
                old_signal_handler(handled_signal_number, frame)
            sys.exit(0)

        old_signal_handler = signal.signal(signal_number, handler)

    def _make_server(self):
        """Constructs the TensorBoard WSGI app and instantiates the server."""
        app = application.standard_tensorboard_wsgi(self.flags,
                                                    self.plugin_loaders,
                                                    self.assets_zip_provider)
        return self.server_class(app, self.flags)
@six.add_metaclass(ABCMeta)
class TensorBoardServer(object):
    """Class for customizing TensorBoard WSGI app serving."""

    @abstractmethod
    def __init__(self, wsgi_app, flags):
        """Create a flag-configured HTTP server for TensorBoard's WSGI app.

        Args:
          wsgi_app: The TensorBoard WSGI application to create a server for.
          flags: argparse.Namespace instance of TensorBoard flags.
        """
        raise NotImplementedError()

    @abstractmethod
    def serve_forever(self):
        """Blocking call to start serving the TensorBoard server."""
        raise NotImplementedError()

    @abstractmethod
    def get_url(self):
        """Returns a URL at which this server should be reachable."""
        raise NotImplementedError()

    def print_serving_message(self):
        """Prints a user-friendly message prior to server start.

        This will be called just before `serve_forever`.
        """
        # Write to stderr so the URL is visible even when stdout is
        # redirected, and flush so it appears before the blocking serve call.
        sys.stderr.write(
            'TensorBoard %s at %s (Press CTRL+C to quit)\n'
            % (version.VERSION, self.get_url())
        )
        sys.stderr.flush()
class TensorBoardServerException(Exception):
    """Exception raised by TensorBoardServer for user-friendly errors.

    Subclasses of TensorBoardServer can raise this exception in order to
    generate a clean error message for the user rather than a stacktrace.
    """

    def __init__(self, msg):
        # Forward the message to Exception so that str(e) and tracebacks
        # show it (previously str(e) was empty); keep .msg for existing
        # callers that read it directly.
        super(TensorBoardServerException, self).__init__(msg)
        self.msg = msg
class TensorBoardPortInUseError(TensorBoardServerException):
    """Error raised when attempting to bind to a port that is in use.

    This should be raised when it is expected that binding to another
    similar port would succeed. It is used as a signal to indicate that
    automatic port searching should continue rather than abort.
    """
    # Marker subclass only; with_port_scanning() catches this type to decide
    # whether to try the next port.
    pass
def with_port_scanning(cls):
    """Create a server factory that performs port scanning.

    This function returns a callable whose signature matches the
    specification of `TensorBoardServer.__init__`, using `cls` as an
    underlying implementation. It passes through `flags` unchanged except
    in the case that `flags.port is None`, in which case it repeatedly
    instantiates the underlying server with new port suggestions.

    Args:
      cls: A valid implementation of `TensorBoardServer`. This class's
        initializer should raise a `TensorBoardPortInUseError` upon
        failing to bind to a port when it is expected that binding to
        another nearby port might succeed.

        The initializer for `cls` will only ever be invoked with `flags`
        such that `flags.port is not None`.

    Returns:
      A function that implements the `__init__` contract of
      `TensorBoardServer`.
    """
    def init(wsgi_app, flags):
        # base_port: what's the first port to which we should try to bind?
        # should_scan: if that fails, shall we try additional ports?
        # max_attempts: how many ports shall we try?
        should_scan = flags.port is None
        base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port
        max_attempts = 10 if should_scan else 1

        if base_port > 0xFFFF:
            raise TensorBoardServerException(
                'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF)
            )
        # Clamp the scan window so base_port + max_attempts never exceeds
        # the valid port range. (A redundant duplicate assignment of
        # max_attempts was removed here.)
        base_port = min(base_port + max_attempts, 0x10000) - max_attempts

        for port in xrange(base_port, base_port + max_attempts):
            # Copy the namespace so each attempt sees its own port and the
            # caller's flags object is never mutated.
            subflags = argparse.Namespace(**vars(flags))
            subflags.port = port
            try:
                return cls(wsgi_app=wsgi_app, flags=subflags)
            except TensorBoardPortInUseError:
                if not should_scan:
                    raise
        # All attempts failed to bind.
        raise TensorBoardServerException(
            'TensorBoard could not bind to any port around %s '
            '(tried %d times)'
            % (base_port, max_attempts))
    return init
class WerkzeugServer(serving.ThreadedWSGIServer, TensorBoardServer):
    """Implementation of TensorBoardServer using the Werkzeug dev server."""

    # ThreadedWSGIServer handles this in werkzeug 0.12+ but we allow 0.11.x.
    daemon_threads = True

    def __init__(self, wsgi_app, flags):
        """Create the server, translating bind failures into friendly errors.

        Args:
          wsgi_app: The TensorBoard WSGI application to serve.
          flags: argparse.Namespace of TensorBoard flags; reads `host`,
            `port`, `bind_all`, and `path_prefix`.

        Raises:
          TensorBoardServerException: For identifiable user errors
            (permission denied, unavailable address, no open port).
          TensorBoardPortInUseError: When the requested port is taken and a
            nearby port might work (signal for port scanning).
        """
        self._flags = flags
        host = flags.host
        port = flags.port
        self._auto_wildcard = flags.bind_all
        if self._auto_wildcard:
            # Serve on all interfaces, and attempt to serve both IPv4 and IPv6
            # traffic through one socket.
            host = self._get_wildcard_address(port)
        elif host is None:
            host = 'localhost'
        self._host = host
        self._fix_werkzeug_logging()
        try:
            super(WerkzeugServer, self).__init__(host, port, wsgi_app)
        except socket.error as e:
            if hasattr(errno, 'EACCES') and e.errno == errno.EACCES:
                raise TensorBoardServerException(
                    'TensorBoard must be run as superuser to bind to port %d' %
                    port)
            elif hasattr(errno, 'EADDRINUSE') and e.errno == errno.EADDRINUSE:
                if port == 0:
                    raise TensorBoardServerException(
                        'TensorBoard unable to find any open port')
                else:
                    raise TensorBoardPortInUseError(
                        'TensorBoard could not bind to port %d, it was already in use' %
                        port)
            elif hasattr(errno, 'EADDRNOTAVAIL') and e.errno == errno.EADDRNOTAVAIL:
                raise TensorBoardServerException(
                    'TensorBoard could not bind to unavailable address %s' % host)
            elif hasattr(errno, 'EAFNOSUPPORT') and e.errno == errno.EAFNOSUPPORT:
                raise TensorBoardServerException(
                    'Tensorboard could not bind to unsupported address family %s' %
                    host)
            # Raise the raw exception if it wasn't identifiable as a user error.
            raise

    def _get_wildcard_address(self, port):
        """Returns a wildcard address for the port in question.

        This will attempt to follow the best practice of calling getaddrinfo() with
        a null host and AI_PASSIVE to request a server-side socket wildcard address.
        If that succeeds, this returns the first IPv6 address found, or if none,
        then returns the first IPv4 address. If that fails, then this returns the
        hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
        """
        fallback_address = '::' if socket.has_ipv6 else '0.0.0.0'
        if hasattr(socket, 'AI_PASSIVE'):
            try:
                addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
                                               socket.SOCK_STREAM, socket.IPPROTO_TCP,
                                               socket.AI_PASSIVE)
            except socket.gaierror as e:
                # logger.warn() is a deprecated alias; use warning().
                logger.warning('Failed to auto-detect wildcard address, assuming %s: %s',
                               fallback_address, str(e))
                return fallback_address
            addrs_by_family = defaultdict(list)
            for family, _, _, _, sockaddr in addrinfos:
                # Format of the "sockaddr" socket address varies by address family,
                # but [0] is always the IP address portion.
                addrs_by_family[family].append(sockaddr[0])
            if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]:
                return addrs_by_family[socket.AF_INET6][0]
            if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]:
                return addrs_by_family[socket.AF_INET][0]
        logger.warning('Failed to auto-detect wildcard address, assuming %s',
                       fallback_address)
        return fallback_address

    def server_bind(self):
        """Override to enable IPV4 mapping for IPV6 sockets when desired.

        The main use case for this is so that when no host is specified, TensorBoard
        can listen on all interfaces for both IPv4 and IPv6 connections, rather than
        having to choose v4 or v6 and hope the browser didn't choose the other one.
        """
        socket_is_v6 = (
            hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
        has_v6only_option = (
            hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
        if self._auto_wildcard and socket_is_v6 and has_v6only_option:
            try:
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except socket.error as e:
                # Log a warning on failure to dual-bind, except for EAFNOSUPPORT
                # since that's expected if IPv4 isn't supported at all (IPv6-only).
                if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
                    logger.warning('Failed to dual-bind to IPv4 wildcard: %s', str(e))
        super(WerkzeugServer, self).server_bind()

    def handle_error(self, request, client_address):
        """Override to get rid of noisy EPIPE errors."""
        del request  # unused
        # Kludge to override a SocketServer.py method so we can get rid of noisy
        # EPIPE errors. They're kind of a red herring as far as errors go. For
        # example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
        exc_info = sys.exc_info()
        e = exc_info[1]
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
            # Lazy %-style args: the string is only built if the record is
            # actually emitted.
            logger.warning('EPIPE caused by %s in HTTP serving', client_address)
        else:
            logger.error('HTTP serving error', exc_info=exc_info)

    def get_url(self):
        """Returns the serving URL, using the hostname for wildcard binds."""
        if self._auto_wildcard:
            display_host = socket.gethostname()
        else:
            host = self._host
            # Bracket bare IPv6 literals so the resulting URL is valid.
            display_host = (
                '[%s]' % host if ':' in host and not host.startswith('[') else host)
        return 'http://%s:%d%s/' % (display_host, self.server_port,
                                    self._flags.path_prefix.rstrip('/'))

    def print_serving_message(self):
        # Nudge users toward --bind_all when serving only on localhost.
        if self._flags.host is None and not self._flags.bind_all:
            sys.stderr.write(
                'Serving TensorBoard on localhost; to expose to the network, '
                'use a proxy or pass --bind_all\n'
            )
            sys.stderr.flush()
        super(WerkzeugServer, self).print_serving_message()

    def _fix_werkzeug_logging(self):
        """Fix werkzeug logging setup so it inherits TensorBoard's log level.

        This addresses a change in werkzeug 0.15.0+ [1] that causes it set its own
        log level to INFO regardless of the root logger configuration. We instead
        want werkzeug to inherit TensorBoard's root logger log level (set via absl
        to WARNING by default).

        [1]: https://github.com/pallets/werkzeug/commit/4cf77d25858ff46ac7e9d64ade054bf05b41ce12
        """
        # Log once at DEBUG to force werkzeug to initialize its singleton logger,
        # which sets the logger level to INFO it if is unset, and then access that
        # object via logging.getLogger('werkzeug') to durably revert the level to
        # unset (and thus make messages logged to it inherit the root logger level).
        self.log('debug', 'Fixing werkzeug logger to inherit TensorBoard log level')
        logging.getLogger('werkzeug').setLevel(logging.NOTSET)
create_port_scanning_werkzeug_server = with_port_scanning(WerkzeugServer)
|
util.py | import io
import os
import re
import socket
import subprocess
import threading
import time
from contextlib import contextmanager
from functools import partial as p
from typing import Dict, List
import docker
import netifaces as ni
import yaml
from tests.helpers.assertions import regex_search_matches_output
from tests.paths import TEST_SERVICES_DIR, SELFDESCRIBE_JSON
# Default timeout (seconds) for the wait helpers below; overridable via the
# DEFAULT_TIMEOUT env var. Coerced to int because environment values are
# strings, which would break numeric comparisons against elapsed time.
DEFAULT_TIMEOUT = int(os.environ.get("DEFAULT_TIMEOUT", 30))
DOCKER_API_VERSION = "1.34"
# Captures the UDP port the agent's StatsD monitor reports listening on.
STATSD_RE = re.compile(r"SignalFx StatsD monitor: Listening on host & port udp:\[::\]:([0-9]*)")
def get_docker_client():
    # Build a client from the environment (DOCKER_HOST etc.), pinned to a
    # fixed API version so tests behave the same across daemon versions.
    return docker.from_env(version=DOCKER_API_VERSION)
def assert_wait_for(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2, on_fail=None):
    """
    Runs `wait_for` but raises an assertion if it fails, optionally calling
    `on_fail` before raising an AssertionError
    """
    if not wait_for(test, timeout_seconds, interval_seconds):
        if on_fail:
            # Give the caller a chance to dump diagnostics before failing.
            on_fail()
        # Fixed typo in the failure message ("failng" -> "failing").
        raise AssertionError("test '%s' still failing after %d seconds" % (test, timeout_seconds))
def wait_for(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
    """
    Poll `test` until it returns a truthy value, in which case True is
    returned, or until `timeout_seconds` have elapsed, in which case False
    is returned. Sleeps `interval_seconds` between attempts.
    """
    start = time.time()
    while not test():
        if time.time() - start > timeout_seconds:
            return False
        time.sleep(interval_seconds)
    return True
def ensure_always(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
    """
    Poll `test` for `timeout_seconds`: return False the moment it turns
    falsy, or True if it stayed truthy for the whole window.
    """
    deadline = time.time() + timeout_seconds
    while test():
        if time.time() > deadline:
            return True
        time.sleep(interval_seconds)
    return False
def ensure_never(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
    """
    Repeatedly calls the given test. If it ever returns true before the timeout
    given is completed, returns False, otherwise True.

    `interval_seconds` was previously hard-coded to 0.2; it is now a keyword
    argument (same default) for consistency with wait_for/ensure_always.
    """
    start = time.time()
    while True:
        if test():
            return False
        if time.time() - start > timeout_seconds:
            return True
        time.sleep(interval_seconds)
def print_lines(msg):
    """
    Print each line separately to make it easier to read in pytest output
    """
    for single_line in msg.splitlines():
        print(single_line)
def container_ip(container):
    # reload() syncs attrs from the Docker daemon so we don't return a stale
    # (possibly empty) address for a container that only just started.
    container.reload()
    return container.attrs["NetworkSettings"]["IPAddress"]
# Ensure a unique internal status server host address. This supports up to
# 255 concurrent agents on the same pytest worker process, and up to 255
# pytest workers, which should be plenty
def get_unique_localhost():
    # Derive a numeric worker id from the PYTEST_XDIST_WORKER name (e.g. "gw3").
    worker = int(re.sub(r"\D", "", os.environ.get("PYTEST_XDIST_WORKER", "0")))
    get_unique_localhost.counter += 1
    # NOTE(review): % 255 yields 0..254, so the 255th call on a worker wraps
    # around and can collide with an earlier address -- acceptable under the
    # documented 255-agent limit, but verify if that limit ever grows.
    return "127.%d.%d.0" % (worker, get_unique_localhost.counter % 255)
get_unique_localhost.counter = 0
@contextmanager
def run_subprocess(command: List[str], env: Dict[any, any] = None):
    """
    Run `command` as a subprocess, capturing combined stdout/stderr in a
    background thread.

    Yields [get_output, pid], where get_output() returns everything the
    process has written so far, decoded as UTF-8.

    On exit the process is terminated; if it ignores SIGTERM for 15 seconds
    it is force-killed so the context manager can never hang (previously
    Popen.wait(15) could raise TimeoutExpired and leak the process).
    """
    # subprocess on Windows has a bug where it doesn't like Path.
    proc = subprocess.Popen([str(c) for c in command], env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = io.BytesIO()

    def pull_output():
        while True:
            # If any output is waiting, grab it.
            byt = proc.stdout.read(1)
            if not byt:
                return
            output.write(byt)

    def get_output():
        return output.getvalue().decode("utf-8")

    # Daemonize the reader so it can never block interpreter shutdown.
    threading.Thread(target=pull_output, daemon=True).start()
    try:
        yield [get_output, proc.pid]
    finally:
        proc.terminate()
        try:
            proc.wait(15)
        except subprocess.TimeoutExpired:
            # Process ignored SIGTERM; force-kill and reap it.
            proc.kill()
            proc.wait()
@contextmanager
def run_container(image_name, wait_for_ip=True, print_logs=True, **kwargs):
    """Run a detached Docker container, yield it, and always clean it up.

    kwargs are forwarded to docker's containers.run(). When print_logs is
    true, the container's full log output is dumped on teardown to help
    debug failing tests.
    """
    client = get_docker_client()
    # Container start can fail transiently, so retry on DockerException.
    container = retry(lambda: client.containers.run(image_name, detach=True, **kwargs), docker.errors.DockerException)

    def has_ip_addr():
        container.reload()
        return container.attrs["NetworkSettings"]["IPAddress"]

    if wait_for_ip:
        # Best-effort: gives the container a few seconds to get networking.
        wait_for(has_ip_addr, timeout_seconds=5)
    try:
        yield container
    finally:
        try:
            if print_logs:
                print_lines(
                    "Container %s/%s logs:\n%s" % (image_name, container.name, container.logs().decode("utf-8"))
                )
            container.remove(force=True, v=True)
        except docker.errors.NotFound:
            # Already gone (e.g. removed by the test itself) -- nothing to do.
            pass
@contextmanager
def run_service(service_name, buildargs=None, print_logs=True, path=None, dockerfile="./Dockerfile", **kwargs):
    """Build the named test-service image and run it as a container.

    `path` defaults to TEST_SERVICES_DIR/<service_name>; extra kwargs are
    forwarded to run_container (and ultimately containers.run()).
    """
    if buildargs is None:
        buildargs = {}
    if path is None:
        path = os.path.join(TEST_SERVICES_DIR, service_name)

    client = get_docker_client()
    # Image builds can fail transiently (network, cache), so retry them.
    image, _ = retry(
        lambda: client.images.build(path=str(path), dockerfile=dockerfile, rm=True, forcerm=True, buildargs=buildargs),
        docker.errors.BuildError,
    )
    with run_container(image.id, print_logs=print_logs, **kwargs) as cont:
        yield cont
def get_monitor_metrics_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
    """Return the set of metric names declared for `monitor` in the
    selfdescribe document, or an empty set if none are declared."""
    with open(json_path, "r", encoding="utf-8") as fd:
        document = yaml.safe_load(fd.read())
    for entry in document["Monitors"]:
        if entry["monitorType"] == monitor and entry.get("metrics"):
            return {metric["name"] for metric in entry["metrics"]}
    return set()
def get_monitor_dims_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
    """Return the set of dimension names declared for `monitor` in the
    selfdescribe document, or an empty set if none are declared."""
    with open(json_path, "r", encoding="utf-8") as fd:
        document = yaml.safe_load(fd.read())
    for entry in document["Monitors"]:
        if entry["monitorType"] == monitor and entry.get("dimensions"):
            return {dim["name"] for dim in entry["dimensions"]}
    return set()
def get_observer_dims_from_selfdescribe(observer, json_path=SELFDESCRIBE_JSON):
    """Return the set of dimension names declared for `observer` in the
    selfdescribe document, or an empty set if none are declared."""
    with open(json_path, "r", encoding="utf-8") as fd:
        document = yaml.safe_load(fd.read())
    for entry in document["Observers"]:
        if entry["observerType"] == observer and entry.get("dimensions"):
            return {dim["name"] for dim in entry["dimensions"]}
    return set()
def get_host_ip():
    """Return the IPv4 address of the interface holding the default gateway."""
    gws = ni.gateways()
    # gateways()["default"][AF_INET] is (gateway_address, interface_name).
    interface = gws["default"][ni.AF_INET][1]
    return ni.ifaddresses(interface)[ni.AF_INET][0]["addr"]
def send_udp_message(host, port, msg):
    """
    Send a datagram to the given host/port

    The message is UTF-8 encoded. The socket is now closed afterwards
    (previously the file descriptor leaked on every call).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # Internet # UDP
    try:
        sock.sendto(msg.encode("utf-8"), (host, port))
    finally:
        sock.close()
def retry(function, exception, max_attempts=5, interval_seconds=5):
    """
    Retry function up to max_attempts if exception is caught

    Raises AssertionError (same type and message as before) when the final
    attempt fails; an explicit raise is used instead of `assert` so the
    failure is not silently stripped under `python -O`.
    """
    for attempt in range(max_attempts):
        try:
            return function()
        except exception as e:
            if attempt >= max_attempts - 1:
                raise AssertionError(
                    "%s failed after %d attempts!\n%s" % (function, max_attempts, str(e))
                )
            time.sleep(interval_seconds)
def get_statsd_port(agent):
    """
    Discover an open port of running StatsD monitor
    """
    # Block until the monitor has logged its listen address into the agent
    # output, then parse the port number out of that line.
    assert wait_for(p(regex_search_matches_output, agent.get_output, STATSD_RE.search))
    regex_results = STATSD_RE.search(agent.output)
    return int(regex_results.groups()[0])
|
events.py | import datetime
from RPi import GPIO
from threading import Lock, Thread
import time
_lock = Lock()
# sets up the buttons and stores which button does what
class Buttons:
    """Maps GPIO pins to button letters and button letters to per-menu actions."""

    # BCM pin number -> button letter
    button_numbers = {5: "a", 6: "b", 16: "x", 24: "y"}
    # button letter -> action name, one mapping per menu screen
    home_menu_buttons = {"a": "home", "b": "none", "y": "none", "x": "backlight"}
    list_menu_buttons = {"a": "select", "b": "up", "y": "down", "x": "home"}
    time_menu_buttons = {"a": "next", "b": "decrease", "y": "increase", "x": "units"}
    alert_menu_buttons = {"a": "dismiss", "b": "complete", "x": "none", "y": "delay"}

    @staticmethod
    def setup_buttons(handler):
        """Configure the GPIO pins and call handler(letter) on each press.

        handler is a function taking the letter of the button pressed.
        """

        def serialised_handler(gpio_pin):
            # stop multiple button handlers being called at once
            with _lock:
                handler(Buttons.button_numbers[gpio_pin])

        GPIO.setmode(GPIO.BCM)  # BCM numbering scheme
        pins = list(Buttons.button_numbers)
        GPIO.setup(pins, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        for pin in pins:
            GPIO.add_event_detect(pin, GPIO.FALLING, serialised_handler, bouncetime=300)
# contains a function to run in a thread
# checks the time and calls the handler at each new minute
class Clock:
    """Polls the wall clock and fires a callback once per minute."""

    # timestamp of the last minute change that was reported
    last_time = datetime.datetime.now()

    @staticmethod
    def set_up_clock(handler):
        """Start a daemon thread that calls handler() whenever the minute changes."""

        def watch_minutes():
            while True:
                time.sleep(1)
                now = datetime.datetime.now()
                # compare (hour, minute) pairs: fire once per minute tick
                if (now.hour, now.minute) != (Clock.last_time.hour, Clock.last_time.minute):
                    Clock.last_time = now
                    with _lock:
                        handler()

        Thread(target=watch_minutes, daemon=True).start()
# stores the list of scheduled items that are turned on
# manages the list to make sure items are unique and the list is in order
# contains thread to check if it's time for an alert
class Alerts:
    """Ordered schedule of enabled tasks plus a watcher thread that fires alerts.

    The schedule is kept unique and sorted by task time; a daemon thread polls
    it every 5 seconds and calls task.alert() once a task's time has passed.
    Tasks are expected to expose .name, .get_task_time() and .alert().
    """

    _alerts = []  # tasks, sorted ascending by get_task_time()
    _last_updated = datetime.datetime.fromtimestamp(0)

    @staticmethod
    def get_last_updated():
        return Alerts._last_updated

    @staticmethod
    def set_last_updated(last_updated):
        Alerts._last_updated = last_updated

    @staticmethod
    def print_schedule():
        print("scheduled:", [each.name + " " + str(each.get_task_time()) for each in Alerts._alerts])

    @staticmethod
    def add_to_schedule(task):
        # keep the schedule unique and ordered by task time
        if task not in Alerts._alerts:
            Alerts._alerts.append(task)
            Alerts._alerts.sort(key=lambda x: x.get_task_time())
        Alerts.print_schedule()

    @staticmethod
    def remove_from_schedule(task):
        if task in Alerts._alerts:
            Alerts._alerts.remove(task)
        Alerts.print_schedule()

    @staticmethod
    def set_up_alerts():
        """Start the daemon thread that watches for due alerts."""

        def alert_checker():
            alert_tf = False
            while True:
                time.sleep(5)
                with _lock:
                    if len(Alerts._alerts) > 0 and datetime.datetime.now() >= Alerts._alerts[0].get_task_time():
                        alert_now = Alerts._alerts.pop(0)
                        alert_tf = True
                # call alert() outside the lock so the handler may re-enter Alerts
                if alert_tf:
                    alert_tf = False
                    alert_now.alert()

        # BUG FIX: the original passed `target=alert_checker()` — calling the
        # infinite-loop worker inline, so set_up_alerts() blocked forever and
        # the thread was never actually created. Pass the function object.
        updater = Thread(target=alert_checker, daemon=True)
        updater.start()

    @staticmethod
    def sort_alerts():
        Alerts._alerts.sort(key=lambda x: x.get_task_time())
        Alerts.print_schedule()
|
train.py | import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
    """Run the full YOLO training loop and return the final metrics.

    Args:
        hyp: dict of hyperparameters (lr0, momentum, loss gains, augmentation
            settings, ...) loaded from a hyp*.yaml file.
        opt: argparse.Namespace of command-line options (weights, epochs,
            batch sizes, DDP ranks, save_dir, ...).
        device: torch.device selected by select_device(); CPU or CUDA.
        tb_writer: optional TensorBoard SummaryWriter (only used on rank -1/0).

    Returns:
        tuple of 7 values from the last evaluation:
        (P, R, mAP@.5, mAP@.5:.95, val_box_loss, val_obj_loss, val_cls_loss).
    """
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    is_coco = opt.data.endswith('coco.yaml')

    # Logging- Doing this before checking the dataset. Might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
        wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
                    # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
                    find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # add model graph
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                                  save_dir.glob('train*.jpg') if x.exists()]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                wandb_logger.current_epoch = epoch + 1
                results, maps, times = test.test(data_dict,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 wandb_logger=wandb_logger,
                                                 compute_loss=compute_loss,
                                                 is_coco=is_coco)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb_logger.wandb:
                    wandb_logger.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            wandb_logger.end_epoch(best_result=best_fitness == fi)

            # Save model
            if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'training_results': results_file.read_text(),
                        'model': deepcopy(model.module if is_parallel(model) else model).half(),
                        'ema': deepcopy(ema.ema).half(),
                        'updates': ema.updates,
                        'optimizer': optimizer.state_dict(),
                        'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if wandb_logger.wandb:
                    if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                        wandb_logger.log_model(
                            last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                del ckpt

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
                                              if (save_dir / f).exists()]})
        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            # NOTE(review): `(last)` is NOT a one-element tuple — if best.pt does not
            # exist this iterates over a Path object and raises TypeError; this was
            # likely meant to be `(last,)`. Confirm before relying on this branch.
            for m in (last, best) if best.exists() else (last):  # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=batch_size * 2,
                                          imgsz=imgsz_test,
                                          conf_thres=0.001,
                                          iou_thres=0.7,
                                          model=attempt_load(m, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=True,
                                          plots=False,
                                          is_coco=is_coco)

        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload
        if wandb_logger.wandb and not opt.evolve:  # Log the stripped model
            wandb_logger.wandb.log_artifact(str(final), type='model',
                                            name='run_' + wandb_logger.wandb_run.id + '_model',
                                            aliases=['last', 'best', 'stripped'])
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    return results
# Entry point: parse CLI options, set up DDP/resume state, then either run a
# single training job or a 300-generation hyperparameter-evolution loop.
if __name__ == '__main__':
    print(torch.__version__)
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='models/yolov5s.yaml', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/Data_CF.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=8, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--entity', default=None, help='W&B entity')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--linear-lr', action='store_true', help='linear LR')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
    parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
    parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
    opt = parser.parse_args()

    # Set DDP variables
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_git_status()
        check_requirements()

    # Resume
    wandb_run = check_wandb_resume(opt)
    if opt.resume and not wandb_run:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        apriori = opt.global_rank, opt.local_rank
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader))  # replace
        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        opt.name = 'evolve' if opt.evolve else opt.name
        opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run

    # DDP mode
    opt.total_batch_size = opt.batch_size
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size

    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.load(f, Loader=yaml.SafeLoader)  # load hyps

    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            prefix = colorstr('tensorboard: ')
            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(300):  # generations to evolve
            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

                # Constrain to limits
                for k, v in meta.items():
                    hyp[k] = max(hyp[k], v[1])  # lower limit
                    hyp[k] = min(hyp[k], v[2])  # upper limit
                    hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device)

            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)

        # Plot results
        plot_evolution(yaml_file)
        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
|
ga_fuzzing.py | import sys
import os
from importlib_metadata import version
from customized_utils import parse_fuzzing_arguments
sys.path.append('pymoo')
sys.path.append('fuzzing_utils')
fuzzing_arguments = parse_fuzzing_arguments()

# Silence library warnings unless debugging was requested on the command line.
if not fuzzing_arguments.debug:
    import warnings
    warnings.filterwarnings("ignore")

# Simulator-specific sys.path wiring and CARLA python-egg installation.
if fuzzing_arguments.simulator in ['carla', 'svl']:
    sys.path.append('..')
    carla_lbc_root = 'carla_lbc'
    sys.path.append(carla_lbc_root)
    sys.path.append(carla_lbc_root+'/leaderboard')
    sys.path.append(carla_lbc_root+'/leaderboard/team_code')
    sys.path.append(carla_lbc_root+'/scenario_runner')
    sys.path.append(carla_lbc_root+'/carla_project')
    sys.path.append(carla_lbc_root+'/carla_project/src')
    sys.path.append(carla_lbc_root+'/carla_specific_utils')
    if fuzzing_arguments.simulator in ['carla']:
        # CARLA 0.9.9 install for the LBC stack; hard-coded local path.
        carla_root = os.path.expanduser('~/Documents/self-driving-cars/carla_0994_no_rss')
        sys.path.append(carla_root+'/PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg')
        sys.path.append(carla_root+'/PythonAPI/carla')
        sys.path.append(carla_root+'/PythonAPI')
        if version('carla') != '0.9.9':
            assert os.path.exists(carla_root+'/PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg')
            egg_path = os.path.join(carla_root, 'PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg')
            # os.system('pip uninstall carla')
            os.system('easy_install '+egg_path)
elif fuzzing_arguments.simulator in ['carla_op']:
    # CARLA 0.9.11 + openpilot; fall back to the no-RSS build when absent.
    carla_root = os.path.expanduser('~/Documents/self-driving-cars/carla_0911_rss')
    if not os.path.exists(carla_root):
        carla_root = os.path.expanduser('~/Documents/self-driving-cars/carla_0911_no_rss')
    fuzzing_arguments.carla_path = os.path.join(carla_root, "CarlaUE4.sh")
    sys.path.append(carla_root+'/PythonAPI/carla/dist/carla-0.9.11-py3.7-linux-x86_64.egg')
    sys.path.append(carla_root+'/PythonAPI/carla')
    sys.path.append(carla_root+'/PythonAPI')
    # TBD: change to relative paths
    sys.path.append(os.path.expanduser('~/openpilot'))
    sys.path.append(os.path.expanduser('~/openpilot/tools/sim'))
    if version('carla') != '0.9.11':
        egg_path = os.path.join(carla_root, 'PythonAPI/carla/dist/carla-0.9.11-py3.7-linux-x86_64.egg')
        # os.system('pip uninstall carla')
        os.system('easy_install '+egg_path)
import json
import re
import time
import pathlib
import pickle
import copy
import atexit
import traceback
import math
from datetime import datetime
from distutils.dir_util import copy_tree
import numpy as np
from sklearn.preprocessing import StandardScaler
from scipy.stats import rankdata
from multiprocessing import Process, Manager, set_start_method
from pymoo.model.problem import Problem
from pymoo.model.crossover import Crossover
from pymoo.model.mutation import Mutation
from pymoo.model.population import Population
from pymoo.model.evaluator import Evaluator
from pymoo.algorithms.nsga2 import NSGA2, binary_tournament
from pymoo.operators.selection.tournament_selection import TournamentSelection
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.model.termination import Termination
from pymoo.util.termination.default import MultiObjectiveDefaultTermination, SingleObjectiveDefaultTermination
from pymoo.operators.mixed_variable_operator import MixedVariableMutation, MixedVariableCrossover
from pymoo.factory import get_crossover, get_mutation, get_termination
from pymoo.model.initialization import Initialization
from pymoo.model.duplicate import NoDuplicateElimination
from pymoo.model.survival import Survival
from pymoo.model.individual import Individual
# disable pymoo optimization warning
from pymoo.configuration import Configuration
Configuration.show_compile_hint = False
from pgd_attack import pgd_attack, train_net, train_regression_net, VanillaDataset
from acquisition import map_acquisition
from sampling import MySamplingVectorized, GridSampling, RandomDirectionSampling
from mating_and_repair import MyMatingVectorized, ClipRepair
from customized_utils import rand_real, make_hierarchical_dir, exit_handler, is_critical_region, if_violate_constraints, filter_critical_regions, encode_fields, remove_fields_not_changing, get_labels_to_encode, customized_fit, customized_standardize, customized_inverse_standardize, decode_fields, encode_bounds, recover_fields_not_changing, process_X, inverse_process_X, calculate_rep_d, select_batch_max_d_greedy, if_violate_constraints_vectorized, is_distinct_vectorized, eliminate_repetitive_vectorized, get_sorted_subfolders, load_data, get_F, set_general_seed, emptyobject, get_job_results, choose_farthest_offs, torch_subset
# Eliminate some randomness: fix the global RNG seed(s) (implementation in
# customized_utils) so a run is reproducible for a given --random_seed.
set_general_seed(seed=fuzzing_arguments.random_seed)
def fun(obj, x, launch_server, counter, port, return_dict):
    """Evaluate one candidate configuration `x` for problem `obj`.

    Skips the (expensive) simulation when `x` falls outside the decision-tree
    critical region (only checked when obj.dt is truthy) or violates the
    customized constraints; in that case the default objectives are returned
    with has_run == 0. Otherwise runs the simulation and returns its
    objectives and run_info with has_run == 1.

    The result list [objectives, run_info, has_run] is also written into
    `return_dict` (if given) so it survives a subprocess boundary.
    """
    args = obj.fuzzing_arguments
    # Only consult the decision-tree estimator when dt mode is on
    # (short-circuit keeps is_critical_region from running otherwise).
    skip_non_critical = obj.dt and not is_critical_region(x, obj.estimator, obj.critical_unique_leaves)
    violates, _ = if_violate_constraints(x, obj.customized_constraints, obj.labels, verbose=True)
    if skip_non_critical or violates:
        result = [args.default_objectives, None, 0]
    else:
        objectives, run_info = obj.run_simulation(x, obj.fuzzing_content, args, obj.sim_specific_arguments, obj.dt_arguments, launch_server, counter, port)
        print('\n'*3)
        print("counter:", counter, " run_info['is_bug']:", run_info['is_bug'], " run_info['bug_type']:", run_info['bug_type'], " objectives:", objectives)
        print('\n'*3)
        # correct_travel_dist(x, labels, customized_data['tmp_travel_dist_file'])
        result = [objectives, run_info, 1]
    if return_dict is not None:
        return_dict['returned_data'] = result
    return result
class MyProblem(Problem):
    """pymoo Problem that evaluates scenario configurations by simulation.

    Each row of the design matrix X is one scenario configuration; evaluation
    runs it through `run_simulation` (usually in a watchdog-guarded
    subprocess) and accumulates bug statistics, objectives, and trajectories
    across generations in instance state.
    """
    def __init__(self, fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation, dt_arguments):
        # Argument bundles handed in by the driver.
        self.fuzzing_arguments = fuzzing_arguments
        self.sim_specific_arguments = sim_specific_arguments
        self.fuzzing_content = fuzzing_content
        self.run_simulation = run_simulation
        self.dt_arguments = dt_arguments
        # Frequently-used scalars pulled out of fuzzing_arguments.
        self.ego_car_model = fuzzing_arguments.ego_car_model
        #self.scheduler_port = fuzzing_arguments.scheduler_port
        #self.dashboard_address = fuzzing_arguments.dashboard_address
        self.ports = fuzzing_arguments.ports
        self.episode_max_time = fuzzing_arguments.episode_max_time
        self.objective_weights = fuzzing_arguments.objective_weights
        self.check_unique_coeff = fuzzing_arguments.check_unique_coeff
        self.consider_interested_bugs = fuzzing_arguments.consider_interested_bugs
        self.record_every_n_step = fuzzing_arguments.record_every_n_step
        self.use_single_objective = fuzzing_arguments.use_single_objective
        self.simulator = fuzzing_arguments.simulator
        # Optional ego start position used to bias sampling away from the ego.
        if self.fuzzing_arguments.sample_avoid_ego_position and hasattr(self.sim_specific_arguments, 'ego_start_position'):
            self.ego_start_position = self.sim_specific_arguments.ego_start_position
        else:
            self.ego_start_position = None
        # Decision-tree (dt) mode settings.
        self.call_from_dt = dt_arguments.call_from_dt
        self.dt = dt_arguments.dt
        self.estimator = dt_arguments.estimator
        self.critical_unique_leaves = dt_arguments.critical_unique_leaves
        self.cumulative_info = dt_arguments.cumulative_info
        cumulative_info = dt_arguments.cumulative_info
        # Either resume bookkeeping from a previous dt round, or start fresh.
        if cumulative_info:
            self.counter = cumulative_info['counter']
            self.has_run = cumulative_info['has_run']
            self.start_time = cumulative_info['start_time']
            self.time_list = cumulative_info['time_list']
            self.bugs = cumulative_info['bugs']
            self.unique_bugs = cumulative_info['unique_bugs']
            self.interested_unique_bugs = cumulative_info['interested_unique_bugs']
            self.bugs_type_list = cumulative_info['bugs_type_list']
            self.bugs_inds_list = cumulative_info['bugs_inds_list']
            self.bugs_num_list = cumulative_info['bugs_num_list']
            self.unique_bugs_num_list = cumulative_info['unique_bugs_num_list']
            self.has_run_list = cumulative_info['has_run_list']
        else:
            self.counter = 0
            self.has_run = 0
            self.start_time = time.time()
            self.time_list = []
            self.bugs = []
            self.unique_bugs = []
            self.interested_unique_bugs = []
            self.bugs_type_list = []
            self.bugs_inds_list = []
            self.bugs_num_list = []
            self.unique_bugs_num_list = []
            self.has_run_list = []
        # Search-space description from the scenario definition.
        self.labels = fuzzing_content.labels
        self.mask = fuzzing_content.mask
        self.parameters_min_bounds = fuzzing_content.parameters_min_bounds
        self.parameters_max_bounds = fuzzing_content.parameters_max_bounds
        self.parameters_distributions = fuzzing_content.parameters_distributions
        self.customized_constraints = fuzzing_content.customized_constraints
        self.customized_center_transforms = fuzzing_content.customized_center_transforms
        # Lower / upper bounds in the order the bound dicts iterate.
        xl = [pair[1] for pair in self.parameters_min_bounds.items()]
        xu = [pair[1] for pair in self.parameters_max_bounds.items()]
        n_var = fuzzing_content.n_var
        # Uniqueness coefficients (p-norm, per-field cutoff, threshold).
        self.p, self.c, self.th = self.check_unique_coeff
        self.launch_server = True
        # Running history across all generations.
        self.objectives_list = []
        self.trajectory_vector_list = []
        self.x_list = []
        self.y_list = []
        self.F_list = []
        super().__init__(n_var=n_var, n_obj=4, n_constr=0, xl=xl, xu=xu)
    def _evaluate(self, X, out, *args, **kwargs):
        """Run every configuration in X, fill out["F"], and update history.

        Side effects: appends to the bug/objective/time bookkeeping lists,
        bumps self.counter per evaluation, and writes a summary line to
        fuzzing_arguments.mean_objectives_across_generations_path.
        """
        objective_weights = self.objective_weights
        customized_center_transforms = self.customized_center_transforms
        episode_max_time = self.episode_max_time
        default_objectives = self.fuzzing_arguments.default_objectives
        standardize_objective = self.fuzzing_arguments.standardize_objective
        normalize_objective = self.fuzzing_arguments.normalize_objective
        traj_dist_metric = self.fuzzing_arguments.traj_dist_metric
        all_final_generated_transforms_list = []
        # non-dask subprocess implementation
        # rng = np.random.default_rng(random_seeds[1])
        tmp_run_info_list = []
        x_sublist = []
        objectives_sublist_non_traj = []
        trajectory_vector_sublist = []
        for i in range(X.shape[0]):
            # Only the very first evaluation of the run launches the simulator server.
            if self.counter == 0:
                launch_server = True
            else:
                launch_server = False
            cur_i = i
            total_i = self.counter
            port = self.ports[0]
            x = X[cur_i]
            # No need to use subprocess when no simulation is running
            if self.fuzzing_arguments.simulator in ['no_simulation_dataset', 'no_simulation_function']:
                return_dict = {}
                fun(self, x, launch_server, self.counter, port, return_dict)
            else:
                # Run in a child process with a 240s watchdog so a hung
                # simulator cannot stall the whole search.
                manager = Manager()
                return_dict = manager.dict()
                try:
                    p = Process(target=fun, args=(self, x, launch_server, self.counter, port, return_dict))
                    p.start()
                    p.join(240)
                    if p.is_alive():
                        print("Function is hanging!")
                        p.terminate()
                        print("Kidding, just terminated!")
                except:
                    # Fall back to default objectives on any subprocess failure.
                    traceback.print_exc()
                    objectives, run_info, has_run = default_objectives, None, 0
            # A terminated/crashed child leaves return_dict empty.
            if 'returned_data' in return_dict:
                objectives, run_info, has_run = return_dict['returned_data']
            else:
                # TBD: add an error log
                print('\n'*3, 'returned_data is missing', '\n'*3)
                objectives, run_info, has_run = default_objectives, None, 0
            print('get job result for', total_i)
            if run_info and 'all_final_generated_transforms' in run_info:
                all_final_generated_transforms_list.append(run_info['all_final_generated_transforms'])
            self.has_run_list.append(has_run)
            self.has_run += has_run
            # record bug
            if run_info and run_info['is_bug']:
                self.bugs.append(X[cur_i].astype(float))
                self.bugs_inds_list.append(total_i)
                self.bugs_type_list.append(run_info['bug_type'])
                self.y_list.append(run_info['bug_type'])
            else:
                self.y_list.append(0)
            self.counter += 1
            tmp_run_info_list.append(run_info)
            x_sublist.append(x)
            objectives_sublist_non_traj.append(objectives)
            if run_info and 'trajectory_vector' in run_info:
                trajectory_vector_sublist.append(run_info['trajectory_vector'])
            else:
                trajectory_vector_sublist.append(None)
        # Fold this generation's results into the cumulative histories.
        job_results, self.x_list, self.objectives_list, self.trajectory_vector_list = get_job_results(tmp_run_info_list, x_sublist, objectives_sublist_non_traj, trajectory_vector_sublist, self.x_list, self.objectives_list, self.trajectory_vector_list, traj_dist_metric)
        # print('self.objectives_list', self.objectives_list)
        # hack:
        # NOTE(review): `run_info` here is the leaked loop variable from the
        # last iteration above — only the final run decides whether to dump.
        if run_info and 'all_final_generated_transforms' in run_info:
            with open('carla_lbc/tmp_folder/total.pickle', 'wb') as f_out:
                pickle.dump(all_final_generated_transforms_list, f_out)
        # record time elapsed and bug numbers
        time_elapsed = time.time() - self.start_time
        self.time_list.append(time_elapsed)
        current_F = get_F(job_results, self.objectives_list, objective_weights, self.use_single_objective, standardize=standardize_objective, normalize=normalize_objective)
        out["F"] = current_F
        self.F_list.append(current_F)
        # print('\n'*3, 'self.F_list', len(self.F_list), self.F_list, '\n'*3)
        print('\n'*10, '+'*100)
        bugs_type_list_tmp = self.bugs_type_list
        bugs_tmp = self.bugs
        bugs_inds_list_tmp = self.bugs_inds_list
        self.unique_bugs, unique_bugs_inds_list, self.interested_unique_bugs, bugcounts = get_unique_bugs(self.x_list, self.objectives_list, self.mask, self.xl, self.xu, self.check_unique_coeff, objective_weights, return_mode='unique_inds_and_interested_and_bugcounts', consider_interested_bugs=1, bugs_type_list=bugs_type_list_tmp, bugs=bugs_tmp, bugs_inds_list=bugs_inds_list_tmp, trajectory_vector_list=self.trajectory_vector_list)
        time_elapsed = time.time() - self.start_time
        num_of_bugs = len(self.bugs)
        num_of_unique_bugs = len(self.unique_bugs)
        num_of_interested_unique_bugs = len(self.interested_unique_bugs)
        self.bugs_num_list.append(num_of_bugs)
        self.unique_bugs_num_list.append(num_of_unique_bugs)
        # Mean objectives over the rows evaluated in this generation only.
        mean_objectives_this_generation = np.mean(np.array(self.objectives_list[-X.shape[0]:]), axis=0)
        # Append a human-readable progress record for offline analysis.
        with open(self.fuzzing_arguments.mean_objectives_across_generations_path, 'a') as f_out:
            info_dict = {
                'counter': self.counter,
                'has_run': self.has_run,
                'time_elapsed': time_elapsed,
                'num_of_bugs': num_of_bugs,
                'num_of_unique_bugs': num_of_unique_bugs,
                'num_of_interested_unique_bugs': num_of_interested_unique_bugs,
                'bugcounts and unique bug counts': bugcounts, 'mean_objectives_this_generation': mean_objectives_this_generation.tolist(),
                'current_F': current_F
            }
            f_out.write(str(info_dict))
            f_out.write(';'.join([str(ind) for ind in unique_bugs_inds_list])+' objective_weights : '+str(self.objective_weights)+'\n')
        print(info_dict)
        print('+'*100, '\n'*10)
def do_emcmc(parents, off, n_gen, objective_weights, default_objectives):
    """MCMC-style acceptance step over offspring for the EMCMC variant.

    For each offspring/parent pair, accepts the offspring with probability
    min(|base - f(off)| / |base - f(parent)|, 1), where base is the
    scalarized default objective, and collects accepted offspring in `pop`.

    NOTE(review): `pop` (the filtered population) is built but never used —
    the function returns Population.merge(parents, off), i.e. ALL offspring,
    so the acceptance filter currently has no effect. Confirm whether the
    intended return value was Population.merge(parents, pop).
    """
    # Scalarized objective of the default (uneventful) outcome.
    base_val = np.sum(np.array(default_objectives[:len(objective_weights)])*np.array(objective_weights))
    filtered_off = []
    F_list = []
    for i in off:
        for p in parents:
            print(i.F, p.F)
            i_val = np.sum(np.array(i.F) * np.array(objective_weights))
            p_val = np.sum(np.array(p.F) * np.array(objective_weights))
            print('1', base_val, i_val, p_val)
            # Distances from the default objective value.
            i_val = np.abs(base_val-i_val)
            p_val = np.abs(base_val-p_val)
            prob = np.min([i_val / p_val, 1])
            print('2', base_val, i_val, p_val, prob)
            if np.random.uniform() < prob:
                filtered_off.append(i.X)
                F_list.append(i.F)
    pop = Population(len(filtered_off), individual=Individual())
    pop.set("X", filtered_off, "F", F_list, "n_gen", n_gen, "CV", [0 for _ in range(len(filtered_off))], "feasible", [[True] for _ in range(len(filtered_off))])
    return Population.merge(parents, off)
class NSGA2_CUSTOMIZED(NSGA2):
    def __init__(self, dt=False, X=None, F=None, fuzzing_arguments=None, random_sampling=None, local_mating=None, **kwargs):
        """Customized NSGA2 supporting several search strategies.

        `kwargs` must contain at least 'sampling' and 'mutation' and is
        forwarded to pymoo's NSGA2.__init__ together with pop_size and
        n_offsprings taken from `fuzzing_arguments`.
        """
        # Decision-tree mode and optional warm-start data.
        self.dt = dt
        self.X = X
        self.F = F
        self.random_sampling = random_sampling
        self.sampling = kwargs['sampling']
        # Search / ranking configuration from the CLI.
        self.pop_size = fuzzing_arguments.pop_size
        self.n_offsprings = fuzzing_arguments.n_offsprings
        self.survival_multiplier = fuzzing_arguments.survival_multiplier
        self.algorithm_name = fuzzing_arguments.algorithm_name
        self.emcmc = fuzzing_arguments.emcmc
        self.initial_fit_th = fuzzing_arguments.initial_fit_th
        self.rank_mode = fuzzing_arguments.rank_mode
        self.min_bug_num_to_fit_dnn = fuzzing_arguments.min_bug_num_to_fit_dnn
        self.ranking_model = fuzzing_arguments.ranking_model
        self.use_unique_bugs = fuzzing_arguments.use_unique_bugs
        self.pgd_eps = fuzzing_arguments.pgd_eps
        self.adv_conf_th = fuzzing_arguments.adv_conf_th
        self.attack_stop_conf = fuzzing_arguments.attack_stop_conf
        self.uncertainty = fuzzing_arguments.uncertainty
        self.warm_up_path = fuzzing_arguments.warm_up_path
        self.warm_up_len = fuzzing_arguments.warm_up_len
        self.regression_nn_use_running_data = fuzzing_arguments.regression_nn_use_running_data
        self.only_run_unique_cases = fuzzing_arguments.only_run_unique_cases
        # Must run before using self.repair below (set up by pymoo's NSGA2).
        super().__init__(pop_size=self.pop_size, n_offsprings=self.n_offsprings, **kwargs)
        self.random_initialization = Initialization(self.random_sampling, individual=Individual(), repair=self.repair, eliminate_duplicates= NoDuplicateElimination())
        # heuristic: we keep up about 1 times of each generation's population
        self.survival_size = self.pop_size * self.survival_multiplier
        # All X values ever run (used by restart / ranking logic).
        self.all_pop_run_X = []
        # hack: defined separately w.r.t. MyMating
        self.mating_max_iterations = 1
        # Offspring staging area filled by set_off.
        self.tmp_off = []
        self.tmp_off_type_1_len = 0
        # self.tmp_off_type_1and2_len = 0
        self.high_conf_configs_stack = []
        self.high_conf_configs_ori_stack = []
        self.device_name = 'cuda'
        # avfuzzer variables
        # Best-so-far pairs [individual, F] for global / restart / local phases.
        self.best_y_gen = []
        self.global_best_y = [None, 10000]
        self.restart_best_y = [None, 10000]
        self.local_best_y = [None, 10000]
        self.pop_before_local = None
        self.local_gen = -1
        self.restart_gen = 0
        self.cur_gen = -1
        self.local_mating = local_mating
        self.mutation = kwargs['mutation']
        # Minimum generations after a restart before local search may start.
        self.minLisGen = 2
    def set_off(self):
        """Generate the next offspring batch and select it into self.off.

        Strategy depends on self.algorithm_name:
        - 'avfuzzer': GA with restart and local (LIS) phases;
        - 'random'/'grid': pure (re)sampling via self.initialization;
        - otherwise (nsga2-style): mating + top-up with unique/random
          samples, then optional NN-based seed ranking / adversarial (PGD)
          mutation before picking the top pop_size offspring.
        """
        self.tmp_off = []
        if self.algorithm_name == 'avfuzzer':
            cur_best_y = [None, 10000]
            if self.cur_gen >= 0:
                # local search
                if 0 <= self.local_gen <= 4:
                    with open('tmp_log.txt', 'a') as f_out:
                        f_out.write(str(self.cur_gen)+' local '+str(self.local_gen)+'\n')
                    # Track the best (lowest F) individual seen in the local phase.
                    cur_pop = self.pop[-self.pop_size:]
                    for p in cur_pop:
                        if p.F < self.local_best_y[1]:
                            self.local_best_y = [p, p.F]
                    if self.local_gen == 4:
                        # Local phase over: fold its best into the global
                        # bookkeeping and resume from the pre-local population.
                        self.local_gen = -1
                        if self.local_best_y[1] < self.global_best_y[1]:
                            self.global_best_y = self.local_best_y
                        if self.local_best_y[1] < self.best_y_gen[-1][1]:
                            self.best_y_gen[-1] = self.local_best_y
                        # if self.local_best_y[1] < self.restart_best_y[1]:
                        #     self.restart_best_y = self.local_best_y
                        # Replace the best member of the pre-local population
                        # with the local best, then mate from it.
                        tmp_best_ind = 0
                        tmp_best_y = [None, 10000]
                        for i, p in enumerate(self.pop_before_local):
                            if p.F < tmp_best_y[1]:
                                tmp_best_y = [p, p.F]
                                tmp_best_ind = i
                        self.pop_before_local[tmp_best_ind] = self.local_best_y[0]
                        self.tmp_off, _ = self.mating.do(self.problem, self.pop_before_local, self.n_offsprings, algorithm=self)
                        self.cur_gen += 1
                    else:
                        self.local_gen += 1
                        self.tmp_off, _ = self.local_mating.do(self.problem, self.pop, self.n_offsprings, algorithm=self)
                # global search
                else:
                    # Best of the current generation.
                    cur_pop = self.pop[-self.pop_size:]
                    for p in cur_pop:
                        if p.F < cur_best_y[1]:
                            cur_best_y = [p, p.F]
                    if cur_best_y[1] < self.global_best_y[1]:
                        self.global_best_y = cur_best_y
                    # Record / update the per-generation best list.
                    if len(self.best_y_gen) == self.cur_gen:
                        self.best_y_gen.append(cur_best_y)
                    else:
                        if cur_best_y[1] < self.best_y_gen[-1][1]:
                            self.best_y_gen[-1] = cur_best_y
                    if self.cur_gen - self.restart_gen <= self.minLisGen:
                        if cur_best_y[1] < self.restart_best_y[1]:
                            self.restart_best_y = cur_best_y
                    with open('tmp_log.txt', 'a') as f_out:
                        f_out.write('self.global_best_y: '+ str(self.global_best_y[1])+', cur_best_y[1]: '+str(cur_best_y[1])+', self.restart_best_y[1]: '+str(self.restart_best_y[1])+'\n')
                    normal = True
                    # restart
                    # No progress vs the mean of the last 5 generations ->
                    # restart with samples far from everything run so far.
                    if self.cur_gen - self.restart_gen > 4:
                        last_5_mean = np.mean([v for _, v in self.best_y_gen[-5:]])
                        with open('tmp_log.txt', 'a') as f_out:
                            f_out.write('last_5_mean: '+str(last_5_mean)+', cur_best_y[1]: '+str(cur_best_y[1])+'\n')
                        if cur_best_y[1] >= last_5_mean:
                            with open('tmp_log.txt', 'a') as f_out:
                                f_out.write(str(self.cur_gen)+' restart'+'\n')
                            tmp_off_candidates = self.random_initialization.do(self.problem, 1000, algorithm=self)
                            tmp_off_candidates_X = np.stack([p.X for p in tmp_off_candidates])
                            chosen_inds = choose_farthest_offs(tmp_off_candidates_X, self.all_pop_run_X, self.pop_size)
                            self.tmp_off = tmp_off_candidates[chosen_inds]
                            self.restart_best_y = [None, 10000]
                            normal = False
                            self.cur_gen += 1
                            self.restart_gen = self.cur_gen
                    # enter local
                    # Improvement since the restart window -> start a local
                    # phase seeded by mutations of the global best.
                    if normal and self.cur_gen - self.restart_gen > self.minLisGen and cur_best_y[1] < self.restart_best_y[1]:
                        with open('tmp_log.txt', 'a') as f_out:
                            f_out.write(str(self.cur_gen)+'enter local'+'\n')
                        self.restart_best_y[1] = cur_best_y[1]
                        self.pop_before_local = copy.deepcopy(self.pop)
                        pop = Population(self.pop_size, individual=Individual())
                        pop.set("X", [self.global_best_y[0].X for _ in range(self.pop_size)])
                        pop.set("F", [self.global_best_y[1] for _ in range(self.pop_size)])
                        self.tmp_off = self.mutation.do(self.problem, pop)
                        self.local_best_y = [None, 10000]
                        self.local_gen = 0
                        normal = False
                        # not increasing cur_gen in this case
                    if normal:
                        with open('tmp_log.txt', 'a') as f_out:
                            f_out.write(str(self.cur_gen)+' normal'+'\n')
                        self.tmp_off, _ = self.mating.do(self.problem, self.pop, self.pop_size, algorithm=self)
                        self.cur_gen += 1
            else:
                # initialization
                self.tmp_off = self.random_initialization.do(self.problem, self.n_offsprings, algorithm=self)
                self.cur_gen += 1
        elif self.algorithm_name in ['random', 'grid']:
            self.tmp_off = self.initialization.do(self.problem, self.n_offsprings, algorithm=self)
        # elif self.algorithm_name in ['random_local_sphere']:
        #     # print('self.sampling.cur_ind', self.sampling.cur_ind)
        #     # if self.sampling.cur_ind > -1:
        #     #     print('self.sampling.spheres[self.sampling.cur_ind].sampling_num', self.sampling.spheres[self.sampling.cur_ind].sampling_num)
        #     if len(self.sampling.spheres) > 0 and self.sampling.spheres[self.sampling.cur_ind].if_local_sampling():
        #         latest_ind, latest_x, latest_y = len(self.problem.x_list)-1, self.problem.x_list[-1], self.problem.y_list[-1]
        #         self.sampling.update_cur_sphere(latest_ind, latest_x, latest_y)
        #
        #     if len(self.sampling.spheres) == 0 or not self.sampling.spheres[self.sampling.cur_ind].if_local_sampling():
        #         self.sampling.add_uncovered_coverable_bugs(self.problem.x_list, self.problem.y_list)
        #         uncovered_bug = self.sampling.find_an_uncovered_bug(self.problem.x_list, self.problem.y_list)
        #         # If an uncovered bug is found by global sampling
        #         if uncovered_bug:
        #             self.sampling.new_sphere(uncovered_bug, self.problem.x_list, self.problem.y_list)
        #             tmp_val = self.sampling._do(self.problem, self.n_offsprings)
        #             pop = Population(0, individual=Individual())
        #             self.tmp_off = pop.new("X", tmp_val)
        #         # do global sampling when no available bug can be used as a new center
        #         else:
        #             offspring_multiplier = 1000
        #             sphere_center_d_th_random_sampling = 0.1
        #
        #             tmp_x_list = self.random_sampling._do(self.problem, self.n_offsprings*offspring_multiplier, algorithm=self)
        #             d_list = self.sampling.d_to_spheres(tmp_x_list)
        #
        #             candidate_x_list_inds = np.where(d_list > sphere_center_d_th_random_sampling)[0]
        #             if len(candidate_x_list_inds) < self.n_offsprings:
        #                 candidate_x_list_inds = np.argsort(d_list)[-self.n_offsprings:]
        #                 tmp_val = np.array(tmp_x_list)[candidate_x_list_inds]
        #             else:
        #                 tmp_val = np.random.choice(candidate_x_list_inds, size=self.n_offsprings, replace=False)
        #                 tmp_val = np.array(tmp_x_list)[candidate_x_list_inds]
        #             pop = Population(0, individual=Individual())
        #             self.tmp_off = pop.new("X", tmp_val)
        #     else:
        #         tmp_val = self.sampling._do(self.problem, self.n_offsprings)
        #         pop = Population(0, individual=Individual())
        #         self.tmp_off = pop.new("X", tmp_val)
        else:
            if self.algorithm_name == 'random-un':
                self.tmp_off, parents = [], []
            else:
                print('len(self.pop)', len(self.pop))
                # do the mating using the current population
                if len(self.pop) > 0:
                    self.tmp_off, parents = self.mating.do(self.problem, self.pop, self.n_offsprings, algorithm=self)
                    print('\n'*3, 'after mating len 0', len(self.tmp_off), 'self.n_offsprings', self.n_offsprings, '\n'*3)
            # Top up with (uniqueness-aware) initialization samples.
            if len(self.tmp_off) < self.n_offsprings:
                remaining_num = self.n_offsprings - len(self.tmp_off)
                remaining_off = self.initialization.do(self.problem, remaining_num, algorithm=self)
                remaining_parrents = remaining_off
                if len(self.tmp_off) == 0:
                    self.tmp_off = remaining_off
                    parents = remaining_parrents
                else:
                    self.tmp_off = Population.merge(self.tmp_off, remaining_off)
                    parents = Population.merge(parents, remaining_parrents)
                print('\n'*3, 'unique after random generation len 1', len(self.tmp_off), '\n'*3)
            # Number of "type 1" (unique-aware) offspring; the ranking code
            # below gives these preference over plain random top-ups.
            self.tmp_off_type_1_len = len(self.tmp_off)
            # Final top-up with purely random samples if still short.
            if len(self.tmp_off) < self.n_offsprings:
                remaining_num = self.n_offsprings - len(self.tmp_off)
                remaining_off = self.random_initialization.do(self.problem, remaining_num, algorithm=self)
                remaining_parrents = remaining_off
                self.tmp_off = Population.merge(self.tmp_off, remaining_off)
                parents = Population.merge(parents, remaining_parrents)
                print('\n'*3, 'random generation len 2', len(self.tmp_off), '\n'*3)
        # if the mating could not generate any new offspring (duplicate elimination might make that happen)
        no_offspring = len(self.tmp_off) == 0
        not_nsga2_dt_and_finish_has_run = not self.problem.call_from_dt and self.problem.fuzzing_arguments.finish_after_has_run and self.problem.has_run >= self.problem.fuzzing_arguments.has_run_num
        if no_offspring or not_nsga2_dt_and_finish_has_run:
            self.termination.force_termination = True
            print("Mating cannot generate new springs, terminate earlier.")
            print('self.tmp_off', len(self.tmp_off))
            return
        # if not the desired number of offspring could be created
        elif len(self.tmp_off) < self.n_offsprings:
            if self.verbose:
                print("WARNING: Mating could not produce the required number of (unique) offsprings!")
        # additional step to rank and select self.off after gathering initial population
        no_ranking = self.rank_mode == 'none'
        cla_nn_ranking_and_no_enough_samples_or_no_enough_bugs = self.rank_mode in ['nn', 'adv_nn'] and (len(self.problem.objectives_list) < self.initial_fit_th or np.sum(determine_y_upon_weights(self.problem.objectives_list, self.problem.objective_weights)) < self.min_bug_num_to_fit_dnn)
        reg_ranking_and_no_enough_samples = self.rank_mode in ['regression_nn'] and len(self.problem.objectives_list) < self.pop_size
        if no_ranking or cla_nn_ranking_and_no_enough_samples_or_no_enough_bugs or reg_ranking_and_no_enough_samples:
            # Not enough history (or ranking disabled): take the first pop_size.
            self.off = self.tmp_off[:self.pop_size]
        else:
            if self.rank_mode in ['regression_nn']:
                from customized_utils import pretrain_regression_nets
                # Choose training data: either everything run so far, or a
                # warm-up dataset loaded from disk.
                if self.regression_nn_use_running_data:
                    initial_X = self.all_pop_run_X
                    initial_objectives_list = self.problem.objectives_list
                    cutoff = len(initial_X)
                    cutoff_end = cutoff
                else:
                    subfolders = get_sorted_subfolders(self.warm_up_path)
                    initial_X, _, initial_objectives_list, _, _, _ = load_data(subfolders)
                    cutoff = self.warm_up_len
                    cutoff_end = self.warm_up_len + 100
                    if cutoff == 0:
                        cutoff = len(initial_X)
                    if cutoff_end > len(initial_X):
                        cutoff_end = len(initial_X)
                clfs, confs, chosen_weights, standardize_prev = pretrain_regression_nets(initial_X, initial_objectives_list, self.problem.objective_weights, self.problem.xl, self.problem.xu, self.problem.labels, self.problem.customized_constraints, cutoff, cutoff_end, self.problem.fuzzing_content.keywords_dict, choose_weight_inds)
            else:
                standardize_prev = None
            # Encode/standardize run history (train) + candidates (test)
            # into the NN feature space.
            X_train_ori = self.all_pop_run_X
            X_test_ori = self.tmp_off.get("X")
            initial_X = np.concatenate([X_train_ori, X_test_ori])
            cutoff = X_train_ori.shape[0]
            cutoff_end = initial_X.shape[0]
            partial = True
            X_train, X_test, xl, xu, labels_used, standardize, one_hot_fields_len, param_for_recover_and_decode = process_X(initial_X, self.problem.labels, self.problem.xl, self.problem.xu, cutoff, cutoff_end, partial, len(self.problem.interested_unique_bugs), self.problem.fuzzing_content.keywords_dict, standardize_prev=standardize_prev)
            (X_removed, kept_fields, removed_fields, enc, inds_to_encode, inds_non_encode, encoded_fields, _, _, unique_bugs_len) = param_for_recover_and_decode
            print('process_X finished')
            if self.rank_mode in ['regression_nn']:
                # Rank candidates by predicted (confidence-adjusted) objectives.
                weight_inds = choose_weight_inds(self.problem.objective_weights)
                obj_preds = []
                for clf in clfs:
                    obj_preds.append(clf.predict(X_test))
                tmp_objectives = np.concatenate(obj_preds, axis=1)
                # Give uniqueness-aware candidates a large bonus.
                if self.use_unique_bugs:
                    tmp_objectives[:self.tmp_off_type_1_len] -= 100*chosen_weights
                tmp_objectives_minus = tmp_objectives - confs
                tmp_objectives_plus = tmp_objectives + confs
                # Optimistic pass: already-run + candidates with lower-bound scores.
                tmp_pop_minus = Population(X_train.shape[0]+X_test.shape[0], individual=Individual())
                tmp_X_minus = np.concatenate([X_train, X_test])
                tmp_objectives_minus = np.concatenate([np.array(self.problem.objectives_list)[:, weight_inds], tmp_objectives_minus]) * np.array(self.problem.objective_weights[weight_inds])
                tmp_pop_minus.set("X", tmp_X_minus)
                tmp_pop_minus.set("F", tmp_objectives_minus)
                print('len(tmp_objectives_minus)', len(tmp_objectives_minus))
                inds_minus_top = np.array(self.survival.do(self.problem, tmp_pop_minus, self.pop_size, return_indices=True))
                print('inds_minus_top', inds_minus_top, 'len(X_train)', len(X_train), np.sum(inds_minus_top<len(X_train)))
                num_of_top_already_run = np.sum(inds_minus_top<len(X_train))
                num_to_run = self.pop_size - num_of_top_already_run
                if num_to_run > 0:
                    # Pessimistic pass over candidates only, to pick the rest.
                    tmp_pop_plus = Population(X_test.shape[0], individual=Individual())
                    tmp_X_plus = X_test
                    tmp_objectives_plus = tmp_objectives_plus * np.array(self.problem.objective_weights[weight_inds])
                    tmp_pop_plus.set("X", tmp_X_plus)
                    tmp_pop_plus.set("F", tmp_objectives_plus)
                    print('tmp_objectives_plus', tmp_objectives_plus)
                    inds_plus_top = np.array(self.survival.do(self.problem, tmp_pop_plus, num_to_run, return_indices=True))
                    print('inds_plus_top', inds_plus_top)
                    self.off = self.tmp_off[inds_plus_top]
                else:
                    print('no more offsprings to run (regression nn)')
                    self.off = Population(0, individual=Individual())
            else:
                # ---seed selection---
                if self.uncertainty:
                    # Acquisition-function-based (active learning) selection.
                    y_train = determine_y_upon_weights(self.problem.objectives_list, self.problem.objective_weights)
                    print('uncertainty', self.uncertainty)
                    if self.uncertainty == 'nndv':
                        from customized_utils import nndv
                        # TBD: make the following can be adjusted from the interface
                        angle_features = [2]
                        scales = [2, 2, 15]
                        inds = nndv(X_train, y_train, X_test, self.pop_size, angle_features, scales)
                    else:
                        # uncertainty looks like '<strategy>_<conf|...>'.
                        uncertainty_key, uncertainty_conf = self.uncertainty.split('_')
                        acquisition_strategy = map_acquisition(uncertainty_key)
                        acquirer = acquisition_strategy(self.pop_size)
                        if uncertainty_conf == 'conf':
                            uncertainty_conf = True
                        else:
                            uncertainty_conf = False
                        pool_data = torch_subset(VanillaDataset(X_test, np.zeros(X_test.shape[0]), to_tensor=True))
                        clf = train_net(X_train, y_train, [], [], batch_train=60, device_name=self.device_name)
                        if self.use_unique_bugs:
                            unique_len = self.tmp_off_type_1_len
                        else:
                            unique_len = 0
                        inds = acquirer.select_batch(clf, pool_data, unique_len=unique_len, uncertainty_conf=uncertainty_conf)
                else:
                    # Classifier-score-based selection (optionally adversarial).
                    adv_conf_th = self.adv_conf_th
                    attack_stop_conf = self.attack_stop_conf
                    y_train = determine_y_upon_weights(self.problem.objectives_list, self.problem.objective_weights)
                    if self.ranking_model == 'nn_pytorch':
                        print(X_train.shape, y_train.shape)
                        clf = train_net(X_train, y_train, [], [], batch_train=200, device_name=self.device_name)
                    elif self.ranking_model == 'adaboost':
                        from sklearn.ensemble import AdaBoostClassifier
                        clf = AdaBoostClassifier()
                        clf = clf.fit(X_train, y_train)
                    else:
                        # NOTE(review): `ranking_model` is undefined here (should
                        # presumably be self.ranking_model) — this raises
                        # NameError instead of the intended ValueError.
                        raise ValueError('invalid ranking model', ranking_model)
                    print('X_train', X_train.shape)
                    print('clf.predict_proba(X_train)', clf.predict_proba(X_train).shape)
                    if self.ranking_model == 'adaboost':
                        prob_train = clf.predict_proba(X_train)[:, 0].squeeze()
                    else:
                        prob_train = clf.predict_proba(X_train)[:, 1].squeeze()
                    cur_y = y_train
                    # Negative adv_conf_th is interpreted as a fraction: pick the
                    # threshold from the sorted training probabilities.
                    if self.adv_conf_th < 0 and self.rank_mode in ['adv_nn']:
                        # print(sorted(prob_train, reverse=True))
                        # print('cur_y', cur_y)
                        # print('np.abs(self.adv_conf_th)', np.abs(self.adv_conf_th))
                        # print(int(np.sum(cur_y)//np.abs(self.adv_conf_th)))
                        adv_conf_th = sorted(prob_train, reverse=True)[int(np.sum(cur_y)//np.abs(self.adv_conf_th))]
                        attack_stop_conf = np.max([self.attack_stop_conf, adv_conf_th])
                    if self.adv_conf_th > attack_stop_conf:
                        self.adv_conf_th = attack_stop_conf
                    pred = clf.predict_proba(X_test)
                    if len(pred.shape) == 1:
                        pred = np.expand_dims(pred, axis=0)
                    scores = pred[:, 1]
                    print('initial scores', scores)
                    # when using unique bugs give preference to unique inputs
                    if self.rank_mode == 'adv_nn':
                        X_test_pgd_ori = None
                        X_test_pgd = None
                    if self.use_unique_bugs:
                        print('self.tmp_off_type_1_len', self.tmp_off_type_1_len)
                        scores[:self.tmp_off_type_1_len] += np.max(scores)
                        # scores[:self.tmp_off_type_1and2_len] += 100
                    # Negate so argsort picks the highest-scoring candidates.
                    scores *= -1
                    inds = np.argsort(scores)[:self.pop_size]
                    print('scores', scores)
                    print('sorted(scores)', sorted(scores))
                    print('chosen indices', inds)
                # ---additional mutation on selected seeds---
                if self.rank_mode == 'nn':
                    self.off = self.tmp_off[inds]
                elif self.rank_mode == 'adv_nn':
                    X_test_pgd_ori = X_test_ori[inds]
                    X_test_pgd = X_test[inds]
                    associated_clf_id = []
                    # conduct pgd with constraints differently for different types of inputs
                    if self.use_unique_bugs:
                        unique_coeff = (self.problem.p, self.problem.c, self.problem.th)
                        mask = self.problem.mask
                        y_zeros = np.zeros(X_test_pgd.shape[0])
                        X_test_adv, new_bug_pred_prob_list, initial_bug_pred_prob_list = pgd_attack(clf, X_test_pgd, y_zeros, xl, xu, encoded_fields, labels_used, self.problem.customized_constraints, standardize, prev_X=self.problem.interested_unique_bugs, base_ind=0, unique_coeff=unique_coeff, mask=mask, param_for_recover_and_decode=param_for_recover_and_decode, eps=self.pgd_eps, adv_conf_th=adv_conf_th, attack_stop_conf=attack_stop_conf, associated_clf_id=associated_clf_id, X_test_pgd_ori=X_test_pgd_ori, consider_uniqueness=True, device_name=self.device_name)
                    else:
                        y_zeros = np.zeros(X_test_pgd.shape[0])
                        X_test_adv, new_bug_pred_prob_list, initial_bug_pred_prob_list = pgd_attack(clf, X_test_pgd, y_zeros, xl, xu, encoded_fields, labels_used, self.problem.customized_constraints, standardize, eps=self.pgd_eps, adv_conf_th=adv_conf_th, attack_stop_conf=attack_stop_conf, associated_clf_id=associated_clf_id, X_test_pgd_ori=X_test_pgd_ori, device_name=self.device_name)
                    # Map adversarial examples back to the original parameter space.
                    X_test_adv_processed = inverse_process_X(X_test_adv, standardize, one_hot_fields_len, partial, X_removed, kept_fields, removed_fields, enc, inds_to_encode, inds_non_encode, encoded_fields)
                    X_off = X_test_adv_processed
                    pop = Population(X_off.shape[0], individual=Individual())
                    pop.set("X", X_off)
                    pop.set("F", [None for _ in range(X_off.shape[0])])
                    self.off = pop
        # Optionally drop offspring too similar to known unique bugs.
        if self.only_run_unique_cases:
            X_off = [off_i.X for off_i in self.off]
            remaining_inds = is_distinct_vectorized(X_off, self.problem.interested_unique_bugs, self.problem.mask, self.problem.xl, self.problem.xu, self.problem.p, self.problem.c, self.problem.th, verbose=False)
            self.off = self.off[remaining_inds]
        self.off.set("n_gen", self.n_gen)
        print('\n'*2, 'self.n_gen', self.n_gen, '\n'*2)
        # Remember every configuration that will actually be run.
        if len(self.all_pop_run_X) == 0:
            self.all_pop_run_X = self.off.get("X")
        else:
            if len(self.off.get("X")) > 0:
                self.all_pop_run_X = np.concatenate([self.all_pop_run_X, self.off.get("X")])
# mainly used to modify survival
def _next(self):
    """Advance the search by one generation.

    Generates offspring via ``set_off``, evaluates them, then updates
    ``self.pop`` according to the algorithm variant in use.
    """
    # set self.off
    self.set_off()
    # evaluate the offspring
    if len(self.off) > 0:
        self.evaluator.eval(self.problem, self.off, algorithm=self)
    if self.algorithm_name in ['random', 'avfuzzer', 'grid', 'random_local_sphere']:
        # sampling-style algorithms: the offspring simply replace the population
        self.pop = self.off
    elif self.emcmc:
        # NOTE(review): `parents` is not defined anywhere in this method; this
        # branch would raise NameError if taken. Confirm where `parents` should
        # come from (possibly `self.pop` or a value produced by `set_off`).
        new_pop = do_emcmc(parents, self.off, self.n_gen, self.problem.objective_weights, self.problem.fuzzing_arguments.default_objectives)
        self.pop = Population.merge(self.pop, new_pop)
        if self.survival:
            self.pop = self.survival.do(self.problem, self.pop, self.survival_size, algorithm=self, n_min_infeas_survive=self.min_infeas_pop_size)
    else:
        # merge the offsprings with the current population
        self.pop = Population.merge(self.pop, self.off)
        # the do survival selection
        if self.survival:
            print('\n'*3)
            print('len(self.pop) before', len(self.pop))
            print('survival')
            self.pop = self.survival.do(self.problem, self.pop, self.survival_size, algorithm=self, n_min_infeas_survive=self.min_infeas_pop_size)
            print('len(self.pop) after', len(self.pop))
            print(self.pop_size, self.survival_size)
            print('\n'*3)
def _initialize(self):
    """Build the initial population.

    Three start modes are supported:
    * warm-up from a previous run's folder (``self.warm_up_path``),
    * decision-tree mode (``self.dt``) seeded with externally supplied X/F,
    * fresh random (or uniqueness-aware) sampling.
    """
    # Warm-up: load previously collected samples and pre-populate the
    # bug bookkeeping on the problem object before any new simulation runs.
    if self.warm_up_path and ((self.dt and not self.problem.cumulative_info) or (not self.dt)):
        subfolders = get_sorted_subfolders(self.warm_up_path)
        X, _, objectives_list, mask, _, _ = load_data(subfolders)
        if self.warm_up_len > 0:
            X = X[:self.warm_up_len]
            objectives_list = objectives_list[:self.warm_up_len]
        else:
            # warm_up_len <= 0 means "use everything that was loaded"
            self.warm_up_len = len(X)
        xl = self.problem.xl
        xu = self.problem.xu
        p, c, th = self.problem.p, self.problem.c, self.problem.th
        unique_coeff = (p, c, th)
        self.problem.unique_bugs, (self.problem.bugs, self.problem.bugs_type_list, self.problem.bugs_inds_list, self.problem.interested_unique_bugs) = get_unique_bugs(
            X, objectives_list, mask, xl, xu, unique_coeff, self.problem.objective_weights, return_mode='return_bug_info', consider_interested_bugs=self.problem.consider_interested_bugs
        )
        print('\n'*10)
        print('self.problem.bugs', len(self.problem.bugs))
        print('self.problem.unique_bugs', len(self.problem.unique_bugs))
        print('\n'*10)
        self.all_pop_run_X = np.array(X)
        # NOTE(review): assumes objectives_list supports .tolist() (numpy
        # array from load_data) -- confirm against load_data's return type.
        self.problem.objectives_list = objectives_list.tolist()
    if self.dt:
        # Decision-tree mode: seed the population with the X/F passed in
        # from the outer nsga2-dt loop, then generate offspring from it.
        X_list = list(self.X)
        F_list = list(self.F)
        pop = Population(len(X_list), individual=Individual())
        pop.set("X", X_list, "F", F_list, "n_gen", self.n_gen, "CV", [0 for _ in range(len(X_list))], "feasible", [[True] for _ in range(len(X_list))])
        self.pop = pop
        self.set_off()
        pop = self.off
    elif self.warm_up_path:
        # Warm-up mode: seed with the most recent pop_size samples.
        X_list = X[-self.pop_size:]
        current_objectives = objectives_list[-self.pop_size:]
        F_list = get_F(current_objectives, objectives_list, self.problem.objective_weights, self.problem.use_single_objective)
        pop = Population(len(X_list), individual=Individual())
        pop.set("X", X_list, "F", F_list, "n_gen", self.n_gen, "CV", [0 for _ in range(len(X_list))], "feasible", [[True] for _ in range(len(X_list))])
        self.pop = pop
        self.set_off()
        pop = self.off
    else:
        # create the initial population
        if self.use_unique_bugs:
            pop = self.initialization.do(self.problem, self.problem.fuzzing_arguments.pop_size, algorithm=self)
        else:
            pop = self.random_initialization.do(self.problem, self.pop_size, algorithm=self)
        pop.set("n_gen", self.n_gen)
        if len(pop) > 0:
            self.evaluator.eval(self.problem, pop, algorithm=self)
        print('\n'*5, 'after initialize evaluator', '\n'*5)
        print('len(self.all_pop_run_X)', len(self.all_pop_run_X))
        print('len(self.problem.objectives_list)', len(self.problem.objectives_list))
        self.all_pop_run_X = pop.get("X")
    # that call is a dummy survival to set attributes that are necessary for the mating selection
    if self.survival:
        pop = self.survival.do(self.problem, pop, len(pop), algorithm=self, n_min_infeas_survive=self.min_infeas_pop_size)
    self.pop, self.off = pop, pop
class MyEvaluator(Evaluator):
    """Evaluator that can optionally correct spawn locations after each run.

    Parameters
    ----------
    correct_spawn_locations_after_run : bool/int
        When truthy, post-process every evaluated individual's decision
        vector with ``correct_spawn_locations_all``.
    correct_spawn_locations : optional
        Correction data kept for callers; not used directly here.
    """
    def __init__(self, correct_spawn_locations_after_run=0, correct_spawn_locations=None, **kwargs):
        super().__init__()
        self.correct_spawn_locations_after_run = correct_spawn_locations_after_run
        self.correct_spawn_locations = correct_spawn_locations

    def _eval(self, problem, pop, **kwargs):
        # Delegate the actual objective evaluation to the base class.
        super()._eval(problem, pop, **kwargs)
        if self.correct_spawn_locations_after_run:
            # Bug fix: the original referenced an undefined loop index `i`
            # (NameError at runtime). Apply the correction to every
            # evaluated individual instead.
            for ind in pop:
                correct_spawn_locations_all(ind.X, problem.labels)
def run_nsga2_dt(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation):
    """Run the decision-tree-guided GA (nsga2-dt / nsga2-un-dt).

    Alternates between running the GA (``run_ga``) and fitting a decision
    tree (``filter_critical_regions``) that restricts the next GA round to
    critical regions of the search space. Per-iteration results are finally
    concatenated and written to ``data.pickle`` under the parent folder.
    """
    end_when_no_critical_region = True
    cumulative_info = None
    X_filtered = None
    F_filtered = None
    X = None
    y = None
    F = None
    labels = None
    estimator = None
    critical_unique_leaves = None

    now = datetime.now()
    dt_time_str = now.strftime("%Y_%m_%d_%H_%M_%S")

    # Optional warm start: load a previous run's data and fit the initial
    # decision tree over its critical regions.
    if fuzzing_arguments.warm_up_path:
        subfolders = get_sorted_subfolders(fuzzing_arguments.warm_up_path)
        X, _, objectives_list, _, _, _ = load_data(subfolders)
        if fuzzing_arguments.warm_up_len > 0:
            X = X[:fuzzing_arguments.warm_up_len]
            objectives_list = objectives_list[:fuzzing_arguments.warm_up_len]
        y = determine_y_upon_weights(objectives_list, fuzzing_arguments.objective_weights)
        F = get_F(objectives_list, objectives_list, fuzzing_arguments.objective_weights, fuzzing_arguments.use_single_objective)
        estimator, inds, critical_unique_leaves = filter_critical_regions(np.array(X), y)
        X_filtered = np.array(X)[inds]
        F_filtered = F[inds]

    for i in range(fuzzing_arguments.outer_iterations):
        dt_time_str_i = dt_time_str
        dt = True
        # First iteration without warm-up, or an all-benign label vector,
        # falls back to GA sampling without the tree guidance.
        if (i == 0 and not fuzzing_arguments.warm_up_path) or np.sum(y) == 0:
            dt = False
        dt_arguments = emptyobject(
            call_from_dt=True,
            dt=dt,
            X=X_filtered,
            F=F_filtered,
            estimator=estimator,
            critical_unique_leaves=critical_unique_leaves,
            dt_time_str=dt_time_str_i, dt_iter=i, cumulative_info=cumulative_info)

        X_new, y_new, F_new, _, labels, parent_folder, cumulative_info, all_pop_run_X, objective_list, objective_weights = run_ga(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation, dt_arguments=dt_arguments)

        if fuzzing_arguments.finish_after_has_run and cumulative_info['has_run'] > fuzzing_arguments.has_run_num:
            break
        if len(X_new) == 0:
            break
        if i == 0 and not fuzzing_arguments.warm_up_path:
            X = X_new
            y = y_new
            F = F_new
        else:
            X = np.concatenate([X, X_new])
            y = np.concatenate([y, y_new])
            F = np.concatenate([F, F_new])

        # Refit the decision tree on everything gathered so far and keep
        # only samples falling into critical leaves.
        estimator, inds, critical_unique_leaves = filter_critical_regions(X, y)
        # print(X, F, inds)
        X_filtered = X[inds]
        F_filtered = F[inds]
        if len(X_filtered) == 0 and end_when_no_critical_region:
            break

    # save running results summary in a pickle file
    x_list_all = []
    y_list_all = []
    objective_list_all = []
    data_d = None
    for i in range(fuzzing_arguments.outer_iterations):
        # Bug fix: os.path.join requires string components, so the iteration
        # index must be stringified. Also tolerate iterations that never ran
        # (the loop above may break early and leave no folder behind).
        iter_data_path = os.path.join(fuzzing_arguments.parent_folder, str(i), 'data.pickle')
        if not os.path.exists(iter_data_path):
            continue
        with open(iter_data_path, 'rb') as f_in:
            data_d = pickle.load(f_in)
        x_list_all.append(data_d['x_list'])
        y_list_all.append(data_d['y_list'])
        objective_list_all.append(data_d['objective_list'])
    if data_d is not None:
        # Reuse the last iteration's dict as the template for the merged summary.
        data_d['x_list'] = np.concatenate(x_list_all)
        data_d['y_list'] = np.concatenate(y_list_all)
        data_d['objective_list'] = np.concatenate(objective_list_all)
        with open(os.path.join(fuzzing_arguments.parent_folder, 'data.pickle'), 'wb') as f_out:
            pickle.dump(data_d, f_out)
def run_ga(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation, dt_arguments=None):
    """Set up and run one GA fuzzing campaign.

    Builds the problem, the mixed real/int genetic operators, the sampling
    strategy for the chosen algorithm, and the customized NSGA2 algorithm;
    runs it to termination; saves a ``data.pickle`` summary; and returns
    ``(X, y, F, objectives, labels, cur_parent_folder, cumulative_info,
    all_pop_run_X, objectives_list, objective_weights)``.

    ``dt_arguments`` carries decision-tree state when called from
    ``run_nsga2_dt``; when omitted, a default "not from dt" object is used.
    """
    if not dt_arguments:
        dt_arguments = emptyobject(
            call_from_dt=False,
            dt=False,
            X=None,
            F=None,
            estimator=None,
            critical_unique_leaves=None,
            dt_time_str=None, dt_iter=None, cumulative_info=None)

    if dt_arguments.call_from_dt:
        # dt outer loop controls total budget, so each inner run is bounded
        # by generation count.
        fuzzing_arguments.termination_condition = 'generations'
        if dt_arguments.dt and len(list(dt_arguments.X)) == 0:
            print('No critical leaves!!! Start from random sampling!!!')
            dt_arguments.dt = False
        time_str = dt_arguments.dt_time_str
    else:
        # Encode the main hyperparameters into the results folder name.
        now = datetime.now()
        p, c, th = fuzzing_arguments.check_unique_coeff
        time_str = now.strftime("%Y_%m_%d_%H_%M_%S")+','+'_'.join([str(fuzzing_arguments.pop_size), str(fuzzing_arguments.n_gen), fuzzing_arguments.rank_mode, str(fuzzing_arguments.has_run_num), 'coeff', str(p), str(c), str(th), 'only_unique', str(fuzzing_arguments.only_run_unique_cases)])

    # Results folder layout differs per simulator backend.
    if fuzzing_arguments.simulator == 'no_simulation_function':
        cur_parent_folder = make_hierarchical_dir([fuzzing_arguments.root_folder, fuzzing_arguments.algorithm_name, fuzzing_arguments.synthetic_function, time_str])
    elif fuzzing_arguments.simulator == 'no_simulation_dataset':
        cur_parent_folder = make_hierarchical_dir([fuzzing_arguments.root_folder, fuzzing_arguments.algorithm_name, time_str])
    else:
        cur_parent_folder = make_hierarchical_dir([fuzzing_arguments.root_folder, fuzzing_arguments.algorithm_name, fuzzing_arguments.route_type, fuzzing_arguments.scenario_type, fuzzing_arguments.ego_car_model, time_str])

    if dt_arguments.call_from_dt:
        # one sub-folder per dt outer iteration
        parent_folder = make_hierarchical_dir([cur_parent_folder, str(dt_arguments.dt_iter)])
    else:
        parent_folder = cur_parent_folder

    fuzzing_arguments.parent_folder = parent_folder
    fuzzing_arguments.mean_objectives_across_generations_path = os.path.join(parent_folder, 'mean_objectives_across_generations.txt')

    problem = MyProblem(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation, dt_arguments)

    # deal with real and int separately
    crossover = MixedVariableCrossover(problem.mask, {
        "real": get_crossover("real_sbx", prob=0.8, eta=5),
        "int": get_crossover("int_sbx", prob=0.8, eta=5)
    })

    # hack: changed from int(prob=0.05*problem.n_var) to prob=0.4
    if fuzzing_arguments.algorithm_name in ['avfuzzer']:
        mutation_prob = 0.4
    else:
        # NOTE(review): int(0.05*n_var) is an integer, not a probability --
        # it is 0 for n_var < 20; confirm this is the intended behavior of
        # MixedVariableMutation's `prob` parameter.
        mutation_prob = int(0.05*problem.n_var)
    mutation = MixedVariableMutation(problem.mask, {
        "real": get_mutation("real_pm", eta=5, prob=mutation_prob),
        "int": get_mutation("int_pm", eta=5, prob=mutation_prob)
    })

    selection = TournamentSelection(func_comp=binary_tournament)
    repair = ClipRepair()
    eliminate_duplicates = NoDuplicateElimination()

    mating = MyMatingVectorized(selection,
                                crossover,
                                mutation,
                                fuzzing_arguments.use_unique_bugs,
                                fuzzing_arguments.emcmc,
                                fuzzing_arguments.mating_max_iterations,
                                repair=repair,
                                eliminate_duplicates=eliminate_duplicates)

    # extra mating methods for avfuzzer
    local_mutation = MixedVariableMutation(problem.mask, {
        "real": get_mutation("real_pm", eta=5, prob=0.6),
        "int": get_mutation("int_pm", eta=5, prob=0.6)
    })
    local_mating = MyMatingVectorized(selection,
                                      crossover,
                                      local_mutation,
                                      fuzzing_arguments.use_unique_bugs,
                                      fuzzing_arguments.emcmc,
                                      fuzzing_arguments.mating_max_iterations,
                                      repair=repair,
                                      eliminate_duplicates=eliminate_duplicates)

    random_sampling = MySamplingVectorized(random_seed=fuzzing_arguments.random_seed, use_unique_bugs=False, check_unique_coeff=problem.check_unique_coeff, sample_multiplier=fuzzing_arguments.sample_multiplier)

    # For grid search
    if fuzzing_arguments.algorithm_name == 'grid':
        from carla_specific_utils.grid import grid_dict_dict
        assert fuzzing_arguments.grid_dict_name
        grid_start_index = fuzzing_arguments.grid_start_index
        grid_dict = grid_dict_dict[fuzzing_arguments.grid_dict_name]
        sampling = GridSampling(random_seed=fuzzing_arguments.random_seed, grid_start_index=grid_start_index, grid_dict=grid_dict)
    elif fuzzing_arguments.algorithm_name == 'random_local_sphere':
        sampling = RandomDirectionSampling(random_seed=fuzzing_arguments.random_seed, chosen_labels=fuzzing_arguments.chosen_labels)
    else:
        sampling = MySamplingVectorized(random_seed=fuzzing_arguments.random_seed, use_unique_bugs=fuzzing_arguments.use_unique_bugs, check_unique_coeff=problem.check_unique_coeff, sample_multiplier=fuzzing_arguments.sample_multiplier)

    algorithm = NSGA2_CUSTOMIZED(dt=dt_arguments.dt, X=dt_arguments.X, F=dt_arguments.F, fuzzing_arguments=fuzzing_arguments, random_sampling=random_sampling, local_mating=local_mating, sampling=sampling,
                                 crossover=crossover,
                                 mutation=mutation,
                                 eliminate_duplicates=eliminate_duplicates,
                                 repair=repair,
                                 mating=mating)

    # close simulator(s)
    atexit.register(exit_handler, fuzzing_arguments.ports)

    if fuzzing_arguments.termination_condition == 'generations':
        termination = ('n_gen', fuzzing_arguments.n_gen)
    elif fuzzing_arguments.termination_condition == 'max_time':
        termination = ('time', fuzzing_arguments.max_running_time)
    else:
        # unrecognized condition: default to generation count
        termination = ('n_gen', fuzzing_arguments.n_gen)
    termination = get_termination(*termination)

    # Simulator-specific hook for fixing up spawn locations (only some
    # backends define these attributes).
    if hasattr(sim_specific_arguments, 'correct_spawn_locations_after_run'):
        correct_spawn_locations_after_run = sim_specific_arguments.correct_spawn_locations_after_run
        correct_spawn_locations = sim_specific_arguments.correct_spawn_locations
    else:
        correct_spawn_locations_after_run = False
        correct_spawn_locations = None

    # initialize the algorithm object given a problem
    algorithm.initialize(problem, termination=termination, seed=0,
                         verbose=False,
                         save_history=False,
                         evaluator=MyEvaluator(correct_spawn_locations_after_run=correct_spawn_locations_after_run, correct_spawn_locations=correct_spawn_locations))

    # actually execute the algorithm
    algorithm.solve()

    print('We have found', len(problem.bugs), 'bugs in total.')

    # save running results summary in a pickle file
    # print('np.array(problem.x_list).shape', np.array(problem.x_list).shape)
    # print('np.array(problem.objectives_list).shape', np.array(problem.objectives_list).shape)
    data_d = {
        'x_list': np.array(problem.x_list),
        'objective_list': np.array(problem.objectives_list),
        'y_list': np.array(problem.y_list),
        'labels': np.array(problem.labels),
        'xl': np.array(problem.xl),
        'xu': np.array(problem.xu),
        'mask': np.array(problem.mask),
        'parameters_min_bounds': problem.parameters_min_bounds,
        'parameters_max_bounds': problem.parameters_max_bounds,
    }
    with open(os.path.join(fuzzing_arguments.parent_folder, 'data.pickle'), 'wb') as f_out:
        pickle.dump(data_d, f_out)

    # additional saving for random_local_sphere
    # if fuzzing_arguments.algorithm_name in ['random_local_sphere']:
    #     with open(os.path.join(fuzzing_arguments.parent_folder, 'spheres.pickle'), 'wb') as f_out:
    #         print('len(algorithm.sampling.spheres[0].members', len(algorithm.sampling.spheres[0].members))
    #         print('len(algorithm.sampling.spheres[1].members', len(algorithm.sampling.spheres[1].members))
    #         pickle.dump(algorithm.sampling.spheres, f_out)

    # Collect per-run arrays; empty lists when no simulation ever ran.
    if len(problem.x_list) > 0:
        X = np.stack(problem.x_list)
        F = np.concatenate(problem.F_list)
        objectives = np.stack(problem.objectives_list)
    else:
        X = []
        F = []
        objectives = []

    y = np.array(problem.y_list)
    time_list = np.array(problem.time_list)
    bugs_num_list = np.array(problem.bugs_num_list)
    unique_bugs_num_list = np.array(problem.unique_bugs_num_list)
    labels = problem.labels
    has_run = problem.has_run
    has_run_list = problem.has_run_list

    mask = problem.mask
    xl = problem.xl
    xu = problem.xu
    p = problem.p
    c = problem.c
    th = problem.th

    # State handed back to the dt outer loop so the next inner run can
    # continue counting bugs/time from where this one stopped.
    cumulative_info = {
        'has_run': problem.has_run,
        'start_time': problem.start_time,
        'counter': problem.counter,
        'time_list': problem.time_list,
        'bugs': problem.bugs,
        'unique_bugs': problem.unique_bugs,
        'interested_unique_bugs': problem.interested_unique_bugs,
        'bugs_type_list': problem.bugs_type_list,
        'bugs_inds_list': problem.bugs_inds_list,
        'bugs_num_list': problem.bugs_num_list,
        'unique_bugs_num_list': problem.unique_bugs_num_list,
        'has_run_list': problem.has_run_list
    }

    return X, y, F, objectives, labels, cur_parent_folder, cumulative_info, algorithm.all_pop_run_X, problem.objectives_list, problem.objective_weights
def run_ga_general(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation):
    """Dispatch to the decision-tree-guided search for the *-dt algorithms,
    otherwise run the plain GA."""
    dt_algorithms = ('nsga2-un-dt', 'nsga2-dt')
    runner = run_nsga2_dt if fuzzing_arguments.algorithm_name in dt_algorithms else run_ga
    runner(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation)
if __name__ == '__main__':
    '''
    fuzzing_arguments: parameters needed for the fuzzing process, see argparse for details.

    sim_specific_arguments: parameters specific to the simulator used.

    fuzzing_content: a description of the search space.
        labels:
        mask:
        parameters_min_bounds:
        parameters_max_bounds:
        parameters_distributions:
        customized_constraints:
        customized_center_transforms:
        n_var:
        fixed_hyperparameters:
        search_space_info:

    run_simulation(x, fuzzing_content, fuzzing_arguments, sim_specific_arguments, ...) -> objectives, run_info: a simulation function specific to the simulator used.
        objectives:
        run_info:
    '''
    # NOTE(review): `fuzzing_arguments` is used below but is not defined in
    # this part of the file -- presumably parsed via argparse earlier in the
    # module; confirm against the full source.
    # Child processes use 'spawn' so simulator handles are not inherited.
    set_start_method('spawn')

    # Each simulator backend provides its own scenario config, fuzzing
    # content generator, bug-uniqueness helpers, and simulation entry point.
    if fuzzing_arguments.simulator == 'carla':
        from carla_specific_utils.scene_configs import customized_bounds_and_distributions
        from carla_specific_utils.setup_labels_and_bounds import generate_fuzzing_content
        from carla_specific_utils.carla_specific import run_carla_simulation, initialize_carla_specific, correct_spawn_locations_all, get_unique_bugs, choose_weight_inds, determine_y_upon_weights, get_all_y
        assert len(fuzzing_arguments.objective_weights) == 10
        fuzzing_arguments.objective_labels = ['ego_linear_speed', 'min_d', 'd_angle_norm', 'offroad_d', 'wronglane_d', 'dev_dist', 'is_offroad', 'is_wrong_lane', 'is_run_red_light', 'is_collision']
        customized_config = customized_bounds_and_distributions[fuzzing_arguments.scenario_type]
        fuzzing_content = generate_fuzzing_content(customized_config)
        sim_specific_arguments = initialize_carla_specific(fuzzing_arguments)
        run_simulation = run_carla_simulation
    elif fuzzing_arguments.simulator == 'svl':
        from svl_script.scene_configs import customized_bounds_and_distributions
        from svl_script.setup_labels_and_bounds import generate_fuzzing_content
        from svl_script.svl_specific import run_svl_simulation, initialize_svl_specific, get_unique_bugs, choose_weight_inds, determine_y_upon_weights, get_all_y
        assert fuzzing_arguments.ego_car_model in ['apollo_6_with_signal', 'apollo_6_modular', 'apollo_6_modular_2gt', 'apollo_6']
        assert fuzzing_arguments.route_type in ['BorregasAve_forward', 'BorregasAve_left', 'SanFrancisco_forward']
        assert fuzzing_arguments.scenario_type in ['default', 'turn_left_one_ped_and_one_vehicle', 'one_ped_crossing', 'go_across_junction_sf', 'go_across_junction_ba', 'one_angle_ped_crossing']
        assert len(fuzzing_arguments.objective_weights) == 10
        # The later fields are ignored for now
        fuzzing_arguments.objective_labels = ['ego_linear_speed', 'min_d', 'npc_collisions', 'diversity'] + ['']*6
        fuzzing_arguments.ports = [8181]
        fuzzing_arguments.root_folder = 'svl_script/run_results_svl'
        customized_config = customized_bounds_and_distributions[fuzzing_arguments.scenario_type]
        fuzzing_content = generate_fuzzing_content(customized_config)
        sim_specific_arguments = initialize_svl_specific(fuzzing_arguments)
        run_simulation = run_svl_simulation
    elif fuzzing_arguments.simulator == 'carla_op':
        # openpilot bridge lives in a sibling checkout
        sys.path.append('../openpilot')
        sys.path.append('../openpilot/tools/sim')
        from tools.sim.op_script.scene_configs import customized_bounds_and_distributions
        from tools.sim.op_script.setup_labels_and_bounds import generate_fuzzing_content
        from tools.sim.op_script.bridge_multiple_sync3 import run_op_simulation
        from tools.sim.op_script.op_specific import initialize_op_specific, get_unique_bugs, choose_weight_inds, determine_y_upon_weights, get_all_y, get_job_results
        fuzzing_arguments.sample_avoid_ego_position = 1
        assert fuzzing_arguments.route_type in ['Town04_Opt_left_highway', 'Town06_Opt_forward', 'Town04_Opt_forward_highway']
        # hack
        fuzzing_arguments.scenario_type = fuzzing_arguments.route_type
        fuzzing_arguments.root_folder = 'run_results_op'
        assert fuzzing_arguments.ego_car_model in ['op', 'op_radar', 'mathwork_in_lane', 'mathwork_all', 'mathwork_moving', 'best_sensor', 'ground_truth']
        assert len(fuzzing_arguments.objective_weights) == 7
        # fuzzing_arguments.objective_weights = np.array([1., 0., 0., 0., -1., -2., -1.])
        fuzzing_arguments.default_objectives = np.array([130., 0., 0., 1., 0., 0., 0.])
        fuzzing_arguments.objective_labels = ['min_d', 'collision', 'speed', 'd_angle_norm', 'is_bug', 'fusion_error_perc', 'diversity']
        customized_config = customized_bounds_and_distributions[fuzzing_arguments.scenario_type]
        fuzzing_content = generate_fuzzing_content(customized_config)
        sim_specific_arguments = initialize_op_specific(fuzzing_arguments)
        run_simulation = run_op_simulation
    elif fuzzing_arguments.simulator == 'no_simulation_dataset':
        from no_simulation_dataset_script.no_simulation_specific import generate_fuzzing_content, run_no_simulation, initialize_no_simulation_specific
        from no_simulation_dataset_script.no_simulation_objectives_and_bugs import get_unique_bugs, choose_weight_inds, determine_y_upon_weights, get_all_y
        assert fuzzing_arguments.no_simulation_data_path, 'no fuzzing_arguments.no_simulation_data_path is specified.'
        fuzzing_arguments.root_folder = 'no_simulation_dataset_script/run_results_no_simulation'
        # These need to be modified to fit one's requirements for objectives
        fuzzing_arguments.objective_weights = np.array([1., 1., 1., -1., 0., 0.])
        fuzzing_arguments.default_objectives = np.array([20., 1, 10, -1, 0, 0])
        fuzzing_arguments.objective_labels = ['min_dist', 'min_angle', 'min_ttc', 'collision_speed', 'collision', 'oob']
        scenario_labels = ['ego_pos', 'ego_init_speed', 'other_pos', 'other_init_speed', 'ped_delay', 'ped_init_speed']
        scenario_label_types = ['real']*len(scenario_labels)
        fuzzing_content = generate_fuzzing_content(fuzzing_arguments, scenario_labels, scenario_label_types)
        sim_specific_arguments = initialize_no_simulation_specific(fuzzing_arguments)
        run_simulation = run_no_simulation
    elif fuzzing_arguments.simulator == 'no_simulation_function':
        from no_simulation_function_script.no_simulation_specific import generate_fuzzing_content, run_no_simulation, initialize_no_simulation_specific
        from no_simulation_function_script.no_simulation_objectives_and_bugs import get_unique_bugs, choose_weight_inds, determine_y_upon_weights, get_all_y
        fuzzing_arguments.root_folder = 'no_simulation_function_script/run_results_no_simulation'
        fuzzing_arguments.no_simulation_data_path = ''
        # These fields need to be set to be consistent with the synthetic_function used
        fuzzing_arguments.objective_weights = np.array([1.])
        fuzzing_arguments.default_objectives = np.array([1.])
        fuzzing_arguments.objective_labels = ['surrogate_value']
        scenario_labels = ['x1', 'x2']
        scenario_label_types = ['real']*len(scenario_labels)
        min_bounds = [-1]*len(scenario_labels)
        max_bounds = [1]*len(scenario_labels)
        # synthetic function needs to be specified
        assert fuzzing_arguments.synthetic_function
        # used only when algorithm_name == 'random_local_sphere'
        # fuzzing_arguments.chosen_labels = ['x1', 'x2']
        fuzzing_content = generate_fuzzing_content(fuzzing_arguments, scenario_labels, scenario_label_types, min_bounds, max_bounds)
        sim_specific_arguments = initialize_no_simulation_specific(fuzzing_arguments)
        run_simulation = run_no_simulation
    else:
        # unknown simulator name; a bare raise here produces a RuntimeError
        raise

    run_ga_general(fuzzing_arguments, sim_specific_arguments, fuzzing_content, run_simulation)
|
ceo.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author : microfat
# @time : 01/26/21 20:18:18
# @File : factory.py
import sys
import logging
from multiprocessing import Process, cpu_count, Queue
import aioprocessing
from depts import purchasing_dept, production_dept, transportation_dept, operation_dept
from design import product_design
from raw_material import data
# Logger configuration: INFO-level console handler with an ANSI-colored
# (black text on white background) message format.
logger = logging.getLogger("Factory ")
logger.setLevel(logging.INFO)
_console_handler = logging.StreamHandler()
_console_handler.setFormatter(
    logging.Formatter("\x1b[30;47m[%(asctime)s] %(name)s:%(levelname)s: %(message)s\x1b[0m")
)
logger.addHandler(_console_handler)
if __name__ == "__main__":
# total production line according to cpu core
total_workshop = cpu_count() - 3
if total_workshop <= 0:
logger.warning(f"Not enough resource to create workshop")
sys.exit()
# every workshop has one production line(thread-safe synchronize message queue)
production_line = [Queue() for i in range(total_workshop)]
# all production lines share one transportation line(thread-safe asynchronous message queue)
transportation_line = aioprocessing.AioJoinableQueue()
# one purchasing dept(single process)
purchasing = Process(target=purchasing_dept.buyer, args=(production_line, data.test_data, total_workshop))
# one operation dept(single process)
operation = Process(target=operation_dept.ops, args=(production_line, total_workshop))
# multi workshop(multi production line) and different job position(multi processes + multi threads + multi coroutines)
logger.info(f"Total {total_workshop} workshop")
workshop_pool = []
for line_num, line in enumerate(production_line):
p = Process(target=production_dept.director, args=(line_num, line, transportation_line, product_design.blueprint))
p.start()
workshop_pool.append(p)
# one transportation dept and two different job position(sigle process + multi coroutine)
transportation = Process(target=transportation_dept.dispatcher, args=(transportation_line,))
# start every dept
purchasing.start()
operation.start()
transportation.start()
purchasing.join()
operation.join()
for workshop in workshop_pool:
workshop.join()
transportation.join() |
forsund.py | # -*- coding: utf-8 -*-
# 15/6/27
# create by: snower
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import argparse
import multiprocessing
import atexit
from ..forsun import config
from ..utils import is_py3
# Command-line interface for the forsun timing-scheduling service.
# Empty-string / zero defaults mean "not supplied": main() only overrides
# the config for flags whose value is truthy.
parser = argparse.ArgumentParser(description='High-performance timing scheduling service')
parser.add_argument('--conf', dest='conf', default="", help='conf filename')
parser.add_argument('--bind', dest='bind_host', default="", help='bind host (default: 127.0.0.1)')
parser.add_argument('--port', dest='bind_port', default=0, type=int, help='bind port (default: 6458)')
parser.add_argument('--http', dest='http_bind', default="", help='bind http server (default: ) example: 127.0.0.1:80')
parser.add_argument('--demon', dest='demon', nargs='?', const=True, default=False, type=bool, help='run daemon mode')
parser.add_argument('--nodemon', dest='nodemon', nargs='?', const=True, default=False, type=bool, help='run no daemon (foreground) mode')
parser.add_argument('--log', dest='log_file', default='', type=str, help='log file')
parser.add_argument('--log-level', dest='log_level', default='', type=str, help='log level (default: INFO)')
parser.add_argument('--driver', dest='driver', default='', type=str, help='store driver mem or redis (default: mem)')
parser.add_argument('--driver-mem-store-file', dest='store_mem_store_file', default='', type=str, help='store mem driver store file (default: ~/.forsun.dump)')
parser.add_argument('--driver-redis-host', dest='driver_redis_host', default='', type=str, help='store redis driver host (default: 127.0.0.1)')
parser.add_argument('--driver-redis-port', dest='driver_redis_port', default=0, type=int, help='store redis driver port (default: 6379)')
parser.add_argument('--driver-redis-db', dest='driver_redis_db', default=0, type=int, help='store redis driver db (default: 0)')
parser.add_argument('--driver-redis-password', dest='driver_redis_password', default='', type=str, help='store redis driver password (default: )')
parser.add_argument('--driver-redis-prefix', dest='driver_redis_prefix', default='', type=str, help='store redis driver key prefix (default: forsun)')
parser.add_argument('--driver-redis-server-id', dest='driver_redis_server_id', default=0, type=int, help='store redis driver server id (default: 0)')
parser.add_argument('--extension-path', dest='extension_path', default='', type=str, help='extension path')
# Bug fix: with action="append", argparse appends to the *default list
# object itself*, so default=[] accumulates values across repeated
# parse_args() calls. Use None (falsy, like []) as the default instead.
parser.add_argument('--extension', dest='extensions', default=None, action="append", type=str, help='extension name')
def main():
    """Parse CLI arguments, apply them to the global config, and start the
    forsun server either daemonized (in a child process) or in the
    foreground (``--nodemon``)."""
    args = parser.parse_args()
    if args.conf:
        try:
            config.load_conf(args.conf)
        except Exception as e:
            print("load conf file error ", str(e))
            exit(1)

    # CLI flags override conf-file values; empty string / zero means
    # "flag not supplied", so those config keys are left untouched.
    if args.log_file:
        config.set("LOG_FILE", args.log_file)
    if args.log_level:
        config.set("LOG_LEVEL", args.log_level)
    if args.bind_host:
        config.set("BIND_ADDRESS", args.bind_host)
    if args.bind_port:
        config.set("PORT", args.bind_port)
    if args.http_bind:
        config.set("HTTP_BIND", args.http_bind)
    if args.driver:
        config.set("STORE_DRIVER", args.driver)
    if args.store_mem_store_file:
        config.set("STORE_MEM_STORE_FILE", args.store_mem_store_file)
    if args.driver_redis_host:
        config.set("STORE_REDIS_HOST", args.driver_redis_host)
    if args.driver_redis_port:
        config.set("STORE_REDIS_PORT", args.driver_redis_port)
    if args.driver_redis_db:
        config.set("STORE_REDIS_DB", args.driver_redis_db)
    if args.driver_redis_password:
        config.set("STORE_REDIS_PASSWORD", args.driver_redis_password)
    if args.driver_redis_prefix:
        config.set("STORE_REDIS_PREFIX", args.driver_redis_prefix)
    if args.driver_redis_server_id:
        config.set("STORE_REDIS_SERVER_ID", args.driver_redis_server_id)
    if args.extension_path:
        config.set("EXTENSION_PATH", args.extension_path)
    if args.extensions:
        config.set("EXTENSIONS", args.extensions)

    if not args.nodemon:
        # Daemon mode: run the server in a child process and detach the
        # child's standard streams once it has started.
        from ..forsun import Forsun

        def on_start(forsun):
            # NOTE(review): closes over `p`, which is assigned below -- this
            # presumably relies on the 'fork' start method so the child sees
            # the already-assigned Process object; confirm it is never run
            # under 'spawn'.
            print("forsund started by pid %s" % p.pid)
            sys.stdin.close()
            sys.stdin = open(os.devnull)
            sys.stdout.close()
            sys.stdout = open(os.devnull, 'w')
            sys.stderr.close()
            sys.stderr = open(os.devnull, 'w')

        def run():
            # Child-process entry point: build and serve, exiting non-zero
            # on any startup failure.
            try:
                forsun = Forsun()
                forsun.serve(on_start)
            except Exception as e:
                print(e)
                exit(1)

        p = multiprocessing.Process(target = run, name=" ".join(sys.argv))
        p.start()
        # Drop inherited atexit handlers in the parent so it exits cleanly
        # without tearing down resources owned by the child.
        # NOTE(review): atexit._clear / _exithandlers are private CPython
        # APIs and may change between versions.
        if is_py3:
            atexit._clear()
        else:
            atexit._exithandlers = []
    else:
        # Foreground mode: serve in this process.
        try:
            from ..forsun import Forsun
            forsun = Forsun()
            forsun.serve()
        except Exception as e:
            print(e)
            exit(1)
if __name__ == "__main__":
main() |
index.py | # /usr/bin/env python2
# coding=utf-8
import os.path
import sys
import datetime
import traceback
import threading
# import socket
import SocketServer
import logging
import argparse
from enum import Enum
from dnslib import *
import random
import urllib
import json
from IPy import IP
from urlparse import urlparse
import re
import white_domain
from myrequests import requests_retry_session
class LogLevel(Enum):
    """Log-level choices accepted on the command line."""

    debug = 'DEBUG'
    info = 'INFO'
    warning = 'WARNING'
    error = 'ERROR'
    critical = 'CRITICAL'

    def __str__(self):
        # Shown by argparse in usage/choice listings.
        return str(self.value)
class Protocol(Enum):
    """Transport protocol the local DNS server listens on."""

    udp = 'udp'
    tcp = 'tcp'
    both = 'both'

    def __str__(self):
        # Shown by argparse in usage/choice listings.
        return str(self.value)
class IpVersion(Enum):
    """Preference order for probing IPv6 vs IPv4 upstream connectivity."""

    ipv6_ipv4 = '64'  # try IPv6 first, fall back to IPv4
    ipv4_ipv6 = '46'  # try IPv4 first, fall back to IPv6

    def __str__(self):
        # Shown by argparse in usage/choice listings.
        return str(self.value)
# Domains resolved directly through Chinese DNS upstreams (bypassing the
# DNS-over-HTTP path) -- loaded from the white_domain module.
white_domain_dict = white_domain.white_domain_dict
# Selected at startup by test_ip_version(): points at either the IPv4 or
# the IPv6 upstream list below.
dns_servers_in_prc = None
# Upstream entries use the form '<protocol>/<ip>/<port>'.
dns4_servers_in_prc = ['tcp/114.114.114.114/53', 'tcp/114.114.115.115/53', ]
dns6_servers_in_prc = ['tcp/240c::6666/53', 'tcp/240c::6644/53', ]
# Default remote DNS-over-HTTP endpoint (overridable via --server).
server = 'http://prudent-travels.000webhostapp.com/dns.php'
# User-Agent template; {0} is filled with a random number per request.
ua_format = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.{0}.181 Safari/537.36'
# Parsed command-line arguments; populated in the program entry point.
args = None
def get_expire_datetime(ttl):
    """Return the wall-clock time at which a record with *ttl* seconds
    expires; a non-positive TTL is treated as one year."""
    effective_ttl = ttl if ttl > 0 else 365 * 24 * 60 * 60
    return datetime.datetime.now() + datetime.timedelta(seconds=effective_ttl)
def get_inet_version(ip):
    """Return the socket address family for *ip*: AF_INET for an IPv4
    address, AF_INET6 otherwise."""
    return socket.AF_INET if IP(ip).version() == 4 else socket.AF_INET6
def _recv_exact(sock, num_bytes):
    """Read exactly *num_bytes* from *sock*; raise EOFError if the peer
    closes the connection before that many bytes arrive."""
    chunks = []
    remaining = num_bytes
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise EOFError('connection closed before full DNS reply was received')
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)

def query_over_tcp(proxy_request, ip, port):
    """Send *proxy_request* (a dnslib DNSRecord) to the DNS server at
    (*ip*, *port*) over TCP and return the raw reply message bytes.

    DNS-over-TCP frames each message with a 2-byte big-endian length
    prefix (RFC 1035 section 4.2.2) on both send and receive.
    """
    s = socket.socket(get_inet_version(ip), socket.SOCK_STREAM)
    try:
        s.connect((ip, port))
        q = proxy_request.pack()
        s.sendall(struct.pack(">H", len(q)) + q)
        # Bug fix: a single recv(1024) may return only part of the reply
        # (TCP is a stream). Read the length prefix first, then read
        # exactly that many bytes.
        (resp_len,) = struct.unpack(">H", _recv_exact(s, 2))
        return _recv_exact(s, resp_len)
    finally:
        # Bug fix: the original leaked the socket when connect/send/recv
        # raised; always close it.
        s.close()
def query_over_udp(proxy_request, ip, port):
    """Send *proxy_request* (a dnslib DNSRecord) to the DNS server at
    (*ip*, *port*) over UDP and return the raw reply datagram bytes.

    UDP DNS messages are not length-prefixed; the whole reply arrives in
    one datagram.
    """
    s = socket.socket(get_inet_version(ip), socket.SOCK_DGRAM)
    try:
        s.connect((ip, port))
        s.sendall(proxy_request.pack())
        # Bug fix: the original leaked the socket when connect/send/recv
        # raised; the try/finally guarantees it is closed.
        data = s.recv(1024)
    finally:
        s.close()
    return data
def query_over_http(qn, qt):
    """Resolve name *qn* with query type *qt* via the remote DNS-over-HTTP
    endpoint (``args.server``).

    Returns the parsed JSON response as a dict, or implicitly None when
    the HTTP request or decoding fails (the caller must handle None).
    """
    start_time = time.time()
    # EDNS client-subnet hint sent to the resolver; presumably a default
    # address near the intended user population -- TODO confirm.
    myip = '43.250.236.4'
    if args.myip is not None:
        myip = args.myip
    try:
        if args.proxy is None:
            # Direct mode: obfuscate the query parameters with base64 +
            # URL-encoding and expect a base64-encoded response body.
            name = urllib.quote(base64.b64encode(qn))
            t = urllib.quote(base64.b64encode(qt))
            ecs = urllib.quote(base64.b64encode(myip))
            r = requests_retry_session().get(url=args.server,
                                             params={'name': name, 'type': t, 'edns_client_subnet': ecs},
                                             headers={'User-Agent': ua_format.format(random.randint(1, 9999))},
                                             timeout=(3.05, 27))
            resp = base64.b64decode(r.text)
        else:
            # Proxy mode: plain parameters, plain-text JSON response.
            r = requests_retry_session().get(url=args.server,
                                             params={'name': qn, 'type': qt, 'edns_client_subnet': myip},
                                             headers={'User-Agent': ua_format.format(random.randint(1, 9999))},
                                             proxies={'http': args.proxy, 'https': args.proxy}, timeout=(3.05, 27))
            resp = r.text
        logging.info('Query DNS over http, url: %s', r.url)
        logging.debug('Query DNS over http, response: %s', resp)
        logging.debug("query_over_http executed --- %s seconds ---" % (time.time() - start_time))
        return json.loads(resp)
    except Exception as e:
        # Best-effort: log the failure and fall through, returning None.
        logging.warning("Query DNS over %s %s Error %s", args.server,
                        {'name': qn, 'type': qt, 'edns_client_subnet': args.myip},
                        e)
        logging.debug("query_over_http executed --- %s seconds ---" % (time.time() - start_time))
def query_cn_domain_by_domain(domain, cn_dns_list):
    """Resolve *domain* through one randomly chosen CN upstream server.

    Used as a connectivity probe by test_ip_version(); the parsed
    result is only logged, never returned.
    """
    probe = DNSRecord(q=DNSQuestion(domain))
    picked = random.choice(cn_dns_list)
    protocal, ip, port = picked.split('/')
    logging.debug('use random cn DNS server %s %s:%s', protocal, ip, port)
    if protocal == 'tcp':
        raw = query_over_tcp(probe, ip, int(port))
    else:
        raw = query_over_udp(probe, ip, int(port))
    parsed = DNSRecord.parse(raw)
    logging.debug('cn domain query result is %s', parsed)
def test_ip_version(domain='people.cn'):
    """Probe whether the CN upstream should be reached over IPv4 or IPv6.

    Side effects: sets the module-global dns_servers_in_prc and
    args.ipv6.  Explicit --cn/--cn6 lists bypass the probe entirely.

    Args:
        domain: a mainland-China domain used for the probe query.
    """
    logging.info('test network is ipv6 or ipv4')
    global dns_servers_in_prc
    if args.ip_version == IpVersion.ipv4_ipv6:
        # "46" mode: try IPv4 first.
        if args.cn:
            dns_servers_in_prc = args.cn
            args.ipv6 = False
        else:
            try:
                query_cn_domain_by_domain(domain, dns4_servers_in_prc)
                dns_servers_in_prc = dns4_servers_in_prc
                args.ipv6 = False
            except:
                # IPv4 probe failed; fall back to the IPv6 upstreams.
                dns_servers_in_prc = dns6_servers_in_prc
                args.ipv6 = True
    else:
        # IPv6-first mode: mirror image of the branch above.
        if args.cn6:
            dns_servers_in_prc = args.cn6
            args.ipv6 = True
        else:
            try:
                query_cn_domain_by_domain(domain, dns6_servers_in_prc)
                dns_servers_in_prc = dns6_servers_in_prc
                args.ipv6 = True
            except:
                dns_servers_in_prc = dns4_servers_in_prc
                args.ipv6 = False
    logging.info('cn dns upstream is %r', dns_servers_in_prc)
def query_cn_domain(dns_req):
    """Forward *dns_req* to a random CN upstream and return the reply.

    A fresh upstream request is built from the question so the original
    request's header/flags are not forwarded; the upstream's answer
    records are copied onto a reply to the original request.
    """
    upstream_req = DNSRecord(q=DNSQuestion(dns_req.q.qname, dns_req.q.qtype))
    protocal, ip, port = random.choice(dns_servers_in_prc).split('/')
    logging.debug('use random cn DNS server %s %s:%s', protocal, ip, port)
    if protocal == 'tcp':
        raw = query_over_tcp(upstream_req, ip, int(port))
    else:
        raw = query_over_udp(upstream_req, ip, int(port))
    dns_result = DNSRecord.parse(raw)
    logging.debug('cn domain query result is %s', dns_result)
    dns_reply = dns_req.reply()
    dns_reply.rr = dns_result.rr
    return dns_reply
def query_domain(dns_req):
    """Answer *dns_req* via DNS-over-HTTPS (query_over_http).

    Answer and Authority sections from the JSON result are converted
    back into resource records.  On lookup failure the reply carries
    rcode 2 (SERVFAIL).
    """
    qn = str(dns_req.q.qname)
    qt = dns_req.q.qtype
    qc = dns_req.q.qclass
    dns_reply = dns_req.reply()
    dns_result = query_over_http(qn, QTYPE[qt])
    if dns_result is None:
        dns_reply.header.rcode = 2  # SERVFAIL
        return dns_reply
    for section, add in (('Answer', dns_reply.add_answer),
                         ('Authority', dns_reply.add_auth)):
        for a in dns_result.get(section, []):
            # Look up the rdata class (A, AAAA, CNAME, ...) by type name.
            add(RR(a['name'], a['type'], qc, a['TTL'], globals()[QTYPE[a['type']]](a['data'])))
    return dns_reply
def get_root_domain(domain):
    """Return the two-label root of *domain* without the trailing dot.

    Example: 'www.example.com' -> 'example.com'.  Returns False when the
    name has fewer than two labels.  Note: this is a naive heuristic and
    does not understand multi-label public suffixes such as 'co.uk'.
    """
    fixed_domain = domain if domain.endswith('.') else domain + '.'
    # Raw string (the original non-raw '\.' triggers a DeprecationWarning
    # on modern Pythons); group 2 captures the last two labels.
    m = re.search(r'(.*\.)?([^.\n]+\.[^.\n]+\.)', fixed_domain)
    if m:
        return m.group(2)[:-1]
    return False
def is_domain_white_list(domain):
    """Return True when *domain* should be resolved via the CN upstream.

    Any '.cn.' name is implicitly whitelisted; other names are checked
    by root domain against white_domain_dict (loaded from the
    prc_domain file).  An unparseable root domain counts as whitelisted.
    """
    if not domain.endswith('.cn.'):
        root_domain = get_root_domain(domain)
        if root_domain and root_domain not in white_domain_dict:
            logging.debug("domain %s is not in white list", root_domain)
            return False
    logging.debug("domain %s is in white list", domain)
    return True
def is_server_cached():
    """Return True once an A or AAAA record for the DoH server is cached."""
    for key in (args.server_key_4, args.server_key_6):
        if key in args.server_cache and args.server_cache[key]['rdata']:
            return True
    return False
def dns_response(data):
    """Build a packed DNS reply for the raw request bytes *data*.

    Dispatch: answers for the DoH server's own name come from a local
    cache; whitelisted (CN) names go to the CN upstream; everything
    else goes out over HTTPS.  Malformed requests get rcode 2.
    """
    try:
        dns_req = DNSRecord.parse(data)
        logging.debug('Received DNS Request: %s', dns_req)
    except:
        logging.warning('Recieved Unknown %r', data)
        return DNSRecord().reply(2).pack()
    qname = dns_req.q.qname
    qn = str(qname)
    qtype = dns_req.q.qtype
    logging.debug('qtype %r', qtype)
    qt = QTYPE[qtype]
    # Cache key is "<name>@<type>", e.g. "example.com.@A".
    k = qn + '@' + qt
    if args.server_cache and k in args.server_cache and args.server_cache[k]['rdata']:
        dns_reply = dns_req.reply()
        dns_reply.rr = args.server_cache[k]['rdata']
        return dns_reply.pack()
    # No proxy: the DoH server's own domain must be resolvable locally,
    # so it is bootstrapped through the CN upstream and then cached.
    if args.proxy is None:
        logging.debug(args.server_cache)
        if not is_domain_white_list(qn) and args.server_cache and k not in args.server_cache and is_server_cached():
            logging.debug('use php server')
            dns_reply = query_domain(dns_req)
        else:
            dns_reply = query_cn_domain(dns_req)
            logging.debug('cache server result')
            if dns_reply.rr and k in args.server_cache:
                args.server_cache[k]['rdata'] = dns_reply.rr
                # Also pre-create cache slots for the server's CNAME
                # targets so follow-up queries for them are cached too.
                for r in dns_reply.rr:
                    logging.debug('server result %r', r)
                    if r.rname != qname and QTYPE[r.rtype] in ['A', 'AAAA']:
                        logging.debug('cache server cname result, %s', r.rname)
                        cname_k = str(r.rname) + '@' + QTYPE[r.rtype]
                        args.server_cache[cname_k] = {'rdata': None}
    # Proxy configured: the DoH endpoint is reached through the proxy,
    # so no bootstrap resolution of the server name is needed.
    else:
        if not is_domain_white_list(qn):
            dns_reply = query_domain(dns_req)
        else:
            dns_reply = query_cn_domain(dns_req)
    logging.debug("response DNS reply %s", dns_reply)
    return dns_reply.pack()
class MyBaseRequestHandler(SocketServer.BaseRequestHandler):
    """Shared DNS request handler; transport framing is left to subclasses."""
    def get_data(self):
        # Subclasses return the raw DNS message with framing stripped.
        raise NotImplementedError
    def send_data(self, data):
        # Subclasses transmit the packed DNS reply back to the client.
        raise NotImplementedError
    def handle(self):
        """Read one request, resolve it via dns_response, send the reply."""
        now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
        logging.debug("%s request %s (%s %s):" % (self.__class__.__name__[:3], now, self.client_address[0],
                                                  self.client_address[1]))
        try:
            data = self.get_data()
            self.send_data(dns_response(data))
        except Exception:
            # Keep the server alive on per-request failures; just log.
            traceback.print_exc(file=sys.stderr)
class TcpRequestHandler(MyBaseRequestHandler):
    """TCP transport: DNS messages carry a 2-byte big-endian length prefix."""
    def get_data(self):
        """Receive one length-prefixed DNS query and return its payload.

        Raises Exception when the declared length disagrees with the
        bytes actually received.
        """
        data = self.request.recv(1024).strip()
        # struct works on both Python 2 and 3; the original
        # data[:2].encode('hex') was Python-2-only and crashes on 3.
        sz = struct.unpack(">H", data[:2])[0]
        if sz < len(data) - 2:
            raise Exception("Wrong size of TCP packet")
        elif sz > len(data) - 2:
            raise Exception("Too big TCP packet")
        return data[2:]
    def send_data(self, data):
        """Send *data* prefixed with its 16-bit big-endian length."""
        return self.request.sendall(struct.pack(">H", len(data)) + data)
class UdpRequestHandler(MyBaseRequestHandler):
    """UDP transport: the datagram payload is the bare DNS message."""
    def get_data(self):
        # For UDP servers, self.request is a (data, socket) pair.
        return self.request[0].strip()
    def send_data(self, data):
        # Reply straight to the originating client address.
        return self.request[1].sendto(data, self.client_address)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server that handles each request in its own thread."""
    pass
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    """UDP server that handles each request in its own thread."""
    pass
def _validate_cn_servers(opt_name, value):
    """Validate a comma-separated 'proto/ip/port' upstream server list.

    Args:
        opt_name: option name used in error messages ('--cn' or '--cn6').
        value: raw comma-separated option value.

    Returns:
        The validated entries as a list of 'proto/ip/port' strings.

    Raises:
        ValueError: bad protocol or port (IP() raises for a bad address).
    """
    servers = []
    for entry in value.split(','):
        (proto, ip, port_str) = entry.split('/')
        if proto not in ['tcp', 'udp']:
            # The original cn6 loop reported these errors as '--cn';
            # using opt_name fixes that copy-paste bug.
            raise ValueError('{0} protocol must be one of tcp or udp'.format(opt_name))
        port = int(port_str)
        if port < 1 or port > 65535:
            raise ValueError('{0} port error'.format(opt_name))
        IP(ip)  # raises on a malformed address
        servers.append(entry)
    return servers


def _load_white_domains(path):
    """Merge non-comment lines of *path* into the global whitelist dict."""
    global white_domain_dict
    with open(path) as prc_domains:
        lines = prc_domains.readlines()
    for d in (l.strip() for l in lines if not l.strip().startswith('#')):
        if d and d not in white_domain_dict:
            white_domain_dict[d] = 1


def get_arg():
    """Parse command-line options into the module-global *args*.

    Side effects: configures logging, validates the --cn/--cn6 upstream
    lists, prepares the server-record cache (no proxy) or the proxy URL
    and Google endpoint (proxy), loads the CN-domain whitelist, and
    sanity-checks --myip.
    """
    parser = argparse.ArgumentParser(prog='prc-dns', description='google dns proxy.')
    parser.add_argument('-v', '--verbose', help='log out DEBUG', action="store_true")
    parser.add_argument('-H', '--host', help='listening IP,default 127.0.0.2', default='127.0.0.2')
    parser.add_argument('-P', '--port', help='listening Port,default 5333', type=int, default=5333)
    parser.add_argument('--log', help='Log Level,default ERROR', type=LogLevel, choices=list(LogLevel),
                        default=LogLevel.error)
    parser.add_argument('--tcp_udp', help='DNS protocol, tcp udp or both', type=Protocol, default=Protocol.udp)
    parser.add_argument('--myip', help='the Public IP v4 of client, will get it automatically', default=None)
    parser.add_argument('--ip_version',
                        help='The IP Version of NetWork, Enum(64=try ipv6 first,46=try ipv4 first),'
                             'Default 46',
                        default=IpVersion.ipv4_ipv6)
    parser.add_argument('--server', help='The Server proxy DNS Request', default=server)
    parser.add_argument('--cn',
                        help='The DNS Server for cn domain,default is "tcp/114.114.114/53,tcp/114.115.115/53"'
                             'set demo: "udp/180.76.76.76/53,udp/223.5.5.5/53"',
                        default=None)
    parser.add_argument('--cn6',
                        help='The DNS Server for cn domain,default is "tcp/240c::6666/53,tcp/240c::6644/53",'
                             'set demo: "udp/2a00:1450:4009:808::200e/53,udp/::1/53"',
                        default=None)
    parser.add_argument('--proxy',
                        help='The socks5 proxy for to DNS over HTTPS, option, if it is set, '
                             'use https://dns.google.com/ to query, --server will not use, '
                             'demo user:pass@host:port or host:port',
                        default=None)
    parser.add_argument('--prc_domain',
                        help='file contains domains in prc ',
                        default='conf/prc-domain.txt')
    global args
    args = parser.parse_args()
    # --verbose forces DEBUG regardless of --log.
    if args.verbose:
        args.log = 'DEBUG'
    numeric_level = getattr(logging, str(args.log).upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
    logging.basicConfig(format='%(asctime)s %(message)s', level=numeric_level)
    # Validate upstream server lists (shared helper removes the
    # duplicated cn/cn6 validation loops).
    if args.cn is not None:
        args.cn = _validate_cn_servers('--cn', args.cn)
    if args.cn6 is not None:
        args.cn6 = _validate_cn_servers('--cn6', args.cn6)
    if args.proxy is None:
        # Without a proxy, the DoH server's own A/AAAA records must be
        # bootstrapped: prepare an empty cache keyed by "<name>@<type>".
        if args.server is None:
            args.server = server
        parsed_uri = urlparse(args.server)
        args.server_domain = parsed_uri.hostname
        args.server_key_4 = parsed_uri.hostname + '.@A'
        args.server_key_6 = parsed_uri.hostname + '.@AAAA'
        args.server_cache = {
            args.server_key_4: {'rdata': None, },
            args.server_key_6: {'rdata': None, },
        }
    else:
        # With a SOCKS proxy, route everything to Google's JSON API.
        args.server_cache = None
        args.proxy = 'socks5:{0}'.format(args.proxy)
        args.server = 'https://dns.google.com/resolve'
    # Load the CN-domain whitelist, if the file exists.
    if os.path.exists(os.path.abspath(args.prc_domain)):
        _load_white_domains(os.path.abspath(args.prc_domain))
    if args.myip is not None:
        ip = IP(args.myip)
        if ip.iptype() == 'PRIVATE':
            raise ValueError('Invalid myip, it is a private IP, if you do not know what is it mean, leave it empty.')
        logging.info('your public IP v4 is %s', args.myip)
def start_tcp_server(host, port):
    """Launch the threaded TCP DNS listener in a daemon thread.

    Returns the server instance so the caller can shut it down.
    """
    tcp_server = ThreadedTCPServer((host, port), TcpRequestHandler)
    ip, port = tcp_server.server_address
    worker = threading.Thread(target=tcp_server.serve_forever)
    worker.daemon = True
    worker.start()
    print("DNS Server start running at tcp {}:{}".format(ip, port))
    return tcp_server
def start_udp_server(host, port, inet=socket.AF_INET):
    """Launch the threaded UDP DNS listener in a daemon thread.

    Args:
        inet: address family for the listening socket (default IPv4).

    Returns the server instance so the caller can shut it down.
    """
    udp_server = ThreadedUDPServer((host, port), UdpRequestHandler, inet)
    ip, port = udp_server.server_address
    worker = threading.Thread(target=udp_server.serve_forever)
    worker.daemon = True
    worker.start()
    print("DNS Server start running at udp {}:{}".format(ip, port))
    return udp_server
def main():
    """Program entry point: parse args, probe the network, serve DNS."""
    get_arg()
    host, port = args.host, args.port
    # Probe IPv6/IPv4 reachability and select the CN upstream list.
    test_ip_version('www.people.cn')
    # Pre-resolve and cache the DoH server's own record (28=AAAA, 1=A).
    if args.proxy is None:
        dns_response(DNSRecord(q=DNSQuestion(args.server_domain, 28 if args.ipv6 else 1)).pack())
    # Once the resolver can bootstrap itself, discover our public IP.
    if args.myip is None:
        resp = requests_retry_session().get(args.server, timeout=(3.05, 27))
        myip_data = resp.json()
        args.myip = myip_data['origin']
        logging.info('your public IP is %s', args.myip)
    if args.server_cache:
        logging.debug('server_info is %r', args.server_cache)
    servers = []
    try:
        if args.tcp_udp == Protocol.both:
            # UDP runs in a background thread; TCP blocks the main thread.
            # servers.append(start_tcp_server(host, port))
            servers.append(start_udp_server(host, port))
            tcp_server = ThreadedTCPServer((host, port), TcpRequestHandler)
            servers.append(tcp_server)
            tcp_server.serve_forever()
        elif args.tcp_udp == Protocol.tcp:
            # servers.append(start_tcp_server(host, port))
            tcp_server = ThreadedTCPServer((host, port), TcpRequestHandler)
            servers.append(tcp_server)
            tcp_server.serve_forever()
        else:
            # servers.append(start_udp_server(host, port))
            udp_server = ThreadedUDPServer((host, port), UdpRequestHandler, socket.AF_INET)
            servers.append(udp_server)
            udp_server.serve_forever()
        # sys.stdin.read()
    except Exception as e:
        logging.exception(e)
    finally:
        # Shut down every started server so sockets are released on exit.
        for s in servers:
            logging.info('Close socket server %s %s for exit', s.__class__.__name__[8:11], s.server_address)
            s.shutdown()
            s.server_close()
if __name__ == "__main__":
    main()
|
guestsnapshotter.py | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import urlparse as urlparser
except ImportError:
import urllib.parse as urlparser
import traceback
import datetime
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
import multiprocessing as mp
import datetime
from common import CommonVariables
from HttpUtil import HttpUtil
from Utils import Status
from Utils import HandlerUtil
from fsfreezer import FsFreezer
class SnapshotInfoIndexerObj():
    """Per-blob snapshot outcome, keyed by position in the request list."""
    def __init__(self, index, isSuccessful, snapshotTs, errorMessage):
        # index: position of the blob in the original SAS-URI list.
        self.index = index
        # isSuccessful: True once the snapshot REST call returned 200/201.
        self.isSuccessful = isSuccessful
        # snapshotTs: snapshot timestamp reported by the service.
        self.snapshotTs = snapshotTs
        # errorMessage: HTTP status (or text) when the call failed.
        self.errorMessage = errorMessage
    def __str__(self):
        parts = ('index: ' + str(self.index),
                 'isSuccessful: ' + str(self.isSuccessful),
                 'snapshotTs: ' + str(self.snapshotTs),
                 'errorMessage: ' + str(self.errorMessage))
        return ' '.join(parts)
class SnapshotError(object):
    """Outcome of one snapshot attempt: an error code plus the SAS URI."""
    def __init__(self):
        # Start in the success state; callers flip errorcode on failure.
        self.errorcode = CommonVariables.success
        # SAS URI of the blob the error relates to (None until set).
        self.sasuri = None
    def __str__(self):
        return 'errorcode: ' + str(self.errorcode)
class SnapshotResult(object):
    """Aggregate of SnapshotError objects collected across all blobs."""
    def __init__(self):
        self.errors = []
    def __str__(self):
        # One line per recorded error.
        return ''.join(str(err) + "\n" for err in self.errors)
class GuestSnapshotter(object):
    """Takes blob snapshots from inside the guest via the storage REST API.

    Each blob SAS URI receives a PUT with '&comp=snapshot' appended.
    Snapshots run either sequentially (snapshot_seq/snapshotall_seq) or
    one multiprocessing.Process per blob (snapshot/snapshotall_parallel);
    snapshotall() picks the strategy from config and blob count.
    """
    def __init__(self, logger):
        self.logger = logger
        self.configfile = '/etc/azure/vmbackup.conf'
    def snapshot(self, sasuri, sasuri_index, meta_data, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger):
        """Child-process worker: snapshot one blob and report via queues.

        Log text is accumulated in local strings and pushed through the
        multiprocessing queues because this runs in a separate process.
        """
        temp_logger = ''
        error_logger = ''
        snapshot_error = SnapshotError()
        snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
        if(sasuri is None):
            error_logger = error_logger + str(datetime.datetime.now()) + " Failed to do the snapshot because sasuri is none "
            snapshot_error.errorcode = CommonVariables.error
            snapshot_error.sasuri = sasuri
        try:
            sasuri_obj = urlparser.urlparse(sasuri)
            if(sasuri_obj is None or sasuri_obj.hostname is None):
                error_logger = error_logger + str(datetime.datetime.now()) + " Failed to parse the sasuri "
                snapshot_error.errorcode = CommonVariables.error
                snapshot_error.sasuri = sasuri
            else:
                start_time = datetime.datetime.utcnow()
                body_content = ''
                headers = {}
                headers["Content-Length"] = '0'
                # Forward backup metadata as x-ms-meta-* headers so it is
                # stored on the snapshot.
                if(meta_data is not None):
                    for meta in meta_data:
                        key = meta['Key']
                        value = meta['Value']
                        headers["x-ms-meta-" + key] = value
                temp_logger = temp_logger + str(headers)
                http_util = HttpUtil(self.logger)
                sasuri_obj = urlparser.urlparse(sasuri + '&comp=snapshot')
                temp_logger = temp_logger + str(datetime.datetime.now()) + ' start calling the snapshot rest api. '
                # initiate http call for blob-snapshot and get http response
                result, httpResp, errMsg, responseBody = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers, responseBodyRequired = True)
                temp_logger = temp_logger + str("responseBody: " + responseBody)
                if(result == CommonVariables.success and httpResp != None):
                    # retrieve snapshot information from http response
                    snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri)
                    temp_logger = temp_logger + str(datetime.datetime.now()) + ' httpresponse_get_snapshot_info message: ' + str(message)
                else:
                    # HttpCall failed
                    error_logger = error_logger + str(datetime.datetime.now()) + " snapshot HttpCallGetResponse failed "
                    error_logger = error_logger + str(datetime.datetime.now()) + str(errMsg)
                    snapshot_error.errorcode = CommonVariables.error
                    snapshot_error.sasuri = sasuri
                end_time = datetime.datetime.utcnow()
                time_taken = end_time - start_time
                temp_logger = temp_logger + str(datetime.datetime.now()) + ' time taken for snapshot ' + str(time_taken)
        except Exception as e:
            errorMsg = " Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            error_logger = error_logger + str(datetime.datetime.now()) + errorMsg
            snapshot_error.errorcode = CommonVariables.error
            snapshot_error.sasuri = sasuri
        temp_logger = temp_logger + str(datetime.datetime.now()) + ' snapshot ends..'
        global_logger.put(temp_logger)
        global_error_logger.put(error_logger)
        snapshot_result_error.put(snapshot_error)
        snapshot_info_indexer_queue.put(snapshot_info_indexer)
    def snapshot_seq(self, sasuri, sasuri_index, meta_data):
        """In-process variant of snapshot(); logs via self.logger.

        Returns:
            (SnapshotError, SnapshotInfoIndexerObj) for this blob.
        """
        result = None
        snapshot_error = SnapshotError()
        snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
        if(sasuri is None):
            self.logger.log("Failed to do the snapshot because sasuri is none", False, 'Error')
            snapshot_error.errorcode = CommonVariables.error
            snapshot_error.sasuri = sasuri
        try:
            sasuri_obj = urlparser.urlparse(sasuri)
            if(sasuri_obj is None or sasuri_obj.hostname is None):
                self.logger.log("Failed to parse the sasuri", False, 'Error')
                snapshot_error.errorcode = CommonVariables.error
                snapshot_error.sasuri = sasuri
            else:
                body_content = ''
                headers = {}
                headers["Content-Length"] = '0'
                if(meta_data is not None):
                    for meta in meta_data:
                        key = meta['Key']
                        value = meta['Value']
                        headers["x-ms-meta-" + key] = value
                self.logger.log(str(headers))
                http_util = HttpUtil(self.logger)
                sasuri_obj = urlparser.urlparse(sasuri + '&comp=snapshot')
                self.logger.log("start calling the snapshot rest api")
                # initiate http call for blob-snapshot and get http response
                result, httpResp, errMsg, responseBody = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers, responseBodyRequired = True)
                self.logger.log("responseBody: " + responseBody)
                if(result == CommonVariables.success and httpResp != None):
                    # retrieve snapshot information from http response
                    snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri)
                    self.logger.log(' httpresponse_get_snapshot_info message: ' + str(message))
                else:
                    # HttpCall failed
                    self.logger.log(" snapshot HttpCallGetResponse failed ")
                    self.logger.log(str(errMsg))
                    snapshot_error.errorcode = CommonVariables.error
                    snapshot_error.sasuri = sasuri
        except Exception as e:
            errorMsg = "Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.logger.log(errorMsg, False, 'Error')
            snapshot_error.errorcode = CommonVariables.error
            snapshot_error.sasuri = sasuri
        return snapshot_error, snapshot_info_indexer
    def snapshotall_parallel(self, paras, freezer, thaw_done, g_fsfreeze_on):
        """Snapshot every blob concurrently, one child process per blob.

        Returns an 8-tuple: (snapshot_result, snapshot_info_array,
        all_failed, exceptOccurred, is_inconsistent, thaw_done_local,
        unable_to_sleep, all_snapshots_failed).
        """
        self.logger.log("doing snapshotall now in parallel...")
        snapshot_result = SnapshotResult()
        snapshot_info_array = []
        all_failed = True
        exceptOccurred = False
        is_inconsistent = False
        thaw_done_local = thaw_done
        unable_to_sleep = False
        all_snapshots_failed = False
        try:
            mp_jobs = []
            global_logger = mp.Queue()
            global_error_logger = mp.Queue()
            snapshot_result_error = mp.Queue()
            snapshot_info_indexer_queue = mp.Queue()
            time_before_snapshot_start = datetime.datetime.now()
            blobs = paras.blobs
            if blobs is not None:
                # initialize snapshot_info_array and fork one worker per blob
                mp_jobs = []
                blob_index = 0
                for blob in blobs:
                    blobUri = blob.split("?")[0]
                    self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                    snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
                    mp_jobs.append(mp.Process(target=self.snapshot, args=(blob, blob_index, paras.backup_metadata, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger)))
                    blob_index = blob_index + 1
                for job in mp_jobs:
                    job.start()
                time_after_snapshot_start = datetime.datetime.now()
                # NOTE(review): timeout is read from config but not passed
                # to job.join() below — confirm whether that is intended.
                timeout = self.get_value_from_configfile('timeout')
                if timeout == None:
                    timeout = 60
                for job in mp_jobs:
                    job.join()
                thaw_result = None
                if g_fsfreeze_on and thaw_done_local == False:
                    time_before_thaw = datetime.datetime.now()
                    thaw_result, unable_to_sleep = freezer.thaw_safe()
                    time_after_thaw = datetime.datetime.now()
                    HandlerUtil.HandlerUtility.add_to_telemetery_data("ThawTime", str(time_after_thaw - time_before_thaw))
                    thaw_done_local = True
                    self.logger.log('T:S thaw result ' + str(thaw_result))
                    if(thaw_result is not None and len(thaw_result.errors) > 0 and (snapshot_result is None or len(snapshot_result.errors) == 0)):
                        is_inconsistent = True
                        snapshot_result.errors.append(thaw_result.errors)
                        return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
                self.logger.log('end of snapshot process')
                # Drain the per-process queues (one entry per worker).
                job_logs = [global_logger.get() for job in mp_jobs]
                self.logger.log(str(job_logs))
                job_error_logs = [global_error_logger.get() for job in mp_jobs]
                self.logger.log(job_error_logs, False, 'Error')
                if not snapshot_result_error.empty():
                    results = [snapshot_result_error.get() for job in mp_jobs]
                    for result in results:
                        if(result.errorcode != CommonVariables.success):
                            snapshot_result.errors.append(result)
                if not snapshot_info_indexer_queue.empty():
                    snapshot_info_indexers = [snapshot_info_indexer_queue.get() for job in mp_jobs]
                    for snapshot_info_indexer in snapshot_info_indexers:
                        # update snapshot_info_array element properties from snapshot_info_indexer object
                        self.get_snapshot_info(snapshot_info_indexer, snapshot_info_array[snapshot_info_indexer.index])
                        if (snapshot_info_array[snapshot_info_indexer.index].isSuccessful == True):
                            all_failed = False
                        self.logger.log("index: " + str(snapshot_info_indexer.index) + " blobSnapshotUri: " + str(snapshot_info_array[snapshot_info_indexer.index].snapshotUri))
                all_snapshots_failed = all_failed
                self.logger.log("Setting all_snapshots_failed to " + str(all_snapshots_failed))
                return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
            else:
                self.logger.log("the blobs are None")
                # BUG FIX: this branch previously returned a 7-tuple while
                # snapshotall() unpacks 8 values, raising ValueError.
                return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
        except Exception as e:
            errorMsg = " Unable to perform parallel snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.logger.log(errorMsg)
            exceptOccurred = True
            return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
    def snapshotall_seq(self, paras, freezer, thaw_done, g_fsfreeze_on):
        """Snapshot every blob one at a time in this process.

        Returns the same 8-tuple shape as snapshotall_parallel().
        """
        exceptOccurred = False
        self.logger.log("doing snapshotall now in sequence...")
        snapshot_result = SnapshotResult()
        snapshot_info_array = []
        all_failed = True
        is_inconsistent = False
        thaw_done_local = thaw_done
        unable_to_sleep = False
        all_snapshots_failed = False
        try:
            blobs = paras.blobs
            if blobs is not None:
                blob_index = 0
                for blob in blobs:
                    blobUri = blob.split("?")[0]
                    self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                    snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
                    snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.backup_metadata)
                    if(snapshotError.errorcode != CommonVariables.success):
                        snapshot_result.errors.append(snapshotError)
                    # update snapshot_info_array element properties from snapshot_info_indexer object
                    self.get_snapshot_info(snapshot_info_indexer, snapshot_info_array[blob_index])
                    if (snapshot_info_array[blob_index].isSuccessful == True):
                        all_failed = False
                    blob_index = blob_index + 1
                all_snapshots_failed = all_failed
                self.logger.log("Setting all_snapshots_failed to " + str(all_snapshots_failed))
                thaw_result = None
                if g_fsfreeze_on and thaw_done_local == False:
                    time_before_thaw = datetime.datetime.now()
                    thaw_result, unable_to_sleep = freezer.thaw_safe()
                    time_after_thaw = datetime.datetime.now()
                    HandlerUtil.HandlerUtility.add_to_telemetery_data("ThawTime", str(time_after_thaw - time_before_thaw))
                    thaw_done_local = True
                    self.logger.log('T:S thaw result ' + str(thaw_result))
                    if(thaw_result is not None and len(thaw_result.errors) > 0 and (snapshot_result is None or len(snapshot_result.errors) == 0)):
                        snapshot_result.errors.append(thaw_result.errors)
                        is_inconsistent = True
                return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
            else:
                self.logger.log("the blobs are None")
                # BUG FIX: this branch previously returned a 7-tuple while
                # snapshotall() unpacks 8 values, raising ValueError.
                return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
        except Exception as e:
            errorMsg = " Unable to perform sequential snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.logger.log(errorMsg)
            exceptOccurred = True
            return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
    def get_value_from_configfile(self, key):
        """Read *key* from the [SnapshotThread] section of vmbackup.conf.

        Returns the value string, or None when the file/key is absent or
        unreadable.
        """
        value = None
        configfile = '/etc/azure/vmbackup.conf'
        try:
            if os.path.exists(configfile):
                config = ConfigParsers.ConfigParser()
                config.read(configfile)
                if config.has_option('SnapshotThread', key):
                    value = config.get('SnapshotThread', key)
                else:
                    self.logger.log("Config File doesn't have the key :" + key)
        except Exception as e:
            # (typo "ed" -> "read" fixed in this log message)
            errorMsg = " Unable to read config file. key is " + key + " with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.logger.log(errorMsg)
        return value
    def snapshotall(self, paras, freezer, g_fsfreeze_on):
        """Snapshot all blobs, choosing sequential or parallel mode.

        Sequential when configured ('doseq' == '1') or for 4 or fewer
        blobs; falls back to sequential if the parallel path raised
        before thawing and every snapshot failed.
        """
        thaw_done = False
        if (self.get_value_from_configfile('doseq') == '1') or (len(paras.blobs) <= 4):
            snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_seq(paras, freezer, thaw_done, g_fsfreeze_on)
        else:
            snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_parallel(paras, freezer, thaw_done, g_fsfreeze_on)
        self.logger.log("exceptOccurred : " + str(exceptOccurred) + " thaw_done : " + str(thaw_done) + " all_snapshots_failed : " + str(all_snapshots_failed))
        if exceptOccurred and thaw_done == False and all_snapshots_failed:
            self.logger.log("Trying sequential snapshotting as parallel snapshotting failed")
            snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_seq(paras, freezer, thaw_done, g_fsfreeze_on)
        return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep, all_snapshots_failed
    def httpresponse_get_snapshot_info(self, resp, sasuri_index, sasuri):
        """Extract snapshot status from the HTTP response.

        200/201 is success and the snapshot timestamp is taken from the
        'x-ms-snapshot' header; anything else records the status as the
        error.  Returns (SnapshotInfoIndexerObj, SnapshotError, message).
        """
        snapshot_error = SnapshotError()
        snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
        result = CommonVariables.error_http_failure
        message = ""
        if(resp != None):
            message = message + str(datetime.datetime.now()) + " snapshot resp status: " + str(resp.status) + " "
            resp_headers = resp.getheaders()
            message = message + str(datetime.datetime.now()) + " snapshot resp-header: " + str(resp_headers) + " "
            if(resp.status == 200 or resp.status == 201):
                result = CommonVariables.success
                snapshot_info_indexer.isSuccessful = True
                snapshot_info_indexer.snapshotTs = resp.getheader('x-ms-snapshot')
            else:
                result = resp.status
                snapshot_info_indexer.errorMessage = resp.status
        else:
            message = message + str(datetime.datetime.now()) + " snapshot Http connection response is None" + " "
        message = message + str(datetime.datetime.now()) + ' snapshot api returned: {0} '.format(result) + " "
        if(result != CommonVariables.success):
            snapshot_error.errorcode = result
            snapshot_error.sasuri = sasuri
        return snapshot_info_indexer, snapshot_error, message
    def get_snapshot_info(self, snapshot_info_indexer, snapshot_info):
        """Copy outcome from an indexer object onto a SnapshotInfoObj.

        On success the '?snapshot=<ts>' query is appended to the blob URI;
        on failure the URI is cleared and the error message copied over.
        """
        if (snapshot_info_indexer != None):
            self.logger.log("snapshot_info_indexer: " + str(snapshot_info_indexer))
            snapshot_info.isSuccessful = snapshot_info_indexer.isSuccessful
            if (snapshot_info.isSuccessful == True):
                snapshot_info.snapshotUri = snapshot_info.snapshotUri + "?snapshot=" + str(snapshot_info_indexer.snapshotTs)
            else:
                snapshot_info.snapshotUri = None
                snapshot_info.errorMessage = snapshot_info_indexer.errorMessage
        else:
            snapshot_info.isSuccessful = False
            snapshot_info.snapshotUri = None
|
helpers.py | """High-level functions to help perform complex tasks
"""
from __future__ import print_function, division
import os
import multiprocessing as mp
import warnings
from datetime import datetime
import platform
import struct
import shutil
import copy
import numpy as np
import pandas as pd
import time
pd.options.display.max_colwidth = 100
from ..pyemu_warnings import PyemuWarning
try:
import flopy
except:
pass
import pyemu
from pyemu.utils.os_utils import run, start_workers
def geostatistical_draws(pst, struct_dict, num_reals=100, sigma_range=4, verbose=True,
                         scale_offset=True):
    """construct a parameter ensemble from a prior covariance matrix
    implied by geostatistical structure(s) and parameter bounds.

    Args:
        pst (`pyemu.Pst`): a control file (or the name of control file). The
            parameter bounds in `pst` are used to define the variance of each
            parameter group.
        struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
            pilot point template files pairs. If the values in the dict are
            `pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
            If the filename ends in '.csv', then a pd.DataFrame is loaded,
            otherwise a pilot points file is loaded.
        num_reals (`int`, optional): number of realizations to draw. Default is 100
        sigma_range (`float`): a float representing the number of standard deviations
            implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
        verbose (`bool`, optional): flag to control output to stdout. Default is True.
            flag for stdout.
        scale_offset (`bool`,optional): flag to apply scale and offset to parameter bounds
            when calculating variances - this is passed through to `pyemu.Cov.from_parameter_data()`.
            Default is True.

    Returns
        `pyemu.ParameterEnsemble`: the realized parameter ensemble.

    Note:
        parameters are realized by parameter group. The variance of each
        parameter group is used to scale the resulting geostatistical
        covariance matrix Therefore, the sill of the geostatistical structures
        in `struct_dict` should be 1.0

    Example::

        pst = pyemu.Pst("my.pst")
        sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
        pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd)
        pe.to_csv("my_pe.csv")

    """
    if isinstance(pst, str):
        pst = pyemu.Pst(pst)
    assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}". \
        format(type(pst))
    if verbose: print("building diagonal cov")
    full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range,
                                             scale_offset=scale_offset)
    full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
    par = pst.parameter_data
    par_ens = []
    pars_in_cov = set()
    # sort for a deterministic iteration order over the geostructs
    keys = list(struct_dict.keys())
    keys.sort()
    for gs in keys:
        items = struct_dict[gs]
        if verbose: print("processing ", gs)
        if isinstance(gs, str):
            gss = pyemu.geostats.read_struct_file(gs)
            if isinstance(gss, list):
                warnings.warn("using first geostat structure in file {0}". \
                              format(gs), PyemuWarning)
                gs = gss[0]
            else:
                gs = gss
        if gs.sill != 1.0:
            warnings.warn("GeoStruct {0} sill != 1.0 - this is bad!".format(gs.name))
        if not isinstance(items, list):
            items = [items]
        for item in items:
            if isinstance(item, str):
                assert os.path.exists(item), "file {0} not found". \
                    format(item)
                if item.lower().endswith(".tpl"):
                    df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
                # BUG FIX: 'item.lower' was missing the call parentheses,
                # which raised AttributeError for any '.csv' item.
                elif item.lower().endswith(".csv"):
                    df = pd.read_csv(item)
            else:
                df = item
            if "pargp" in df.columns:
                if verbose: print("working on pargroups {0}".format(df.pargp.unique().tolist()))
            for req in ['x', 'y', 'parnme']:
                if req not in df.columns:
                    raise Exception("{0} is not in the columns".format(req))
            # drop any pilot-point parameters not present in the control file
            missing = df.loc[df.parnme.apply(
                lambda x: x not in par.parnme), "parnme"]
            if len(missing) > 0:
                warnings.warn("the following parameters are not " + \
                              "in the control file: {0}". \
                              format(','.join(missing)), PyemuWarning)
                df = df.loc[df.parnme.apply(lambda x: x not in missing)]
            if "zone" not in df.columns:
                df.loc[:, "zone"] = 1
            zones = df.zone.unique()
            aset = set(pst.adj_par_names)
            for zone in zones:
                df_zone = df.loc[df.zone == zone, :].copy()
                df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
                if df_zone.shape[0] == 0:
                    warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),
                                  PyemuWarning)
                    continue
                df_zone.sort_index(inplace=True)
                if verbose: print("build cov matrix")
                cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
                if verbose: print("done")
                if verbose: print("getting diag var cov", df_zone.shape[0])
                tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
                if verbose: print("scaling full cov by diag var cov")
                # scale the geostatistical covariance by the largest
                # bound-implied variance within the zone
                for i in range(cov.shape[0]):
                    cov.x[i, :] *= tpl_var
                # no fixed values here
                pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst, cov=cov, num_reals=num_reals,
                                                                by_groups=False, fill=False)
                par_ens.append(pe._df)
                pars_in_cov.update(set(pe.columns))
    if verbose: print("adding remaining parameters to diagonal")
    fset = set(full_cov.row_names)
    diff = list(fset.difference(pars_in_cov))
    if (len(diff) > 0):
        name_dict = {name: i for i, name in enumerate(full_cov.row_names)}
        vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
        cov = pyemu.Cov(x=vec, names=diff, isdiagonal=True)
        # here we fill in the fixed values
        pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst, cov, num_reals=num_reals,
                                                        fill=False)
        par_ens.append(pe._df)
    par_ens = pd.concat(par_ens, axis=1)
    par_ens = pyemu.ParameterEnsemble(pst=pst, df=par_ens)
    return par_ens
def geostatistical_prior_builder(pst, struct_dict, sigma_range=4,
                                 verbose=False, scale_offset=False):
    """construct a full prior covariance matrix using geostatistical structures
    and parameter bounds information.

    Args:
        pst (`pyemu.Pst`): a control file instance (or the name of control file)
        struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
            pilot point template files pairs. If the values in the dict are
            `pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
            If the filename ends in '.csv', then a pd.DataFrame is loaded,
            otherwise a pilot points file is loaded.
        sigma_range (`float`): a float representing the number of standard deviations
            implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
        verbose (`bool`, optional): flag to control output to stdout. Default is False.
        scale_offset (`bool`): a flag to apply scale and offset to parameter upper and lower bounds
            before applying log transform. Passed to pyemu.Cov.from_parameter_data(). Default
            is False

    Returns:
        `pyemu.Cov`: a covariance matrix that includes all adjustable parameters in the control
        file.

    Note:
        The covariance of parameters associated with geostatistical structures is defined
        as a mixture of GeoStruct and bounds. That is, the GeoStruct is used to construct a
        pyemu.Cov, then the entire pyemu.Cov is scaled by the uncertainty implied by the bounds and
        sigma_range. Most users will want the sill of the geostruct to sum to 1.0 so that the
        resulting covariance matrices have variance proportional to the parameter bounds.

    Example::

        pst = pyemu.Pst("my.pst")
        sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
        cov = pyemu.helpers.geostatistical_prior_builder(pst,struct_dict=sd)
        cov.to_binary("prior.jcb")

    """
    if isinstance(pst, str):
        pst = pyemu.Pst(pst)
    assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}". \
        format(type(pst))
    if verbose: print("building diagonal cov")
    # start from the diagonal covariance implied by the parameter bounds
    full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range,
                                             scale_offset=scale_offset)
    full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}

    par = pst.parameter_data
    for gs, items in struct_dict.items():
        if verbose: print("processing ", gs)
        if isinstance(gs, str):
            gss = pyemu.geostats.read_struct_file(gs)
            if isinstance(gss, list):
                warnings.warn("using first geostat structure in file {0}". \
                              format(gs), PyemuWarning)
                gs = gss[0]
            else:
                gs = gss
        if gs.sill != 1.0:
            warnings.warn("geostatistical_prior_builder() warning: geostruct sill != 1.0, user beware!")
        if not isinstance(items, list):
            items = [items]
        for item in items:
            if isinstance(item, str):
                assert os.path.exists(item), "file {0} not found". \
                    format(item)
                if item.lower().endswith(".tpl"):
                    df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
                # bug fix: 'item.lower' was missing the call parens, so this
                # branch raised AttributeError for any csv item
                elif item.lower().endswith(".csv"):
                    df = pd.read_csv(item)
                else:
                    # previously fell through and left df unbound/stale
                    raise Exception("unrecognized file type for item '{0}', should end with '.tpl' or '.csv'".format(item))
            else:
                df = item
            for req in ['x', 'y', 'parnme']:
                if req not in df.columns:
                    raise Exception("{0} is not in the columns".format(req))
            # drop any parameters not in the control file
            missing = df.loc[df.parnme.apply(
                lambda x: x not in par.parnme), "parnme"]
            if len(missing) > 0:
                warnings.warn("the following parameters are not " + \
                              "in the control file: {0}". \
                              format(','.join(missing)), PyemuWarning)
                df = df.loc[df.parnme.apply(lambda x: x not in missing)]
            if "zone" not in df.columns:
                df.loc[:, "zone"] = 1
            zones = df.zone.unique()
            aset = set(pst.adj_par_names)
            for zone in zones:
                df_zone = df.loc[df.zone == zone, :].copy()
                df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
                if df_zone.shape[0] == 0:
                    warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),
                                  PyemuWarning)
                    continue
                df_zone.sort_index(inplace=True)
                if verbose: print("build cov matrix")
                cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
                if verbose: print("done")
                # scale the geostruct cov by the max bounds-implied variance
                if verbose: print("getting diag var cov", df_zone.shape[0])
                tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
                if verbose: print("scaling full cov by diag var cov")
                cov *= tpl_var
                # sanity check: the scaled block must be invertible
                if verbose: print("test for inversion")
                try:
                    ci = cov.inv
                except Exception:
                    df_zone.to_csv("prior_builder_crash.csv")
                    raise Exception("error inverting cov {0}".
                                    format(cov.row_names[:3]))
                if verbose: print('replace in full cov')
                full_cov.replace(cov)
    return full_cov
def _condition_on_par_knowledge(cov, par_knowledge_dict):
    """ experimental function to include conditional prior information
    for one or more parameters in a full covariance matrix

    Args:
        cov (`pyemu.Cov`): the full prior covariance matrix
        par_knowledge_dict (`dict`): parameter name, variance pairs for the
            parameters with conditional knowledge

    Returns:
        `pyemu.Cov`: the conditioned covariance matrix

    Raises:
        Exception: if any name in `par_knowledge_dict` is not in `cov`
    """
    missing = []
    for parnme in par_knowledge_dict.keys():
        if parnme not in cov.row_names:
            missing.append(parnme)
    if len(missing):
        raise Exception("par knowledge dict parameters not found: {0}". \
                        format(','.join(missing)))
    # build the selection matrix and sigma epsilon
    # (removed dead assignment 'sel = cov.zero2d' that was immediately
    # overwritten by to_pearson())
    sel = cov.to_pearson()
    new_cov_diag = pyemu.Cov(x=np.diag(cov.as_2d.diagonal()), names=cov.row_names)
    # overwrite the prior variance with the "known" variance, then project
    # once through the correlation structure.  bug fix: this block was
    # duplicated (once bare, then again inside 'for _ in range(2)'), so the
    # projection was applied three times instead of once
    for parnme, var in par_knowledge_dict.items():
        idx = cov.row_names.index(parnme)
        new_cov_diag.x[idx, idx] = var  # cov.x[idx,idx]
    new_cov_diag = sel * new_cov_diag * sel.T

    print(new_cov_diag)
    return new_cov_diag
def kl_setup(num_eig, sr, struct, prefixes,
             factors_file="kl_factors.dat",
             islog=True, basis_file=None,
             tpl_dir="."):
    """setup a Karhunen-Loeve based parameterization for a given
    geostatistical structure.

    Args:
        num_eig (`int`): the number of basis vectors to retain in the
            reduced basis
        sr (`flopy.reference.SpatialReference`): a spatial reference instance
        struct (`str`): a PEST-style structure file. Can also be a
            `pyemu.geostats.Geostruct` instance.
        prefixes ([`str`]): a list of parameter prefixes to generate KL
            parameterization for.
        factors_file (`str`, optional): name of the PEST-style interpolation
            factors file to write (can be processed with FAC2REAL).
            Default is "kl_factors.dat".
        islog (`bool`, optional): flag to indicate if the parameters are log transformed.
            Default is True
        basis_file (`str`, optional): the name of the PEST-style binary (e.g. jco)
            file to write the reduced basis vectors to. Default is None (not saved).
        tpl_dir (`str`, optional): the directory to write the resulting
            template files to. Default is "." (current directory).

    Returns:
        `pandas.DataFrame`: a dataframe of parameter information.

    Note:
        This is the companion function to `helpers.apply_kl()`

    Example::

        m = flopy.modflow.Modflow.load("mymodel.nam")
        prefixes = ["hk","vka","ss"]
        df = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",prefixes)

    """
    try:
        import flopy
    except Exception as e:
        raise Exception("error import flopy: {0}".format(str(e)))
    assert isinstance(sr, flopy.utils.SpatialReference)

    if isinstance(struct, str):
        assert os.path.exists(struct)
        gs = pyemu.utils.read_struct_file(struct)
    else:
        gs = struct

    # one name per model cell, in row-major order
    names = []
    for i in range(sr.nrow):
        names.extend(["i{0:04d}j{1:04d}".format(i, j) for j in range(sr.ncol)])

    cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
                               sr.ycentergrid.flatten(),
                               names=names)

    eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
    trunc_basis = cov.u
    trunc_basis.col_names = eig_names
    if basis_file is not None:
        trunc_basis.to_binary(basis_file)
    # retain only the leading num_eig basis vectors
    trunc_basis = trunc_basis[:, :num_eig]
    eig_names = eig_names[:num_eig]

    # a pseudo pilot-points file: one "point" per retained eigen component
    pp_df = pd.DataFrame({"name": eig_names}, index=eig_names)
    pp_df.loc[:, "x"] = -1.0 * sr.ncol
    pp_df.loc[:, "y"] = -1.0 * sr.nrow
    pp_df.loc[:, "zone"] = -999
    pp_df.loc[:, "parval1"] = 1.0
    pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"), pp_df)

    _eigen_basis_to_factor_file(sr.nrow, sr.ncol, trunc_basis,
                                factors_file=factors_file, islog=islog)
    dfs = []
    for prefix in prefixes:
        tpl_file = os.path.join(tpl_dir, "{0}.dat_kl.tpl".format(prefix))
        df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat", tpl_file, prefix)
        shutil.copy2("temp.dat", tpl_file.replace(".tpl", ""))
        df.loc[:, "tpl_file"] = tpl_file
        df.loc[:, "in_file"] = tpl_file.replace(".tpl", "")
        df.loc[:, "prefix"] = prefix
        df.loc[:, "pargp"] = "kl_{0}".format(prefix)
        dfs.append(df)
    df = pd.concat(dfs)
    df.loc[:, "parubnd"] = 10.0
    df.loc[:, "parlbnd"] = 0.1
    # bug fix: previously returned a fresh pd.concat(dfs), which discarded
    # the parubnd/parlbnd columns assigned just above
    return df
# back_array_dict = {}
# f = open(tpl_file,'w')
# f.write("ptf ~\n")
# f.write("name,org_val,new_val\n")
# for name,array in array_dict.items():
# mname = name+"mean"
# f.write("{0},{1:20.8E},~ {2} ~\n".format(mname,0.0,mname))
# #array -= array.mean()
# array_flat = pyemu.Matrix(x=np.atleast_2d(array.flatten()).transpose()
# ,col_names=["flat"],row_names=names,
# isdiagonal=False)
# factors = trunc_basis * array_flat
# enames = ["{0}{1:04d}".format(name,i) for i in range(num_eig)]
# for n,val in zip(enames,factors.x):
# f.write("{0},{1:20.8E},~ {0} ~\n".format(n,val[0]))
# back_array_dict[name] = (factors.T * trunc_basis).x.reshape(array.shape)
# print(array_back)
# print(factors.shape)
#
# return back_array_dict
def _eigen_basis_to_factor_file(nrow, ncol, basis, factors_file, islog=True):
    """write a PEST-style (fac2real) interpolation factors file in which the
    factors for every model cell are the corresponding row of an eigen-basis
    matrix.

    Args:
        nrow (`int`): number of model rows
        ncol (`int`): number of model columns
        basis: matrix-like object with `shape`, `col_names` and `x`
            attributes; must have nrow*ncol rows
        factors_file (`str`): name of the factors file to write
        islog (`bool`, optional): log-transform flag written into each
            record. Default is True
    """
    assert nrow * ncol == basis.shape[0]
    transform_flag = 1 if islog else 0
    n_basis = basis.shape[1]
    with open(factors_file, 'w') as f:
        # two placeholder file names expected by fac2real
        f.write("junk.dat\n")
        f.write("junk.zone.dat\n")
        f.write("{0} {1}\n".format(ncol, nrow))
        f.write("{0}\n".format(n_basis))
        for col_name in basis.col_names:
            f.write(col_name + "\n")
        # one record per model cell: node number, transform flag,
        # factor count, offset, then (index, weight) pairs
        for inode in range(nrow * ncol):
            pieces = ["{0} {1} {2} {3:8.5e}".format(inode + 1, transform_flag,
                                                    n_basis, 0.0)]
            pieces.extend(" {0} {1:12.8g} ".format(jcol + 1, wght)
                          for jcol, wght in enumerate(basis.x[inode, :]))
            f.write("".join(pieces))
            f.write("\n")
def kl_apply(par_file, basis_file, par_to_file_dict, arr_shape):
    """ Apply a KL parameterization transform from basis factors to model
    input arrays.

    Args:
        par_file (`str`): the csv file to get factor values from. Must contain
            the following columns: "name", "new_val", "org_val"
        basis_file (`str`): the PEST-style binary file that contains the reduced
            basis
        par_to_file_dict (`dict`): a mapping from KL parameter prefixes to array
            file names.
        arr_shape (tuple): a length 2 tuple of number of rows and columns
            the resulting arrays should have.

    Note:
        This is the companion function to kl_setup.
        This function should be called during the forward run
    """
    fac_df = pd.read_csv(par_file)
    for col in ("name", "org_val", "new_val"):
        assert col in fac_df.columns
    # the trailing 4 characters of each factor name are the eigen index;
    # the rest is the parameter prefix
    fac_df.loc[:, "prefix"] = fac_df.name.apply(lambda s: s[:-4])
    for pfx in fac_df.prefix.unique():
        assert pfx in par_to_file_dict.keys(), "missing prefix:{0}". \
            format(pfx)
    basis = pyemu.Matrix.from_binary(basis_file)
    assert basis.shape[1] == arr_shape[0] * arr_shape[1]
    arr_min = 1.0e-10  # a temp hack

    # drop the mean entries - only the eigen factors get projected
    fac_df = fac_df.loc[fac_df.name.apply(lambda s: not s.endswith("mean")), :]
    for pfx, out_fname in par_to_file_dict.items():
        fac_mat = pyemu.Matrix.from_dataframe(fac_df.loc[fac_df.prefix == pfx, ["new_val"]])
        fac_mat.autoalign = False
        sub_basis = basis[:fac_mat.shape[0], :]
        # project the factors back to cell space and clip tiny values
        arr = (fac_mat.T * sub_basis).x.reshape(arr_shape)
        arr[arr < arr_min] = arr_min
        np.savetxt(out_fname, arr, fmt="%20.8E")
def zero_order_tikhonov(pst, parbounds=True, par_groups=None,
                        reset=True):
    """setup preferred-value regularization in a pest control file.

    Args:
        pst (`pyemu.Pst`): the control file instance
        parbounds (`bool`, optional): flag to weight the new prior information
            equations according to parameter bound width - approx the KL
            transform. Default is True
        par_groups (`list`): a list of parameter groups to build PI equations for.
            If None, all adjustable parameters are used. Default is None
        reset (`bool`): a flag to remove any existing prior information equations
            in the control file. Default is True

    Example::

        pst = pyemu.Pst("my.pst")
        pyemu.helpers.zero_order_tikhonov(pst)
        pst.write("my_reg.pst")

    """
    if par_groups is None:
        par_groups = pst.par_groups

    pilbl, obgnme, weight, equation = [], [], [], []
    for idx, row in pst.parameter_data.iterrows():
        pt = row["partrans"].lower()
        try:
            # partrans may be stored as bytes in older control files;
            # narrowed from a bare except
            pt = pt.decode()
        except AttributeError:
            pass
        if pt not in ["tied", "fixed"] and \
                row["pargp"] in par_groups:
            pilbl.append(row["parnme"])
            weight.append(1.0)
            ogp_name = "regul" + row["pargp"]
            # obs group names are limited to 12 chars
            obgnme.append(ogp_name[:12])
            parnme = row["parnme"]
            parval1 = row["parval1"]
            if pt == "log":
                parnme = "log(" + parnme + ")"
                parval1 = np.log10(parval1)
            eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1)
            equation.append(eq)

    pi = pd.DataFrame({"pilbl": pilbl,
                       "equation": equation,
                       "obgnme": obgnme,
                       "weight": weight})
    if reset:
        pst.prior_information = pi
    else:
        # DataFrame.append() was removed in pandas 2.0 - use concat instead
        pst.prior_information = pd.concat([pst.prior_information, pi])

    if parbounds:
        _regweight_from_parbound(pst)
    if pst.control_data.pestmode == "estimation":
        pst.control_data.pestmode = "regularization"
def _regweight_from_parbound(pst):
    """set regularization weights for prior information equations from
    parameter bound widths, which approximates the KL expansion. Called by
    zero_order_tikhonov().

    Args:
        pst (`pyemu.Pst`): control file instance whose prior_information
            weights are updated in place
    """
    # index both frames by name so the pi labels can be looked up directly
    pst.parameter_data.index = pst.parameter_data.parnme
    pst.prior_information.index = pst.prior_information.pilbl
    for parnme in pst.prior_information.pilbl:
        if parnme not in pst.parameter_data.index:
            print("prior information name does not correspond" + \
                  " to a parameter: " + str(parnme))
            continue
        row = pst.parameter_data.loc[parnme, :]
        lbnd, ubnd = row["parlbnd"], row["parubnd"]
        # weight is the reciprocal of the (possibly log) bound width
        if row["partrans"].lower() == "log":
            span = np.log10(ubnd) - np.log10(lbnd)
        else:
            span = ubnd - lbnd
        pst.prior_information.loc[parnme, "weight"] = 1.0 / span
def first_order_pearson_tikhonov(pst, cov, reset=True, abs_drop_tol=1.0e-3):
    """setup preferred-difference regularization from a covariance matrix.

    Args:
        pst (`pyemu.Pst`): the PEST control file
        cov (`pyemu.Cov`): a covariance matrix instance with
            some or all of the parameters listed in `pst`.
        reset (`bool`): a flag to remove any existing prior information equations
            in the control file. Default is True
        abs_drop_tol (`float`, optional): tolerance to control how many pi equations
            are written. If the Pearson CC is less than abs_drop_tol, the prior
            information equation will not be included in the control file. Note
            the comparison uses the signed CC, so negatively-correlated pairs
            are never included.

    Note:
        The weights on the prior information equations are the Pearson
        correlation coefficients implied by covariance matrix.

    Example::

        pst = pyemu.Pst("my.pst")
        cov = pyemu.Cov.from_ascii("my.cov")
        pyemu.helpers.first_order_pearson_tikhonov(pst,cov)
        pst.write("my_reg.pst")

    """
    assert isinstance(cov, pyemu.Cov)
    print("getting CC matrix")
    cc_mat = cov.to_pearson()
    try:
        # partrans may be stored as bytes in older control files;
        # narrowed from a bare except
        ptrans = pst.parameter_data.partrans.apply(lambda x: x.decode()).to_dict()
    except AttributeError:
        ptrans = pst.parameter_data.partrans.to_dict()
    pi_num = pst.prior_information.shape[0] + 1
    pilbl, obgnme, weight, equation = [], [], [], []
    sadj_names = set(pst.adj_par_names)
    print("processing")
    for i, iname in enumerate(cc_mat.row_names):
        if iname not in sadj_names:
            continue
        # upper triangle only - each pair is visited once
        for j, jname in enumerate(cc_mat.row_names[i + 1:]):
            if jname not in sadj_names:
                continue
            cc = cc_mat.x[i, j + i + 1]
            if cc < abs_drop_tol:
                continue
            pilbl.append("pcc_{0}".format(pi_num))
            iiname = str(iname)
            if str(ptrans[iname]) == "log":
                iiname = "log(" + iname + ")"
            jjname = str(jname)
            if str(ptrans[jname]) == "log":
                jjname = "log(" + jname + ")"
            equation.append("1.0 * {0} - 1.0 * {1} = 0.0". \
                            format(iiname, jjname))
            weight.append(cc)
            obgnme.append("regul_cc")
            pi_num += 1
    df = pd.DataFrame({"pilbl": pilbl, "equation": equation,
                       "obgnme": obgnme, "weight": weight})
    df.index = df.pilbl
    if reset:
        pst.prior_information = df
    else:
        # DataFrame.append() was removed in pandas 2.0 - use concat instead
        pst.prior_information = pd.concat([pst.prior_information, df])

    if pst.control_data.pestmode == "estimation":
        pst.control_data.pestmode = "regularization"
def simple_tpl_from_pars(parnames, tplfilename='model.input.tpl'):
    """Make a simple template file from a list of parameter names.

    Args:
        parnames ([`str`]): list of parameter names to put in the
            new template file
        tplfilename (`str`): Name of the template file to create. Default
            is "model.input.tpl"

    Note:
        writes a file `tplfilename` with each parameter name in `parnames` on a line
    """
    # "ptf ~" header, then one centered, tilde-delimited 12-char field per name
    lines = ['ptf ~\n']
    lines.extend('~{0:^12}~\n'.format(pname) for pname in parnames)
    with open(tplfilename, 'w') as ofp:
        ofp.writelines(lines)
def simple_ins_from_obs(obsnames, insfilename='model.output.ins'):
    """write a simple instruction file that reads the values named
    in obsnames in order, one per line from a model output file

    Args:
        obsnames (`str`): list of observation names to put in the
            new instruction file
        insfilename (`str`): the name of the instruction file to
            create. Default is "model.output.ins"

    Note:
        writes a file `insfilename` with each observation read off
        of a single line
    """
    # "pif ~" header, then one !name! marker per observation
    body = ''.join('!{0}!\n'.format(oname) for oname in obsnames)
    with open(insfilename, 'w') as ofp:
        ofp.write('pif ~\n')
        ofp.write(body)
def pst_from_parnames_obsnames(parnames, obsnames,
                               tplfilename='model.input.tpl', insfilename='model.output.ins'):
    """Creates a Pst object from a list of parameter names and a list of observation names.

    Args:
        parnames (`str`): list of parameter names
        obsnames (`str`): list of observation names
        tplfilename (`str`): template filename. Default is "model.input.tpl"
        insfilename (`str`): instruction filename. Default is "model.output.ins"

    Returns:
        `pyemu.Pst`: the generic control file
    """
    # write the two interface files, then pair each with the model
    # input/output file name implied by stripping its extension
    simple_tpl_from_pars(parnames, tplfilename)
    simple_ins_from_obs(obsnames, insfilename)
    model_in_name = tplfilename.replace('.tpl', '')
    model_out_name = insfilename.replace('.ins', '')
    return pyemu.Pst.from_io_files(tplfilename, model_in_name,
                                   insfilename, model_out_name)
def read_pestpp_runstorage(filename, irun=0, with_metadata=False):
    """read pars and obs from a specific run in a pest++ serialized
    run storage file into dataframes.

    Args:
        filename (`str`): the name of the run storage file
        irun (`int`): the run id to process. If 'all', then all runs are
            read. Default is 0
        with_metadata (`bool`): flag to return run stats and info txt as well

    Returns:
        tuple containing

        - **pandas.DataFrame**: parameter information
        - **pandas.DataFrame**: observation information
        - **pandas.DataFrame**: optionally run status and info txt.

    Raises:
        Exception: if `irun` is neither castable to int nor the string 'all'
    """
    # binary layout of the file header: four int64 fields
    header_dtype = np.dtype([("n_runs", np.int64), ("run_size", np.int64), ("p_name_size", np.int64),
                             ("o_name_size", np.int64)])
    try:
        irun = int(irun)
    except:
        if irun.lower() == "all":
            irun = irun.lower()
        else:
            raise Exception("unrecognized 'irun': should be int or 'all', not '{0}'".
                            format(irun))

    def status_str(r_status):
        # map pest++ integer run-status codes to human-readable strings
        if r_status == 0:
            return "not completed"
        if r_status == 1:
            return "completed"
        if r_status == -100:
            return "canceled"
        else:
            return "failed"

    assert os.path.exists(filename)
    f = open(filename, "rb")
    header = np.fromfile(f, dtype=header_dtype, count=1)
    p_name_size, o_name_size = header["p_name_size"][0], header["o_name_size"][0]
    # names are stored as one null-delimited blob; the final split element
    # is empty (trailing '\0'), hence the [:-1]
    par_names = struct.unpack('{0}s'.format(p_name_size),
                              f.read(p_name_size))[0].strip().lower().decode().split('\0')[:-1]
    obs_names = struct.unpack('{0}s'.format(o_name_size),
                              f.read(o_name_size))[0].strip().lower().decode().split('\0')[:-1]
    n_runs, run_size = header["n_runs"][0], header["run_size"][0]
    # file offset of the first fixed-size run record
    run_start = f.tell()

    def _read_run(irun):
        # seek directly to the requested fixed-size run record
        f.seek(run_start + (irun * run_size))
        r_status = np.fromfile(f, dtype=np.int8, count=1)
        info_txt = struct.unpack("41s", f.read(41))[0].strip().lower().decode()
        # NOTE(review): the par block appears to carry one extra leading
        # double (dropped by [1:]) and the obs block one extra trailing
        # double (dropped by [:-1]) - confirm against the pest++
        # run-storage serialization format
        par_vals = np.fromfile(f, dtype=np.float64, count=len(par_names) + 1)[1:]
        obs_vals = np.fromfile(f, dtype=np.float64, count=len(obs_names) + 1)[:-1]
        par_df = pd.DataFrame({"parnme": par_names, "parval1": par_vals})
        par_df.index = par_df.pop("parnme")
        obs_df = pd.DataFrame({"obsnme": obs_names, "obsval": obs_vals})
        obs_df.index = obs_df.pop("obsnme")
        return r_status, info_txt, par_df, obs_df

    if irun == "all":
        par_dfs, obs_dfs = [], []
        r_stats, txts = [], []
        for irun in range(n_runs):
            r_status, info_txt, par_df, obs_df = _read_run(irun)
            par_dfs.append(par_df)
            obs_dfs.append(obs_df)
            r_stats.append(r_status)
            txts.append(info_txt)
        # one row per run, columns are parameter/observation names
        par_df = pd.concat(par_dfs, axis=1).T
        par_df.index = np.arange(n_runs)
        obs_df = pd.concat(obs_dfs, axis=1).T
        obs_df.index = np.arange(n_runs)
        meta_data = pd.DataFrame({"r_status": r_stats, "info_txt": txts})
        meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
    else:
        assert irun <= n_runs
        r_status, info_txt, par_df, obs_df = _read_run(irun)
        meta_data = pd.DataFrame({"r_status": [r_status], "info_txt": [info_txt]})
        meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
    f.close()
    if with_metadata:
        return par_df, obs_df, meta_data
    else:
        return par_df, obs_df
def jco_from_pestpp_runstorage(rnj_filename, pst_filename):
    """ read pars and obs from a pest++ serialized run storage
    file (e.g., .rnj) and return jacobian matrix instance

    Args:
        rnj_filename (`str`): the name of the run storage file
        pst_filename (`str`): the name of the pst file

    Note:
        This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco
        file in a subsequent step to avoid memory resource issues associated
        with very large problems.

    Returns:
        `pyemu.Jco`: a jacobian matrix constructed from the run results and
        pest control file information.

    TODO:
        Check rnj file contains transformed par vals (i.e., in model input space)
        Currently only returns pyemu.Jco; doesn't write jco file due to memory
        issues associated with very large problems
        Compare rnj and jco from Freyberg problem in autotests
    """
    # binary layout of the run-storage header (same as read_pestpp_runstorage)
    header_dtype = np.dtype([("n_runs", np.int64), ("run_size", np.int64), ("p_name_size", np.int64),
                             ("o_name_size", np.int64)])
    pst = pyemu.Pst(pst_filename)
    par = pst.parameter_data
    log_pars = set(par.loc[par.partrans == "log", "parnme"].values)
    # only the header is needed here; individual runs are re-read below
    with open(rnj_filename, 'rb') as f:
        header = np.fromfile(f, dtype=header_dtype, count=1)
    # run 0 is the base run - all derivatives are differenced against it
    try:
        base_par, base_obs = read_pestpp_runstorage(rnj_filename, irun=0)
    except:
        raise Exception("couldn't get base run...")
    par = par.loc[base_par.index, :]
    # boolean mask (aligned to base_par rows) of log-transformed parameters
    li = base_par.index.map(lambda x: par.loc[x, "partrans"] == "log")
    base_par.loc[li] = base_par.loc[li].apply(np.log10)
    jco_cols = {}
    for irun in range(1, int(header["n_runs"])):
        par_df, obs_df = read_pestpp_runstorage(rnj_filename, irun=irun)
        par_df.loc[li] = par_df.loc[li].apply(np.log10)
        obs_diff = base_obs - obs_df
        par_diff = base_par - par_df
        # check only one non-zero element per col(par)
        if len(par_diff[par_diff.parval1 != 0]) > 1:
            raise Exception("more than one par diff - looks like the file wasn't created during jco filling...")
        parnme = par_diff[par_diff.parval1 != 0].index[0]
        parval = par_diff.parval1.loc[parnme]
        # derivatives: one finite-difference jacobian column per run
        jco_col = obs_diff / parval
        # some tracking, checks
        print("processing par {0}: {1}...".format(irun, parnme))
        print("%nzsens: {0}%...".format((jco_col[abs(jco_col.obsval) > 1e-8].shape[0] / jco_col.shape[0]) * 100.))
        jco_cols[parnme] = jco_col.obsval
    jco_cols = pd.DataFrame.from_records(data=jco_cols, index=list(obs_diff.index.values))
    jco_cols = pyemu.Jco.from_dataframe(jco_cols)
    # write # memory considerations important here for very large matrices - break into chunks...
    # jco_fnam = "{0}".format(filename[:-4]+".jco")
    # jco_cols.to_binary(filename=jco_fnam, droptol=None, chunk=None)
    return jco_cols
def parse_dir_for_io_files(d, prepend_path=False):
    """ find template/input file pairs and instruction file/output file
    pairs by extension.

    Args:
        d (`str`): directory to search for interface files
        prepend_path (`bool, optional): flag to prepend `d` to each file name.
            Default is False

    Note:
        the return values from this function can be passed straight to
        `pyemu.Pst.from_io_files()` classmethod constructor. Assumes the
        template file names are <input_file>.tpl and instruction file names
        are <output_file>.ins.

    Returns:
        tuple containing

        - **[`str`]**: list of template files in d
        - **[`str`]**: list of input files in d
        - **[`str`]**: list of instruction files in d
        - **[`str`]**: list of output files in d
    """
    entries = os.listdir(d)
    tpl_files = [fname for fname in entries if fname.endswith(".tpl")]
    ins_files = [fname for fname in entries if fname.endswith(".ins")]
    # model input/output names are the interface names minus the extension
    in_files = [fname.replace(".tpl", "") for fname in tpl_files]
    out_files = [fname.replace(".ins", "") for fname in ins_files]
    if prepend_path:
        tpl_files, in_files, ins_files, out_files = (
            [os.path.join(d, fname) for fname in flist]
            for flist in (tpl_files, in_files, ins_files, out_files))
    return tpl_files, in_files, ins_files, out_files
def pst_from_io_files(tpl_files, in_files, ins_files, out_files,
                      pst_filename=None, pst_path=None):
    """ create a Pst instance from model interface files.

    Args:
        tpl_files ([`str`]): list of template file names
        in_files ([`str`]): list of model input file names (pairs with template files)
        ins_files ([`str`]): list of instruction file names
        out_files ([`str`]): list of model output file names (pairs with instruction files)
        pst_filename (`str`): name of control file to write. If None, no file is written.
            Default is None
        pst_path (`str`): the path to append to the template_file and in_file in the control file. If
            not None, then any existing path in front of the template or in file is split off
            and pst_path is prepended. If python is being run in a directory other than where the control
            file will reside, it is useful to pass `pst_path` as `.`. Default is None

    Returns:
        `Pst`: new control file instance with parameter and observation names
        found in `tpl_files` and `ins_files`, respectively.

    Note:
        Assigns generic values for parameter info. Tries to use INSCHEK
        to set somewhat meaningful observation values

        all file paths are relative to where python is running.

    Example::

        tpl_files = ["my.tpl"]
        in_files = ["my.in"]
        ins_files = ["my.ins"]
        out_files = ["my.out"]
        pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)
        pst.control_data.noptmax = 0
        pst.write("my.pst")

    """
    par_names = set()
    if not isinstance(tpl_files, list):
        tpl_files = [tpl_files]
    if not isinstance(in_files, list):
        in_files = [in_files]
    assert len(in_files) == len(tpl_files), "len(in_files) != len(tpl_files)"

    for tpl_file in tpl_files:
        assert os.path.exists(tpl_file), "template file not found: " + str(tpl_file)
        new_names = pyemu.pst_utils.parse_tpl_file(tpl_file)
        par_names.update(new_names)

    if not isinstance(ins_files, list):
        ins_files = [ins_files]
    if not isinstance(out_files, list):
        out_files = [out_files]
    # bug fix: the message previously compared out_files to itself
    assert len(ins_files) == len(out_files), "len(ins_files) != len(out_files)"

    obs_names = []
    for ins_file in ins_files:
        assert os.path.exists(ins_file), "instruction file not found: " + str(ins_file)
        obs_names.extend(pyemu.pst_utils.parse_ins_file(ins_file))

    # sort for a deterministic parameter ordering (sets are unordered)
    new_pst = pyemu.pst_utils.generic_pst(sorted(par_names), list(obs_names))

    if "window" in platform.platform().lower() and pst_path == ".":
        pst_path = ''

    new_pst.instruction_files = ins_files
    new_pst.output_files = out_files
    # try to run inschek to find the observation values
    pyemu.pst_utils.try_process_output_pst(new_pst)

    if pst_path is None:
        new_pst.template_files = tpl_files
        new_pst.input_files = in_files
    else:
        # strip any existing path and prepend pst_path
        new_pst.template_files = [os.path.join(
            pst_path, os.path.split(tpl_file)[-1]) for tpl_file in tpl_files]
        new_pst.input_files = [os.path.join(
            pst_path, os.path.split(in_file)[-1]) for in_file in in_files]
        # now set the true path location to instruction files and output files
        new_pst.instruction_files = [os.path.join(
            pst_path, os.path.split(ins_file)[-1]) for ins_file in ins_files]
        new_pst.output_files = [os.path.join(
            pst_path, os.path.split(out_file)[-1]) for out_file in out_files]

    if pst_filename:
        new_pst.write(pst_filename)
    return new_pst
# fallback [lower, upper] multiplier-parameter bound guesses, keyed by
# substrings of model property/boundary-condition names.  Used by
# PstFromFlopyModel to set "somewhat meaningful" bounds for parameters not
# found in a user-supplied par_bounds_dict
wildass_guess_par_bounds_dict = {"hk": [0.01, 100.0], "vka": [0.1, 10.0],
                                 "sy": [0.25, 1.75], "ss": [0.1, 10.0],
                                 "cond": [0.01, 100.0], "flux": [0.25, 1.75],
                                 "rech": [0.9, 1.1], "stage": [0.9, 1.1],
                                 }
class PstFromFlopyModel(object):
""" a monster helper class to setup a complex PEST interface around
an existing MODFLOW-2005-family model.
Args:
model (`flopy.mbase`): a loaded flopy model instance. If model is an str, it is treated as a
MODFLOW nam file (requires org_model_ws)
new_model_ws (`str`): a directory where the new version of MODFLOW input files and PEST(++)
files will be written
org_model_ws (`str`): directory to existing MODFLOW model files. Required if model argument
is an str. Default is None
pp_props ([[`str`,[`int`]]]): pilot point multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup pilot point multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup pilot point multiplier parameters for recharge for stress
period 1,5,11,and 16.
const_props ([[`str`,[`int`]]]): constant (uniform) multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup constant (uniform) multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup constant (uniform) multiplier parameters for recharge for stress
period 1,5,11,and 16.
temporal_list_props ([[`str`,[`int`]]]): list-type input stress-period level multiplier parameters.
A nested list of list-type input elements to parameterize using
name, iterable pairs. The iterable is zero-based stress-period indices.
For example, to setup multipliers for WEL flux and for RIV conductance,
temporal_list_props = [["wel.flux",[0,1,2]],["riv.cond",None]] would setup
multiplier parameters for well flux for stress periods 1,2 and 3 and
would setup one single river conductance multiplier parameter that is applied
to all stress periods
spatial_list_props ([[`str`,[`int`]]]): list-type input for spatial multiplier parameters.
A nested list of list-type elements to parameterize using
names (e.g. [["riv.cond",0],["wel.flux",1] to setup up cell-based parameters for
each list-type element listed. These multiplier parameters are applied across
all stress periods. For this to work, there must be the same number of entries
for all stress periods. If more than one list element of the same type is in a single
cell, only one parameter is used to multiply all lists in the same cell.
grid_props ([[`str`,[`int`]]]): grid-based (every active model cell) multiplier parameters.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 in every active model cell). For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup grid-based multiplier parameters in every active model cell
for recharge for stress period 1,5,11,and 16.
sfr_pars (`bool`): setup parameters for the stream flow routing modflow package.
If list is passed it defines the parameters to set up.
sfr_temporal_pars (`bool`)
flag to include stress-period level spatially-global multipler parameters in addition to
the spatially-discrete `sfr_pars`. Requires `sfr_pars` to be passed. Default is False
grid_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix
elements for grid-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
pp_space (`int`): number of grid cells between pilot points. If None, use the default
in pyemu.pp_utils.setup_pilot_points_grid. Default is None
zone_props ([[`str`,[`int`]]]): zone-based multiplier parameters.
A nested list of zone-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
pp_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to use for building the prior parameter
covariance matrix for pilot point parameters. If None, a generic
GeoStruct is created using pp_space and grid-spacing information.
Default is None
par_bounds_dict (`dict`): a dictionary of model property/boundary condition name, upper-lower bound pairs.
For example, par_bounds_dict = {"hk":[0.01,100.0],"flux":[0.5,2.0]} would
set the bounds for horizontal hydraulic conductivity to
0.01 and 100.0 and set the bounds for flux parameters to 0.5 and
2.0. For parameters not found in par_bounds_dict,
`pyemu.helpers.wildass_guess_par_bounds_dict` is
used to set somewhat meaningful bounds. Default is None
temporal_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for time-varying list-type multiplier parameters. This GeoStruct
expresses the time correlation so that the 'a' parameter is the length of
time that boundary condition multiplier parameters are correlated across.
If None, then a generic GeoStruct is created that uses an 'a' parameter
of 3 stress periods. Default is None
spatial_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for spatially-varying list-type multiplier parameters.
If None, a generic GeoStruct is created using an "a" parameter that
is 10 times the max cell size. Default is None.
remove_existing (`bool`): a flag to remove an existing new_model_ws directory. If False and
new_model_ws exists, an exception is raised. If True and new_model_ws
exists, the directory is destroyed - user beware! Default is False.
k_zone_dict (`dict`): a dictionary of zero-based layer index, zone array pairs.
e.g. {lay: np.2darray} Used to
override using ibound zones for zone-based parameterization. If None,
use ibound values greater than zero as zones. Alternatively a dictionary of dictionaries
can be passed to allow different zones to be defined for different parameters.
e.g. {"upw.hk" {lay: np.2darray}, "extra.rc11" {lay: np.2darray}}
or {"hk" {lay: np.2darray}, "rc11" {lay: np.2darray}}
use_pp_zones (`bool`): a flag to use ibound zones (or k_zone_dict, see above) as pilot
point zones. If False, ibound values greater than zero are treated as
a single zone for pilot points. Default is False
obssim_smp_pairs ([[`str`,`str`]]): a list of observed-simulated PEST-type SMP file
pairs to get observations
from and include in the control file. Default is []
external_tpl_in_pairs ([[`str`,`str`]]): a list of existing template file, model input
file pairs to parse parameters
from and include in the control file. Default is []
external_ins_out_pairs ([[`str`,`str`]]): a list of existing instruction file,
model output file pairs to parse
observations from and include in the control file. Default is []
extra_pre_cmds ([`str`]): a list of preprocessing commands to add to the forward_run.py script
commands are executed with os.system() within forward_run.py. Default is None.
redirect_forward_output (`bool`): flag for whether to redirect forward model output to text files (True) or
allow model output to be directed to the screen (False). Default is True
extra_post_cmds ([`str`]): a list of post-processing commands to add to the forward_run.py script.
Commands are executed with os.system() within forward_run.py. Default is None.
tmp_files ([`str`]): a list of temporary files that should be removed at the start of the forward
run script. Default is [].
model_exe_name (`str`): binary name to run modflow. If None, a default from flopy is used,
which is dangerous because of the non-standard binary names
(e.g. MODFLOW-NWT_x64, MODFLOWNWT, mfnwt, etc). Default is None.
build_prior (`bool`): flag to build prior covariance matrix. Default is True
sfr_obs (`bool`): flag to include observations of flow and aquifer exchange from
the sfr ASCII output file
hfb_pars (`bool`): add HFB parameters. uses pyemu.gw_utils.write_hfb_template(). the resulting
HFB pars have parval1 equal to the values in the original file and use the
spatial_list_geostruct to build geostatistical covariates between parameters
kl_props ([[`str`,[`int`]]]): karhunen-loeve based multiplier parameters.
A nested list of KL-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
kl_num_eig (`int`): the number of KL-based eigenvector multiplier parameters to use for each
KL parameter set. default is 100
kl_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure
to build the prior parameter covariance matrix
elements for KL-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
Note:
Setup up multiplier parameters for an existing MODFLOW model.
Does all kinds of coolness like building a
meaningful prior, assigning somewhat meaningful parameter groups and
bounds, writes a forward_run.py script with all the calls need to
implement multiplier parameters, run MODFLOW and post-process.
Works a lot better if TEMPCHEK, INSCHEK and PESTCHEK are available in the
system path variable
"""
def __init__(self, model, new_model_ws, org_model_ws=None, pp_props=[], const_props=[],
             temporal_bc_props=[], temporal_list_props=[], grid_props=[],
             grid_geostruct=None, pp_space=None,
             zone_props=[], pp_geostruct=None, par_bounds_dict=None, sfr_pars=False, temporal_sfr_pars=False,
             temporal_list_geostruct=None, remove_existing=False, k_zone_dict=None,
             mflist_waterbudget=True, mfhyd=True, hds_kperk=[], use_pp_zones=False,
             obssim_smp_pairs=None, external_tpl_in_pairs=None,
             external_ins_out_pairs=None, extra_pre_cmds=None,
             extra_model_cmds=None, extra_post_cmds=None, redirect_forward_output=True,
             tmp_files=None, model_exe_name=None, build_prior=True,
             sfr_obs=False,
             spatial_bc_props=[], spatial_list_props=[], spatial_list_geostruct=None,
             hfb_pars=False, kl_props=None, kl_num_eig=100, kl_geostruct=None):
    """Drive the whole PEST-interface construction: load/copy the model,
    set up list- and array-based multiplier parameters, observations, the
    forward-run script, the control file and (optionally) the prior
    parameter covariance matrix.  See the class docstring for a full
    description of every argument.
    """
    self.logger = pyemu.logger.Logger("PstFromFlopyModel.log")
    self.log = self.logger.log
    self.logger.echo = True
    # suffixes used to tag multiplier files by parameterization style
    self.zn_suffix = "_zn"
    self.gr_suffix = "_gr"
    self.pp_suffix = "_pp"
    self.cn_suffix = "_cn"
    self.kl_suffix = "_kl"
    # sub-directory names for original vs multiplier model inputs
    self.arr_org = "arr_org"
    self.arr_mlt = "arr_mlt"
    self.list_org = "list_org"
    self.list_mlt = "list_mlt"
    self.forward_run_file = "forward_run.py"
    self.remove_existing = remove_existing
    self.external_tpl_in_pairs = external_tpl_in_pairs
    self.external_ins_out_pairs = external_ins_out_pairs
    # load/copy the flopy model into new_model_ws (sets self.m)
    self._setup_model(model, org_model_ws, new_model_ws)
    self._add_external()
    self.arr_mult_dfs = []
    self.par_bounds_dict = par_bounds_dict
    self.pp_props = pp_props
    self.pp_space = pp_space
    self.pp_geostruct = pp_geostruct
    self.use_pp_zones = use_pp_zones
    self.const_props = const_props
    self.grid_props = grid_props
    self.grid_geostruct = grid_geostruct
    self.zone_props = zone_props
    self.kl_props = kl_props
    self.kl_geostruct = kl_geostruct
    self.kl_num_eig = kl_num_eig
    # back-compat shims: *_bc_props are the deprecated names for *_list_props;
    # supplying both is an error
    if len(temporal_bc_props) > 0:
        if len(temporal_list_props) > 0:
            self.logger.lraise("temporal_bc_props and temporal_list_props. " + \
                               "temporal_bc_props is deprecated and replaced by temporal_list_props")
        self.logger.warn("temporal_bc_props is deprecated and replaced by temporal_list_props")
        temporal_list_props = temporal_bc_props
    if len(spatial_bc_props) > 0:
        if len(spatial_list_props) > 0:
            self.logger.lraise("spatial_bc_props and spatial_list_props. " + \
                               "spatial_bc_props is deprecated and replaced by spatial_list_props")
        self.logger.warn("spatial_bc_props is deprecated and replaced by spatial_list_props")
        spatial_list_props = spatial_bc_props
    self.temporal_list_props = temporal_list_props
    self.temporal_list_geostruct = temporal_list_geostruct
    if self.temporal_list_geostruct is None:
        v = pyemu.geostats.ExpVario(contribution=1.0, a=180.0) # 180 correlation length
        self.temporal_list_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="temporal_list_geostruct")
    self.spatial_list_props = spatial_list_props
    self.spatial_list_geostruct = spatial_list_geostruct
    if self.spatial_list_geostruct is None:
        # default spatial correlation length: 10x the largest cell dimension
        dist = 10 * float(max(self.m.dis.delr.array.max(),
                              self.m.dis.delc.array.max()))
        v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
        self.spatial_list_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="spatial_list_geostruct")
    self.obssim_smp_pairs = obssim_smp_pairs
    self.hds_kperk = hds_kperk
    self.sfr_obs = sfr_obs
    # lines accumulated for the forward_run.py script (pre/model/post stages)
    self.frun_pre_lines = []
    self.frun_model_lines = []
    self.frun_post_lines = []
    self.tmp_files = []
    self.extra_forward_imports = []
    if tmp_files is not None:
        if not isinstance(tmp_files, list):
            tmp_files = [tmp_files]
        self.tmp_files.extend(tmp_files)
    if k_zone_dict is None:
        # default zoning: the ibound array of each layer
        self.k_zone_dict = {k: self.m.bas6.ibound[k].array for k in np.arange(self.m.nlay)}
    else:
        # check if k_zone_dict is a dictionary of dictionaries
        if np.all([isinstance(v, dict) for v in k_zone_dict.values()]):
            # loop over outer keys
            for par_key in k_zone_dict.keys():
                for k, arr in k_zone_dict[par_key].items():
                    if k not in np.arange(self.m.nlay):
                        self.logger.lraise("k_zone_dict for par {1}, layer index not in nlay:{0}".
                                           format(k, par_key))
                    if arr.shape != (self.m.nrow, self.m.ncol):
                        self.logger.lraise("k_zone_dict arr for k {0} for par{2} has wrong shape:{1}".
                                           format(k, arr.shape, par_key))
        else:
            for k, arr in k_zone_dict.items():
                if k not in np.arange(self.m.nlay):
                    self.logger.lraise("k_zone_dict layer index not in nlay:{0}".
                                       format(k))
                if arr.shape != (self.m.nrow, self.m.ncol):
                    self.logger.lraise("k_zone_dict arr for k {0} has wrong shape:{1}".
                                       format(k, arr.shape))
        self.k_zone_dict = k_zone_dict
    # add any extra commands to the forward run lines
    for alist, ilist in zip([self.frun_pre_lines, self.frun_model_lines, self.frun_post_lines],
                            [extra_pre_cmds, extra_model_cmds, extra_post_cmds]):
        if ilist is None:
            continue
        if not isinstance(ilist, list):
            ilist = [ilist]
        for cmd in ilist:
            self.logger.statement("forward_run line:{0}".format(cmd))
            alist.append("pyemu.os_utils.run('{0}')\n".format(cmd))
    # add the model call
    if model_exe_name is None:
        model_exe_name = self.m.exe_name
        # NOTE(review): this formats `model` (possibly a Modflow instance),
        # not `model_exe_name` — confirm which was intended
        self.logger.warn("using flopy binary to execute the model:{0}".format(model))
    if redirect_forward_output:
        line = "pyemu.os_utils.run('{0} {1} 1>{1}.stdout 2>{1}.stderr')".format(model_exe_name, self.m.namefile)
    else:
        line = "pyemu.os_utils.run('{0} {1} ')".format(model_exe_name, self.m.namefile)
    self.logger.statement("forward_run line:{0}".format(line))
    self.frun_model_lines.append(line)
    # template/input and instruction/output file registries
    self.tpl_files, self.in_files = [], []
    self.ins_files, self.out_files = [], []
    self._setup_mult_dirs()
    self.mlt_files = []
    self.org_files = []
    self.m_files = []
    self.mlt_counter = {}
    self.par_dfs = {}
    self.mlt_dfs = []
    # build the actual parameterizations
    self._setup_list_pars()
    self._setup_array_pars()
    if not sfr_pars and temporal_sfr_pars:
        self.logger.lraise("use of `temporal_sfr_pars` requires `sfr_pars`")
    if sfr_pars:
        # sfr_pars may be True, a column name, or a list of column names
        if isinstance(sfr_pars, str):
            sfr_pars = [sfr_pars]
        if isinstance(sfr_pars, list):
            self._setup_sfr_pars(sfr_pars, include_temporal_pars=temporal_sfr_pars)
        else:
            self._setup_sfr_pars(include_temporal_pars=temporal_sfr_pars)
    if hfb_pars:
        self._setup_hfb_pars()
    self.mflist_waterbudget = mflist_waterbudget
    self.mfhyd = mfhyd
    self._setup_observations()
    self.build_pst()
    if build_prior:
        self.parcov = self.build_prior()
    else:
        self.parcov = None
    # persist the intermediate setup dataframes for later inspection
    self.log("saving intermediate _setup_<> dfs into {0}".
             format(self.m.model_ws))
    for tag, df in self.par_dfs.items():
        df.to_csv(os.path.join(self.m.model_ws, "_setup_par_{0}_{1}.csv".
                               format(tag.replace(" ", '_'), self.pst_name)))
    for tag, df in self.obs_dfs.items():
        df.to_csv(os.path.join(self.m.model_ws, "_setup_obs_{0}_{1}.csv".
                               format(tag.replace(" ", '_'), self.pst_name)))
    self.log("saving intermediate _setup_<> dfs into {0}".
             format(self.m.model_ws))
    self.logger.statement("all done")
def _setup_sfr_obs(self):
    """Wire up observations on the SFR ASCII output file.

    No-op unless ``self.sfr_obs`` is truthy.  Copies the existing sfr
    output file from the original workspace into the new one, registers
    the observation dataframe, and adds the post-processing call to the
    forward-run script.
    """
    if not self.sfr_obs:
        return
    if self.m.sfr is None:
        self.logger.lraise("no sfr package found...")
    # the sfr output file must already exist in the original workspace
    sfr_out_name = "{0}.sfr.out".format(self.m.name)
    org_sfr_out_file = os.path.join(self.org_model_ws, sfr_out_name)
    if not os.path.exists(org_sfr_out_file):
        self.logger.lraise("setup_sfr_obs() error: could not locate existing sfr out file: {0}".
                           format(org_sfr_out_file))
    new_sfr_out_file = os.path.join(self.m.model_ws, os.path.split(org_sfr_out_file)[-1])
    shutil.copy2(org_sfr_out_file, new_sfr_out_file)
    # a dict-valued sfr_obs defines segment groupings; any other truthy
    # value means "use defaults"
    seg_group_dict = self.sfr_obs if isinstance(self.sfr_obs, dict) else None
    df = pyemu.gw_utils.setup_sfr_obs(new_sfr_out_file, seg_group_dict=seg_group_dict,
                                      model=self.m, include_path=True)
    if df is not None:
        self.obs_dfs["sfr"] = df
    self.frun_post_lines.append("pyemu.gw_utils.apply_sfr_obs()")
def _setup_sfr_pars(self, par_cols=None, include_temporal_pars=None):
    """Set up multiplier parameters for SFR segment data, plus reach data
    when the package uses ``reachinput`` (isfropt = 1).

    Args:
        par_cols: a column name or list of column names to parameterize;
            None lets the gw_utils helpers choose defaults.
        include_temporal_pars: flag forwarded to the segment setup to also
            build stress-period-level multipliers.
    """
    assert self.m.sfr is not None, "can't find sfr package..."
    if isinstance(par_cols, str):
        par_cols = [par_cols]
    seg_pars, reach_pars = True, False
    collected = {"sfr": []}
    seg_df = pyemu.gw_utils.setup_sfr_seg_parameters(
        self.m, par_cols=par_cols,
        include_temporal_pars=include_temporal_pars)
    if seg_df.empty:
        warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
        seg_pars = False
    else:
        collected["sfr"].append(seg_df)
        self.tpl_files.append("sfr_seg_pars.dat.tpl")
        self.in_files.append("sfr_seg_pars.dat")
        if include_temporal_pars:
            self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
            self.in_files.append("sfr_seg_temporal_pars.dat")
    if self.m.sfr.reachinput:
        reach_df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
        if reach_df.empty:
            warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
        else:
            self.tpl_files.append("sfr_reach_pars.dat.tpl")
            self.in_files.append("sfr_reach_pars.dat")
            reach_pars = True
            collected["sfr"].append(reach_df)
    if len(collected["sfr"]) > 0:
        # one combined dataframe for both segment and reach parameters
        self.par_dfs["sfr"] = pd.concat(collected["sfr"])
        self.frun_pre_lines.append(
            "pyemu.gw_utils.apply_sfr_parameters(seg_pars={0}, reach_pars={1})".format(seg_pars, reach_pars))
    else:
        warnings.warn("No sfr parameters have been set up!", PyemuWarning)
def _setup_hfb_pars(self):
    """Set up (non-multiplier) parameters for the HFB package by writing a
    template file directly against the HFB input file."""
    if self.m.hfb6 is None:
        self.logger.lraise("couldn't find hfb pak")
    tpl_file, df = pyemu.gw_utils.write_hfb_template(self.m)
    # the model input file name is the template file minus its .tpl extension
    in_file = tpl_file.replace(".tpl", "")
    self.in_files.append(os.path.split(in_file)[-1])
    self.tpl_files.append(os.path.split(tpl_file)[-1])
    self.par_dfs["hfb"] = df
def _setup_mult_dirs(self):
""" setup the directories to use for multiplier parameterization. Directories
are make within the PstFromFlopyModel.m.model_ws directory
"""
# setup dirs to hold the original and multiplier model input quantities
set_dirs = []
# if len(self.pp_props) > 0 or len(self.zone_props) > 0 or \
# len(self.grid_props) > 0:
if self.pp_props is not None or \
self.zone_props is not None or \
self.grid_props is not None or \
self.const_props is not None or \
self.kl_props is not None:
set_dirs.append(self.arr_org)
set_dirs.append(self.arr_mlt)
# if len(self.bc_props) > 0:
if len(self.temporal_list_props) > 0 or len(self.spatial_list_props) > 0:
set_dirs.append(self.list_org)
if len(self.spatial_list_props):
set_dirs.append(self.list_mlt)
for d in set_dirs:
d = os.path.join(self.m.model_ws, d)
self.log("setting up '{0}' dir".format(d))
if os.path.exists(d):
if self.remove_existing:
shutil.rmtree(d, onerror=remove_readonly)
else:
raise Exception("dir '{0}' already exists".
format(d))
os.mkdir(d)
self.log("setting up '{0}' dir".format(d))
def _setup_model(self, model, org_model_ws, new_model_ws):
""" setup the flopy.mbase instance for use with multipler parameters.
Changes model_ws, sets external_path and writes new MODFLOW input
files
"""
split_new_mws = [i for i in os.path.split(new_model_ws) if len(i) > 0]
if len(split_new_mws) != 1:
self.logger.lraise("new_model_ws can only be 1 folder-level deep:{0}".
format(str(split_new_mws)))
if isinstance(model, str):
self.log("loading flopy model")
try:
import flopy
except:
raise Exception("from_flopy_model() requires flopy")
# prepare the flopy model
self.org_model_ws = org_model_ws
self.new_model_ws = new_model_ws
self.m = flopy.modflow.Modflow.load(model, model_ws=org_model_ws,
check=False, verbose=True, forgive=False)
self.log("loading flopy model")
else:
self.m = model
self.org_model_ws = str(self.m.model_ws)
self.new_model_ws = new_model_ws
self.log("updating model attributes")
self.m.array_free_format = True
self.m.free_format_input = True
self.m.external_path = '.'
self.log("updating model attributes")
if os.path.exists(new_model_ws):
if not self.remove_existing:
self.logger.lraise("'new_model_ws' already exists")
else:
self.logger.warn("removing existing 'new_model_ws")
shutil.rmtree(new_model_ws, onerror=pyemu.os_utils._remove_readonly)
time.sleep(1)
self.m.change_model_ws(new_model_ws, reset_external=True)
self.m.exe_name = self.m.exe_name.replace(".exe", '')
self.m.exe = self.m.version
self.log("writing new modflow input files")
self.m.write_input()
self.log("writing new modflow input files")
def _get_count(self, name):
""" get the latest counter for a certain parameter type.
"""
if name not in self.mlt_counter:
self.mlt_counter[name] = 1
c = 0
else:
c = self.mlt_counter[name]
self.mlt_counter[name] += 1
# print(name,c)
return c
def _prep_mlt_arrays(self):
    """ prepare multipler arrays. Copies existing model input arrays and
    writes generic (ones) multiplier arrays

    Returns:
        pandas.DataFrame: one row per (package attribute, layer or stress
        period) with org/mlt/model file names and suffix/prefix tags.
        Returns None (implicitly) when nothing was parameterized.
    """
    # candidate property lists, paired positionally with their file suffixes
    par_props = [self.pp_props, self.grid_props,
                 self.zone_props, self.const_props,
                 self.kl_props]
    par_suffixs = [self.pp_suffix, self.gr_suffix,
                   self.zn_suffix, self.cn_suffix,
                   self.kl_suffix]
    # Need to remove props and suffixes for which no info was provided (e.g. still None)
    del_idx = []
    for i, cp in enumerate(par_props):
        if cp is None:
            del_idx.append(i)
    # delete from the end so earlier indices stay valid
    for i in del_idx[::-1]:
        del (par_props[i])
        del (par_suffixs[i])
    mlt_dfs = []
    for par_prop, suffix in zip(par_props, par_suffixs):
        # NOTE(review): this wraps a bare ["pak.attr", iterable] pair into a
        # one-element list; a 2-element list of pairs (first element a list)
        # is left alone — confirm this covers all caller input shapes
        if len(par_prop) == 2:
            if not isinstance(par_prop[0], list):
                par_prop = [par_prop]
        if len(par_prop) == 0:
            continue
        for pakattr, k_org in par_prop:
            attr_name = pakattr.split('.')[1]
            pak, attr = self._parse_pakattr(pakattr)
            # the index set is layers for static arrays, stress periods for
            # transient ones
            ks = np.arange(self.m.nlay)
            if isinstance(attr, flopy.utils.Transient2d):
                ks = np.arange(self.m.nper)
            try:
                k_parse = self._parse_k(k_org, ks)
            except Exception as e:
                self.logger.lraise("error parsing k {0}:{1}".
                                   format(k_org, str(e)))
            org, mlt, mod, layer = [], [], [], []
            # unique prefix per attribute instance (e.g. "hk0", "hk1", ...)
            c = self._get_count(attr_name)
            mlt_prefix = "{0}{1}".format(attr_name, c)
            mlt_name = os.path.join(self.arr_mlt, "{0}.dat{1}"
                                    .format(mlt_prefix, suffix))
            for k in k_parse:
                # horrible kludge to avoid passing int64 to flopy
                # this gift may give again...
                if type(k) is np.int64:
                    k = int(k)
                # write the original array to arr_org; record which layer
                # (or 0 for transient — see note below) it belongs to
                if isinstance(attr, flopy.utils.Util2d):
                    fname = self._write_u2d(attr)
                    layer.append(k)
                elif isinstance(attr, flopy.utils.Util3d):
                    fname = self._write_u2d(attr[k])
                    layer.append(k)
                elif isinstance(attr, flopy.utils.Transient2d):
                    fname = self._write_u2d(attr.transient_2ds[k])
                    layer.append(0) # big assumption here
                mod.append(os.path.join(self.m.external_path, fname))
                mlt.append(mlt_name)
                org.append(os.path.join(self.arr_org, fname))
            df = pd.DataFrame({"org_file": org, "mlt_file": mlt, "model_file": mod, "layer": layer})
            df.loc[:, "suffix"] = suffix
            df.loc[:, "prefix"] = mlt_prefix
            df.loc[:, "attr_name"] = attr_name
            mlt_dfs.append(df)
    if len(mlt_dfs) > 0:
        mlt_df = pd.concat(mlt_dfs, ignore_index=True)
        return mlt_df
def _write_u2d(self, u2d):
""" write a flopy.utils.Util2D instance to an ASCII text file using the
Util2D filename
"""
filename = os.path.split(u2d.filename)[-1]
np.savetxt(os.path.join(self.m.model_ws, self.arr_org, filename),
u2d.array, fmt="%15.6E")
return filename
def _write_const_tpl(self, name, tpl_file, zn_array):
""" write a template file a for a constant (uniform) multiplier parameter
"""
parnme = []
with open(os.path.join(self.m.model_ws, tpl_file), 'w') as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i, j] < 1:
pname = " 1.0 "
else:
pname = "{0}{1}".format(name, self.cn_suffix)
if len(pname) > 12:
self.logger.warn("zone pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
pname = " ~ {0} ~".format(pname)
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme}, index=parnme)
# df.loc[:,"pargp"] = "{0}{1}".format(self.cn_suffixname)
df.loc[:, "pargp"] = "{0}_{1}".format(self.cn_suffix.replace('_', ''), name)
df.loc[:, "tpl"] = tpl_file
return df
def _write_grid_tpl(self, name, tpl_file, zn_array):
""" write a template file a for grid-based multiplier parameters
"""
parnme, x, y = [], [], []
with open(os.path.join(self.m.model_ws, tpl_file), 'w') as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i, j] < 1:
pname = ' 1.0 '
else:
pname = "{0}{1:03d}{2:03d}".format(name, i, j)
if len(pname) > 12:
self.logger.warn("grid pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
pname = ' ~ {0} ~ '.format(pname)
x.append(self.m.sr.xcentergrid[i, j])
y.append(self.m.sr.ycentergrid[i, j])
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme, "x": x, "y": y}, index=parnme)
df.loc[:, "pargp"] = "{0}{1}".format(self.gr_suffix.replace('_', ''), name)
df.loc[:, "tpl"] = tpl_file
return df
def _grid_prep(self):
""" prepare grid-based parameterizations
"""
if len(self.grid_props) == 0:
return
if self.grid_geostruct is None:
self.logger.warn("grid_geostruct is None," \
" using ExpVario with contribution=1 and a=(max(delc,delr)*10")
dist = 10 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.grid_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="grid_geostruct", transform="log")
def _pp_prep(self, mlt_df):
    """ prepare pilot point based parameterization

    Builds a default pp_space/pp_geostruct if needed, lays out the pilot
    point grid, computes kriging factors per (layer, par-group), and maps
    the resulting pp/fac files back onto `mlt_df` rows with the pilot
    point suffix (modified in place).

    Args:
        mlt_df (pandas.DataFrame): multiplier-file info from
            `_prep_mlt_arrays()`
    """
    if len(self.pp_props) == 0:
        return
    if self.pp_space is None:
        self.logger.warn("pp_space is None, using 10...\n")
        self.pp_space = 10
    if self.pp_geostruct is None:
        self.logger.warn("pp_geostruct is None," \
                         " using ExpVario with contribution=1 and a=(pp_space*max(delr,delc))")
        pp_dist = self.pp_space * float(max(self.m.dis.delr.array.max(),
                                            self.m.dis.delc.array.max()))
        v = pyemu.geostats.ExpVario(contribution=1.0, a=pp_dist)
        self.pp_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="pp_geostruct", transform="log")
    pp_df = mlt_df.loc[mlt_df.suffix == self.pp_suffix, :]
    layers = pp_df.layer.unique()
    layers.sort()
    pp_dict = {l: list(pp_df.loc[pp_df.layer == l, "prefix"].unique()) for l in layers}
    # big assumption here - if prefix is listed more than once, use the lowest layer index
    pp_dict_sort = {}
    for i, l in enumerate(layers):
        p = set(pp_dict[l])
        pl = list(p)
        pl.sort()
        pp_dict_sort[l] = pl
        # remove prefixes already claimed by this (lower) layer from all
        # higher layers
        for ll in layers[i + 1:]:
            pp = set(pp_dict[ll])
            d = list(pp - p)
            d.sort()
            pp_dict_sort[ll] = d
    pp_dict = pp_dict_sort
    pp_array_file = {p: m for p, m in zip(pp_df.prefix, pp_df.mlt_file)}
    self.logger.statement("pp_dict: {0}".format(str(pp_dict)))
    self.log("calling setup_pilot_point_grid()")
    # build `ib`, the zone-array structure handed to setup_pilotpoints_grid:
    # always a dict keyed by parameter prefix (or 'general_zn') of
    # {layer: zone-array} dicts
    if self.use_pp_zones:
        # check if k_zone_dict is a dictionary of dictionaries
        if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):
            ib = {p.split('.')[-1]: k_dict for p, k_dict in self.k_zone_dict.items()}
            for attr in pp_df.attr_name.unique():
                if attr not in [p.split('.')[-1] for p in ib.keys()]:
                    if 'general_zn' not in ib.keys():
                        warnings.warn("Dictionary of dictionaries passed as zones, {0} not in keys: {1}. "
                                      "Will use ibound for zones".format(attr, ib.keys()), PyemuWarning)
                    else:
                        self.logger.statement(
                            "Dictionary of dictionaries passed as pp zones, "
                            "using 'general_zn' for {0}".format(attr))
            if 'general_zn' not in ib.keys():
                # fallback zones for attributes not covered by the user dict
                ib['general_zn'] = {k: self.m.bas6.ibound[k].array for k in range(self.m.nlay)}
        else:
            ib = {'general_zn': self.k_zone_dict}
    else:
        # not using zones: binarize ibound (active -> 1) per layer
        ib = {}
        for k in range(self.m.nlay):
            a = self.m.bas6.ibound[k].array.copy()
            a[a > 0] = 1
            ib[k] = a
        for k, i in ib.items():
            if np.any(i < 0):
                u, c = np.unique(i[i > 0], return_counts=True)
                counts = dict(zip(u, c))
                mx = -1.0e+10
                imx = None
                for u, c in counts.items():
                    if c > mx:
                        mx = c
                        imx = u
                # NOTE(review): `imx` (the most common positive zone value)
                # is computed but never used — the reset below uses `u`,
                # which after the loop is just the last zone visited.
                # Looks like `imx` was intended; confirm.
                self.logger.warn("resetting negative ibound values for PP zone" + \
                                 "array in layer {0} : {1}".format(k + 1, u))
                i[i < 0] = u
        ib = {'general_zn': ib}
    pp_df = pyemu.pp_utils.setup_pilotpoints_grid(self.m,
                                                  ibound=ib,
                                                  use_ibound_zones=self.use_pp_zones,
                                                  prefix_dict=pp_dict,
                                                  every_n_cell=self.pp_space,
                                                  pp_dir=self.m.model_ws,
                                                  tpl_dir=self.m.model_ws,
                                                  shapename=os.path.join(
                                                      self.m.model_ws, "pp.shp"))
    self.logger.statement("{0} pilot point parameters created".
                          format(pp_df.shape[0]))
    self.logger.statement("pilot point 'pargp':{0}".
                          format(','.join(pp_df.pargp.unique())))
    self.log("calling setup_pilot_point_grid()")
    # calc factors for each layer
    pargp = pp_df.pargp.unique()
    pp_dfs_k = {}
    fac_files = {}
    pp_processed = set()
    pp_df.loc[:, "fac_file"] = np.NaN
    for pg in pargp:
        ks = pp_df.loc[pp_df.pargp == pg, "k"].unique()
        if len(ks) == 0:
            self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
        if len(ks) == 1:
            if np.all([isinstance(v, dict) for v in ib.values()]):  # check is dict of dicts
                if np.any([pg.startswith(p) for p in ib.keys()]):
                    p = next(p for p in ib.keys() if pg.startswith(p))
                    # get dict relating to parameter prefix
                    ib_k = ib[p][ks[0]]
                else:
                    p = 'general_zn'
                    ib_k = ib[p][ks[0]]
            else:
                ib_k = ib[ks[0]]
        if len(ks) != 1:  # TODO
            # self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
            self.logger.warn("multiple k values for {0},forming composite zone array...".format(pg))
            ib_k = np.zeros((self.m.nrow, self.m.ncol))
            for k in ks:
                t = ib["general_zn"][k].copy()
                t[t < 1] = 0
                ib_k[t > 0] = t[t > 0]
        k = int(ks[0])
        # NOTE(review): `p` is only assigned in the dict-of-dicts branch
        # above; on other paths this reads a stale `p` left over from the
        # earlier prefix-sorting loop — confirm intended.
        kattr_id = "{}_{}".format(k, p)
        kp_id = "{}_{}".format(k, pg)
        if kp_id not in pp_dfs_k.keys():
            self.log("calculating factors for p={0}, k={1}".format(pg, k))
            fac_file = os.path.join(self.m.model_ws, "pp_k{0}.fac".format(kattr_id))
            var_file = fac_file.replace("{0}.fac".format(kattr_id),
                                        ".var.dat")
            pp_df_k = pp_df.loc[pp_df.pargp == pg]
            # kriging factors are shared across par groups with the same
            # (layer, zone-source) id, so only compute them once
            if kattr_id not in pp_processed:
                self.logger.statement("saving krige variance file:{0}"
                                      .format(var_file))
                self.logger.statement("saving krige factors file:{0}"
                                      .format(fac_file))
                ok_pp = pyemu.geostats.OrdinaryKrige(self.pp_geostruct, pp_df_k)
                ok_pp.calc_factors_grid(self.m.sr, var_filename=var_file, zone_array=ib_k, num_threads=10)
                ok_pp.to_grid_factors_file(fac_file)
                pp_processed.add(kattr_id)
            fac_files[kp_id] = fac_file
            self.log("calculating factors for p={0}, k={1}".format(pg, k))
            pp_dfs_k[kp_id] = pp_df_k
    # map each par group's fac/pp files back onto pp_df rows
    for kp_id, fac_file in fac_files.items():
        k = int(kp_id.split('_')[0])
        pp_prefix = kp_id.split('_', 1)[-1]
        # pp_files = pp_df.pp_filename.unique()
        fac_file = os.path.split(fac_file)[-1]
        # pp_prefixes = pp_dict[k]
        # for pp_prefix in pp_prefixes:
        self.log("processing pp_prefix:{0}".format(pp_prefix))
        if pp_prefix not in pp_array_file.keys():
            self.logger.lraise("{0} not in self.pp_array_file.keys()".
                               format(pp_prefix, ','.
                                      join(pp_array_file.keys())))
        out_file = os.path.join(self.arr_mlt, os.path.split(pp_array_file[pp_prefix])[-1])
        pp_files = pp_df.loc[pp_df.pp_filename.apply(
            lambda x:
            os.path.split(x)[-1
            ].split('.')[0] == "{0}pp".format(pp_prefix)), 'pp_filename']
        if pp_files.unique().shape[0] != 1:
            self.logger.lraise("wrong number of pp_files found:{0}".format(','.join(pp_files)))
        pp_file = os.path.split(pp_files.iloc[0])[-1]
        pp_df.loc[pp_df.pargp == pp_prefix, "fac_file"] = fac_file
        pp_df.loc[pp_df.pargp == pp_prefix, "pp_file"] = pp_file
        pp_df.loc[pp_df.pargp == pp_prefix, "out_file"] = out_file
    pp_df.loc[:, "pargp"] = pp_df.pargp.apply(lambda x: "pp_{0}".format(x))
    # propagate fac/pp file references onto the multiplier dataframe rows
    out_files = mlt_df.loc[mlt_df.mlt_file.
                           apply(lambda x: x.endswith(self.pp_suffix)), "mlt_file"]
    # mlt_df.loc[:,"fac_file"] = np.NaN
    # mlt_df.loc[:,"pp_file"] = np.NaN
    for out_file in out_files:
        pp_df_pf = pp_df.loc[pp_df.out_file == out_file, :]
        fac_files = pp_df_pf.fac_file
        if fac_files.unique().shape[0] != 1:
            self.logger.lraise("wrong number of fac files:{0}".format(str(fac_files.unique())))
        fac_file = fac_files.iloc[0]
        pp_files = pp_df_pf.pp_file
        if pp_files.unique().shape[0] != 1:
            self.logger.lraise("wrong number of pp files:{0}".format(str(pp_files.unique())))
        pp_file = pp_files.iloc[0]
        mlt_df.loc[mlt_df.mlt_file == out_file, "fac_file"] = fac_file
        mlt_df.loc[mlt_df.mlt_file == out_file, "pp_file"] = pp_file
    self.par_dfs[self.pp_suffix] = pp_df
    # pilot-point multipliers are written by apply-time machinery, not tpl files
    mlt_df.loc[mlt_df.suffix == self.pp_suffix, "tpl_file"] = np.NaN
def _kl_prep(self, mlt_df):
""" prepare KL based parameterizations
"""
if len(self.kl_props) == 0:
return
if self.kl_geostruct is None:
self.logger.warn("kl_geostruct is None," \
" using ExpVario with contribution=1 and a=(10.0*max(delr,delc))")
kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="kl_geostruct", transform="log")
kl_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix, :]
layers = kl_df.layer.unique()
# kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
# for i,l in enumerate(layers):
# p = set(kl_dict[l])
# for ll in layers[i+1:]:
# pp = set(kl_dict[ll])
# d = pp - p
# kl_dict[ll] = list(d)
kl_prefix = list(kl_df.loc[:, "prefix"])
kl_array_file = {p: m for p, m in zip(kl_df.prefix, kl_df.mlt_file)}
self.logger.statement("kl_prefix: {0}".format(str(kl_prefix)))
fac_file = os.path.join(self.m.model_ws, "kl.fac")
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_df = kl_setup(self.kl_num_eig, self.m.sr, self.kl_geostruct, kl_prefix,
factors_file=fac_file, basis_file=fac_file + ".basis.jcb",
tpl_dir=self.m.model_ws)
self.logger.statement("{0} kl parameters created".
format(kl_df.shape[0]))
self.logger.statement("kl 'pargp':{0}".
format(','.join(kl_df.pargp.unique())))
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_mlt_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix]
for prefix in kl_df.prefix.unique():
prefix_df = kl_df.loc[kl_df.prefix == prefix, :]
in_file = os.path.split(prefix_df.loc[:, "in_file"].iloc[0])[-1]
assert prefix in mlt_df.prefix.values, "{0}:{1}".format(prefix, mlt_df.prefix)
mlt_df.loc[mlt_df.prefix == prefix, "pp_file"] = in_file
mlt_df.loc[mlt_df.prefix == prefix, "fac_file"] = os.path.split(fac_file)[-1]
print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN
self.par_dfs[self.kl_suffix] = kl_df
# calc factors for each layer
    def _setup_array_pars(self):
        """ main entry point for setting up array multipler parameters

        For every multiplier array prepared by ``_prep_mlt_arrays()``, writes
        a PEST template file whose form depends on the parameter suffix
        (constant, grid, zone), then triggers the pilot-point, grid and KL
        preparation routines for those suffixes, saves ``arr_pars.csv``,
        writes all-ones test multiplier arrays, test-runs
        ``apply_array_pars()`` and registers it as a forward-run
        pre-processing line.
        """
        mlt_df = self._prep_mlt_arrays()
        # nothing to do if no multiplier arrays were prepared
        if mlt_df is None:
            return
        # one tpl file per multiplier array, named after the mlt file
        mlt_df.loc[:, "tpl_file"] = mlt_df.mlt_file.apply(lambda x: os.path.split(x)[-1] + ".tpl")
        # mlt_df.loc[mlt_df.tpl_file.apply(lambda x:pd.notnull(x.pp_file)),"tpl_file"] = np.NaN
        mlt_files = mlt_df.mlt_file.unique()
        # for suffix,tpl_file,layer,name in zip(self.mlt_df.suffix,
        #                                 self.mlt_df.tpl,self.mlt_df.layer,
        #                                  self.mlt_df.prefix):
        par_dfs = {}
        for mlt_file in mlt_files:
            # each multiplier file must map to exactly one suffix, tpl file,
            # prefix name and attribute name - anything else is a setup error
            suffixes = mlt_df.loc[mlt_df.mlt_file == mlt_file, "suffix"]
            if suffixes.unique().shape[0] != 1:
                self.logger.lraise("wrong number of suffixes for {0}" \
                                   .format(mlt_file))
            suffix = suffixes.iloc[0]
            tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
            if tpl_files.unique().shape[0] != 1:
                self.logger.lraise("wrong number of tpl_files for {0}" \
                                   .format(mlt_file))
            tpl_file = tpl_files.iloc[0]
            layers = mlt_df.loc[mlt_df.mlt_file == mlt_file, "layer"]
            # if layers.unique().shape[0] != 1:
            #    self.logger.lraise("wrong number of layers for {0}"\
            #                       .format(mlt_file))
            # NOTE(review): multiple layers per mlt_file are tolerated (check
            # above is commented out) and only the first layer is used
            layer = layers.iloc[0]
            names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "prefix"]
            if names.unique().shape[0] != 1:
                self.logger.lraise("wrong number of names for {0}" \
                                   .format(mlt_file))
            name = names.iloc[0]
            attr_names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "attr_name"]
            if attr_names.unique().shape[0] != 1:
                self.logger.lraise("wrong number of attr_names for {0}".format(mlt_file))
            attr_name = attr_names.iloc[0]
            # ib = self.k_zone_dict[layer]
            # df stays None for suffixes handled later (pp/kl) - see below
            df = None
            if suffix == self.cn_suffix:
                self.log("writing const tpl:{0}".format(tpl_file))
                # df = self.write_const_tpl(name,tpl_file,self.m.bas6.ibound[layer].array)
                try:
                    df = write_const_tpl(name, os.path.join(self.m.model_ws, tpl_file), self.cn_suffix,
                                         self.m.bas6.ibound[layer].array, (self.m.nrow, self.m.ncol), self.m.sr)
                except Exception as e:
                    self.logger.lraise("error writing const template: {0}".format(str(e)))
                self.log("writing const tpl:{0}".format(tpl_file))
            elif suffix == self.gr_suffix:
                self.log("writing grid tpl:{0}".format(tpl_file))
                # df = self.write_grid_tpl(name,tpl_file,self.m.bas6.ibound[layer].array)
                try:
                    df = write_grid_tpl(name, os.path.join(self.m.model_ws, tpl_file), self.gr_suffix,
                                        self.m.bas6.ibound[layer].array, (self.m.nrow, self.m.ncol), self.m.sr)
                except Exception as e:
                    self.logger.lraise("error writing grid template: {0}".format(str(e)))
                self.log("writing grid tpl:{0}".format(tpl_file))
            elif suffix == self.zn_suffix:
                self.log("writing zone tpl:{0}".format(tpl_file))
                # k_zone_dict may be either {layer: zone_array} or a nested
                # {par_prefix: {layer: zone_array}} - resolve to the flat form
                if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):  # check is dict of dicts
                    if attr_name in [p.split('.')[-1] for p in self.k_zone_dict.keys()]:
                        k_zone_dict = next(k_dict for p, k_dict in self.k_zone_dict.items()
                                           if p.split('.')[-1] == attr_name)  # get dict relating to parameter prefix
                    else:
                        assert 'general_zn' in self.k_zone_dict.keys(), \
                            "Neither {0} nor 'general_zn' are in k_zone_dict keys: {1}".format(attr_name,
                                                                                              self.k_zone_dict.keys())
                        k_zone_dict = self.k_zone_dict['general_zn']
                else:
                    k_zone_dict = self.k_zone_dict
                # df = self.write_zone_tpl(self.m, name, tpl_file, self.k_zone_dict[layer], self.zn_suffix, self.logger)
                try:
                    df = write_zone_tpl(name, os.path.join(self.m.model_ws, tpl_file), self.zn_suffix,
                                        k_zone_dict[layer], (self.m.nrow, self.m.ncol), self.m.sr)
                except Exception as e:
                    self.logger.lraise("error writing zone template: {0}".format(str(e)))
                self.log("writing zone tpl:{0}".format(tpl_file))
            if df is None:
                continue
            if suffix not in par_dfs:
                par_dfs[suffix] = [df]
            else:
                par_dfs[suffix].append(df)
        # one combined par dataframe per suffix
        for suf, dfs in par_dfs.items():
            self.par_dfs[suf] = pd.concat(dfs)
        # pilot point, grid geostruct and KL suffixes get dedicated prep
        if self.pp_suffix in mlt_df.suffix.values:
            self.log("setting up pilot point process")
            self._pp_prep(mlt_df)
            self.log("setting up pilot point process")
        if self.gr_suffix in mlt_df.suffix.values:
            self.log("setting up grid process")
            self._grid_prep()
            self.log("setting up grid process")
        if self.kl_suffix in mlt_df.suffix.values:
            self.log("setting up kl process")
            self._kl_prep(mlt_df)
            self.log("setting up kl process")
        mlt_df.to_csv(os.path.join(self.m.model_ws, "arr_pars.csv"))
        # write all-ones multiplier arrays so the test run below is a no-op
        ones = np.ones((self.m.nrow, self.m.ncol))
        for mlt_file in mlt_df.mlt_file.unique():
            self.log("save test mlt array {0}".format(mlt_file))
            np.savetxt(os.path.join(self.m.model_ws, mlt_file),
                       ones, fmt="%15.6E")
            self.log("save test mlt array {0}".format(mlt_file))
            tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
            if tpl_files.unique().shape[0] != 1:
                self.logger.lraise("wrong number of tpl_files for {0}" \
                                   .format(mlt_file))
            tpl_file = tpl_files.iloc[0]
            # tpl_file may have been set to NaN (e.g. KL pars) - skip those
            if pd.notnull(tpl_file):
                self.tpl_files.append(tpl_file)
                self.in_files.append(mlt_file)
        # for tpl_file,mlt_file in zip(mlt_df.tpl_file,mlt_df.mlt_file):
        #     if pd.isnull(tpl_file):
        #         continue
        #     self.tpl_files.append(tpl_file)
        #     self.in_files.append(mlt_file)
        # test run the multiplier process from inside the model workspace
        # NOTE(review): the chdir("..") assumes model_ws is exactly one
        # directory below the cwd - confirm for nested workspaces
        os.chdir(self.m.model_ws)
        try:
            apply_array_pars()
        except Exception as e:
            os.chdir("..")
            self.logger.lraise("error test running apply_array_pars():{0}".
                               format(str(e)))
        os.chdir("..")
        line = "pyemu.helpers.apply_array_pars()\n"
        self.logger.statement("forward_run line:{0}".format(line))
        self.frun_pre_lines.append(line)
def _setup_observations(self):
""" main entry point for setting up observations
"""
obs_methods = [self._setup_water_budget_obs, self._setup_hyd,
self._setup_smp, self._setup_hob, self._setup_hds,
self._setup_sfr_obs]
obs_types = ["mflist water budget obs", "hyd file",
"external obs-sim smp files", "hob", "hds", "sfr"]
self.obs_dfs = {}
for obs_method, obs_type in zip(obs_methods, obs_types):
self.log("processing obs type {0}".format(obs_type))
obs_method()
self.log("processing obs type {0}".format(obs_type))
    def draw(self, num_reals=100, sigma_range=6,use_specsim=False, scale_offset=True):
        """ draw from the geostatistically-implied parameter covariance matrix

        Args:
            num_reals (`int`): number of realizations to generate. Default is 100
            sigma_range (`float`): number of standard deviations represented by
                the parameter bounds. Default is 6.
            use_specsim (`bool`): flag to use spectral simulation for grid-based
                parameters. Requires a regular grid but is wicked fast. Default is False
            scale_offset (`bool`, optional): flag to apply scale and offset to parameter
                bounds when calculating variances - this is passed through to
                `pyemu.Cov.from_parameter_data`. Default is True.

        Note:
            operates on parameters by groups to avoid having to construct a very large
            covariance matrix for problems with more the 30K parameters.

            uses `helpers.geostatitical_draw()`

        Returns:
            `pyemu.ParameterEnsemble`: The realized parameter ensemble
        """
        self.log("drawing realizations")
        # struct_dict maps geostruct -> list of per-group par dataframes;
        # grouping keeps each covariance matrix small
        struct_dict = {}
        gr_par_pe = None
        if self.pp_suffix in self.par_dfs.keys():
            pp_df = self.par_dfs[self.pp_suffix]
            pp_dfs = []
            for pargp in pp_df.pargp.unique():
                gp_df = pp_df.loc[pp_df.pargp == pargp, :]
                # a parnme can appear in multiple rows - keep one per par
                p_df = gp_df.drop_duplicates(subset="parnme")
                pp_dfs.append(p_df)
            # pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
            struct_dict[self.pp_geostruct] = pp_dfs
        if self.gr_suffix in self.par_dfs.keys():
            gr_df = self.par_dfs[self.gr_suffix]
            if not use_specsim:
                gr_dfs = []
                for pargp in gr_df.pargp.unique():
                    gp_df = gr_df.loc[gr_df.pargp == pargp, :]
                    p_df = gp_df.drop_duplicates(subset="parnme")
                    gr_dfs.append(p_df)
                # gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
                struct_dict[self.grid_geostruct] = gr_dfs
            else:
                # spectral simulation requires a regular (uniform-spacing) grid
                if not pyemu.geostats.SpecSim2d.grid_is_regular(self.m.dis.delr.array, self.m.dis.delc.array):
                    self.logger.lraise("draw() error: can't use spectral simulation with irregular grid")
                # recover (i,j) from the parameter name suffix digits
                # NOTE(review): assumes grid par names end with 3-digit i and
                # 3-digit j indices - confirm against the grid tpl writer
                gr_df.loc[:, "i"] = gr_df.parnme.apply(lambda x: int(x[-6:-3]))
                gr_df.loc[:, "j"] = gr_df.parnme.apply(lambda x: int(x[-3:]))
                if gr_df.i.max() > self.m.nrow - 1 or gr_df.i.min() < 0:
                    self.logger.lraise("draw(): error parsing grid par names for 'i' index")
                if gr_df.j.max() > self.m.ncol - 1 or gr_df.j.min() < 0:
                    self.logger.lraise("draw(): error parsing grid par names for 'j' index")
                self.log("spectral simulation for grid-scale pars")
                ss = pyemu.geostats.SpecSim2d(delx=self.m.dis.delr.array, dely=self.m.dis.delc.array,
                                              geostruct=self.grid_geostruct)
                gr_par_pe = ss.grid_par_ensemble_helper(pst=self.pst, gr_df=gr_df, num_reals=num_reals,
                                                        sigma_range=sigma_range, logger=self.logger)
                self.log("spectral simulation for grid-scale pars")
        if "temporal_list" in self.par_dfs.keys():
            bc_df = self.par_dfs["temporal_list"]
            # temporal correlation is 1-D along elapsed model time (days)
            bc_df.loc[:, "y"] = 0
            bc_df.loc[:, "x"] = bc_df.timedelta.apply(lambda x: x.days)
            bc_dfs = []
            for pargp in bc_df.pargp.unique():
                gp_df = bc_df.loc[bc_df.pargp == pargp, :]
                p_df = gp_df.drop_duplicates(subset="parnme")
                # print(p_df)
                bc_dfs.append(p_df)
            # bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
            struct_dict[self.temporal_list_geostruct] = bc_dfs
        if "spatial_list" in self.par_dfs.keys():
            bc_df = self.par_dfs["spatial_list"]
            bc_dfs = []
            for pargp in bc_df.pargp.unique():
                gp_df = bc_df.loc[bc_df.pargp == pargp, :]
                # p_df = gp_df.drop_duplicates(subset="parnme")
                # print(p_df)
                bc_dfs.append(gp_df)
            struct_dict[self.spatial_list_geostruct] = bc_dfs
        pe = geostatistical_draws(self.pst,struct_dict=struct_dict,num_reals=num_reals,
                                  sigma_range=sigma_range,scale_offset=scale_offset)
        # overwrite grid pars with the spectral-simulation realizations
        if gr_par_pe is not None:
            pe.loc[:, gr_par_pe.columns] = gr_par_pe.values
        self.log("drawing realizations")
        return pe
    def build_prior(self, fmt="ascii", filename=None, droptol=None, chunk=None,
                    sigma_range=6):
        """ build and optionally save the prior parameter covariance matrix.

        Args:
            fmt (`str`, optional): the format to save the cov matrix. Options are "ascii","binary","uncfile", "coo".
                Default is "ascii". If "none" (lower case string, not None), then no file is created.
            filename (`str`, optional): the filename to save the prior cov matrix to. If None, the name is formed using
                model nam_file name. Default is None.
            droptol (`float`, optional): tolerance for dropping near-zero values when writing compressed binary.
                Default is None.
            chunk (`int`, optional): chunk size to write in a single pass - for binary only. Default
                is None (no chunking).
            sigma_range (`float`): number of standard deviations represented by the parameter bounds. Default
                is 6.

        Returns:
            `pyemu.Cov`: the full prior parameter covariance matrix, generated by processing parameters by
            groups
        """
        fmt = fmt.lower()
        acc_fmts = ["ascii", "binary", "uncfile", "none", "coo"]
        if fmt not in acc_fmts:
            self.logger.lraise("unrecognized prior save 'fmt':{0}, options are: {1}".
                               format(fmt, ','.join(acc_fmts)))
        self.log("building prior covariance matrix")
        # struct_dict maps geostruct -> list of per-group par dataframes so the
        # prior can be assembled group-by-group instead of all at once
        struct_dict = {}
        if self.pp_suffix in self.par_dfs.keys():
            pp_df = self.par_dfs[self.pp_suffix]
            pp_dfs = []
            for pargp in pp_df.pargp.unique():
                gp_df = pp_df.loc[pp_df.pargp == pargp, :]
                # a parnme can appear in multiple rows - keep one per par
                p_df = gp_df.drop_duplicates(subset="parnme")
                pp_dfs.append(p_df)
            # pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
            struct_dict[self.pp_geostruct] = pp_dfs
        if self.gr_suffix in self.par_dfs.keys():
            gr_df = self.par_dfs[self.gr_suffix]
            gr_dfs = []
            for pargp in gr_df.pargp.unique():
                gp_df = gr_df.loc[gr_df.pargp == pargp, :]
                p_df = gp_df.drop_duplicates(subset="parnme")
                gr_dfs.append(p_df)
            # gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
            struct_dict[self.grid_geostruct] = gr_dfs
        if "temporal_list" in self.par_dfs.keys():
            bc_df = self.par_dfs["temporal_list"]
            # temporal correlation is 1-D along elapsed model time (days)
            bc_df.loc[:, "y"] = 0
            bc_df.loc[:, "x"] = bc_df.timedelta.apply(lambda x: x.days)
            bc_dfs = []
            for pargp in bc_df.pargp.unique():
                gp_df = bc_df.loc[bc_df.pargp == pargp, :]
                p_df = gp_df.drop_duplicates(subset="parnme")
                # print(p_df)
                bc_dfs.append(p_df)
            # bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
            struct_dict[self.temporal_list_geostruct] = bc_dfs
        if "spatial_list" in self.par_dfs.keys():
            bc_df = self.par_dfs["spatial_list"]
            bc_dfs = []
            for pargp in bc_df.pargp.unique():
                gp_df = bc_df.loc[bc_df.pargp == pargp, :]
                # p_df = gp_df.drop_duplicates(subset="parnme")
                # print(p_df)
                bc_dfs.append(gp_df)
            struct_dict[self.spatial_list_geostruct] = bc_dfs
        # hfb pars share the spatial list geostruct
        if "hfb" in self.par_dfs.keys():
            if self.spatial_list_geostruct in struct_dict.keys():
                struct_dict[self.spatial_list_geostruct].append(self.par_dfs["hfb"])
            else:
                struct_dict[self.spatial_list_geostruct] = [self.par_dfs["hfb"]]
        if "sfr" in self.par_dfs.keys():
            self.logger.warn("geospatial prior not implemented for SFR pars")
        # fall back to a diagonal prior from the bounds if no geostructs apply
        if len(struct_dict) > 0:
            cov = pyemu.helpers.geostatistical_prior_builder(self.pst,
                                                             struct_dict=struct_dict,
                                                             sigma_range=sigma_range)
        else:
            cov = pyemu.Cov.from_parameter_data(self.pst, sigma_range=sigma_range)
        if filename is None:
            filename = os.path.join(self.m.model_ws, self.pst_name + ".prior.cov")
        if fmt != "none":
            self.logger.statement("saving prior covariance matrix to file {0}".format(filename))
            if fmt == 'ascii':
                cov.to_ascii(filename)
            elif fmt == 'binary':
                cov.to_binary(filename, droptol=droptol, chunk=chunk)
            elif fmt == 'uncfile':
                cov.to_uncfile(filename)
            elif fmt == 'coo':
                cov.to_coo(filename, droptol=droptol, chunk=chunk)
        self.log("building prior covariance matrix")
        return cov
    def build_pst(self, filename=None):
        """ build the pest control file using the parameters and
        observations.

        Args:
            filename (`str`): the filename to save the contorl file to. If None, the
                name if formed from the model namfile name. Default is None. The control
                is saved in the `PstFromFlopy.m.model_ws` directory.
        Note:
            calls pyemu.Pst.from_io_files

            calls PESTCHEK
        """
        self.logger.statement("changing dir in to {0}".format(self.m.model_ws))
        os.chdir(self.m.model_ws)
        # work on copies so the instance lists are only extended deliberately
        tpl_files = copy.deepcopy(self.tpl_files)
        in_files = copy.deepcopy(self.in_files)
        try:
            # sweep up any tpl/ins files written directly into model_ws that
            # were not registered through the setup methods
            files = os.listdir('.')
            new_tpl_files = [f for f in files if f.endswith(".tpl") and f not in tpl_files]
            new_in_files = [f.replace(".tpl", '') for f in new_tpl_files]
            tpl_files.extend(new_tpl_files)
            in_files.extend(new_in_files)
            ins_files = [f for f in files if f.endswith(".ins")]
            out_files = [f.replace(".ins", '') for f in ins_files]
            for tpl_file, in_file in zip(tpl_files, in_files):
                if tpl_file not in self.tpl_files:
                    self.tpl_files.append(tpl_file)
                    self.in_files.append(in_file)
            for ins_file, out_file in zip(ins_files, out_files):
                if ins_file not in self.ins_files:
                    self.ins_files.append(ins_file)
                    self.out_files.append(out_file)
            self.log("instantiating control file from i/o files")
            self.logger.statement("tpl files: {0}".format(",".join(self.tpl_files)))
            self.logger.statement("ins files: {0}".format(",".join(self.ins_files)))
            pst = pyemu.Pst.from_io_files(tpl_files=self.tpl_files,
                                          in_files=self.in_files,
                                          ins_files=self.ins_files,
                                          out_files=self.out_files)
            self.log("instantiating control file from i/o files")
        except Exception as e:
            os.chdir("..")
            self.logger.lraise("error build Pst:{0}".format(str(e)))
        os.chdir('..')
        # more customization here
        par = pst.parameter_data
        # first pass: copy every known par attribute from the setup dataframes
        for name, df in self.par_dfs.items():
            if "parnme" not in df.columns:
                continue
            df.index = df.parnme
            for col in par.columns:
                if col in df.columns:
                    par.loc[df.parnme, col] = df.loc[:, col]
        # generic default multiplier bounds, then re-apply any specific
        # bounds/groups the setup dataframes carry
        par.loc[:, "parubnd"] = 10.0
        par.loc[:, "parlbnd"] = 0.1
        for name, df in self.par_dfs.items():
            if "parnme" not in df:
                continue
            df.index = df.parnme
            for col in ["parubnd", "parlbnd", "pargp"]:
                if col in df.columns:
                    par.loc[df.index, col] = df.loc[:, col]
        # prefix-based bound overrides: module defaults first, then user's
        for tag, [lw, up] in wildass_guess_par_bounds_dict.items():
            par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parubnd"] = up
            par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parlbnd"] = lw
        if self.par_bounds_dict is not None:
            for tag, [lw, up] in self.par_bounds_dict.items():
                par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parubnd"] = up
                par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parlbnd"] = lw
        # copy observation attributes (weights, groups, values) the same way
        obs = pst.observation_data
        for name, df in self.obs_dfs.items():
            if "obsnme" not in df.columns:
                continue
            df.index = df.obsnme
            for col in df.columns:
                if col in obs.columns:
                    obs.loc[df.obsnme, col] = df.loc[:, col]
        self.pst_name = self.m.name + ".pst"
        pst.model_command = ["python forward_run.py"]
        # noptmax=0 so the first pest run is a single forward-run test
        pst.control_data.noptmax = 0
        self.log("writing forward_run.py")
        self.write_forward_run()
        self.log("writing forward_run.py")
        if filename is None:
            filename = os.path.join(self.m.model_ws, self.pst_name)
        self.logger.statement("writing pst {0}".format(filename))
        pst.write(filename)
        self.pst = pst
        self.log("running pestchek on {0}".format(self.pst_name))
        # pestchek is best-effort: a failure is only warned about
        # NOTE(review): if pestchek fails before creating pestchek.stdout,
        # the open() below will raise while cwd is still model_ws - confirm
        os.chdir(self.m.model_ws)
        try:
            pyemu.os_utils.run("pestchek {0} >pestchek.stdout".format(self.pst_name))
        except Exception as e:
            self.logger.warn("error running pestchek:{0}".format(str(e)))
        for line in open("pestchek.stdout"):
            self.logger.statement("pestcheck:{0}".format(line.strip()))
        os.chdir("..")
        self.log("running pestchek on {0}".format(self.pst_name))
def _add_external(self):
""" add external (existing) template files and/or instruction files to the
Pst instance
"""
if self.external_tpl_in_pairs is not None:
if not isinstance(self.external_tpl_in_pairs, list):
external_tpl_in_pairs = [self.external_tpl_in_pairs]
for tpl_file, in_file in self.external_tpl_in_pairs:
if not os.path.exists(tpl_file):
self.logger.lraise("couldn't find external tpl file:{0}". \
format(tpl_file))
self.logger.statement("external tpl:{0}".format(tpl_file))
shutil.copy2(tpl_file, os.path.join(self.m.model_ws,
os.path.split(tpl_file)[-1]))
if os.path.exists(in_file):
shutil.copy2(in_file, os.path.join(self.m.model_ws,
os.path.split(in_file)[-1]))
if self.external_ins_out_pairs is not None:
if not isinstance(self.external_ins_out_pairs, list):
external_ins_out_pairs = [self.external_ins_out_pairs]
for ins_file, out_file in self.external_ins_out_pairs:
if not os.path.exists(ins_file):
self.logger.lraise("couldn't find external ins file:{0}". \
format(ins_file))
self.logger.statement("external ins:{0}".format(ins_file))
shutil.copy2(ins_file, os.path.join(self.m.model_ws,
os.path.split(ins_file)[-1]))
if os.path.exists(out_file):
shutil.copy2(out_file, os.path.join(self.m.model_ws,
os.path.split(out_file)[-1]))
self.logger.warn("obs listed in {0} will have values listed in {1}"
.format(ins_file, out_file))
else:
self.logger.warn("obs listed in {0} will have generic values")
def write_forward_run(self):
""" write the forward run script forward_run.py
Note:
This method can be called repeatedly, especially after any
changed to the pre- and/or post-processing routines.
"""
with open(os.path.join(self.m.model_ws, self.forward_run_file), 'w') as f:
f.write("import os\nimport multiprocessing as mp\nimport numpy as np" + \
"\nimport pandas as pd\nimport flopy\n")
f.write("import pyemu\n")
f.write("def main():\n")
f.write("\n")
s = " "
for ex_imp in self.extra_forward_imports:
f.write(s + 'import {0}\n'.format(ex_imp))
for tmp_file in self.tmp_files:
f.write(s + "try:\n")
f.write(s + " os.remove('{0}')\n".format(tmp_file))
f.write(s + "except Exception as e:\n")
f.write(s + " print('error removing tmp file:{0}')\n".format(tmp_file))
for line in self.frun_pre_lines:
f.write(s + line + '\n')
for line in self.frun_model_lines:
f.write(s + line + '\n')
for line in self.frun_post_lines:
f.write(s + line + '\n')
f.write("\n")
f.write("if __name__ == '__main__':\n")
f.write(" mp.freeze_support()\n main()\n\n")
def _parse_k(self, k, vals):
""" parse the iterable from a property or boundary condition argument
"""
try:
k = int(k)
except:
pass
else:
assert k in vals, "k {0} not in vals".format(k)
return [k]
if k is None:
return vals
else:
try:
k_vals = vals[k]
except Exception as e:
raise Exception("error slicing vals with {0}:{1}".
format(k, str(e)))
return k_vals
def _parse_pakattr(self, pakattr):
""" parse package-iterable pairs from a property or boundary condition
argument
"""
raw = pakattr.lower().split('.')
if len(raw) != 2:
self.logger.lraise("pakattr is wrong:{0}".format(pakattr))
pakname = raw[0]
attrname = raw[1]
pak = self.m.get_package(pakname)
if pak is None:
if pakname == "extra":
self.logger.statement("'extra' pak detected:{0}".format(pakattr))
ud = flopy.utils.Util3d(self.m, (self.m.nlay, self.m.nrow, self.m.ncol), np.float32, 1.0, attrname)
return "extra", ud
self.logger.lraise("pak {0} not found".format(pakname))
if hasattr(pak, attrname):
attr = getattr(pak, attrname)
return pak, attr
elif hasattr(pak, "stress_period_data"):
dtype = pak.stress_period_data.dtype
if attrname not in dtype.names:
self.logger.lraise("attr {0} not found in dtype.names for {1}.stress_period_data". \
format(attrname, pakname))
attr = pak.stress_period_data
return pak, attr, attrname
# elif hasattr(pak,'hfb_data'):
# dtype = pak.hfb_data.dtype
# if attrname not in dtype.names:
# self.logger.lraise('attr {0} not found in dtypes.names for {1}.hfb_data. Thanks for playing.'.\
# format(attrname,pakname))
# attr = pak.hfb_data
# return pak, attr, attrname
else:
self.logger.lraise("unrecognized attr:{0}".format(attrname))
def _setup_list_pars(self):
""" main entry point for setting up list multiplier
parameters
"""
tdf = self._setup_temporal_list_pars()
sdf = self._setup_spatial_list_pars()
if tdf is None and sdf is None:
return
os.chdir(self.m.model_ws)
try:
apply_list_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise("error test running apply_list_pars():{0}".format(str(e)))
os.chdir('..')
line = "pyemu.helpers.apply_list_pars()\n"
self.logger.statement("forward_run line:{0}".format(line))
self.frun_pre_lines.append(line)
    def _setup_temporal_list_pars(self):
        """Set up temporal (stress-period-varying) list multiplier parameters.

        One multiplier parameter is created per (package attribute, stress
        period) combination.  Writes ``temporal_list_pars.dat`` (all-ones
        test values) plus its template file into the model workspace and
        stores the par dataframe in ``self.par_dfs["temporal_list"]``.

        Returns:
            True if parameters were set up, None if there was nothing to do.
        """
        if len(self.temporal_list_props) == 0:
            return
        self.log("processing temporal_list_props")
        bc_filenames = []
        bc_cols = []
        bc_pak = []
        bc_k = []
        bc_dtype_names = []
        bc_parnme = []
        # a single [pakattr, kper] pair may be passed bare - normalize it
        # NOTE(review): this length-2 heuristic misfires for a list of
        # exactly two pairs whose first element is itself a list - confirm
        if len(self.temporal_list_props) == 2:
            if not isinstance(self.temporal_list_props[0], list):
                self.temporal_list_props = [self.temporal_list_props]
        for pakattr, k_org in self.temporal_list_props:
            pak, attr, col = self._parse_pakattr(pakattr)
            # k here selects stress periods (0..nper-1)
            k_parse = self._parse_k(k_org, np.arange(self.m.nper))
            c = self._get_count(pakattr)
            for k in k_parse:
                # copy the external list file aside and record its metadata
                bc_filenames.append(self._list_helper(k, pak, attr, col))
                bc_cols.append(col)
                pak_name = pak.name[0].lower()
                bc_pak.append(pak_name)
                bc_k.append(k)
                bc_dtype_names.append(','.join(attr.dtype.names))
                bc_parnme.append("{0}{1}_{2:03d}".format(pak_name, col, c))
        df = pd.DataFrame({"filename": bc_filenames, "col": bc_cols,
                           "kper": bc_k, "pak": bc_pak,
                           "dtype_names": bc_dtype_names,
                           "parnme": bc_parnme})
        # elapsed-time coordinates for each stress period (for geostats)
        tds = pd.to_timedelta(np.cumsum(self.m.dis.perlen.array), unit='d')
        dts = pd.to_datetime(self.m._start_datetime) + tds
        df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
        df.loc[:, "timedelta"] = df.kper.apply(lambda x: tds[x])
        df.loc[:, "val"] = 1.0
        # df.loc[:,"kper"] = df.kper.apply(np.int)
        # df.loc[:,"parnme"] = df.apply(lambda x: "{0}{1}_{2:03d}".format(x.pak,x.col,x.kper),axis=1)
        df.loc[:, "tpl_str"] = df.parnme.apply(lambda x: "~  {0}  ~".format(x))
        df.loc[:, "list_org"] = self.list_org
        df.loc[:, "model_ext_path"] = self.m.external_path
        df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split('_')[0])
        names = ["filename", "dtype_names", "list_org", "model_ext_path", "col", "kper", "pak", "val"]
        # all-ones test file, then the same table with tpl markers as values
        df.loc[:, names]. \
            to_csv(os.path.join(self.m.model_ws, "temporal_list_pars.dat"), sep=' ')
        df.loc[:, "val"] = df.tpl_str
        tpl_name = os.path.join(self.m.model_ws, 'temporal_list_pars.dat.tpl')
        # f_tpl =  open(tpl_name,'w')
        # f_tpl.write("ptf ~\n")
        # f_tpl.flush()
        # df.loc[:,names].to_csv(f_tpl,sep=' ',quotechar=' ')
        # f_tpl.write("index ")
        # f_tpl.write(df.loc[:,names].to_string(index_names=True))
        # f_tpl.close()
        _write_df_tpl(tpl_name, df.loc[:, names], sep=' ', index_label="index", quotechar=" ")
        self.par_dfs["temporal_list"] = df
        self.log("processing temporal_list_props")
        return True
    def _setup_spatial_list_pars(self):
        """Set up spatial (cell-by-cell) list multiplier parameters.

        One multiplier parameter is created per (package attribute, model
        cell).  Each parameterized attribute is restricted to a single
        layer.  Writes per-package all-ones multiplier csv files plus
        template files, records file metadata in ``spatial_list_pars.dat``
        and stores the par dataframe in ``self.par_dfs["spatial_list"]``.

        Returns:
            True if parameters were set up, None if there was nothing to do.
        """
        if len(self.spatial_list_props) == 0:
            return
        self.log("processing spatial_list_props")
        bc_filenames = []
        bc_cols = []
        bc_pak = []
        bc_k = []
        bc_dtype_names = []
        # NOTE(review): bc_parnme is populated nowhere in this method
        bc_parnme = []
        # a single [pakattr, k] pair may be passed bare - normalize it
        if len(self.spatial_list_props) == 2:
            if not isinstance(self.spatial_list_props[0], list):
                self.spatial_list_props = [self.spatial_list_props]
        for pakattr, k_org in self.spatial_list_props:
            pak, attr, col = self._parse_pakattr(pakattr)
            # k selects a layer here; only a single layer is supported
            k_parse = self._parse_k(k_org, np.arange(self.m.nlay))
            if len(k_parse) > 1:
                self.logger.lraise("spatial_list_pars error: each set of spatial list pars can only be applied " + \
                                   "to a single layer (e.g. [wel.flux,0].\n" + \
                                   "You passed [{0},{1}], implying broadcasting to layers {2}".
                                   format(pakattr, k_org, k_parse))
            # # horrible special case for HFB since it cannot vary over time
            # if type(pak) != flopy.modflow.mfhfb.ModflowHfb:
            # the same cell multipliers apply to every stress period file
            for k in range(self.m.nper):
                bc_filenames.append(self._list_helper(k, pak, attr, col))
                bc_cols.append(col)
                pak_name = pak.name[0].lower()
                bc_pak.append(pak_name)
                bc_k.append(k_parse[0])
                bc_dtype_names.append(','.join(attr.dtype.names))
        info_df = pd.DataFrame({"filename": bc_filenames, "col": bc_cols,
                                "k": bc_k, "pak": bc_pak,
                                "dtype_names": bc_dtype_names})
        info_df.loc[:, "list_mlt"] = self.list_mlt
        info_df.loc[:, "list_org"] = self.list_org
        info_df.loc[:, "model_ext_path"] = self.m.external_path
        # check that all files for a given package have the same number of entries
        info_df.loc[:, "itmp"] = np.NaN
        pak_dfs = {}
        for pak in info_df.pak.unique():
            df_pak = info_df.loc[info_df.pak == pak, :]
            itmp = []
            for filename in df_pak.filename:
                names = df_pak.dtype_names.iloc[0].split(',')
                # mif pak != 'hfb6':
                fdf = pd.read_csv(os.path.join(self.m.model_ws, filename),
                                  delim_whitespace=True, header=None, names=names)
                # convert 1-based MODFLOW indices to 0-based
                for c in ['k', 'i', 'j']:
                    fdf.loc[:, c] -= 1
                # else:
                #     # need to navigate the HFB file to skip both comments and header line
                #     skiprows = sum(
                #         [1 if i.strip().startswith('#') else 0
                #          for i in open(os.path.join(self.m.model_ws, filename), 'r').readlines()]) + 1
                #     fdf = pd.read_csv(os.path.join(self.m.model_ws, filename),
                #                       delim_whitespace=True, header=None, names=names, skiprows=skiprows ).dropna()
                #
                #     for c in ['k', 'irow1','icol1','irow2','icol2']:
                #         fdf.loc[:, c] -= 1
                itmp.append(fdf.shape[0])
                # NOTE(review): only the last stress period's fdf survives per
                # package - relies on every period having the same cells
                pak_dfs[pak] = fdf
            info_df.loc[info_df.pak == pak, "itmp"] = itmp
            if np.unique(np.array(itmp)).shape[0] != 1:
                info_df.to_csv("spatial_list_trouble.csv")
                self.logger.lraise("spatial_list_pars() error: must have same number of " + \
                                   "entries for every stress period for {0}".format(pak))
        # make the pak dfs have unique model indices
        for pak, df in pak_dfs.items():
            # if pak != 'hfb6':
            df.loc[:, "idx"] = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k, x.i, x.j), axis=1)
            # else:
            #     df.loc[:, "idx"] = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}{2:04.0f}{2:04.0f}".format(x.k, x.irow1, x.icol1,
            #                                                                       x.irow2, x.icol2), axis=1)
            if df.idx.unique().shape[0] != df.shape[0]:
                self.logger.warn("duplicate entries in list pak {0}...collapsing".format(pak))
                df.drop_duplicates(subset="idx", inplace=True)
            df.index = df.idx
            pak_dfs[pak] = df
        # write template files - find which cols are parameterized...
        par_dfs = []
        for pak, df in pak_dfs.items():
            pak_df = info_df.loc[info_df.pak == pak, :]
            # reset all non-index cols to 1.0
            for col in df.columns:
                if col not in ['k', 'i', 'j', 'inode', 'irow1', 'icol1', 'irow2', 'icol2']:
                    df.loc[:, col] = 1.0
            in_file = os.path.join(self.list_mlt, pak + ".csv")
            tpl_file = os.path.join(pak + ".csv.tpl")
            # save an all "ones" mult df for testing
            df.to_csv(os.path.join(self.m.model_ws, in_file), sep=' ')
            parnme, pargp = [], []
            # if pak != 'hfb6':
            # cell-center coords for geostatistical correlation of the pars
            x = df.apply(lambda x: self.m.sr.xcentergrid[int(x.i), int(x.j)], axis=1).values
            y = df.apply(lambda x: self.m.sr.ycentergrid[int(x.i), int(x.j)], axis=1).values
            # else:
            #     # note -- for HFB6, only row and col for node 1
            #     x = df.apply(lambda x: self.m.sr.xcentergrid[int(x.irow1),int(x.icol1)],axis=1).values
            #     y = df.apply(lambda x: self.m.sr.ycentergrid[int(x.irow1),int(x.icol1)],axis=1).values
            for col in pak_df.col.unique():
                col_df = pak_df.loc[pak_df.col == col]
                k_vals = col_df.k.unique()
                npar = col_df.k.apply(lambda x: x in k_vals).shape[0]
                if npar == 0:
                    continue
                # par names encode package, column and the cell idx string
                names = df.index.map(lambda x: "{0}{1}{2}".format(pak[0], col[0], x))
                # tpl markers only for cells in the parameterized layer(s)
                df.loc[:, col] = names.map(lambda x: "~   {0}   ~".format(x))
                df.loc[df.k.apply(lambda x: x not in k_vals), col] = 1.0
                par_df = pd.DataFrame({"parnme": names, "x": x, "y": y, "k": df.k.values}, index=names)
                par_df = par_df.loc[par_df.k.apply(lambda x: x in k_vals)]
                if par_df.shape[0] == 0:
                    self.logger.lraise("no parameters found for spatial list k,pak,attr {0}, {1}, {2}".
                                       format(k_vals, pak, col))
                par_df.loc[:, "pargp"] = df.k.apply(lambda x: "{0}{1}_k{2:02.0f}".format(pak, col, int(x))).values
                par_df.loc[:, "tpl_file"] = tpl_file
                par_df.loc[:, "in_file"] = in_file
                par_dfs.append(par_df)
            # with open(os.path.join(self.m.model_ws,tpl_file),'w') as f:
            #     f.write("ptf ~\n")
            #     f.flush()
            #     # df.to_csv(f)
            #     f.write("index ")
            #     f.write(df.to_string(index_names=False)+'\n')
            _write_df_tpl(os.path.join(self.m.model_ws, tpl_file), df, sep=' ', quotechar=" ", index_label="index")
            self.tpl_files.append(tpl_file)
            self.in_files.append(in_file)
        par_df = pd.concat(par_dfs)
        self.par_dfs["spatial_list"] = par_df
        info_df.to_csv(os.path.join(self.m.model_ws, "spatial_list_pars.dat"), sep=' ')
        self.log("processing spatial_list_props")
        return True
def _list_helper(self, k, pak, attr, col):
""" helper to setup list multiplier parameters for a given
k, pak, attr set.
"""
# special case for horrible HFB6 exception
# if type(pak) == flopy.modflow.mfhfb.ModflowHfb:
# filename = pak.file_name[0]
# else:
filename = attr.get_filename(k)
filename_model = os.path.join(self.m.external_path, filename)
shutil.copy2(os.path.join(self.m.model_ws, filename_model),
os.path.join(self.m.model_ws, self.list_org, filename))
return filename_model
    def _setup_hds(self):
        """ setup modflow head save file observations for given kper (zero-based
        stress period index) and k (zero-based layer index) pairs using the
        kperk argument.

        Requires the OC package to be saving heads.  Copies the existing hds
        file from the original workspace, builds instruction files via
        ``gw_utils.setup_hds_obs`` and registers the post-processing line
        and tmp file.
        """
        if self.hds_kperk is None or len(self.hds_kperk) == 0:
            return
        from .gw_utils import setup_hds_obs
        # if len(self.hds_kperk) == 2:
        #     try:
        #         if len(self.hds_kperk[0] == 2):
        #             pass
        #     except:
        #         self.hds_kperk = [self.hds_kperk]
        oc = self.m.get_package("OC")
        if oc is None:
            raise Exception("can't find OC package in model to setup hds grid obs")
        if not oc.savehead:
            raise Exception("OC not saving hds, can't setup grid obs")
        hds_unit = oc.iuhead
        hds_file = self.m.get_output(unit=hds_unit)
        assert os.path.exists(os.path.join(self.org_model_ws, hds_file)), \
            "couldn't find existing hds file {0} in org_model_ws".format(hds_file)
        shutil.copy2(os.path.join(self.org_model_ws, hds_file),
                     os.path.join(self.m.model_ws, hds_file))
        # dry/inactive cells are skipped (mapped to NaN) when extracting obs
        inact = None
        if self.m.lpf is not None:
            inact = self.m.lpf.hdry
        elif self.m.upw is not None:
            inact = self.m.upw.hdry
        if inact is None:
            skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x
        else:
            skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x
        print(self.hds_kperk)
        frun_line, df = setup_hds_obs(os.path.join(self.m.model_ws, hds_file),
                                      kperk_pairs=self.hds_kperk, skip=skip)
        self.obs_dfs["hds"] = df
        # NOTE(review): the frun_line returned above is discarded in favor of
        # a hand-built line - confirm they are equivalent
        self.frun_post_lines.append("pyemu.gw_utils.apply_hds_obs('{0}')".format(hds_file))
        self.tmp_files.append(hds_file)
def _setup_smp(self):
""" setup observations from PEST-style SMP file pairs
"""
if self.obssim_smp_pairs is None:
return
if len(self.obssim_smp_pairs) == 2:
if isinstance(self.obssim_smp_pairs[0], str):
self.obssim_smp_pairs = [self.obssim_smp_pairs]
for obs_smp, sim_smp in self.obssim_smp_pairs:
self.log("processing {0} and {1} smp files".format(obs_smp, sim_smp))
if not os.path.exists(obs_smp):
self.logger.lraise("couldn't find obs smp: {0}".format(obs_smp))
if not os.path.exists(sim_smp):
self.logger.lraise("couldn't find sim smp: {0}".format(sim_smp))
new_obs_smp = os.path.join(self.m.model_ws,
os.path.split(obs_smp)[-1])
shutil.copy2(obs_smp, new_obs_smp)
new_sim_smp = os.path.join(self.m.model_ws,
os.path.split(sim_smp)[-1])
shutil.copy2(sim_smp, new_sim_smp)
pyemu.smp_utils.smp_to_ins(new_sim_smp)
def _setup_hob(self):
""" setup observations from the MODFLOW HOB package
"""
if self.m.hob is None:
return
hob_out_unit = self.m.hob.iuhobsv
new_hob_out_fname = os.path.join(self.m.model_ws, self.m.get_output_attribute(unit=hob_out_unit))
org_hob_out_fname = os.path.join(self.org_model_ws, self.m.get_output_attribute(unit=hob_out_unit))
if not os.path.exists(org_hob_out_fname):
self.logger.warn("could not find hob out file: {0}...skipping".format(hob_out_fname))
return
shutil.copy2(org_hob_out_fname, new_hob_out_fname)
hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file(new_hob_out_fname)
self.obs_dfs["hob"] = hob_df
self.tmp_files.append(os.path.split(hob_out_fname))
def _setup_hyd(self):
""" setup observations from the MODFLOW HYDMOD package
"""
if self.m.hyd is None:
return
if self.mfhyd:
org_hyd_out = os.path.join(self.org_model_ws, self.m.name + ".hyd.bin")
if not os.path.exists(org_hyd_out):
self.logger.warn("can't find existing hyd out file:{0}...skipping".
format(org_hyd_out))
return
new_hyd_out = os.path.join(self.m.model_ws, os.path.split(org_hyd_out)[-1])
shutil.copy2(org_hyd_out, new_hyd_out)
df = pyemu.gw_utils.modflow_hydmod_to_instruction_file(new_hyd_out)
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: '_'.join(x.split('_')[:-1]))
line = "pyemu.gw_utils.modflow_read_hydmod_file('{0}')". \
format(os.path.split(new_hyd_out)[-1])
self.logger.statement("forward_run line: {0}".format(line))
self.frun_post_lines.append(line)
self.obs_dfs["hyd"] = df
self.tmp_files.append(os.path.split(new_hyd_out)[-1])
def _setup_water_budget_obs(self):
""" setup observations from the MODFLOW list file for
volume and flux water buget information
"""
if self.mflist_waterbudget:
org_listfile = os.path.join(self.org_model_ws, self.m.lst.file_name[0])
if os.path.exists(org_listfile):
shutil.copy2(org_listfile, os.path.join(self.m.model_ws,
self.m.lst.file_name[0]))
else:
self.logger.warn("can't find existing list file:{0}...skipping".
format(org_listfile))
return
list_file = os.path.join(self.m.model_ws, self.m.lst.file_name[0])
flx_file = os.path.join(self.m.model_ws, "flux.dat")
vol_file = os.path.join(self.m.model_ws, "vol.dat")
df = pyemu.gw_utils.setup_mflist_budget_obs(list_file,
flx_filename=flx_file,
vol_filename=vol_file,
start_datetime=self.m.start_datetime)
if df is not None:
self.obs_dfs["wb"] = df
# line = "try:\n os.remove('{0}')\nexcept:\n pass".format(os.path.split(list_file)[-1])
# self.logger.statement("forward_run line:{0}".format(line))
# self.frun_pre_lines.append(line)
self.tmp_files.append(os.path.split(list_file)[-1])
line = "pyemu.gw_utils.apply_mflist_budget_obs('{0}',flx_filename='{1}',vol_filename='{2}',start_datetime='{3}')". \
format(os.path.split(list_file)[-1],
os.path.split(flx_file)[-1],
os.path.split(vol_file)[-1],
self.m.start_datetime)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_post_lines.append(line)
def _process_chunk_fac2real(chunk):
    """Worker: run ``pyemu.geostats.fac2real`` for every kwargs dict in `chunk`."""
    for fac2real_kwargs in chunk:
        pyemu.geostats.fac2real(**fac2real_kwargs)
def _process_chunk_model_files(chunk, df):
    """Worker: apply all multiplier arrays for every model file in `chunk`."""
    for fname in chunk:
        _process_model_file(fname, df)
def _process_model_file(model_file, df):
# find all mults that need to be applied to this array
df_mf = df.loc[df.model_file == model_file, :]
results = []
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".
format(model_file))
org_arr = np.loadtxt(org_file[0])
for mlt in df_mf.mlt_file:
org_arr *= np.loadtxt(mlt)
if "upper_bound" in df.columns:
ub_vals = df_mf.upper_bound.value_counts().dropna().to_dict()
if len(ub_vals) == 0:
pass
elif len(ub_vals) > 1:
print(ub_vals)
raise Exception("different upper bound values for {0}".format(org_file))
else:
ub = list(ub_vals.keys())[0]
org_arr[org_arr > ub] = ub
if "lower_bound" in df.columns:
lb_vals = df_mf.lower_bound.value_counts().dropna().to_dict()
if len(lb_vals) == 0:
pass
elif len(lb_vals) > 1:
raise Exception("different lower bound values for {0}".format(org_file))
else:
lb = list(lb_vals.keys())[0]
org_arr[org_arr < lb] = lb
np.savetxt(model_file, np.atleast_2d(org_arr), fmt="%15.6E", delimiter='')
def _chunk_list(items, chunk_len=50):
    """Split `items` into lists of at most `chunk_len` elements for workers.

    Returns the full-size chunks followed by one (possibly empty) remainder
    chunk, mirroring the chunking previously duplicated inline.
    """
    items = np.array(items)
    num_chunk_floor = len(items) // chunk_len
    main_chunks = items[:num_chunk_floor * chunk_len].reshape(
        [-1, chunk_len]).tolist()
    remainder = items[num_chunk_floor * chunk_len:].tolist()
    return main_chunks + [remainder]


def apply_array_pars(arr_par_file="arr_pars.csv"):
    """ a function to apply array-based multipler parameters.

    Args:
        arr_par_file (`str`): path to csv file detailing parameter array multipliers.
            This file is written by PstFromFlopy.

    Note:
        Used to implement the parameterization constructed by
        PstFromFlopyModel during a forward run

        This function should be added to the forward_run.py script but can
        be called on any correctly formatted csv

        This function uses multiprocessing, spawning one process for each
        chunk of model input arrays (and optionally pp files). This speeds up
        execution time considerably but means you need to make sure your
        forward run script uses the proper multiprocessing idioms for
        freeze support and main thread handling.
    """
    df = pd.read_csv(arr_par_file, index_col=0)
    if 'pp_file' in df.columns:
        print("starting fac2real", datetime.now())
        pp_df = df.loc[df.pp_file.notna(),
                       ['pp_file', 'fac_file', 'mlt_file']].rename(
            columns={'fac_file': 'factors_file', 'mlt_file': 'out_file'})
        pp_df.loc[:, 'lower_lim'] = 1.0e-10
        # don't need to process all (e.g. if const. mults apply across kper...)
        pp_args = pp_df.drop_duplicates().to_dict('records')
        procs = []
        for chunk in _chunk_list(pp_args):
            p = mp.Process(target=_process_chunk_fac2real, args=[chunk])
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
        print("finished fac2real", datetime.now())

    print("starting arr mlt", datetime.now())
    # unique model input files to be produced, one worker per chunk
    uniq = df.model_file.unique()
    procs = []
    for chunk in _chunk_list(uniq):
        p = mp.Process(target=_process_chunk_model_files, args=[chunk, df])
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    print("finished arr mlt", datetime.now())
def apply_list_pars():
    """ a function to apply boundary condition multiplier parameters.

    Note:
        Used to implement the parameterization constructed by
        PstFromFlopyModel during a forward run

        Requires either "temporal_list_pars.dat" or "spatial_list_pars.dat"
        (whitespace-delimited config tables written during setup) in the
        current working directory.

        Should be added to the forward_run.py script
    """
    temp_file = "temporal_list_pars.dat"
    spat_file = "spatial_list_pars.dat"

    temp_df, spat_df = None, None
    if os.path.exists(temp_file):
        # temporal multipliers: one scalar `val` per (file, column) pair
        temp_df = pd.read_csv(temp_file, delim_whitespace=True)
        temp_df.loc[:, "split_filename"] = temp_df.filename.apply(lambda x: os.path.split(x)[-1])
        org_dir = temp_df.list_org.iloc[0]
        model_ext_path = temp_df.model_ext_path.iloc[0]
    if os.path.exists(spat_file):
        # spatial multipliers: per-cell factors stored in files under `list_mlt`
        spat_df = pd.read_csv(spat_file, delim_whitespace=True)
        spat_df.loc[:, "split_filename"] = spat_df.filename.apply(lambda x: os.path.split(x)[-1])
        mlt_dir = spat_df.list_mlt.iloc[0]
        org_dir = spat_df.list_org.iloc[0]
        model_ext_path = spat_df.model_ext_path.iloc[0]
    if temp_df is None and spat_df is None:
        raise Exception("apply_list_pars() - no key dfs found, nothing to do...")
    # load the spatial mult dfs, keyed by package name
    sp_mlts = {}
    if spat_df is not None:
        for f in os.listdir(mlt_dir):
            pak = f.split(".")[0].lower()
            df = pd.read_csv(os.path.join(mlt_dir, f), index_col=0, delim_whitespace=True)
            # index by a zero-padded "kkiiiijjjj" cell identifier so the
            # multipliers can be aligned with the list-file rows below
            # if pak != 'hfb6':
            df.index = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k, x.i, x.j), axis=1)
            # else:
            #     df.index = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}{2:04.0f}{2:04.0f}".format(x.k, x.irow1, x.icol1,
            #                                                              x.irow2, x.icol2), axis = 1)
            if pak in sp_mlts.keys():
                raise Exception("duplicate multiplier csv for pak {0}".format(pak))
            if df.shape[0] == 0:
                raise Exception("empty dataframe for spatial list file: {0}".format(f))
            sp_mlts[pak] = df

    org_files = os.listdir(org_dir)
    # for fname in df.filename.unique():
    for fname in org_files:
        # need to get the PAK name to handle stupid horrible expceptions for HFB...
        # (commented-out HFB special-casing retained for reference)
        # try:
        #     pakspat = sum([True if fname in i else False for i in spat_df.filename])
        #     if pakspat:
        #         pak = spat_df.loc[spat_df.filename.str.contains(fname)].pak.values[0]
        #     else:
        #         pak = 'notHFB'
        # except:
        #     pak = "notHFB"

        # column names for this external file come from whichever config
        # (temporal or spatial) mentions it; spatial wins if both do
        names = None
        if temp_df is not None and fname in temp_df.split_filename.values:
            temp_df_fname = temp_df.loc[temp_df.split_filename == fname, :]
            if temp_df_fname.shape[0] > 0:
                names = temp_df_fname.dtype_names.iloc[0].split(',')
        if spat_df is not None and fname in spat_df.split_filename.values:
            spat_df_fname = spat_df.loc[spat_df.split_filename == fname, :]
            if spat_df_fname.shape[0] > 0:
                names = spat_df_fname.dtype_names.iloc[0].split(',')
        if names is not None:
            df_list = pd.read_csv(os.path.join(org_dir, fname),
                                  delim_whitespace=True, header=None, names=names)
            # list files use 1-based k,i,j; shift to 0-based before building
            # the same "kkiiiijjjj" identifier used for the multiplier dfs
            df_list.loc[:, "idx"] = df_list.apply(
                lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k - 1, x.i - 1, x.j - 1), axis=1)
            df_list.index = df_list.idx
            pak_name = fname.split('_')[0].lower()
            if pak_name in sp_mlts:
                mlt_df = sp_mlts[pak_name]
                mlt_df_ri = mlt_df.reindex(df_list.index)
                for col in df_list.columns:
                    # never scale the integer cell-identifier columns
                    if col in ["k", "i", "j", "inode", 'irow1', 'icol1', 'irow2', 'icol2', 'idx']:
                        continue
                    if col in mlt_df.columns:
                        # print(mlt_df.loc[mlt_df.index.duplicated(),:])
                        # print(df_list.loc[df_list.index.duplicated(),:])
                        df_list.loc[:, col] *= mlt_df_ri.loc[:, col].values
            if temp_df is not None and fname in temp_df.split_filename.values:
                temp_df_fname = temp_df.loc[temp_df.split_filename == fname, :]
                for col, val in zip(temp_df_fname.col, temp_df_fname.val):
                    df_list.loc[:, col] *= val
            # integer id columns keep %9d; everything else written as %9G
            fmts = ''
            for name in names:
                if name in ["i", "j", "k", "inode", 'irow1', 'icol1', 'irow2', 'icol2']:
                    fmts += " %9d"
                else:
                    fmts += " %9G"
            np.savetxt(os.path.join(model_ext_path, fname), df_list.loc[:, names].values, fmt=fmts)
def write_const_tpl(name, tpl_file, suffix, zn_array=None,
                    shape=None, longnames=False):
    """ write a constant (uniform) template file for a 2-D array

    Args:
        name (`str`): the base parameter name
        tpl_file (`str`): the template file to write
        suffix (`str`): suffix appended to the parameter name and used to
            form the parameter group name
        zn_array (`numpy.ndarray`, optional): an array used to skip inactive cells
            (values < 1), and optionally get shape info.
        shape (`tuple`): tuple nrow and ncol. Either `zn_array` or `shape`
            must be passed
        longnames (`bool`): flag to use longer names that exceed 12 chars in length.
            Default is False.

    Returns:
        `pandas.DataFrame`: a dataframe with parameter information
    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape

    parnme = []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    # inactive cell: write a fixed value, not a parameter
                    pname = " 1.0 "
                else:
                    if longnames:
                        pname = "const_{0}_{1}".format(name, suffix)
                    else:
                        pname = "{0}{1}".format(name, suffix)
                        # only short names must honor the 12-char PEST limit
                        if len(pname) > 12:
                            # bug fix: message said "zone pname" (copy-paste
                            # from the zone template writer)
                            warnings.warn("const pname too long for pest:{0}".
                                          format(pname))
                    parnme.append(pname)
                    pname = " ~ {0} ~".format(pname)
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    df.loc[:, "pargp"] = "{0}_{1}".format(suffix.replace('_', ''), name)
    df.loc[:, "tpl"] = tpl_file
    return df
def write_grid_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
                   spatial_reference=None, longnames=False):
    """ write a grid-based template file for a 2-D array

    Args:
        name (`str`): the base parameter name
        tpl_file (`str`): the template file to write - include path
        suffix (`str`): suffix appended to long parameter names and used to
            form the parameter group name
        zn_array (`numpy.ndarray`, optional): zone array to identify
            inactive cells (values < 1). Default is None
        shape (`tuple`, optional): a length-two tuple of nrow and ncol. Either
            `zn_array` or `shape` must be passed.
        spatial_reference (`flopy.utils.SpatialReference`): a spatial reference instance.
            If `longnames` is True, then `spatial_reference` is used to add spatial info
            to the parameter names.
        longnames (`bool`): flag to use longer names that exceed 12 chars in length.
            Default is False.

    Returns:
        `pandas.DataFrame`: a dataframe with parameter information
    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape

    parnme, x, y = [], [], []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    # inactive cell: write a fixed value, not a parameter
                    pname = ' 1.0 '
                else:
                    if longnames:
                        # bug fix: format was "{0}_i:{0}_j:{1}_{2}", which
                        # repeated `name`, misplaced i/j and dropped `suffix`
                        pname = "{0}_i:{1}_j:{2}_{3}".format(name, i, j, suffix)
                        if spatial_reference is not None:
                            # bug fix: referenced undefined name `sr`
                            pname += "_x:{0:10.2E}_y:{1:10.2E}".format(
                                spatial_reference.xcentergrid[i, j],
                                spatial_reference.ycentergrid[i, j])
                    else:
                        pname = "{0}{1:03d}{2:03d}".format(name, i, j)
                        # only short names must honor the 12-char PEST limit
                        if len(pname) > 12:
                            warnings.warn("grid pname too long for pest:{0}".
                                          format(pname))
                    parnme.append(pname)
                    pname = ' ~ {0} ~ '.format(pname)
                    if spatial_reference is not None:
                        x.append(spatial_reference.xcentergrid[i, j])
                        y.append(spatial_reference.ycentergrid[i, j])
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    if spatial_reference is not None:
        df.loc[:, 'x'] = x
        df.loc[:, 'y'] = y
    df.loc[:, "pargp"] = "{0}_{1}".format(suffix.replace('_', ''), name)
    df.loc[:, "tpl"] = tpl_file
    return df
def write_zone_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
                   longnames=False):
    """ write a zone-based template file for a 2-D array

    Args:
        name (`str`): the base parameter name
        tpl_file (`str`): the template file to write
        suffix (`str`): suffix appended to long parameter names and used to
            form the parameter group name
        zn_array (`numpy.ndarray`, optional): an array used to skip inactive cells
            (values < 1), supply zone numbers, and optionally get shape info.
        shape (`tuple`): tuple nrow and ncol. Either `zn_array` or `shape`
            must be passed
        longnames (`bool`): flag to use longer names that exceed 12 chars in length.
            Default is False.

    Returns:
        `pandas.DataFrame`: a dataframe with parameter information
    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape

    parnme = []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    # inactive cell: write a fixed value, not a parameter
                    pname = " 1.0 "
                else:
                    # without a zone array every cell falls in zone 1
                    zval = 1 if zn_array is None else zn_array[i, j]
                    if longnames:
                        pname = "{0}_zone:{1}_{2}".format(name, zval, suffix)
                    else:
                        pname = "{0}_zn{1}".format(name, zval)
                        # only short names must honor the 12-char PEST limit
                        if len(pname) > 12:
                            warnings.warn("zone pname too long for pest:{0}".
                                          format(pname))
                    parnme.append(pname)
                    pname = " ~ {0} ~".format(pname)
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    df.loc[:, "pargp"] = "{0}_{1}".format(suffix.replace("_", ''), name)
    # consistency fix: write_const_tpl and write_grid_tpl record the template
    # file path; do the same here so downstream code can treat all three
    # writers uniformly
    df.loc[:, "tpl"] = tpl_file
    return df
def build_jac_test_csv(pst, num_steps, par_names=None, forward=True):
    """ build a dataframe of jactest inputs for use with sweep

    Args:
        pst (`pyemu.Pst`): existing control file, or the path to one
        num_steps (`int`): number of pertubation steps for each parameter
        par_names ([`str`]): list of parameter names of pars to test.
            If None, all adjustable pars are used. Default is None
        forward (`bool`): flag to start with forward pertubations.
            Default is True

    Returns:
        `pandas.DataFrame`: the sequence of model runs to evaluate
        for the jactesting.  The first row is the unperturbed base case;
        the index encodes the perturbed parameter and its value.
    """
    if isinstance(pst, str):
        pst = pyemu.Pst(pst)
    pst.build_increments()
    incr = pst.parameter_data.increment.to_dict()
    par = pst.parameter_data
    if par_names is None:
        par_names = pst.adj_par_names
    # placeholder index labels; replaced with value-bearing names at the end
    idx = ["base"]
    for par_name in par_names:
        idx.extend(["{0}_{1}".format(par_name, i) for i in range(num_steps)])
    df = pd.DataFrame(index=idx, columns=pst.par_names)

    # work in log10 space for log-transformed parameters
    li = par.partrans == "log"
    lbnd = par.parlbnd.copy()
    ubnd = par.parubnd.copy()
    lbnd.loc[li] = lbnd.loc[li].apply(np.log10)
    ubnd.loc[li] = ubnd.loc[li].apply(np.log10)
    lbnd = lbnd.to_dict()
    ubnd = ubnd.to_dict()

    org_vals = par.parval1.copy()
    org_vals.loc[li] = org_vals.loc[li].apply(np.log10)
    sign = 1.0 if forward else -1.0

    # base case goes in as first row, no perturbations
    df.loc["base", pst.par_names] = par.parval1.copy()
    irow = 1
    full_names = ["base"]
    for par_name in par_names:
        org_val = org_vals.loc[par_name]
        last_val = org_val
        for step in range(num_steps):
            vals = org_vals.copy()
            val = last_val + (sign * incr[par_name])
            # if the step left the bounds, flip direction and restart from
            # the original value; if that is also out of bounds, give up
            if val > ubnd[par_name]:
                sign = -1.0
                val = org_val + (sign * incr[par_name])
                if val < lbnd[par_name]:
                    raise Exception("parameter {0} went out of bounds".
                                    format(par_name))
            elif val < lbnd[par_name]:
                sign = 1.0
                val = org_val + (sign * incr[par_name])
                if val > ubnd[par_name]:
                    raise Exception("parameter {0} went out of bounds".
                                    format(par_name))
            vals.loc[par_name] = val
            # back-transform log pars to native space for the run inputs
            vals.loc[li] = 10 ** vals.loc[li]
            df.loc[idx[irow], pst.par_names] = vals
            full_names.append(
                "{0}_{1:<15.6E}".format(par_name, vals.loc[par_name]).strip())
            irow += 1
            last_val = val
    df.index = full_names
    return df
def _write_df_tpl(filename, df, sep=',', tpl_marker='~', **kwargs):
"""function write a pandas dataframe to a template file.
"""
if "line_terminator" not in kwargs:
if "win" in platform.platform().lower():
kwargs["line_terminator"] = "\n"
with open(filename, 'w') as f:
f.write("ptf {0}\n".format(tpl_marker))
f.flush()
df.to_csv(f, sep=sep, mode='a', **kwargs)
def _make_new_subdirs(new_cwd, fname):
    """Recreate the (relative) directory part of `fname` under `new_cwd`.

    No-op for bare file names or directories that already exist.
    """
    raw = os.path.split(fname)
    if len(raw[0]) == 0:
        raw = raw[1:]
    if len(raw) > 1:
        pth = os.path.join(new_cwd, os.path.join(*raw[:-1]))
        if not os.path.exists(pth):
            os.makedirs(pth)


def setup_fake_forward_run(pst, new_pst_name, org_cwd='.', bak_suffix="._bak", new_cwd='.'):
    """setup a fake forward run for a pst.

    Args:
        pst (`pyemu.Pst`): existing control file
        new_pst_name (`str`): new control file to write
        org_cwd (`str`): existing working dir. Default is "."
        bak_suffix (`str`, optional): suffix to add to existing
            model output files when making backup copies.
        new_cwd (`str`): new working dir. Default is ".".

    Returns:
        `pyemu.Pst`: the modified control file instance (also written to
        ``new_cwd``/``new_pst_name``)

    Note:
        The fake forward run simply copies existing backup versions of
        model output files to the outfiles pest(pp) is looking
        for.  This is really a development option for debugging
        PEST++ issues.
    """
    if new_cwd != org_cwd and not os.path.exists(new_cwd):
        os.mkdir(new_cwd)
    # map each output file to the backup copy the fake run will restore
    pairs = {}
    for output_file in pst.output_files:
        org_pth = os.path.join(org_cwd, output_file)
        new_pth = os.path.join(new_cwd, os.path.split(output_file)[-1])
        assert os.path.exists(org_pth), org_pth
        shutil.copy2(org_pth, new_pth + bak_suffix)
        pairs[output_file] = os.path.split(output_file)[-1] + bak_suffix

    if new_cwd != org_cwd:
        # tpl/ins files must exist in the new dir - copy them over
        for files in [pst.template_files, pst.instruction_files]:
            for f in files:
                _make_new_subdirs(new_cwd, f)
                org_pth = os.path.join(org_cwd, f)
                new_pth = os.path.join(new_cwd, f)
                assert os.path.exists(org_pth), org_pth
                shutil.copy2(org_pth, new_pth)
        # model input files are written by pest - only their dirs are needed
        for f in pst.input_files:
            _make_new_subdirs(new_cwd, f)
        # copy any pest++ option that looks like an existing auxiliary file
        for f in pst.pestpp_options.values():
            if not isinstance(f, str):
                continue
            _make_new_subdirs(new_cwd, f)
            org_pth = os.path.join(org_cwd, f)
            new_pth = os.path.join(new_cwd, f)
            if os.path.exists(org_pth):
                shutil.copy2(org_pth, new_pth)

    # the "forward run" just restores the backed-up outputs
    with open(os.path.join(new_cwd, "fake_forward_run.py"), 'w') as f:
        f.write("import os\nimport shutil\n")
        for org, bak in pairs.items():
            f.write("shutil.copy2('{0}','{1}')\n".format(bak, org))
    pst.model_command = "python fake_forward_run.py"
    pst.write(os.path.join(new_cwd, new_pst_name))
    return pst
def setup_temporal_diff_obs(pst, ins_file, out_file=None,
                            include_zero_weight=False, include_path=False,
                            sort_by_name=True, long_names=True,
                            prefix="dif"):
    """ a helper function to setup difference-in-time observations based on an existing
    set of observations in an instruction file using the observation grouping in the
    control file

    Args:
        pst (`pyemu.Pst`): existing control file
        ins_file (`str`): an existing instruction file
        out_file (`str`, optional): an existing model output file that corresponds to
            the instruction file. If None, `ins_file.replace(".ins","")` is used
        include_zero_weight (`bool`, optional): flag to include zero-weighted observations
            in the difference observation process. Default is False so that only non-zero
            weighted observations are used.
        include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
            is located (if different from where python is running). This is useful for setting up
            the process in separate directory for where python is running.
        sort_by_name (`bool`,optional): flag to sort observation names in each group prior to setting up
            the differencing. The order of the observations matters for the differencing. If False, then
            the control file order is used. If observation names have a datetime suffix, make sure the format is
            year-month-day to use this sorting. Default is True
        long_names (`bool`, optional): flag to use long, descriptive names by concating the two observation names
            that are being differenced. This will produce names that are too long for tradtional PEST(_HP).
            Default is True.
        prefix (`str`, optional): prefix to prepend to observation names and group names. Default is "dif".

    Returns:
        tuple containing

        - **str**: the forward run command to execute the binary file process during model runs.

        - **pandas.DataFrame**: a dataframe of observation information for use in the pest control file

    Note:
        this is the companion function of `helpers.apply_temporal_diff_obs()`.
    """
    if not os.path.exists(ins_file):
        raise Exception("setup_temporal_diff_obs() error: ins_file '{0}' not found".
                        format(ins_file))
    # the ins routines will check for missing obs, etc
    try:
        ins = pyemu.pst_utils.InstructionFile(ins_file, pst)
    except Exception as e:
        raise Exception("setup_temporal_diff_obs(): error processing instruction file: {0}".
                        format(str(e)))
    if out_file is None:
        out_file = ins_file.replace(".ins", "")

    # find obs groups from the obs names in the ins that have more than one observation
    # (cant diff single entry groups)
    obs = pst.observation_data
    if include_zero_weight:
        group_vc = pst.observation_data.loc[ins.obs_name_set, "obgnme"].value_counts()
    else:
        group_vc = obs.loc[obs.apply(lambda x: x.weight > 0 and
                                     x.obsnme in ins.obs_name_set, axis=1),
                           "obgnme"].value_counts()
    groups = list(group_vc.loc[group_vc > 1].index)
    if len(groups) == 0:
        raise Exception("setup_temporal_diff_obs() error: no obs groups found " +
                        "with more than one non-zero weighted obs")

    # process each group
    diff_dfs = []
    for group in groups:
        # sub dataframe of non-zero weighted obs in this group that are
        # also in the instruction file
        obs_group = obs.loc[obs.obgnme == group, :].copy()
        obs_group = obs_group.loc[obs_group.apply(lambda x: x.weight > 0 and x.obsnme in ins.obs_name_set, axis=1), :]
        if sort_by_name:
            obs_group = obs_group.sort_values(by="obsnme", ascending=True)
        # pair each observation with its successor in the (sorted) sequence
        diff1 = obs_group.obsnme[:-1].values
        diff2 = obs_group.obsnme[1:].values
        diff_df = pd.DataFrame({"diff1": diff1, "diff2": diff2})
        # build up the diff obs names
        if long_names:
            diff_df.loc[:, "obsnme"] = ["{0}_{1}__{2}".format(prefix, d1, d2)
                                        for d1, d2 in zip(diff1, diff2)]
        else:
            # bug fix: was `for c in len(diff1)`, which raises TypeError
            # ('int' object is not iterable) - iterate an index range
            diff_df.loc[:, "obsnme"] = ["{0}_{1}_{2}".format(prefix, group, c)
                                        for c in range(len(diff1))]
        # set the obs names as the index (per usual)
        diff_df.index = diff_df.obsnme
        diff_df.loc[:, "obgnme"] = "{0}_{1}".format(prefix, group)
        # weight by the standard propagation-of-variance formula for a difference
        d1_std, d2_std = 1. / obs_group.weight[:-1].values, 1. / obs_group.weight[1:].values
        diff_df.loc[:, "weight"] = 1. / (np.sqrt((d1_std ** 2) + (d2_std ** 2)))
        diff_dfs.append(diff_df)
    # concat all the diff dataframes
    diff_df = pd.concat(diff_dfs)

    # save the dataframe as a config file
    config_file = ins_file.replace(".ins", ".diff.config")
    with open(config_file, 'w') as f:
        if include_path:
            f.write("{0},{1}\n".format(os.path.split(ins_file)[-1],
                                       os.path.split(out_file)[-1]))
        else:
            f.write("{0},{1}\n".format(ins_file, out_file))
        f.flush()
        diff_df.to_csv(f, mode="a")

    # write the instruction file for the processed (differenced) outputs
    diff_ins_file = config_file.replace(".config", ".processed.ins")
    with open(diff_ins_file, 'w') as f:
        f.write("pif ~\n")
        f.write("l1 \n")
        for oname in diff_df.obsnme:
            f.write("l1 w w w !{0}! \n".format(oname))
    if include_path:
        config_file = os.path.split(config_file)[-1]
        diff_ins_file = os.path.split(diff_ins_file)[-1]

    # if the corresponding output file exists, try to run the routine
    if os.path.exists(out_file):
        if include_path:
            b_d = os.getcwd()
            ins_path = os.path.split(ins_file)[0]
            os.chdir(ins_path)
        processed_df = apply_temporal_diff_obs(config_file=config_file)
        # use the new instruction file to harvest the processed diff outputs
        ins = pyemu.pst_utils.InstructionFile(diff_ins_file)
        ins_pro_diff_df = ins.read_output_file(diff_ins_file.replace(".ins", ""))
        if include_path:
            os.chdir(b_d)
        print(ins_pro_diff_df)
        diff_df.loc[ins_pro_diff_df.index, "obsval"] = ins_pro_diff_df.obsval
    frun_line = "pyemu.helpers.apply_temporal_diff_obs('{0}')\n".format(config_file)
    return frun_line, diff_df
def apply_temporal_diff_obs(config_file):
    """process an instruction-output file pair and formulate difference observations.

    Args:
        config_file (`str`): configuration file written by
            `pyemu.helpers.setup_temporal_diff_obs`.

    Returns:
        `pandas.DataFrame`: processed difference observations

    Note:
        writes `config_file.replace(".config",".processed")` output file that can be read
        with the instruction file that is created by `pyemu.helpers.setup_temporal_diff_obs()`.

        this is the companion function of `helpers.setup_temporal_diff_obs()`.
    """
    if not os.path.exists(config_file):
        raise Exception("apply_temporal_diff_obs() error: config_file '{0}' not found".format(config_file))
    # first line is "ins_file,out_file"; the remainder is the diff dataframe
    with open(config_file, 'r') as f:
        header = f.readline().strip().split(',')
        ins_file, out_file = header[0], header[1]
        diff_df = pd.read_csv(f)
    if not os.path.exists(out_file):
        raise Exception("apply_temporal_diff_obs() error: out_file '{0}' not found".format(out_file))
    if not os.path.exists(ins_file):
        raise Exception("apply_temporal_diff_obs() error: ins_file '{0}' not found".format(ins_file))
    try:
        ins = pyemu.pst_utils.InstructionFile(ins_file)
    except Exception as e:
        raise Exception("apply_temporal_diff_obs() error instantiating ins file: {0}".format(str(e)))
    try:
        out_df = ins.read_output_file(out_file)
    except Exception as e:
        raise Exception("apply_temporal_diff_obs() error processing ins-out file pair: {0}".format(str(e)))

    # every obs name referenced by the diffs must be present in the outputs
    needed = set(diff_df.diff1.to_list())
    needed.update(set(diff_df.diff2.to_list()))
    missing = needed - set(list(out_df.index.values))
    if missing:
        raise Exception("apply_temporal_diff_obs() error: the following obs names in the config file " +
                        "are not in the instruction file processed outputs :" + ",".join(missing))

    diff_df.loc[:, "diff1_obsval"] = out_df.loc[diff_df.diff1.values, "obsval"].values
    diff_df.loc[:, "diff2_obsval"] = out_df.loc[diff_df.diff2.values, "obsval"].values
    diff_df.loc[:, "diff_obsval"] = diff_df.diff1_obsval - diff_df.diff2_obsval
    processed_name = config_file.replace(".config", ".processed")
    diff_df.loc[:, ["obsnme", "diff1_obsval", "diff2_obsval", "diff_obsval"]]. \
        to_csv(processed_name, sep=' ', index=False)
    return diff_df
|
PiRaP-2020-lecture-11.py | import numpy
import time
import math
import multiprocessing as mp
import joblib
# own function definitions
import tasks
def clear_all():
    """Delete every user-defined global that is not a function or a module.

    Underscore-prefixed names, callables and imported modules are left
    intact so the script's helpers survive the wipe.
    """
    snapshot = globals().copy()
    for name in snapshot:
        if name.startswith('_'):
            continue
        rep = str(globals()[name])
        if 'func' in rep or 'module' in rep:
            continue
        del globals()[name]
# Main guard - put main script code in here
# (avoids multiprocessing problems when executing functions from same module in parallel)
if __name__ == "__main__":
    clear_all()
    ###############################################################################
    # Problem:
    # Evaluate logistic map for selected values of r from [1, 2] interval after
    # assumed number of iterations
    #
    # y(n) = r * y(n-1) * (1 - y(n-1))
    #
    # Evaluations for different r are independent - embarrassingly parallel problem
    print("Problem: Logistic map")
    # Case 1
    # - small number of large tasks (all of same size)
    # - few memory accesses
    print("Case 1: small number of large tasks")
    N = 100 # number of r values to be evaluated
    numIters = 1000000 # number of iterations
    # Serial baseline: evaluate each r value one after another.
    A = numpy.empty(N)
    t = time.perf_counter()
    for i in range(N):
        r = 1 + (i + 1) / N
        A[i] = tasks.logisticMap(r, numIters)
    dt_serial = (time.perf_counter() - t)
    print("Serial:", dt_serial)
    # do the same in parallel with multiprocessing Pool
    print("Number of Cores: ", mp.cpu_count())
    numWorkers = 4 # use 4 workers regardless of core count
    t = time.perf_counter()
    with mp.Pool(processes = numWorkers) as pool: # pool only visible in the context below
        # starmap method
        # arguments: task to be invoked, iterable object with task parameters
        # return value: list
        B = pool.starmap(tasks.logisticMap, ((1 + (i + 1) / N, numIters) for i in range(N)) )
    dt = (time.perf_counter() - t)
    print("Pool:", dt, ", speedup:", dt_serial/dt, ", correct: ", (A == B).all())
    # alternative solution: joblib library
    t = time.perf_counter()
    C = joblib.Parallel(n_jobs = numWorkers)(joblib.delayed(tasks.logisticMap)((1 + (i + 1) / N), numIters) for i in range(N))
    dt = (time.perf_counter() - t)
    print("Joblib:", dt, ", speedup:", dt_serial/dt, ", correct: ", (A == C).all())
    ##############################################################################
    # Case 2
    # - different amount of job for each task (iteration count grows with i,
    #   so task sizes are uneven and scheduling matters)
    print("Case 2: uneven tasks")
    N = 100 # number of r values to be evaluated
    numIters = 1000000 # number of iterations
    numWorkers = 4
    A = numpy.empty(N)
    t = time.perf_counter()
    for i in range(N):
        r = 1 + (i + 1) / N
        n = (numIters // N) * i * 2
        A[i] = tasks.logisticMap(r, n)
    dt_serial = (time.perf_counter() - t)
    print("Serial:", dt_serial)
    # parallel - pool
    t = time.perf_counter()
    with mp.Pool(processes = numWorkers) as pool: # pool only visible in the context below
        B = pool.starmap(tasks.logisticMap, ((1 + (i + 1) / N, (numIters // N) * i * 2) for i in range(N)) )
    dt = (time.perf_counter() - t)
    print ("Pool:", dt, ", speedup:", dt_serial/dt, ", correct: ", (A == B).all())
    # parallel - joblib
    t = time.perf_counter()
    C = joblib.Parallel(n_jobs = numWorkers)(joblib.delayed(tasks.logisticMap)((1 + (i + 1) / N), (numIters // N) * i * 2) for i in range(N))
    dt = (time.perf_counter() - t)
    print ("Joblib:", dt, ", speedup:", dt_serial/dt, "correct: ", (C == A).all())
    ##############################################################################
    # Case 3
    # - large number of small tasks (all of same size)
    # - more memory accesses; per-task dispatch overhead dominates here
    print("Case 3: large number of small tasks")
    N = 10000000
    numIters = 10
    numWorkers = 4
    # serial
    A = numpy.empty(N)
    t = time.perf_counter()
    for i in range(N):
        r = 1 + (i + 1) / N
        A[i] = tasks.logisticMap(r, numIters)
    dt_serial = (time.perf_counter() - t)
    print("Serial:", dt_serial)
    # pool
    t = time.perf_counter()
    with mp.Pool(processes = numWorkers) as pool:
        B = pool.starmap(tasks.logisticMap, ((1 + (i + 1) / N, numIters) for i in range(N)) )
    dt = (time.perf_counter() - t)
    print("Pool:", dt, ", speedup:", dt_serial/dt, "correct: ", (A == B).all())
    # joblib
    t = time.perf_counter()
    C = joblib.Parallel(n_jobs = numWorkers)(joblib.delayed(tasks.logisticMap)((1 + (i + 1) / N), numIters) for i in range(N))
    dt = (time.perf_counter() - t)
    print("Joblib:", dt, ", speedup:", dt_serial/dt, "correct: ", (A == C).all())
    # pool - output arrays: one task per worker computing a whole block,
    # which amortizes the per-task dispatch overhead of the variant above
    t = time.perf_counter()
    blockSize = N // numWorkers # note the remainder! (N not divisible by numWorkers drops the tail)
    with mp.Pool(processes = numWorkers) as pool:
        res = pool.starmap(tasks.logisticMap_outArray, ((N, numIters, i * blockSize, blockSize ) for i in range(numWorkers)) )
        D = numpy.concatenate(res)
    dt = (time.perf_counter() - t)
    print("Pool, array-wise:", dt, ", speedup:", dt_serial/dt, "correct: ", (A == D).all())
    # joblib - output arrays
    t = time.perf_counter()
    blockSize = N // numWorkers
    res = joblib.Parallel(n_jobs = numWorkers)(joblib.delayed(tasks.logisticMap_outArray)(N, numIters, i * blockSize, blockSize ) for i in range(numWorkers))
    E = numpy.concatenate(res)
    dt = (time.perf_counter() - t)
    print("Joblib, array-wise:", dt, ", speedup:", dt_serial/dt, "correct: ", (A == E).all())
    ##############################################################################
    # Problem: apply complex function on a large array
    # - in place transformation (modify original array)
    # - workers share memory but do not interact
    print("\nProblem: complex function")
    clear_all()
    n = 10 ** 8 # 100 million elements
    origA = numpy.random.uniform(0, 1, n)
    fun = lambda x: numpy.arctan(numpy.power(numpy.sin(x) + 1.0, 0.75)) # complex function to make task arithmetic-bound
    # Serial
    A = origA.copy()
    t = time.perf_counter()
    A = fun(A)
    dt_serial = time.perf_counter() - t
    print("Serial:", dt_serial)
    # Parallel
    bufA = mp.Array("d", n) # allocate shared buffers (it contains optional lock)
    sharedA = numpy.frombuffer(bufA.get_obj()) # create 1D numpy matrices as views
    sharedA[:] = origA # initialize vector (alters underlying buffer)
    test = numpy.frombuffer(bufA.get_obj()) # get another view to see if it works
    # create list of workers; each transforms its own disjoint slice in place
    t = time.perf_counter()
    numWorkers = 4
    sliceSize = n // numWorkers
    workers = [mp.Process(target=tasks.transformComplex, args = (bufA, sid * sliceSize, sliceSize) ) for sid in range(numWorkers)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    dt = (time.perf_counter() - t)
    print("Parallel:", dt, ", speedup: ", dt_serial/dt, "correct: ", (A == sharedA).all())
    ##############################################################################
    # Problem: calculating histogram
    # - workers share memory and do interact (all increment the same bins)
    print("\nProblem: histogram")
    sampleSize = 100
    n = 10 ** 7
    # NOTE(review): binomial(sampleSize, ...) can in principle return
    # sampleSize itself, which would index past zeros(sampleSize) - confirm
    # the bin count is intended (probability is astronomically small here).
    A = numpy.random.binomial(sampleSize, 0.5, n)
    histo = numpy.zeros(sampleSize, dtype="i")
    t = time.perf_counter()
    for i in range(n):
        ai = A[i]
        histo[ai] = histo[ai] + 1
    dt_serial = time.perf_counter() - t
    print ("Serial:", dt_serial)
    # Parallel
    bufA = mp.Array("i", n) # allocate shared buffers with optional lock
    sharedA = numpy.frombuffer(bufA.get_obj(), dtype="i") # create 1D numpy matrices as views
    sharedA[:] = A
    bufHisto = mp.Array("i", sampleSize) # allocate shared buffers with optional lock
    sharedHisto = numpy.frombuffer(bufHisto.get_obj(), dtype="i") # create 1D numpy matrices as views
    sharedHisto[:] = 0 # initialize vector (alters underlying buffer)
    numWorkers = 4
    sliceSize = n // numWorkers
    # variant 1: no synchronization while incrementing shared bins
    t = time.perf_counter()
    workers = [mp.Process(target=tasks.histo, args = (bufA, bufHisto, sid, sliceSize) ) for sid in range(numWorkers)]
    for w in workers: w.start()
    for w in workers: w.join()
    dt = (time.perf_counter() - t)
    print ("Parallel:", dt, ", speedup: ", dt_serial/dt, "correct: ", (histo == sharedHisto).all())
    # variant 2: locked increments
    sharedHisto[:] = 0
    t = time.perf_counter()
    workers = [mp.Process(target=tasks.histoLock, args = (bufA, bufHisto, sid, sliceSize) ) for sid in range(numWorkers)]
    for w in workers: w.start()
    for w in workers: w.join()
    dt = (time.perf_counter() - t)
    print ("Parallel:", dt, ", speedup: ", dt_serial/dt, "correct: ", (histo == sharedHisto).all())
    # variant 3: faster locking strategy
    sharedHisto[:] = 0
    t = time.perf_counter()
    workers = [mp.Process(target=tasks.histoLockFast, args = (bufA, bufHisto, sid, sliceSize) ) for sid in range(numWorkers)]
    for w in workers: w.start()
    for w in workers: w.join()
    dt = (time.perf_counter() - t)
    print ("Parallel:", dt, ", speedup: ", dt_serial/dt, "correct: ", (histo == sharedHisto).all())
simpleui.py | import tkinter
import threading
from time import sleep
from functools import partial
import queue
##
## It's important that exceptions in my_method do not cause you to miss registering with the mainloop. You may want to put your call to root.after in the finally part of try/finally.
##[10:31:04 AM] Nieri TV: make incoming command queue for main bookworm object. only need to handle code that might be registered with external threads (e.g. callxyzfromui functions)
##[10:34:19 AM] Nieri TV: can that be made into a feature of the simple ui to handle it with a function that runs the queued commands as part of the main object flow
class SimpleUI:
    '''
    Tkinter UI that runs on its own thread and is driven by another object.

    - created and handled within another object
    - dynamically generates interface items in separate areas:
        - windows and frames to contain other elements
        - buttons linked to functions
        - text output
        - keypress watchers
    - general default for all functions is to use the root window unless
      otherwise specified
    - internal structure of ui (elements are buttons, text output, etc.):
        {window name: {'window':window,
                       'frame name': {'frame':frame,
                                      'element name':element}}}

    Threading model: every tkinter object is created and touched only on the
    UI thread started in __init__.  Public methods marshal work onto that
    thread via _in_q and block on _out_q for the result.  Button presses are
    queued on _callback_q and must be drained by the owning object through
    run_external_callbacks().
    '''

    def __init__(self, root_name = 'root', hide = False):
        # Request/response queues used to marshal calls onto the UI thread.
        self._in_q = queue.Queue()
        self._out_q = queue.Queue()
        # Button callbacks waiting to be run on the owner's thread.
        self._callback_q = queue.Queue()
        self._root_name = root_name
        self.t = threading.Thread(target = self._threadmain)
        self.t.start()
        if hide: self.hide()

    def _threadmain(self, **kwargs):
        '''UI-thread body: build the root window and run the tkinter mainloop.'''
        def timertick():
            # Execute at most one queued request per tick.
            try:
                func, args, kwargs = self._in_q.get_nowait()
            except queue.Empty:
                pass
            else:
                self._out_q.put(func(*args, **kwargs))
            #re-register this function after completion
            ### get interval from config
            self._ui['windows'][self._root_name].after(100, timertick)

        self._ui = {}
        ui = self._ui
        ui['windows'] = {self._root_name: tkinter.Tk()}
        ui['windows'][self._root_name].protocol("WM_DELETE_WINDOW",
                                                self._close_ui)
        ui['frames'] = {}
        ui['buttons'] = {}
        ui['textouts'] = {}
        timertick()
        ui['windows'][self._root_name].mainloop()

    def _submit_to_tkinter(self, func, *args, **kwargs):
        '''Run func(*args, **kwargs) on the UI thread and return its result.

        Raises:
            RuntimeWarning: if the UI thread has already exited.
        '''
        if self.t.is_alive():
            self._in_q.put((func, args, kwargs))
            return self._out_q.get()
        else:
            raise RuntimeWarning('UI is already closed but messages are being sent to it.')

    def _queue_callback(self, func):
        '''Queue a button callback for later execution by the owner's thread.'''
        self._callback_q.put(func)

    def run_external_callbacks(self):
        '''Run all queued button callbacks on the caller's thread.'''
        try:
            while 1:
                func = self._callback_q.get_nowait()
                func()
        except queue.Empty: pass

    def is_running(self):
        '''Return True while the UI thread (and hence the mainloop) is alive.'''
        if self.t.is_alive():
            return True
        else:
            return False

    def show(self, window_name = None):
        '''Show one window, or every window when window_name is None.'''
        self._submit_to_tkinter(self._show, window_name)

    def hide(self, window_name = None):
        '''Hide one window, or every window when window_name is None.'''
        self._submit_to_tkinter(self._hide, window_name)

    def add_window(self, name, hidden = False):
        '''Create a new top-level window, optionally hidden.'''
        self._submit_to_tkinter(self._add_window, name, hidden)

    def add_frame(self, name, window_name = None):
        '''Create a frame inside window_name (default: the root window).'''
        self._submit_to_tkinter(self._add_frame, name, window_name)

    def add_textout(self, name, frame_name):
        '''Create a text-output label inside the named frame.'''
        self._submit_to_tkinter(self._add_textout, name, frame_name)

    def add_button(self, name, b_text, callable, frame_name):
        '''Create a button labelled b_text that queues `callable` when pressed.'''
        self._submit_to_tkinter(self._add_button, name, b_text,
                                callable, frame_name)

    def change_text(self, textout_name, new_text):
        '''Replace the text shown by the named text output.'''
        self._submit_to_tkinter(self._change_text, textout_name, new_text)

    def close_ui(self):
        '''Destroy the root window; a no-op when the UI is already closed.'''
        try: self._submit_to_tkinter(self._close_ui)
        except RuntimeWarning: pass

    # ---- UI-thread implementations: only call from the UI thread, normally
    # ---- via _submit_to_tkinter ----

    def _close_ui(self):
        self._ui['windows'][self._root_name].destroy()

    def _show(self, window_name):
        # Fixed: the single-window case previously produced a bare window
        # object, and iterating over it raised TypeError.
        if window_name:
            window_list = [self._ui['windows'][window_name]]
        else:
            window_list = list(self._ui['windows'].values())
        for window in window_list:
            window.update()
            window.deiconify()

    def _hide(self, window_name):
        # Same single-window fix as _show.
        if window_name:
            window_list = [self._ui['windows'][window_name]]
        else:
            window_list = list(self._ui['windows'].values())
        for window in window_list:
            window.withdraw()

    def _add_window(self, name, hidden):
        windows = self._ui['windows']
        # Fixed: was `windows['name'] = tkinter.TopLevel(...)`, which stored
        # the window under the literal key 'name' and referenced a class that
        # does not exist (the tkinter class is Toplevel).
        windows[name] = tkinter.Toplevel(windows[self._root_name])
        if hidden:
            # Call _hide directly: we are already on the UI thread, and going
            # through _submit_to_tkinter from here would deadlock the queue.
            self._hide(name)

    def _add_frame(self, frame_name, window_name):
        frames = self._ui['frames']
        window = (self._ui['windows'][window_name] if window_name else
                  self._ui['windows'][self._root_name])
        frames[frame_name] = tkinter.Frame(window)
        frames[frame_name].pack()

    def _add_textout(self, name, frame_name):
        textouts = self._ui['textouts']
        frame = self._ui['frames'][frame_name]
        textouts[name] = tkinter.Label(frame)
        textouts[name].pack()

    def _change_text(self, textout_name, new_text):
        self._ui['textouts'][textout_name].config(text = new_text)

    def _add_button(self, name, b_text, callable, frame_name):
        buttons = self._ui['buttons']
        frame = self._ui['frames'][frame_name]
        # Pressing the button only queues the callback; the owning object
        # executes it later via run_external_callbacks().
        callback = partial(self._queue_callback, callable)
        buttons[name] = tkinter.Button(frame, text = b_text,
                                       command = callback)
        buttons[name].pack()

    def window_names(self):
        '''Yield the names of all windows.'''
        for window_name in self._ui['windows'].keys():
            yield window_name

    def frame_names(self, window_name):
        '''Yield frame names.

        NOTE(review): frames are not stored per-window, so window_name is
        currently ignored and every frame name is yielded.
        '''
        for frame_name in self._ui['frames'].keys():
            yield frame_name
def main():
    '''Manual smoke test: build a window with a frame, a label and a button,
    then pump queued button callbacks until the window is closed.'''
    def button_pressed():
        print('button pressed')
    ui = SimpleUI()
    ## input('press key to hide...')
    ui.hide()
    ## input('press key to show...')
    ui.show()
    ## input('press key to add frame...')
    ui.add_frame('frame1')
    ## input('press key to add textout...')
    ui.add_textout('textout1', 'frame1')
    ## input('press key to change text...')
    ui.change_text('textout1', 'added text')
    ## input('press key to add button...')
    ui.add_button('button1', 'button 1', button_pressed, 'frame1')
    # Poll for queued button callbacks until the UI thread exits.
    while 1: #input('(Q)uit or any other key to continue').lower() != 'q':
        ui.run_external_callbacks()
        if not ui.is_running():
            break
        sleep(.1)
    ui.close_ui()
if __name__ == '__main__':
    main()
|
bitmex_websocket.py | import websocket
import threading
import traceback
from time import sleep
import json
import logging
import urllib
import math
import time, urllib, hmac, hashlib
def generate_nonce():
    """Return an integer UNIX timestamp one hour in the future.

    Despite the name, this is used as the value of the ``api-expires``
    auth header, i.e. an expiry time rather than a counter.
    """
    expiry = time.time() + 3600
    return int(round(expiry))
# Generates an API signature.
# A signature is HMAC_SHA256(secret, verb + path + nonce + data), hex encoded.
# Verb must be uppercased, url is relative, nonce must be an increasing 64-bit integer
# and the data, if present, must be JSON without whitespace between keys.
#
# For example, in pseudocode (and in real code below):
#
# verb=POST
# url=/api/v1/order
# nonce=1416993995705
# data={"symbol":"XBTZ14","quantity":1,"price":395.01}
# signature = HEX(HMAC_SHA256(secret, 'POST/api/v1/order1416993995705{"symbol":"XBTZ14","quantity":1,"price":395.01}'))
def generate_signature(secret, verb, url, nonce, data):
    """Generate a request signature compatible with BitMEX.

    The signature is HEX(HMAC_SHA256(secret, verb + path + nonce + data)),
    where ``path`` is the path component of ``url`` (with its query string
    appended, if any).
    """
    # Strip the base from the URL, keeping only path (+ optional query).
    parsed = urllib.parse.urlparse(url)
    if parsed.query:
        path = parsed.path + '?' + parsed.query
    else:
        path = parsed.path
    payload = (verb + path + str(nonce) + data).encode('utf-8')
    return hmac.new(secret.encode('utf-8'), payload, digestmod=hashlib.sha256).hexdigest()
# Naive implementation of connecting to BitMEX websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without polling the hell out of the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll really often if it wants.
class BitMEXWebsocket:
    '''In-memory mirror of BitMEX realtime tables over a websocket.

    Connects on construction (subscriptions are encoded in the querystring)
    and keeps ``self.data[table]`` updated from partial/insert/update/delete
    pushes, so callers can poll the accessor methods as if this were a REST
    endpoint.
    '''

    # Don't grow a table larger than this amount. Helps cap memory usage.
    MAX_TABLE_LEN = 200

    def __init__(self, endpoint, symbol, api_key=None, api_secret=None):
        '''Connect to the websocket and initialize data stores.'''
        self.logger = logging.getLogger(__name__)
        self.logger.debug("Initializing WebSocket.")

        self.endpoint = endpoint
        self.symbol = symbol

        # An API key is only usable together with its secret.
        if api_key is not None and api_secret is None:
            raise ValueError('api_secret is required if api_key is provided')
        if api_key is None and api_secret is not None:
            raise ValueError('api_key is required if api_secret is provided')

        self.api_key = api_key
        self.api_secret = api_secret

        self.data = {}   # table name -> list of row dicts
        self.keys = {}   # table name -> key fields that uniquely identify a row
        self.exited = False

        # We can subscribe right in the connection querystring, so let's build that.
        # Subscribe to all pertinent endpoints
        wsURL = self.__get_url()
        self.logger.info("Connecting to %s" % wsURL)
        self.__connect(wsURL, symbol)
        self.logger.info('Connected to WS.')

        # Connected. Wait for partials
        # self.__wait_for_symbol(symbol)
        # if api_key:
        #     self.__wait_for_account()
        # self.logger.info('Got all market data. Starting.')

    def exit(self):
        '''Call this to exit - will close websocket.'''
        self.exited = True
        self.ws.close()

    def get_instrument(self):
        '''Get the raw instrument data for this symbol.'''
        # Turn the 'tickSize' into 'tickLog' for use in rounding
        instrument = self.data['instrument'][0]
        instrument['tickLog'] = int(math.fabs(math.log10(instrument['tickSize'])))
        return instrument

    def get_ticker(self):
        '''Return a ticker object. Generated from quote and trade.'''
        lastQuote = self.data['quote'][-1]
        lastTrade = self.data['trade'][-1]
        ticker = {
            "last": lastTrade['price'],
            "buy": lastQuote['bidPrice'],
            "sell": lastQuote['askPrice'],
            "mid": (float(lastQuote['bidPrice'] or 0) + float(lastQuote['askPrice'] or 0)) / 2
        }
        # The instrument has a tickSize. Use it to round values.
        instrument = self.data['instrument'][0]
        return {k: round(float(v or 0), instrument['tickLog']) for k, v in ticker.items()}

    def funds(self):
        '''Get your margin details.'''
        return self.data['margin'][0]

    def positions(self):
        '''Get your positions.'''
        return self.data['position']

    def market_depth(self):
        '''Get market depth (orderbook). Returns all levels.'''
        return self.data['orderBookL2']

    def open_orders(self, clOrdIDPrefix):
        '''Get all your open orders.'''
        orders = self.data['order']
        # Filter to only open orders and those that we actually placed
        return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and order_leaves_quantity(o)]

    def executions(self):
        '''Get your executions'''
        return self.data['execution']

    def recent_trades(self):
        '''Get recent trades.'''
        return self.data['trade']

    #
    # End Public Methods
    #

    def __connect(self, wsURL, symbol):
        '''Connect to the websocket in a thread.'''
        self.logger.debug("Starting thread")

        self.ws = websocket.WebSocketApp(wsURL,
                                         on_message=self.__on_message,
                                         on_close=self.__on_close,
                                         on_open=self.__on_open,
                                         on_error=self.__on_error,
                                         header=self.__get_auth())

        self.wst = threading.Thread(target=lambda: self.ws.run_forever())
        self.wst.daemon = True
        self.wst.start()
        self.logger.debug("Started thread")

        # Wait for connect before continuing
        conn_timeout = 5
        # NOTE(review): 'and' binds tighter than 'or', so this parses as
        # "not sock or (not connected and timeout left)" - confirm the
        # grouping matches the intended timeout behaviour.
        while not self.ws.sock or not self.ws.sock.connected and conn_timeout:
            sleep(1)
            conn_timeout -= 1
        if not conn_timeout:
            self.logger.error("Couldn't connect to WS! Exiting.")
            self.exit()
            raise websocket.WebSocketTimeoutException('Couldn\'t connect to WS! Exiting.')

    def __get_auth(self):
        '''Return auth headers. Will use API Keys if present in settings.'''
        if self.api_key:
            self.logger.info("Authenticating with API Key.")
            # To auth to the WS using an API key, we generate a signature of a nonce and
            # the WS API endpoint.
            expires = generate_nonce()
            return [
                "api-expires: " + str(expires),
                "api-signature: " + generate_signature(self.api_secret, 'GET', '/realtime', expires, ''),
                "api-key:" + self.api_key
            ]
        else:
            self.logger.info("Not authenticating.")
            return []

    def __get_url(self):
        '''
        Generate a connection URL. We can define subscriptions right in the querystring.
        Most subscription topics are scoped by the symbol we're listening to.
        '''
        # You can sub to orderBookL2 for all levels, or orderBook10 for top 10 levels & save bandwidth
        symbolSubs = ["execution"] #, "instrument", "order", "orderBookL2", "position", "quote", "trade"]
        genericSubs = ["margin"]

        subscriptions = [sub + ':' + self.symbol for sub in symbolSubs]
        subscriptions += genericSubs

        # Reuse the REST endpoint's host, swapping http(s) for ws(s).
        urlParts = list(urllib.parse.urlparse(self.endpoint))
        urlParts[0] = urlParts[0].replace('http', 'ws')
        urlParts[2] = "/realtime?subscribe={}".format(','.join(subscriptions))
        return urllib.parse.urlunparse(urlParts)

    def __wait_for_account(self):
        '''On subscribe, this data will come down. Wait for it.'''
        # Wait for the keys to show up from the ws
        while not {'margin', 'position', 'order', 'orderBookL2'} <= set(self.data):
            sleep(0.1)

    def __wait_for_symbol(self, symbol):
        '''On subscribe, this data will come down. Wait for it.'''
        while not {'instrument', 'trade', 'quote'} <= set(self.data):
            sleep(0.1)

    def __send_command(self, command, args=None):
        '''Send a raw command.'''
        if args is None:
            args = []
        self.ws.send(json.dumps({"op": command, "args": args}))

    def __on_message(self, message):
        '''Handler for parsing WS messages.'''
        message = json.loads(message)
        self.logger.debug(json.dumps(message))

        table = message['table'] if 'table' in message else None
        action = message['action'] if 'action' in message else None
        try:
            if 'subscribe' in message:
                self.logger.debug("Subscribed to %s." % message['subscribe'])
            elif action:
                if table not in self.data:
                    self.data[table] = []

                # There are four possible actions from the WS:
                # 'partial' - full table image
                # 'insert'  - new row
                # 'update'  - update row
                # 'delete'  - delete row
                if action == 'partial':
                    self.logger.debug("%s: partial" % table)
                    self.data[table] += message['data']
                    # Keys are communicated on partials to let you know how to uniquely identify
                    # an item. We use it for updates.
                    self.keys[table] = message['keys']
                elif action == 'insert':
                    self.logger.debug('%s: inserting %s' % (table, message['data']))
                    self.data[table] += message['data']

                    # Limit the max length of the table to avoid excessive memory usage.
                    # Don't trim orders because we'll lose valuable state if we do.
                    if table not in ['order', 'orderBookL2'] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:
                        self.data[table] = self.data[table][int(BitMEXWebsocket.MAX_TABLE_LEN / 2):]
                elif action == 'update':
                    self.logger.debug('%s: updating %s' % (table, message['data']))
                    # Locate the item in the collection and update it.
                    for updateData in message['data']:
                        item = findItemByKeys(self.keys[table], self.data[table], updateData)
                        if not item:
                            return  # No item found to update. Could happen before push
                        item.update(updateData)
                        # Remove cancelled / filled orders
                        if table == 'order' and not order_leaves_quantity(item):
                            self.data[table].remove(item)
                elif action == 'delete':
                    self.logger.debug('%s: deleting %s' % (table, message['data']))
                    # Locate the item in the collection and remove it.
                    for deleteData in message['data']:
                        item = findItemByKeys(self.keys[table], self.data[table], deleteData)
                        self.data[table].remove(item)
                else:
                    raise Exception("Unknown action: %s" % action)
        # NOTE(review): bare except logs and swallows every error raised on
        # this thread (including SystemExit/KeyboardInterrupt) - confirm intended.
        except:
            self.logger.error(traceback.format_exc())

    def __on_error(self, error):
        '''Called on fatal websocket errors. We exit on these.'''
        if not self.exited:
            self.logger.error("Error : %s" % error)
            raise websocket.WebSocketException(error)

    def __on_open(self):
        '''Called when the WS opens.'''
        self.logger.debug("Websocket Opened.")

    def __on_close(self):
        '''Called on websocket close.'''
        self.logger.info('Websocket Closed')
# Utility method for finding an item in the store.
# When an update comes through on the websocket, we need to figure out which item in the array it is
# in order to match that item.
#
# Helpfully, on a data push (or on an HTTP hit to /api/v1/schema), we have a "keys" array. These are the
# fields we can use to uniquely identify an item. Sometimes there is more than one, so we iterate through all
# provided keys.
def findItemByKeys(keys, table, matchData):
    """Return the first item in ``table`` matching ``matchData`` on every key.

    Args:
        keys: the key field names that uniquely identify a row.
        table: list of row dicts to search.
        matchData: dict carrying the key values to match.

    Returns:
        The first matching row dict, or ``None`` when nothing matches
        (the original returned None implicitly; made explicit here).
    """
    for item in table:
        # all() short-circuits on the first mismatched key, unlike the
        # original flag-based loop that always compared every key.
        if all(item[key] == matchData[key] for key in keys):
            return item
    return None
def order_leaves_quantity(o):
    """True when the order still has quantity left to execute.

    A missing (None) leavesQty is treated as an open order.
    """
    leaves = o['leavesQty']
    if leaves is None:
        return True
    return leaves > 0
|
test_consumer_group.py | import collections
import logging
import threading
import time
import pytest
import six
from kafka import SimpleClient
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.coordinator.base import MemberState, Generation
from kafka.structs import TopicPartition
from test.conftest import version
from test.testutil import random_string
def get_connect_str(kafka_broker):
    """Return a ``host:port`` bootstrap string for the given broker fixture."""
    return '{}:{}'.format(kafka_broker.host, kafka_broker.port)
@pytest.fixture
def simple_client(kafka_broker):
    """Fixture: a SimpleClient connected to the test broker."""
    return SimpleClient(get_connect_str(kafka_broker))
@pytest.fixture
def topic(simple_client):
    """Fixture: create a random 5-char topic on the broker and return its name."""
    topic = random_string(5)
    simple_client.ensure_topic_exists(topic)
    return topic
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, version):
    """A bare KafkaConsumer should establish a broker connection after poll()."""
    # 0.8.2 brokers need a topic to function well
    if version >= (0, 8, 2) and version < (0, 9):
        # NOTE(review): this invokes the fixture functions directly rather
        # than through pytest injection - modern pytest forbids calling
        # fixtures directly; verify this branch still works.
        topic(simple_client(kafka_broker))
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    consumer.poll(500)
    # After one poll the client should hold at least one connected broker conn.
    assert len(consumer._client._conns) > 0
    node_id = list(consumer._client._conns.keys())[0]
    assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
    """Start several consumers in one group and verify the group stabilizes
    on a single generation with a disjoint, complete partition assignment."""
    num_partitions = 4
    connect_str = get_connect_str(kafka_broker)
    consumers = {}
    stop = {}
    threads = {}
    # messages[consumer_index][TopicPartition] -> list of consumed records.
    # Fixed: was defaultdict(list), so messages[i] was a list and
    # messages[i][tp] would raise TypeError on any consumed record.
    messages = collections.defaultdict(lambda: collections.defaultdict(list))
    group_id = 'test-group-' + random_string(6)

    def consumer_thread(i):
        assert i not in consumers
        assert i not in stop
        stop[i] = threading.Event()
        consumers[i] = KafkaConsumer(topic,
                                     bootstrap_servers=connect_str,
                                     group_id=group_id,
                                     heartbeat_interval_ms=500)
        while not stop[i].is_set():
            # poll() returns {TopicPartition: [records]}; iterate items, not
            # values. Fixed: six.itervalues tried to unpack each record list
            # into (tp, records) and raised whenever records actually arrived.
            for tp, records in six.iteritems(consumers[i].poll(100)):
                messages[i][tp].extend(records)
        consumers[i].close()
        consumers[i] = None
        stop[i] = None

    num_consumers = 4
    for i in range(num_consumers):
        t = threading.Thread(target=consumer_thread, args=(i,))
        t.start()
        threads[i] = t

    try:
        timeout = time.time() + 35
        while True:
            for c in range(num_consumers):
                # Verify all consumers have been created
                if c not in consumers:
                    break
                # Verify all consumers have an assignment
                elif not consumers[c].assignment():
                    break
            # If all consumers exist and have an assignment
            else:
                logging.info('All consumers have assignment... checking for stable group')
                # Verify all consumers are in the same generation
                # then log state and break while loop
                generations = set([consumer._coordinator._generation.generation_id
                                   for consumer in list(consumers.values())])
                # New generation assignment is not complete until
                # coordinator.rejoining = False
                rejoining = any([consumer._coordinator.rejoining
                                 for consumer in list(consumers.values())])
                if not rejoining and len(generations) == 1:
                    for c, consumer in list(consumers.items()):
                        logging.info("[%s] %s %s: %s", c,
                                     consumer._coordinator._generation.generation_id,
                                     consumer._coordinator._generation.member_id,
                                     consumer.assignment())
                    break
                else:
                    logging.info('Rejoining: %s, generations: %s', rejoining, generations)
                time.sleep(1)
            assert time.time() < timeout, "timeout waiting for assignments"

        logging.info('Group stabilized; verifying assignment')
        group_assignment = set()
        for c in range(num_consumers):
            assert len(consumers[c].assignment()) != 0
            # Partitions must be split without overlap between members...
            assert set.isdisjoint(consumers[c].assignment(), group_assignment)
            group_assignment.update(consumers[c].assignment())
        # ...and together they must cover every partition of the topic.
        assert group_assignment == set([
            TopicPartition(topic, partition)
            for partition in range(num_partitions)])
        logging.info('Assignment looks good!')
    finally:
        logging.info('Shutting down %s consumers', num_consumers)
        for c in range(num_consumers):
            logging.info('Stopping consumer %s', c)
            stop[c].set()
            threads[c].join()
            threads[c] = None
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
    """pause()/resume() should be reflected by paused(), and unsubscribe()
    must clear any paused partitions."""
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    topics = [TopicPartition(topic, 1)]
    consumer.assign(topics)
    assert set(topics) == consumer.assignment()
    assert set() == consumer.paused()

    consumer.pause(topics[0])
    assert set([topics[0]]) == consumer.paused()

    consumer.resume(topics[0])
    assert set() == consumer.paused()

    # Unsubscribing drops the assignment and with it the paused set.
    consumer.unsubscribe()
    assert set() == consumer.paused()
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_heartbeat_thread(kafka_broker, topic):
    """Background heartbeats must keep flowing without advancing last_poll;
    only an explicit poll() call advances last_poll."""
    group_id = 'test-group-' + random_string(6)
    consumer = KafkaConsumer(topic,
                             bootstrap_servers=get_connect_str(kafka_broker),
                             group_id=group_id,
                             heartbeat_interval_ms=500)

    # poll until we have joined group / have assignment
    while not consumer.assignment():
        consumer.poll(timeout_ms=100)

    assert consumer._coordinator.state is MemberState.STABLE
    last_poll = consumer._coordinator.heartbeat.last_poll
    last_beat = consumer._coordinator.heartbeat.last_send

    # Wait (up to 30s) for the heartbeat thread to send another heartbeat.
    timeout = time.time() + 30
    while True:
        if time.time() > timeout:
            raise RuntimeError('timeout waiting for heartbeat')
        if consumer._coordinator.heartbeat.last_send > last_beat:
            break
        time.sleep(0.5)
    # Heartbeats alone must not count as polls...
    assert consumer._coordinator.heartbeat.last_poll == last_poll
    # ...but an explicit poll() does.
    consumer.poll(timeout_ms=100)
    assert consumer._coordinator.heartbeat.last_poll > last_poll
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont, QRegExpValidator
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer, QRegExp
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton, )
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, ravencoin, commands,
paymentrequest, lnutil)
from electrum.ravencoin import COIN, is_address, base_decode, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput, RavenValue, script_GetOp)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException, LnInvoiceException
from .asset_workspace import AssetCreateWorkspace, AssetReissueWorkspace
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, RVNAmountEdit, FreezableLineEdit, FeerateEdit, PayToAmountEdit, SizedFreezableLineEdit
from .messages_list import UpdateDevMessagesThread
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog, HeaderTracker)
from .util import ButtonsTextEdit, ButtonsLineEdit, ComplexLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from ...assets import is_main_asset_name_good, is_sub_asset_name_good, is_unique_asset_name_good
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
# Max retry attempts per lightning payment (lightning support appears
# disabled in this port — channel tabs/handlers below are commented out).
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    """Small icon button shown in the window's status bar.

    Wraps a parameterless callback so it fires on click or on
    Return/Enter while the button has keyboard focus.
    """
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25, 25))
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        """Drop the unwanted PyQt5 "checked" argument and invoke the callback."""
        self.func()

    def keyPressEvent(self, e):
        # Make Return/Enter act like a click.
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    from functools import wraps

    # functools.wraps preserves the wrapped method's __name__/__doc__, so
    # logging, hooks and debuggers see the real method instead of
    # "request_password".
    @wraps(func)
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the password verifies or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
# ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the main wallet window.

        Wires up wallet state, all tabs, keyboard shortcuts, network
        callbacks, and the optional update-check / dev-notification
        background threads.

        Args:
            gui_object: application-level controller (daemon, tray, timer, ...).
            wallet: the wallet this window displays; must be truthy.
        """
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        # if wallet.has_lightning():
        #     self.wallet.config.set_key('show_channels_tab', True)
        # Per-wallet asset visibility preferences (lists from the config).
        self.asset_blacklist = self.wallet.config.get('asset_blacklist', [])
        self.asset_whitelist = self.wallet.config.get('asset_whitelist', [])
        self.use_own_cb = QCheckBox(_('Force use own RVN'))
        self.force_use_own = False
        # Tracks sendable things
        self.send_options = []  # type: List[str]
        Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
        self.network = gui_object.daemon.network  # type: Network
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self._cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)
        # Incoming txs are queued here and drained by notify_transactions().
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        # --- tab construction; addTab order below is the on-screen order ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.assets_tab = self.create_assets_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.messages_tab = self.create_messages_tab()
        # self.channels_tab = self.create_channels_tab()
        self.history_tab = self.create_history_tab()
        history_tab_widget = QWidget()
        self.history_tab_layout = QVBoxLayout()
        self.header_tracker = HeaderTracker()
        self.header_tracker.begin()
        self.displaying_tracker = False
        self.last_header = -1
        self.history_tab_layout.addWidget(self.history_tab)
        history_tab_widget.setLayout(self.history_tab_layout)
        tabs.addTab(history_tab_widget, read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.assets_tab, read_QIcon('tab_assets.png'), _('Assets'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name, default=False):
            # Optional tabs remember their canonical position (tab_pos) so
            # toggle_tab() can re-insert them in a stable order later.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), default):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.messages_tab, read_QIcon("tab_message.png"), _("Messages"), "messages", True)
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        # add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        central_widget = QScrollArea()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        self.setMinimumWidth(640)
        self.setMinimumHeight(400)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum-ravencoin.png"))
        self.init_menubar()
        # weakref.proxy so shortcut lambdas don't keep the tab widget (and
        # transitively this window) alive after the window is closed.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self,
                  lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1) % wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self,
                  lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1) % wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i default binds the current loop value (avoids late binding)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded', 'asset_meta']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            # update fee slider in case we missed the callback
            # self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum - " + _("Enable update check"),
                                   msg=_(
                                       "For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        self._update_check_thread = None
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum-Ravencoin {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
        self._dev_notification_thread = None
        if config.get('get_dev_notifications', True):
            self._dev_notification_thread = UpdateDevMessagesThread(self)
            self._dev_notification_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
# return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        # Handle a network message in the GUI thread
        # note: all windows get events from all wallets!
        # Dispatches the daemon callbacks registered in __init__; events that
        # carry a wallet are filtered against self.wallet first.
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # queued for notify_transactions() (rate-limited tray popups)
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        # Lightning/channel events are intentionally disabled in this port:
        # elif event == 'gossip_db_loaded':
        #     self.channels_list.gossip_db_loaded.emit(*args)
        # elif event == 'channels_updated':
        #     wallet = args[0]
        #     if wallet == self.wallet:
        #         self.channels_list.update_rows.emit(*args)
        # elif event == 'channel':
        #     wallet = args[0]
        #     if wallet == self.wallet:
        #         self.channels_list.update_single_row.emit(*args)
        #         self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            # deliberately ignored (no dynamic fee UI here)
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        elif event == 'asset_meta':
            pass
            # self.reissue_workspace.refresh_owners(True)
            # self.create_workspace.refresh_owners(True)
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Attach *wallet* to this window and bring the UI up to date.

        Starts the wallet's task thread, refreshes menus/tabs, restores
        geometry, shows (or hides, per config) the window, runs plugin
        hooks, and finally checks for internal address corruption.
        """
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        # self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        # Respect the "start minimized to tray" preference.
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
if not self.isMaximized():
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
if not self.isMaximized():
self.setGeometry(100, 100, 950, 550)
self.setMinimumSize(950, 550)
def watching_only_changed(self):
name = "Electrum Ravencoin Testnet" if constants.net.TESTNET else "Electrum Ravencoin"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]' % ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Ravencoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Ravencoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_hardware(self):
if not self.wallet.keystore or self.wallet.keystore.get_type_text()[:2] != 'hw':
return
if self.config.get('dont_show_hardware_warning', False):
return
msg = ''.join([
_("This is a hardware wallet."), '\n',
_("Mining to this wallet may cause you problems. If mining, ensure you make your mining payouts sporadic"), '\n',
_("or mine to an electrum software wallet and transfer to hardware.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Hardware Wallet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_hardware_warning', True)
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Ravencoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Confirm/select a backup directory in a modal dialog, then copy the
        wallet file there.

        Returns:
            True on success, False if the dialog was cancelled, None when no
            backup directory is configured or the copy failed (an error
            dialog is shown in those cases).
        """
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        # Button doubles as display + chooser; select_backup_dir updates it.
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return False
        # Re-read from config: select_backup_dir may have changed it while
        # the dialog was open.
        backup_dir = self.config.get_backup_dir()
        if backup_dir is None:
            self.show_message(_("You need to configure a backup directory in your preferences"),
                              title=_("Backup not configured"))
            return
        try:
            new_path = self.wallet.save_backup(backup_dir)
        except BaseException as reason:
            self.show_critical(
                _("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason),
                title=_("Unable to create backup"))
            return
        msg = _("A copy of your wallet file was created in") + " '%s'" % str(new_path)
        self.show_message(msg, title=_("Wallet backup created"))
        return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d" % (i + 1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Construct the File / Wallet / View / Tools / Help menu bar.

        Stores on self the menus/actions whose enabled/visible state is
        toggled elsewhere (password, seed, import/export, recently-open).
        """
        menubar = QMenuBar()
        # --- File ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        # history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))

        # --- View (optional tab toggles) ---
        def add_toggle_action(view_menu, tab, default=False):
            # Action text reflects the tab's current visibility.
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), default)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))

        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.messages_tab, True)
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        # add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools ---
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        if self.network and self.network.local_watchtower:
            tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        # Cannot be closed on mac; disabled for now
        # tools_menu.addAction(_("&Log viewer"), self.logview_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction("&RVN Electrum Wiki", lambda: webopen("https://raven.wiki/wiki/Electrum"))
        help_menu.addAction("&GetRavencoin.org", lambda: webopen("https://GetRavencoin.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(
            QKeySequence.HelpContents)
        # if not constants.net.TESTNET:
        #     help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('raven:%s?message=donation for %s' % (d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version") + " %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Ravencoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Ravencoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_bitcoin_paper(self):
        """Reconstruct the Bitcoin whitepaper PDF from the transaction that
        embeds it, cache it in the config directory, and open it in a
        browser. Fetches the raw tx only on first use."""
        filename = os.path.join(self.config.path, 'bitcoin.pdf')
        if not os.path.exists(filename):
            # Well-known whitepaper-embedding transaction id.
            s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
            if not s:
                return
            # NOTE(review): the magic offsets below carve the PDF hex out of
            # the raw transaction by splitting on the output-value marker and
            # stripping script framing bytes — presumably mirrored from
            # upstream Electrum; verify against it before changing them.
            s = s.split("0100000000000000")[1:-1]
            out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
            with open(filename, 'wb') as f:
                f.write(bytes.fromhex(out))
        webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = RavenValue()
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
recv = ''
rvn = total_amount.rvn_value
assets = total_amount.assets
recv += self.format_amount_and_units(rvn)
if assets:
recv += ', '
assets = ['{}: {}'.format(asset, self.config.format_amount(val)) for asset, val in assets.items()]
recv += ', '.join(assets)
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), recv))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
recv = ''
rvn = tx_wallet_delta.delta.rvn_value
assets = tx_wallet_delta.delta.assets
recv += self.format_amount_and_units(rvn)
if assets:
recv += ', '
assets = ['{}: {}'.format(asset, self.config.format_amount(val)) for asset, val in assets.items()]
recv += ', '.join(assets)
self.notify(_("New transaction: {}").format(recv))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
self.request_list.refresh_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat: int, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a coin amount edit and a fiat amount edit synchronized via FX.

        Editing either widget recomputes the other from the current exchange
        rate; ``fee_e`` (may be None) is cleared / refreshed alongside.  The
        ``follows`` attribute set on each edit breaks the textChanged feedback
        loop between the pair.
        """
        def edit_changed(edit):
            if edit.follows:
                return  # change originated from the paired edit; ignore
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            # NaN rate doubles as the "no FX available" sentinel below.
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # Cannot convert: blank out the opposite field.
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    # Fiat was edited: recompute the coin amount.
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    # Coin amount was edited: recompute fiat.
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
status_text = ''
local_height = self.network.get_local_height()
server_height = self.network.get_server_height()
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_lag = local_height - server_height
fork_str = "_fork" if len(self.network.get_blockchains()) > 1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Syncing transactions..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png" % fork_str)
else:
c, u, x = self.wallet.get_balance()
c, u, x = c.rvn_value, u.rvn_value, x.rvn_value
text = _("Balance") + ": %s " % (self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]" % (self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]" % (self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s' % (self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png" % fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png" % fork_str)
if local_height < server_height - (2016*2) and self.header_tracker:
if not self.displaying_tracker:
self.history_tab_layout.removeWidget(self.history_tab)
self.displaying_tracker = True
self.history_tab_layout.addWidget(self.header_tracker)
elif self.last_header != local_height:
self.last_header = local_height
self.header_tracker.calculate_stats(local_height, server_height)
elif self.displaying_tracker and self.header_tracker:
self.displaying_tracker = False
self.history_tab_layout.removeWidget(self.header_tracker)
self.history_tab_layout.addWidget(self.history_tab)
self.header_tracker.finished()
self.header_tracker.deleteLater()
self.header_tracker = None # Garbage collect
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_label.setText(status_text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.asset_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
# self.channels_list.update_rows.emit(wallet)
self.update_completions()
self.refresh_send_tab()
if self.wallet.wallet_type not in ('imported, xpub'):
self.create_workspace.refresh_owners()
self.reissue_workspace.refresh_owners()
# def create_channels_tab(self):
# self.channels_list = ChannelsList(self)
# t = self.channels_list.get_toolbar()
# return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_asset(self, asset):
from . import asset_dialog
d = asset_dialog.AssetDialog(self, asset)
d.exec_()
def hide_asset(self, asset):
self.asset_blacklist.append('^' + asset + '$')
self.config.set_key('asset_blacklist', self.asset_blacklist, True)
self.asset_list.update()
self.history_model.refresh('Marked asset as spam')
self.history_list.update()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
    def create_receive_tab(self):
        """Build and return the Receive tab widget.

        Left: a grid of description/amount/expiry inputs with New Address /
        Clear buttons.  Right: a tab widget showing the receiving address,
        the payment request text, and its QR code (hidden until a request
        exists).  Bottom: the list of pending receive requests.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_message_e = FreezableLineEdit()
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = RVNAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        # NOTE(review): this wires the *send*-tab amount fields; it assumes
        # self.amount_e / self.fiat_send_e already exist (created by
        # create_send_tab) — confirm tab creation order in the constructor.
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
        self.expires_combo = QComboBox()
        # (seconds, label) pairs of request-expiry choices, sorted by duration.
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            i = 0
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        #self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        def on_expiry(i):
            # Persist the newly selected expiry as the default.
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ''.join([
            _('Expiration date of your request.'), ' ',
            _('This information is seen by the recipient if you send them a signed payment request.'),
            '\n\n',
            _('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'),
            ' ',
            _('The ravencoin address never expires and will always be part of this electrum wallet.'), ' ',
            _('You can reuse a ravencoin address any number of times but it is not good for your privacy.'),
            '\n\n',
            _('For Lightning requests, payments will not be accepted after the expiration.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        # Read-only label shown in place of the combo for existing requests.
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)
        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("ravencoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        if self.wallet.has_lightning():
            # Lightning-capable wallets get an extra "create LN request" button.
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 0, 1, -1)
        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
        self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
        self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
        self.receive_qr = QRCodeWidget(fixedSize=220)
        # Clicking the QR pops out the standalone QR window; the cursor
        # override hints that the widget is clickable.
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_address_e = ButtonsTextEdit()
        self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.receive_requests_label = QLabel(_('Receive queue'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        receive_tabs = QTabWidget()
        receive_tabs.addTab(self.receive_address_e, _('Address'))
        receive_tabs.addTab(self.receive_payreq_e, _('Request'))
        receive_tabs.addTab(self.receive_qr, _('QR Code'))
        receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
        receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
        receive_tabs_sp = receive_tabs.sizePolicy()
        receive_tabs_sp.setRetainSizeWhenHidden(True)
        receive_tabs.setSizePolicy(receive_tabs_sp)
        def maybe_hide_receive_tabs():
            # Hide the address/request/QR tabs until a payment request exists.
            receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
        self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
        maybe_hide_receive_tabs()
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addStretch()
        hbox.addWidget(receive_tabs)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
    def create_invoice(self, is_lightning: bool):
        """Create a receive request from the receive-tab fields.

        Creates either a lightning invoice or an on-chain address request,
        selects it in the request list, clears the input fields, and copies
        the invoice/address to the clipboard.
        """
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            if is_lightning:
                if not self.wallet.lnworker.channels:
                    self.show_error(_("You need to open a Lightning channel first."))
                    return
                # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
                key = self.wallet.lnworker.add_request(amount, message, expiry)
            else:
                key = self.create_bitcoin_request(amount, message, expiry)
                if not key:
                    # user declined one of the address-reuse warnings
                    return
                self.address_list.update()
        except InvoiceError as e:
            self.show_error(_('Error creating payment request') + ':\n' + str(e))
            return
        assert key is not None
        self.request_list.update()
        self.request_list.select_key(key)
        # clear request fields
        self.receive_amount_e.setText('')
        self.receive_message_e.setText('')
        # copy to clipboard
        r = self.wallet.get_request(key)
        content = r.invoice if r.is_lightning() else r.get_address()
        title = _('Invoice') if is_lightning else _('Address')
        self.do_copy(content, title=title)
    def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
        """Create and register an on-chain payment request.

        Returns the receiving address (which doubles as the request key), or
        None if the user declines an address-reuse / gap-limit warning.
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():  # imported wallet
                msg = [
                    _('No more addresses in your wallet.'), ' ',
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                    _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                    _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
                ]
                if not self.question(''.join(msg)):
                    return
                addr = self.wallet.get_receiving_address()
            else:  # deterministic wallet
                if not self.question(
                        _("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req)
        except Exception as e:
            self.logger.exception('Error adding payment request')
            self.show_error(_('Error adding payment request') + ':\n' + repr(e))
        else:
            # only sign if registering the request succeeded
            self.sign_payment_request(addr)
        return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def refresh_send_tab(self):
# Don't interrupt if we don't need to
balance = sum(self.wallet.get_balance(), RavenValue())
new_send_options = [util.decimal_point_to_base_unit_name(self.get_decimal_point())] + \
sorted([asset for asset, bal in balance.assets.items() if bal != 0])
diff = set(new_send_options) - set(self.send_options)
if self.send_options and not diff:
return
self.to_send_combo.clear()
self.send_options = new_send_options
self.to_send_combo.addItems(self.send_options)
    def create_send_tab(self):
        """Build and return the Send tab widget.

        Contains pay-to / description / (optional OP_RETURN) / amount inputs,
        the RVN-or-asset selector combo, Max/Save/Pay/Clear buttons, and the
        send queue (invoice list) underneath.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(1, 1)
        from .paytoedit import PayToEdit
        # Let user choose to send RVN or Asset
        self.to_send_combo = QComboBox()
        self.refresh_send_tab()
        # self.amount_e = RVNAmountEdit(self.get_decimal_point)
        self.amount_e = PayToAmountEdit(self.get_decimal_point,
                                        lambda: self.send_options[self.to_send_combo.currentIndex()][:4])
        self.payto_e = PayToEdit(self)
        self.payto_e.addPasteButton(self.app)
        msg = _('Recipient of the funds.') + '\n\n' \
              + _(
            'You may enter a Ravencoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Ravencoin address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # Contact-name autocompletion for the pay-to field.
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n' \
              + _(
            'The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = SizedFreezableLineEdit(width=700)
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        # OP_RETURN message row, only visible when enabled in the config.
        vis = self.config.get('enable_op_return_messages', False)
        self.pubkey_e = FreezableLineEdit()
        self.pubkey_e.setMaxLength(40)  # Maximum length of an OP_RETURN message is 40. 1 byte for message length
        self.pubkey_e.setMinimumWidth(700)
        msg = _('OP_RETURN message.') + '\n\n' \
              + _('A short message to be encoded in a null pubkey') + ' ' \
              + _(
            'Note that this is not an intented feature of Ravencoin and may be removed in the future.') + '\n\n' \
              + _('This will increase your fee slightly.')
        self.pubkey_label = HelpLabel(_('OP_RETURN Message'), msg)
        grid.addWidget(self.pubkey_label, 3, 0)
        self.pubkey_label.setVisible(vis)
        self.pubkey_e.setVisible(vis)
        grid.addWidget(self.pubkey_e, 3, 1, 1, -1)
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _(
            'Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 4, 0)
        grid.addWidget(self.amount_e, 4, 1)
        grid.addWidget(self.to_send_combo, 4, 2)
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 4, 3)
        # Keep the fiat field frozen in lockstep with the amount field.
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        def on_to_send():
            # Constrain the amount input to the divisibility of the selected
            # unit: 8 decimals for RVN, the asset's own divisions otherwise.
            i = self.to_send_combo.currentIndex()
            self.fiat_send_e.setVisible(i == 0)
            if i == 0:
                reg = QRegExp('^[0-9]{0,11}\\.([0-9]{1,8})$')
            else:
                meta = self.wallet.get_asset_meta(self.send_options[i])
                if meta:
                    divs = meta.divisions
                    if divs == 0:
                        reg = QRegExp('^[1-9][0-9]{0,10}$')
                    else:
                        reg = QRegExp('^[0-9]{0,11}\\.([0-9]{1,' + str(divs) + '})$')
                else:
                    # For some reason we don't have asset data yet;
                    # give the user the most freedom
                    reg = QRegExp('^[0-9]{0,11}\\.([0-9]{1,8})$')
            validator = QRegExpValidator(reg)
            self.amount_e.setValidator(validator)
        self.to_send_combo.currentIndexChanged.connect(on_to_send)
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(100)
        self.max_button.setCheckable(True)
        grid.addWidget(self.max_button, 4, 4)
        self.save_button = EnterButton(_("Save"), self.do_save_invoice)
        self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.save_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 4)
        self.amount_e.shortcut.connect(self.spend_max)
        def reset_max(text):
            # Any manual edit cancels "Max" mode.
            self.max_button.setChecked(False)
            enable = not bool(text) and not self.amount_e.isReadOnly()
            # self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        self.set_onchain(False)
        self.invoices_label = QLabel(_('Send queue'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        hbox.addStretch(1)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        run_hook('create_send_tab', grid)
        return w
def create_messages_tab(self):
from .messages_list import MessageList
self.message_list = l = MessageList(self)
tab = self.create_list_tab(l)
return tab
def create_assets_tab(self):
from .asset_list import AssetList
self.asset_list = l = AssetList(self)
if self.wallet.wallet_type not in ('xpub',):
self.create_workspace = create_w = AssetCreateWorkspace(self,
self.confirm_asset_creation)
self.reissue_workspace = reissue_w = AssetReissueWorkspace(self,
self.confirm_asset_reissue)
else:
self.create_workspace = create_w = QLabel()
self.reissue_workspace = reissue_w = QLabel()
layout = QGridLayout()
w = QWidget()
w.setLayout(layout)
self.asset_tabs = tabwidget = QTabWidget()
tabwidget.addTab(l, "My Assets")
tabwidget.addTab(create_w, "Create Asset")
tabwidget.addTab(reissue_w, "Reissue Asset")
layout.addWidget(tabwidget, 0, 0)
return w
def confirm_asset_reissue(self):
error = self.reissue_workspace.verify_valid()
if error:
self.show_warning(_('Invalid asset metadata:\n'
'{}').format(error))
return
def show_small_association_warning():
if self.reissue_workspace.should_warn_associated_data():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
goto = self.question(_('Your associated data is smalled than the '
'34 byte size.\n'
'Double check that you have input the correct '
'data.\n'
'If you continue, null bytes will be prepended '
'to the end of your data to fit this size.\n\n'
'Is this okay?'),
title=_('Warning: Small associated data'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_small_associated', False)
if goto:
return True
else:
return False
else:
return True
def show_non_reissuable_warning():
if self.reissue_workspace.should_warn_on_non_reissuable():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
goto = self.question(_('You will not be able to change '
'this asset in the future.\n'
'Are you sure you want to continue?'),
title=_('Warning: Non reissuable asset'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_non_reissuable', False)
if goto:
return True
else:
return False
else:
return True
if not show_small_association_warning():
return
if not show_non_reissuable_warning():
return
norm, new, change_addr = self.reissue_workspace.get_output()
self.pay_onchain_dialog(
self.get_coins(asset=self.reissue_workspace.get_owner()),
norm,
coinbase_outputs=new,
# change_addr=change_addr
)
self.reissue_workspace.reset_workspace()
def confirm_asset_creation(self):
error = self.create_workspace.verify_valid()
if error:
self.show_warning(_('Invalid asset metadata:\n'
'{}').format(error))
return
def show_small_association_warning():
if self.create_workspace.should_warn_associated_data():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
goto = self.question(_('Your associated data is smalled than the '
'34 byte size.\n'
'Double check that you have input the correct '
'data.\n'
'If you continue, null bytes will be prepended '
'to the end of your data to fit this size.\n\n'
'Is this okay?'),
title=_('Warning: Small associated data'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_small_associated', False)
if goto:
return True
else:
return False
else:
return True
def show_non_reissuable_warning():
if self.create_workspace.should_warn_on_non_reissuable():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
goto = self.question(_('You will not be able to change '
'this asset in the future.\n'
'Are you sure you want to continue?'),
title=_('Warning: Non reissuable asset'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_non_reissuable', False)
if goto:
return True
else:
return False
else:
return True
if not show_small_association_warning():
return
if not show_non_reissuable_warning():
return
norm, new, change_addr = self.create_workspace.get_output()
self.pay_onchain_dialog(
self.get_coins(asset=self.create_workspace.get_owner()),
norm,
coinbase_outputs=new,
#change_addr=change_addr
)
self.create_workspace.reset_workspace()
def get_asset_from_spend_tab(self) -> Optional[str]:
combo_index = self.to_send_combo.currentIndex()
if combo_index != 0:
return self.send_options[combo_index]
return None
    def spend_max(self):
        """Handle the Max button: draft a spend-all tx and fill in the amount.

        Builds an unsigned transaction spending all available coins of the
        selected unit (RVN or one asset), deducts plugin-added fees, writes
        the resulting amount into the amount field, and shows a tooltip
        breaking down the fees.
        """
        if run_hook('abort_send', self):
            return
        asset = self.get_asset_from_spend_tab()
        outputs = self.payto_e.get_outputs(True)
        if not outputs:
            return
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=self.get_coins(asset=asset),
            outputs=outputs,
            fee=fee_est,
            is_sweep=False)
        try:
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                # Check if we had enough funds excluding fees,
                # if so, still provide opportunity to set lower fees.
                tx = make_tx(0)
        except MultipleSpendMaxTxOutputs as e:
            self.max_button.setChecked(False)
            self.show_error(str(e))
            return
        except NotEnoughFunds as e:
            self.max_button.setChecked(False)
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_error(text)
            return
        self.max_button.setChecked(True)
        amount = tx.output_value()
        # Extra fee added by plugins (e.g. 2FA); default to 0 when no hook fires.
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - RavenValue(x_fee_amount)
        assets = amount_after_all_fees.assets
        # Display either the RVN value or the (single) asset value in the field.
        if len(assets) == 0:
            to_show = amount_after_all_fees.rvn_value.value
        else:
            __, v = list(assets.items())[0]
            to_show = v.value
        self.amount_e.setAmount(to_show)
        # show tooltip explaining max amount
        mining_fee = tx.get_fee()
        mining_fee_str = self.format_amount_and_units(mining_fee.rvn_value.value)
        msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
        if x_fee_amount:
            twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
            msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
        frozen_bal = self.get_frozen_balance_str()
        if frozen_bal:
            msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(
                frozen_bal)
        QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
    """Rebuild the 'Pay to' autocompletion list from the contact book."""
    payto_lines = [self.get_contact_payto(contact_key) for contact_key in self.contacts]
    self.completions.setStringList(payto_lines)
@protected
def protect(self, func, args, password):
    """Run func(*args, password) under password protection.

    NOTE(review): the @protected decorator is defined elsewhere; it
    presumably obtains the wallet password (prompting the user if
    necessary) and injects it as the 'password' argument — confirm
    against the decorator's definition.
    """
    return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
    """Collect the outputs to pay.

    Uses the active payment request when one is set; otherwise parses
    the 'Pay to' field (honouring the Max button state).
    """
    if self.payment_request:
        return self.payment_request.get_outputs()
    return self.payto_e.get_outputs(self.max_button.isChecked())
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
    """Returns whether there are errors with outputs.
    Also shows error dialog to user if so.
    """
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    for output in outputs:
        # Each output needs both a destination script and an amount.
        if output.scriptpubkey is None:
            self.show_error(_('Ravencoin Address is None'))
            return True
        if output.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    # All outputs are well-formed.
    return False
def check_send_tab_payto_line_and_show_errors(self) -> bool:
    """Returns whether there are errors.
    Also shows error dialog to user if so.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        # Free-form payto text: surface parse errors collected by the editor.
        errors = self.payto_e.get_errors()
        if errors:
            if len(errors) == 1 and not errors[0].is_multiline:
                err = errors[0]
                self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                  f"{err.line_content[:40]}...\n\n"
                                  f"{err.exc!r}")
            else:
                # Multiple (or multiline) errors: list each offending line.
                self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                  '\n'.join([_("Line #") +
                                             f"{err.idx + 1}: {err.line_content[:40]}... ({err.exc!r})"
                                             for err in errors]))
            return True
        # Alias that failed DNSSEC validation: warn but let the user proceed.
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    return False  # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
    """Confirm with the user and pay a BOLT11 invoice via the wallet's lnworker.

    The payment itself runs on the wallet thread, scheduling a coroutine
    on the network's asyncio loop.
    """
    if amount_msat is None:
        raise Exception("missing amount for LN invoice")
    amount_sat = Decimal(amount_msat) / 1000
    # FIXME this is currently lying to user as we truncate to satoshis
    msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(
        self.format_amount_and_units(amount_sat))
    if not self.question(msg):
        return
    # Persist the invoice before attempting payment.
    self.save_pending_invoice()
    def task():
        coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
        fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
        return fut.result()
    self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
    """React to a status change of one of our receive requests."""
    if wallet != self.wallet:
        # Event for a different wallet window; ignore.
        return
    req = self.wallet.receive_requests.get(key)
    if req is None:
        return
    if status != PR_PAID:
        self.request_list.update_item(key, req)
        return
    # Paid: notify the user and schedule a full GUI refresh.
    self.notify(_('Payment received') + '\n' + key)
    self.need_update.set()
def on_invoice_status(self, wallet, key):
    """Refresh the invoice list when an invoice's status changes."""
    if wallet != self.wallet:
        # Event for a different wallet window; ignore.
        return
    invoice = self.wallet.get_invoice(key)
    if invoice is None:
        return
    if self.wallet.get_invoice_status(invoice) == PR_PAID:
        # Paid invoices may move in the list; rebuild it entirely.
        self.invoice_list.update()
    else:
        self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
    """Notify the user that a payment completed and schedule a GUI refresh."""
    label = self.wallet.get_label(key)
    self.notify(_('Payment succeeded') + '\n\n' + label)
    self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
    """Show an error dialog for a failed payment with the given reason text."""
    self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
    """Build an invoice object from the send tab's current contents.

    Returns an LNInvoice for lightning payments or an onchain invoice
    created by the wallet; returns None (after showing an error) when
    the form is invalid.
    """
    if self.check_send_tab_payto_line_and_show_errors():
        return
    try:
        if not self._is_onchain:
            # Lightning path: amount may come from the invoice or the form.
            invoice_str = self.payto_e.lightning_invoice
            if not invoice_str:
                return
            if not self.wallet.has_lightning():
                self.show_error(_('Lightning is disabled'))
                return
            invoice = LNInvoice.from_bech32(invoice_str)
            if invoice.get_amount_msat() is None:
                amount_sat = self.amount_e.get_amount()
                if amount_sat:
                    invoice.amount_msat = int(amount_sat * 1000)
                else:
                    self.show_error(_('No amount'))
                    return
            return invoice
        else:
            outputs = self.read_outputs()
            # Optional short message embedded as an OP_RETURN (0x6a) output.
            # NOTE(review): the < 40 limit presumably keeps the payload within
            # relay policy — confirm against consensus/relay rules.
            pubkey_msg = self.pubkey_e.text()
            if pubkey_msg != '' and len(pubkey_msg) < 40:
                outputs.append(
                    PartialTxOutput(
                        value=0,
                        scriptpubkey=
                        b'\x6a' +
                        len(pubkey_msg).to_bytes(1, 'big', signed=False) +
                        pubkey_msg.encode('ascii')
                    ))
            if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
                return
            message = self.message_e.text()
            return self.wallet.create_invoice(
                outputs=outputs,
                message=message,
                pr=self.payment_request,
                URI=self.payto_URI)
    except InvoiceError as e:
        self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
    """Read the send tab into an invoice and persist it (no payment)."""
    invoice = self.read_invoice()
    self.pending_invoice = invoice
    if invoice:
        self.save_pending_invoice()
def save_pending_invoice(self):
    """Persist the pending invoice (if any), clearing the form and the slot."""
    invoice = self.pending_invoice
    if not invoice:
        return
    # Clear the form first so the user sees a pristine send tab.
    self.do_clear()
    self.wallet.save_invoice(invoice)
    self.invoice_list.update()
    self.pending_invoice = None
def do_pay(self):
    """Read the send tab into an invoice and pay it immediately."""
    invoice = self.read_invoice()
    self.pending_invoice = invoice
    if invoice:
        self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
    """Pay several onchain invoices with a single transaction."""
    combined_outputs = []
    for inv in invoices:
        combined_outputs.extend(inv.outputs)
    self.pay_onchain_dialog(self.get_coins(), combined_outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
    """Dispatch payment of an invoice to the lightning or onchain flow."""
    if invoice.type == PR_TYPE_LN:
        assert isinstance(invoice, LNInvoice)
        self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
    elif invoice.type == PR_TYPE_ONCHAIN:
        assert isinstance(invoice, OnchainInvoice)
        # If the invoice involves an asset, restrict coin selection to it.
        # NOTE(review): only the first asset is considered — presumably an
        # invoice carries at most one asset; confirm upstream.
        l = list(invoice.get_amount_sat().assets.keys())
        if l:
            a = l[0]
        else:
            a = None
        self.pay_onchain_dialog(self.get_coins(asset=a), invoice.outputs)
    else:
        raise Exception('unknown invoice type')
def get_coins(self, *, asset: str = None, nonlocal_only=False) -> Sequence[PartialTxInput]:
    """Return coins for spending.

    Prefers the user's manual coin-control selection when one is active;
    otherwise asks the wallet for all spendable coins.
    """
    manual_selection = self.get_manually_selected_coins()
    if manual_selection is not None:
        return manual_selection
    return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only, asset=asset)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
    """Return a list of selected coins or None.
    Note: None means selection is not being used,
    while an empty sequence means the user specifically selected that.
    """
    # Delegates to the coin-control selection held by the UTXO list view.
    return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
    """Build the 'Not enough funds' message, mentioning frozen balance if any."""
    text = _("Not enough funds")
    frozen_str = self.get_frozen_balance_str()
    if not frozen_str:
        return text
    return text + " ({} {})".format(frozen_str, _("are frozen"))
# TODO: Currently only for ravencoin
def get_frozen_balance_str(self) -> Optional[str]:
    """Return the formatted frozen RVN balance, or None when nothing is frozen.

    NOTE(review): sums the parts returned by wallet.get_frozen_balance()
    into a RavenValue — presumably (confirmed/unconfirmed/...) components;
    confirm against the wallet API.
    """
    frozen_bal = sum(self.wallet.get_frozen_balance(), RavenValue())
    if not frozen_bal:
        return None
    return self.format_amount_and_units(frozen_bal.rvn_value)
def pay_onchain_dialog(
        self, inputs: Sequence[PartialTxInput],
        outputs: List[PartialTxOutput], *,
        external_keypairs=None,
        coinbase_outputs=None,
        change_addr=None,
        mixed=False) -> None:
    """Run the confirm/preview dialog flow for an onchain payment.

    Builds a tx-factory over the given inputs/outputs, shows the confirm
    dialog (or the advanced preview), then signs and broadcasts on send.
    """
    # trustedcoin requires this
    if run_hook('abort_send', self):
        return
    # Sweeping external keys means the wallet itself may not sign everything.
    is_sweep = bool(external_keypairs)
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
        coins=inputs,
        outputs=outputs,
        fee=fee_est,
        is_sweep=is_sweep,
        coinbase_outputs=coinbase_outputs,
        change_addr=change_addr)
    # Total value (RVN + assets) across regular and coinbase outputs.
    output_value = \
        sum([RavenValue(0, {x.asset: x.value}) if x.asset else RavenValue(x.value) for x in outputs +
             (coinbase_outputs if coinbase_outputs else [])], RavenValue())
    conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
    if conf_dlg.not_enough_funds:
        # Check if we had enough funds excluding fees,
        # if so, still provide opportunity to set lower fees.
        if not conf_dlg.have_enough_funds_assuming_zero_fees():
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_message(text)
            return
    # shortcut to advanced preview (after "enough funds" check!)
    if self.config.get('advanced_preview'):
        preview_dlg = PreviewTxDialog(
            window=self,
            make_tx=make_tx,
            external_keypairs=external_keypairs,
            output_value=output_value,
            mixed=mixed)
        preview_dlg.show()
        return
    cancelled, is_send, password, tx = conf_dlg.run()
    if cancelled:
        return
    if is_send:
        self.save_pending_invoice()
        def sign_done(success):
            if success:
                self.broadcast_or_show(tx)
        self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                   external_keypairs=external_keypairs, mixed=mixed)
    else:
        # User chose 'preview' in the confirm dialog.
        preview_dlg = PreviewTxDialog(
            window=self,
            make_tx=make_tx,
            external_keypairs=external_keypairs,
            output_value=output_value,
            mixed=mixed)
        preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
    """Broadcast *tx* when complete and online; otherwise display it."""
    complete = tx.is_complete()
    if complete and self.network:
        self.broadcast_transaction(tx)
        return
    if complete:
        # Complete but offline: explain why we cannot broadcast.
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
    # Incomplete (or offline): show the transaction for further handling.
    self.show_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password, mixed=False):
    """Password-prompting wrapper around sign_tx_with_password.

    NOTE(review): @protected is defined elsewhere and presumably supplies
    the 'password' argument — confirm against the decorator.
    """
    self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs, mixed=mixed)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None, mixed=False):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # A plugin (trustedcoin) may wrap the success handler for co-signing.
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    def sign(tx, external_keypairs, password):
        if external_keypairs:
            # can sign directly
            tx.sign(external_keypairs)
        # 'mixed' means both external keys and wallet keys sign.
        if not external_keypairs or mixed:
            self.wallet.sign_transaction(tx, password)
    task = partial(sign, tx, external_keypairs, password)
    msg = _('Signing transaction...')
    # Runs 'task' off the GUI thread with a progress dialog.
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
    """Broadcast *tx* on a background thread, reporting the result in the GUI.

    Also sends the BIP70 payment message (with a refund address) when a
    payment request is active.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Invoice has expired")
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            return False, e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            return False, repr(e)
        # success
        txid = tx.txid()
        if pr:
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return True, txid
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            success, msg = result
            if success:
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
    """Return a tx factory (fee_est -> tx) for funding a new channel.

    Coins are snapshotted now (local coins only) so repeated fee
    adjustments in the confirm dialog reuse the same selection.
    """
    coins = self.get_coins(nonlocal_only=True)
    def make_tx(fee_est):
        return self.wallet.lnworker.mktx_for_open_channel(
            coins=coins,
            funding_sat=funding_sat,
            node_id=node_id,
            fee_est=fee_est)
    return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
    """Open a lightning channel to the peer described by *connect_str*.

    Confirms the funding transaction with the user, then runs the channel
    establishment on a background thread.
    """
    try:
        node_id, rest = extract_nodeid(connect_str)
    except ConnStringFormatError as e:
        self.show_error(str(e))
        return
    if self.wallet.lnworker.has_conflicting_backup_with(node_id):
        # NOTE(review): constant is spelled MGS_... — looks like a typo of
        # MSG_...; confirm against the messages module before renaming.
        msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
        if not self.question(msg):
            return
    # use ConfirmTxDialog
    # we need to know the fee before we broadcast, because the txid is required
    make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
    # disable preview button because the user must not broadcast tx before establishment_flow
    d.preview_button.setEnabled(False)
    cancelled, is_send, password, funding_tx = d.run()
    if not is_send:
        return
    if cancelled:
        return
    # read funding_sat from tx; converts '!' to int value
    funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
    def task():
        return self.wallet.lnworker.open_channel(
            connect_str=connect_str,
            funding_tx=funding_tx,
            funding_sat=funding_sat,
            push_amt_sat=push_amt,
            password=password)
    def on_failure(exc_info):
        type_, e, traceback = exc_info
        self.show_error(_('Could not open channel: {}').format(repr(e)))
    WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
    """Handle a successful channel open: deal with backups and inform the user.

    *args* is the (channel, funding_tx) pair produced by
    lnworker.open_channel. If the channel has no onchain backup, either
    report the updated backup file or show a QR code the user must save.
    """
    chan, funding_tx = args
    lnworker = self.wallet.lnworker
    if not chan.has_onchain_backup():
        backup_dir = self.config.get_backup_dir()
        if backup_dir is not None:
            # Fix: translate the template and then substitute. The previous
            # _(f'...{backup_dir}') formatted first, so the gettext lookup used
            # an already-formatted msgid that can never exist in the catalog.
            self.show_message(_('Your wallet backup has been updated in {}').format(backup_dir))
        else:
            # No backup directory configured: hand the backup to the user directly.
            data = lnworker.export_channel_backup(chan.channel_id)
            help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
            self.show_qrcode(
                data, _('Save channel backup'),
                help_text=help_text,
                show_copy_text_btn=True)
    n = chan.constraints.funding_txn_minimum_depth
    message = '\n'.join([
        _('Channel established.'),
        _('Remote peer ID') + ':' + chan.node_id.hex(),
        _('This channel will be usable after {} confirmations').format(n)
    ])
    if not funding_tx.is_complete():
        # Funding tx still needs signatures: show it so the user can finish.
        message += '\n\n' + _('Please sign and broadcast the funding transaction')
        self.show_message(message)
        self.show_transaction(funding_tx)
    else:
        self.show_message(message)
def query_choice(self, msg, choices):
    # Needed by QtHandler for hardware wallets
    """Show a modal radio-button choice dialog; return the selected index
    or None if the dialog was dismissed."""
    dialog = WindowModalDialog(self.top_level_window())
    clayout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(clayout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if not dialog.exec_():
        return None
    return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
    """Freeze (b=True) or unfreeze the amount editor and related controls."""
    self.amount_e.setFrozen(b)
    # Max and asset selection only make sense when the amount is editable.
    enabled = not b
    self.max_button.setEnabled(enabled)
    self.to_send_combo.setEnabled(enabled)
def prepare_for_payment_request(self):
    """Lock down the send tab while a BIP70 payment request is being fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    # Freeze all editable fields until the request resolves.
    self.payto_e.setFrozen(True)
    self.message_e.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoices(self, keys):
    """Remove the given invoice keys from the wallet and refresh the list."""
    for invoice_key in keys:
        self.wallet.delete_invoice(invoice_key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Fill the send tab from a successfully fetched/verified payment request."""
    pr = self.payment_request
    if not pr:
        return
    key = pr.get_id()
    invoice = self.wallet.get_invoice(key)
    # Guard against paying the same request twice.
    if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Green = valid request; 'expired' styling otherwise.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setAmount(pr.get_amount())
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Report a failed payment-request fetch/verify, then reset the form."""
    current_pr = self.payment_request
    if not current_pr:
        return
    self.show_message(current_pr.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
    """Receive a fetched BIP70 payment request and emit the matching signal.

    Signals are used because this may be called off the GUI thread.
    """
    self.set_onchain(True)
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    signal = self.payment_request_ok_signal if verified else self.payment_request_error_signal
    signal.emit()
def set_ln_invoice(self, invoice: str):
    """Parse ln invoice, and prepare the send tab for it."""
    try:
        lnaddr = lndecode(invoice)
    except LnInvoiceException as e:
        self.show_error(_("Error parsing Lightning invoice") + f":\n{e}")
        return
    self.payto_e.lightning_invoice = invoice
    pubkey = bh2u(lnaddr.pubkey.serialize())
    # Tag 'd' carries the invoice description (BOLT11).
    for k, v in lnaddr.tags:
        if k == 'd':
            description = v
            break
    else:
        description = ''
    self.payto_e.setFrozen(True)
    self.payto_e.setText(pubkey)
    self.message_e.setText(description)
    # Amount is optional in BOLT11; leave the field alone when absent.
    if lnaddr.get_amount_sat() is not None:
        self.amount_e.setAmount(lnaddr.get_amount_sat())
    # self.amount_e.textEdited.emit("")
    self.set_onchain(False)
def set_onchain(self, b):
    """Switch the send tab between onchain (True) and lightning (False) mode."""
    self._is_onchain = b
    # 'Max' only applies to onchain spends.
    self.max_button.setEnabled(b)
def set_bip21(self, text: str):
    """Parse a BIP21 payment URI and fill the send tab from it."""
    try:
        # on_pr is invoked asynchronously if the URI references a BIP70 request.
        out = util.parse_URI(text, self.on_pr)
    except InvalidBitcoinURI as e:
        self.show_error(_("Error parsing URI") + f":\n{e}")
        return
    self.payto_URI = out
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # BIP70-style request: lock the form until the request is fetched.
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
def pay_to_URI(self, text: str):
    """Handle pasted/scanned payment data: BOLT11 invoice or BIP21 URI."""
    if not text:
        return
    # first interpret as lightning invoice
    bolt11 = maybe_extract_bolt11_invoice(text)
    if bolt11:
        self.set_ln_invoice(bolt11)
    else:
        self.set_bip21(text)
    # update fiat amount
    self.amount_e.textEdited.emit("")
    self.show_send_tab()
def do_clear(self):
    """Reset the send tab to its pristine state."""
    self.max_button.setChecked(False)
    self.payment_request = None
    self.payto_URI = None
    self.payto_e.is_pr = False
    self.set_onchain(False)
    # Empty and unfreeze every editable field.
    for editor in (self.payto_e, self.message_e, self.amount_e):
        editor.setText('')
        editor.setFrozen(False)
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """(Un)freeze the given addresses in the wallet and refresh affected views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
    """(Un)freeze specific coins in the wallet and refresh the coins view."""
    # Wallet API identifies coins by their prevout string, not the objects.
    utxos_str = {utxo.prevout.to_str() for utxo in utxos}
    self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
    self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
    """Wrap a list widget (plus optional toolbar) into a tab page widget.

    The list is exposed as .searchable_list so do_search() can filter it.
    """
    w = QWidget()
    w.searchable_list = l
    vbox = QVBoxLayout()
    w.setLayout(vbox)
    # vbox.setContentsMargins(0, 0, 0, 0)
    # vbox.setSpacing(0)
    if toolbar:
        vbox.addLayout(toolbar)
    vbox.addWidget(l)
    return w
def create_addresses_tab(self):
    """Create the Addresses tab, restoring the toolbar visibility setting."""
    from .address_list import AddressList
    self.address_list = l = AddressList(self)
    toolbar = l.create_toolbar(self.config)
    tab = self.create_list_tab(l, toolbar)
    # Toolbar visibility is a persisted user preference (default: shown).
    toolbar_shown = bool(self.config.get('show_toolbar_addresses', True))
    l.show_toolbar(toolbar_shown)
    return tab
def create_utxo_tab(self):
    """Create the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = UTXOList(self)
    return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
    """Create the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = l = ContactList(self)
    return self.create_list_tab(l)
def remove_address(self, addr):
    """Delete an address from the wallet after user confirmation."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    try:
        self.wallet.delete_address(addr)
    except UserFacingException as e:
        self.show_error(str(e))
    else:
        self.need_update.set()  # history, addresses, coins
        self.clear_receive_tab()
def paytomany(self):
    """Switch the send tab into multi-output ('pay to many') mode with help text."""
    self.show_send_tab()
    self.payto_e.paytomany()
    msg = '\n'.join([
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.')
    ])
    self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the send tab with one contact, or several in paytomany format."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
        return
    # Multiple contacts: one "payto, amount" line each, amounts start at 0.
    self.payto_e.setText("\n".join(payto + ", 0" for payto in paytos))
    self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update a contact; return True on success, False for a bad address."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    # Contacts are keyed by address; value is (type, label).
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Remove the given contact keys after user confirmation and refresh views."""
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(" + ".join(labels))):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
    """Show a modal dialog with the details of an onchain invoice.

    For BIP70 invoices the requestor/signature are displayed and the raw
    request can be exported to a .bip70 file.
    """
    #amount_str = self.format_amount(invoice.amount_sat.rvn_value.value) + ' ' + self.base_unit()
    amount_str = invoice.amount_sat.__repr__()
    d = WindowModalDialog(self, _("Onchain Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    grid.addWidget(QLabel(amount_str), 1, 1)
    if len(invoice.outputs) == 1:
        grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
        grid.addWidget(QLabel(invoice.get_address()), 2, 1)
    else:
        # Multiple outputs: list "address : amount unit" per line.
        outputs_str = '\n'.join(
            map(lambda x: str(x.address) + ' : ' + self.format_amount(x.value) + (self.base_unit() if not x.asset else (' ' + x.asset)), invoice.outputs))
        grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
        grid.addWidget(QLabel(outputs_str), 2, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
    grid.addWidget(QLabel(invoice.message), 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
    if invoice.bip70:
        pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
        pr.verify(self.contacts)
        grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
        grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
        grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
        grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
        def do_export():
            # Export the raw BIP70 request bytes to a user-chosen file.
            key = pr.get_id()
            name = str(key) + '.bip70'
            fn = getSaveFileName(
                parent=self,
                title=_("Save invoice to file"),
                filename=name,
                filter="*.bip70",
                config=self.config,
            )
            if not fn:
                return
            with open(fn, 'wb') as f:
                # NOTE(review): 'data' (bytes written) is never used.
                data = f.write(pr.raw)
            self.show_message(_('BIP70 invoice saved as {}').format(fn))
        exportButton = EnterButton(_('Export'), do_export)
        buttons = Buttons(exportButton, CloseButton(d))
    else:
        buttons = Buttons(CloseButton(d))
    vbox.addLayout(grid)
    vbox.addLayout(buttons)
    d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
    """Show a modal dialog with the decoded details of a lightning invoice."""
    lnaddr = lndecode(invoice.invoice)
    d = WindowModalDialog(self, _("Lightning Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
    grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
    grid.addWidget(QLabel(amount_str), 1, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
    grid.addWidget(QLabel(invoice.message), 2, 1)
    grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
    payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
    payhash_e.addCopyButton(self.app)
    payhash_e.setReadOnly(True)
    # NOTE(review): payhash_e is added both to vbox and to the grid below —
    # a widget can only have one parent, so the grid placement wins; the
    # vbox.addWidget call looks redundant.
    vbox.addWidget(payhash_e)
    grid.addWidget(payhash_e, 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
    vbox.addLayout(grid)
    invoice_e = ShowQRTextEdit(config=self.config)
    invoice_e.addCopyButton(self.app)
    invoice_e.setText(invoice.invoice)
    vbox.addWidget(invoice_e)
    vbox.addLayout(Buttons(CloseButton(d), ))
    d.exec_()
def create_console_tab(self):
    """Create the interactive Python Console tab."""
    from .console import Console
    self.console = console = Console()
    return console
def update_console(self):
    """Populate the console namespace with wallet objects and CLI commands."""
    console = self.console
    # Restore command history persisted in the wallet file.
    console.history = self.wallet.db.get("qt-console-history", [])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum': electrum,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': ravencoin,
        'lnutil': lnutil,
    })
    c = commands.Commands(
        config=self.config,
        daemon=self.gui_object.daemon,
        network=self.network,
        callback=lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # Bind each CLI command so it runs with this wallet and can prompt
        # for a password via the GUI.
        return lambda *args, **kwargs: f(method,
                                         args,
                                         self.password_dialog,
                                         **{**kwargs, 'wallet': self.wallet})
    for m in dir(c):
        # Skip private attributes and objects already exposed above.
        if m[0] == '_' or m in ['network', 'wallet', 'config', 'daemon']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the main window status bar: balance, search, and tool buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.status_label = QLabel("")
    self.status_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
    self.status_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addPermanentWidget(self.status_label)
    # Hidden until toggled via toggle_search().
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    # Shown only when an update is available.
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
    sb.addPermanentWidget(self.seed_button)
    self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"),
                                            self.gui_object.show_lightning_dialog)
    sb.addPermanentWidget(self.lightning_button)
    self.update_lightning_icon()
    self.status_button = None
    # Network button only exists when running online.
    if self.network:
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"),
                                             self.gui_object.show_network_dialog)
        sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
    """Build the (initially hidden) coin-control status bar with a Reset button."""
    self.coincontrol_sb = sb = QStatusBar()
    sb.setSizeGripEnabled(False)
    # sb.setFixedHeight(3 * char_width_in_lineedit())
    sb.setStyleSheet('QStatusBar::item {border: None;} '
                     + ColorScheme.GREEN.as_stylesheet(True))
    self.coincontrol_label = QLabel()
    self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    sb.addWidget(self.coincontrol_label)
    # Reset clears the manual coin selection in the UTXO list.
    clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
    clear_cc_button.setStyleSheet("margin-right: 5px;")
    sb.addPermanentWidget(clear_cc_button)
    sb.setVisible(False)
    return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
    """Show the coin-control status bar with *msg*, or hide it when msg is empty."""
    visible = bool(msg)
    self.coincontrol_label.setText(msg if visible else "")
    self.coincontrol_sb.setVisible(visible)
def update_lightning_icon(self):
    """Update the status-bar lightning button with gossip sync progress."""
    if not self.wallet.has_lightning():
        self.lightning_button.setVisible(False)
        return
    # No channel_db means gossip is unavailable (offline or disabled).
    if self.network is None or self.network.channel_db is None:
        self.lightning_button.setVisible(False)
        return
    self.lightning_button.setVisible(True)
    cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
    # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
    progress_str = "??%"
    if progress_percent is not None:
        progress_str = f"{progress_percent}%"
    if progress_percent and progress_percent >= 100:
        # Fully synced: collapse the button to just the icon.
        self.lightning_button.setMaximumWidth(25)
        self.lightning_button.setText('')
        self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
    else:
        self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
        self.lightning_button.setText(progress_str)
        self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                           "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
    """Reflect whether the wallet has a password on the status-bar button icon."""
    icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
    self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
    """Show/hide the seed and password buttons based on wallet capabilities."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
    """Run the change-password flow (hardware-wallet or software variant)."""
    from electrum.storage import StorageEncryptionVersion
    if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
        # Hardware wallet path: the device derives the storage password.
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(repr(e))
            return
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        # Software wallet path: the user types old and new passwords.
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _(
        'Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Show/hide the status-bar search box; clear the filter when hiding."""
    self.search_box.setHidden(not self.search_box.isHidden())
    if self.search_box.isHidden():
        # Hiding: reset the filter so all rows reappear.
        self.do_search('')
    else:
        self.search_box.setFocus(1)
def do_search(self, t):
    """Apply filter text *t* to the searchable list of the current tab."""
    current_tab = self.tabs.currentWidget()
    # Tabs created via create_list_tab expose their list as searchable_list.
    if hasattr(current_tab, 'searchable_list'):
        current_tab.searchable_list.filter(t)
def new_contact_dialog(self):
    """Show a modal dialog to add a new contact (address + name)."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()
    line1.setFixedWidth(32 * char_width_in_lineedit())
    line2 = QLineEdit()
    line2.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact(label, address) validates the address and updates views.
        self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
    """Ask for confirmation, then create lightning keys for this wallet."""
    assert not self.wallet.has_lightning()
    if self.wallet.can_have_deterministic_lightning():
        msg = _(
            "Lightning is not enabled because this wallet was created with an old version of Electrum. "
            "Create lightning keys?")
    else:
        # Non-deterministic path: warn that channels are not seed-recoverable.
        msg = _(
            "Warning: this wallet type does not support channel recovery from seed. "
            "You will need to backup your wallet everytime you create a new wallet. "
            "Create lightning keys?")
    if self.question(msg):
        self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
    """Create lightning keys (password supplied by the @protected decorator)."""
    dialog.close()
    self.wallet.init_lightning(password=password)
    self.update_lightning_icon()
    self.show_message(_('Lightning keys have been initialized.'))
def show_wallet_info(self):
    """Show the modal "Wallet Information" dialog.

    Displays wallet name/type/script type, seed availability, keystore
    details (with a selector when there are multiple keystores, e.g.
    multisig cosigners), and lightning status incl. the node id.
    """
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(800, 100)
    vbox = QVBoxLayout()
    wallet_type = self.wallet.db.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('False')
    if self.wallet.has_seed():
        seed_available = _('True')
        ks = self.wallet.keystore
        assert isinstance(ks, keystore.Deterministic_KeyStore)
        seed_available += f" ({ks.get_seed_type()})"
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name") + ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type") + ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type") + ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    # a single keystore is shown inline; multiple keystores get the
    # selector built further down
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    # lightning
    grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
    from .util import IconLabel
    if self.wallet.has_lightning():
        if self.wallet.lnworker.has_deterministic_node_id():
            grid.addWidget(QLabel(_('Enabled')), 5, 1)
        else:
            label = IconLabel(text='Enabled, non-recoverable channels')
            label.setIcon(read_QIcon('nocloud'))
            grid.addWidget(label, 5, 1)
            # explain *why* the channels are non-recoverable, depending on seed type
            if self.wallet.db.get('seed_type') == 'segwit':
                msg = _(
                    "Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
                    "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                    "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
            else:
                msg = _("Your channels cannot be recovered from seed. "
                        "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                        "If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
            grid.addWidget(HelpButton(msg), 5, 3)
        grid.addWidget(QLabel(_('Lightning Node ID:')), 7, 0)
        # TODO: ButtonsLineEdit should have a addQrButton method
        nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
        nodeid_e = ButtonsLineEdit(nodeid_text)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
        nodeid_e.addCopyButton(self.app)
        nodeid_e.setReadOnly(True)
        nodeid_e.setFont(QFont(MONOSPACE_FONT))
        grid.addWidget(nodeid_e, 8, 0, 1, 4)
    else:
        if self.wallet.can_have_lightning():
            grid.addWidget(QLabel('Not enabled'), 5, 1)
            button = QPushButton(_("Enable"))
            button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
            grid.addWidget(button, 5, 3)
        else:
            grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
            grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")),
                           5, 2)
    vbox.addLayout(grid)
    labels_clayout = None
    if self.wallet.is_deterministic():
        keystores = self.wallet.get_keystores()
        ks_stack = QStackedWidget()

        def select_ks(index):
            ks_stack.setCurrentIndex(index)

        # only show the combobox in case multiple accounts are available
        if len(keystores) > 1:
            def label(idx, ks):
                if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                    return _("cosigner") + f' {idx + 1}: {ks.get_type_text()} {ks.label}'
                else:
                    return _("keystore") + f' {idx + 1}'
            labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
            on_click = lambda clayout: select_ks(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        # one stacked page per keystore: master public key + derivation path
        for ks in keystores:
            ks_w = QWidget()
            ks_vbox = QVBoxLayout()
            ks_vbox.setContentsMargins(0, 0, 0, 0)
            ks_w.setLayout(ks_vbox)
            mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            run_hook('show_xpub_button', mpk_text, ks)
            der_path_hbox = QHBoxLayout()
            der_path_hbox.setContentsMargins(0, 0, 0, 0)
            der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
            der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
            der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
            der_path_hbox.addWidget(der_path_text)
            der_path_hbox.addStretch()
            ks_vbox.addWidget(QLabel(_("Master Public Key")))
            ks_vbox.addWidget(mpk_text)
            ks_vbox.addLayout(der_path_hbox)
            ks_stack.addWidget(ks_w)
        select_ks(0)
        vbox.addWidget(ks_stack)
    vbox.addStretch(1)
    btn_export_info = run_hook('wallet_info_buttons', self, dialog)
    btn_close = CloseButton(dialog)
    btns = Buttons(btn_export_info, btn_close)
    vbox.addLayout(btns)
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file from disk."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon (password-protected) and close the window."""
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    deleted = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    # report the outcome after the window is gone
    template = _("Wallet removed: {}") if deleted else _("Wallet file not found: {}")
    self.show_error(template.format(basename))
@protected
def show_seed_dialog(self, password):
    """Show the wallet seed and passphrase in a modal dialog.

    Requires the wallet password (via @protected); aborts with a user
    message if the wallet has no seed or decryption fails.
    """
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    # fix: named `ks` so the module-level `keystore` import (used by
    # sibling methods) is not shadowed by this local
    ks = self.wallet.get_keystore()
    try:
        seed = ks.get_seed(password)
        passphrase = ks.get_passphrase(password)
    except BaseException as e:
        self.show_error(repr(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase, config=self.config)
    d.exec_()
def show_qrcode(self, data, title=_("QR code"), parent=None, *,
                help_text=None, show_copy_text_btn=False):
    """Pop up a QR-code dialog for *data*; no-op when data is empty."""
    if not data:
        return
    QRDialog(
        data=data,
        parent=parent or self,
        title=title,
        help_text=help_text,
        show_copy_text_btn=show_copy_text_btn,
        config=self.config,
    ).exec_()
@protected
def show_private_key(self, address, password):
    """Export and display the private key for *address* (password-protected)."""
    if not address:
        return
    try:
        pk = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(repr(e))
        return
    xtype = ravencoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    # header labels: address, script type, then the key itself
    for line in (_("Address") + ': ' + address,
                 _("Script type") + ': ' + xtype,
                 _("Private key") + ':'):
        vbox.addWidget(QLabel(line))
    keys_e = ShowQRTextEdit(text=pk, config=self.config)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Class-level message shown by do_sign() when the user tries to sign with
# an address type whose public key is not uniquely determined.
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign the message widget's text with the key of the address widget's address.

    The actual signing runs on the wallet thread; the base64 signature
    is written back into the signature widget on success.
    """
    addr_text = address.text().strip()
    msg_text = message.toPlainText().strip()
    if not ravencoin.is_address(addr_text):
        self.show_message(_('Invalid Ravencoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(addr_text):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(addr_text)
    if txin_type not in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
        self.show_message(_('Cannot sign messages with this type of address:') +
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return

    def on_signed(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass

    self.wallet.thread.add(
        partial(self.wallet.sign_message, addr_text, msg_text, password),
        on_success=on_signed)
def do_verify(self, address, message, signature):
    """Verify that the signature widget holds a valid signature of message by address."""
    addr_text = address.text().strip()
    msg_bytes = message.toPlainText().strip().encode('utf-8')
    if not ravencoin.is_address(addr_text):
        self.show_message(_('Invalid Ravencoin address.'))
        return
    try:
        # b64decode raises on malformed input; treat any failure as "not verified"
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(addr_text, sig, msg_bytes)
    except Exception:
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the Sign/verify Message dialog, optionally pre-filling *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)

    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2, 3)

    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)

    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3, 1)

    # action row: Sign / Verify / Close
    hbox = QHBoxLayout()
    sign_btn = QPushButton(_("Sign"))
    sign_btn.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(sign_btn)
    verify_btn = QPushButton(_("Verify"))
    verify_btn.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(verify_btn)
    close_btn = QPushButton(_("Close"))
    close_btn.clicked.connect(d.accept)
    hbox.addWidget(close_btn)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext widget's contents into the message widget.

    Decryption runs on the wallet thread (needs the password).
    """
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()

    def on_decrypted(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass

    self.wallet.thread.add(
        partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password),
        on_success=on_decrypted)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message widget's contents to the entered public key."""
    plaintext = message_e.toPlainText().encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    ciphertext = public_key.encrypt_message(plaintext)
    encrypted_e.setText(ciphertext.decode('ascii'))
def encrypt_message(self, address=''):
    """Open the Encrypt/decrypt Message dialog, pre-filling the pubkey from *address* if given."""
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)

    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2, 3)

    pubkey_e = QLineEdit()
    if address:
        pubkey_e.setText(self.wallet.get_public_key(address))
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)

    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3, 1)

    # action row: Encrypt / Decrypt / Close
    hbox = QHBoxLayout()
    encrypt_btn = QPushButton(_("Encrypt"))
    encrypt_btn.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(encrypt_btn)
    decrypt_btn = QPushButton(_("Decrypt"))
    decrypt_btn.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(decrypt_btn)
    close_btn = QPushButton(_("Close"))
    close_btn.clicked.connect(d.accept)
    hbox.addWidget(close_btn)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt the user for a password; returns the entered password (or None)."""
    from .password_dialog import PasswordDialog
    return PasswordDialog(parent or self, msg).run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
    """Parse raw transaction data; on failure show an error dialog and return None."""
    from electrum.transaction import tx_from_any
    try:
        return tx_from_any(data)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
        return None
def import_channel_backup(self, encrypted: str):
    """Ask for confirmation, then import an encrypted channel backup into the lnworker.

    Shows an error dialog if the backup cannot be imported.
    """
    # fix: wrap user-facing strings in _() for translation, consistent
    # with every other dialog in this class
    if not self.question(_('Import channel backup?')):
        return
    try:
        self.wallet.lnworker.import_channel_backup(encrypted)
    except Exception as e:
        self.show_error(_("failed to import backup") + '\n' + str(e))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code and dispatch on its payload.

    Recognized payloads: a BIP21 payment URI, a 'channel_backup:' blob,
    or a raw/offline-signed transaction (shown in the tx dialog).
    """
    def cb(success: bool, error: str, data):
        # callback invoked by scan_qrcode when scanning finishes
        if not success:
            if error:
                self.show_error(error)
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
            self.pay_to_URI(data)
            return
        if data.lower().startswith('channel_backup:'):
            self.import_channel_backup(data)
            return
        # else if the user scanned an offline signed tx
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
    scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
    """Let the user pick a transaction file and parse it; None on cancel or failure."""
    path = getOpenFileName(
        parent=self,
        title=_("Select your transaction file"),
        filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
        config=self.config,
    )
    if not path:
        return None
    try:
        with open(path, "rb") as f:
            file_content = f.read()  # type: Union[str, bytes]
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(
            _("Electrum was unable to open your transaction file") + "\n" + str(reason),
            title=_("Unable to read file or no transaction found"))
        return None
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Ask the user to paste a raw transaction and show it if it parses."""
    raw = text_dialog(
        parent=self,
        title=_('Input raw transaction'),
        header_layout=_("Transaction:"),
        ok_label=_("Load transaction"),
        config=self.config,
    )
    if raw:
        tx = self.tx_from_text(raw)
        if tx:
            self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
    """Ask the user to paste a channel backup blob and import it."""
    text = text_dialog(
        parent=self,
        title=_('Input channel backup'),
        header_layout=_("Channel Backup:"),
        ok_label=_("Load backup"),
        config=self.config,
    )
    # only 'channel_backup:'-prefixed payloads are accepted
    if text and text.startswith('channel_backup:'):
        self.import_channel_backup(text)
def do_process_from_file(self):
    """Load a transaction from a file chosen by the user and display it."""
    parsed_tx = self.read_tx_from_file()
    if parsed_tx:
        self.show_transaction(parsed_tx)
def do_process_from_txid(self):
    """Prompt for a txid, fetch the raw tx from the network, and display it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if not (ok and txid):
        return
    raw_tx = self._fetch_tx_from_network(str(txid).strip())
    if raw_tx:
        self.show_transaction(transaction.Transaction(raw_tx))
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
    """Fetch a raw transaction by txid; returns None on failure (with user feedback)."""
    if not self.network:
        self.show_message(_("You are offline."))
        return None
    try:
        return self.network.run_from_another_thread(
            self.network.get_transaction(txid, timeout=10))
    except UntrustedServerReturnedError as e:
        # server-reported errors get a sanitized, GUI-safe message
        self.logger.info(f"Error getting transaction from network: {repr(e)}")
        self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
        return None
    except Exception as e:
        self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
        return None
@protected
def export_privkeys_dialog(self, password):
    """Export all wallet private keys to a user-chosen CSV/JSON file.

    Keys are derived on a background thread one address at a time while
    the dialog shows progress; the file write itself happens via
    do_export_privkeys() after the dialog is accepted.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled only after all keys have been derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False

    def privkeys_thread():
        # background producer: derives one key per address, signalling the
        # GUI after each; stops early on cancel or completion
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()

    def show_privkeys():
        # runs on the GUI thread once all keys are derived
        s = "\n".join(map(lambda x: x[0] + "\t" + x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True

    def on_dialog_closed(*args):
        # disconnect signals if the dialog is closed before completion
        nonlocal done
        nonlocal cancelled
        if not done:
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()

    self.computing_privkeys_signal.connect(
        lambda: e.setText("Please wait... %d/%d" % (len(private_keys), len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(repr(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the {address: privkey} mapping *pklist* to *fileName*.

    Writes CSV (header row "address,private_key", addresses right-padded
    to 34 chars) when *is_csv* is true, otherwise pretty-printed JSON.
    The file is chmod'ed 0o600 since it contains secret material.
    """
    with open(fileName, "w+") as f:
        os.chmod(fileName, 0o600)  # private keys: owner read/write only
        if is_csv:
            # fix: local was named `transaction`, which is misleading for a
            # csv writer and shadows the electrum `transaction` module name
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                writer.writerow(["%34s" % addr, pk])
        else:
            f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
    """Import wallet labels from a user-chosen file, then refresh the UI."""
    import_meta_gui(self, _('labels'), self.wallet.import_labels,
                    lambda: self.need_update.set())
def do_export_labels(self):
    """Export wallet labels to a user-chosen file."""
    export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
    """Import invoices from a user-chosen file and refresh the invoice list."""
    import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
    """Export invoices to a user-chosen file."""
    export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
    """Import payment requests from a user-chosen file and refresh the request list."""
    import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
    """Export payment requests to a user-chosen file."""
    export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
    """Import contacts from a user-chosen file and refresh the contact list."""
    import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
    """Export contacts to a user-chosen file."""
    export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
    """Dialog to sweep RVN/assets held by externally-supplied private keys.

    Collects WIF private keys and a destination address, prepares the
    sweep on the network thread, then hands the resulting coins/outputs
    to pay_onchain_dialog().
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("RVN currently in your wallet will be used for the fee to sweep assets\n"
                                "if there is no RVN held in the private keys.\n"
                                "Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination: prefer unused addresses, fall back per wallet type
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    self.use_own_cb = QCheckBox(_('Force use own RVN'))

    def on_cb(x):
        # remember whether the user forces spending the wallet's own RVN
        self.force_use_own = x == Qt.Checked

    self.use_own_cb.stateChanged.connect(on_cb)
    vbox.addLayout(Buttons(self.use_own_cb, CancelButton(d), button))
    button.setEnabled(False)

    def get_address():
        # destination address, or None when the field is not a valid address
        addr = str(address_e.text()).strip()
        if ravencoin.is_address(addr):
            return addr

    def get_pk(*, raise_on_error=False):
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)

    def on_edit():
        # enable the Sweep button only when both fields validate
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {repr(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)

    on_address = lambda text: address_e.setStyleSheet(
        (ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address_for_corruption(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    privkeys = get_pk()

    def on_success(result):
        # called with (coins, keypairs) once sweep_preparations finishes
        coins, keypairs = result
        total_held = sum([coin.value_sats() for coin in coins], RavenValue())
        coins_rvn = [coin for coin in coins if coin.value_sats().rvn_value.value != 0]
        coins_assets = [coin for coin in coins if coin.value_sats().assets]
        self.warn_if_watching_only()
        # If there is not RVN in the privkeys, use our own
        # TODO: dynamically use our own RVN if not enough
        # TODO: Ensure that any RVN held in the privkey is moved over
        use_own = total_held.rvn_value.value < 0.1 or self.force_use_own
        if use_own:
            coins_rvn += list(self.get_coins())
        outputs = []
        if total_held.assets:
            outputs = [PartialTxOutput.from_address_and_value(addr, value=value, asset=asset) for asset, value in total_held.assets.items()]
        if total_held.rvn_value.value != 0:
            outputs += [PartialTxOutput.from_address_and_value(addr, value=total_held.rvn_value, is_max=not use_own)]
        self.pay_onchain_dialog(coins_rvn + coins_assets, outputs, external_keypairs=keypairs, mixed=use_own)

    def on_failure(exc_info):
        self.on_error(exc_info)

    msg = _('Preparing sweep transaction...')
    task = lambda: self.network.run_from_another_thread(
        sweep_preparations(privkeys, self.network))
    WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
    """Prompt for a multi-line list of keys/addresses and feed them to *func*.

    *func* returns (good_inputs, bad_inputs); both outcomes are reported
    to the user (first 10 entries shown), then the address/history/asset
    views are refreshed.
    """
    text = text_dialog(
        parent=self,
        title=title,
        header_layout=header_layout,
        ok_label=_('Import'),
        allow_multi=True,
        config=self.config,
    )
    if not text:
        return
    good_inputs, bad_inputs = func(str(text).split())
    if good_inputs:
        shown = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            shown += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + shown)
    if bad_inputs:
        shown = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10:
            shown += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + shown)
    self.address_list.update()
    self.history_list.update()
    self.asset_list.update()
def import_addresses(self):
    """Open the 'Import addresses' dialog for wallets that support it."""
    if self.wallet.can_import_address():
        self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                        self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Open the 'Import private keys' dialog (password-protected)."""
    if not self.wallet.can_import_privkey():
        return
    header = QHBoxLayout()
    header.addWidget(QLabel(_("Enter private keys") + ':'))
    header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(_('Import private keys'), header,
                    lambda keys: self.wallet.import_private_keys(keys, password))
def update_fiat(self):
    """Show/hide fiat amount fields per exchange-rate settings and refresh views."""
    # fix: coerce to bool -- `self.fx and self.fx.is_enabled()` yields
    # None when self.fx is unset, which is not a valid setVisible argument
    fiat_enabled = bool(self.fx and self.fx.is_enabled())
    self.fiat_send_e.setVisible(fiat_enabled)
    self.fiat_receive_e.setVisible(fiat_enabled)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Open the Preferences dialog and apply any changed settings when it closes."""
    from .settings_dialog import SettingsDialog
    d = SettingsDialog(self, self.config)
    self.alias_received_signal.connect(d.set_alias_color)
    d.exec_()
    self.alias_received_signal.disconnect(d.set_alias_color)
    if self.fx:
        self.fx.trigger_update()
    run_hook('close_settings_dialog')
    # persist asset black/white lists only if the dialog edited them
    if d.save_blacklist:
        self.config.set_key('asset_blacklist', self.asset_blacklist, True)
    if d.save_whitelist:
        self.config.set_key('asset_whitelist', self.asset_whitelist, True)
    if d.save_whitelist or d.save_blacklist:
        self.asset_list.update()
        self.history_model.refresh('Changed asset white or black list', True)
    if d.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
    # OP_RETURN message widgets are only shown when enabled in preferences
    vis = self.config.get('enable_op_return_messages', False)
    self.pubkey_label.setVisible(vis)
    self.pubkey_e.setVisible(vis)
def closeEvent(self, event):
    """Qt close handler: run window cleanup, then accept the close."""
    # note that closeEvent is NOT called if the user quits with Ctrl-C
    self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop threads, persist state, release resources.

    Idempotent via the _cleaned_up flag; invoked from closeEvent().
    """
    if self._cleaned_up:
        return
    self._cleaned_up = True
    if self.wallet.thread:
        self.wallet.thread.stop()
        self.wallet.thread = None
    util.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember window geometry for next startup
        g = self.geometry()
        self.wallet.db.put("winpos-qt", [g.left(), g.top(),
                                         g.width(), g.height()])
    # keep only the most recent 50 console-history entries
    self.wallet.db.put("qt-console-history", self.console.history[-50:])
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    if self._update_check_thread:
        self._update_check_thread.exit()
        self._update_check_thread.wait()
    if self.tray:
        self.tray = None
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
# TODO: On mac, this cannot be closed; disabled for now
def logview_dialog(self):
    """Show a modal log viewer that tails the electrum logfile (polled every 2.5s)."""
    from electrum.logging import get_logfile_path, electrum_logger

    def watch_file(fn, logviewer):
        # poor man's tail
        if os.path.exists(fn):
            mtime = os.path.getmtime(fn)
            if mtime > self.logfile_mtime:
                # file modified
                self.logfile_mtime = mtime
                logviewer.clear()
                with open(fn, "r") as f:
                    for line in f:
                        # strip the timestamp prefix up to 'Z |'
                        logviewer.append(line.partition('Z |')[2].lstrip(' ').rstrip('\n'))

    d = WindowModalDialog(self, _('Log Viewer'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    self.logviewer = QTextEdit()
    self.logviewer.setAcceptRichText(False)
    self.logviewer.setReadOnly(True)
    # placeholder shown until/unless file logging is enabled
    self.logviewer.setPlainText(
        _("Enable 'Write logs to file' in Preferences -> General and restart Electrum-Ravencoin to view logs here"))
    layout.addWidget(self.logviewer, 1, 1)
    logfile = get_logfile_path()
    self.logtimer = QTimer(self)
    if logfile is not None:
        load_logfile = partial(watch_file, logfile, self.logviewer)
        self.logfile_mtime = 0
        load_logfile()
        self.logtimer.timeout.connect(load_logfile)
        self.logtimer.start(2500)
    d.exec_()
    self.logtimer.stop()
def plugins_dialog(self):
    """Show the plugins dialog: one checkbox per plugin plus its optional settings widget."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400, 250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0, 1)
    w.setLayout(grid)
    settings_widgets = {}

    def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
        # keep the per-plugin settings widget in sync with its enabled state
        widget = settings_widgets.get(name)  # type: Optional[QWidget]
        if widget and not p:
            # plugin got disabled, rm widget
            grid.removeWidget(widget)
            widget.setParent(None)
            settings_widgets.pop(name)
        elif widget is None and p and p.requires_settings() and p.is_enabled():
            # plugin got enabled, add widget
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)

    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        # note: all enabled plugins will receive this hook:
        run_hook('init_qt', self.gui_object)

    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        # keystore-registering plugins are managed elsewhere
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
    """Open the Child-Pays-For-Parent dialog for an unconfirmed *parent_tx*.

    A zero-fee child tx is built first to estimate sizes; the final child
    tx (with the user-chosen fee) is built and shown on accept.
    """
    new_tx = self.wallet.cpfp(parent_tx, 0)
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_txid = parent_tx.txid()
    assert parent_txid
    parent_fee = self.wallet.get_tx_fee(parent_txid)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = RVNAmountEdit(self.get_decimal_point)
    combined_fee = QLabel('')
    combined_feerate = QLabel('')

    def on_fee_edit(x):
        # recompute output amount / combined fee / combined feerate labels
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            return
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)

    fee_e.textChanged.connect(on_fee_edit)

    def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
        # child fee needed so that (parent+child) reach fee_per_kb overall,
        # clamped to [total_size, max_fee]
        if fee_per_kb is None:
            return None
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = round(fee)
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee

    suggested_feerate = self.config.fee_per_kb()
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)

    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)

    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(fee_combo, 4, 2)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        return  # fee left empty, treat is as "cancel"
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    try:
        new_tx = self.wallet.cpfp(parent_tx, fee)
    except CannotCPFP as e:
        self.show_error(str(e))
        return
    self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
    """Populate *tx* with wallet/network metadata. Returns whether successful."""
    # note side-effect: tx is being mutated
    assert isinstance(tx, PartialTransaction)
    try:
        # note: this might download input utxos over network
        BlockingWaitingDialog(
            self,
            _("Adding info to tx, from wallet and network..."),
            lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
        )
    except NetworkException as e:
        # surface the failure to the user; caller decides how to proceed
        self.show_error(repr(e))
        return False
    return True
def bump_fee_dialog(self, tx: Transaction):
    """Open the RBF fee-bump dialog for *tx*."""
    # capture the txid before converting to a PartialTransaction
    txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if self._add_info_to_tx_from_wallet_and_network(tx):
        BumpFeeDialog(main_window=self, tx=tx, txid=txid).run()
def dscancel_dialog(self, tx: Transaction):
    """Open the double-spend/cancel dialog for *tx*."""
    # capture the txid before converting to a PartialTransaction
    txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if self._add_info_to_tx_from_wallet_and_network(tx):
        DSCancelDialog(main_window=self, tx=tx, txid=txid).run()
def save_transaction_into_wallet(self, tx: Transaction):
    """Store an offline transaction in the wallet history.

    Returns True when the tx was added and the db persisted, False when
    it conflicts with existing history or cannot be added.
    """
    win = self.top_level_window()
    try:
        added = self.wallet.add_transaction(tx)
    except AddTransactionException as e:
        win.show_error(e)
        return False
    if not added:
        win.show_error(_("Transaction could not be saved.") + "\n" +
                       _("It conflicts with current history."))
        return False
    self.wallet.save_db()
    # need to update at least: history_list, utxo_list, address_list
    self.need_update.set()
    msg = (_("Transaction added to wallet history.") + '\n\n' +
           _("Note: this is an offline transaction, if you want the network "
             "to see it, you need to broadcast it."))
    win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
    return True
def show_cert_mismatch_error(self):
    """Warn once about a server SSL fingerprint mismatch, then close."""
    if self.showing_cert_mismatch_error:
        return  # dialog already being shown
    self.showing_cert_mismatch_error = True
    msg = (_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.")
           + "\n\n"
           + _("Electrum will now exit."))
    self.show_critical(title=_("Certificate mismatch"), msg=msg)
    self.showing_cert_mismatch_error = False
    self.close()
|
ipl_bot.py | import telegram
import threading
import time
import json
import urllib.request
# NOTE(review): this global is never updated -- if_six_hit() assigns a *local*
# match_json, shadowing this one.
match_json = {}
# match_id -> last seen 'prev_overs' string; used to detect a new delivery.
# NOTE(review): name typo ("previous") kept, other code refers to it as-is.
privious_over_details = {}
# match_id -> last two chars of the over string; written but never read back.
prevoius_over_last_run = {}
def if_six_hit():
    """Poll the cricbuzz live feed and report whether a new ball of interest fell.

    Returns True when, for any live "Indian Premier League 2019" match, the
    'prev_overs' string grew since the last poll and its last two characters
    equal "1".  NOTE(review): despite the function name, the code checks for
    "1", not "6" -- confirm the intended trigger with the feed format.
    Performs a blocking HTTP request on every call.
    """
    if_six_hit_bool = False
    with urllib.request.urlopen("https://www.cricbuzz.com/match-api/livematches.json") as url:
        # Local variable: shadows (and never updates) the module-level match_json.
        match_json = json.loads(url.read().decode())
    for match_id,match in match_json['matches'].items():
        if match['series']['name'] == "Indian Premier League 2019":
            if 'score' in match:
                prev_over = match['score']['prev_overs']
                if not match_id in privious_over_details:
                    # First sighting of this match: record state, report nothing.
                    prevoius_over_last_run[match_id] = prev_over[-2:]
                    privious_over_details[match_id] = prev_over
                else:
                    # A longer over-string means at least one new delivery.
                    if prev_over[-2:] == "1" and len(privious_over_details[match_id]) != len(prev_over):
                        privious_over_details[match_id] = prev_over
                        if_six_hit_bool = True
    return if_six_hit_bool
# Chat ids of every user who has messaged the bot (filled by the main loop).
subscriber_list = []
# SECURITY(review): the bot token is hard-coded in source and therefore
# leaked; it should be revoked and read from an environment variable instead.
bot = telegram.Bot(token="893131928:AAFtc2fiFDBn5tRdmp17RunwyLozMOqMw7Y")
# Unused placeholder.
html = ""
def broadcaster():
    """Background loop: poll the score feed every 4 seconds and notify subscribers.

    Runs forever; started on a separate thread by the module-level code.
    """
    while True:
        time.sleep(4)
        # Poll exactly once per cycle.  The original called if_six_hit()
        # twice (once for the log line, once for the branch), doubling the
        # network requests and risking a different answer between the two.
        six_hit = if_six_hit()
        print("if six::::" + str(six_hit))
        if six_hit:
            for subscriber in subscriber_list:
                # bot.send_message(chat_id=subscriber, text="I'm sorry Dave I'm afraid I can't do that.",parse_mode=telegram.ParseMode.HTML)
                print("hi")
# Start the score-polling loop in the background.
broadcaster_thread = threading.Thread(target=broadcaster, args=())
broadcaster_thread.start()

# Main loop: anyone who messages the bot becomes a subscriber.
while True:
    updates = bot.get_updates()
    if updates:
        # Reuse the batch we already fetched.  The original issued a second
        # bot.get_updates() network call here, which could return a
        # different batch than the one just checked for truthiness.
        chat_id = updates[-1].message.chat_id
        if chat_id not in subscriber_list:
            subscriber_list.append(chat_id)
|
__init__.py | from threading import Thread, active_count
from typing import Iterable
import time
import os
import logging
from sys import exit as sysexit
from os import _exit as osexit
# Shared control flags consulted by threadit()-decorated workers:
#   STOP    -- set True (via stop()) to stop launching threads for remaining items
#   KILLALL -- declared here but never consulted in this module
config = {'STOP':False,'KILLALL':False}
def stop(self, cls):
    """Ask threadit() workers to stop spawning new threads.

    Signature matches a signal-handler style callback; both arguments are
    ignored.
    """
    global config
    logging.info('Stop Signal Recieved')
    config['STOP'] = True
def killall(self, cls):
    """Log the kill request and terminate the process immediately.

    Tries a clean sys.exit first; if that is intercepted (SystemExit),
    falls back to os._exit, which cannot be caught.
    """
    logging.info('kill All Signal Recieved')
    try:
        sysexit(0)
    except SystemExit:
        osexit(0)
def threadCount(count):
    """Set the maximum number of concurrent threads used by threadit()."""
    config['threadCount'] = count
def threadit(func):
    """Decorator: fan *func* out across the elements of its first argument.

    If the first positional argument is an Iterable, one thread per element
    is started (the element replaces the iterable as first argument), the
    number of live threads is throttled to config['threadCount'] (default
    cpu_count()+1, clamped to [3, 10*cpu_count()]), all threads are joined,
    and an (uncalled) ``proc`` function is returned.  Otherwise *func* is
    called directly and its result returned.

    NOTE(review): strings are Iterable, so a string first argument is fanned
    out character-by-character -- confirm that is intended.
    """
    import functools  # local import keeps this fix self-contained

    global config

    @functools.wraps(func)  # preserve func's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        if args and isinstance(args[0], Iterable):
            def proc():
                return "Processed"
            threads = []
            # Resolve the thread cap: explicit config value, else cpu-based,
            # clamped to the range [3, 10 * cpu_count].
            max_threads = config.get('threadCount')
            if max_threads is None:
                max_threads = os.cpu_count() + 1
            if max_threads <= 3:
                max_threads = 3
            if max_threads > (os.cpu_count() * 10):
                max_threads = os.cpu_count() * 10
            logging.info('maxthread is' + str(max_threads))
            for elem in args[0]:
                if config['STOP']:
                    break  # cooperative stop requested via stop()
                # Busy-wait until a thread slot frees up.
                while True:
                    active = active_count()
                    if active >= max_threads:
                        time.sleep(1)
                    else:
                        break
                logging.info('active count is' + str(active))
                new_args = (elem,) + args[1:]
                # The while-loop above only exits when active < max_threads,
                # so the original's second `if activecount < maxThread` guard
                # was always true and has been removed.
                worker = Thread(target=func, args=new_args, kwargs=kwargs)
                worker.start()
                logging.info('thread Created with func ' + str(func) + ' args ' + str(new_args) + ' kwargs ' + str(kwargs))
                threads.append(worker)
            for worker in threads:
                worker.join()
            return proc
        return func(*args, **kwargs)
    return wrapper
|
callbacks_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Shared fixture dimensions used by the tests in this file.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    # Replace every hook on this instance with a counting wrapper.
    for name in ('on_batch_begin', 'on_batch_end', 'on_epoch_begin',
                 'on_epoch_end', 'on_predict_batch_begin',
                 'on_predict_batch_end', 'on_predict_begin', 'on_predict_end',
                 'on_test_batch_begin', 'on_test_batch_end', 'on_test_begin',
                 'on_test_end', 'on_train_batch_begin', 'on_train_batch_end',
                 'on_train_begin', 'on_train_end'):
      setattr(self, name, self.wrap_with_counts(name, getattr(self, name)))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped so each call increments its count."""

    def _call_and_count(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Returns (a keras Sequence of 5 all-ones batches, None)."""

  class _OnesSequence(keras.utils.data_utils.Sequence):
    """Fixed-length sequence yielding identical all-ones (x, y) batches."""

    def __len__(self):
      return 5

    def __getitem__(self, _):
      return np.ones((2, 10)), np.ones((2, 1))

  return _OnesSequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Verifies how many times each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    """Builds a small two-layer binary classifier compiled with AdamOptimizer."""
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    """5 epochs x 5 steps: expects 25 train batches and 10 validation batches."""
    if not context.executing_eagerly():
      self.skipTest('Behavior changed in v2.')
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))

    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2,
        steps_per_epoch=5,
        epochs=5,
        callbacks=[counter])

    # 5 steps/epoch * 5 epochs = 25 train batches; validation runs each epoch.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    """Evaluate over 5 batches: only the on_test_* hooks should fire."""
    x, y = data
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)

    model = self._get_model()
    counter = Counter()
    model.evaluate(
        x,
        y,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    """Predict over 5 batches: only the on_predict_* hooks should fire."""
    x = data[0]
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)

    model = self._get_model()
    counter = Counter()
    model.predict(
        x,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    """CallbackList must forward each batch-level hook to its callbacks once."""
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])

    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)

    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
# Make sure the value of val_ metrics are not zeros.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
period=100) # The period should be ignored (this test tests this).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(testing_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period ensuring the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are closed, if a ModelCheckpoint with
# load_weights_on_restart=True is given (so the model is restored at the
# beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegexp(IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegexp(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in the epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau should cut the LR only when the monitored value
    fails to improve by more than `min_delta`."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)

      def make_model():
        # Fixed TF and numpy seeds so both models below start from
        # identical weights and see identical shuffling.
        random_seed.set_random_seed(1234)
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1))
        return model

      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set as 0. Test fails when the order of this callback and assertion is
      # interchanged.
      model = make_model()
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      # With min_delta=0 the loss counts as improving, so the LR must
      # remain at its initial 0.1.
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=2)
      # min_delta=10 can never be met, so the LR is cut by factor=0.1
      # from 0.1 to 0.01.
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
  def test_ReduceLROnPlateau_backwards_compatibility(self):
    """The deprecated `epsilon` kwarg must warn and map onto `min_delta`."""
    with test.mock.patch.object(logging, 'warning') as mock_log:
      reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
      # Constructing with `epsilon` should emit a deprecation warning.
      self.assertRegexpMatches(
          str(mock_log.call_args), '`epsilon` argument is deprecated')
    # The value is carried over to `min_delta`; `epsilon` is not kept.
    self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
    self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
    self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger must create a log file with the configured separator,
    support append mode, and be reusable across fit() calls without
    duplicating the header."""
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')
      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)

      def make_model():
        # Fixed seed so each case trains an identically-initialized model.
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1),
            metrics=['accuracy'])
        return model

      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        # Sniff the dialect to verify the configured separator was used.
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks

      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        for line in list_lines:
          # Each row has 5 columns -> 4 separators per line.
          assert line.count(sep) == 4
        # 1 header + 4 data rows (1 + 1 + 2 epochs across the three cases).
        assert len(list_lines) == 5
        output = ' '.join(list_lines)
        # The header must appear exactly once despite append and reuse.
        assert len(re.findall('epoch', output)) == 1

      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.cached_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')

      def data_generator():
        """Yields real batches for a while, then NaN batches so that
        TerminateOnNaN fires partway through training."""
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            # Switch to all-NaN batches after ~3 passes over the data.
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          tot += 1
          i %= max_batch_index

      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      # Training ran past epoch 1 but terminated on a non-finite loss.
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])

      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      # The terminating NaN epoch must itself appear in the CSV.
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_TerminateOnNaN(self):
    """Training must stop on the first epoch whose loss becomes NaN."""
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN()]
    model = keras.models.Sequential()
    # Huge constant initial weights make the loss overflow to NaN
    # on the very first epoch.
    initializer = keras.initializers.Constant(value=1e5)
    for _ in range(5):
      model.add(
          keras.layers.Dense(
              2,
              input_dim=INPUT_DIM,
              activation='relu',
              kernel_initializer=initializer))
    model.add(keras.layers.Dense(NUM_CLASSES))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')
    history = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=20)
    loss = history.history['loss']
    # Despite epochs=20, only one epoch ran before termination.
    self.assertEqual(len(loss), 1)
    self.assertTrue(np.isnan(loss[0]))
  @unittest.skipIf(
      os.name == 'nt',
      'use_multiprocessing=True does not work on windows properly.')
  def test_LambdaCallback(self):
    """A LambdaCallback's on_train_end hook must fire when training ends,
    here used to release a background thread."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])

      # Start an arbitrary process that should run during model
      # training and be terminated after training has completed.
      e = threading.Event()

      def target():
        # Blocks until the event is set by the on_train_end hook.
        e.wait()

      t = threading.Thread(target=target)
      t.start()
      cleanup_callback = keras.callbacks.LambdaCallback(
          on_train_end=lambda logs: e.set())

      cbks = [cleanup_callback]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      t.join()
      # The thread only exits if on_train_end actually set the event.
      assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1) # a 1 by 1 array
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
def test_progbar_infers_steps(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
data = dataset_ops.DatasetV2.from_tensor_slices((x, y)).batch(2)
data = data.filter(lambda x, y: True) # Unknown cardinality.
progbar = keras.callbacks.ProgbarLogger('steps')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
self.assertIsNone(progbar.target)
model.fit(data, epochs=2, callbacks=[progbar])
self.assertEqual(progbar.target, 5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
self.assertTrue(callback.on_batch_end_called)
self.assertTrue(callback.on_batch_end_called)
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_implements_batch_hooks(self):
    """CallbackList must schedule batch-level hooks only when some callback
    actually overrides them."""

    class MyCallbackWithBatchHooks(keras.callbacks.Callback):
      # Overrides all three batch-end hooks and counts invocations.

      def __init__(self):
        self.train_batches = 0
        self.test_batches = 0
        self.predict_batches = 0

      def on_train_batch_end(self, batch, logs=None):
        self.train_batches += 1

      def on_test_batch_end(self, batch, logs=None):
        self.test_batches += 1

      def on_predict_batch_end(self, batch, logs=None):
        self.predict_batches += 1

    class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
      # Only overrides an epoch-level hook.

      def __init__(self):
        self.epochs = 0

      def on_epoch_end(self, epoch, logs=None):
        self.epochs += 1

    x, y = np.ones((10, 1)), np.ones((10, 1))
    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')

    my_cb = MyCallbackWithBatchHooks()
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    # Batch hooks are overridden, so the list must schedule them.
    self.assertTrue(cb_list._should_call_train_batch_hooks)
    self.assertTrue(cb_list._should_call_test_batch_hooks)
    self.assertTrue(cb_list._should_call_predict_batch_hooks)

    model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
    model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
    model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

    # 10 samples at batch_size=10 -> 1 batch per epoch/evaluate/predict.
    self.assertEqual(my_cb.train_batches, 2)
    self.assertEqual(my_cb.test_batches, 1)
    self.assertEqual(my_cb.predict_batches, 1)

    my_cb = MyCallbackWithoutBatchHooks()
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    self.assertLen(cb_list.callbacks, 1)
    # No batch hooks overridden -> the list must skip them entirely.
    self.assertFalse(cb_list._should_call_train_batch_hooks)
    self.assertFalse(cb_list._should_call_test_batch_hooks)
    self.assertFalse(cb_list._should_call_predict_batch_hooks)

    model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
    model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
    model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_implements_batch_hooks_override(self):
    """The `_implements_*_batch_hooks` overrides must control whether
    CallbackList calls the batch-level hooks at all."""

    class MyCallback(keras.callbacks.Callback):
      # Counts batch-end invocations, but advertises hook support only
      # according to `should_run`.

      def __init__(self, should_run=True):
        self.should_run = should_run
        self.train_batches = 0
        self.test_batches = 0
        self.predict_batches = 0

      def on_train_batch_end(self, batch, logs=None):
        self.train_batches += 1

      def on_test_batch_end(self, batch, logs=None):
        self.test_batches += 1

      def on_predict_batch_end(self, batch, logs=None):
        self.predict_batches += 1

      def _implements_train_batch_hooks(self):
        return self.should_run

      def _implements_test_batch_hooks(self):
        return self.should_run

      def _implements_predict_batch_hooks(self):
        return self.should_run

    x, y = np.ones((10, 1)), np.ones((10, 1))
    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')

    my_cb = MyCallback(should_run=True)
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    self.assertTrue(cb_list._should_call_train_batch_hooks)
    self.assertTrue(cb_list._should_call_test_batch_hooks)
    self.assertTrue(cb_list._should_call_predict_batch_hooks)

    model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
    model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
    model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

    # should_run=True: one batch per epoch/evaluate/predict was counted.
    self.assertEqual(my_cb.train_batches, 2)
    self.assertEqual(my_cb.test_batches, 1)
    self.assertEqual(my_cb.predict_batches, 1)

    my_cb = MyCallback(should_run=False)
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    self.assertFalse(cb_list._should_call_train_batch_hooks)
    self.assertFalse(cb_list._should_call_test_batch_hooks)
    self.assertFalse(cb_list._should_call_predict_batch_hooks)

    model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
    model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
    model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

    # should_run=False: the hooks were never invoked despite being defined.
    self.assertEqual(my_cb.train_batches, 0)
    self.assertEqual(my_cb.test_batches, 0)
    self.assertEqual(my_cb.predict_batches, 0)
# A single summary observed in an event file during a test.
#   logdir: str. Directory of the FileWriter that recorded the summary.
#   tag: str. The summary's name.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ['logdir', 'tag'])
class _SummaryFile(object):
  """A record of summary tags and the files to which they were written.

  Fields `scalars`, `images`, `histograms`, and `tensors` are sets
  containing `_ObservedSummary` values.
  """

  def __init__(self):
    # One empty set per summary kind.
    for kind in ('scalars', 'images', 'histograms', 'tensors'):
      setattr(self, kind, set())
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains an summary of unexpected kind.
  """
  summaries = _SummaryFile()
  # Maps each `value` oneof-kind to the set that should record it.
  containers_by_kind = {
      'simple_value': summaries.scalars,
      'image': summaries.images,
      'histo': summaries.histograms,
      'tensor': summaries.tensors,
  }
  for dirpath, _, filenames in os.walk(logdir):
    event_files = (fn for fn in filenames if fn.startswith('events.out.'))
    for filename in event_files:
      path = os.path.join(dirpath, filename)
      for event in summary_iterator.summary_iterator(path):
        # Skip non-summary events (e.g., `graph_def` events).
        if not event.summary:
          continue
        for value in event.summary.value:
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          if kind not in containers_by_kind:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          target = containers_by_kind[kind]
          if kind == 'tensor' and value.tag != 'keras':
            # Check for V2 scalar summaries, which have a different PB
            # structure.
            plugin = event.summary.value[0].metadata.plugin_data.plugin_name
            if plugin == 'scalars':
              target = summaries.scalars
          target.add(_ObservedSummary(logdir=dirpath, tag=value.tag))
  return summaries
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
  """Tests for the V2 behavior of `keras.callbacks.TensorBoard`,
  parameterized over model types and eager/graph execution modes."""

  def setUp(self):
    super(TestTensorBoardV2, self).setUp()
    # Standard run-directory layout: <logdir>/train and <logdir>/validation.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_model(self):
    """Builds and compiles a small conv model for (10, 10, 1) inputs."""
    layers = [
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1)
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def test_TensorBoard_default_logdir(self):
    """Regression test for cross-platform pathsep in default logdir."""
    os.chdir(self.get_temp_dir())

    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    # With no logdir, summaries land under ./logs/{train,validation}.
    summary_file = list_summaries(logdir='.')
    train_dir = os.path.join('.', 'logs', 'train')
    validation_dir = os.path.join('.', 'logs', 'validation')
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_basic(self):
    """Default settings write one epoch_loss scalar per run directory."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_across_invocations(self):
    """Regression test for summary writer resource use-after-free.

    See: <https://github.com/tensorflow/tensorflow/issues/25707>
    """
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    # The same callback instance is reused across two fit() invocations.
    for _ in (1, 2):
      model.fit(
          x,
          y,
          batch_size=2,
          epochs=2,
          validation_data=(x, y),
          callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_no_spurious_event_files(self):
    """Without validation data, only the 'train' run dir may be created."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        callbacks=[tb_cbk])

    events_file_run_basenames = set()
    for (dirpath, _, filenames) in os.walk(self.logdir):
      if any(fn.startswith('events.out.') for fn in filenames):
        events_file_run_basenames.add(os.path.basename(dirpath))
    self.assertEqual(events_file_run_basenames, {'train'})

  def test_TensorBoard_batch_metrics(self):
    """update_freq=1 additionally writes per-batch loss scalars."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )

  def test_TensorBoard_weight_histograms(self):
    """histogram_freq=1 writes per-weight histograms for the train run."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
    model_type = testing_utils.get_model_type()

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    # Histogram tags are compared modulo the model-type-dependent layer
    # name prefix.
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )

  def test_TensorBoard_weight_images(self):
    """write_images=True additionally emits image summaries per weight."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, write_images=True)
    model_type = testing_utils.get_model_type()

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.images, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
        },
    )

  def test_custom_summary(self):
    """Scalars emitted from inside a layer's call() must reach the log."""
    if not context.executing_eagerly():
      self.skipTest('Custom summaries only supported in V2 code path.')

    def scalar_v2_mock(name, data, step=None):
      """A reimplementation of the scalar plugin to avoid circular deps."""
      metadata = summary_pb2.SummaryMetadata()
      # Should match value in tensorboard/plugins/scalar/metadata.py.
      metadata.plugin_data.plugin_name = 'scalars'
      with summary_ops_v2.summary_scope(
          name, 'scalar_summary', values=[data, step]) as (tag, _):
        return summary_ops_v2.write(
            tag=tag,
            tensor=math_ops.cast(data, 'float32'),
            step=step,
            metadata=metadata)

    class LayerWithSummary(keras.layers.Layer):

      def call(self, x):
        # Identity layer that records the sum of its input as a summary.
        scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
        return x

    model = testing_utils.get_model_from_layers([LayerWithSummary()],
                                                input_shape=(5,),
                                                name='model')
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
    x, y = np.ones((10, 5)), np.ones((10, 5))
    model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(
                logdir=self.train_dir,
                tag='model/layer_with_summary/custom_summary'),
            _ObservedSummary(
                logdir=self.validation_dir,
                tag='model/layer_with_summary/custom_summary')
        },
    )

  def _strip_layer_names(self, summaries, model_type):
    """Deduplicate summary names modulo layer prefix.

    This removes the first slash-component of each tag name: for
    instance, "foo/bar/baz" becomes "bar/baz".

    Args:
      summaries: A `set` of `_ObservedSummary` values.
      model_type: The model type currently being tested.

    Returns:
      A new `set` of `_ObservedSummary` values with layer prefixes
      removed.
    """
    result = set()
    for summary in summaries:
      if '/' not in summary.tag:
        raise ValueError('tag has no layer name: %r' % summary.tag)
      # Subclassed models nest one level deeper, so strip two components.
      start_from = 2 if 'subclass' in model_type else 1
      new_tag = '/'.join(summary.tag.split('/')[start_from:])
      result.add(summary._replace(tag=new_tag))
    return result

  def test_TensorBoard_invalid_argument(self):
    """Unknown keyword arguments must raise a ValueError."""
    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
      # `wwrite_images` is a deliberate misspelling of `write_images`.
      keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly())
return model
def _get_trace_file(self, logdir):
profile_dir = os.path.join(logdir, 'plugins', 'profile')
for (dirpath, dirnames, filenames) in os.walk(profile_dir):
del dirnames # unused
for filename in filenames:
if filename.endswith('.trace.json.gz'):
return os.path.join(dirpath, filename)
return None
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensoriBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
# Test case that replicates a Github issue.
# https://github.com/tensorflow/tensorflow/issues/37543
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):
ops.disable_eager_execution()
inp = keras.Input((1,))
out = keras.layers.Dense(units=1)(inp)
model = keras.Model(inp, out)
model.compile(gradient_descent.SGD(1), 'mse')
logdir = os.path.join(self.get_temp_dir(), 'tb1')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],
)
# Verifies trace exists in the first logdir.
self.assertIsNotNone(self._get_trace_file(logdir=logdir))
logdir = os.path.join(self.get_temp_dir(), 'tb2')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],
)
# Verifies trace exists in the second logdir.
self.assertIsNotNone(self._get_trace_file(logdir=logdir))
def test_TensorBoard_autoTrace_profileBatchRange(self):
    """Profiling a batch range ('1,3') tags the trace with the final batch."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    callback = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
    model.fit(
        x, y, batch_size=4, epochs=2, validation_data=(x, y),
        callbacks=[callback])
    observed = list_summaries(self.logdir)
    # The trace is logged once, at the batch where profiling stops.
    expected = {_ObservedSummary(logdir=self.train_dir, tag=u'batch_3')}
    self.assertEqual(observed.tensors, expected)
    self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
    """Malformed profile_batch values are rejected at construction time."""
    # Negative start, non-numeric end, reversed range, negative scalar.
    for bad_value in ('-1,3', '1,None', '6,5', -1):
        with self.assertRaises(ValueError):
            keras.callbacks.TensorBoard(
                self.logdir,
                histogram_freq=1,
                profile_batch=bad_value,
                write_graph=False)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    """A profile_batch beyond the last batch produces no trace at all."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    callback = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
    model.fit(
        x, y, batch_size=2, epochs=2, validation_data=(x, y),
        callbacks=[callback])
    # Profiling was armed for batch 10000, which never runs, so nothing
    # gets recorded.
    self.assertEmpty(list_summaries(self.logdir).tensors)
    self.assertIsNone(self._get_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
    """Behaviour of ModelCheckpoint's newest-checkpoint-file lookup."""

    def _resolve(self, pattern):
        # Every test drives the same private lookup on a bare callback.
        return keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(pattern)

    def test_get_most_recently_modified_file_matching_pattern(self):
        test_dir = self.get_temp_dir()
        pattern = os.path.join(test_dir, 'f.batch{batch:02d}epoch{epoch:02d}.h5')
        names = ['f.batch03epoch02.h5', 'f.batch02epoch02.h5',
                 'f.batch01epoch01.h5']
        file_paths = [os.path.join(test_dir, name) for name in names]
        for path in file_paths:
            with open(path, 'w') as f:
                # Space the writes out so modification times are distinct.
                time.sleep(2)
                f.write('foo bar')
        # Sanity check: every file really landed on disk.
        self.assertEqual(
            {os.path.join(test_dir, name) for name in os.listdir(test_dir)},
            set(file_paths))
        self.assertEqual(self._resolve(pattern), file_paths[-1])

    def test_some_file_not_matching_pattern(self):
        test_dir = self.get_temp_dir()
        pattern = os.path.join(test_dir, 'f.batch{batch:02d}epoch{epoch:02d}.h5')
        # The newest file ('baatch') deliberately does not match the pattern.
        names = ['f.batch03epoch02.h5', 'f.batch02epoch02.h5',
                 'f.baatch01epoch01.h5']
        file_paths = [os.path.join(test_dir, name) for name in names]
        for path in file_paths:
            with open(path, 'w') as f:
                # Space the writes out so modification times are distinct.
                time.sleep(2)
                f.write('foo bar')
        self.assertEqual(self._resolve(pattern), file_paths[-2])

    def test_get_same_file_if_file_name_equals_pattern(self):
        test_dir = self.get_temp_dir()
        file_path = os.path.join(test_dir, 'f.batch02.h5')
        with open(file_path, 'w') as f:
            f.write('foo bar')
        self.assertEqual(
            os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
        # A literal path (no placeholders) resolves to itself.
        self.assertEqual(self._resolve(file_path), file_path)

    def test_get_none_if_file_does_not_exist(self):
        test_dir = self.get_temp_dir()
        file_path = os.path.join(test_dir, 'f.batch02.h5')
        self.assertLen(os.listdir(test_dir), 0)
        self.assertIsNone(self._resolve(file_path))

    def test_using_checkpoint_management_latest_checkpoint(self):
        test_dir = self.get_temp_dir()
        pattern = os.path.join(test_dir, 'f.batch{batch:02d}epoch{epoch:02d}')
        ckpt_file_path = os.path.join(test_dir, 'f.batchXepochY')
        with open(ckpt_file_path, 'w') as f:
            f.write('dummy ckpt')
        checkpoint_management.update_checkpoint_state_internal(
            test_dir, ckpt_file_path)
        for name in ['f.batch03epoch02', 'f.batch02epoch02']:
            with open(os.path.join(test_dir, name), 'w') as f:
                f.write('foo bar')
        # checkpoint_management.latest_checkpoint takes priority even though
        # the pattern-matching files were written later.
        self.assertEqual(self._resolve(pattern), ckpt_file_path)
if __name__ == '__main__':
    # Entry point: run the callback test suite when executed directly.
    test.main()
|
realfunction.py |
import uuid
import numpy as np
import scipy.optimize
from ._population import Population
from pychemia.utils.mathematics import unit_vector
class RealFunction(Population):
    """Population of points in R^N used to optimize a real-valued function.

    Members live in the in-memory dict ``self.db`` as
    ``{'x': position, 'fx': value}`` keyed by a short random identifier.
    """

    def __init__(self, function, ndim, limits, local_minimization=False):
        """
        Creates a simple population of points in R^N with
        N=ndim the dimensionality of space and
        using an univaluated function 'function'

        :param function: Routine to evaluate a function
        :param ndim: (int) Dimensions of function space
        :param limits: (numpy.ndarray) Search-space bounds; either a
            (lo, hi) pair applied to every dimension or a full array.
        :param local_minimization: (bool) If True, each evaluation first
            relaxes the point with scipy.optimize.minimize.
        :return:
        """
        Population.__init__(self, 'Euclidean', 'global', use_mongo=False, distance_tolerance=1E-3)
        self.tag = 'global'
        self.name = 'Real Function'
        self.function = function
        self.ndim = ndim
        if len(limits) == 2:
            # A single (lo, hi) pair: broadcast it to every dimension.
            self.limits = np.zeros((ndim, 2))
            self.limits[:, 0] = limits[0]
            self.limits[:, 1] = limits[1]
        else:
            self.limits = np.array(limits)
            # NOTE(review): the branch above builds shape (ndim, 2) but this
            # asserts (2, ndim), and is_inside() indexes limits[i, 0]/[i, 1]
            # (i.e. (ndim, 2)) -- the two layouts look inconsistent; confirm
            # which one callers actually pass.
            assert (self.limits.shape == (2, ndim))
        self._members = []   # all identifiers ever created
        self._actives = []   # identifiers currently taking part in the search
        self.db = {}         # ident -> {'x': position, 'fx': function value}
        self.moves = {}      # ident -> stacked history of visited positions
        self.pcdb = None
        self.local_minimization = local_minimization

    def __str__(self):
        """Return a short human-readable summary of the population."""
        ret = ' Euclidean Population\n\n'
        ret += ' Name: %s\n' % self.name
        ret += ' Tag: %s\n' % self.tag
        ret += '\n'
        ret += ' Members: %d\n' % len(self.members)
        ret += ' Actives: %d\n' % len(self.actives)
        # 'evaluated' is presumably provided by the Population base class.
        ret += ' Evaluated: %d\n' % len(self.evaluated)
        return ret

    def new_entry(self, data, active=True):
        """Insert one point into the population and evaluate it.

        :param data: Coordinates of the new point.
        :param active: If True the point also joins the active set.
        :return: Identifier of the new entry.
        """
        ident = self.new_identifier()
        x = np.atleast_1d(data)
        self.db[ident] = {'x': x, 'fx': None}
        self.evaluate_entry(ident)
        if active:
            self.actives.append(ident)
        self.members.append(ident)
        return ident

    def add_random(self):
        """Create one uniformly random point inside the limits.

        :return: (ident, None) -- second element kept for API symmetry.
        """
        x = np.random.random_sample(self.ndim)
        # Scale the unit sample into [lo, hi] per dimension.
        x = x * (self.limits[:, 1] - self.limits[:, 0]) + self.limits[:, 0]
        return self.new_entry(x), None

    def coordinate(self, i):
        """Return the position vector of member *i*."""
        return self.db[i]['x']

    def cross(self, ids):
        """Create two children from two parents (crossover).

        For 1D the children are the midpoint and the absolute difference;
        for 2D the coordinates are swapped; otherwise a random single-point
        split recombines the parents.

        :param ids: Sequence with exactly two parent identifiers.
        :return: Pair of child identifiers, lower function value first.
        """
        assert len(ids) == 2
        parent1 = self.coordinate(ids[0])
        parent2 = self.coordinate(ids[1])
        if self.ndim == 1:
            son1 = 0.5 * (parent1 + parent2)
            son2 = np.abs(parent1 - parent2)
        elif self.ndim == 2:
            son1 = np.array([parent1[0], parent2[1]])
            son2 = np.array([parent2[0], parent1[1]])
        else:
            # Single-point crossover at a random interior split.
            split = np.random.randint(1, self.ndim - 1)
            son1 = np.concatenate((parent1[:split], parent2[split:]))
            son2 = np.concatenate((parent2[:split], parent1[split:]))
        new_ident1 = self.new_identifier()
        self.members.append(new_ident1)
        new_ident2 = self.new_identifier()
        self.members.append(new_ident2)
        self.db[new_ident1] = {'x': son1, 'fx': None}
        self.evaluate_entry(new_ident1)
        self.db[new_ident2] = {'x': son2, 'fx': None}
        self.evaluate_entry(new_ident2)
        # Return the better (lower fx) child first.
        if self.db[new_ident1]['fx'] > self.db[new_ident2]['fx']:
            return new_ident2, new_ident1
        else:
            return new_ident1, new_ident2

    def distance(self, imember, jmember):
        """Euclidean distance between two members."""
        # The trivial metric
        x1 = self.db[imember]['x']
        x2 = self.db[jmember]['x']
        return np.linalg.norm(x2 - x1)

    def enable(self, ident):
        """Add *ident* to the active set if not already present."""
        if ident not in self.actives:
            self.actives.append(ident)

    def evaluate(self):
        """(Re-)evaluate the function for every member."""
        for ident in self.members:
            self.evaluate_entry(ident)

    def evaluate_entry(self, ident):
        """Compute and store f(x) for one member.

        With local_minimization enabled, the point is first relaxed with
        scipy.optimize.minimize; the relaxed position replaces the original
        only when it stays inside the limits.
        """
        x = self.db[ident]['x']
        if self.local_minimization:
            localmin = scipy.optimize.minimize(self.function, x)
            if self.is_inside(localmin.x):
                self.db[ident]['x'] = localmin.x
                self.db[ident]['fx'] = localmin.fun
            else:
                # Minimizer escaped the box: keep the original point.
                self.db[ident]['fx'] = self.function(x)
        else:
            self.db[ident]['fx'] = self.function(x)

    # def evaluator_daemon(self):
    #
    #     def worker(db, function, d):
    #         while True:
    #             for entry_id in db:
    #                 if db[entry_id]['fx'] is None:
    #                     self.evaluate_entry(entry_id)
    #                     for entry_id in self.db:
    #                         d[entry_id] = self.db[entry_id]
    #             time.sleep(5)
    #
    #     manager = Manager()
    #     d = manager.dict()
    #
    #     p = Process(target=worker, args=(self.db, self.function, d))
    #     p.start()
    #     return p, d

    def is_evaluated(self, i):
        """Return True when member *i* exists and already has a stored fx."""
        if i in self.db and self.db[i]['fx'] is not None:
            return True
        else:
            return False

    def from_dict(self, population_dict):
        # Deserialization is not supported for this in-memory population.
        pass

    def disable(self, ident):
        """Remove *ident* from the active set if present."""
        if ident in self.actives:
            self.actives.remove(ident)

    def get_values(self, selection):
        """Return {ident: fx} for each identifier in *selection*."""
        ret = {}
        for i in selection:
            ret[i] = self.value(i)
        return ret

    def member_str(self, imember):
        """Format one member as '(x1, x2, ...) -> fx' for display."""
        ret = '('
        for i in range(self.ndim):
            ret += '%5.2f' % self.db[imember]['x'][i]
            if i < self.ndim - 1:
                ret += ', '
            else:
                ret += ') -> '
        if self.value(imember) is not None:
            ret += '%5.2f' % self.value(imember)
        else:
            ret += 'None'
        return ret

    def move(self, imember, jmember, factor=0.2, in_place=False):
        """
        Moves imember in the direction of jmember
        If in_place is True the movement occurs on the
        same address as imember

        :param factor: Fraction of the distance to travel.
        :param imember: Identifier of the member being moved.
        :param jmember: Identifier of the member moved towards.
        :param in_place: Overwrite imember instead of creating a new member.
        :return: Identifier of the moved (or newly created) member.
        """
        x1 = self.db[imember]['x']
        x2 = self.db[jmember]['x']
        newx = x1 + factor * (x2 - x1)
        if not in_place:
            new_ident = self.new_identifier()
            self.actives.append(new_ident)
            self.members.append(new_ident)
        else:
            new_ident = imember
        # Record the trajectory so the search path can be inspected later.
        if new_ident not in self.moves:
            # First movement for this member: store origin and destination.
            self.moves[new_ident] = np.vstack((x1, newx))
        else:
            self.moves[new_ident] = np.vstack((self.moves[new_ident], newx))
        self.db[new_ident] = {'x': newx, 'fx': None}
        self.evaluate_entry(new_ident)
        return new_ident

    def is_inside(self, x):
        """Return True when every coordinate of *x* lies within the limits.

        Indexes limits as (ndim, 2), matching the broadcast branch of
        __init__ (see the shape note there).
        """
        outside = False
        for i in range(self.ndim):
            if self.limits[i, 0] > x[i] or x[i] > self.limits[i, 1]:
                outside = True
        return not outside

    def move_random(self, imember, factor=0.2, in_place=False, kind='move'):
        """Move *imember* a distance 'factor' in a random direction.

        Directions are redrawn until the destination lies inside the limits.

        :param kind: Unused here; kept for interface compatibility.
        """
        x = np.array(self.db[imember]['x'])
        newx = x
        outside = True
        while outside:
            # Random direction on the unit sphere, scaled by 'factor'.
            dx = 2 * np.random.random_sample(self.ndim) - 1
            dx = unit_vector(dx)
            newx = x + factor * dx
            outside = not self.is_inside(newx)
        if not in_place:
            new_ident = self.new_identifier()
            self.actives.append(new_ident)
            self.members.append(new_ident)
        else:
            new_ident = imember
        self.db[new_ident] = {'x': newx, 'fx': None}
        self.evaluate_entry(new_ident)
        return new_ident

    @staticmethod
    def new_identifier():
        """Return a short (12-character) random identifier string."""
        return str(uuid.uuid4())[-12:]

    def random_population(self, n):
        """Add *n* uniformly random members."""
        for i in range(n):
            self.add_random()

    def replace_failed(self):
        # Plain function evaluations cannot fail externally; nothing to do.
        pass

    def recover(self):
        # Nothing to recover: state is purely in memory.
        pass

    def save(self):
        """Dump positions to 'population.dat' and ids to 'members.dat'.

        NOTE(review): writes x[0] and x[1] only, so this assumes ndim >= 2
        (and silently drops higher dimensions) -- confirm intended use.
        """
        wf = open('population.dat', 'w')
        for i in sorted(self.members):
            wf.write("%15s %12.3f %12.3f\n" % (i, self.db[i]['x'][0], self.db[i]['x'][1]))
        wf.close()
        wf = open('members.dat', 'w')
        for i in sorted(self.members):
            wf.write("%15s\n" % i)
        wf.close()

    def save_info(self):
        # No auxiliary metadata to persist.
        pass

    def set_value(self, i, y):
        """Directly set the stored function value of member *i*."""
        self.db[i]['fx'] = y

    def str_entry(self, entry_id):
        """Format one entry as 'x = [...] f(x) = value'."""
        ret = 'x = ['
        for i in self.db[entry_id]['x']:
            ret += '%7.2e ,' % i
        ret = ret[:-1] + '] f(x) = %7.2e' % self.db[entry_id]['fx']
        return ret

    def value(self, imember):
        """Return the stored function value of *imember* (may be None)."""
        return self.db[imember]['fx']

    def write_change(self, change):
        """Record the origin of a member after a population change.

        NOTE(review): the 'promoted' branch writes to db[change['from']]
        while every other change writes to db[change['to']]; verify the
        'promoted' branch is not meant to target 'to' as well.
        """
        # print 'Write Change', change
        if change['change'] == 'promoted':
            self.db[change['from']]['from'] = change['from']
        else:
            self.db[change['to']]['from'] = change['from']

    @property
    def actives(self):
        # Identifiers currently active in the search.
        return self._actives

    @property
    def members(self):
        # All identifiers ever added to the population.
        return self._members
|
test_container.py | import dic
import threading
import time
import unittest
class Standalone(object):
    """Dependency-free component; the simplest thing a container can build."""
    pass
class SpecialStandalone(Standalone):
    """Subclass used to verify registration under a base type (aliasing)."""
    pass
class SimpleComponent(object):
    """Component with one constructor dependency, declared via annotation."""

    def __init__(self, s: Standalone):
        # The container injects 's' based on the Standalone annotation.
        self.standalone = s
class SimpleModule(dic.container.Module):
    """dic module that contributes a Standalone registration to a builder."""

    def load(self, builder):
        builder.register_class(Standalone)
class ContainerBuilderTestCase(unittest.TestCase):
    """Registration behaviour of dic.container.ContainerBuilder."""

    def setUp(self):
        self.builder = dic.container.ContainerBuilder()

    def test_build_creates_empty_container(self):
        """Building with no registrations must not raise."""
        self.builder.build()

    def test_register_class_no_deps(self):
        self.builder.register_class(Standalone)
        built = self.builder.build()
        self.assertEqual(len(built.registry_map), 1)

    def test_register_class_simple_deps(self):
        self.builder.register_class(SimpleComponent)
        built = self.builder.build()
        self.assertEqual(len(built.registry_map), 1)

    def test_register_class_defaults_instance_per_dep(self):
        self.builder.register_class(Standalone)
        built = self.builder.build()
        # Unless told otherwise, registrations are instance-per-dependency.
        self.assertIsInstance(
            built.registry_map[Standalone].component_scope,
            dic.scope.InstancePerDependency)

    def test_register_as_another_type(self):
        self.builder.register_class(SpecialStandalone, register_as=Standalone)
        built = self.builder.build()
        self.assertIn(Standalone, built.registry_map)

    def test_register_as_other_types(self):
        self.builder.register_class(
            SpecialStandalone, register_as=(Standalone, 'x'))
        built = self.builder.build()
        self.assertIn(Standalone, built.registry_map)
        self.assertIn('x', built.registry_map)

    def test_register_callback(self):
        self.builder.register_callback(
            SimpleComponent, lambda c: SimpleComponent(c.resolve(Standalone)))
        built = self.builder.build()
        # Only the callback's own key is registered, not its dependencies.
        self.assertIn(SimpleComponent, built.registry_map)
        self.assertNotIn(Standalone, built.registry_map)

    def test_register_overrides_previous_registration(self):
        standalone = Standalone()
        self.builder.register_instance(Standalone, standalone)
        # Registering the class again discards the instance registration.
        self.builder.register_class(Standalone)
        built = self.builder.build()
        first = built.resolve(Standalone)
        self.assertIsNot(first, standalone)
        second = built.resolve(Standalone)
        self.assertIsNot(first, second)

    def test_built_containers_are_isolated(self):
        """Single-instance scope must not leak across separate build() calls."""
        self.builder.register_class(
            Standalone, component_scope=dic.scope.SingleInstance)
        container_a = self.builder.build()
        container_b = self.builder.build()
        a1 = container_a.resolve(Standalone)
        a2 = container_a.resolve(Standalone)
        b1 = container_b.resolve(Standalone)
        b2 = container_b.resolve(Standalone)
        # Same singleton within a container, distinct across containers.
        self.assertIs(a1, a2)
        self.assertIsNot(a1, b1)
        self.assertIs(b1, b2)

    def test_register_module(self):
        self.builder.register_module(SimpleModule())
        built = self.builder.build()
        self.assertIn(Standalone, built.registry_map)
class ContainerTestCase(unittest.TestCase):
    """Resolution behaviour of containers built with dic."""

    def setUp(self):
        self.builder = dic.container.ContainerBuilder()

    def test_resolve_simple_class(self):
        # Arrange
        self.builder.register_class(Standalone)
        container = self.builder.build()
        # Act
        x = container.resolve(Standalone)
        # Assert
        self.assertIsInstance(x, Standalone)

    def test_resolve_with_basic_dep(self):
        # Arrange
        self.builder.register_class(Standalone)
        self.builder.register_class(SimpleComponent)
        container = self.builder.build()
        # Act
        x = container.resolve(SimpleComponent)
        # Assert
        self.assertIsInstance(x, SimpleComponent)
        self.assertIsInstance(x.standalone, Standalone)

    def test_resolve_throws_with_missing_dep(self):
        # Arrange
        self.builder.register_class(SimpleComponent)
        container = self.builder.build()
        # Act
        # Assert
        # Standalone was never registered, so resolution must fail.
        with self.assertRaises(dic.container.DependencyResolutionError) as cm:
            container.resolve(SimpleComponent)

    def test_resolve_single_instance(self):
        # Arrange
        self.builder.register_class(Standalone, component_scope=dic.scope.SingleInstance)
        container = self.builder.build()
        # Act
        x = container.resolve(Standalone)
        y = container.resolve(Standalone)
        # Assert
        self.assertIs(x, y)

    def test_resolve_custom_tag(self):
        # Note that this isn't an advertised feature of dic, but still good to test.
        self.builder.register_class(Standalone, component_scope=dic.scope.SingleInstance, register_as='X')
        self.builder.register_class(Standalone, component_scope=dic.scope.SingleInstance, register_as='Y')
        container = self.builder.build()
        # Act
        x = container.resolve('X')
        y = container.resolve('Y')
        # Assert
        # Each tag is its own registration, hence its own singleton.
        self.assertIsNot(x, y)
        self.assertIsInstance(x, Standalone)
        self.assertIsInstance(y, Standalone)

    def test_resolve_dep_single_instance(self):
        # Arrange
        self.builder.register_class(Standalone, component_scope=dic.scope.SingleInstance)
        self.builder.register_class(SimpleComponent)
        container = self.builder.build()
        # Act
        x = container.resolve(SimpleComponent)
        y = container.resolve(Standalone)
        # Assert
        # The injected dependency is the same singleton resolved directly.
        self.assertIs(x.standalone, y)

    def test_resolve_instance_per_dep(self):
        # Arrange
        self.builder.register_class(Standalone)
        container = self.builder.build()
        # Act
        x = container.resolve(Standalone)
        y = container.resolve(Standalone)
        # Assert
        self.assertIsNot(x, y)

    def test_resolve_via_alias(self):
        # Arrange
        self.builder.register_class(SpecialStandalone, register_as=[Standalone])
        container = self.builder.build()
        # Act
        x = container.resolve(Standalone)
        # Assert
        self.assertIsInstance(x, SpecialStandalone)

    def test_resolve_with_callback(self):
        # Arrange
        standalone = Standalone()
        self.builder.register_callback(SimpleComponent, lambda c: SimpleComponent(standalone))
        container = self.builder.build()
        # Act
        component = container.resolve(SimpleComponent)
        # Assert
        self.assertIs(component.standalone, standalone)

    def test_resolve_callback_respects_scope(self):
        # Arrange
        self.builder.register_class(Standalone, component_scope=dic.scope.SingleInstance)
        self.builder.register_callback(SimpleComponent, lambda c: SimpleComponent(c.resolve(Standalone)))
        container = self.builder.build()
        # Act
        component1 = container.resolve(SimpleComponent)
        component2 = container.resolve(SimpleComponent)
        # Assert
        # The components differ but share the single-instance dependency.
        self.assertIsNot(component1, component2)
        self.assertIs(component1.standalone, component2.standalone)

    def test_resolve_instance(self):
        # Arrange
        standalone = Standalone()
        self.builder.register_instance(Standalone, standalone)
        container = self.builder.build()
        # Act
        x = container.resolve(Standalone)
        y = container.resolve(Standalone)
        # Assert
        self.assertIs(x, standalone)
        self.assertIs(y, standalone)

    def test_resolve_thread_safe(self):
        # Obviously can't test this 100%, but should be enough to see if
        # it has been done right-ish...
        # Arrange
        finish_first = threading.Event()
        did_first = threading.Event()
        did_second = threading.Event()
        expected_first = Standalone()
        expected_second = Standalone()
        actual = [None, None]

        # Shared factory callback: the first resolve blocks until released,
        # so a correctly locked container must delay the second resolve.
        def resolve_standalone(component_context):
            if actual[0] is None:
                actual[0] = expected_first
                did_first.set()
                # This should cause the container to lock when resolving the second thing
                finish_first.wait()
            elif actual[1] is None:
                actual[1] = expected_second
                did_second.set()

        self.builder.register_callback(Standalone, resolve_standalone)
        container = self.builder.build()
        # Act/Assert
        threading.Thread(target=container.resolve, args=(Standalone,)).start()
        threading.Thread(target=container.resolve, args=(Standalone,)).start()
        time.sleep(2)
        self.assertTrue(did_first.is_set())
        self.assertIs(expected_first, actual[0])
        # While the first resolve is blocked, the second must not have run.
        self.assertFalse(did_second.is_set())
        # finish the first resolve
        finish_first.set()
        # wait for the second resolve to finish
        did_second.wait(timeout=2)
        self.assertIs(expected_second, actual[1])
if __name__ == '__main__':
    # Entry point: run this test module directly.
    unittest.main()
|
tests.py | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
from decimal import Decimal, Rounded
import re
import threading
import unittest
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, reset_queries, transaction)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.utils import format_number, CursorWrapper
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature, skipIfDBFeature)
from django.test.utils import str_prefix
from django.utils import six
from django.utils.six.moves import range
from . import models
class DummyBackendTest(TestCase):

    def test_no_databases(self):
        """An empty DATABASES setting defaults to the dummy backend."""
        handler = ConnectionHandler({})
        default = handler[DEFAULT_DB_ALIAS]
        self.assertEqual(
            default.settings_dict['ENGINE'], 'django.db.backends.dummy')
        # The dummy backend refuses to actually connect.
        with self.assertRaises(ImproperlyConfigured):
            default.ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
    """Backend tests that only make sense against a live Oracle database."""

    def test_quote_name(self):
        # Check that '%' chars are escaped for query execution.
        name = '"SOME%NAME"'
        quoted_name = connection.ops.quote_name(name)
        # Applying the %-operator must be a no-op once the name is escaped.
        self.assertEqual(quoted_name % (), name)

    def test_dbms_session(self):
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        from django.db.backends.oracle.base import convert_unicode
        with connection.cursor() as cursor:
            cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                            [convert_unicode('_django_testing!')])

    def test_cursor_var(self):
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        from django.db.backends.oracle.base import Database
        with connection.cursor() as cursor:
            var = cursor.var(Database.STRING)
            cursor.execute("BEGIN %s := 'X'; END; ", [var])
            self.assertEqual(var.getvalue(), 'X')

    def test_long_string(self):
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
            long_str = ''.join(six.text_type(x) for x in range(4000))
            cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
            cursor.execute('SELECT text FROM ltext')
            row = cursor.fetchone()
            # The NCLOB column comes back as a LOB handle; read() gets text.
            self.assertEqual(long_str, row[0].read())
            cursor.execute('DROP TABLE ltext')

    def test_client_encoding(self):
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        connection.ensure_connection()
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")

    def test_order_of_nls_parameters(self):
        # an 'almost right' datetime should work with configured
        # NLS parameters as per #18465.
        with connection.cursor() as cursor:
            query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
            # Test that the query succeeds without errors - pre #18465 this
            # wasn't the case.
            cursor.execute(query)
            self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
    longMessage = True

    def test_autoincrement(self):
        """
        Check that auto_increment fields are created with the AUTOINCREMENT
        keyword in order to be monotonically increasing. Refs #10164.
        """
        with connection.schema_editor(collect_sql=True) as editor:
            editor.create_model(models.Square)
            create_sql = editor.collected_sql[0]
        id_column = re.search('"id" ([^,]+),', create_sql)
        self.assertIsNotNone(id_column)
        self.assertEqual(
            'integer NOT NULL PRIMARY KEY AUTOINCREMENT', id_column.group(1),
            "Wrong SQL used to create an auto-increment column on SQLite")

    def test_aggregation(self):
        """
        #19360: Raise NotImplementedError when aggregating on date/time fields.
        """
        items = models.Item.objects.all()
        for aggregate in (Sum, Avg, Variance, StdDev):
            for field in ('time', 'date', 'last_modified'):
                with self.assertRaises(NotImplementedError):
                    items.aggregate(aggregate(field))
            # Composite expressions over such fields must be rejected too.
            with self.assertRaises(NotImplementedError):
                items.aggregate(
                    complex=aggregate('last_modified') + aggregate('last_modified'))
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
    """Backend tests that only run against a live PostgreSQL database."""

    def assert_parses(self, version_string, version):
        # Helper: parsing the server banner must yield the numeric version.
        self.assertEqual(pg_version._parse_version(version_string), version)

    def test_parsing(self):
        """Test PostgreSQL version parsing from `SELECT version()` output"""
        self.assert_parses("PostgreSQL 9.3 beta4", 90300)
        self.assert_parses("PostgreSQL 9.3", 90300)
        self.assert_parses("EnterpriseDB 9.3", 90300)
        self.assert_parses("PostgreSQL 9.3.6", 90306)
        self.assert_parses("PostgreSQL 9.4beta1", 90400)
        self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)

    def test_version_detection(self):
        """Test PostgreSQL version detection"""

        # Helper mocks
        class CursorMock(object):
            "Very simple mock of DB-API cursor"
            def execute(self, arg):
                pass

            def fetchone(self):
                return ["PostgreSQL 9.3"]

            def __enter__(self):
                return self

            def __exit__(self, type, value, traceback):
                pass

        class OlderConnectionMock(object):
            "Mock of psycopg2 (< 2.0.12) connection"
            def cursor(self):
                return CursorMock()

        # psycopg2 < 2.0.12 code path
        conn = OlderConnectionMock()
        self.assertEqual(pg_version.get_version(conn), 90300)

    def test_connect_and_rollback(self):
        """
        PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
        transaction is rolled back (#17062).
        """
        databases = copy.deepcopy(settings.DATABASES)
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Ensure the database default time zone is different than
            # the time zone in new_connection.settings_dict. We can
            # get the default time zone by reset & show.
            cursor = new_connection.cursor()
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
            new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
            new_connection.close()

            # Fetch a new connection with the new_tz as default
            # time zone, run a query and rollback.
            new_connection.settings_dict['TIME_ZONE'] = new_tz
            new_connection.set_autocommit(False)
            cursor = new_connection.cursor()
            new_connection.rollback()

            # Now let's see if the rollback rolled back the SET TIME ZONE.
            cursor.execute("SHOW TIMEZONE")
            tz = cursor.fetchone()[0]
            self.assertEqual(new_tz, tz)
        finally:
            new_connection.close()

    def test_connect_non_autocommit(self):
        """
        The connection wrapper shouldn't believe that autocommit is enabled
        after setting the time zone when AUTOCOMMIT is False (#21452).
        """
        databases = copy.deepcopy(settings.DATABASES)
        databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Open a database connection.
            new_connection.cursor()
            self.assertFalse(new_connection.get_autocommit())
        finally:
            new_connection.close()

    def _select(self, val):
        # Helper: round-trip a single value through the database.
        with connection.cursor() as cursor:
            cursor.execute("SELECT %s", (val,))
            return cursor.fetchone()[0]

    def test_select_ascii_array(self):
        a = ["awef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])

    def test_select_unicode_array(self):
        a = ["ᄲawef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])

    def test_lookup_cast(self):
        # Text lookups must cast the column with '::text' for the operators.
        from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
        do = DatabaseOperations(connection=None)
        for lookup in ('iexact', 'contains', 'icontains', 'startswith',
                       'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            self.assertIn('::text', do.lookup_cast(lookup))

    def test_correct_extraction_psycopg2_version(self):
        # The version property must ignore non-numeric suffixes like '.dev0'.
        from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper
        version_path = 'django.db.backends.postgresql_psycopg2.base.Database.__version__'
        with mock.patch(version_path, '2.6.9'):
            self.assertEqual(DatabaseWrapper.psycopg2_version.__get__(self), (2, 6, 9))
        with mock.patch(version_path, '2.5.dev0'):
            self.assertEqual(DatabaseWrapper.psycopg2_version.__get__(self), (2, 5))
class DateQuotingTest(TestCase):
    """Date functions must work on fields named like SQL keywords (#12818)."""

    def test_django_date_trunc(self):
        """django_date_trunc copes with a model field named 'year' clashing
        with the 'year' truncation kind - see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        last_updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=last_updated)
        truncated = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(truncated), [datetime.date(2010, 1, 1)])

    def test_django_date_extract(self):
        """django_date_extract copes with a model field named 'day' clashing
        with the 'day' extraction kind - see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        last_updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=last_updated)
        matching = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(matching), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    """Tests for connection.ops.last_executed_query and DEBUG query logging."""

    def test_last_executed_query(self):
        """
        last_executed_query should not raise an exception even if no previous
        query has been run.
        """
        cursor = connection.cursor()
        try:
            connection.ops.last_executed_query(cursor, '', ())
        except Exception:
            self.fail("'last_executed_query' should not raise an exception.")

    def test_debug_sql(self):
        # With DEBUG=True each query is recorded on connection.queries.
        list(models.Reporter.objects.filter(first_name="test"))
        sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", sql)
        self.assertIn(models.Reporter._meta.db_table, sql)

    def test_query_encoding(self):
        """
        Test that last_executed_query() returns an Unicode string
        """
        # Binary parameter plus a non-ASCII alias exercises the decoding path.
        data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
        sql, params = data.query.sql_with_params()
        cursor = data.query.get_compiler('default').execute_sql(CURSOR)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertIsInstance(last_sql, six.text_type)

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_no_interpolation_on_sqlite(self):
        # Regression for #17158
        # This shouldn't raise an exception
        query = "SELECT strftime('%Y', 'now');"
        connection.cursor().execute(query)
        # The recorded SQL keeps '%Y' escaped as '%%Y', proving the backend
        # never tried to %-interpolate the literal.
        self.assertEqual(connection.queries[-1]['sql'],
                         str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
    """Parameter-count mismatches must be rejected by executemany."""

    def test_bad_parameter_count(self):
        "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
        cursor = connection.cursor()
        table = connection.introspection.table_name_converter('backends_square')
        root_col = connection.ops.quote_name('root')
        square_col = connection.ops.quote_name('square')
        sql = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
            table, root_col, square_col)
        # Too many parameters for two placeholders.
        with self.assertRaises(Exception):
            cursor.executemany(sql, [(1, 2, 3)])
        # Too few parameters.
        with self.assertRaises(Exception):
            cursor.executemany(sql, [(1,)])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """
    available_apps = ['backends']

    def test_sequence_name_length_limits_create(self):
        """Test creation of model with long name and long pk name doesn't error. Ref #8901"""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    def test_sequence_name_length_limits_m2m(self):
        """Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
        obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)

    def test_sequence_name_length_limits_flush(self):
        """Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually
        # Some convenience aliases
        VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [
            VLM._meta.db_table,
            VLM_m2m._meta.db_table,
        ]
        sequences = [
            {
                'column': VLM._meta.pk.column,
                'table': VLM._meta.db_table
            },
        ]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
class SequenceResetTest(TestCase):
    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Insert a row whose primary key is chosen explicitly, leaving the
        # backend's id sequence out of sync.
        models.Post.objects.create(id=10, name='1st post', text='hello world')
        # Regenerate and run the sequence-reset SQL for the Post model.
        cursor = connection.cursor()
        reset_statements = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
        for statement in reset_statements:
            cursor.execute(statement)
        # A freshly created object must now receive a PK above the one we
        # assigned manually.
        new_post = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertGreater(new_post.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
    """Verify the connection_created signal fires on (re)connection and
    stops firing after the receiver is disconnected."""
    available_apps = []

    # Unfortunately with sqlite3 the in-memory test database cannot be closed,
    # and so it cannot be re-opened during testing.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        data = {}

        def receiver(sender, connection, **kwargs):
            data["connection"] = connection

        connection_created.connect(receiver)
        connection.close()
        # cursor() re-opens the closed connection, which must fire the signal.
        connection.cursor()
        self.assertIs(data["connection"].connection, connection.connection)
        connection_created.disconnect(receiver)
        data.clear()
        connection.cursor()
        # Receiver disconnected: no signal, so the dict stays empty.
        self.assertEqual(data, {})
class EscapingChecks(TestCase):
    """
    All tests in this test case are also run with settings.DEBUG=True in
    EscapingChecksDebug test case, to also test CursorDebugWrapper.
    """
    # Evaluated at class-definition time; some backends require a suffix
    # (e.g. " FROM DUAL") to make a bare SELECT valid.
    bare_select_suffix = connection.features.bare_select_suffix

    def test_paramless_no_escaping(self):
        # Without a params argument, '%s' in the SQL must pass through
        # literally instead of being treated as a placeholder.
        cursor = connection.cursor()
        cursor.execute("SELECT '%s'" + self.bare_select_suffix)
        self.assertEqual(cursor.fetchall()[0][0], '%s')

    def test_parameter_escaping(self):
        # '%%' collapses to a literal '%' while %s is substituted normally.
        cursor = connection.cursor()
        cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
        self.assertEqual(cursor.fetchall()[0], ('%', '%d'))

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This is an sqlite-specific issue")
    def test_sqlite_parameter_escaping(self):
        # 13648: '%s' escaping support for sqlite3
        cursor = connection.cursor()
        cursor.execute("select strftime('%s', date('now'))")
        response = cursor.fetchall()[0][0]
        # response should be a non-zero integer (seconds since the epoch)
        self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
    # Re-runs every EscapingChecks test with DEBUG=True so the queries go
    # through CursorDebugWrapper as well.
    pass
class BackendTestCase(TransactionTestCase):
    """Smoke tests for cursor behaviour (execute/executemany, param styles,
    fetches, context managers) and connection-level APIs."""
    available_apps = ['backends']

    def create_squares_with_executemany(self, args):
        # Convenience wrapper: insert many Square rows with 'format' params.
        self.create_squares(args, 'format', True)

    def create_squares(self, args, paramstyle, multiple):
        # Insert Square rows via raw SQL using the requested paramstyle;
        # `multiple` chooses executemany() over execute().
        cursor = connection.cursor()
        opts = models.Square._meta
        tbl = connection.introspection.table_name_converter(opts.db_table)
        f1 = connection.ops.quote_name(opts.get_field('root').column)
        f2 = connection.ops.quote_name(opts.get_field('square').column)
        if paramstyle == 'format':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
        elif paramstyle == 'pyformat':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
        else:
            raise ValueError("unsupported paramstyle in test")
        if multiple:
            cursor.executemany(query, args)
        else:
            cursor.execute(query, args)

    def test_cursor_executemany(self):
        # 4896: Test cursor.executemany
        args = [(i, i ** 2) for i in range(-5, 6)]
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    def test_cursor_executemany_with_empty_params_list(self):
        # 4765: executemany with params=[] does nothing
        args = []
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 0)

    def test_cursor_executemany_with_iterator(self):
        # 10320: executemany accepts iterators
        args = iter((i, i ** 2) for i in range(-3, 2))
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 5)
        args = iter((i, i ** 2) for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 9)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_execute_with_pyformat(self):
        # 10070: Support pyformat style passing of parameters
        args = {'root': 3, 'square': 9}
        self.create_squares(args, 'pyformat', multiple=False)
        self.assertEqual(models.Square.objects.count(), 1)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat(self):
        # 10070: Support pyformat style passing of parameters
        args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat_iterator(self):
        args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 5)
        args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 9)

    def test_unicode_fetches(self):
        # 6254: fetchone, fetchmany, fetchall return strings as unicode objects
        qn = connection.ops.quote_name
        models.Person(first_name="John", last_name="Doe").save()
        models.Person(first_name="Jane", last_name="Doe").save()
        models.Person(first_name="Mary", last_name="Agnelline").save()
        models.Person(first_name="Peter", last_name="Parker").save()
        models.Person(first_name="Clark", last_name="Kent").save()
        opts2 = models.Person._meta
        f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
        query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
            % (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
               qn(f3.column)))
        cursor = connection.cursor()
        cursor.execute(query2)
        # Ordered by first_name, so the fetches walk the rows alphabetically.
        self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
        self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
        self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])

    def test_unicode_password(self):
        # Connecting with a non-ASCII password must not blow up with an
        # encoding error; a DatabaseError (wrong password) is acceptable.
        old_password = connection.settings_dict['PASSWORD']
        connection.settings_dict['PASSWORD'] = "françois"
        try:
            connection.cursor()
        except DatabaseError:
            # As password is probably wrong, a database exception is expected
            pass
        except Exception as e:
            self.fail("Unexpected error raised with unicode password: %s" % e)
        finally:
            connection.settings_dict['PASSWORD'] = old_password

    def test_database_operations_helper_class(self):
        # Ticket #13630: ops must hold a backreference to its connection.
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)

    def test_database_operations_init(self):
        """
        Test that DatabaseOperations initialization doesn't query the database.
        See #17656.
        """
        with self.assertNumQueries(0):
            connection.ops.__class__(connection)

    def test_cached_db_features(self):
        # These feature flags must resolve to a concrete boolean.
        self.assertIn(connection.features.supports_transactions, (True, False))
        self.assertIn(connection.features.supports_stddev, (True, False))
        self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))

    def test_duplicate_table_error(self):
        """ Test that creating an existing table returns a DatabaseError """
        cursor = connection.cursor()
        query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
        with self.assertRaises(DatabaseError):
            cursor.execute(query)

    def test_cursor_contextmanager(self):
        """
        Test that cursors can be used as a context manager
        """
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing closed cursor (psycopg2 has InterfaceError, rest seem
        # to use ProgrammingError).
        with self.assertRaises(connection.features.closed_cursor_error_class):
            # cursor should be closed, so no queries should be possible.
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)

    @unittest.skipUnless(connection.vendor == 'postgresql',
                         "Psycopg2 specific cursor.closed attribute needed")
    def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 offers us a way to check that by closed attribute.
        # So, run only on psycopg2 for that reason.
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        self.assertTrue(cursor.closed)

    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_is_usable_after_database_disconnects(self):
        """
        Test that is_usable() doesn't crash when the database disconnects.
        Regression for #21553.
        """
        # Open a connection to the database.
        with connection.cursor():
            pass
        # Emulate a connection close by the database.
        connection._close()
        # Even then is_usable() should not raise an exception.
        try:
            self.assertFalse(connection.is_usable())
        finally:
            # Clean up the mess created by connection._close(). Since the
            # connection is already closed, this crashes on some backends.
            try:
                connection.close()
            except Exception:
                pass

    @override_settings(DEBUG=True)
    def test_queries(self):
        """
        Test the documented API of connection.queries.
        """
        with connection.cursor() as cursor:
            reset_queries()
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
        self.assertEqual(1, len(connection.queries))
        self.assertIsInstance(connection.queries, list)
        self.assertIsInstance(connection.queries[0], dict)
        six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
        reset_queries()
        self.assertEqual(0, len(connection.queries))

    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    @override_settings(DEBUG=True)
    def test_queries_limit(self):
        """
        Test that the backend doesn't store an unlimited number of queries.
        Regression for #12581.
        """
        old_queries_limit = BaseDatabaseWrapper.queries_limit
        BaseDatabaseWrapper.queries_limit = 3
        new_connections = ConnectionHandler(settings.DATABASES)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        # Initialize the connection and clear initialization statements.
        with new_connection.cursor():
            pass
        new_connection.queries_log.clear()
        try:
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
            # Two queries is still under the limit of 3: no warning yet.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(2, len(new_connection.queries))
                self.assertEqual(0, len(w))
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
            # Four queries exceed the limit: the log is capped and a warning
            # is emitted.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(3, len(new_connection.queries))
                self.assertEqual(1, len(w))
                self.assertEqual(str(w[0].message), "Limit for query logging "
                    "exceeded, only the last 3 queries will be returned.")
        finally:
            # Restore the class-level limit and close the extra connection.
            BaseDatabaseWrapper.queries_limit = old_queries_limit
            new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# In each test below we also verify that the raised exception is of type django.db.utils.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
    """Exercise foreign-key constraint enforcement and the APIs for
    temporarily disabling constraint checking."""
    available_apps = ['backends']

    def setUp(self):
        # Create a Reporter.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        # reporter_id=30 points at a Reporter that does not exist.
        a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        a2 = models.Article(headline='This is another test', reporter=self.r,
                            pub_date=datetime.datetime(2012, 8, 3),
                            reporter_proxy_id=30)
        self.assertRaises(IntegrityError, a2.save)

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
        a1 = models.Article.objects.get(headline="Test article")
        a1.reporter_id = 30
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        # Create another article
        r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
        models.Article.objects.create(headline='Another article',
                                      pub_date=datetime.datetime(1988, 5, 15),
                                      reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
        a2 = models.Article.objects.get(headline='Another article')
        a2.reporter_proxy_id = 30
        self.assertRaises(IntegrityError, a2.save)

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                connection.disable_constraint_checking()
                a.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            # Roll back so the intentionally bad row never persists.
            transaction.set_rollback(True)

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            transaction.set_rollback(True)

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in the DB.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            with connection.constraint_checks_disabled():
                a.save()
                # Re-checking constraints now must flag the orphaned FK.
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
    """Tests for per-thread connection isolation and the
    allow_thread_sharing opt-in. Refs #17258."""
    available_apps = ['backends']

    def test_default_connection_thread_local(self):
        """
        Ensure that the default connection (i.e. django.db.connection) is
        different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        connection.cursor()
        connections_dict[id(connection)] = connection

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.allow_thread_sharing = True
            connection.cursor()
            connections_dict[id(connection)] = connection

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Check that each created connection got different inner connection.
        self.assertEqual(
            len(set(conn.connection for conn in connections_dict.values())),
            3)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_connections_thread_local(self):
        """
        Ensure that the connections are different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        for conn in connections.all():
            connections_dict[id(conn)] = conn

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.allow_thread_sharing = True
                connections_dict[id(conn)] = conn

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Main thread + two workers, each with its own set of connections.
        self.assertEqual(len(connections_dict), 6)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_pass_connection_between_threads(self):
        """
        Ensure that a connection can be passed from one thread to the other.
        Refs #17258.
        """
        models.Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    models.Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()

        # Without touching allow_thread_sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to False
        connections['default'].allow_thread_sharing = False
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to True
        connections['default'].allow_thread_sharing = True
        exceptions = []
        do_thread()
        # All good
        self.assertEqual(exceptions, [])

    def test_closing_non_shared_connections(self):
        """
        Ensure that a connection that is not explicitly shareable cannot be
        closed by another thread.
        Refs #17258.
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)

        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].allow_thread_sharing = True
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for autoincrement primary key.
    """
    @skipIfDBFeature('allows_auto_pk_0')
    def test_zero_as_autoval(self):
        # On backends that reject 0 as an autoincrement PK, the ORM must
        # surface a ValueError rather than silently storing the row.
        self.assertRaises(ValueError, models.Square.objects.create,
                          id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
    """Behaviour of FK/M2M fields when the database-level constraint is
    absent or violated."""

    def test_can_reference_existent(self):
        target = models.Object.objects.create()
        reference = models.ObjectReference.objects.create(obj=target)
        self.assertEqual(reference.obj, target)
        # The same row fetched back from the DB resolves to the same target.
        fetched = models.ObjectReference.objects.get(obj=target)
        self.assertEqual(fetched.obj, target)

    def test_can_reference_non_existent(self):
        self.assertFalse(models.Object.objects.filter(id=12345).exists())
        # A dangling obj_id can be stored and queried back...
        dangling = models.ObjectReference.objects.create(obj_id=12345)
        self.assertEqual(dangling, models.ObjectReference.objects.get(obj_id=12345))
        # ...but dereferencing the missing target raises DoesNotExist.
        with self.assertRaises(models.Object.DoesNotExist):
            dangling.obj

    def test_many_to_many(self):
        owner = models.Object.objects.create()
        owner.related_objects.create()
        self.assertEqual(models.Object.objects.count(), 2)
        self.assertEqual(owner.related_objects.count(), 1)
        # Insert a dangling through-row directly; it must not surface via the
        # related manager but is counted on the intermediary table.
        through = models.Object._meta.get_field("related_objects").rel.through
        through.objects.create(from_object_id=owner.id, to_object_id=12345)
        self.assertEqual(owner.related_objects.count(), 1)
        self.assertEqual(through.objects.count(), 2)
class BackendUtilTests(TestCase):
    def test_format_number(self):
        """
        Test the format_number converter utility
        """
        def check(value, max_digits, places, expected):
            self.assertEqual(format_number(Decimal(value), max_digits, places), expected)

        # (input, max_digits, decimal_places, expected output)
        cases = [
            ('0', 12, 3, '0.000'),
            ('0', 12, 8, '0.00000000'),
            ('1', 12, 9, '1.000000000'),
            ('0.00000000', 12, 8, '0.00000000'),
            ('0.000000004', 12, 8, '0.00000000'),
            ('0.000000008', 12, 8, '0.00000001'),
            ('0.000000000000000000999', 10, 8, '0.00000000'),
            ('0.1234567890', 12, 10, '0.1234567890'),
            ('0.1234567890', 12, 9, '0.123456789'),
            ('0.1234567890', 12, 8, '0.12345679'),
            ('0.1234567890', 12, 5, '0.12346'),
            ('0.1234567890', 12, 3, '0.123'),
            ('0.1234567890', 12, 1, '0.1'),
            ('0.1234567890', 12, 0, '0'),
            ('0.1234567890', None, 0, '0'),
            ('1234567890.1234567890', None, 0, '1234567890'),
            ('1234567890.1234567890', None, 2, '1234567890.12'),
            ('0.1234', 5, None, '0.1234'),
            ('123.12', 5, None, '123.12'),
        ]
        for value, max_digits, places, expected in cases:
            check(value, max_digits, places, expected)

        # Values that cannot be represented exactly within max_digits must
        # signal Rounded rather than silently truncating.
        with self.assertRaises(Rounded):
            check('0.1234567890', 5, None, '0.12346')
        with self.assertRaises(Rounded):
            check('1234567890.1234', 5, None, '1234600000')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
    """Rows written from a worker thread must be visible through the shared
    in-memory SQLite database."""
    available_apps = ['backends']

    def test_database_sharing_in_threads(self):
        def create_object():
            models.Object.objects.create()

        # One row from the main thread, one from a worker thread.
        create_object()
        thread = threading.Thread(target=create_object)
        thread.start()
        thread.join()
        self.assertEqual(models.Object.objects.count(), 2)
|
spotify_lyrics.py | #!/usr/bin/env python3
import atexit
import os
import textwrap
import threading
import time
import urllib.error
import urllib.parse
from pathlib import Path
from subprocess import call
from urllib.request import urlretrieve

import dbus.mainloop.glib
import dbus.service
import ueberzug.lib.v0 as ueberzug
from gi.repository import GLib

# BUG FIX: "import .utils" is a SyntaxError -- the import statement cannot
# take a leading dot (only "from . import utils" can). This file is run as a
# stand-alone script (see the __main__ guard), so the sibling module is
# imported by its plain name. `urllib.error` is also imported explicitly
# because update_directories() catches urllib.error.URLError.
import utils

# Restore the terminal cursor even if the program exits abnormally.
atexit.register(utils.show_cursor)
class Lyrics(dbus.service.Object):
    """Terminal lyrics viewer for the currently playing Spotify track.

    Shows scrollable lyrics next to the album art (drawn with ueberzug) and
    exposes a D-Bus method so external callers can scroll the view.
    """

    def __init__(self):
        self.spotify = utils.Spotify()
        self.home = str(Path.home())
        # Index of the first lyrics line currently shown on screen.
        self._current_line = 0
        # Dirty flag: the lyrics pane is redrawn when True.
        self.changed = True
        self.album_hidden = False
        # Register on the session bus so `move` can be called remotely.
        self.bus = dbus.SessionBus()
        name = dbus.service.BusName('com.spotify_lyrics.line', bus=self.bus)
        super().__init__(name, '/com/spotify_lyrics')

    def update_directories(self):
        """Build the cache paths for the current track and fetch album art.

        Layout: ~/.cache/spotify-lyrics/<artist>/<album>/<song> plus an
        album_arts directory per artist. '/' is stripped from names so they
        stay single path components.
        NOTE(review): self.album is not stripped of '/' in image_file --
        an album name containing '/' would break the art path; confirm.
        """
        self.lyrics_directory = os.path.join(self.home, '.cache', 'spotify-lyrics')
        self.artist_directory = os.path.join(
            self.lyrics_directory, self.artist.replace('/', ''))
        self.album_directory = os.path.join(
            self.artist_directory, self.album.replace('/', ''))
        self.image_directory = os.path.join(self.artist_directory, 'album_arts')
        self.lyrics_file = os.path.join(
            self.album_directory, self.song.replace('/', ''))
        self.image_file = f'{os.path.join(self.image_directory, self.album)}.png'
        # Parents are created before children, so plain mkdir suffices.
        if not os.path.isdir(self.lyrics_directory): os.mkdir(self.lyrics_directory)
        if not os.path.isdir(self.artist_directory): os.mkdir(self.artist_directory)
        if not os.path.isdir(self.album_directory): os.mkdir(self.album_directory)
        if not os.path.isdir(self.image_directory): os.mkdir(self.image_directory)
        try:
            if not os.path.exists(self.image_file):
                urlretrieve(self.art_url, self.image_file)
        except FileNotFoundError:
            pass
        except urllib.error.URLError:
            # Art download is best-effort; lyrics still work without it.
            pass

    @property
    def current_line(self):
        return self._current_line

    @current_line.setter
    def current_line(self, value):
        # Clamp scrolling to [0, total_lines - n_entries] and mark dirty.
        self._current_line = value
        self._current_line = min(self._current_line, self.total_lines-self.n_entries)
        self._current_line = max(self._current_line, 0)
        self.changed = True

    @dbus.service.method('com.spotify_lyrics.line', signature='v')
    def move(self, val):
        """D-Bus entry point: scroll one line up or down (val is clamped)."""
        self.current_line += max(min(val, 1), -1)

    def print_metadata(self):
        """Clear the screen and print artist/album/song; forces a redraw."""
        self.changed = True
        os.system('clear')
        utils.move_cursor(0, 0)
        print(f'\033[95mArtist: {self.artist}\033[0m')
        print(f'\033[95mAlbum: {self.album}\033[0m')
        print(f'\033[95mSong: {self.song}\033[0m')

    def read_lyrics(self):
        """Return the cached lyrics for the current track."""
        with open(self.lyrics_file, 'r') as f:
            lyrics = ''.join(f.readlines())
        return lyrics

    def save_lyrics(self):
        """Write the fetched lyrics to the per-track cache file."""
        with open(self.lyrics_file, 'w') as f:
            f.write(self.lyrics)

    def update_lyrics(self):
        """Load lyrics from cache, fetching (and caching) them if missing."""
        if not os.path.exists(self.lyrics_file):
            self.lyrics = utils.fetch_lyrics(self.artist, self.song)
            self.save_lyrics()
        else:
            self.lyrics = self.read_lyrics()
        self._current_line = 0

    @ueberzug.Canvas()
    def main(self, loop, event, canvas):
        """Main UI loop: render lyrics/art and handle keyboard input.

        `canvas` is injected by the @ueberzug.Canvas() decorator; callers
        pass only (loop, event). Runs until `event` is cleared or 'q' quits.
        """
        self.rows, self.columns = utils.terminal_size()
        self.song, self.artist, self.album, self.art_url = self.spotify.metadata()
        self.update_directories()
        self.update_lyrics()
        # Album art occupies the right half of the terminal.
        album_cover = canvas.create_placement(
            'album_cover',
            x=self.columns//2, y=4,
            scaler=ueberzug.ScalerOption.COVER.value)
        album_cover.path = self.image_file
        if self.album_hidden:
            album_cover.visibility = ueberzug.Visibility.INVISIBLE
        else:
            album_cover.visibility = ueberzug.Visibility.VISIBLE
        utils.hide_cursor()
        self.print_metadata()
        start_row = 5
        with utils.KeyPoller() as key_poller:
            while event.is_set():
                # Track change: refresh paths, lyrics and art.
                song, artist, album, art_url = self.spotify.metadata()
                if self.song != song or self.artist != artist:
                    self.song = song
                    self.artist = artist
                    self.album = album
                    self.art_url = art_url
                    self.update_directories()
                    self.update_lyrics()
                    album_cover.path = self.image_file
                    self.print_metadata()
                # Terminal resize: re-clamp the scroll position and move art.
                rows, columns = utils.terminal_size()
                if self.rows != rows or self.columns != columns:
                    difference = rows - self.rows
                    self.rows, self.columns = rows, columns
                    if difference > 0:
                        self.current_line -= difference
                    self.current_line = max(0, self.current_line)
                    self.current_line = min(self.current_line,
                                            self.total_lines-self.n_entries)
                    album_cover.x = self.columns//2
                    self.print_metadata()
                if self.changed:
                    # Re-wrap to the available width (full width when the
                    # album art is hidden, left half otherwise) and repaint
                    # the visible window of lines.
                    lines = self.lyrics.split('\n')
                    wrapped_lines = []
                    for line in lines:
                        wrapped_lines.extend(
                            textwrap.fill(
                                line, (columns if self.album_hidden
                                       else columns//2-2)).split('\n'))
                    self.total_lines = len(wrapped_lines)
                    utils.move_cursor(0, start_row)
                    self.n_entries = min(rows+self.current_line-start_row,
                                         self.total_lines) - self.current_line
                    for i in range(self.current_line,
                                   self.current_line + self.n_entries):
                        utils.delete_line()
                        print(utils.boldify(wrapped_lines[i]))
                    utils.move_cursor(0, self.n_entries+start_row)
                    utils.delete_line()
                    self.changed = False
                # Keyboard handling; ord(key)==5/25 are Ctrl-E/Ctrl-Y.
                key = key_poller.poll(timeout=0.1)
                if key is not None:
                    if key == 'q':
                        os.system('clear')
                        loop.quit()
                        event.clear()
                        break
                    elif key == 'j' or ord(key) == 5:
                        self.current_line += 1
                    elif key == 'k' or ord(key) == 25:
                        self.current_line += -1
                    elif key == 'e':
                        # Open the cached lyrics file in $EDITOR.
                        try:
                            EDITOR = os.environ.get('EDITOR')
                            album_cover.visibility = ueberzug.Visibility.INVISIBLE
                            call([EDITOR, self.lyrics_file])
                            self.update_lyrics()
                            self.print_metadata()
                            utils.hide_cursor()
                            if self.album_hidden:
                                album_cover.visibility = ueberzug.Visibility.INVISIBLE
                            else:
                                album_cover.visibility = ueberzug.Visibility.VISIBLE
                        except TypeError:
                            # call([None, ...]) raises TypeError when EDITOR
                            # is unset.
                            os.system('clear')
                            print('$EDITOR is not set')
                            time.sleep(1)
                    elif key == 'r':
                        self.print_metadata()
                    elif key == 'd':
                        # Discard the cached lyrics and re-fetch.
                        os.remove(self.lyrics_file)
                        self.update_lyrics()
                    elif key == 'n':
                        self.spotify.next()
                    elif key == 'p':
                        self.spotify.prev()
                    elif key == 't':
                        self.spotify.toggle()
                    elif key == 'i':
                        # Toggle album art visibility.
                        self.album_hidden = not self.album_hidden
                        self.changed = True
                        if self.album_hidden:
                            album_cover.visibility = ueberzug.Visibility.INVISIBLE
                        else:
                            album_cover.visibility = ueberzug.Visibility.VISIBLE
                    elif key == 'h':
                        # Show the help screen for five seconds.
                        os.system('clear')
                        album_cover.visibility = ueberzug.Visibility.INVISIBLE
                        utils.move_cursor(0, 0)
                        utils.print_help()
                        time.sleep(5)
                        self.print_metadata()
                        if self.album_hidden:
                            album_cover.visibility = ueberzug.Visibility.INVISIBLE
                        else:
                            album_cover.visibility = ueberzug.Visibility.VISIBLE
                        key_poller.flush()
                    elif key == 'g':
                        # vim-style 'gg' jumps to the top.
                        modified_key = key_poller.poll(timeout=1.0)
                        if modified_key == 'g':
                            self.current_line = 0
                    elif key == 'G':
                        self.current_line = self.total_lines-self.n_entries
def main():
    """Run the lyrics UI in a worker thread while the GLib main loop serves
    D-Bus requests; Ctrl-C stops the loop and shuts the worker down."""
    run_event = threading.Event()
    run_event.set()
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    loop = GLib.MainLoop()
    # BUG FIX: create the thread before entering the try block. Previously a
    # KeyboardInterrupt raised while Lyrics()/the Thread was being constructed
    # left `lyrics_thread` unbound, so the finally clause died with a
    # NameError that masked the real exception.
    lyrics_thread = threading.Thread(target=Lyrics().main, args=(loop, run_event))
    try:
        lyrics_thread.start()
        loop.run()
    except KeyboardInterrupt:
        loop.quit()
    finally:
        # Tell the UI loop to exit and wait for it to finish.
        run_event.clear()
        lyrics_thread.join()


if __name__ == '__main__':
    main()
|
server.py | import os
import sys

# BUG FIX: the original '.\src' literal contains the invalid escape '\s'
# (a DeprecationWarning in Python 3, and Windows-only path syntax). Build
# the path portably instead.
sys.path.append(os.path.abspath(os.path.join('.', 'src')))
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# Use os.pathsep so the PYTHONPATH separator is correct on every platform
# (';' on Windows, ':' elsewhere); the hard-coded ';' broke POSIX systems.
os.environ['PYTHONPATH'] = os.path.abspath('..') + os.pathsep + os.path.abspath('.')

import logging
import threading

import pika
from config.custom_logging import LogConfig
from config.rabbitmq_conf import no_ack_status, rabbitmq_host

logconf = LogConfig("rabbitmq_test", logging.DEBUG)
log = logconf.create_logger()

# Worker threads are collected here so the main thread can join them.
threads = []
def thread1_func(channel1, channel2):
    """Consume queue 'output1' on channel1 and forward each message body
    unchanged to queue 'internal' via channel2. Blocks forever in
    start_consuming(); intended to run as a daemon thread.

    NOTE(review): pika channels are not thread-safe -- channel2 is also
    used by thread2_func in another thread; confirm this is safe here.
    """
    def receive1_func(ch, method, properties, body):
        # Forward the raw body, then ack the original delivery.
        channel2.basic_publish(exchange='', routing_key='internal', body=body)
        log.debug(f"Receive #{body} and send directly #{body} to queue internal")
        channel1.basic_ack(delivery_tag=method.delivery_tag)
    # NOTE(review): callback-first basic_consume is the pika <1.0 signature.
    channel1.basic_consume(receive1_func, queue='output1', no_ack=no_ack_status)
    log.debug("channel1 start consuming..")
    channel1.start_consuming()
def thread2_func(channel2, channel3):
    """Consume queue 'internal' on channel2, multiply each integer body by 3
    and publish the result to queue 'output2' via channel3. Blocks forever
    in start_consuming(); intended to run as a daemon thread.

    NOTE(review): unlike receive1_func, this consumer never calls basic_ack;
    confirm no_ack_status makes explicit acks unnecessary here.
    """
    def receive2_func(ch, method, properties, body):
        num = int(body) * 3
        log.debug(f"Receive #{body}, multiply by 3 and send directly #{num} to queue output2")
        channel3.basic_publish(exchange='', routing_key='output2', body=str(num))
    channel2.basic_consume(receive2_func, queue='internal', no_ack=no_ack_status)
    log.debug("channel2 start consuming..")
    channel2.start_consuming()
if __name__ == '__main__':
    # One connection/channel per queue: channel1 consumes 'output1',
    # channel2 bridges to 'internal', channel3 publishes to 'output2'.
    connection1 = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel1 = connection1.channel()
    channel1.queue_declare(queue='output1')
    connection2 = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel2 = connection2.channel()
    channel2.queue_declare(queue='internal')
    connection3 = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel3 = connection3.channel()
    channel3.queue_declare(queue='output2')
    # Daemon threads: each runs a blocking consume loop and must not keep
    # the interpreter alive on its own.
    # NOTE(review): channel2 is shared between both threads; pika blocking
    # connections/channels are generally not thread-safe — confirm this
    # sharing is intended.
    t1 = threading.Thread(target=thread1_func, args=(channel1, channel2,))
    t1.daemon = True
    threads.append(t1)
    t2 = threading.Thread(target=thread2_func, args=(channel2, channel3,))
    t2.daemon = True
    threads.append(t2)
    t1.start()
    t2.start()
    for t in threads:
        t.join()
|
main.py | from cpu import cpuUsage
from memSwap import memory
import argparse
from process import Top10ProcessesMemory
def main(t=10, i=1, **kwargs):
    """Run the CPU and memory/swap samplers concurrently, then print the
    top-10 processes by memory.

    t: total sampling time (seconds); i: sampling interval.
    The CLI caller passes ``None`` for omitted flags (argparse default),
    which previously bypassed the defaults here — fall back explicitly.
    Extra **kwargs (e.g. the argparse 'method' entry) are ignored.
    """
    import threading
    t = 10 if t is None else t
    i = 1 if i is None else i
    cputhread = threading.Thread(target=cpuUsage, args=(t, i))
    memSwapthread = threading.Thread(target=memory, args=(t, i))
    cputhread.start()
    memSwapthread.start()
    cputhread.join()
    memSwapthread.join()
    Top10ProcessesMemory()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CPU, Memory benchmark Tester')
    parser.set_defaults(method=main)
    # Explicit defaults: without them argparse stores None for omitted
    # flags, which then overrode main()'s own defaults via **vars(args).
    parser.add_argument('-t', '-time', type=int, default=10)
    parser.add_argument('-i', '-interval', type=int, default=1)
    args = parser.parse_args()
    # vars(args) also carries the 'method' entry itself; main() absorbs
    # it harmlessly through **kwargs.
    args.method(**vars(args))
|
test_html.py | from __future__ import print_function
import glob
import os
import re
import threading
import warnings
# imports needed for Python 3.x but will fail under Python 2.x
try:
from importlib import import_module, reload
except ImportError:
import_module = __import__
from distutils.version import LooseVersion
import pytest
import numpy as np
from numpy.random import rand
from pandas import (DataFrame, MultiIndex, read_csv, Timestamp, Index,
date_range, Series)
from pandas.compat import (map, zip, StringIO, string_types, BytesIO,
is_platform_windows, PY3)
from pandas.io.common import URLError, urlopen, file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
from pandas._libs.parsers import ParserError
import pandas.util.testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, network
def _have_module(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    try:
        import_module(module_name)
    except ImportError:
        return False
    return True
def _skip_if_no(module_name):
    """Skip the current test when *module_name* is not importable."""
    if _have_module(module_name):
        return
    pytest.skip("{0!r} not found".format(module_name))
def _skip_if_none_of(module_names):
    """Skip the current test unless every module in *module_names* imports.

    *module_names* may be a single name or an iterable of names.
    bs4 4.2.0 is known-broken for read_html and is rejected explicitly.
    """
    if isinstance(module_names, string_types):
        _skip_if_no(module_names)
        if module_names == 'bs4':
            import bs4
            if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
                pytest.skip("Bad version of bs4: 4.2.0")
    else:
        not_found = [name for name in module_names
                     if not _have_module(name)]
        # not_found is drawn from module_names, so the original
        # ``set(not_found) & set(module_names)`` intersection was always
        # equivalent to a plain truthiness check.
        if not_found:
            pytest.skip("{0!r} not found".format(not_found))
        if 'bs4' in module_names:
            import bs4
            if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
                pytest.skip("Bad version of bs4: 4.2.0")
DATA_PATH = tm.get_data_path()
def assert_framelist_equal(list1, list2, *args, **kwargs):
    """Assert that two lists of DataFrames are element-wise equal and
    that none of the frames is empty."""
    assert len(list1) == len(list2), ('lists are not of equal size '
                                      'len(list1) == {0}, '
                                      'len(list2) == {1}'.format(len(list1),
                                                                 len(list2)))
    paired = list(zip(list1, list2))
    both_frames = all(isinstance(a, DataFrame) and isinstance(b, DataFrame)
                      for a, b in paired)
    assert both_frames, 'not all list elements are DataFrames'
    for frame_i, frame_j in paired:
        tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
        assert not frame_i.empty, 'frames are both empty'
def test_bs4_version_fails():
    """read_html must refuse to run with the known-broken bs4 4.2.0."""
    _skip_if_none_of(('bs4', 'html5lib'))
    import bs4
    if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
        spam_path = os.path.join(DATA_PATH, "spam.html")
        tm.assert_raises(AssertionError, read_html, spam_path, flavor='bs4')
class ReadHtmlMixin(object):
    """Mixin that dispatches to read_html using the subclass's default
    ``flavor`` unless the caller supplies one explicitly."""

    def read_html(self, *args, **kwargs):
        if 'flavor' not in kwargs:
            kwargs['flavor'] = self.flavor
        return read_html(*args, **kwargs)
class TestReadHtml(ReadHtmlMixin):
flavor = 'bs4'
spam_data = os.path.join(DATA_PATH, 'spam.html')
spam_data_kwargs = {}
if PY3:
spam_data_kwargs['encoding'] = 'UTF-8'
banklist_data = os.path.join(DATA_PATH, 'banklist.html')
@classmethod
def setup_class(cls):
_skip_if_none_of(('bs4', 'html5lib'))
def test_to_html_compat(self):
df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
out = df.to_html()
res = self.read_html(out, attrs={'class': 'dataframe'}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@network
def test_banklist_url(self):
url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
df1 = self.read_html(url, 'First Federal Bank of Florida',
attrs={"id": 'table'})
df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
@network
def test_spam_url(self):
url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
df1 = self.read_html(url, '.*Water.*')
df2 = self.read_html(url, 'Unit')
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, '.*Florida.*',
attrs={'id': 'table'})
df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
def test_spam_no_types(self):
# infer_types removed in #10892
df1 = self.read_html(self.spam_data, '.*Water.*')
df2 = self.read_html(self.spam_data, 'Unit')
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == 'Proximates'
assert df1[0].columns[0] == 'Nutrient'
def test_spam_with_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*')
df2 = self.read_html(self.spam_data, 'Unit')
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == 'Proximates'
assert df1[0].columns[0] == 'Nutrient'
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, '.*Water.*', header=1)[0]
assert df.columns[0] == 'Proximates'
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_xrange(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=set([1, 2]))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=set([2, 1]))
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, '.*Water.*',
skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with tm.assert_raises_regex(TypeError, 'is not a valid type '
'for skipping rows'):
self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')
def test_index(self):
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, '.*Water.*')
df2 = self.read_html(data2, 'Unit')
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, '.*Water.*')
df2 = self.read_html(data, 'Unit')
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, '.*Water.*')
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, 'Unit')
assert_framelist_equal(df1, df2)
@network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html('git://github.com', match='.*Water.*')
@network
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html('http://www.a23950sdfa908sd.com',
match='.*Water.*')
except ValueError as e:
assert str(e) == 'No tables found'
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url), 'First',
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with tm.assert_raises_regex(ValueError, 'No tables found'):
self.read_html(url, 'First Federal Bank of Florida',
attrs={'id': 'tasdfable'})
def _bank_data(self, *args, **kwargs):
return self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'}, *args, **kwargs)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df = self._bank_data(header=[0, 1], skiprows=1,
tupleize_cols=True)[0]
assert isinstance(df.columns, Index)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url),
match=re.compile(re.compile('Florida')),
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
with tm.assert_raises_regex(ValueError,
r'\(you passed a negative value\)'):
self.read_html(self.spam_data, 'Water', skiprows=-1)
@network
def test_multiple_matches(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
assert len(dfs) > 1
@network
def test_python_docs_table(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(['Repo', 'What'])
@pytest.mark.slow
def test_thousands_macau_stats(self):
all_non_nan_table_index = -2
macau_data = os.path.join(DATA_PATH, 'macau.html')
dfs = self.read_html(macau_data, index_col=0,
attrs={'class': 'style1'})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
@pytest.mark.slow
def test_thousands_macau_index_col(self):
all_non_nan_table_index = -2
macau_data = os.path.join(DATA_PATH, 'macau.html')
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
data1 = '''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>'''
data2 = data1 + '''<table>
<tbody>
</tbody>
</table>'''
res1 = self.read_html(StringIO(data1))
res2 = self.read_html(StringIO(data2))
assert_framelist_equal(res1, res2)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
data = StringIO('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>
</body>
</html>''')
expected = DataFrame(data={'Header': 'first'}, index=[0])
result = self.read_html(data)[0]
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = '''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>'''
data1 = data_template.format(footer="")
data2 = data_template.format(
footer="<tr><td>footA</td><th>footB</th></tr>")
d1 = {'A': ['bodyA'], 'B': ['bodyB']}
d2 = {'A': ['bodyA', 'footA'], 'B': ['bodyB', 'footB']}
tm.assert_frame_equal(self.read_html(data1)[0], DataFrame(d1))
tm.assert_frame_equal(self.read_html(data2)[0], DataFrame(d2))
def test_countries_municipalities(self):
# GH5048
data1 = StringIO('''<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>''')
data2 = StringIO('''
<table>
<tbody>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>''')
res1 = self.read_html(data1)
res2 = self.read_html(data2, header=0)
assert_framelist_equal(res1, res2)
def test_nyse_wsj_commas_table(self):
data = os.path.join(DATA_PATH, 'nyse_wsj.html')
df = self.read_html(data, index_col=0, header=0,
attrs={'class': 'mdcTable'})[0]
columns = Index(['Issue(Roll over for charts and headlines)',
'Volume', 'Price', 'Chg', '% Chg'])
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, columns)
@pytest.mark.slow
def test_banklist_header(self):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'})[0]
ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
assert df.shape == ground_truth.shape
old = ['First Vietnamese American BankIn Vietnamese',
'Westernbank Puerto RicoEn Espanol',
'R-G Premier Bank of Puerto RicoEn Espanol',
'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
'Sanderson State Bank', 'Washington Mutual Bank',
'Silver State Bank', 'AmTrade International Bank',
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ['Closing Date', 'Updated Date']
converted[date_cols] = converted[date_cols]._convert(datetime=True,
coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = 'Gold Canyon'
with open(self.banklist_data, 'r') as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, 'Gold Canyon',
attrs={'id': 'table'})[0]
assert gc in df.to_string()
def test_different_number_of_rows(self):
expected = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>"""
out = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>"""
expected = self.read_html(expected, index_col=0)[0]
res = self.read_html(out, index_col=0)[0]
tm.assert_frame_equal(expected, res)
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=['date'], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range('1/1/2001', periods=10))
df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
'time': raw_dates.map(lambda x: str(x.time()))})
res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
index_col=1)
newdf = DataFrame({'datetime': raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self):
data = os.path.join(DATA_PATH, 'computer_sales_page.html')
with tm.assert_raises_regex(ParserError,
r"Passed header=\[0,1\] are "
r"too many rows for this "
r"multi_index of columns"):
self.read_html(data, header=[0, 1])
def test_wikipedia_states_table(self):
data = os.path.join(DATA_PATH, 'wikipedia_states.html')
assert os.path.isfile(data), '%r is not a file' % data
assert os.path.getsize(data), '%r is an empty file' % data
result = self.read_html(data, 'Arizona', header=1)[0]
assert result['sq mi'].dtype == np.dtype('float64')
def test_decimal_rows(self):
# GH 12907
data = StringIO('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>''')
expected = DataFrame(data={'Header': 1100.101}, index=[0])
result = self.read_html(data, decimal='#')[0]
assert result['Header'].dtype == np.dtype('float64')
tm.assert_frame_equal(result, expected)
    def test_bool_header_arg(self):
        # GH 6114: header must not accept a boolean — both True and
        # False are required to raise TypeError.
        for arg in [True, False]:
            with pytest.raises(TypeError):
                read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
html_data = """<table>
<thead>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['0.763', '0.244']})
html_df = read_html(html_data, converters={'a': str})[0]
tm.assert_frame_equal(expected_df, html_df)
def test_na_values(self):
# GH 13461
html_data = """<table>
<thead>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': [0.763, np.nan]})
html_df = read_html(html_data, na_values=[0.244])[0]
tm.assert_frame_equal(expected_df, html_df)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['N/A', 'NA']})
html_df = read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({'a': [np.nan, np.nan]})
html_df = read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(data=[("Hillary", 68, "D"),
("Bernie", 74, "D"),
("Donald", 69, "R")])
expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1",
"Unnamed: 2_level_1"]]
html = expected_df.to_html(index=False)
html_df = read_html(html, )[0]
tm.assert_frame_equal(expected_df, html_df)
def _lang_enc(filename):
    """Split a fixture basename like 'chinese_utf-8.html' into its
    [language, encoding] parts."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    return stem.split('_')
class TestReadHtmlEncoding(object):
    """Round-trip the html_encoding/*.html fixtures through read_html via
    a filename, a file-like object and a raw byte string, and check that
    all three paths agree. Fixture names encode the expected encoding as
    ``<lang>_<encoding>.html``."""

    files = glob.glob(os.path.join(DATA_PATH, 'html_encoding', '*.html'))
    flavor = 'bs4'

    @classmethod
    def setup_class(cls):
        _skip_if_none_of((cls.flavor, 'html5lib'))

    def read_html(self, *args, **kwargs):
        # Force this class's flavor, overriding any caller value.
        kwargs['flavor'] = self.flavor
        return read_html(*args, **kwargs)

    def read_filename(self, f, encoding):
        # Path-based entry point.
        return self.read_html(f, encoding=encoding, index_col=0)

    def read_file_like(self, f, encoding):
        # File-like entry point: bytes wrapped in BytesIO.
        with open(f, 'rb') as fobj:
            return self.read_html(BytesIO(fobj.read()), encoding=encoding,
                                  index_col=0)

    def read_string(self, f, encoding):
        # Raw-bytes entry point.
        with open(f, 'rb') as fobj:
            return self.read_html(fobj.read(), encoding=encoding, index_col=0)

    def test_encode(self):
        assert self.files, 'no files read from the data folder'
        for f in self.files:
            _, encoding = _lang_enc(f)
            try:
                from_string = self.read_string(f, encoding).pop()
                from_file_like = self.read_file_like(f, encoding).pop()
                from_filename = self.read_filename(f, encoding).pop()
                tm.assert_frame_equal(from_string, from_file_like)
                tm.assert_frame_equal(from_string, from_filename)
            except Exception:
                # seems utf-16/32 fail on windows
                if is_platform_windows():
                    if '16' in encoding or '32' in encoding:
                        continue
                raise
class TestReadHtmlEncodingLxml(TestReadHtmlEncoding):
    # Re-run the encoding round-trip tests with the lxml flavor.
    flavor = 'lxml'

    @classmethod
    def setup_class(cls):
        super(TestReadHtmlEncodingLxml, cls).setup_class()
        _skip_if_no(cls.flavor)
class TestReadHtmlLxml(ReadHtmlMixin):
    """read_html behaviour specific to the strict lxml flavor."""

    flavor = 'lxml'

    @classmethod
    def setup_class(cls):
        _skip_if_no('lxml')

    def test_data_fail(self):
        # lxml is strict: these real-world pages raise XMLSyntaxError
        # instead of being recovered like bs4/html5lib would.
        from lxml.etree import XMLSyntaxError
        spam_data = os.path.join(DATA_PATH, 'spam.html')
        banklist_data = os.path.join(DATA_PATH, 'banklist.html')
        with pytest.raises(XMLSyntaxError):
            self.read_html(spam_data)
        with pytest.raises(XMLSyntaxError):
            self.read_html(banklist_data)

    def test_works_on_valid_markup(self):
        filename = os.path.join(DATA_PATH, 'valid_markup.html')
        dfs = self.read_html(filename, index_col=0)
        assert isinstance(dfs, list)
        assert isinstance(dfs[0], DataFrame)

    @pytest.mark.slow
    def test_fallback_success(self):
        # When lxml fails, read_html should fall back to the next
        # requested flavor rather than erroring out.
        _skip_if_none_of(('bs4', 'html5lib'))
        banklist_data = os.path.join(DATA_PATH, 'banklist.html')
        self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])

    def test_parse_dates_list(self):
        # parse_dates accepts positional indices and column names alike.
        df = DataFrame({'date': date_range('1/1/2001', periods=10)})
        expected = df.to_html()
        res = self.read_html(expected, parse_dates=[1], index_col=0)
        tm.assert_frame_equal(df, res[0])
        res = self.read_html(expected, parse_dates=['date'], index_col=0)
        tm.assert_frame_equal(df, res[0])

    def test_parse_dates_combine(self):
        # parse_dates can merge separate date and time columns into one.
        raw_dates = Series(date_range('1/1/2001', periods=10))
        df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
                        'time': raw_dates.map(lambda x: str(x.time()))})
        res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
                             index_col=1)
        newdf = DataFrame({'datetime': raw_dates})
        tm.assert_frame_equal(newdf, res[0])

    def test_computer_sales_page(self):
        # NOTE: the bs4 version of this test expects a ParserError;
        # lxml parses this page's header=[0, 1] without raising.
        data = os.path.join(DATA_PATH, 'computer_sales_page.html')
        self.read_html(data, header=[0, 1])
def test_invalid_flavor():
    """An unrecognized flavor must raise ValueError up front."""
    with pytest.raises(ValueError):
        read_html('google.com', 'google', flavor='not a* valid**++ flaver')
def get_elements_from_file(url, element='table'):
    # Parse *url* (a local file path) with BeautifulSoup/html5lib and
    # return all tags named *element*.
    _skip_if_none_of(('bs4', 'html5lib'))
    url = file_path_to_url(url)
    from bs4 import BeautifulSoup
    with urlopen(url) as f:
        soup = BeautifulSoup(f, features='html5lib')
    return soup.find_all(element)
@pytest.mark.slow
def test_bs4_finds_tables():
    """BeautifulSoup should locate at least one <table> in spam.html."""
    spam_path = os.path.join(DATA_PATH, "spam.html")
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        assert get_elements_from_file(spam_path, 'table')
def get_lxml_elements(url, element):
    """Parse *url* with lxml.html and return every node named *element*."""
    _skip_if_no('lxml')
    from lxml.html import parse
    tree = parse(url)
    return tree.xpath('.//{0}'.format(element))
@pytest.mark.slow
def test_lxml_finds_tables():
    """lxml should locate at least one <table> in spam.html."""
    assert get_lxml_elements(os.path.join(DATA_PATH, "spam.html"), 'table')
@pytest.mark.slow
def test_lxml_finds_tbody():
    """lxml should locate at least one <tbody> in spam.html."""
    assert get_lxml_elements(os.path.join(DATA_PATH, "spam.html"), 'tbody')
def test_same_ordering():
    """lxml and bs4 flavors must return tables in the same order."""
    _skip_if_none_of(['bs4', 'lxml', 'html5lib'])
    valid_path = os.path.join(DATA_PATH, 'valid_markup.html')
    dfs_lxml = read_html(valid_path, index_col=0, flavor=['lxml'])
    dfs_bs4 = read_html(valid_path, index_col=0, flavor=['bs4'])
    assert_framelist_equal(dfs_lxml, dfs_bs4)
class ErrorThread(threading.Thread):
    # Thread that captures any exception raised by its target in
    # ``self.err`` (None on success) so the launching test can inspect it
    # after join() — otherwise the failure would die with the thread.
    def run(self):
        try:
            super(ErrorThread, self).run()
        except Exception as e:
            self.err = e
        else:
            self.err = None
@pytest.mark.slow
def test_importcheck_thread_safety():
    # see gh-16928
    # force import check by reinitalising global vars in html.py
    pytest.importorskip('lxml')
    reload(pandas.io.html)

    filename = os.path.join(DATA_PATH, 'valid_markup.html')
    helper_thread1 = ErrorThread(target=read_html, args=(filename,))
    helper_thread2 = ErrorThread(target=read_html, args=(filename,))

    helper_thread1.start()
    helper_thread2.start()
    # join() blocks without burning a CPU core; the original spun in a
    # busy-wait loop polling is_alive().
    helper_thread1.join()
    helper_thread2.join()
    assert None is helper_thread1.err is helper_thread2.err
def test_parse_failure_unseekable():
    # Issue #17975: when lxml fails on a stream that cannot be rewound,
    # read_html must raise a clear error instead of retrying the bs4
    # fallback on an already-exhausted stream.
    _skip_if_no('lxml')
    _skip_if_no('bs4')

    class UnseekableStringIO(StringIO):
        # Pretend the buffer cannot seek, like a socket or pipe wrapper.
        def seekable(self):
            return False

    good = UnseekableStringIO('''
        <table><tr><td>spam<br />eggs</td></tr></table>''')
    bad = UnseekableStringIO('''
        <table><tr><td>spam<foobr />eggs</td></tr></table>''')
    assert read_html(good)
    assert read_html(bad, flavor='bs4')
    bad.seek(0)
    with pytest.raises(ValueError,
                       match='passed a non-rewindable file object'):
        read_html(bad)
def test_parse_failure_rewinds():
    # Issue #17975: when lxml fails on a seekable file-like object,
    # read_html must rewind it before handing it to the bs4 fallback.
    _skip_if_no('lxml')
    _skip_if_no('bs4')

    class MockFile(object):
        # Minimal one-shot reader: read() drains it, seek() resets it —
        # letting the test observe whether read_html actually rewinds.
        def __init__(self, data):
            self.data = data
            self.at_end = False

        def read(self, size=None):
            data = '' if self.at_end else self.data
            self.at_end = True
            return data

        def seek(self, offset):
            self.at_end = False

        def seekable(self):
            return True

    good = MockFile('<table><tr><td>spam<br />eggs</td></tr></table>')
    bad = MockFile('<table><tr><td>spam<foobr />eggs</td></tr></table>')
    assert read_html(good)
    assert read_html(bad)
|
processes_barrier.py | import multiprocessing
from multiprocessing import Barrier, Lock, Process
from time import time
from datetime import datetime
def test_with_barrier(synchronizer_p, serializer_p):
name = multiprocessing.current_process().name
synchronizer_p.wait()
now = time()
with serializer_p:
print("process %s ----> %s" % (name, datetime.fromtimestamp(now)))
def test_without_barrier():
    """Print this process's name and the current time, unsynchronized."""
    proc_name = multiprocessing.current_process().name
    now = time()
    print("process %s ----> %s" % (proc_name, datetime.fromtimestamp(now)))
if __name__ == '__main__':
    # Two processes rendezvous at the barrier (parties=2), so their
    # printed timestamps should coincide; the lock serializes the output.
    synchronizer = Barrier(2)
    serializer = Lock()
    Process(name='p1 - test_with_barrier', target=test_with_barrier, args=(synchronizer, serializer)).start()
    Process(name='p2 - test_with_barrier', target=test_with_barrier, args=(synchronizer, serializer)).start()
    # These two run unsynchronized for comparison.
    Process(name='p3 - test_without_barrier', target=test_without_barrier).start()
    Process(name='p4 - test_without_barrier', target=test_without_barrier).start()
|
serveravg.py | from flcore.clients.clientavg import clientAVG
from flcore.servers.serverbase import Server
from utils.data_utils import read_client_data
from threading import Thread
class FedAvg(Server):
    """Federated Averaging server.

    Each round it broadcasts the global model, trains the selected
    clients locally and aggregates their parameters (the send/receive/
    aggregate primitives live in the Server base class).
    """

    def __init__(self, device, dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,
                 num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal, time_threthold):
        super().__init__(dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,
                         num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal,
                         time_threthold)
        # select slow clients
        self.set_slow_clients()
        # Build one clientAVG per client, tagging each with its
        # train-slow / send-slow simulation flags and its data split.
        for i, train_slow, send_slow in zip(range(self.num_clients), self.train_slow_clients, self.send_slow_clients):
            train, test = read_client_data(dataset, i)
            client = clientAVG(device, i, train_slow, send_slow, train, test, model, batch_size, learning_rate, local_steps)
            self.clients.append(client)
        print(f"\nJoin clients / total clients: {self.join_clients} / {self.num_clients}")
        print("Finished creating server and clients.")

    def train(self):
        # Run global_rounds + 1 communication rounds.
        for i in range(self.global_rounds+1):
            self.send_models()
            # Evaluate the current global model every eval_gap rounds.
            if i%self.eval_gap == 0:
                print(f"\n-------------Round number: {i}-------------")
                print("\nEvaluate global model")
                self.evaluate()
            self.selected_clients = self.select_clients()
            # Sequential local training; the commented-out variant below
            # ran the clients concurrently instead.
            for client in self.selected_clients:
                client.train()
            # threads = [Thread(target=client.train)
            #            for client in self.selected_clients]
            # [t.start() for t in threads]
            # [t.join() for t in threads]
            self.receive_models()
            self.aggregate_parameters()
        print("\nBest global results.")
        self.print_(max(self.rs_test_acc), max(
            self.rs_train_acc), min(self.rs_train_loss))
        self.save_results()
        self.save_global_model()
|
worker.py | import logging
import traceback
from time import sleep
import sys
from multiprocessing import Process, Value, Pipe
from abc import abstractmethod
import dwf_client_util.util as util
import dwf_client_util.server as server
import dwf_client_util.task_manager as task_manager
ClientStatus = util.ClientStatus
Signals = util.Signals
class SafeProcess(Process):
    """Process wrapper that turns known failure modes into a clean log
    message plus exit code instead of an unhandled traceback.

    On an unexpected exception the error is also reported back to the
    server's ERROR endpoint so it can react.
    """

    def __init__(self, client_id, name, debug=False, *args, **kwargs):
        super(SafeProcess, self).__init__(*args, **kwargs)
        # Plain string: the format contains no interpolation fields, so
        # the original f-string prefix was redundant.
        logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
        self.name = name
        self.debug = debug
        self.client_id = client_id

    def run(self):
        try:
            Process.run(self)
        except ConnectionError as e:
            logging.info(f"Can't connect to server. Error message: \n {str(e)}\n Client is exiting...")
            if self.debug:
                logging.info(traceback.format_exc())
            sys.exit(-1)
        except KeyError:
            logging.info("Worker id is unknown, try running the client with the --reinit argument.")
            if self.debug:
                logging.info(traceback.format_exc())
            sys.exit(-1)
        except Exception as e:
            # Report the failure to the server so it can reschedule/log it.
            logging.info(f"Unhandled exception occured:\n{str(e)} \n Client is exiting...")
            server.send_to_endpoint('ERROR', {"hash": self.client_id, "log": f"Unhandled exception: {str(e)}"})
            if self.debug:
                logging.info(traceback.format_exc())
            sys.exit(-1)
        except KeyboardInterrupt:
            # KeyboardInterrupt derives from BaseException, so the
            # `except Exception` clause above does not swallow it.
            if self.debug:
                logging.info(f"[{self.name}] Keyboard interruption, process is exiting...")
                logging.info(traceback.format_exc())
            sys.exit(0)
class BaseWorker():
    """Common scaffolding for long-running client workers.

    Concrete subclasses implement ``target_func``, which is executed inside
    a separate ``SafeProcess``. Lifecycle: launch() / is_alive() / stop().
    """
    def __init__(self, client_id, name, debug):
        self.client_id = client_id   # hash identifying this client
        self.name = name             # label used for process/log output
        self.debug = debug           # propagate tracebacks to the logs
        self.active_process = None   # SafeProcess instance once launch()ed
        self.target_func_args = ()   # extra positional args for target_func

    @abstractmethod
    def target_func(self):
        """Body of the worker loop; must be provided by subclasses."""
        # Fix: @abstractmethod alone is not enforced (this class does not
        # use ABCMeta), so the base implementation silently did nothing.
        # Raise explicitly if a subclass forgets to override.
        raise NotImplementedError("subclasses must implement target_func()")

    def is_alive(self):
        """Return True while the launched process is still running.

        Raises AttributeError if called before launch().
        """
        return self.active_process.is_alive()

    def launch(self):
        """Start target_func inside a fresh SafeProcess."""
        self.active_process = SafeProcess(self.client_id, self.name, self.debug,
                                          target=self.target_func, args=self.target_func_args)
        self.active_process.start()

    def stop(self):
        """Terminate the running process and wait for it to exit."""
        self.active_process.terminate()
        self.active_process.join()
class Pinger(BaseWorker):
    """Periodically pings the server and forwards stop requests.

    Runs in its own process; whenever the server reports "not working"
    while a task is in progress, a STOP_TASK signal is written to the
    pipe shared with the ProcessManager.
    """
    def __init__(self, client_id, stop_task_signal, client_status, debug=False):
        super(Pinger, self).__init__(client_id, "PINGER", debug)
        self.stop_task_signal = stop_task_signal  # pipe end to the manager
        self.client_status = client_status        # shared Value (ClientStatus)
        logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

    def _is_stop(self, response):
        # Stop only when the server says "not working" while we are WORKING.
        server_idle = not response.json()['working']
        return server_idle and self.client_status.value == ClientStatus.WORKING

    def target_func(self):
        # Ping loop; runs until the process is terminated from outside.
        while True:
            reply = server.send_to_endpoint('PING', {'hash': self.client_id})
            if self._is_stop(reply):
                logging.info("Received stop command from server!")
                self.stop_task_signal.send(Signals.STOP_TASK)
            sleep(util.config['PING_INTVAL'])
class TaskWorker(BaseWorker):
    """Fetches tasks from the server and executes them one at a time."""

    def __init__(self, client_id, client_status, debug=False):
        super(TaskWorker, self).__init__(client_id, "TASK WORKER", debug)
        self.client_status = client_status  # shared Value (ClientStatus)
        logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

    def _process_task(self, task):
        """Run a single task; failures are logged and reported to the server."""
        try:
            logging.info(f'New task assigned:\n {task}')
            task_manager.process_task(task)
            logging.info("Task is done.")
            self.client_status.value = ClientStatus.IDLE
        except Exception as e:
            logging.info(f"Task can't be completed, error occured: {str(e)}")
            if self.debug:
                logging.info(traceback.format_exc())
            server.send_to_endpoint('ERROR', {"hash": self.client_id, "log": f"Error: {str(e)} \n Task: {str(task)}."})

    def target_func(self):
        # Poll loop: request work, run it, otherwise back off and retry.
        while True:
            task = server.request_task(self.client_id)
            if task:
                self.client_status.value = ClientStatus.WORKING
                self._process_task(task)
            else:
                self.client_status.value = ClientStatus.IDLE
                logging.info(f"No task available. Retrying in {util.config['RETRY_INTVAL']}s...")
                sleep(util.config['RETRY_INTVAL'])
class ProcessManager():
    """Owns the Pinger and TaskWorker processes and supervises them."""

    def __init__(self, client_id, debug=False):
        self.client_id = client_id
        # Shared status flag, visible to both child processes.
        self.client_status = Value("i", ClientStatus.IDLE)
        # Pipe used by the Pinger to signal "stop the current task".
        self.parent_conn, self.child_conn = Pipe()
        # Fix: propagate the debug flag to the Pinger as well — it was
        # previously passed only to the TaskWorker (inconsistency).
        self.pinger = Pinger(client_id, self.child_conn, self.client_status, debug)
        self.task_worker = TaskWorker(client_id, self.client_status, debug)

    def _are_processes_alive(self, processes):
        """Return True while every process runs; otherwise stop the rest.

        If any process has died, the surviving processes are stopped and
        False is returned so the supervisor loop can exit.
        """
        for process in processes:
            if not process.is_alive():
                other_processes = list(filter(lambda p: p != process, processes))
                for other_process in other_processes:
                    other_process.stop()
                logging.info("Client is exiting...")
                return False
        return True

    def launch(self):
        """Start both workers and supervise them until one dies.

        A message on the pipe means the server requested a task stop:
        the task worker is restarted with a fresh process.
        """
        self.task_worker.launch()
        self.pinger.launch()
        while True:
            if not self._are_processes_alive([self.task_worker, self.pinger]):
                return
            # Wait up to 1s for a stop-task signal from the Pinger.
            if self.parent_conn.poll(1):
                self.parent_conn.recv()
                logging.info("Stopping task...")
                self.task_worker.stop()
                self.task_worker.launch()
|
train_pg_f18.py | """
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
Adapted for pytorch version by Ning Dai
"""
import numpy as np
import pdb
import torch
import gym
import logz
import argparse
import scipy.signal
import os
import time
import inspect
import math
from torch.multiprocessing import Process
from torch import nn, optim
class PolicyNet(nn.Module):
    """MLP policy network.

    Emits action logits for discrete action spaces, or a
    (mean, log-sigma) pair for continuous ones.
    """
    def __init__(self, neural_network_args):
        super(PolicyNet, self).__init__()
        self.ob_dim = neural_network_args['ob_dim']
        self.ac_dim = neural_network_args['ac_dim']
        self.discrete = neural_network_args['discrete']
        self.hidden_size = neural_network_args['size']
        self.n_layers = neural_network_args['n_layers']
        self.build_model()

    def build_model(self):
        """Assemble the Linear+Tanh stack (no activation after the head)."""
        dims = [self.ob_dim] + [self.hidden_size] * self.n_layers + [self.ac_dim]
        modules = []
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            modules.append(nn.Linear(d_in, d_out))
            modules.append(nn.Tanh())
        del modules[-1]  # drop trailing Tanh: raw outputs at the last layer
        self.model = nn.Sequential(*modules).apply(self.weights_init_)
        if not self.discrete:
            # Learnable, state-independent log std-dev per action dimension.
            self.ts_logsigma = nn.Parameter(torch.randn((self.ac_dim, )))

    def weights_init_(self, m):
        # Xavier-initialize every module with a weight tensor (the Linears).
        if hasattr(m, 'weight'):
            nn.init.xavier_uniform_(m.weight)

    def forward(self, ts_ob_no):
        """
        ts_ob_no: A Tensor with shape (batch_size * observation_dim)
        """
        out = self.model(ts_ob_no)
        if not self.discrete:
            return (out, self.ts_logsigma)
        return out
class Agent(object):
    """Vanilla policy-gradient (REINFORCE) agent.

    Owns the policy network (and an optional value-network baseline),
    samples trajectories from a gym-style environment, estimates
    returns/advantages, and applies policy-gradient updates.
    """
    def __init__(self, neural_network_args, sample_trajectory_args, estimate_return_args):
        super(Agent, self).__init__()
        # Network hyper-parameters.
        self.ob_dim = neural_network_args['ob_dim']
        self.ac_dim = neural_network_args['ac_dim']
        self.discrete = neural_network_args['discrete']
        self.hidden_size = neural_network_args['size']
        self.n_layers = neural_network_args['n_layers']
        self.learning_rate = neural_network_args['learning_rate']
        # Trajectory-sampling options.
        self.animate = sample_trajectory_args['animate']
        self.max_path_length = sample_trajectory_args['max_path_length']
        self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
        # Return/advantage estimation options.
        self.gamma = estimate_return_args['gamma']
        self.reward_to_go = estimate_return_args['reward_to_go']
        self.nn_baseline = estimate_return_args['nn_baseline']
        self.normalize_advantages = estimate_return_args['normalize_advantages']
        self.policy_net = PolicyNet(neural_network_args)
        # A single optimizer covers policy (and, if enabled, baseline) params.
        params = list(self.policy_net.parameters())
        if self.nn_baseline:
            # NOTE(review): build_mlp is not defined in this module —
            # presumably provided elsewhere; confirm before enabling
            # the nn_baseline option.
            self.value_net = build_mlp(self.ob_dim, 1, self.n_layers, self.hidden_size)
            params += list(self.value_net.parameters())
        self.optimizer = optim.Adam(params, lr=self.learning_rate)
    def sample_trajectories(self, itr, env):
        """Sample whole episodes until at least min_timesteps_per_batch
        timesteps are collected; returns (paths, timesteps_this_batch)."""
        timesteps_this_batch = 0
        paths = []
        while timesteps_this_batch <= self.min_timesteps_per_batch:
            #Animate on first and every tenth batch
            animate_this_episode = (len(paths) == 0 and (itr%10) == 0 and self.animate)
            path = self.sample_trajectory(env, animate_this_episode)
            paths.append(path)
            timesteps_this_batch += len(path["reward"])
        return paths, timesteps_this_batch
    def sample_trajectory(self, env, animate):
        """Roll out a single episode with the current policy.

        Returns a dict of float32 arrays: "observation", "reward", "action".
        """
        ob = env.reset()
        obs, acs, rewards = [], [], []
        steps = 0
        while True:
            if animate:
                env.render()
                time.sleep(0.1)
            obs.append(ob)
            #==================
            #SAMPLE ACTION
            #===================
            #Convert to a tensor
            ts_ob_no = torch.from_numpy(ob[None]).float() #(1,ob_dim)
            #Pass through policynet
            if self.discrete:
                ts_logits_na = self.policy_net(ts_ob_no)
                # NOTE(review): nn.Softmax() without dim= relies on the
                # deprecated implicit-dim behavior; confirm torch version.
                ts_probs_na = nn.Softmax()(ts_logits_na)
                ts_action = torch.multinomial(ts_probs_na, num_samples=1) #(1,1)
                action_na = torch.squeeze(ts_action).numpy() # (1, )
            else:
                # Gaussian policy: state-dependent mean, learned global sigma.
                ts_means_na, ts_logsigma_na = self.policy_net(ts_ob_no)
                ts_action_na = torch.normal(ts_means_na, ts_logsigma_na.exp()) #(1, action_dim)
                action_na = torch.squeeze(ts_action_na).numpy()
            acs.append(action_na)
            ob, rew, done, _ = env.step(action_na)
            rewards.append(rew)
            steps += 1
            if done or steps > self.max_path_length:
                break
        path = {"observation":np.array(obs, dtype=np.float32),
                "reward":np.array(rewards, dtype=np.float32),
                "action":np.array(acs, dtype=np.float32)}
        return path
    def estimate_return(self, obs_no, re_n):
        """Compute per-timestep returns q_n and advantages adv_n
        from per-episode reward arrays."""
        #==================
        #Transform re_n (num_episodes, each element is num_timesteps of that episode) to q_n (batchsize=sum_timesteps,)
        #===================
        sum_timesteps = obs_no.shape[0]
        q_n = np.array([])
        if self.reward_to_go:
            # Discounted reward-to-go computed via a reversed IIR filter.
            q_n = np.concatenate([scipy.signal.lfilter(b=[1], a=[1, -self.gamma], x=re[::-1])[::-1] for re in re_n]).astype(np.float32)
        else:
            #Transforms re_n into q_n where each index is the sum of rewards of the path is belonged to * self.gamma
            q_n = np.concatenate([np.full_like(re, scipy.signal.lfilter(b=[1], a=[1, -self.gamma], x=re[::-1])[-1]) for re in re_n])
        #==================
        # Calculate adv=(batchsize=sum_timesteps, ) by subtracting some baselines from estimated q_n=(batchsize=sum_timesteps, )
        #===================
        if self.nn_baseline:
            #Use Neural network baseline value function that predictions expected return conditioned on a state
            b_n = self.value_net(torch.from_numpy(obs_no)).view(-1).numpy()
            # Rescale baseline predictions to match q_n's statistics.
            b_n = (b_n - np.mean(b_n)) / (np.std(b_n) + 1e-7)
            b_n = b_n * np.std(q_n) + np.mean(q_n)
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()
        if self.normalize_advantages:
            # Standardize advantages to zero mean / unit variance.
            adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-7)
        return q_n, adv_n
    def update_parameters(self, ob_no, ac_na, q_n, adv_n):
        """One policy-gradient step (plus baseline regression if enabled)."""
        #convert numpy arrays to tensors
        ts_ob_no, ts_ac_na, ts_qn, ts_adv_n = map(lambda x: torch.from_numpy(x), [ob_no, ac_na, q_n, adv_n])
        #get Policy Distribution and log probabilities
        policy_params = self.policy_net(ts_ob_no)
        # pdb.set_trace()
        if self.discrete:
            ts_logits_na = policy_params
            ts_logprob_n = torch.distributions.Categorical(logits=ts_logits_na).log_prob(ts_ac_na)
        else:
            ts_means_na, ts_logsigma = policy_params
            ts_logprob_n = torch.distributions.Normal(ts_means_na, ts_logsigma.exp()).log_prob(ts_ac_na).sum(-1)
        #clean gradient and backprop mate
        self.optimizer.zero_grad()
        # REINFORCE objective: maximize E[log pi(a|s) * advantage].
        loss = -(ts_logprob_n * ts_adv_n).mean()
        loss.backward()
        if self.nn_baseline:
            # Fit the baseline to the standardized returns.
            baseline = self.value_net(ts_ob_no).view(-1)
            ts_target_n = (ts_qn - ts_qn.mean()) / (ts_qn.std() + 1e-7)
            baseline_loss = torch.nn.functional.mse_loss(baseline, ts_target_n)
            baseline_loss.backward()
        self.optimizer.step()
def train_PG(exp_name, env_name,
             n_iter,
             gamma,
             min_timesteps_per_batch,
             max_path_length,
             learning_rate,
             reward_to_go,
             animate,
             logdir,
             normalize_advantages,
             nn_baseline,
             seed,
             n_layers,
             size):
    """Train a policy-gradient Agent on `env_name` for `n_iter` iterations.

    Parameters mirror the CLI flags in main(): discounting (gamma), batch
    sizing, learning rate, reward-to-go vs full-return estimation, an
    optional neural-network baseline, and advantage normalization.
    Logs per-iteration diagnostics to `logdir` via logz and saves the model.
    """
    start = time.time()
    #==================
    #SETUP LOGGER
    #===================
    locals_ = locals()
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters.
    # Fix: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec()[0] yields the same list of positional arg names.
    args = inspect.getfullargspec(train_PG)[0]
    hyperparams = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_hyperparams(hyperparams)
    #==================
    #SETUP ENV
    #===================
    #Make gym env
    env = gym.make(env_name)
    #Set random seeds (TORCH, NUMPY and ENVIRONMENT)
    torch.manual_seed(seed)
    np.random.seed(seed)
    # NOTE(review): env.seed() is the pre-0.26 gym API; newer gym/gymnasium
    # uses env.reset(seed=...) — confirm the pinned gym version.
    env.seed(seed)
    #Maximum length for episodes
    max_path_length = max_path_length or env.spec.max_episode_steps
    #Find out if env is continous or discrete
    discrete = isinstance(env.action_space, gym.spaces.Discrete)
    #Observation and action sizes
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
    #==================
    #INITIALIZE AGENT
    #===================
    neural_network_args = {
        'n_layers': n_layers,
        'ob_dim': ob_dim,
        'ac_dim': ac_dim,
        'discrete': discrete,
        'size': size,
        'learning_rate': learning_rate,
    }
    sample_trajectory_args = {
        'animate': animate,
        'max_path_length': max_path_length,
        'min_timesteps_per_batch': min_timesteps_per_batch,
    }
    estimate_return_args = {
        'gamma': gamma,
        'reward_to_go': reward_to_go,
        'nn_baseline': nn_baseline,
        'normalize_advantages': normalize_advantages,
    }
    agent = Agent(neural_network_args, sample_trajectory_args, estimate_return_args)
    #==================
    #TRAINING LOOP
    #===================
    total_timesteps = 0
    for itr in range(n_iter):
        print("********** Iteration %i ************"%itr)
        with torch.no_grad():
            #Step 1: Sample Trajectories from current policy (neural network)
            paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
        total_timesteps += timesteps_this_batch
        #Step 2: Calculate the RETURNS (Q_val, Adv) for this batch (batch_size = sum of all timesteps across all paths)
        ob_no = np.concatenate([path["observation"] for path in paths]) #(batch_size * obs_dim)
        ac_na = np.concatenate([path["action"] for path in paths]) #(batch_size * action_dim)
        re_n = [path["reward"] for path in paths] #(num_paths) each index is a numpy array containing the rewards for that path
        with torch.no_grad():
            q_n, adv_n = agent.estimate_return(ob_no, re_n)
        #Step 3: Update parameters using Policy Gradient
        agent.update_parameters(ob_no, ac_na, q_n, adv_n)
        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [len(path["reward"]) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.save_pytorch_model(agent)
def main():
    """Command-line entry point: parse flags and run each experiment
    (one seed per experiment) in its own process."""
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()

    # Create data/<exp>_<env>_<timestamp>/ as the logging root.
    if not os.path.exists('data'):
        os.makedirs('data')
    run_name = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', run_name)
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    # A non-positive --ep_len means "use the environment's default limit".
    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []
    for experiment in range(args.n_experiments):
        seed = args.seed + 10 * experiment
        print('Running experiment with seed %d' % seed)

        def train_func():
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not args.dont_normalize_advantages,
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
            )

        # Each experiment runs in its own process; joining here instead of
        # after the loop would serialize the experiments.
        proc = Process(target=train_func, args=())
        proc.start()
        processes.append(proc)

    for proc in processes:
        proc.join()


if __name__ == "__main__":
    main()
tieba_sign.py | #!/usr/bin/env python3
# coding=utf-8
import hashlib
import json
import os
import prettytable as pt
import pyzbar.pyzbar as pyzbar
import requests
import time
from io import BytesIO
from PIL import Image
from random import choice
from threading import Thread
import schedule
class Tieba(object):
    """Baidu Tieba auto sign-in client.

    Logs users in via QR code (session cookies cached in a hidden
    '.<user>' file), fetches each user's followed forums, and signs into
    all of them concurrently through the mobile-client API (requests are
    signed with the tiebaclient MD5 key).
    """
    def __init__(self, users):
        # users: list of account names; one cookie file is kept per user.
        self.users = users
        self.tb = pt.PrettyTable()       # result table: forum / status
        self.s = requests.session()
        self.MD5_KEY = 'tiebaclient!!!'  # key appended when signing requests
        self.CAPTCHA_API = 'http://222.187.238.211:10086/b'
        self.INDEX_URL = 'https://tieba.baidu.com/index.html'
        self.TBS_URL = 'http://tieba.baidu.com/dc/common/tbs'
        self.LIKES_URL = 'http://c.tieba.baidu.com/c/f/forum/like'
        self.SIGN_URL = 'http://c.tieba.baidu.com/c/c/forum/sign'
        self.GEN_IMG_URL = 'https://tieba.baidu.com/cgi-bin/genimg'
        self.QR_CODE_URL = 'https://passport.baidu.com/v2/api/getqrcode'
        self.UNICAST_URL = 'https://passport.baidu.com/channel/unicast'
        self.USER_INFO_URL = 'https://tieba.baidu.com/f/user/json_userinfo'
        self.QR_LOGIN_URL = 'https://passport.baidu.com/v3/login/main/qrbdusslogin'
        self.HAO123_URL = 'https://user.hao123.com/static/crossdomain.php'
        self.MY_LIKE_URL = 'http://tieba.baidu.com/f/like/mylike'
        self.ALL_TIEBA_LIST = []         # every forum signed during one run
        self.tb.field_names = ['贴吧', '状态']
        # Headers mimicking the Android mobile client.
        self.headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'c.tieba.baidu.com',
            'User-Agent': 'bdtb for Android 10.3.8.10'
        }
    def get_time_stamp(self):
        """Current time in milliseconds, as a string (Baidu 'tt' param)."""
        return str(int(time.time() * 1000))
    def save_cookie(self, user):
        """Persist the session cookies to the hidden file '.<user>'."""
        cookie_dict = self.s.cookies.get_dict()
        with open('.%s' % user, 'w') as f:
            json.dump(cookie_dict, f)
            f.close()
    def load_cookie(self, user):
        """Restore session cookies previously saved by save_cookie()."""
        with open('.%s' % user, 'r') as f:
            cookie_dict = json.loads(f.read())
            f.close()
        for k, v in cookie_dict.items():
            self.s.cookies.set(k, v)
    def unicast(self, channel_id):
        """Poll the QR-login channel once; returns the channel_v dict,
        or None on timeout."""
        tt = self.get_time_stamp()
        r = self.s.get(
            url=self.UNICAST_URL,
            params={
                'channel_id': channel_id,
                'tpl': 'tb',
                'apiver': 'v3',
                'callback': '',
                'tt': tt,
                '_': tt
            }
        )
        # Response is JSONP-wrapped; strip the parentheses before parsing.
        rsp = r.text.replace('(', '').replace(')', '')
        rsp_json = json.loads(rsp)
        try:
            channel_v = json.loads(rsp_json['channel_v'])
            return channel_v
        except:
            # NOTE(review): bare except; a scan timeout returns None implicitly.
            print('扫描超时')
    def qr_login_set_cookie(self, bduss):
        """Exchange a BDUSS token for full site cookies (tieba + hao123 hop)."""
        tt = self.get_time_stamp()
        r = self.s.get(
            url=self.QR_LOGIN_URL,
            params={
                'v': tt,
                'bduss': bduss,
                'u': self.INDEX_URL,
                'loginVersion': 'v4',
                'qrcode': '1',
                'tpl': 'tb',
                'apiver': 'v3',
                'tt': tt,
                'alg': 'v1',
                'time': tt[10:]
            }
        )
        # NOTE(review): swaps ' for " and then strips double quotes before
        # json.loads — verify this matches the endpoint's actual response.
        rsp = json.loads(r.text.replace("'", '"').replace('\"',''))
        bdu = rsp['data']['hao123Param']
        self.s.get(f'{self.HAO123_URL}?bdu={bdu}&t={tt}')
        self.s.get(self.MY_LIKE_URL)
    def down_qr_code(self, imgurl):
        """Download the login QR-code image to ./qrcode.png."""
        r = self.s.get(f'https://{imgurl}')
        with open('qrcode.png', 'wb') as f:
            f.write(r.content)
            f.close()
    def read_qr_code(self, imgurl):
        """Download and decode the QR code; returns its embedded URL."""
        self.down_qr_code(imgurl)
        img = Image.open('qrcode.png')
        barcodes = pyzbar.decode(img)
        for barcode in barcodes:
            barcodeData = barcode.data.decode("utf-8")
            return barcodeData
    def get_qr_code(self):
        """Request a login QR code and present it to the user.

        Returns the channel id ('sign') used to poll login status.
        """
        tt = self.get_time_stamp()
        r = self.s.get(
            url=self.QR_CODE_URL,
            params={
                'lp': 'pc',
                'qrloginfrom': 'pc',
                'apiver': 'v3',
                'tt': tt,
                'tpl': 'tb',
                '_': tt
            }
        )
        app = input('有百度贴吧APP / 百度APP,请输入 1 ,没有请输入 2\n:')
        imgurl = r.json()['imgurl']
        # NOTE(review): loops forever if the input is neither '1' nor '2'.
        while True:
            if app == '1':
                print(f'请使用浏览器打开二维码链接并使用百度贴吧APP / 百度APP扫描:https://{imgurl}')
                print('注意:请使用IE浏览器打开二维码链接!!!')
                break
            elif app == '2':
                qrurl = self.read_qr_code(imgurl)
                os.remove('./qrcode.png')
                print(f'请使用已经登录了百度贴吧网页端的浏览器打开链接并按照提示完成登陆:{qrurl}')
                break
        channel_id = r.json()['sign']
        return channel_id
    def qr_login(self, user):
        """Block until the QR login completes, then save the session cookies."""
        channel_id = self.get_qr_code()
        while True:
            rsp = self.unicast(channel_id)
            # status 1: QR scanned, waiting for on-phone confirmation.
            if rsp and rsp['status'] == 1: print('扫描成功,请在手机端确认登录!')
            # status 0: confirmed; 'v' carries the BDUSS token.
            if rsp and rsp['status'] == 0:
                print('确认登陆成功')
                bduss = rsp['v']
                self.qr_login_set_cookie(bduss)
                self.save_cookie(user)
                break
    def login(self, user):
        """Fresh QR login for `user`, then sign all followed forums."""
        self.s.cookies.clear()
        self.qr_login(user)
        print('Login: True')
        tiebas = self.get_like_tiebas()
        self.ALL_TIEBA_LIST.extend(tiebas)
        self.start(tiebas)
    def check_login(self):
        """Return True when the stored cookies still represent a login."""
        r = self.s.get(self.TBS_URL)
        rsp = r.json()
        return True if rsp['is_login'] == 1 else False
    def calc_sign(self, str_dict):
        """MD5 request signature: concatenated k=v pairs + client key,
        uppercased hex digest."""
        md5 = hashlib.md5()
        md5.update((
            ''.join(
                '%s=%s' % (k, v)
                for k, v in str_dict.items()
            ) + self.MD5_KEY).encode('utf-8')
        )
        return md5.hexdigest().upper()
    def get_bduss_stoken(self):
        """Return the (BDUSS, STOKEN) pair from the current cookies."""
        bduss = self.s.cookies.get_dict()['BDUSS']
        stoken = self.s.cookies.get_dict()['STOKEN']
        return bduss, stoken
    def get_like_tiebas(self):
        """Fetch the names of all forums the logged-in user follows.

        Retries up to 5 times. NOTE(review): returns None implicitly if
        every attempt raises — callers then crash on iteration.
        """
        bduss, stoken = self.get_bduss_stoken()
        data = {
            'BDUSS': bduss,
            'stoken': stoken,
            'timestamp': self.get_time_stamp()
        }
        data['sign'] = self.calc_sign(data)
        for _ in range(5):
            try:
                r = requests.post(
                    url=self.LIKES_URL,
                    data=data,
                    cookies=self.s.cookies,
                    headers=self.headers,
                    timeout=3
                )
            except:
                # NOTE(review): bare except; retry on any failure.
                continue
            return [tieba['name'] for tieba in r.json()['forum_list']]
    def get_tbs(self):
        """Fetch the anti-CSRF 'tbs' token required by the sign endpoint."""
        r = self.s.get(self.TBS_URL).json()
        return r['tbs']
    def recognize_captcha(self, remote_url, rec_times=3):
        """OCR a captcha image via the external CAPTCHA_API.

        Downloads the image (retrying until non-empty), posts it to the
        recognizer, and returns the predicted text; returns None after
        rec_times failed recognition attempts.
        """
        for _ in range(rec_times):
            while True:
                try:
                    response = requests.get(remote_url, timeout=6)
                    if response.text:
                        break
                    else:
                        print("retry, response.text is empty")
                except Exception as ee:
                    print(ee)
            files = {'image_file': ('captcha.jpg', BytesIO(response.content), 'application')}
            r = requests.post(self.CAPTCHA_API, files=files)
            try:
                predict_text = json.loads(r.text)["value"]
                return predict_text
            except:
                # NOTE(review): bare except; try another recognition round.
                continue
    def sign_with_vcode(self, tieba, tbs, captcha_input_str, captcha_vcode_str):
        """
        A captcha-required sign-in has not been encountered yet,
        so this currently only prints a notice.
        """
        print(f'{tieba} 需要验证码')
    def sign(self, tieba):
        """Sign into a single forum via the mobile-client endpoint.

        Records the outcome in the PrettyTable; falls back to captcha
        handling when the server demands a vcode.
        """
        tbs = self.get_tbs()
        bduss, stoken = self.get_bduss_stoken()
        data = {
            'BDUSS': bduss,
            'kw': tieba,
            'stoken': stoken,
            'tbs': tbs,
            'timestamp': self.get_time_stamp()
        }
        sign = self.calc_sign(data)
        data['sign'] = sign
        # NOTE(review): if all 5 attempts raise, `rsp` is unbound below.
        for _ in range(5):
            try:
                r = requests.post(
                    url=self.SIGN_URL,
                    data=data,
                    cookies=self.s.cookies,
                    headers=self.headers,
                    timeout=5
                )
                rsp = r.json()
                break
            except:
                continue
        try:
            if rsp['user_info']['is_sign_in'] == 1:
                self.tb.add_row([tieba, '签到成功'])
        except:
            if rsp['error_msg'] == 'need vcode': # unclear whether the mobile endpoint ever requires a captcha
                captcha_vcode_str = rsp['data']['captcha_vcode_str']
                captcha_url = f'{self.GEN_IMG_URL}?{captcha_vcode_str}'
                captcha_input_str = self.recognize_captcha(captcha_url)
                self.sign_with_vcode(tieba, tbs, captcha_input_str, captcha_vcode_str)
            else:
                self.tb.add_row([tieba, rsp['error_msg']])
    def start(self, tiebas):
        """Sign all forums concurrently, one thread per forum."""
        threads = []
        for tieba in tiebas:
            t = Thread(target=self.sign, args=(tieba,))
            threads.append(t)
        for tieba in threads:
            tieba.start()
        for tieba in threads:
            tieba.join()
    def main(self):
        """Sign in every configured user: reuse cached cookies when still
        valid, otherwise perform a fresh QR login; print a per-user result
        table and a final summary."""
        try:
            start_time = time.time()
            print(time.strftime("当前时间: %Y-%m-%d %H:%M:%S", time.localtime()))
            self.ALL_TIEBA_LIST.clear()
            for user in self.users:
                print(f'当前登陆: {user}')
                if os.path.exists('.%s' % user):
                    self.load_cookie(user)
                    if self.check_login():
                        print('CookieLogin: True')
                        tiebas = self.get_like_tiebas()
                        self.ALL_TIEBA_LIST.extend(tiebas)
                        self.start(tiebas)
                    else:
                        print('%sCookies失效...正在重新登录...' % user)
                        self.login(user)
                else:
                    self.login(user)
                self.tb.align = 'l'
                print(self.tb)
                self.tb.clear_rows()
            else:
                # for/else: runs after the loop finishes without break.
                end_time = time.time()
                print('总共签到{}个贴吧,耗时:{}秒'.format(
                    len(self.ALL_TIEBA_LIST),
                    int(end_time - start_time)
                    )
                )
        except requests.ConnectionError as e:
            print("OOPS!! Connection Error. Make sure you are connected to Internet. Technical Details given below.\n")
            print(str(e))
        except requests.Timeout as e:
            print("OOPS!! Timeout Error")
            print(str(e))
        except requests.RequestException as e:
            print("OOPS!! General Error")
            print(str(e))
        except Exception as e:
            print("General Error, ", str(e))
class Tool:
    """Reads the user list and the sign-in schedule from tieba_conf.json."""

    def __init__(self):
        self.user_lists = []   # account names to sign in with
        self.time_table = []   # "HH:MM" strings for the daily schedule

    def parse_config(self):
        """Load userNames/timeTable from the JSON config file and echo them."""
        with open('tieba_conf.json', 'r') as fp:
            conf = json.load(fp)
        self.user_lists = conf['userNames']
        self.time_table = conf['timeTable']
        print('当前用户列表:', self.user_lists)
        print('当前签到时间表: ', self.time_table)

    def get_user_lists(self):
        """Return the configured account names."""
        return self.user_lists

    def get_time_table(self):
        """Return the configured sign-in times."""
        return self.time_table
if __name__ == "__main__":
    cfg = Tool()
    cfg.parse_config()
    signer = Tieba(cfg.get_user_lists())
    # Run once immediately: performs login and saves the cookies, then
    # registers a daily job for each configured time.
    try:
        signer.main()
        for moment in cfg.get_time_table():
            schedule.every().day.at(moment).do(signer.main)
    except Exception as e:
        print(e)
    # Scheduler loop; Ctrl-C exits cleanly.
    try:
        while True:
            schedule.run_pending()
            time.sleep(5)
    except KeyboardInterrupt:
        print("close tieba sign task.")
|
OcsCatchupArchiverThreads.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# +
# import(s)
# -
from __future__ import print_function
import argparse
import sys
import threading
import time
from ocs_common import *
from OcsLogger import *
from OcsEvents import *
from OcsStates import *
from SALPY_catchuparchiver import *
# +
# dunder string(s)
# -
__author__ = "Philip N. Daly"
__copyright__ = u"\N{COPYRIGHT SIGN} AURA/LSST 2017. All rights reserved. Released under the GPL."
__date__ = "1 February 2017"
__doc__ = """Commander for the OCS CatchupArchiver using Python Threading"""
__email__ = "pdaly@lsst.org"
__file__ = "OcsCatchupArchiverThreads.py"
__history__ = __date__ + ": " + "original version (" + __email__ + ")"
__version__ = "0.1.0"
# +
# function: get_state()
# -
def get_state(sm=None, sd=None, cd=None, xd=None):
    """Map the state machine's current state through the lookup tables.

    Returns (new_state_name, old_state_name, commands, configurations);
    empty values when any argument is missing or falsy.
    """
    if not (sm and sd and cd and xd):
        return ('', '', [], [])
    cur = sm.current_state
    # NOTE(review): the 'previous' value is also read from current_state, so
    # both names always match — sm.previous_state may have been intended;
    # behavior preserved as-is.
    prev = sm.current_state
    return (sd.get(cur, ''), sd.get(prev, ''), cd.get(cur, []), xd.get(cur, []))
# +
# function: setup_processor()
# -
def setup_processor(mgr=None, ent='', cmd=''):
    """Register an '<entity>_command_<cmd>' processor on the SAL manager.

    Returns True on success, False when any argument is missing/empty.
    """
    if not mgr or not isinstance(ent, str) or ent == '' or not isinstance(cmd, str) or cmd == '':
        return False
    mgr.salProcessor('{0:s}_command_{1:s}'.format(ent, cmd))
    return True
# +
# function: thread_code()
# -
def thread_code(entity='', evp=None, smachine=None):
# declare some variables and initialize them
thread_name = threading.currentThread().getName()
thread_id = threading.currentThread()
thread_entity = entity
# did we get input objects?
if evp:
evp.logger.info('{0:s} thread received event handler at address {1:s}'.format(thread_name, hex(id(evp))))
if smachine:
smachine.logger.info('{0:s} thread received smachine entity at address {1:s}'.format(thread_name, hex(id(smachine))))
# get logger
evlog = OcsLogger(entity, thread_name)
evlog.logger.info('{0:s} thread starting up'.format(thread_name))
# get and reset dictionaries
state_dict = ocsEntitySummaryState
cmd_dict = ocsEntitySummaryStateCommands
cfg_dict = ocsEntitySummaryStateConfigurations
cfg_dict[OCS_SUMMARY_STATE_STANDBY] = ['{0:s}-Normal'.format(entity)]
evlog.logger.info('{0:s} thread configs'.format(str(cfg_dict)))
# connect to SAL
mgr = SAL_catchuparchiver()
if not mgr:
evlog.logger.error('{0:s} thread failed to connect to SAL'.format(thread_name))
return
evlog.logger.info('{0:s} thread connected to SAL'.format(thread_name))
# get processor
if not setup_processor(mgr, entity.lower(), command):
return
evlog.logger.info('{0:s} thread processor created'.format(thread_name))
# get payload container
if thread_name == 'abort':
data = catchuparchiver_command_abortC()
elif thread_name == 'disable':
data = catchuparchiver_command_disableC()
elif thread_name == 'enable':
data = catchuparchiver_command_enableC()
elif thread_name == 'enterControl':
data = catchuparchiver_command_enterControlC()
elif thread_name == 'exitControl':
data = catchuparchiver_command_exitControlC()
elif thread_name == 'setvalue':
data = catchuparchiver_command_setvalueC()
elif thread_name == 'standby':
data = catchuparchiver_command_standbyC()
elif thread_name == 'start':
data = catchuparchiver_command_startC()
elif thread_name == 'stop':
data = catchuparchiver_command_stopC()
else:
evlog.logger.error('{0:s} thread payload container error'.format(thread_name))
return
# info
evlog.logger.info('{0:s} thread payload container created'.format(thread_name))
evlog.logger.info('{0:s} thread {1:s} ready'.format(thread_entity, thread_name))
# loop forever
while True:
# accept command
if thread_name == 'abort':
cmdId = mgr.acceptCommand_abort(data)
elif thread_name == 'disable':
cmdId = mgr.acceptCommand_disable(data)
elif thread_name == 'enable':
cmdId = mgr.acceptCommand_enable(data)
elif thread_name == 'enterControl':
cmdId = mgr.acceptCommand_enterControl(data)
elif thread_name == 'exitControl':
cmdId = mgr.acceptCommand_exitControl(data)
elif thread_name == 'standby':
cmdId = mgr.acceptCommand_standby(data)
elif thread_name == 'start':
cmdId = mgr.acceptCommand_start(data)
elif thread_name == 'stop':
cmdId = mgr.acceptCommand_stop(data)
# process command
if cmdId > 0:
evlog.logger.info('{0:s} thread command identifier {1:d}'.format(thread_name, cmdId))
if thread_name == 'abort':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_abort(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_FAULT)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_abort(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'disable':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_disable(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if thread_name not in cmds:
msg = 'Error : Command disallowed in {0:s} state'.format(state_dict.get(smachine._current_state, ''))
retval = mgr.ackCommand_disable(cmdId, SAL__CMD_FAILED, 0, msg)
else:
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_DISABLED)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_disable(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'enable':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_enable(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if thread_name not in cmds:
msg = 'Error : Command disallowed in {0:s} state'.format(state_dict.get(smachine._current_state, ''))
retval = mgr.ackCommand_enable(cmdId, SAL__CMD_FAILED, 0, msg)
else:
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_ENABLED)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_enable(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'enterControl':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_enterControl(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if thread_name not in cmds:
msg = 'Error : Command disallowed in {0:s} state'.format(state_dict.get(smachine._current_state, ''))
retval = mgr.ackCommand_enterControl(cmdId, SAL__CMD_FAILED, 0, msg)
else:
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_STANDBY)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_enterControl(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'exitControl':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_exitControl(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if thread_name not in cmds:
msg = 'Error : Command disallowed in {0:s} state'.format(state_dict.get(smachine._current_state, ''))
retval = mgr.ackCommand_exitControl(cmdId, SAL__CMD_FAILED, 0, msg)
else:
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_OFFLINE)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_exitControl(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'standby':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_standby(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if thread_name not in cmds:
msg = 'Error : Command disallowed in {0:s} state'.format(state_dict.get(smachine._current_state, ''))
retval = mgr.ackCommand_standby(cmdId, SAL__CMD_FAILED, 0, msg)
else:
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_STANDBY)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_standby(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'start':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.configuration)))
smachine.setBusy = True
retval = mgr.ackCommand_start(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
if thread_name not in cmds:
msg = 'Error : Command disallowed in {0:s} state'.format(state_dict.get(smachine._current_state, ''))
retval = mgr.ackCommand_start(cmdId, SAL__CMD_FAILED, 0, msg)
else:
if smachine:
smachine.change_state(smachine._current_state, OCS_SUMMARY_STATE_DISABLED)
if evp:
thread_ocsid = ocs_id(False)
evp.send_event('catchuparchiverEntitySummaryState',
Name=thread_entity,
CurrentState=str(nsta),
PreviousState=str(osta),
Identifier=thread_ocsid,
Timestamp=ocs_mjd_to_iso(thread_ocsid),
Executing=thread_name,
Address=id(thread_id),
CommandsAvailable=str(cmds),
ConfigurationsAvailable=str(cfgs),
priority=SAL__EVENT_INFO)
retval = mgr.ackCommand_start(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
elif thread_name == 'stop':
evlog.logger.info('{0:s} thread received payload {1:s}'.format(thread_name, str(data.state)))
smachine.setBusy = True
retval = mgr.ackCommand_stop(cmdId, SAL__CMD_INPROGRESS, 0, "Ack : OK")
(nsta, osta, cmds, cfgs) = get_state(smachine, state_dict, cmd_dict, cfg_dict)
time.sleep(1)
retval = mgr.ackCommand_stop(cmdId, SAL__CMD_COMPLETE, 0, "Done : OK")
smachine.setBusy = False
evlog.logger.info('{0:s} thread command return value {1:d}'.format(thread_name, retval))
# I think this sleep means it's really polling?!
time.sleep(1)
evlog.logger.info('{0:s} thread {1:s} shutting down'.format(thread_entity, thread_name))
mgr.salShutdown()
return True
# +
# main()
# -
if __name__ == "__main__":
    # Shared entities: SAL event publisher and the component state machine.
    evp = OcsEvents(False)
    smachine = OcsStates()
    # Drive the state machine from UNKNOWN to OFFLINE and mark it live.
    if smachine:
        smachine.change_state(OCS_SUMMARY_STATE_UNKNOWN, OCS_SUMMARY_STATE_OFFLINE)
        smachine._shutdown = False
    # Spawn one listener thread per supported state-transition command;
    # the thread name doubles as the command it services.
    threads = []
    for command in ('abort', 'disable', 'enable', 'enterControl',
                    'exitControl', 'standby', 'start', 'stop'):
        listener = threading.Thread(name=command, target=thread_code,
                                    args=('CatchupArchiver', evp, smachine))
        threads.append(listener)
        listener.start()
|
common.py | # http://inamidst.com/saxo/
# Created by Sean B. Palmer
# You know your code is good when you don't have a generic module
import base64
import collections
import os
import pickle
import signal
import socket
import sys
import threading
# Usage as of 534f8c68:
# b64pickle: client
# b64unpickle: client, scheduler
# error: client, create, script
# exit_cleanly: client, saxo
# populate: client, create
# thread: client, script
def console():
    """Interactive prompt that forwards instructions to the local saxo
    client over its unix domain socket.

    Each input line is split into an instruction and optional arguments;
    the arguments are evaluated as a python tuple, pickled and base64
    encoded, then sent as b"<instruction> <b64(pickle(args))>\n".
    Ctrl-D / Ctrl-C exits the loop.
    """
    # TODO: This can probably be removed
    client_sock = os.path.expanduser("~/.saxo/client.sock")
    # Bug fix: the socket was never closed when the loop exited; a
    # socket is a context manager, so use `with` to guarantee cleanup.
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client:
        client.connect(client_sock)
        while True:
            try: text = input("$ ")
            except (EOFError, KeyboardInterrupt):
                print("")
                print("Quitting...")
                break
            if " " in text:
                instruction, args = text.split(" ", 1)
                if args:
                    # SECURITY: eval() of operator-typed input. Tolerable
                    # only because this is a local debugging console;
                    # never feed it untrusted input.
                    args = eval("(%s,)" % args)
                    args = b64pickle(args)
            else:
                instruction, args = text, b""
            octets = instruction.encode("ascii") + b" " + args
            client.send(octets + b"\n")
def error(short, long=None, err=None, code=1):
    """Print an error report to stderr and exit the process.

    short -- one-line summary, printed as "saxo: error: <short>"
    long  -- optional multi-line elaboration (trailing space stripped)
    err   -- optional exception whose type and message are echoed
    code  -- process exit status passed to sys.exit (default 1)
    """
    print("saxo: error: " + short, file=sys.stderr)
    if long is not None:
        print(long.rstrip(), file=sys.stderr)
    if err is not None:
        if long is not None:
            print("", file=sys.stderr)
        print("This is the error message that python gave:", file=sys.stderr)
        print("", file=sys.stderr)
        # Bug fix: these two lines previously went to stdout while the
        # rest of the report went to stderr, splitting the message
        # across streams (and losing it when stdout was redirected).
        print(" %s" % err.__class__.__name__, file=sys.stderr)
        print(" %s" % err, file=sys.stderr)
    sys.exit(code)
def exit_cleanly():
    """Install SIGINT/SIGTERM handlers that announce the signal and
    terminate the process immediately."""
    def quit(signum, frame):
        print("Exiting cleanly (SIG %s)" % signum)
        try:
            sys.exit()
        finally:
            # Guarantee termination even if SystemExit gets swallowed
            # somewhere up the stack (e.g. by a broad except clause).
            os._exit(0)
    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, quit)
def populate(saxo_path, base):
    """Seed a saxo instance directory *base* with symlinks to the
    plugins and commands shipped in *saxo_path*, record the install
    path, and prune symlinks whose targets have disappeared."""
    # TODO: This is being called twice
    plugins = os.path.join(base, "plugins")
    commands = os.path.join(base, "commands")
    saxo_plugins = os.path.join(saxo_path, "plugins")
    saxo_commands = os.path.join(saxo_path, "commands")
    for directory in (plugins, commands):
        if not os.path.isdir(directory):
            os.mkdir(directory)

    def ensure_link(source, dest):
        # A link that already exists (even a broken one) is left alone.
        try:
            os.symlink(source, dest)
        except FileExistsError:
            pass

    def link_missing(source_dir, dest_dir):
        for name in os.listdir(source_dir):
            dest = os.path.join(dest_dir, name)
            if not (os.path.exists(dest) or os.path.islink(dest)):
                ensure_link(os.path.join(source_dir, name), dest)

    link_missing(saxo_plugins, plugins)
    # Record the saxo install path; presumably read by the commands at
    # run time (the original author also wondered why — see history).
    with open(os.path.join(commands, "saxo.pth"), "w") as f:
        f.write(saxo_path + "\n")
    # Drop the obsolete symlink-style path marker if present.
    old_path_file = os.path.join(commands, ".saxo-path")
    if os.path.islink(old_path_file):
        os.remove(old_path_file)
    link_missing(saxo_commands, commands)
    # Clean up any broken symlinks in both mirrored directories.
    for directory in (plugins, commands):
        for name in os.listdir(directory):
            link = os.path.join(directory, name)
            if not os.path.islink(link):
                continue
            target = os.path.join(directory, os.readlink(link))
            if not os.path.exists(target):
                os.remove(link)
def b64pickle(obj):
    """Serialise *obj* to base64-encoded pickle bytes (wire format for
    the saxo client/scheduler socket protocol)."""
    return base64.b64encode(pickle.dumps(obj))
def b64unpickle(data):
    """Inverse of b64pickle; empty/falsy input yields an empty tuple."""
    if not data:
        return ()
    # SECURITY NOTE: pickle.loads can execute arbitrary code; this must
    # only ever see data produced by the trusted local peer.
    return pickle.loads(base64.b64decode(data))
def thread(target, *args):
    """Start and return a daemon thread running target(*args)."""
    worker = threading.Thread(target=target, args=args, daemon=True)
    worker.start()
    return worker
def tarjan(graph):
    """Tarjan's (1975) strongly-connected-components algorithm.

    *graph* maps each node to an iterable of successors.  Returns a
    list of components (each a list of nodes); because components come
    out in reverse topological order, this doubles as a topological
    sort that is robust to cycles.
    """
    order = {}        # node -> discovery index
    lowlink = {}      # node -> lowest discovery index reachable
    stack = collections.OrderedDict()  # insertion-ordered SCC stack
    components = []

    def visit(node):
        order[node] = lowlink[node] = len(order)
        stack[node] = None
        for succ in graph.get(node, ()):
            if succ not in order:
                visit(succ)
                lowlink[node] = min(lowlink[succ], lowlink[node])
            elif succ in stack:
                # Back-edge to a node still on the stack.
                lowlink[node] = min(lowlink[node], order[succ])
        if lowlink[node] == order[node]:
            # *node* is the root of a component: pop it and everything
            # pushed above it.
            component = []
            member = None
            while member != node:
                member = stack.popitem()[0]
                component.append(member)
            components.append(component)

    for node in graph:
        if node not in order:
            visit(node)
    return components
def tsort(graph):
    """Yield the nodes of *graph* in the (cycle-tolerant) topological
    order produced by tarjan()."""
    for component in tarjan(graph):
        yield from component
|
PySC2_A3C_FullyConv.py | """
PySC2_A3C_AtariNetNew.py
A script for training and running an A3C agent on the PySC2 environment, with reference to DeepMind's paper:
[1] Vinyals, Oriol, et al. "Starcraft II: A new challenge for reinforcement learning." arXiv preprint arXiv:1708.04782 (2017).
Advantage estimation uses generalized advantage estimation from:
[2] Schulman, John, et al. "High-dimensional continuous control using generalized advantage estimation." arXiv preprint arXiv:1506.02438 (2015).
Credit goes to Arthur Juliani for providing for reference an implementation of A3C for the VizDoom environment
https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-8-asynchronous-actor-critic-agents-a3c-c88f72a5e9f2
https://github.com/awjuliani/DeepRL-Agents
This follows the AtariNet implementation described in [1].
The agent takes as input all of the features and outputs a policy across all 524 actions, which makes it generalizable to any of the minigames supplied in SC2LE.
"""
import threading
import multiprocessing
import psutil
import numpy as np
import tensorflow as tf
import scipy.signal
from time import sleep
import os
import json
import pickle
from pysc2.env import sc2_env
from pysc2.env import environment
from pysc2.lib import actions
from pysc2.maps import mini_games
"""
Use the following command to launch Tensorboard:
tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3'
"""
## HELPER FUNCTIONS
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope, to_scope):
    """Build assign ops copying every trainable variable under
    *from_scope* onto the positionally matching variable under
    *to_scope* (used to sync a worker network with 'global')."""
    source_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    return [target.assign(source)
            for source, target in zip(source_vars, target_vars)]
# Processes PySC2 observations
def process_observation(observation, action_spec, observation_spec):
    """Unpack a PySC2 TimeStep into network-ready numpy arrays.

    Returns (reward, nonspatial, minimap, screen, episode_end); each
    feature array carries a leading batch axis of size 1.

    Fix: removed the dead locals `variable_features` and `max_no`,
    which were computed on every step and never used.
    """
    # Episode is over when the environment emits a LAST step.
    episode_end = observation.step_type == environment.StepType.LAST
    reward = observation.reward
    features = observation.observation
    # Non-spatial input: log-scaled player stats plus the raw game loop
    # counter, concatenated into a flat vector of length 12.
    nonspatial_stack = np.log(features['player'].reshape(-1) + 1.)
    nonspatial_stack = np.concatenate((nonspatial_stack, features['game_loop'].reshape(-1)))
    nonspatial_stack = np.expand_dims(nonspatial_stack, axis=0)
    # Minimap feature layers stacked channel-last: (1, H, W, C).
    minimap_stack = np.stack((features['minimap']), axis=2)
    minimap_stack = np.expand_dims(minimap_stack, axis=0)
    # Screen feature layers stacked channel-last: (1, H, W, C).
    screen_stack = np.stack((features['screen']), axis=2)
    screen_stack = np.expand_dims(screen_stack, axis=0)
    return reward, nonspatial_stack, minimap_stack, screen_stack, episode_end
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
    """Return discounted cumulative sums of *x*:
    out[i] = sum_j gamma**j * x[i + j].

    Implemented as an IIR filter over the reversed sequence, which is
    equivalent to the backwards recurrence out[i] = x[i] + gamma*out[i+1].
    """
    reversed_sums = scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)
    return reversed_sums[::-1]
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
    """Return a TF initializer producing gaussian weights rescaled so
    every column has L2 norm *std* (used for policy/value output
    layers, following the A3C reference implementation)."""
    def _initializer(shape, dtype=None, partition_info=None):
        weights = np.random.randn(*shape).astype(np.float32)
        weights *= std / np.linalg.norm(weights, axis=0, keepdims=True)
        return tf.constant(weights)
    return _initializer
def sample_dist(dist):
    """Sample an index from a categorical distribution.

    *dist* has shape (1, n) with rows summing to 1.  The original drew
    a probability *value* with np.random.choice and then located it via
    argmax, which returns the wrong (first-matching) index whenever two
    entries share the same probability.  Sampling the index directly
    fixes that; for distributions with distinct entries it consumes the
    RNG identically, so seeded runs are unchanged.
    """
    return np.random.choice(len(dist[0]), p=dist[0])
## ACTOR-CRITIC NETWORK
class AC_Network():
    """Actor-critic network (Atari-net style, [1] Section 4.3).

    One instance is built per variable scope: 'global' holds the master
    parameters only; every worker scope additionally builds the loss and
    gradient ops that apply its *local* gradients to the 'global'
    variables (the A3C update).
    """
    def __init__(self, scope, trainer, action_spec, observation_spec):
        with tf.variable_scope(scope):
            # Architecture here follows Atari-net Agent described in [1] Section 4.3
            # Input sizes are hard-coded for 64x64 SC2 feature layers
            # with 12 non-spatial features, 7 minimap and 17 screen channels.
            nonspatial_size = 12
            minimap_channels = 7
            screen_channels = 17
            self.inputs_nonspatial = tf.placeholder(shape=[None,nonspatial_size], dtype=tf.float32)
            self.inputs_spatial_minimap = tf.placeholder(shape=[None,64,64,minimap_channels], dtype=tf.float32)
            self.inputs_spatial_screen = tf.placeholder(shape=[None,64,64,screen_channels], dtype=tf.float32)
            # Non-spatial branch: a single tanh dense layer.
            self.nonspatial_dense = tf.layers.dense(
                inputs=self.inputs_nonspatial,
                units=32,
                activation=tf.tanh)
            # Screen branch: two conv layers (16x5x5 then 32x3x3).
            self.screen_conv1 = tf.layers.conv2d(
                inputs=self.inputs_spatial_screen,
                filters=16,
                kernel_size=[5,5],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            self.screen_conv2 = tf.layers.conv2d(
                inputs=self.screen_conv1,
                filters=32,
                kernel_size=[3,3],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            # Minimap branch: same shape as the screen branch.
            self.minimap_conv1 = tf.layers.conv2d(
                inputs=self.inputs_spatial_minimap,
                filters=16,
                kernel_size=[5,5],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            self.minimap_conv2 = tf.layers.conv2d(
                inputs=self.minimap_conv1,
                filters=32,
                kernel_size=[3,3],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            # Flattened lengths of the conv outputs (batch dim excluded).
            screen_output_length = 1
            for dim in self.screen_conv2.get_shape().as_list()[1:]:
                screen_output_length *= dim
            minimap_output_length = 1
            for dim in self.minimap_conv2.get_shape().as_list()[1:]:
                minimap_output_length *= dim
            # Shared 256-unit latent vector over all three branches.
            self.latent_vector_nonspatial = tf.layers.dense(
                inputs=tf.concat([self.nonspatial_dense, tf.reshape(self.screen_conv2,shape=[-1,screen_output_length]), tf.reshape(self.minimap_conv2,shape=[-1,minimap_output_length])], axis=1),
                units=256,
                activation=tf.nn.relu)
            # Output layers for policy and value estimations
            # 12 policy networks for base actions and arguments
            #   - All modeled independently
            #   - Spatial arguments have the x and y values modeled independently as well
            # 1 value network
            spatial_arguments = ['screen', 'minimap', 'screen2']
            # Policy over the 524 base action functions.
            self.policy_base_actions = tf.layers.dense(
                inputs=self.latent_vector_nonspatial,
                units=len(action_spec.functions),
                activation=tf.nn.softmax,
                kernel_initializer=normalized_columns_initializer(0.01))
            # One softmax head per (non-spatial argument, dimension).
            self.policy_arg_nonspatial = dict()
            for arg in action_spec.types:
                if arg.name not in spatial_arguments:
                    self.policy_arg_nonspatial[arg.name] = dict()
                    for dim, size in enumerate(arg.sizes):
                        # Binary args get a larger initializer scale.
                        if size == 2:
                            self.policy_arg_nonspatial[arg.name][dim] = tf.layers.dense(
                                inputs=self.latent_vector_nonspatial,
                                units=size,
                                activation=tf.nn.softmax,
                                kernel_initializer=normalized_columns_initializer(1.0))
                        else:
                            self.policy_arg_nonspatial[arg.name][dim] = tf.layers.dense(
                                inputs=self.latent_vector_nonspatial,
                                units=size,
                                activation=tf.nn.softmax,
                                kernel_initializer=normalized_columns_initializer(0.01))
            # Spatial argument heads: 1x1 conv over the concatenated
            # screen/minimap feature maps, softmax over all 64*64 cells.
            self.policy_arg_spatial = dict()
            self.latent_vector_spatial = dict()
            for arg in spatial_arguments:
                self.latent_vector_spatial[arg] = tf.layers.conv2d(
                    inputs=tf.concat([self.screen_conv2, self.minimap_conv2], axis=3),
                    filters=1,
                    kernel_size=[1,1],
                    strides=[1,1],
                    padding='same',
                    activation=None)
                self.policy_arg_spatial[arg] = tf.nn.softmax(tf.reshape(self.latent_vector_spatial[arg], shape=[-1, 64 * 64]))
            # Scalar state-value estimate.
            self.value = tf.layers.dense(
                inputs=self.latent_vector_nonspatial,
                units=1,
                kernel_initializer=normalized_columns_initializer(1.0))
            # Only the worker network need ops for loss functions and gradient updating.
            #   calculates the losses
            #   self.gradients - gradients of loss wrt local_vars
            #   applies the gradients to update the global network
            if scope != 'global':
                # Placeholders for chosen actions (one-hot encoded).
                # Note: a -1 action index one-hots to the zero vector,
                # which is how unused arguments are masked out of the loss.
                self.actions_base = tf.placeholder(shape=[None], dtype=tf.int32)
                self.actions_onehot_base = tf.one_hot(self.actions_base, 524, dtype=tf.float32)
                self.actions_arg = dict()
                self.actions_onehot_arg = dict()
                for arg in action_spec.types:
                    if arg.name not in spatial_arguments:
                        arg_name = arg.name
                        self.actions_arg[arg_name] = dict()
                        self.actions_onehot_arg[arg_name] = dict()
                        for dim, size in enumerate(arg.sizes):
                            self.actions_arg[arg_name][dim] = tf.placeholder(shape=[None], dtype=tf.int32)
                            self.actions_onehot_arg[arg_name][dim] = tf.one_hot(self.actions_arg[arg_name][dim], size, dtype=tf.float32)
                self.actions_arg_spatial = dict()
                self.actions_onehot_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.actions_arg_spatial[arg] = tf.placeholder(shape=[None],dtype=tf.int32)
                    self.actions_onehot_arg_spatial[arg] = tf.one_hot(self.actions_arg_spatial[arg], 64 * 64,dtype=tf.float32)
                # Targets: discounted returns and GAE advantages.
                self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
                self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
                # Probability the policy assigned to the action taken.
                self.responsible_outputs_base = tf.reduce_sum(self.policy_base_actions * self.actions_onehot_base, [1])
                self.responsible_outputs_arg = dict()
                for arg_name in self.policy_arg_nonspatial:
                    self.responsible_outputs_arg[arg_name] = dict()
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.responsible_outputs_arg[arg_name][dim] = tf.reduce_sum(self.policy_arg_nonspatial[arg_name][dim] * self.actions_onehot_arg[arg_name][dim], [1])
                self.responsible_outputs_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.responsible_outputs_arg_spatial[arg] = tf.reduce_sum(self.policy_arg_spatial[arg] * self.actions_onehot_arg_spatial[arg], [1])
                # Loss functions
                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
                self.log_policy_base_actions = tf.log(tf.clip_by_value(self.policy_base_actions, 1e-20, 1.0)) # avoid NaN with clipping when value in policy becomes zero
                # Entropy bonus, summed across every policy head.
                self.entropy_base = - tf.reduce_sum(self.policy_base_actions * self.log_policy_base_actions)
                self.entropy_arg = dict()
                for arg_name in self.policy_arg_nonspatial:
                    self.entropy_arg[arg_name] = dict()
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.entropy_arg[arg_name][dim] = - tf.reduce_sum(self.policy_arg_nonspatial[arg_name][dim] * tf.log(tf.clip_by_value(self.policy_arg_nonspatial[arg_name][dim], 1e-20, 1.0)))
                self.entropy_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.entropy_arg_spatial[arg] = - tf.reduce_sum(self.policy_arg_spatial[arg] * tf.log(tf.clip_by_value(self.policy_arg_spatial[arg], 1e-20, 1.)))
                self.entropy = self.entropy_base
                for arg_name in self.policy_arg_nonspatial:
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.entropy += self.entropy_arg[arg_name][dim]
                for arg in spatial_arguments:
                    self.entropy += self.entropy_arg_spatial[arg]
                # Policy-gradient loss, summed across every policy head.
                self.policy_loss_base = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_base, 1e-20, 1.0)) * self.advantages)
                self.policy_loss_arg = dict()
                for arg_name in self.policy_arg_nonspatial:
                    self.policy_loss_arg[arg_name] = dict()
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.policy_loss_arg[arg_name][dim] = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg[arg_name][dim], 1e-20, 1.0)) * self.advantages)
                self.policy_loss_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.policy_loss_arg_spatial[arg] = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_spatial[arg], 1e-20, 1.0))*self.advantages)
                self.policy_loss = self.policy_loss_base
                for arg_name in self.policy_arg_nonspatial:
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.policy_loss += self.policy_loss_arg[arg_name][dim]
                for arg in spatial_arguments:
                    self.policy_loss += self.policy_loss_arg_spatial[arg]
                # Combined loss with a 0.001 entropy-bonus coefficient.
                self.loss = self.value_loss + self.policy_loss - self.entropy * 0.001
                # Get gradients from local network using local losses
                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                self.gradients = tf.gradients(self.loss,local_vars)
                self.var_norms = tf.global_norm(local_vars)
                grads, self.grad_norms = tf.clip_by_global_norm(self.gradients,40.0)
                # Apply local gradients to global network
                global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
                self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
## WORKER AGENT
class Worker():
    """One asynchronous A3C worker: owns a local AC_Network copy and a
    dedicated SC2 environment, collects rollouts, and pushes gradients
    to the 'global' network.
    """
    def __init__(self, name, trainer, model_path, global_episodes, global_steps, map_name, action_spec, observation_spec):
        self.name = "worker_" + str(name)
        self.number = name
        self.model_path = model_path
        self.trainer = trainer
        # Shared counters living on the global graph; assign_add ops are
        # run once per episode / per environment step respectively.
        self.global_episodes = global_episodes
        self.increment_global_episodes = self.global_episodes.assign_add(1)
        self.global_steps = global_steps
        self.increment_global_steps = self.global_steps.assign_add(1)
        # Per-worker episode statistics for TensorBoard summaries.
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_mean_values = []
        self.summary_writer = tf.summary.FileWriter("train_" + str(self.number))
        self.action_spec = action_spec
        self.observation_spec = observation_spec
        #Create the local copy of the network and the tensorflow op to copy global paramters to local network
        self.local_AC = AC_Network(self.name, trainer, action_spec, observation_spec)
        self.update_local_ops = update_target_graph('global', self.name)
        print('Initializing environment #{}...'.format(self.number))
        self.env = sc2_env.SC2Env(map_name=map_name)
    def train(self, rollout, sess, gamma, bootstrap_value):
        """Run one gradient update from a rollout of experience tuples.

        Each rollout row is
        [s_minimap, s_screen, s_nonspatial, action_id, arg_sample,
         arg_sample_spatial_abs, r, s1_minimap, s1_screen, s1_nonspatial,
         episode_end, value]; *bootstrap_value* estimates the return
        beyond the rollout.  Returns per-step mean losses and norms.
        """
        rollout = np.array(rollout)
        obs_minimap = rollout[:,0]
        obs_screen = rollout[:,1]
        obs_nonspatial = rollout[:,2]
        actions_base = rollout[:,3]
        actions_args = rollout[:,4]
        actions_args_spatial = rollout[:,5]
        rewards = rollout[:,6]
        next_obs_minimap = rollout[:,7]
        next_obs_screen = rollout[:,8]
        next_obs_nonspatial = rollout[:,9]
        values = rollout[:,11]
        # Re-group the per-step argument dicts into per-argument lists
        # so they can be fed as batched placeholder values.
        actions_arg_stack = dict()
        for actions_arg in actions_args:
            for arg_name in actions_arg:
                if arg_name not in actions_arg_stack:
                    actions_arg_stack[arg_name] = dict()
                for dim in actions_arg[arg_name]:
                    if dim not in actions_arg_stack[arg_name]:
                        actions_arg_stack[arg_name][dim] = [actions_arg[arg_name][dim]]
                    else:
                        actions_arg_stack[arg_name][dim].append(actions_arg[arg_name][dim])
        actions_arg_spatial_stack = dict()
        for actions_arg_spatial in actions_args_spatial:
            for arg_name,arg_value in actions_arg_spatial.items():
                if arg_name not in actions_arg_spatial_stack:
                    actions_arg_spatial_stack[arg_name] = []
                actions_arg_spatial_stack[arg_name].append(arg_value)
        # Here we take the rewards and values from the rollout, and use them to calculate the advantage and discounted returns.
        # The advantage function uses generalized advantage estimation from [2]
        # (with the lambda parameter implicitly equal to gamma here).
        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
        discounted_rewards = discount(self.rewards_plus,gamma)[:-1]
        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
        advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
        advantages = discount(advantages,gamma)
        # Update the global network using gradients from loss
        # Generate network statistics to periodically save
        feed_dict = {self.local_AC.target_v:discounted_rewards,
            self.local_AC.inputs_spatial_screen:np.stack(obs_screen).reshape(-1,64,64,17),
            self.local_AC.inputs_spatial_minimap:np.stack(obs_minimap).reshape(-1,64,64,7),
            self.local_AC.inputs_nonspatial:np.stack(obs_nonspatial).reshape(-1,12),
            self.local_AC.actions_base:actions_base,
            self.local_AC.advantages:advantages}
        for arg_name in actions_arg_stack:
            for dim in actions_arg_stack[arg_name]:
                feed_dict[self.local_AC.actions_arg[arg_name][dim]] = actions_arg_stack[arg_name][dim]
        for arg_name, value in actions_arg_spatial_stack.items():
            feed_dict[self.local_AC.actions_arg_spatial[arg_name]] = value
        v_l,p_l,e_l,g_n,v_n, _ = sess.run([self.local_AC.value_loss,
            self.local_AC.policy_loss,
            self.local_AC.entropy,
            self.local_AC.grad_norms,
            self.local_AC.var_norms,
            self.local_AC.apply_grads],
            feed_dict=feed_dict)
        return v_l / len(rollout), p_l / len(rollout), e_l / len(rollout), g_n,v_n
    def work(self,max_episode_length,gamma,sess,coord,saver):
        """Main worker loop: interact with the environment, train every
        40 steps and at episode end, and emit periodic summaries."""
        episode_count = sess.run(self.global_episodes)
        total_steps = 0
        print ("Starting worker " + str(self.number))
        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                # Download copy of parameters from global network
                sess.run(self.update_local_ops)
                episode_buffer = []
                episode_values = []
                episode_frames = []
                episode_reward = 0
                episode_step_count = 0
                # Start new episode
                obs = self.env.reset()
                episode_frames.append(obs[0])
                reward, nonspatial_stack, minimap_stack, screen_stack, episode_end = process_observation(obs[0], self.action_spec, self.observation_spec)
                s_minimap = minimap_stack
                s_screen = screen_stack
                s_nonspatial = nonspatial_stack
                while not episode_end:
                    # Take an action using distributions from policy networks' outputs.
                    base_action_dist, arg_spatial_dist, arg_nonspatial_dist, v = sess.run([
                        self.local_AC.policy_base_actions,
                        self.local_AC.policy_arg_spatial,
                        self.local_AC.policy_arg_nonspatial,
                        self.local_AC.value],
                        feed_dict={
                            self.local_AC.inputs_spatial_minimap: minimap_stack,
                            self.local_AC.inputs_spatial_screen: screen_stack,
                            self.local_AC.inputs_nonspatial: nonspatial_stack})
                    # Apply filter to remove unavailable actions and then renormalize
                    # (the 1e-20 floor keeps the sum nonzero even if every
                    # available action had zero probability).
                    base_action_dist[0] += 1e-20
                    for action_id, action in enumerate(base_action_dist[0]):
                        if action_id not in obs[0].observation['available_actions']:
                            base_action_dist[0][action_id] = 0.
                    base_action_dist[0] /= np.sum(base_action_dist[0])
                    action_id = sample_dist(base_action_dist)
                    # Sample every argument head (even unused ones).
                    arg_sample = dict()
                    for arg_name in arg_nonspatial_dist:
                        arg_sample[arg_name] = dict()
                        for dim in arg_nonspatial_dist[arg_name]:
                            arg_sample[arg_name][dim] = sample_dist(arg_nonspatial_dist[arg_name][dim])
                    arg_sample_spatial = dict()
                    arg_sample_spatial_abs = dict()
                    for arg in arg_spatial_dist:
                        arg_sample_spatial_abs[arg] = sample_dist(arg_spatial_dist[arg])
                        # Flat 64*64 cell index -> (x, y) coordinates.
                        arg_sample_spatial[arg] = [arg_sample_spatial_abs[arg] % 64, arg_sample_spatial_abs[arg] / 64]
                    # Assemble the argument list required by action_id.
                    arguments = []
                    spatial_arguments = ['screen', 'minimap', 'screen2']
                    for argument in self.action_spec.functions[action_id].args:
                        name = argument.name
                        if name not in spatial_arguments:
                            argument_value = []
                            for dim, size in enumerate(argument.sizes):
                                argument_value.append(arg_sample[name][dim])
                        else:
                            argument_value = arg_sample_spatial[name]
                        arguments.append(argument_value)
                    # Set unused arguments to -1 so that they won't be updated in the training
                    # See documentation for tf.one_hot
                    for arg_name, argument in arg_sample.items():
                        if arg_name not in self.action_spec.functions[action_id].args:
                            for dim in argument:
                                arg_sample[arg_name][dim] = -1
                    for arg_name, arg in arg_sample_spatial_abs.items():
                        if arg_name not in self.action_spec.functions[action_id].args:
                            arg_sample_spatial_abs[arg_name] = -1
                    a = actions.FunctionCall(action_id, arguments)
                    obs = self.env.step(actions=[a])
                    r, nonspatial_stack, minimap_stack, screen_stack, episode_end = process_observation(obs[0], self.action_spec, self.observation_spec)
                    if not episode_end:
                        episode_frames.append(obs[0])
                        s1_minimap = minimap_stack
                        s1_screen = screen_stack
                        s1_nonspatial = nonspatial_stack
                    else:
                        # Terminal step: reuse the previous state.
                        s1_minimap = s_minimap
                        s1_screen = s_screen
                        s1_nonspatial = s_nonspatial
                    # Append latest state to buffer
                    episode_buffer.append([s_minimap, s_screen, s_nonspatial,action_id,arg_sample,arg_sample_spatial_abs,r,s1_minimap, s1_screen, s1_nonspatial,episode_end,v[0,0]])
                    episode_values.append(v[0,0])
                    episode_reward += r
                    s_minimap = s1_minimap
                    s_screen = s1_screen
                    s_nonspatial = s1_nonspatial
                    sess.run(self.increment_global_steps)
                    total_steps += 1
                    episode_step_count += 1
                    # If the episode hasn't ended, but the experience buffer is full, then we make an update step using that experience rollout.
                    if len(episode_buffer) == 40 and not episode_end and episode_step_count != max_episode_length - 1:
                        # Since we don't know what the true final return is, we "bootstrap" from our current value estimation.
                        v1 = sess.run(self.local_AC.value,
                            feed_dict={self.local_AC.inputs_spatial_minimap: minimap_stack, self.local_AC.inputs_spatial_screen: screen_stack,self.local_AC.inputs_nonspatial: nonspatial_stack})[0,0]
                        v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,v1)
                        episode_buffer = []
                        sess.run(self.update_local_ops)
                    if episode_end:
                        break
                # ---- episode finished: record stats ----
                self.episode_rewards.append(episode_reward)
                self.episode_lengths.append(episode_step_count)
                self.episode_mean_values.append(np.mean(episode_values))
                episode_count += 1
                # Use the environment's cumulative score as the episode reward.
                episode_reward = obs[0].observation['score_cumulative'][0]
                global _max_score, _running_avg_score
                if _max_score < episode_reward:
                    _max_score = episode_reward
                # Exponential moving average over ~100 episodes.
                _running_avg_score = (2.0 / 101) * (episode_reward - _running_avg_score) + _running_avg_score
                print("{} Step #{} Episode #{} Reward: {}".format(self.name, total_steps, episode_count, episode_reward))
                print("Total Steps: {}\tTotal Episodes: {}\tMax Score: {}\tAvg Score: {}".format(sess.run(self.global_steps), sess.run(self.global_episodes), _max_score, _running_avg_score))
                # Update the network using the episode buffer at the end of the episode.
                if len(episode_buffer) != 0:
                    v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,0.)
                # Every 50 episodes: save the model (worker_0 only) and
                # write TensorBoard summaries of recent performance.
                if episode_count % 50 == 0 and episode_count != 0:
                    if episode_count % 50 == 0 and self.name == 'worker_0':
                        saver.save(sess,self.model_path+'/model-'+str(episode_count)+'.cptk')
                        print ("Saved Model")
                    mean_reward = np.mean(self.episode_rewards[-50:])
                    mean_length = np.mean(self.episode_lengths[-50:])
                    mean_value = np.mean(self.episode_mean_values[-50:])
                    summary = tf.Summary()
                    summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
                    summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
                    summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
                    summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
                    summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
                    summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
                    summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
                    summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
                    self.summary_writer.add_summary(summary, episode_count)
                    self.summary_writer.flush()
                sess.run(self.increment_global_episodes)
def main():
    """Set up the global A3C graph, spawn one Worker per agent, and run
    training until the coordinator stops.

    Fixes:
    - The worker-launch lambda captured the loop variable `worker` by
      reference (late binding), so a slow-starting thread could run the
      wrong worker; binding `target=worker.work` with explicit args
      removes the race entirely.
    - The two throwaway SC2Env instances used only to fetch the action
      and observation specs were never closed, leaking SC2 processes;
      one environment is now reused and closed.
    """
    max_episode_length = 300
    gamma = .99 # discount rate for advantage estimation and reward discounting
    load_model = FLAGS.load_model
    model_path = './model'
    map_name = FLAGS.map_name
    assert map_name in mini_games.mini_games
    # Cross-worker score statistics (updated inside Worker.work).
    global _max_score, _running_avg_score
    _max_score = 0
    _running_avg_score = 0
    # One temporary environment supplies both specs, then is closed.
    print('Initializing temporary environment to retrive action_spec...')
    spec_env = sc2_env.SC2Env(map_name=map_name)
    action_spec = spec_env.action_spec()
    print('Initializing temporary environment to retrive observation_spec...')
    observation_spec = spec_env.observation_spec()
    spec_env.close()
    tf.reset_default_graph()
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    with tf.device("/cpu:0"):
        global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
        global_steps = tf.Variable(0, dtype=tf.int32, name='global_steps', trainable=False)
        trainer = tf.train.AdamOptimizer(learning_rate=3e-5)
        master_network = AC_Network('global', None, action_spec, observation_spec) # Generate global network
        if FLAGS.n_agents < 1:
            num_workers = psutil.cpu_count() # Set workers to number of available CPU threads
        else:
            num_workers = FLAGS.n_agents
        workers = []
        # Create worker classes
        for i in range(num_workers):
            workers.append(Worker(i, trainer, model_path, global_episodes, global_steps, map_name, action_spec, observation_spec))
        saver = tf.train.Saver(max_to_keep=5)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if load_model == True:
            print ('Loading Model...')
            ckpt = tf.train.get_checkpoint_state(model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sess.run(tf.global_variables_initializer())
        # This is where the asynchronous magic happens.
        # Start the "work" process for each worker in a separate thread.
        worker_threads = []
        for worker in workers:
            # Bind the bound method directly (no late-binding lambda).
            t = threading.Thread(target=worker.work,
                                 args=(max_episode_length, gamma, sess, coord, saver))
            t.start()
            sleep(0.5)
            sleep(1.5)
            worker_threads.append(t)
        coord.join(worker_threads)
if __name__ == '__main__':
    import sys
    from absl import flags
    # Command-line configuration (absl positional form: name, default, help).
    flags.DEFINE_string(
        "map_name",
        "DefeatRoaches",
        "Name of the map/minigame")
    flags.DEFINE_integer(
        "n_agents",
        0,
        "Number of agents; passing anything less than 1 will default to number of available CPU threads")
    flags.DEFINE_boolean(
        "load_model",
        False,
        "Load a saved model")
    FLAGS = flags.FLAGS
    FLAGS(sys.argv)  # parse argv before main() reads the flags
    main()
|
simulate.py | from contextlib import contextmanager
import multiprocessing as mp
import subprocess
import pickle
import json
import tqdm
import time
import sys
import os
import re
import numpy as np
from simulator import get_simulator
from pscan import walk_space, organize_pscans
from config import maxseed, workpath
import mpi
def mp_run(arg):
    """Multiprocessing helper: unpack a (run, seed, kwargs) triple and
    invoke run(seed, **kwargs).  Kept module-level so it is picklable
    by multiprocessing pools."""
    simulator, seed, kwargs = arg
    return simulator(seed, **kwargs)
def worker(n, in_q, out_q, seed, system, processing):
    """Queue-driven simulation worker.

    Builds a simulator for `system`, then services ('run', l, location,
    batchsize) messages from `in_q` — replying ('ran', l, batch) on
    `out_q` — until a ('halt',) message arrives. Unrecognized messages
    are printed and ignored.
    """
    batch_targets = system.get('targets', [])
    np.random.seed(seed)
    run = get_simulator(system)
    while True:
        msg = in_q.get()
        kind = msg[0]
        if kind == 'halt':
            break
        if kind == 'run':
            l, location, batchsize = msg[1:4]
            # One fresh seed per run in the batch.
            seeds = np.random.randint(0, maxseed, size=(batchsize, ), dtype=int)
            batch = np.array([run(s, **location) for s in seeds])
            if processing:
                batch = [p(location, batch_targets, batch, t) for p, t in processing]
            out_q.put(('ran', l, batch))
        else:
            print('msg (%d):' % n, msg)
def simulate(system, processing=[], batchsize=1, axes=None,
             n_workers=4):
    """Run batches of simulations, optionally over a parameter grid.

    Parameters
    ----------
    system : dict describing the system (passed to get_simulator by workers).
    processing : list of (callable, targets) pairs applied to each raw batch
        by the worker that produced it.
    batchsize : number of independently-seeded runs per parameter location.
    axes : optional sequence of (axis_name, values) pairs; when given, every
        location produced by walk_space(axes) is simulated.
    n_workers : number of worker processes to spawn.

    Returns
    -------
    (pspace, pscans) as produced by organize_pscans.
    """
    np.random.seed(0)
    seeds = np.random.randint(0, maxseed, size=(n_workers, ), dtype=int)
    workers, idle, busy = [], [], []
    for n, s in enumerate(seeds):
        iq, oq = mp.Queue(), mp.Queue()
        p = mp.Process(target=worker, args=(n, iq, oq, s, system, processing))
        p.start()
        workers.append((iq, oq, p))
        idle.append(n)
    if axes:
        locations, trajectory = walk_space(axes)
        remaining = len(trajectory)
        data = [None] * remaining
        with tqdm.tqdm(total=remaining) as pbar:
            while remaining:
                if idle and trajectory:
                    # Hand the next parameter location to an idle worker.
                    n = idle.pop(0)
                    iq, oq, p = workers[n]
                    l, loc = trajectory.pop(0)
                    iq.put(('run', l, loc, batchsize))
                    busy.append(n)
                elif busy:
                    # Poll busy workers for one finished batch.
                    for n in busy:
                        iq, oq, p = workers[n]
                        if not oq.empty():
                            msg = oq.get()
                            if msg[0] == 'ran':
                                l, batch = msg[1], msg[2]
                                data[l] = batch
                                remaining -= 1
                                pbar.update(1)
                                idle.append(n)
                                busy.remove(n)
                                break
                    else:
                        # No worker had output yet; back off briefly.
                        time.sleep(1)
    else:
        # Bug fix: this branch previously referenced undefined names
        # (`run`, `pool`, `targets`) and always crashed with NameError.
        # Route a single empty-location batch through worker 0 so seeding
        # and `processing` are applied exactly as in the scanned case.
        locations = {}
        iq, oq, p = workers[0]
        iq.put(('run', 0, {}, batchsize))
        msg = oq.get()
        data = [msg[2]]
    for iq, oq, p in workers:
        iq.put(('halt', ))
    for iq, oq, p in workers:
        p.join()
    pspace, pscans = organize_pscans(locations, data, len(processing))
    return pspace, pscans
@contextmanager
def progress_bars(n_locations):
    """Context manager yielding a line handler that maps cluster log lines
    onto a tqdm progress bar.

    Bug fix: the worker count parsed from the "dispatch ... start" line was
    previously stored in a local variable of the handler and therefore lost
    between calls (every description showed -1 workers). It is now kept in
    a holder shared across calls.
    """
    dispatcher_start_re = re.compile(r'^dispatch \d+ start: \d+ workers$')
    worker_start_re = re.compile(r'^worker \d+ start$')
    setting_up_re = re.compile(r'^worker \d+ setting up$')
    setup_re = re.compile(r'^worker \d+ ready to simulate$')
    update_re = re.compile(r'^worker \d+ ran location \d+$')
    update_bar = tqdm.tqdm(total=n_locations, desc='Scanning Parameters')
    # Mutable holder so the count survives across handler calls.
    worker_count = [-1]
    def handle_input_line(l, n_workers=-1):
        # Allow an explicit override (preserves the old optional parameter).
        if n_workers != -1:
            worker_count[0] = n_workers
        if update_re.match(l):
            r = l.split()[1]
            update_bar.set_description(
                'worker %s / %d ran location' % (r, worker_count[0]))
            update_bar.update(1)
        elif dispatcher_start_re.match(l):
            worker_count[0] = int(l.split()[3])
        elif worker_start_re.match(l):
            r = l.split()[1]
            update_bar.set_description(
                'started worker %s / %d' % (r, worker_count[0]))
        elif setting_up_re.match(l):
            r = l.split()[1]
            update_bar.set_description(
                'worker %s / %d setting up' % (r, worker_count[0]))
        elif setup_re.match(l):
            r = l.split()[1]
            # Message fix: was the ungrammatical "setup up".
            update_bar.set_description(
                'worker %s / %d set up' % (r, worker_count[0]))
        else:
            # Unrecognized lines are passed through verbatim.
            print(l, end='')
    try:
        yield handle_input_line
    finally:
        update_bar.close()
def mpi_simulate(system, processing=[], batchsize=1, axes=None,
                 n_workers=8, hostfile=None):
    """Run a parameter scan under MPI via the external `cluster.py` launcher.

    The run specification is serialized to JSON under `workpath`, `mpiexec`
    is spawned, its stdout is streamed through progress_bars(), and the
    pickled results are read back from disk.

    Parameters
    ----------
    system : dict describing the system to simulate.
    processing : list of (callable, targets) pairs; only the callables'
        (module, name) is serialized — cluster.py re-imports them.
    batchsize : runs per parameter location.
    axes : sequence of (axis_name, values) pairs defining the scan grid.
    n_workers : MPI rank count when no hostfile is given.
    hostfile : optional MPI hostfile; when given, ranks come from it.

    Returns
    -------
    (pspace, pscans) as produced by organize_pscans.

    Raises
    ------
    subprocess.CalledProcessError : if the mpiexec child exits non-zero.
    """
    mpirun_spec = {
        'seed': 0,
        'system': system,
        # Callables are not JSON-serializable; ship importable names instead.
        'processing': [((p.__module__, p.__name__), t) for p, t in processing],
        'batchsize': batchsize,
        'axes': tuple((a, tuple(v)) for a, v in axes),
    }
    mpirun_path = os.path.join(workpath, 'run.json')
    mpiout_path = os.path.join(workpath, 'run.pkl')
    os.makedirs(os.path.dirname(mpirun_path), exist_ok=True)
    with open(mpirun_path, 'w') as f:
        f.write(json.dumps(mpirun_spec, indent=4))
    if hostfile:
        mpiconfig = '--nooversubscribe --hostfile %s' % hostfile
    else:
        mpiconfig = '-n %d' % n_workers
    mpiargs = (mpiconfig, sys.executable, 'cluster.py',
               mpirun_path, mpiout_path)
    cmd = 'mpiexec %s %s %s %s %s' % mpiargs
    # Bug fix: total grid size is just the product of the axis lengths;
    # np.cumprod(...)[-1] raised IndexError for empty `axes`.
    n_locations = int(np.prod([len(v) for a, v in axes]))
    with progress_bars(n_locations) as line_handler:
        mpiprocess = subprocess.Popen(cmd,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            shell=True, universal_newlines=True)
        for line in iter(mpiprocess.stdout.readline, ''):
            line_handler(line)
        mpiprocess.stdout.close()
        return_code = mpiprocess.wait()
        if return_code:
            raise subprocess.CalledProcessError(return_code, cmd)
    print('loading output data...')
    with open(mpiout_path, 'rb') as f:
        locations, data = pickle.loads(f.read())
    print('loaded output data')
    pspace, pscans = organize_pscans(locations, data, len(processing))
    return pspace, pscans
|
__init__.py | from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
List,
Type,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import (
DatetimeArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
# Default dimensions for the random test frames/series built below.
_N = 30
_K = 4

# Dtype groupings used to parametrize tests across numpy and pandas
# ("EA" = pandas extension-array / nullable dtypes).
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: List[Dtype] = [bool, "bool"]
BYTES_DTYPES: List[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: List[Dtype] = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)
# Distinct "missing value" sentinels recognised in tests.
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
# Matches only the empty string.
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode: warning categories toggled by PANDAS_TESTING_MODE.
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Turn on 'always' filters for the testing-mode warning categories
    when the PANDAS_TESTING_MODE environment variable requests it."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        for category in _testing_mode_warnings:
            warnings.simplefilter("always", category)
def reset_testing_mode():
    """Revert the testing-mode warning filters to 'ignore' when the
    PANDAS_TESTING_MODE environment variable requests deprecation mode."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        for category in _testing_mode_warnings:
            warnings.simplefilter("ignore", category)
# Apply the testing-mode warning filters once at import time.
set_testing_mode()
def reset_display_options():
    """
    Reset the display options for printing and representing objects.

    Resets every ``display.*`` pandas option to its default, silently.
    """
    pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """Return True when arr1 and arr2 hold the same set of unique elements."""
    return set(arr1) == set(arr2)
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
    transpose : bool, default True
        Only relevant when box_cls is DataFrame; see inline comment.

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.array:
        expected = pd.array(expected)
    elif box_cls is pd.Index:
        expected = pd.Index(expected)
    elif box_cls is pd.Series:
        expected = pd.Series(expected)
    elif box_cls is pd.DataFrame:
        # A Series-to-frame conversion yields a single-column frame.
        expected = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length. But convert to two rows to avoid
            # single-row special cases in datetime arithmetic
            expected = expected.T
            expected = pd.concat([expected] * 2, ignore_index=True)
    elif box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        expected = period_array(expected)
    elif box_cls is DatetimeArray:
        expected = DatetimeArray(expected)
    elif box_cls is TimedeltaArray:
        expected = TimedeltaArray(expected)
    elif box_cls is np.ndarray:
        expected = np.array(expected)
    elif box_cls is to_array:
        # to_array (defined below) dispatches on dtype.
        expected = to_array(expected)
    else:
        raise NotImplementedError(box_cls)
    return expected
def to_array(obj):
    """Convert obj to the matching pandas ExtensionArray, else a plain
    ndarray (temporary implementation until pd.array covers these cases)."""
    dtype = getattr(obj, "dtype", None)
    if is_period_dtype(dtype):
        return period_array(obj)
    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
    """Return the first k uppercase ASCII letters as a single string."""
    return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Return an Index of k random 10-character strings."""
    return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return an Index of k random 10-character unicode strings."""
    return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """Make a length-k CategoricalIndex drawn from n random categories."""
    x = rands_array(nchars=4, size=n)
    # Cycle through the n categories so all are represented when k >= n.
    return CategoricalIndex(
        Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
    )
def makeIntervalIndex(k=10, name=None, **kwargs):
    """Make a length-k IntervalIndex with equal-width breaks over [0, 100]."""
    x = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
    """Return an integer Index holding 0..k-1."""
    return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
    """Return an Index of k consecutive integers starting at 2**63
    (just above int64.max, so unsigned storage is required)."""
    return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex over 0..k-1 with step 1."""
    return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return a float Index of k sorted random values, shifted by a random
    offset and scaled by a random power of ten."""
    values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
    """Return a DatetimeIndex of k periods starting 2000-01-01 at `freq`."""
    dt = datetime(2000, 1, 1)
    dr = bdate_range(dt, periods=k, freq=freq, name=name)
    return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
    """Return a TimedeltaIndex of k periods starting at 1 day, at `freq`."""
    return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
    """Return a business-frequency PeriodIndex of k periods from 2000-01-01."""
    dt = datetime(2000, 1, 1)
    return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return a fixed 2-level MultiIndex: ('foo'|'bar') x (1|2).

    NOTE(review): `k` is accepted for signature symmetry with the other
    makers but is ignored — the result always has 4 entries; confirm no
    caller relies on length k.
    """
    return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
# Pool of first names used by _make_timeseries for the "name" column.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.

    Returns
    -------
    DataFrame indexed by "timestamp" with columns:
        * name : object dtype with string names
        * id : int dtype with
        * x, y : float dtype

    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    2000-01-03   982   Edith  0.473177  0.298654
    2000-01-04  1009   Sarah  0.534344 -0.750377
    2000-01-05   963   Zelda -0.271573  0.054424
    ...          ...     ...       ...       ...
    2000-12-27   980  Ingrid -0.132333 -0.422195
    2000-12-28   972   Frank -0.376007 -0.298687
    2000-12-29  1009  Ursula -0.865047 -0.503133
    2000-12-30  1000  Hannah -0.063757 -0.507336
    2000-12-31   972     Tim -0.869120  0.531685
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    columns = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = pd.DataFrame(columns, index=index, columns=sorted(columns))
    # date_range is end-inclusive; drop the final row so the frame covers
    # the half-open interval [start, end).
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def index_subclass_makers_generator():
    """Yield the maker functions for the Index subclasses, one by one."""
    yield makeDateIndex
    yield makePeriodIndex
    yield makeTimedeltaIndex
    yield makeRangeIndex
    yield makeIntervalIndex
    yield makeCategoricalIndex
    yield makeMultiIndex
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    makers: List[Callable[..., Index]] = [
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
    ]
    yield from (maker(k=k) for maker in makers)
# make series
def makeFloatSeries(name=None):
    """Series of _N random normals with a random string index."""
    return Series(np.random.randn(_N), index=makeStringIndex(_N), name=name)
def makeStringSeries(name=None):
    """Series of _N random normals indexed by random string labels."""
    return Series(np.random.randn(_N), index=makeStringIndex(_N), name=name)
def makeObjectSeries(name=None):
    """Series of object-dtype random strings with a random string index."""
    values = Index(makeStringIndex(_N), dtype=object)
    return Series(values, index=makeStringIndex(_N), name=name)
def getSeriesData():
    """Dict of _K random-normal Series that share one random string index."""
    shared_index = makeStringIndex(_N)
    return {
        col: Series(np.random.randn(_N), index=shared_index)
        for col in getCols(_K)
    }
def makeTimeSeries(nper=None, freq="B", name=None):
    """Random-normal Series over a DatetimeIndex (default length _N)."""
    nper = _N if nper is None else nper
    return Series(
        np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
    )
def makePeriodSeries(nper=None, name=None):
    """Random-normal Series over a PeriodIndex (default length _N)."""
    nper = _N if nper is None else nper
    return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Dict of _K independent random time series (see makeTimeSeries)."""
    return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
    """Dict of _K independent random period series (see makePeriodSeries)."""
    return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """DataFrame of _K random time-series columns (see getTimeSeriesData)."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame() -> DataFrame:
    """DataFrame of _K random-normal columns on a random string index."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    """Return (index, data) for a small frame with mixed column dtypes:
    floats (A, B), strings (C) and business dates (D)."""
    index = Index(["a", "b", "c", "d", "e"])
    data = dict(
        A=[0.0, 1.0, 2.0, 3.0, 4.0],
        B=[0.0, 1.0, 0.0, 1.0, 0.0],
        C=["foo1", "foo2", "foo3", "foo4", "foo5"],
        D=bdate_range("1/1/2009", periods=5),
    )
    return index, data
def makeMixedDataFrame():
    """DataFrame with mixed dtypes (default RangeIndex; see getMixedTypeDict)."""
    return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
    """DataFrame of _K random series on a shared PeriodIndex."""
    data = getPeriodData(nper)
    return DataFrame(data)
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """
    Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # Bug fix: compare name count by value; `is` only worked because CPython
    # caches small ints, which is an implementation detail.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )

    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None

    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]

    # specific 1D index type requested?
    idx_func = {
        "i": makeIntIndex,
        "f": makeFloatIndex,
        "s": makeStringIndex,
        "u": makeUnicodeIndex,
        "dt": makeDateIndex,
        "td": makeTimedeltaIndex,
        "p": makePeriodIndex,
    }.get(idx_type)
    if idx_func:
        # error: Cannot call function of unknown type
        idx = idx_func(nentries)  # type: ignore[operator]
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )

    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    def keyfunc(x):
        # Sort labels of the form "<prefix>_l<i>_g<j>" by their numeric parts.
        # (Hoisted out of the level loop — it was redefined, with a local
        # `import re`, on every iteration.)
        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    list_of_lists = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1

        # Deprecated since version 3.9: collections.Counter now supports []. See PEP 585
        # and Generic Alias Type.
        cnt: Counter[str] = collections.Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        list_of_lists.append(result)

    tuples = list(zip(*list_of_lists))

    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.

    Parameters
    ----------
    nrows, ncols - number of data rows/cols
    c_idx_names, idx_names - False/True/list of strings, yields No names ,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples
    --------
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)

    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))

    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])

    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")

    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FIH","FOH","FUM"],
                             c_idx_nlevels=2)

    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )

    # Both axes are built through makeCustomIndex with axis-specific prefixes.
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )

    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: f"R{r}C{c}"

    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]

    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
    """makeDataFrame with a fraction (1 - density) of cells set to NaN."""
    df = makeDataFrame()
    rows, cols = _create_missing_idx(
        *df.shape, density=density, random_state=random_state
    )
    df.values[rows, cols] = np.nan
    return df
def test_parallel(num_threads=2, kwargs_list=None):
    """
    Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        The list of kwargs to update original
        function kwargs on different threads.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.

    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    if kwargs_list is not None:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # One kwargs dict per thread, optionally overridden per-thread.
            if kwargs_list is not None:
                per_thread_kwargs = [dict(kwargs, **extra) for extra in kwargs_list]
            else:
                per_thread_kwargs = [kwargs] * num_threads
            threads = [
                threading.Thread(target=func, args=args, kwargs=kw)
                for kw in per_thread_kwargs
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    """Minimal Series subclass used to test subclass propagation."""

    # Attributes propagated through pandas operations.
    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # Keep operations closed under the subclass.
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # Expanding a subclassed Series yields the subclassed DataFrame.
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    """Minimal DataFrame subclass used to test subclass propagation."""

    # Attributes propagated through pandas operations.
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # Keep operations closed under the subclass.
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # Slicing a subclassed DataFrame yields the subclassed Series.
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    """Minimal Categorical subclass used to test subclass propagation."""

    @property
    def _constructor(self):
        return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : List[str]
        Each element represents the row of csv.

    Returns
    -------
    str
        Expected output of to_csv() in current OS.
    """
    # Every row — including the last — is terminated by the OS line separator.
    return os.linesep.join(rows_list) + os.linesep
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
    """
    Helper function to mark pytest.raises that have an external error message.

    Parameters
    ----------
    expected_exception : Exception
        Expected error to raise.

    Returns
    -------
    Callable
        Regular `pytest.raises` function with `match` equal to `None`.
    """
    import pytest

    return pytest.raises(expected_exception, match=None)
# (name -> function) pairs of the cython-accelerated reductions pandas knows.
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine frame, functions from SelectionMixin._cython_table
    keys and expected result.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    list
        List of three items (DataFrame, function, expected result)
    """
    results = []
    for func_name, expected in func_names_and_expected:
        results.append((ndframe, func_name, expected))
        # Also pair the expected value with every cython-table callable
        # registered under the same name.
        results.extend(
            (ndframe, func, expected)
            for func, name in cython_table
            if name == func_name
        )
    return results
def get_op_from_name(op_name: str) -> Callable:
    """
    The operator function for a given op name.

    Parameters
    ----------
    op_name : string
        The op name, in form of "add" or "__add__".

    Returns
    -------
    function
        A function performing the operation.
    """
    stripped = op_name.strip("_")
    try:
        return getattr(operator, stripped)
    except AttributeError:
        # Assume it is the reverse operator: swap the operand order.
        rop = getattr(operator, stripped[1:])

        def reversed_op(x, y):
            return rop(y, x)

        return reversed_op
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
    """Identity accessor: index the object itself (``obj[...]``)."""
    return x
def setitem(x):
    """Identity accessor: assign through the object itself (``obj[...] = v``)."""
    return x
def loc(x):
    """Return the label-based indexer ``x.loc``."""
    return x.loc
def iloc(x):
    """Return the position-based indexer ``x.iloc``."""
    return x.iloc
def at(x):
    """Return the scalar label-based accessor ``x.at``."""
    return x.at
def iat(x):
    """Return the scalar position-based accessor ``x.iat``."""
    return x.iat
|
common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys, os, logging
import multiprocessing as mp
import mxnet as mx
import numpy as np
import random
import shutil
from mxnet.base import MXNetError
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))
import models
from contextlib import contextmanager
from nose.tools import make_decorator, assert_raises
import tempfile
def assertRaises(expected_exception, func, *args, **kwargs):
    """Assert that func(*args, **kwargs) raises expected_exception.

    Other exception types propagate; a clean return fails the assertion.
    """
    try:
        func(*args, **kwargs)
    except expected_exception:
        return
    # Did not raise exception
    assert False, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
def default_logger():
    """A logger used to output seed information to nosetests logs."""
    logger = logging.getLogger(__name__)
    # getLogger() returns a per-name singleton; attach the handler (and pick
    # a level) only on the first call.
    if not logger.handlers:
        handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
        logger.addHandler(handler)
        if logger.getEffectiveLevel() == logging.NOTSET:
            logger.setLevel(logging.INFO)
    return logger
@contextmanager
def random_seed(seed=None):
    """
    Runs a code block with a new seed for np, mx and python's random.

    Parameters
    ----------
    seed : the seed to pass to np.random, mx.random and python's random.

    To impose rng determinism, invoke e.g. as in:

    with random_seed(1234):
        ...

    To impose rng non-determinism, invoke as in:

    with random_seed():
        ...

    Upon conclusion of the block, the rng's are returned to
    a state that is a function of their pre-block state, so
    any prior non-determinism is preserved.
    """
    try:
        # Drawn from the PRE-block np.random state, so the post-block reseed
        # below is a deterministic function of the prior state.
        next_seed = np.random.randint(0, np.iinfo(np.int32).max)
        if seed is None:
            # Non-deterministic mode: reseed from entropy, then draw the
            # actual seed used (so it can still be logged).
            np.random.seed()
            seed = np.random.randint(0, np.iinfo(np.int32).max)
        logger = default_logger()
        logger.debug('Setting np, mx and python random seeds = %s', seed)
        np.random.seed(seed)
        mx.random.seed(seed)
        random.seed(seed)
        yield
    finally:
        # Reinstate prior state of np.random and other generators
        np.random.seed(next_seed)
        mx.random.seed(next_seed)
        random.seed(next_seed)
def assert_raises_cudnn_not_satisfied(min_version):
    """Decorate a test that needs cuDNN >= `min_version`.

    When the available cuDNN (from the CUDNN_VERSION env var, default
    '7.3.1') is too old — or cuDNN is disabled via CUDNN_OFF_TEST_ONLY —
    the test is expected to raise on GPU contexts; otherwise it must pass.

    Bug fix: versions were compared as plain strings, so e.g. '10.0.0'
    sorted before '7.3.1'. Compare numeric components instead.
    """
    def _version_tuple(v):
        # '7.3.1' -> (7, 3, 1); component counts may differ between versions.
        return tuple(int(part) for part in v.split('.'))

    def test_helper(orig_test):
        @make_decorator(orig_test)
        def test_new(*args, **kwargs):
            cudnn_off = os.getenv('CUDNN_OFF_TEST_ONLY') == 'true'
            cudnn_env_version = os.getenv('CUDNN_VERSION', None if cudnn_off else '7.3.1')
            cudnn_test_disabled = cudnn_off or \
                _version_tuple(cudnn_env_version) < _version_tuple(min_version)
            if not cudnn_test_disabled or mx.context.current_context().device_type == 'cpu':
                orig_test(*args, **kwargs)
            else:
                assert_raises((MXNetError, RuntimeError), orig_test, *args, **kwargs)
        return test_new
    return test_helper
def with_seed(seed=None):
    """
    A decorator for nosetests test functions that manages rng seeds.

    Parameters
    ----------
    seed : the seed to pass to np.random and mx.random

    This tests decorator sets the np, mx and python random seeds identically
    prior to each test, then outputs those seeds if the test fails or
    if the test requires a fixed seed (as a reminder to make the test
    more robust against random data).

    @with_seed()
    def test_ok_with_random_data():
        ...

    @with_seed(1234)
    def test_not_ok_with_random_data():
        ...

    Use of the @with_seed() decorator for all tests creates
    tests isolation and reproducability of failures. When a
    test fails, the decorator outputs the seed used. The user
    can then set the environment variable MXNET_TEST_SEED to
    the value reported, then rerun the test with:

        nosetests --verbose -s <test_module_name.py>:<failing_test>

    To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
    To see the seeds of even the passing tests, add '--logging-level=DEBUG' to nosetests.
    """
    def test_helper(orig_test):
        @make_decorator(orig_test)
        def test_new(*args, **kwargs):
            test_count = int(os.getenv('MXNET_TEST_COUNT', '1'))
            env_seed_str = os.getenv('MXNET_TEST_SEED')
            for i in range(test_count):
                # Seed precedence: decorator argument > env var > fresh random.
                if seed is not None:
                    this_test_seed = seed
                    log_level = logging.INFO
                elif env_seed_str is not None:
                    this_test_seed = int(env_seed_str)
                    log_level = logging.INFO
                else:
                    this_test_seed = np.random.randint(0, np.iinfo(np.int32).max)
                    log_level = logging.DEBUG
                # Captured BEFORE seeding, and restored in `finally`, so the
                # test leaves np.random as if it had never been reseeded.
                post_test_state = np.random.get_state()
                np.random.seed(this_test_seed)
                mx.random.seed(this_test_seed)
                random.seed(this_test_seed)
                logger = default_logger()
                # 'nosetests --logging-level=DEBUG' shows this msg even with an ensuing core dump.
                test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else ''
                test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}'
                            ' to reproduce.').format(test_count_msg, this_test_seed)
                logger.log(log_level, test_msg)
                try:
                    orig_test(*args, **kwargs)
                except:
                    # With exceptions, repeat test_msg at INFO level to be sure it's seen.
                    if log_level < logging.INFO:
                        logger.info(test_msg)
                    raise
                finally:
                    np.random.set_state(post_test_state)
        return test_new
    return test_helper
def setup_module():
    """
    A function with a 'magic name' executed automatically before each nosetests module
    (file of tests) that helps reproduce a test segfault by setting and outputting the rng seeds.
    The segfault-debug procedure on a module called test_module.py is:
    1. run "nosetests --verbose test_module.py". A seg-faulting output might be:
       [INFO] np, mx and python random seeds = 4018804151
       test_module.test1 ... ok
       test_module.test2 ... Illegal instruction (core dumped)
    2. Copy the module-starting seed into the next command, then run:
       MXNET_MODULE_SEED=4018804151 nosetests --logging-level=DEBUG --verbose test_module.py
       Output might be:
       [WARNING] **** module-level seed is set: all tests running deterministically ****
       [INFO] np, mx and python random seeds = 4018804151
       test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
       ok
       test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
       Illegal instruction (core dumped)
    3. Copy the segfaulting-test seed into the command:
       MXNET_TEST_SEED=1435005594 nosetests --logging-level=DEBUG --verbose test_module.py:test2
       Output might be:
       [INFO] np, mx and python random seeds = 2481884723
       test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
       Illegal instruction (core dumped)
    4. Finally reproduce the segfault directly under gdb (might need additional os packages)
       by editing the bottom of test_module.py to be
       if __name__ == '__main__':
           logging.getLogger().setLevel(logging.DEBUG)
           test2()
       MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
    5. When finished debugging the segfault, remember to unset any exported MXNET_ seed
       variables in the environment to return to non-deterministic testing (a good thing).
    """
    module_seed_str = os.getenv('MXNET_MODULE_SEED')
    logger = default_logger()
    if module_seed_str is None:
        seed = np.random.randint(0, np.iinfo(np.int32).max)
    else:
        seed = int(module_seed_str)
        # Logger.warn() is deprecated in favor of Logger.warning().
        logger.warning('*** module-level seed is set: all tests running deterministically ***')
    logger.info('Setting module np/mx/python random seeds, use MXNET_MODULE_SEED=%s to reproduce.', seed)
    np.random.seed(seed)
    mx.random.seed(seed)
    random.seed(seed)
    # The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with
    # the 'with_seed()' decoration. Inform the user of this once here at the module level.
    if os.getenv('MXNET_TEST_SEED') is not None:
        logger.warning('*** test-level seed set: all "@with_seed()" tests run deterministically ***')
try:
    from tempfile import TemporaryDirectory
except ImportError:  # Python 2's tempfile has no TemporaryDirectory
    # really simple implementation of TemporaryDirectory
    class TemporaryDirectory(object):
        # Minimal stand-in for tempfile.TemporaryDirectory: create a temp
        # directory on construction, remove it (and its contents) on exit.
        def __init__(self, suffix='', prefix='', dir=None):
            # dir=None (not '') so mkdtemp falls back to the system default
            # temp location, matching the stdlib class's behaviour; dir=''
            # would create the directory relative to the current directory.
            self._dirname = tempfile.mkdtemp(suffix, prefix, dir)
        def __enter__(self):
            return self._dirname
        def __exit__(self, exc_type, exc_value, traceback):
            shutil.rmtree(self._dirname)
def teardown():
    """
    A function with a 'magic name' executed automatically after each nosetests test module.
    It waits for all operations in one file to finish before carrying on the next.
    """
    # Block until every queued async MXNet operation has completed, so any
    # failure/segfault is attributed to the module that actually caused it.
    mx.nd.waitall()
def run_in_spawned_process(func, env, *args):
    """
    Helper function to run a test in its own process.

    Avoids issues with Singleton- or otherwise-cached environment variable
    lookups in the backend. Adds a seed as first arg to propagate determinism.

    Parameters
    ----------
    func : function to run in a spawned process.
    env : dict of additional environment values to set temporarily in the environment before exec.
    args : args to pass to the function.

    Returns
    -------
    Whether the python version supports running the function as a spawned process.

    This routine calculates a random seed and passes it into the test as a first argument. If the
    test uses random values, it should include an outer 'with random_seed(seed):'. If the
    test needs to return values to the caller, consider use of shared variable arguments.
    """
    try:
        mpctx = mp.get_context('spawn')
    except (AttributeError, ValueError):
        # AttributeError: Python 2's multiprocessing has no get_context();
        # ValueError: this platform does not support the 'spawn' start method.
        # (The previous bare 'except:' also swallowed KeyboardInterrupt etc.)
        print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
              sys.version_info[0:2], file=sys.stderr, end='')
        return False
    else:
        seed = np.random.randint(0, 1024*1024*1024)
        orig_environ = os.environ.copy()
        try:
            for (key, value) in env.items():
                os.environ[key] = str(value)
            # Prepend seed as first arg
            p = mpctx.Process(target=func, args=(seed,)+args)
            p.start()
            p.join()
            assert p.exitcode == 0, "Non-zero exit code %d from %s()." % (p.exitcode, func.__name__)
        finally:
            # Restore the environment exactly as it was before the call.
            os.environ.clear()
            os.environ.update(orig_environ)
    return True
test_p2p_grpform.py | #!/usr/bin/python
#
# P2P group formation test cases
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import threading
import Queue
import hwsim_utils
import utils
def check_grpform_results(i_res, r_res):
    """Verify initiator (i_res) and responder (r_res) agree on the formed group.

    Raises Exception on the first inconsistency; returns None when everything
    matches.
    """
    if i_res['result'] != 'success' or r_res['result'] != 'success':
        raise Exception("Failed group formation")
    if i_res['ssid'] != r_res['ssid']:
        raise Exception("SSID mismatch")
    if i_res['freq'] != r_res['freq']:
        raise Exception("freq mismatch")
    # BUG FIX: the optional go_neg_* keys were guarded only on r_res but read
    # unguarded from i_res, so a result without them raised a KeyError here
    # instead of a meaningful mismatch elsewhere. Guard both sides.
    if 'go_neg_freq' in i_res and 'go_neg_freq' in r_res and \
       i_res['go_neg_freq'] != r_res['go_neg_freq']:
        raise Exception("go_neg_freq mismatch")
    if 'go_neg_freq' in i_res and i_res['freq'] != i_res['go_neg_freq']:
        raise Exception("freq/go_neg_freq mismatch")
    if 'go_neg_role' in i_res and i_res['role'] != i_res['go_neg_role']:
        raise Exception("role/go_neg_role mismatch")
    if 'go_neg_role' in r_res and r_res['role'] != r_res['go_neg_role']:
        raise Exception("role/go_neg_role mismatch")
    if i_res['go_dev_addr'] != r_res['go_dev_addr']:
        raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
    # Thread worker: start GO Negotiation from the initiator and hand the
    # result dict back through the Queue 'res' (threads cannot return values).
    logger.debug("Initiate GO Negotiation from i_dev")
    i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent)
    logger.debug("i_res: " + str(i_res))
    res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
    # PIN-based GO Negotiation where the responder has NOT pre-authorized the
    # peer: the initiator's attempt (run in a thread) triggers a
    # P2P-GO-NEG-REQUEST event on the responder, which then re-initiates.
    # Returns [i_res, r_res] after verifying data connectivity.
    r_dev.p2p_listen()
    i_dev.p2p_listen()
    pin = r_dev.wps_read_pin()
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.dump_monitor()
    res = Queue.Queue()
    t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
    t.start()
    logger.debug("Wait for GO Negotiation Request on r_dev")
    ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation timed out")
    r_dev.dump_monitor()
    logger.debug("Re-initiate GO Negotiation from r_dev")
    r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=20)
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    t.join()
    i_res = res.get()
    logger.debug("i_res: " + str(i_res))
    logger.info("Group formed")
    hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
    i_dev.dump_monitor()
    return [i_res, r_res]
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None, expect_failure=False, i_go_neg_status=None, i_method='enter', r_method='display', test_data=True, i_freq=None, r_freq=None):
    # PIN-based GO Negotiation where the responder pre-authorizes the peer
    # (p2p_go_neg_auth), so the initiator's single attempt completes directly.
    # With expect_failure=True the function returns None after the optional
    # i_go_neg_status check; otherwise it returns [i_res, r_res].
    r_dev.p2p_listen()
    i_dev.p2p_listen()
    pin = r_dev.wps_read_pin()
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, freq=r_freq)
    i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent, expect_failure=expect_failure, freq=i_freq)
    r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
    logger.debug("i_res: " + str(i_res))
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    i_dev.dump_monitor()
    if i_go_neg_status:
        # Caller expects the negotiation itself to fail with this status code.
        if i_res['result'] != 'go-neg-failed':
            raise Exception("Expected GO Negotiation failure not reported")
        if i_res['status'] != i_go_neg_status:
            raise Exception("Expected GO Negotiation status not seen")
    if expect_failure:
        return
    logger.info("Group formed")
    if test_data:
        hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
    return [i_res, r_res]
def go_neg_init_pbc(i_dev, r_dev, i_intent, res, freq):
    # Thread worker: PBC (push-button) variant of go_neg_init; result is
    # handed back through the Queue 'res'.
    logger.debug("Initiate GO Negotiation from i_dev")
    i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc",
                                  timeout=20, go_intent=i_intent, freq=freq)
    logger.debug("i_res: " + str(i_res))
    res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None, i_freq=None, r_freq=None):
    # PBC-based GO Negotiation using the re-initiation flow: the initiator's
    # threaded attempt triggers P2P-GO-NEG-REQUEST on the responder, which
    # then re-initiates. Returns [i_res, r_res] after a connectivity check.
    r_dev.p2p_find(social=True)
    i_dev.p2p_find(social=True)
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.dump_monitor()
    res = Queue.Queue()
    t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res, i_freq))
    t.start()
    logger.debug("Wait for GO Negotiation Request on r_dev")
    ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation timed out")
    r_dev.dump_monitor()
    logger.debug("Re-initiate GO Negotiation from r_dev")
    r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc",
                                  go_intent=r_intent, timeout=20, freq=r_freq)
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    t.join()
    i_res = res.get()
    logger.debug("i_res: " + str(i_res))
    logger.info("Group formed")
    hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
    i_dev.dump_monitor()
    return [i_res, r_res]
def remove_group(dev1, dev2):
    # Tear down the P2P group on both devices. The second removal may fail if
    # the group already disappeared with the first one, so errors from dev2
    # are deliberately ignored (best-effort cleanup).
    dev1.remove_group()
    try:
        dev2.remove_group()
    except:
        pass
def test_grpform(dev):
    """P2P group formation using PIN and authorized connection (init -> GO)"""
    # Intent 15 vs 0 -> the initiator becomes GO.
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
def test_grpform_a(dev):
    """P2P group formation using PIN and authorized connection (init -> GO) (init: group iface)"""
    # p2p_no_group_iface 0 -> use a dedicated p2p-wlan* group interface.
    dev[0].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    if "p2p-wlan" not in i_res['ifname']:
        raise Exception("Unexpected group interface name")
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
    # The dedicated netdev must be gone once the group is removed.
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform_b(dev):
    """P2P group formation using PIN and authorized connection (init -> GO) (resp: group iface)"""
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    if "p2p-wlan" not in r_res['ifname']:
        raise Exception("Unexpected group interface name")
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform_c(dev):
    """P2P group formation using PIN and authorized connection (init -> GO) (group iface)"""
    # Both ends use a dedicated group interface.
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    if "p2p-wlan" not in i_res['ifname']:
        raise Exception("Unexpected group interface name")
    if "p2p-wlan" not in r_res['ifname']:
        raise Exception("Unexpected group interface name")
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform2(dev):
    """P2P group formation using PIN and authorized connection (resp -> GO)"""
    # Reversed intents -> the responder becomes GO.
    go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
    remove_group(dev[0], dev[1])
def test_grpform2_c(dev):
    """P2P group formation using PIN and authorized connection (resp -> GO) (group iface)"""
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform3(dev):
    """P2P group formation using PIN and re-init GO Negotiation"""
    go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    remove_group(dev[0], dev[1])
def test_grpform3_c(dev):
    """P2P group formation using PIN and re-init GO Negotiation (group iface)"""
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform_pbc(dev):
    """P2P group formation using PBC and re-init GO Negotiation"""
    dev[0].request("SET ignore_old_scan_res 1")
    dev[1].request("SET ignore_old_scan_res 1")
    [i_res, r_res] = go_neg_pbc(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    # With intents 15 vs 0 the initiator must have become the GO.
    if i_res['role'] != 'GO' or r_res['role'] != 'client':
        raise Exception("Unexpected device roles")
    remove_group(dev[0], dev[1])
def test_both_go_intent_15(dev):
    """P2P GO Negotiation with both devices using GO intent 15"""
    # Both want to be GO -> negotiation must fail (status 9).
    go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=15, expect_failure=True, i_go_neg_status=9)
def test_both_go_neg_display(dev):
    """P2P GO Negotiation with both devices trying to display PIN"""
    # Incompatible provisioning methods -> status 10.
    go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='display', r_method='display')
def test_both_go_neg_enter(dev):
    """P2P GO Negotiation with both devices trying to enter PIN"""
    go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='enter', r_method='enter')
def test_grpform_per_sta_psk(dev):
    """P2P group formation with per-STA PSKs"""
    dev[0].request("P2P_SET per_sta_psk 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    # Connect a second client and verify it is assigned its own PSK.
    pin = dev[2].wps_read_pin()
    dev[0].p2p_go_authorize_client(pin)
    c_res = dev[2].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60)
    check_grpform_results(i_res, c_res)
    if r_res['psk'] == c_res['psk']:
        raise Exception("Same PSK assigned for both clients")
    hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
    dev[0].remove_group()
    dev[1].wait_go_ending_session()
    dev[2].wait_go_ending_session()
def test_grpform_per_sta_psk_wps(dev):
    """P2P group formation with per-STA PSKs with non-P2P WPS STA"""
    dev[0].request("P2P_SET per_sta_psk 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    # Join a plain (non-P2P) WPS station to the group via push-button.
    dev[0].p2p_go_authorize_client_pbc()
    dev[2].request("WPS_PBC")
    ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=30)
    if ev is None:
        raise Exception("Association with the GO timed out")
    hwsim_utils.test_connectivity_p2p_sta(dev[1], dev[2])
    dev[0].remove_group()
    dev[2].request("DISCONNECT")
    dev[1].wait_go_ending_session()
def test_grpform_force_chan_go(dev):
    """P2P group formation forced channel selection by GO"""
    # GO side (intent 15) forces operation on 2432 MHz (channel 5).
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           i_freq=2432,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2432":
        raise Exception("Unexpected channel - did not follow GO's forced channel")
    remove_group(dev[0], dev[1])
def test_grpform_force_chan_cli(dev):
    """P2P group formation forced channel selection by client"""
    # Client side (intent 0) forces 2417 MHz; the GO (intent 15) has no
    # preference, so the negotiated frequency must follow the client.
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           i_freq=2417,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2417":
        # BUG FIX: the old message blamed the GO, but in this test the
        # channel is forced by the client (cf. test_grpform_force_chan_go
        # for the GO-forced variant and the wording at
        # test_grpform_pref_chan_go_overridden).
        raise Exception("Unexpected channel - did not follow client's forced channel")
    remove_group(dev[0], dev[1])
def test_grpform_force_chan_conflict(dev):
    """P2P group formation fails due to forced channel mismatch"""
    # Both ends force different channels (2422 vs 2427) -> status 7 failure.
    go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
                          r_dev=dev[1], r_intent=15, r_freq=2427,
                          expect_failure=True, i_go_neg_status=7)
def test_grpform_pref_chan_go(dev):
    """P2P group formation preferred channel selection by GO"""
    # p2p_pref_chan 81:7 = operating class 81, channel 7 -> 2442 MHz.
    dev[0].request("SET p2p_pref_chan 81:7")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2442":
        raise Exception("Unexpected channel - did not follow GO's p2p_pref_chan")
    remove_group(dev[0], dev[1])
def test_grpform_pref_chan_go_overridden(dev):
    """P2P group formation preferred channel selection by GO overridden by client"""
    # GO prefers 2442, but the client forces 2422; the forced channel wins.
    dev[1].request("SET p2p_pref_chan 81:7")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           i_freq=2422,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2422":
        raise Exception("Unexpected channel - did not follow client's forced channel")
    remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_forcing_chan(dev):
    """P2P group formation with no-GO freq forcing channel"""
    # no-GO list covers all 5 GHz (4000-6000), so the group must use 2.4 GHz.
    dev[1].request("SET p2p_no_go_freq 100-200,300,4000-6000")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow no-GO freq")
    remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_conflict(dev):
    """P2P group formation fails due to no-GO range forced by client"""
    # Client bans the whole 2.4 GHz band yet forces 2422 -> status 7 failure.
    dev[1].request("SET p2p_no_go_freq 2000-3000")
    go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
                          r_dev=dev[1], r_intent=15,
                          expect_failure=True, i_go_neg_status=7)
def test_grpform_no_5ghz_world_roaming(dev):
    """P2P group formation with world roaming regulatory"""
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    # World roaming forbids active 5 GHz use -> group must stay on 2.4 GHz.
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=14,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli2(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse)"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=14,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli3(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (intent 15)"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli4(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse; intent 15)"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
|
recording.py | # Copyright 2019 École Polytechnique Fédérale de Lausanne. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import subprocess
import shutil
import pathlib
import threading
import os
import shlex
import csv
import signal
from contextlib import contextmanager, ExitStack
from concurrent.futures import ThreadPoolExecutor
import socket
import psutil
from .dist_common import get_external_ip_addr
import time
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__file__)
log.setLevel(level=logging.DEBUG)
def check_command(cmd):
    """Raise if *cmd* is not available on PATH (pidstat etc. come from sysstat)."""
    if shutil.which(cmd) is not None:
        return
    raise Exception("Need to install package for command '{c}'. Probably sysstat".format(c=cmd))
def get_cluster_ip():
    """Return the local IPv4 address that routes toward the cluster.

    Uses the OS routing table via a connected UDP socket; no packet is sent.
    """
    # BUG FIX: the socket was never closed (fd leak); the context manager
    # guarantees closure even if connect() raises.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("iccluster001", 22))
        return s.getsockname()[0]
def get_cluster_iface():
    """Return the name of the network interface holding the cluster IP."""
    cluster_ip = get_cluster_ip()
    for iface_name, addresses in psutil.net_if_addrs().items():
        # First interface with a matching address wins.
        if any(address.address == cluster_ip for address in addresses):
            return iface_name
    raise Exception("Couldn't find cluster interface with ip {}".format(cluster_ip))
@contextmanager
def StatsRecorder(pid_to_outfile_map, kill_self=False):
    """
    Record per-pid resource usage via pidstat into CSV files for the duration of the block.

    :param pid_to_outfile_map: a mapping of {pid: pathlib.Path()}, where the value is the file to write the given pid info to
    :param kill_self: if True, also kill the pidstat that monitors our own pid
        on exit; otherwise it is left to stop on its own to avoid a partial line.
    :return:
    """
    check_command("pidstat")
    # Newer sysstat versions support -H/-I; probe with 'pidstat -H'.
    extra_option = ""
    pidstat_options = subprocess.run(["pidstat", "-H"], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    if pidstat_options.returncode == 0:
        extra_option = "HI"
    pid_processes = []
    try:
        for pid, outfile in pid_to_outfile_map.items():
            assert isinstance(pid, int)
            assert isinstance(outfile, pathlib.Path)
            # We use the "build a list" style instead of generating the Popen instances so we can kill them if needed to bail early
            pid_processes.append((pid, subprocess.Popen("pidstat -hrduvwR{extra} -p {pid} 1 | sed '1d;/^[#]/{{4,$d}};/^[#]/s/^[#][ ]*//;/^$/d;s/^[ ]*//;s/[ ]\+/,/g' > {outfile}".format(
                pid=pid, outfile=str(outfile.absolute()), extra=extra_option
            ), shell=True, universal_newlines=True, stdout=subprocess.PIPE,
                start_new_session=True)))  # needed so we can kill by pgid
        yield
    finally:
        def kill_pid(monitored_pid, proc):
            if monitored_pid != os.getpid() or kill_self:
                print("Killing non-self pid: {}".format(monitored_pid))
                # if we are monitoring ourself, we don't need to kill our own process unless we have to
                # kill the process will create a partial line (most likely), so if possible it's better to
                # allow a clean shutdown, where pidstat just stops on its own
                pgid = os.getpgid(proc.pid)
                try:
                    # Escalate SIGINT -> SIGTERM -> SIGKILL until pidstat exits.
                    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
                        # BUG FIX: the old code broke out when poll() was None
                        # (process still running) and only signalled processes
                        # that had already exited -- exactly inverted.
                        if proc.poll() is not None:
                            break
                        os.killpg(pgid, sig)
                        time.sleep(1.0)
                except ProcessLookupError:
                    log.warning("Can't kill pid {p}. Not found".format(p=proc.pid))
        with ThreadPoolExecutor(max_workers=8) as tpe:
            # BUG FIX: tpe.map(kill_pid, pid_processes) passed each (pid, proc)
            # tuple as a single argument, and because map()'s lazy results were
            # never consumed, the resulting TypeErrors were silently discarded
            # and nothing was ever killed. Submit each pair explicitly and
            # surface unexpected errors.
            futures = [tpe.submit(kill_pid, monitored_pid, proc)
                       for monitored_pid, proc in pid_processes]
            for future in futures:
                error = future.exception()
                if error is not None:
                    log.warning("Failed to stop pidstat process: {e}".format(e=error))
@contextmanager
def NetRecorder(outdir, record_interval=1.0):
    # Context manager: samples psutil NIC counters for the external-facing
    # interface every record_interval seconds in a background thread, and on
    # exit writes the per-interval rates plus raw totals to outdir/net_stat.csv.
    outpath = outdir / "net_stat.csv"
    ext_ip = get_external_ip_addr()
    # Locate the interface that carries the external IP.
    iface_name = None
    for name, iface_addrs in psutil.net_if_addrs().items():
        for addr in iface_addrs:
            if addr.family == socket.AF_INET and addr.address == ext_ip:
                iface_name = name
    if iface_name is None:
        raise Exception("Couldn't find interface for IPv4 address: '{ip}'".format(ip=ext_ip))
    stop_event = threading.Event()
    net_stats = []
    def collect_net_stats():
        # Sampling loop; the first iteration only establishes the baseline
        # (prior_counters), so rates start with the second sample.
        prior_counters = None
        prior_time = None
        while not stop_event.is_set():
            current_time = time.time()
            current_counters = psutil.net_io_counters(pernic=True)
            if prior_counters is not None:
                prior_nic = prior_counters[iface_name]
                current_nic = current_counters[iface_name]
                elapsed = current_time - prior_time
                assert elapsed > 0, "Time diff was not >0. Got {}".format(elapsed)
                elapsed = float(elapsed)
                # Counter deltas must be non-negative (counters are cumulative).
                rx_bytes = current_nic.bytes_recv - prior_nic.bytes_recv
                assert rx_bytes >= 0, "Got a negative rx byte diff {}".format(rx_bytes)
                tx_bytes = current_nic.bytes_sent - prior_nic.bytes_sent
                assert tx_bytes >= 0, "Got a negative tx byte diff {}".format(tx_bytes)
                rx_packets = current_nic.packets_recv - prior_nic.packets_recv
                assert rx_packets >= 0, "Got a negative rx byte diff {}".format(rx_packets)
                tx_packets = current_nic.packets_sent - prior_nic.packets_sent
                assert tx_packets >= 0, "Got a negative tx byte diff {}".format(tx_packets)
                net_stats.append({
                    "time": current_time,
                    "rx_bytes/sec": rx_bytes / elapsed,
                    "tx_bytes/sec": tx_bytes / elapsed,
                    "rx_packets/sec": rx_packets / elapsed,
                    "tx_packets/sec": tx_packets / elapsed,
                    "rx_bytes": current_nic.bytes_recv,
                    "tx_bytes": current_nic.bytes_sent,
                    "rx_packets": current_nic.packets_recv,
                    "tx_packets": current_nic.packets_sent
                })
            prior_counters = current_counters
            prior_time = current_time
            time.sleep(record_interval)
    start_thread = threading.Thread(target=collect_net_stats)
    try:
        start_thread.start()
        log.info("Starting net recorder.")
        yield
    finally:
        log.info("Stopping net recorder")
        stop_event.set()
        if start_thread.is_alive():
            start_thread.join()
            if len(net_stats) > 0:
                with outpath.open("w") as f:
                    writer = csv.DictWriter(f=f, fieldnames=net_stats[0].keys())
                    writer.writeheader()
                    writer.writerows(net_stats)
            else:
                log.warning("Net stats didn't record anything")
        else:
            log.error("Net recorder didn't start correctly")
        log.debug("Net recorder stopped")
@contextmanager
def record_self(outdir, kill_self=False, net_record_interval=1.0):
    """Record this process's pidstat stats and host net stats into *outdir*.

    Combines StatsRecorder (for our own pid, into stats.csv) and NetRecorder
    for the duration of the managed block.
    """
    assert isinstance(outdir, pathlib.Path), "record_self(outdir) must be passed a pathlib.Path parameter"
    if not outdir.exists():
        raise Exception("outdir '{d}' doesn't exist. Caller must make it!".format(d=str(outdir)))
    stats_file = outdir / "stats.csv"
    # Start from a clean slate if a previous run left its output behind.
    if stats_file.exists():
        stats_file.unlink()
    with StatsRecorder(pid_to_outfile_map={os.getpid(): stats_file}, kill_self=kill_self), \
         NetRecorder(outdir=outdir, record_interval=net_record_interval):
        yield
def recording_cleanup():
    """Best-effort kill of any stray sysstat recorder processes; output is discarded."""
    for command in ("pkill sar", "pkill sadc"):
        subprocess.run(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
|
main.py | def exit(exit_code):
    # NOTE(review): shadows the builtin exit() and uses os._exit(), so no
    # atexit handlers or outstanding finally blocks elsewhere will run.
    global drivers,locks
    try:
        # locks[3] presumably guards the shared 'drivers' list -- TODO confirm.
        with locks[3]:
            try:drivers
            except NameError:pass
            else:
                # Best-effort: terminate each webdriver's browser process.
                for driver in drivers:
                    try:Process(driver).terminate()
                    except:pass
    except:pass
    finally:
        # Print the traceback only for non-zero (error) exits.
        if exit_code:
            print_exc()
        stdout.write('\r[INFO] Exitting with exit code %d\n'%exit_code)
        _exit(exit_code)
def log(message):
    """Write *message* to stdout, but only when the --debug flag was given."""
    global args
    if not args.debug:
        return
    stdout.write('%s\n'%message)
if __name__=='__main__':
    from os import _exit
    from sys import stdout
    from traceback import print_exc
    # Import everything the bot needs; on any import failure fall back to a
    # one-shot remote installer and retry, bailing out if it already ran once.
    while True:
        try:
            import re
            import os
            from os import devnull,environ
            from os.path import isfile,join as path_join
            from time import sleep
            from random import choice
            from psutil import Process
            from platform import system
            from argparse import ArgumentParser
            from requests import get as requests_get
            from threading import Thread,Lock,enumerate as list_threads
            from user_agent import generate_user_agent
            from selenium import webdriver
            from selenium.common.exceptions import WebDriverException
            from selenium.webdriver.common.by import By
            from selenium.webdriver.support import expected_conditions as EC
            from selenium.webdriver.support.wait import WebDriverWait
            break
        except:
            # INSTALLED is expected to be defined by the installer script below;
            # if it exists and imports still fail, give up with exit code 1.
            try:INSTALLED
            except NameError:
                try:from urllib import urlopen
                except:from urllib.request import urlopen
                argv=['AdFly-Bot',True]
                # SECURITY: downloads and exec()s code straight from a remote
                # URL with no integrity/signature check -- arbitrary remote
                # code execution if the URL or the connection is compromised.
                exec(urlopen('https://raw.githubusercontent.com/DeBos99/multi-installer/master/install.py').read().decode())
            else:exit(1)
def is_root():
    """Return True when running with effective uid 0 (root); False otherwise,
    including on platforms (e.g. Windows) where os.geteuid does not exist."""
    try:
        return not os.geteuid()
    except AttributeError:
        # os.geteuid is POSIX-only; the old bare 'except:' hid real errors.
        return False
def get_proxies():
    # Load the HTTPS proxy list: from the file given via --proxies when set,
    # otherwise from proxy-list.download's elite-anonymity API over the network.
    global args
    if args.proxies:
        # One proxy per line ("host:port" presumably -- see bot()'s usage).
        proxies=open(args.proxies,'r').read().strip().split('\n')
    else:
        proxies=requests_get('https://www.proxy-list.download/api/v1/get?type=https&anon=elite').content.decode().strip().split('\r\n')
    log('[INFO] %d proxies successfully loaded!'%len(proxies))
    return proxies
def bot(id):
    """Worker-thread body: repeatedly visit an ad URL through a freshly
    proxied webdriver, click the skip button, and count the view.

    :param id: 1-based thread number, used only in log prefixes.

    NOTE(review): reads/writes module globals; `watched_ads += 1` below is
    not protected by a lock, so counts can be lost under thread contention.
    """
    global args,locks,urls,user_agents,proxies,drivers,watched_ads
    while True:
        try:
            url=choice(urls)
            # locks[0] guards the shared proxy pool; refill when exhausted.
            with locks[0]:
                if len(proxies)==0:
                    proxies.extend(get_proxies())
                proxy=choice(proxies)
                proxies.remove(proxy)
            log('[INFO][%d] Connecting to %s'%(id,proxy))
            # user_agents is either a list (pick one) or the generator function.
            user_agent=choice(user_agents) if args.user_agent else user_agents()
            log('[INFO][%d] Setting user agent to %s'%(id,user_agent))
            # Slow-start: only one thread at a time may launch a webdriver.
            if args.slow_start:
                locks[1].acquire()
            if system()=='Windows':
                executable_dir=path_join(environ['APPDATA'],'DeBos','drivers')
            else:
                executable_dir=path_join(environ['HOME'],'.DeBos','drivers')
            if args.driver=='chrome':
                chrome_options=webdriver.ChromeOptions()
                chrome_options.add_argument('--proxy-server={}'.format(proxy))
                chrome_options.add_argument('--user-agent={}'.format(user_agent))
                chrome_options.add_argument('--mute-audio')
                chrome_options.add_experimental_option('excludeSwitches',['enable-logging'])
                if args.headless:
                    chrome_options.add_argument('--headless')
                if is_root():
                    # Chrome refuses to start sandboxed as root.
                    chrome_options.add_argument('--no-sandbox')
                if system()=='Windows':
                    executable_path=path_join(executable_dir,'chromedriver.exe')
                else:
                    executable_path=path_join(executable_dir,'chromedriver')
                driver=webdriver.Chrome(options=chrome_options,executable_path=executable_path)
            else:
                firefox_options=webdriver.FirefoxOptions()
                # Route both HTTP and SSL traffic through the chosen proxy;
                # mute audio and override the user agent.
                firefox_options.preferences.update({
                    'media.volume_scale':'0.0',
                    'general.useragent.override':user_agent,
                    'network.proxy.type':1,
                    'network.proxy.http':proxy.split(':')[0],
                    'network.proxy.http_port':int(proxy.split(':')[1]),
                    'network.proxy.ssl':proxy.split(':')[0],
                    'network.proxy.ssl_port':int(proxy.split(':')[1])
                })
                if args.headless:
                    firefox_options.add_argument('--headless')
                if system()=='Windows':
                    executable_path=path_join(executable_dir,'geckodriver.exe')
                else:
                    executable_path=path_join(executable_dir,'geckodriver')
                driver=webdriver.Firefox(options=firefox_options,service_log_path=devnull,executable_path=executable_path)
            # Record the driver's whole process tree so exit() can kill it.
            process=driver.service.process
            pid=process.pid
            cpids=[x.pid for x in Process(pid).children()]
            pids=[pid]+cpids
            drivers.extend(pids)
            if args.slow_start:
                locks[1].release()
            log('[INFO][%d] Successully started webdriver!'%id)
            driver.set_page_load_timeout(60);
            log('[INFO][%d] Opening %s'%(id,url))
            driver.get(url)
            # Title of the adf.ly interstitial; anything else is treated as a
            # dead/intercepting proxy.
            if driver.title=='Shrink your URLs and get paid!':
                log('[INFO][%d] Website successfully loaded!'%id)
                WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.ID,'skip_bu2tton'))).click()
                log('[INFO][%d] Ad successfully viewed!'%id)
                watched_ads+=1
            else:
                log('[WARNING][%d] Dead proxy eliminated!'%id)
        except WebDriverException as e:
            log('[WARNING][%d] %s'%(id,e.__class__.__name__))
        except KeyboardInterrupt:exit(0)
        except:exit(1)
        finally:
            log('[INFO][%d] Quitting webdriver!'%id)
            # driver/pids may be unbound if the failure happened early.
            try:driver
            except NameError:pass
            else:driver.quit()
            # locks[2] guards removal from the shared driver-pid list.
            with locks[2]:
                try:pids
                except NameError:pass
                else:
                    for pid in pids:
                        try:drivers.remove(pid)
                        except:pass
if __name__=='__main__':
    # Parse CLI options, spawn the bot worker threads, then either join them
    # (debug) or run the console counter loop.
    try:
        parser=ArgumentParser()
        parser.add_argument('-u','--url',help='set url of video/set path to file with urls',required=True)
        parser.add_argument('-t','--threads',help='set number of threads',type=int,default=15)
        parser.add_argument('-D','--driver',help='set webdriver',choices=['chrome','firefox'],default='chrome')
        parser.add_argument('-H','--headless',help='enable headless mode',action='store_true')
        parser.add_argument('-s','--slow-start',help='enable slow start mode',action='store_true')
        parser.add_argument('-p','--proxies',help='set path to file with proxies')
        parser.add_argument('-U','--user-agent',help='set user agent/set path to file with user agents')
        parser.add_argument('-d','--debug',help='enable debug mode',action='store_true')
        parser.add_argument('-r','--refresh',help='set refresh rate for logger in seconds',type=float,default=1.0)
        args=parser.parse_args()
        if args.url:
            # --url may be a single URL or a path to a file of URLs.
            if isfile(args.url):
                urls=open(args.url,'r').read().strip().split('\n')
            else:
                urls=[args.url]
            # Force an https:// scheme onto every URL.
            urls=[re.sub(r'\A(?:https?://)?(.*)\Z',r'https://\1',x) for x in urls]
        if args.user_agent:
            # --user-agent may be a literal UA string or a path to a file.
            if isfile(args.user_agent):
                user_agents=open(args.user_agent,'r').read().strip().split('\n')
            else:
                user_agents=[args.user_agent]
        else:
            # No list given: bot() will call this function per request.
            user_agents=generate_user_agent
        locks=[Lock() for _ in range(4)]
        logger_lock=Lock()
        drivers=[]
        proxies=[]
        watched_ads=0
        for i in range(args.threads):
            t=Thread(target=bot,args=(i+1,))
            t.daemon=True
            t.start()
        if args.debug:
            # Debug: block on the worker threads so log() output is visible.
            for t in list_threads()[1:]:
                t.join()
        else:
            # Normal: periodically redraw the watched-ads counter.
            while True:
                with logger_lock:
                    print('\n'*100)
                    stdout.write('Watched ads: %d'%watched_ads)
                    stdout.flush()
                sleep(args.refresh)
    except SystemExit as e:exit(int(str(e)))
    except KeyboardInterrupt:exit(0)
    except:exit(1)
|
train_dynamicdata.py | #!/usr/bin/env python
"""Train models using alternate dynamic data loader."""
import os
import signal
import torch
from pprint import pformat
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main_dynamicdata as single_main
from onmt.utils.parse import ArgumentParser
from onmt.dynamicdata.config import read_data_config, verify_shard_config
from onmt.dynamicdata.transforms import set_train_opts
from onmt.dynamicdata.vocab import load_fields, load_transforms
from onmt.dynamicdata.iterators import build_mixer
from onmt.dynamicdata.dataset import DatasetAdaptor, build_dataset_adaptor_iter
from itertools import cycle
def train(opt):
    """Validate options, then launch one consumer process per GPU plus a
    single batch-producer process feeding them through bounded queues.

    Blocks until all consumer processes exit, then terminates the producer.
    """
    ArgumentParser.validate_train_opts(opt)
    ArgumentParser.update_model_opts(opt)
    ArgumentParser.validate_model_opts(opt)
    set_random_seed(opt.seed, False)
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        #logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        #fields = checkpoint['vocab']
        # Recover how far the data loader had advanced, preferring (1) the
        # value stored in the checkpoint opts, then (2) an explicit CLI
        # override, then (3) an approximation from the optimizer step count.
        if 'data_loader_step' in checkpoint['opt']:
            data_loader_step = checkpoint['opt'].data_loader_step
            print('Set data_loader_step {} from model_opt'.format(data_loader_step))
        elif opt.data_loader_step is not None:
            data_loader_step = opt.data_loader_step
            print('Overrode data_loader_step {} from opt'.format(data_loader_step))
        else:
            data_loader_step = checkpoint['optim']['training_step']
            print('Approximated data_loader_step {} from optim. '
                  'Consider using --data_loader_step'.format(data_loader_step))
    else:
        data_loader_step = 0
    nb_gpu = len(opt.gpu_ranks)
    # always using producer/consumer
    queues = []
    mp = torch.multiprocessing.get_context('spawn')
    # Bounds the number of batches in flight across all consumers.
    semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Train with multiprocessing.
    procs = []
    for device_id in range(nb_gpu):
        q = mp.Queue(opt.queue_size)
        queues += [q]
        procs.append(mp.Process(target=run, args=(
            opt, device_id, error_queue, q, semaphore,),
            daemon=True))
        procs[device_id].start()
        logger.info(" Starting process pid: %d " % procs[device_id].pid)
        error_handler.add_child(procs[device_id].pid)
    producer = mp.Process(target=batch_producer,
                          args=(queues, semaphore, opt, data_loader_step),
                          daemon=True)
    producer.start()
    error_handler.add_child(producer.pid)
    for p in procs:
        p.join()
    producer.terminate()
def build_data_loader(opt):
    """Construct the dynamic-data components inside the calling process.

    Generators cannot be pickled, so these must be built in the process
    that uses them (the producer) rather than passed across process spawn.

    :return: tuple of (data_config, transforms, dataset_adaptor).
    """
    data_config = read_data_config(opt.data_config)
    verify_shard_config(data_config)
    # The transform models are not needed here, only the transforms
    # (the original bound them to an unused local).
    _, transforms = load_transforms(data_config)
    set_train_opts(data_config, transforms)
    fields = load_fields(data_config)
    dataset_adaptor = DatasetAdaptor(fields)
    return data_config, transforms, dataset_adaptor
def batch_producer(queues, semaphore, opt, data_loader_step):
    """Producer process: build the dynamic data pipeline and feed batches
    round-robin into the per-GPU queues forever.

    :param queues: one multiprocessing queue per GPU consumer.
    :param semaphore: bounds batches in flight; acquired here per batch,
        released by the consumers.
    :param data_loader_step: step to fast-forward the iterator to on resume.
    """
    init_logger(opt.log_file)
    set_random_seed(opt.seed, False)
    data_config, transforms, dataset_adaptor = build_data_loader(opt)
    logger.info('Transforms:')
    logger.info(pformat(transforms))
    mixer, task_epochs = build_mixer(data_config, transforms, is_train=True, bucket_size=opt.bucket_size)
    report_every = max(opt.queue_size, opt.report_every)
    def mb_callback(i):
        # Periodically log per-task epoch counters and transform statistics.
        if i % report_every == 0:
            logger.info('mb %s epochs %s',
                        i, {key: ge.epoch for key, ge in task_epochs.items()})
            for task in transforms:
                logger.info('* transform stats ({})'.format(task))
                for transform in transforms[task]:
                    for line in transform.stats():
                        logger.info('\t{}'.format(line))
    train_iter = build_dataset_adaptor_iter(
        mixer, dataset_adaptor, opt, mb_callback, data_loader_step, is_train=True)
    def next_batch():
        # Acquire after next() so the semaphore only counts delivered batches.
        new_batch = next(train_iter)
        semaphore.acquire()
        return new_batch
    b = next_batch()
    for device_id, q in cycle(enumerate(queues)):
        b.dataset = None
        # the training process has the gpu, and is responsible for calling torch.device
        # hack to dodge unpicklable `dict_keys`
        b.fields = list(b.fields)
        q.put(b)
        b = next_batch()
def run(opt, device_id, error_queue, batch_queue, semaphore):
    """ run process """
    # Per-GPU consumer process. Device 0 additionally builds and fully
    # materializes the validation iterator for the trainer; other devices
    # pass None. Exceptions are forwarded to the parent via error_queue.
    try:
        if device_id == 0:
            data_config, transforms, dataset_adaptor = build_data_loader(opt)
            valid_mixer, valid_task_epochs = build_mixer(data_config, transforms,
                is_train=False, bucket_size=opt.bucket_size)
            valid_iter = build_dataset_adaptor_iter(valid_mixer, dataset_adaptor, opt,
                mb_callback=None, data_loader_step=0, is_train=False)
            # Materialized so validation data can be reused every report step.
            valid_iter = list(valid_iter)
        else:
            valid_iter = None
        gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
        if gpu_rank != opt.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        single_main(opt, device_id, batch_queue, semaphore, valid_iter)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """Listen for exceptions raised in child processes and re-raise their
    tracebacks in the parent process via SIGUSR1."""

    def __init__(self, error_queue):
        """Start the background listener thread and install the SIGUSR1
        handler in the parent process."""
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        listener = threading.Thread(
            target=self.error_listener, daemon=True)
        listener.start()
        self.error_thread = listener
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """Register a child pid to be interrupted if any child fails."""
        self.children_pids.append(pid)

    def error_listener(self):
        """Block for one (rank, traceback) item, re-queue it for the signal
        handler, then poke the parent with SIGUSR1."""
        rank, original_trace = self.error_queue.get()
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """SIGUSR1 handler: SIGINT every child, then raise the child's
        traceback in the parent."""
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        rank, original_trace = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def _get_parser():
    """Build the ArgumentParser carrying config, model and training options."""
    parser = ArgumentParser(description='train.py')
    for add_group in (opts.config_opts, opts.model_opts, opts.train_opts):
        add_group(parser)
    return parser
def main():
    """Entry point: parse command-line options and launch training."""
    opt = _get_parser().parse_args()
    train(opt)

if __name__ == "__main__":
    main()
|
broker.py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import threading
import time
from multiprocessing import Queue, Process
import psutil
import lz4.frame
import pickle
# from pyarrow import deserialize
from absl import logging
from xt.framework.explorer import Explorer
from xt.framework.evaluator import Evaluator
from xt.framework.comm.comm_conf import CommConf, get_port
from xt.framework.comm.uni_comm import UniComm
from xt.framework.remoter import dist_model
from xt.framework.comm.message import message, get_msg_info, set_msg_data
class BrokerMaster(object):
    """Master-side broker.

    Bridges the local learner and the remote broker slaves: pulls train
    data pushed by slaves over ZMQ, routes local commands either to
    registered local queues or out to the slaves, and scales the number of
    active actors based on the training backlog.
    """

    def __init__(self, node_config_list, start_port=None):
        """
        :param node_config_list: per-node connection/config info, one entry
            per slave node.
        :param start_port: base port; when None, one is leased from CommConf.
        """
        self.node_config_list = node_config_list
        self.node_num = len(node_config_list)
        comm_conf = None
        if not start_port:
            comm_conf = CommConf()
            start_port = comm_conf.get_start_port()
        self.start_port = start_port
        logging.info("master broker init on port: {}".format(start_port))
        self.comm_conf = comm_conf
        recv_port, send_port = get_port(start_port)
        # One shared PULL socket for all slaves; one PUSH socket per slave.
        self.recv_slave = UniComm("CommByZmq", type="PULL", port=recv_port)
        self.send_slave = [
            UniComm("CommByZmq", type="PUSH", port=send_port + i)
            for i in range(self.node_num)
        ]
        self.recv_local_q = UniComm("LocalMsg")
        self.send_local_q = dict()
        self.main_task = None

    def start_data_transfer(self):
        """ start transfer data and other thread """
        # `daemon = True` replaces the deprecated Thread.setDaemon(True).
        data_transfer_thread = threading.Thread(target=self.recv_broker_slave)
        data_transfer_thread.daemon = True
        data_transfer_thread.start()

        data_transfer_thread = threading.Thread(target=self.recv_local)
        data_transfer_thread.daemon = True
        data_transfer_thread.start()

        alloc_thread = threading.Thread(target=self.alloc_actor)
        alloc_thread.daemon = True
        alloc_thread.start()

    def recv_broker_slave(self):
        """ recv remote train data in sync mode"""
        while True:
            recv_data = self.recv_slave.recv_bytes()
            # Slaves send lz4-compressed pickles.
            recv_data = pickle.loads(lz4.frame.decompress(recv_data))
            cmd = get_msg_info(recv_data, "cmd")
            if cmd in []:
                # Placeholder for commands the master would handle itself.
                pass
            else:
                send_cmd = self.send_local_q.get(cmd)
                if send_cmd:
                    send_cmd.send(recv_data)

    def recv_local(self):
        """ recv local cmd """
        while True:
            recv_data = self.recv_local_q.recv()
            cmd = get_msg_info(recv_data, "cmd")
            if cmd in ["close"]:
                # close() never returns: it hard-exits the process.
                self.close(recv_data)
            if cmd in self.send_local_q:
                # BUG FIX: this test used to be
                # `cmd in [self.send_local_q.keys()]`, which compared cmd
                # against the dict_keys object itself and was never true, so
                # registered commands always leaked to the slaves below.
                self.send_local_q[cmd].send(recv_data)
                logging.debug("recv data: {} with cmd-{}".format(recv_data, cmd))
            else:
                broker_id = get_msg_info(recv_data, "broker_id")
                logging.debug("recv data: {} with bid-{}".format(recv_data, broker_id))
                if broker_id == -1:
                    # Broadcast to every slave, distributing the model first.
                    for slave, node_info in zip(self.send_slave, self.node_config_list):
                        # model name as list
                        if get_msg_info(recv_data, "cmd") in ("dist_model",) and \
                                isinstance(recv_data["data"], list):
                            _remote_model = dist_model(
                                src_model=recv_data["data"][0], node_info=node_info
                            )
                            # update remote model as message data
                            if _remote_model:
                                set_msg_data(msg=recv_data, data=_remote_model)
                        slave.send(recv_data)
                else:
                    if get_msg_info(recv_data, "cmd") in ("dist_model",) and \
                            isinstance(recv_data["data"], list):
                        _remote_model = dist_model(
                            recv_data["data"][0], node_info=self.node_config_list[broker_id]
                        )
                        # update remote model as message data
                        if _remote_model:
                            set_msg_data(msg=recv_data, data=_remote_model)
                    self.send_slave[broker_id].send(recv_data)

    def register(self, cmd):
        """Create, store and return a local queue receiving `cmd` messages."""
        self.send_local_q.update({cmd: UniComm("LocalMsg")})
        return self.send_local_q[cmd]

    def alloc_actor(self):
        """Every 10 s, ask slaves to shrink/grow the actor pool when the
        train backlog is above 200 / below 10 items."""
        while True:
            time.sleep(10)
            if not self.send_local_q.get("train"):
                continue
            train_list = self.send_local_q["train"].comm.data_list
            if len(train_list) > 200:
                self.send_alloc_msg("decrease")
            elif len(train_list) < 10:
                self.send_alloc_msg("increase")

    def send_alloc_msg(self, actor_status):
        """Broadcast an increase/decrease command to every slave."""
        alloc_cmd = {
            "ctr_info": {"cmd": actor_status, "actor_id": -1, "explorer_id": -1}
        }
        for q in self.send_slave:
            q.send(alloc_cmd)

    def close(self, close_cmd):
        """Forward the close command to all slaves, release the leased port
        (best-effort) and hard-exit the process."""
        for slave in self.send_slave:
            slave.send(close_cmd)
        time.sleep(1)
        try:
            self.comm_conf.release_start_port(self.start_port)
        except BaseException:
            # comm_conf may be None when an explicit start_port was given.
            pass
        os._exit(0)

    def start(self):
        """ start all system """
        self.start_data_transfer()

    def main_loop(self):
        """
        The foreground task of broker master.

        main_loop after ready the messy setup works.
        :return:
        """
        if not self.main_task:
            # NOTE: logging.fatal only logs; the call below then raises
            # AttributeError on None, which is what actually stops us.
            logging.fatal("learning process isn't ready!")
        self.main_task.main_loop()

    def stop(self):
        """ stop all system """
        close_cmd = message(None, cmd="close")
        self.recv_local_q.send(close_cmd)
class BrokerSlave(object):
    """Node-local broker: relays messages between the master (over ZMQ) and
    the explorer/evaluator processes running on this node."""

    def __init__(self, ip_addr, broker_id, start_port):
        """
        :param ip_addr: address of the master broker.
        :param broker_id: index of this node in the master's slave list.
        :param start_port: base port leased by the master.
        """
        self.broker_id = broker_id
        train_port, predict_port = get_port(start_port)
        self.send_master_q = UniComm(
            "CommByZmq", type="PUSH", addr=ip_addr, port=train_port
        )
        self.recv_master_q = UniComm(
            "CommByZmq", type="PULL", addr=ip_addr, port=predict_port + broker_id
        )
        # Plasma shared-memory channel fed by explorers on this node.
        self.recv_explorer_q = UniComm("ShareByPlasma")
        self.send_explorer_q = dict()
        self.explore_process = dict()
        # Count of currently-suspended explorer processes (see alloc()).
        self.processes_suspend = 0
        logging.info("init broker slave with id-{}".format(self.broker_id))

    def start_data_transfer(self):
        """ start transfer data and other thread """
        data_transfer_thread = threading.Thread(target=self.recv_master)
        data_transfer_thread.start()

        data_transfer_thread = threading.Thread(target=self.recv_explorer)
        data_transfer_thread.start()

    def recv_master(self):
        """ recv remote train data in sync mode"""
        # Command dispatch loop for messages arriving from the master.
        while True:
            recv_data = self.recv_master_q.recv()
            cmd = get_msg_info(recv_data, "cmd")
            if cmd in ["close"]:
                # close() never returns (it calls os._exit).
                self.close(recv_data)
            if cmd in ["create_explorer"]:
                self.create_explorer(recv_data["data"])
                continue
            if cmd in ["create_evaluator"]:
                self.create_evaluator(recv_data["data"])
                continue
            if cmd in ["increase", "decrease"]:
                self.alloc(cmd)
                continue
            if cmd in ("eval",):
                test_id = get_msg_info(recv_data, "test_id")
                self.send_explorer_q[test_id].put(recv_data)
                continue
            # Anything else is routed to one explorer, or to all on -1.
            explorer_id = get_msg_info(recv_data, "explorer_id")
            if explorer_id == -1:
                for _, send_q in self.send_explorer_q.items():
                    send_q.put(recv_data)
            else:
                self.send_explorer_q[explorer_id].put(recv_data)

    def recv_explorer(self):
        """ recv explorer cmd """
        # Relay already-serialized explorer output up to the master, then
        # free the plasma buffer.
        while True:
            data, object_id = self.recv_explorer_q.recv_bytes()
            self.send_master_q.send_bytes(data)
            self.recv_explorer_q.delete(object_id)

    def create_explorer(self, config_info):
        """ create explorer """
        # Spawn an Explorer in its own daemon process wired to this broker.
        env_para = config_info.get("env_para")
        env_id = env_para.get("env_id")
        send_explorer = Queue()

        explorer = Explorer(
            config_info,
            self.broker_id,
            recv_broker=send_explorer,
            send_broker=self.recv_explorer_q,
        )

        p = Process(target=explorer.start)
        p.daemon = True
        p.start()

        # Pin the explorer to one CPU core, offset by 4 (presumably to keep
        # the first cores free for the learner/broker — TODO confirm).
        cpu_id = int(env_id) + 4
        cpu_cmd = "taskset -pc " + str(cpu_id) + " " + str(p.pid)
        print(cpu_cmd)
        os.system(cpu_cmd)

        self.send_explorer_q.update({env_id: send_explorer})
        self.explore_process.update({env_id: p})

    def create_evaluator(self, config_info):
        """ create evaluator """
        # Same pattern as create_explorer, but keyed by test_id and without
        # CPU pinning.
        test_id = config_info.get("test_id")
        send_evaluator = Queue()

        evaluator = Evaluator(
            config_info,
            self.broker_id,
            recv_broker=send_evaluator,
            send_broker=self.recv_explorer_q,
        )

        p = Process(target=evaluator.start)
        p.daemon = True
        p.start()

        self.send_explorer_q.update({test_id: send_evaluator})
        self.explore_process.update({test_id: p})

    def alloc(self, actor_status):
        """ monitor system and adjust resource """
        # Throttle throughput by suspending/resuming explorer processes;
        # processes_suspend is the index of the next process to suspend.
        p_id = [_p.pid for _, _p in self.explore_process.items()]
        p = [psutil.Process(_pid) for _pid in p_id]
        if actor_status == "decrease":
            if self.processes_suspend < len(p):
                p[self.processes_suspend].suspend()
                self.processes_suspend += 1
        elif actor_status == "increase":
            if self.processes_suspend >= 1:
                p[self.processes_suspend - 1].resume()
                self.processes_suspend -= 1
            else:
                pass
        elif actor_status == "reset":
            # resume all processes suspend
            for _, resume_p in enumerate(p):
                resume_p.resume()

    def close(self, close_cmd):
        """ close broker """
        # Forward close to all explorers, give them 5 s, then terminate any
        # stragglers, kill the plasma store and hard-exit.
        for _, send_q in self.send_explorer_q.items():
            send_q.put(close_cmd)
        time.sleep(5)

        for _, p in self.explore_process.items():
            if p.exitcode is None:
                p.terminate()

        os.system("pkill plasma -g " + str(os.getpgid(0)))
        os._exit(0)

    def start(self):
        """ start all system """
        self.start_data_transfer()
|
Hiwin_RT605_ArmCommand_Socket_20190627175339.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
# Module-wide shared state for the TCP link to the arm controller.
Socket = 0  # replaced by the client socket object once connected
data = '0' # initial value of the transmit-data buffer
Arm_feedback = 1 # assume the arm is busy until told otherwise
NAME = 'socket_server'  # ROS node name
arm_mode_flag = False  # set True by Arm_Mode() when a command is pending
##------------class pos-------
class point():
    """Plain container for one arm pose: position (x, y, z) plus Euler
    angles (pitch, roll, yaw). Values are stored exactly as given."""
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw

# Initial/home pose used until the strategy side sends a new one.
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
    """Mutable record of the most recently requested arm command."""
    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        (self.grip, self.setvel, self.ra, self.delay,
         self.setboth, self.action, self.Speedmode) = (
            grip, setvel, ra, delay, setboth, action, Speedmode)

# Shared command record, mutated by Arm_Mode()/Speed_Mode().
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
    """Emulate a C-style switch/case (classic PSF recipe).

    Iterating yields a single ``match`` callable; ``fall`` implements
    C-like fall-through once a case has matched.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX (PEP 479): `raise StopIteration` inside a generator raises
        # RuntimeError on Python 3.7+, which crashed any `for case in
        # switch(...)` loop that fell through without matching a case.
        # Simply returning ends the iteration cleanly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Latest arm state and send-confirmation flag reported by the arm."""
    def __init__(self, ArmState, SentFlag):
        self.ArmState, self.SentFlag = ArmState, SentFlag

# Shared record updated by the socket client thread, published by ROS.
state_feedback = StateFeedback(0, 0)
def point_data(x,y,z,pitch,roll,yaw):
    """Store a pose received from the strategy side into the global `pos`,
    with every component converted to its string form (ready for the TCP
    command builder)."""
    components = zip(('x', 'y', 'z', 'pitch', 'roll', 'yaw'),
                     (x, y, z, pitch, roll, yaw))
    for attr, value in components:
        setattr(pos, attr, '%s' % value)
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data from the strategy side
    """Store an arm command into the shared socket_cmd record, mark it
    pending, and immediately trigger Socket_command() to transmit it.

    Each value is round-tripped through str then int, mirroring the
    original ROS-service string interface.
    """
    global arm_mode_flag
    socket_cmd.action = int('%s'%action)
    socket_cmd.grip = int('%s'%grip)
    socket_cmd.ra = int('%s'%ra)
    socket_cmd.setvel = int('%s'%setvel)
    socket_cmd.setboth = int('%s'%setboth)
    arm_mode_flag = True
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive speed-mode data from the strategy side
    """Record the requested arm speed mode; consumed by Socket_command().

    Cleanup: the original declared `global speed_mode_flag` but never
    assigned it, so the declaration was dead code and has been removed.
    """
    socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ## create the server node
    """ROS publisher loop: broadcast [ArmState, SentFlag] from the shared
    state_feedback record on the 'chatter' topic at 10 Hz until shutdown."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        # rospy.loginfo(state)
        pub.publish(state)
        rate.sleep()
    # a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    # s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    # b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    #print ("Ready to connect")
    #rospy.spin() ## spin one
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Translate the pending command in socket_cmd into a TCP packet string
    and send it to the arm controller over the global Socket.

    Only acts when arm_mode_flag was set by Arm_Mode(); clears the flag and
    resets socket_cmd.action to the idle value (5) afterwards.

    NOTE(review): if socket_cmd.action matches no case, `data` is unbound
    and Socket.send below raises NameError — confirm callers always set a
    valid action.
    """
    global Socket,arm_mode_flag
    if arm_mode_flag == True:
        arm_mode_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                # Inner switch selects position / Euler / both control mode.
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        print("yes")
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #-------set the arm speed--------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #-------set the arm delay time--------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #-------set the arm fast/safe speed mode--------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the initial/idle mode state
        #print(data)
        Socket.send(data.encode('utf-8'))# send over the socket (encoded str)
##-----------socket client--------
def socket_client():
    """Connect to the arm controller and loop forever, translating its
    feedback frames into updates of the shared state_feedback record.

    Exits the process on connection failure; breaks the loop when the
    shutdown feedback value is seen.
    """
    global Socket
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    while 1:
        feedback_str = Socket.recv(1024)
        # The arm reports its state in byte 2 of the feedback frame
        # (ASCII codes: 48='0', 49='1', 54='6').
        if str(feedback_str[2]) == '48':# F: arm Ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: arm busy, cannot take a new command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # Send-confirmation flag lives in byte 4 of the frame.
        if str(feedback_str[4]) == '48':# reports 0 = false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# reports 1 = true
            state_feedback.SentFlag = 1
        ##---------------socket arm-command transfer end-----------------
        if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
            break
    rospy.on_shutdown(myhook)
    Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Thread entry point: run the blocking socket client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown hook: announce that the node is stopping."""
    print ("shutdown time!")
if __name__ == '__main__':
    # Start the socket client in a background thread, then run the ROS
    # publisher loop in the main thread until shutdown.
    socket_cmd.action = 5## reset to the initial/idle mode state
    t = threading.Thread(target=thread_test)
    t.start() # start the socket-client worker thread
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
main.py | from socket import *
import threading
import time
import random
import json
import pymysql
# Seed the RNG used below for generating 4-digit pairing passwords.
random.seed(time.time())
lock = threading.Lock()  # guards the three shared connection tables below
connected_com = dict()  # password -> PC socket
connected_dev = dict()  # (user_id, device_name) -> password of logged-in PC
connected_mob = dict()  # password -> mobile socket, or 0/1 claim flags
def getLog():
    """Background logger: print the connection tables every 5 minutes.

    The +9 shifts the host clock to UTC+9 (Korea); modulo keeps 0-23.
    """
    while True:
        now = time.localtime()
        print("%02d:%02d:%02d" % ((now.tm_hour+9)%24, now.tm_min, now.tm_sec) + str(connected_com) + str(connected_mob))
        time.sleep(300)
def run():
    """Interactive admin console: exec() every line typed on stdin.

    SECURITY NOTE(review): exec of raw operator input runs arbitrary code
    with the server's privileges — acceptable only for a trusted local
    operator; never expose this to remote input.
    """
    while True:
        try:
            s = input()
            exec(s)
        except error as m:
            # `error` comes from the socket star-import (socket.error).
            print(m)
def receive(connection_id):
    """Pump bytes from the mobile socket to the paired PC socket until the
    mobile side disconnects or the PC socket rejects a send."""
    source_sock = connected_mob[connection_id]
    target_sock = connected_com[connection_id]
    while True:
        try:
            recvData = source_sock.recv(1024)#check source alive
            if not recvData:
                # Empty read: mobile closed its end.
                source_sock.close()
                break
            target_sock.send(recvData)
        except:
            target_sock.send('disconnected with other device'.encode('utf-8'))
            break
def check(connection_id):
    """Watch the PC side of a pairing; tear the pairing down when it drops.

    Spawns receive() to pump mobile->PC while this function blocks on the
    PC socket to detect its disconnect. On PC failure the mobile side is
    notified and both sockets are removed from the tables.
    """
    source_sock = connected_mob[connection_id]
    target_sock = connected_com[connection_id]
    receiver = threading.Thread(target=receive, args=(connection_id,), daemon=True)
    receiver.start()
    try:
        while True:
            recvData = target_sock.recv(1024) # check target alive
            if not recvData:
                # Clean PC disconnect: unregister the pairing.
                lock.acquire()
                del connected_com[connection_id]
                del connected_mob[connection_id]
                lock.release()
                target_sock.close()
                break
    except OSError :
        # PC socket errored out: unregister, notify mobile, close both.
        lock.acquire()
        del connected_com[connection_id]
        del connected_mob[connection_id]
        lock.release()
        target_sock.close()
        source_sock.send('disconnected with other device'.encode('utf-8'))
        time.sleep(0.5)
        source_sock.close() ##reconnect? exti?
def make_connection(sock):
    """Send a logged-in mobile client the pairing password for the device
    it selects.

    Expects one JSON payload like {"id": ..., "did": ...}; looks up the
    password registered by that PC in connected_dev and echoes it back.
    Best-effort: lookup/decode/socket failures are silently ignored.
    """
    try:
        recvData = sock.recv(1024).decode('utf-8')
        conn_info = json.loads(recvData)
        pw = connected_dev[conn_info["id"], conn_info["did"]]
        sock.send(pw.encode('utf-8'))
    except (OSError, ValueError, KeyError, TypeError, UnicodeDecodeError):
        # Narrowed from a bare `except:` — still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        pass
    # conn_info["id" | "did"]
# conn_info["id" | "did"]
def dist(sock):
    """Dispatch a new connection by its first message.

    Protocols handled: 'com' (PC without login), 'login' (phone or PC with
    credentials), 'signup', or a 4-digit password sent by a mobile client
    joining an existing PC session.

    SECURITY NOTE(review): all SQL below is built by string formatting from
    client-supplied JSON — vulnerable to SQL injection. Should use
    parameterized queries (curs.execute(sql, params)) instead.
    """
    while True:
        recvData = sock.recv(1024).decode('utf-8')
        if(recvData == ''): break
        print("flag: {}".format(recvData))
        if( recvData == 'com' ): # from com
            # Issue a fresh, unused, non-'0000' 4-digit password.
            pw = f'0000'
            while(pw == '0000' or connected_com.get(pw,0) != 0 ):
                pw = f'{random.randrange(1, 10**4):04}'
            lock.acquire()
            connected_com[pw] = sock
            connected_mob[pw] = 0
            lock.release()
            sandData = pw.encode('utf-8')
            sock.send(sandData)
            break
        elif( recvData == 'login' ):
            recvData = sock.recv(1024).decode('utf-8')
            login_info = json.loads(recvData)
            print("login {}".format(recvData))
            # verify the credentials against the DB
            if(login_info.get("did",0) == 0 ): #phone
                sql = 'select count(*) from user_info where id = "{}" and pw = "{}";'.format(login_info["id"], login_info["pw"])
                curs.execute(sql)
                rows = curs.fetchall()
                if(rows[0][0] == 1):
                    # Build the comma-separated list of this user's devices
                    # that are currently online.
                    sql = 'select deviceName from conn_info where id = "{}";'.format(login_info["id"])
                    curs.execute(sql)
                    rows = curs.fetchall()
                    conn_list = ''
                    for r in rows:
                        if( connected_dev.get((login_info["id"],r[0]), 0) != 0 ):
                            conn_list += r[0] + ','
                    if(conn_list ==''): conn_list = 'empty'
                    sock.send(conn_list.encode('utf-8'))
                    conn = threading.Thread(target=make_connection, args=(sock,))
                    conn.start()
                else :
                    sock.send('fail'.encode('utf-8'))
                    continue
            else: # pc
                sql = 'select count(*) from user_info where id = "{}" and pw = "{}";'.format(login_info["id"], login_info["pw"])
                curs.execute(sql)
                rows = curs.fetchall()
                if(rows[0][0] == 0):
                    sock.send('fail'.encode('utf-8'))
                    break
                sql = 'insert conn_info(id, macAddr, DeviceName) values ("{}", "{}", "{}");'.format(login_info["id"], login_info["mac"], login_info["did"])
                try:
                    curs.execute(sql)
                    rows = curs.fetchall()
                except:
                    # Device row may already exist; ignore duplicate insert.
                    pass
                # Register the PC under a fresh password, keyed by (id, device).
                pw = f'0000'
                while(pw == '0000' or connected_com.get(pw,0) != 0 ):
                    pw = f'{random.randrange(1, 10**4):04}'
                lock.acquire()
                connected_com[pw] = sock
                connected_mob[pw] = 1
                connected_dev[login_info["id"], login_info["did"]] = pw
                lock.release()
                sock.send('ok'.encode('utf-8'))
                print("login end")
                break
        elif( recvData == 'signup'):
            recvData = sock.recv(1024).decode('utf-8')
            # NOTE(review): the "," below likely should be "." (str.format);
            # as written this prints the template and the data separately.
            print("signup {}",format(recvData))
            signup_info = json.loads(recvData) #id, pw, name, email
            print(signup_info)
            sql = 'insert user_info(id, pw, name, email) values ("{}", "{}", "{}", "{}");'.format(signup_info["id"], signup_info["pw"], signup_info["name"], signup_info["email"])
            try:
                curs.execute(sql)
                rows = curs.fetchall()
                sock.send('ok'.encode('utf-8'))
            except :
                sock.send('fail'.encode('utf-8'))
                continue
            print("signup end")
            break
        else : # from mobile, data : password
            try:
                if(connected_mob[recvData] == 0):
                    # PC is registered but unclaimed: mark it claimed.
                    sendData = 'Connected'.encode('utf-8')
                    sock.send(sendData)
                    lock.acquire()
                    connected_mob[recvData] = 1
                    lock.release()
                    time.sleep(10)
                    break
                else:
                    # Pair this mobile socket with the PC and start the bridge.
                    sendData = 'Connected'.encode('utf-8')
                    connected_com[recvData].send(sendData)
                    sock.send(sendData)
                    lock.acquire()
                    connected_mob[recvData] = sock
                    lock.release()
                    checking = threading.Thread(target=check, args=(recvData,))
                    checking.start()
                    break
            except KeyError:
                sendData = 'Invalid Password'
                sock.send(sendData.encode('utf-8'))
            except OSError:
                # PC socket is dead: report and unregister the stale entry.
                sendData = 'Invalid Password'
                sock.send(sendData.encode('utf-8'))
                lock.acquire()
                del connected_mob[recvData]
                del connected_com[recvData]
                lock.release()
                break
# mysql 5.7.22
# NOTE(review): database credentials are hardcoded and committed to source
# control -- move them to environment variables or a config file and rotate
# the password. Also note the SQL in the handler threads is built by string
# formatting and is injectable; switch to parameterized queries.
# Open the MySQL connection shared by all worker threads.
db_conn = pymysql.connect(host='database-1.clechpc6fvlz.us-east-1.rds.amazonaws.com',
        port=3306, user='admin', password='puri142857', db='capstone',
        charset='utf8')
# Cursor used for every query issued by the handler threads.
curs = db_conn.cursor()

# Listening socket for incoming mobile/PC connections.
port = 8081
serverSock = socket(AF_INET, SOCK_STREAM)
serverSock.bind(('', port))
serverSock.listen(1)

# Background workers: 'run' services established sessions, 'getLog' collects logs.
exe = threading.Thread(target=run)
exe.start()
logging = threading.Thread(target=getLog)
logging.start()

if __name__ == '__main__':
    try:
        # Accept loop: spawn one 'dist' handler thread per incoming connection.
        while True:
            print(connected_com)
            connectionSock, addr = serverSock.accept()
            print(str(addr), 'connected.')
            disting = threading.Thread(target=dist, args=(connectionSock,))
            disting.start()
    finally:
        # Bug fix: db_conn.close() previously appeared twice at module level,
        # where it executed immediately on import (closing the connection out
        # from under the worker threads) and was unreachable when run as a
        # script (the accept loop never returns). Close exactly once, only
        # when the accept loop terminates.
        db_conn.close()
test_kudu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kudu.schema import (
BOOL,
DOUBLE,
FLOAT,
INT16,
INT32,
INT64,
INT8,
SchemaBuilder,
STRING,
BINARY,
UNIXTIME_MICROS)
from kudu.client import Partitioning
from kudu.util import to_unixtime_micros
import logging
import pytest
import random
import textwrap
import threading
import time
from datetime import datetime
from pytz import utc
from tests.common.environ import ImpalaTestClusterProperties, HIVE_MAJOR_VERSION
from tests.common.kudu_test_suite import KuduTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfKudu, SkipIfHive2
from tests.common.test_dimensions import (add_exec_option_dimension,
extend_exec_option_dimension)
from tests.verifiers.metric_verifier import MetricVerifier
KUDU_MASTER_HOSTS = pytest.config.option.kudu_master_hosts
IMPALA_TEST_CLUSTER_PROPERTIES = ImpalaTestClusterProperties.get_instance()
LOG = logging.getLogger(__name__)
# TODO(IMPALA-8614): parameterize some tests to run with HMS integration enabled.
class TestKuduBasicDML(KuduTestSuite):
  """
  This suite tests the basic DML operations when using a kudu table.
  """
  @classmethod
  def add_test_dimensions(cls):
    # Extend the inherited dimensions with Kudu-specific query options so each
    # test below runs once per combination.
    super(TestKuduBasicDML, cls).add_test_dimensions()
    # The default read mode of READ_LATEST does not provide high enough consistency for
    # these tests.
    add_exec_option_dimension(cls, "kudu_read_mode", "READ_AT_SNAPSHOT")
    # Run with and without multithreading to ensure Kudu DML works with both threading
    # models. E.g. see IMPALA-9782.
    add_exec_option_dimension(cls, "mt_dop", "0")
    extend_exec_option_dimension(cls, "mt_dop", "4")
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_insert(self, vector, unique_database):
    """Runs the INSERT workload file against a Kudu table."""
    self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_update(self, vector, unique_database):
    """Runs the UPDATE workload file against a Kudu table."""
    self.run_test_case('QueryTest/kudu_update', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_upsert(self, vector, unique_database):
    """Runs the UPSERT workload file against a Kudu table."""
    self.run_test_case('QueryTest/kudu_upsert', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_delete(self, vector, unique_database):
    """Runs the DELETE workload file against a Kudu table."""
    self.run_test_case('QueryTest/kudu_delete', vector, use_db=unique_database)
# TODO(IMPALA-8614): parameterize some tests to run with HMS integration enabled.
class TestKuduOperations(KuduTestSuite):
  """
  This suite tests the different modification operations when using a kudu table.
  """
  @classmethod
  def add_test_dimensions(cls):
    # Pin the read mode for every test in this suite; individual tests that
    # need other options set them via `set` statements on their own cursor.
    super(TestKuduOperations, cls).add_test_dimensions()
    # The default read mode of READ_LATEST does not provide high enough consistency for
    # these tests.
    add_exec_option_dimension(cls, "kudu_read_mode", "READ_AT_SNAPSHOT")
  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_out_of_range_timestamps(self, vector, cursor, kudu_client, unique_database):
    """Test timestamp values that are outside of Impala's supported date range.

    Rows are written directly through the Kudu client (bypassing Impala), then
    read back by Impala at a pinned snapshot timestamp.
    """
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.times (a INT PRIMARY KEY, ts TIMESTAMP)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "times"))
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "times"))
    session = kudu_client.new_session()
    session.apply(table.new_insert((0, datetime(1987, 5, 19, 0, 0, tzinfo=utc))))
    # Add a date before 1400
    session.apply(table.new_insert((1, datetime(1300, 1, 1, 0, 0, tzinfo=utc))))
    # TODO: Add a date after 9999. There isn't a way to represent a date greater than
    # 9999 in Python datetime.
    #session.apply(table.new_insert((2, datetime(12000, 1, 1, 0, 0, tzinfo=utc))))
    session.flush()
    # Pin the snapshot so the scans below are guaranteed to see the rows just
    # written through the Kudu client.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
        to_unixtime_micros(kudu_client.latest_observed_timestamp()))
    # TODO: The test driver should have a way to specify query options in an 'options'
    # section rather than having to split abort_on_error cases into separate files.
    vector.get_value('exec_option')['abort_on_error'] = 0
    self.run_test_case('QueryTest/kudu-overflow-ts', vector,
        use_db=unique_database)
    vector.get_value('exec_option')['abort_on_error'] = 1
    self.run_test_case('QueryTest/kudu-overflow-ts-abort-on-error', vector,
        use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_scan_node(self, vector, unique_database):
    """Runs the Kudu scan node workload file."""
    self.run_test_case('QueryTest/kudu-scan-node', vector, use_db=unique_database)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_insert_mem_limit(self, vector, unique_database):
    """Runs Kudu inserts under a constrained memory limit."""
    self.run_test_case('QueryTest/kudu_insert_mem_limit', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_partition_ddl(self, vector, unique_database):
    """Runs partition-related DDL statements against Kudu tables."""
    self.run_test_case('QueryTest/kudu_partition_ddl', vector, use_db=unique_database)
  @pytest.mark.skipif(IMPALA_TEST_CLUSTER_PROPERTIES.is_remote_cluster(),
                      reason="Test references hardcoded hostnames: IMPALA-4873")
  @pytest.mark.execute_serially
  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_kudu_alter_table(self, vector, unique_database):
    """Runs ALTER TABLE statements against Kudu tables."""
    self.run_test_case('QueryTest/kudu_alter', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_stats(self, vector, unique_database):
    """Runs COMPUTE STATS / SHOW STATS checks on Kudu tables."""
    self.run_test_case('QueryTest/kudu_stats', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_describe(self, vector, unique_database):
    """Runs DESCRIBE output checks for Kudu tables."""
    self.run_test_case('QueryTest/kudu_describe', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_limit(self, vector, unique_database):
    """Runs LIMIT queries against Kudu tables."""
    self.run_test_case('QueryTest/kudu_limit', vector, use_db=unique_database)
  def test_kudu_column_options(self, cursor, kudu_client, unique_database):
    """Test Kudu column options.

    Creates one table for every combination of encoding, compression, default,
    block size and nullability clauses and verifies the backing Kudu table
    exists. An empty string means the clause is omitted.
    """
    encodings = ["ENCODING PLAIN_ENCODING", ""]
    compressions = ["COMPRESSION SNAPPY", ""]
    nullability = ["NOT NULL", "NULL", ""]
    defaults = ["DEFAULT 1", ""]
    blocksizes = ["BLOCK_SIZE 32768", ""]
    indx = 1
    for encoding in encodings:
      for compression in compressions:
        for default in defaults:
          for blocksize in blocksizes:
            for nullable in nullability:
              # Each combination gets its own numbered table name.
              impala_tbl_name = "test_column_options_%s" % str(indx)
              cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY
                  %s %s %s %s, b INT %s %s %s %s %s) PARTITION BY HASH (a)
                  PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name,
                  encoding, compression, default, blocksize, nullable, encoding,
                  compression, default, blocksize))
              indx = indx + 1
              assert kudu_client.table_exists(
                  KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name))
  def test_kudu_col_changed(
      self, cursor, kudu_client, unique_database, cluster_properties):
    """Test changing a Kudu column outside of Impala results in a failure on read with
    outdated metadata (IMPALA-4828)."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))
    # Load the table via the Kudu client and change col 's' to be a different type.
    # The change is done by dropping the column and re-adding it as int32.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("s", "int32")
    table = alterer.alter()
    # Add some rows
    session = kudu_client.new_session()
    for i in range(100):
      op = table.new_insert((i, i))
      session.apply(op)
    session.flush()
    # Pin the snapshot so the scans below see the rows just written.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
        to_unixtime_micros(kudu_client.latest_observed_timestamp()))
    # Scanning should result in an error with Catalog V1, since the metadata is cached.
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
      assert cluster_properties.is_catalog_v2_cluster(),\
          "Should fail with Catalog V1, which caches metadata"
    except Exception as e:
      assert not cluster_properties.is_catalog_v2_cluster(),\
          "Should succeed with Catalog V2, which does not cache metadata"
      expected_error = "Column 's' is type INT but Impala expected STRING. The table "\
          "metadata in Impala may be outdated and need to be refreshed."
      assert expected_error in str(e)
    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert len(cursor.fetchall()) == 100
  def test_kudu_col_not_null_changed(
      self, cursor, kudu_client, unique_database, cluster_properties):
    """Test changing a NOT NULL Kudu column outside of Impala results in a failure
    on read with outdated metadata (IMPALA-4828)."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NOT NULL)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))
    # Load the table via the Kudu client and change col 's' to be a different type.
    # The change is done by dropping the NOT NULL column and re-adding it nullable.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("s", "string", nullable=True)
    table = alterer.alter()
    # Add some rows
    session = kudu_client.new_session()
    for i in range(100):
      # NULL values are only legal in 's' after the out-of-band alter.
      op = table.new_insert((i, None))
      session.apply(op)
    session.flush()
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
        to_unixtime_micros(kudu_client.latest_observed_timestamp()))
    # Scanning should result in an error
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
      assert cluster_properties.is_catalog_v2_cluster(),\
          "Should fail with Catalog V1, which caches metadata"
    except Exception as e:
      assert not cluster_properties.is_catalog_v2_cluster(),\
          "Should succeed with Catalog V2, which does not cache metadata"
      expected_error = "Column 's' is nullable but Impala expected it to be "\
          "not nullable. The table metadata in Impala may be outdated and need to be "\
          "refreshed."
      assert expected_error in str(e)
    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert len(cursor.fetchall()) == 100
  def test_kudu_col_null_changed(
      self, cursor, kudu_client, unique_database, cluster_properties):
    """Test changing a NULL Kudu column outside of Impala results in a failure
    on read with outdated metadata (IMPALA-4828)."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NULL)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))
    # Load the table via the Kudu client and change col 's' to be a different type.
    # The change is done by dropping the nullable column and re-adding it as
    # NOT NULL with a default.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("s", "string", nullable=False, default="bar")
    table = alterer.alter()
    # Add some rows
    session = kudu_client.new_session()
    for i in range(100):
      op = table.new_insert((i, "foo"))
      session.apply(op)
    session.flush()
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
        to_unixtime_micros(kudu_client.latest_observed_timestamp()))
    # Scanning should result in an error
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
      assert cluster_properties.is_catalog_v2_cluster(),\
          "Should fail with Catalog V1, which caches metadata"
    except Exception as e:
      assert not cluster_properties.is_catalog_v2_cluster(),\
          "Should succeed with Catalog V2, which does not cache metadata"
      expected_error = "Column 's' is not nullable but Impala expected it to be "\
          "nullable. The table metadata in Impala may be outdated and need to be "\
          "refreshed."
      assert expected_error in str(e)
    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert len(cursor.fetchall()) == 100
  def test_kudu_col_added(self, cursor, kudu_client, unique_database, cluster_properties):
    """Test adding a Kudu column outside of Impala.

    With Catalog V1 the cached schema hides the new column until REFRESH; with
    Catalog V2 the new column is visible immediately.
    """
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))
    # Load the table via the Kudu client and add a new col
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("b", "int32")
    table = alterer.alter()
    # Add some rows
    session = kudu_client.new_session()
    op = table.new_insert((0, 0))
    session.apply(op)
    session.flush()
    # Pin the snapshot so the scans below see the row just written.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
        to_unixtime_micros(kudu_client.latest_observed_timestamp()))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    if cluster_properties.is_catalog_v2_cluster():
      # Changes in Kudu should be immediately visible to Impala with Catalog V2.
      assert cursor.fetchall() == [(0, 0)]
    else:
      # Only the first col is visible to Impala. Impala will not know about the missing
      # column, so '*' is expanded to known columns. This doesn't have a separate check
      # because the query can proceed and checking would need to fetch metadata from the
      # Kudu master, which is what REFRESH is for.
      assert cursor.fetchall() == [(0, )]
    # After a REFRESH both cols should be visible
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert cursor.fetchall() == [(0, 0)]
  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_kudu_col_removed(self, cursor, kudu_client, unique_database):
    """Test removing a Kudu column outside of Impala.

    A scan with the stale cached schema should fail; after REFRESH it succeeds
    with the remaining column only.
    """
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))
    cursor.execute("insert into %s.foo values (0, 'foo')" % (unique_database))
    # Load the table via the Kudu client and change col 's' to be a different type.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    # Scanning should result in an error
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    except Exception as e:
      expected_error = "Column 's' not found in kudu table impala::test_kudu_col_removed"
      assert expected_error in str(e)
    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert cursor.fetchall() == [(0, )]
  def test_kudu_show_unbounded_range_partition(self, cursor, kudu_client,
                                               unique_database):
    """Check that a single unbounded range partition gets printed correctly."""
    # Build a single-column Kudu schema keyed on 'id' directly via the client.
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    name = unique_database + ".unbounded_range_table"
    try:
      kudu_client.create_table(name, schema,
          partitioning=Partitioning().set_range_partition_columns(["id"]))
      kudu_table = kudu_client.table(name)
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("SHOW RANGE PARTITIONS %s" % impala_table_name)
        assert cursor.description == [
            ('RANGE (id)', 'STRING', None, None, None, None, None)]
        assert cursor.fetchall() == [('UNBOUNDED',)]
    finally:
      # Always clean up the Kudu-side table, even if the assertions fail.
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)
@SkipIfKudu.no_hybrid_clock
def test_column_storage_attributes(self, cursor, unique_database):
"""Tests that for every valid combination of column type, encoding, and compression,
we can insert a value and scan it back from Kudu."""
# This test takes about 2min and is unlikely to break, so only run it in exhaustive.
if self.exploration_strategy() != 'exhaustive':
pytest.skip("Only runs in exhaustive to reduce core time.")
table_name = "%s.storage_attrs" % unique_database
types = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'float', 'double', \
'string', 'timestamp', 'decimal', 'date', 'varchar(10)']
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
create_query = "create table %s (id int primary key" % table_name
for t in types:
# We truncate the type attributes in the column name to keep things simple.
create_query += ", %s_col %s" % (t.split('(')[0], t)
create_query += ") partition by hash(id) partitions 16 stored as kudu"
cursor.execute(create_query)
encodings = ['AUTO_ENCODING', 'PLAIN_ENCODING', 'PREFIX_ENCODING', 'GROUP_VARINT', \
'RLE', 'DICT_ENCODING', 'BIT_SHUFFLE']
compressions = ['DEFAULT_COMPRESSION', 'NO_COMPRESSION', 'SNAPPY', 'LZ4', 'ZLIB']
i = 0
for e in encodings:
for c in compressions:
for t in types:
try:
# We truncate the type attributes in the column name to keep things simple.
cursor.execute("""alter table %s alter column %s_col
set encoding %s compression %s""" % (table_name, t.split('(')[0], e, c))
except Exception as err:
assert "encoding %s not supported for type" % e in str(err)
cursor.execute("""insert into %s values (%s, true, 0, 0, 0, 0, 0, 0, '0',
cast('2009-01-01' as timestamp), cast(0 as decimal),
cast('2010-01-01' as date), cast('' as varchar(10)))""" % (table_name, i))
cursor.execute("select * from %s where id = %s" % (table_name, i))
assert cursor.fetchall() == \
[(i, True, 0, 0, 0, 0, 0.0, 0.0, '0', datetime(2009, 1, 1, 0, 0), 0,
'2010-01-01', '')]
i += 1
cursor.execute("select count(*) from %s" % table_name)
print cursor.fetchall() == [(i, )]
  def test_concurrent_schema_change(self, cursor, unique_database):
    """Tests that an insert into a Kudu table with a concurrent schema change either
    succeeds or fails gracefully.

    One thread repeatedly inserts while the main thread drops and re-adds
    'col1' with alternating types; any insert error must be one of the known
    schema-change messages.
    """
    table_name = "%s.test_schema_change" % unique_database
    cursor.execute("""create table %s (col0 bigint primary key, col1 bigint)
        partition by hash(col0) partitions 16 stored as kudu""" % table_name)
    iters = 5
    def insert_values():
      # Collect errors on the thread object so the main thread can inspect them.
      threading.current_thread().errors = []
      client = self.create_impala_client()
      for i in range(0, iters):
        time.sleep(random.random()) # sleeps for up to one second
        try:
          client.execute("insert into %s values (0, 0), (1, 1)" % table_name)
        except Exception as e:
          threading.current_thread().errors.append(e)
    insert_thread = threading.Thread(target=insert_values)
    insert_thread.start()
    # Concurrently churn the schema while the inserts are running.
    for i in range(0, iters):
      time.sleep(random.random()) # sleeps for up to one second
      cursor.execute("alter table %s drop column col1" % table_name)
      if i % 2 == 0:
        cursor.execute("alter table %s add columns (col1 string)" % table_name)
      else:
        cursor.execute("alter table %s add columns (col1 bigint)" % table_name)
    insert_thread.join()
    for error in insert_thread.errors:
      msg = str(error)
      # The first two are AnalysisExceptions, the next two come from KuduTableSink::Open()
      # if the schema has changed since analysis, the rest come from the Kudu server if
      # the schema changes between KuduTableSink::Open() and when the write ops are sent.
      possible_errors = [
        "has fewer columns (1) than the SELECT / VALUES clause returns (2)",
        "(type: TINYINT) is not compatible with column 'col1' (type: STRING)",
        "has fewer columns than expected.",
        "Column col1 has unexpected type.",
        "Client provided column col1[int64 NULLABLE] not present in tablet",
        "Client provided column col1 INT64 NULLABLE not present in tablet",
        "The column 'col1' must have type string NULLABLE found int64 NULLABLE"
      ]
      assert any(err in msg for err in possible_errors)
def _retry_query(self, cursor, query, expected):
retries = 0
while retries < 3:
cursor.execute(query)
result = cursor.fetchall()
if result == expected:
break
retries += 1
time.sleep(1)
assert retries < 3, \
"Did not get a correct result for %s after 3 retries: %s" % (query, result)
  def test_read_modes(self, cursor, unique_database):
    """Other Kudu tests are run with a scan level of READ_AT_SNAPSHOT to have predicable
    scan results. This test verifies that scans work as expected at the scan level of
    READ_LATEST by retrying the scan if the results are incorrect."""
    table_name = "%s.test_read_latest" % unique_database
    cursor.execute("set kudu_read_mode=READ_LATEST")
    cursor.execute("""create table %s (a int primary key, b string) partition by hash(a)
        partitions 8 stored as kudu""" % table_name)
    cursor.execute("insert into %s values (0, 'a'), (1, 'b'), (2, 'c')" % table_name)
    # READ_LATEST gives no consistency guarantee, so retry until the rows appear.
    self._retry_query(cursor, "select * from %s order by a" % table_name,
        [(0, 'a'), (1, 'b'), (2, 'c')])
    cursor.execute("""insert into %s select id, string_col from functional.alltypes
        where id > 2 limit 100""" % table_name)
    # 3 literal rows + 100 rows from functional.alltypes.
    self._retry_query(cursor, "select count(*) from %s" % table_name, [(103,)])
class TestCreateExternalTable(KuduTestSuite):
  """Tests CREATE EXTERNAL TABLE ... STORED AS KUDU against tables created
  directly through the Kudu client."""
  @SkipIfKudu.hms_integration_enabled
  def test_external_timestamp_default_value(self, cursor, kudu_client, unique_database):
    """Checks that a Kudu table created outside Impala with a default value on a
    UNIXTIME_MICROS column can be loaded by Impala, and validates the DESCRIBE
    output is correct."""
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    column_spec = schema_builder.add_column("ts", UNIXTIME_MICROS)
    column_spec.default(datetime(2009, 1, 1, 0, 0, tzinfo=utc))
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    name = unique_database + ".tsdefault"
    try:
      kudu_client.create_table(name, schema,
          partitioning=Partitioning().set_range_partition_columns(["id"]))
      kudu_table = kudu_client.table(name)
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE %s" % impala_table_name)
        table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        # Pytest shows truncated output on failure, so print the details just in case.
        LOG.info(table_desc)
        # 1230768000000000 microseconds == 2009-01-01 00:00:00 UTC, the default above.
        assert ["ts", "timestamp", "", "false", "true", "1230768000000000", \
            "AUTO_ENCODING", "DEFAULT_COMPRESSION", "0"] in table_desc
    finally:
      # Always clean up the Kudu-side table, even if the assertions fail.
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)
  @SkipIfKudu.hms_integration_enabled
  def test_implicit_table_props(self, cursor, kudu_client):
    """Check that table properties added internally during table creation are as
    expected.
    """
    with self.temp_kudu_table(kudu_client, [STRING, INT8, BOOL], num_key_cols=2) \
        as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE FORMATTED %s" % impala_table_name)
        table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        LOG.info(table_desc)
        # Pytest shows truncated output on failure, so print the details just in case.
        # The properties below are added by Impala itself during CREATE.
        assert ["", "EXTERNAL", "TRUE"] in table_desc
        assert ["", "kudu.master_addresses", KUDU_MASTER_HOSTS] in table_desc
        assert ["", "kudu.table_name", kudu_table.name] in table_desc
        assert ["", "storage_handler", "org.apache.hadoop.hive.kudu.KuduStorageHandler"] \
            in table_desc
  @SkipIfKudu.hms_integration_enabled
  def test_col_types(self, cursor, kudu_client):
    """Check that a table can be created using all available column types."""
    # TODO: Add DECIMAL when the Kudu python client supports decimal
    kudu_types = [STRING, BOOL, DOUBLE, FLOAT, INT16, INT32, INT64, INT8]
    with self.temp_kudu_table(kudu_client, kudu_types) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE %s" % impala_table_name)
        kudu_schema = kudu_table.schema
        # DESCRIBE rows must line up 1:1 with the Kudu schema, in order.
        for i, (col_name, col_type, _, _, _, _, _, _, _) in enumerate(cursor):
          kudu_col = kudu_schema[i]
          assert col_name == kudu_col.name
          assert col_type.upper() == \
              self.kudu_col_type_to_impala_col_type(kudu_col.type.type)
  @SkipIfKudu.hms_integration_enabled
  def test_unsupported_binary_col(self, cursor, kudu_client):
    """Check that external tables with BINARY columns fail gracefully.
    """
    with self.temp_kudu_table(kudu_client, [INT32, BINARY]) as kudu_table:
      impala_table_name = self.random_table_name()
      try:
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (impala_table_name,
                kudu_table.name))
        # CREATE must not succeed for a table containing a BINARY column.
        assert False
      except Exception as e:
        assert "Kudu type 'binary' is not supported in Impala" in str(e)
  @SkipIfKudu.hms_integration_enabled
  def test_drop_external_table(self, cursor, kudu_client):
    """Check that dropping an external table only affects the catalog and does not delete
    the table in Kudu.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
        assert cursor.fetchall() == [(0, )]
      # The Impala table was dropped on context exit; querying it must now fail.
      try:
        cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
        assert False
      except Exception as e:
        assert "Could not resolve table reference" in str(e)
      # ...but the backing Kudu table must still exist.
      assert kudu_client.table_exists(kudu_table.name)
  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name(self, cursor, kudu_client):
    """Check that a Kudu table can be specified using a table property."""
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      # The Impala name is random and unrelated to the Kudu table name; the
      # mapping comes entirely from the 'kudu.table_name' property.
      table_name = self.random_table_name()
      cursor.execute("""
          CREATE EXTERNAL TABLE %s
          STORED AS KUDU
          TBLPROPERTIES('kudu.table_name' = '%s')""" % (table_name, kudu_table.name))
      with self.drop_impala_table_after_context(cursor, table_name):
        cursor.execute("SELECT * FROM %s" % table_name)
        assert len(cursor.fetchall()) == 0
  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name_preference(self, cursor, kudu_client):
    """Check that the table name from a table property is used when a table of the
    implied name also exists.
    """
    with self.temp_kudu_table(kudu_client, [INT64]) as preferred_kudu_table:
      with self.temp_kudu_table(kudu_client, [INT8]) as other_kudu_table:
        # The Impala name matches 'other_kudu_table', but the property points
        # at 'preferred_kudu_table' -- the property must win.
        impala_table_name = self.get_kudu_table_base_name(other_kudu_table.name)
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (
                impala_table_name, preferred_kudu_table.name))
        with self.drop_impala_table_after_context(cursor, impala_table_name):
          cursor.execute("DESCRIBE %s" % impala_table_name)
          # 'bigint' proves the INT64 (preferred) table was mapped, not INT8.
          assert cursor.fetchall() == \
              [("a", "bigint", "", "true", "false", "", "AUTO_ENCODING",
                "DEFAULT_COMPRESSION", "0")]
  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name_doesnt_exist(self, cursor, kudu_client):
    """Check that mapping to a non-existent Kudu table name fails cleanly."""
    kudu_table_name = self.random_table_name()
    try:
      cursor.execute("""
          CREATE EXTERNAL TABLE %s
          STORED AS KUDU
          TBLPROPERTIES('kudu.table_name' = '%s')""" % (
              self.random_table_name(), kudu_table_name))
      # CREATE must not succeed when the target Kudu table does not exist.
      assert False
    except Exception as e:
      assert "Table does not exist in Kudu: '%s'" % kudu_table_name in str(e)
  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name_doesnt_exist_but_implicit_does(self, cursor, kudu_client):
    """Check that when an explicit table name is given but that table doesn't exist,
    there is no fall-through to an existing implicit table.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      table_name = self.random_table_name()
      try:
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (
                self.get_kudu_table_base_name(kudu_table.name), table_name))
        # CREATE must fail even though a table of the implied name exists.
        assert False
      except Exception as e:
        assert "Table does not exist in Kudu: '%s'" % table_name in str(e)
  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_table_without_partitioning(self, cursor, kudu_client, unique_database):
    """Test a Kudu table created without partitioning (i.e. equivalent to a single
    unbounded partition). It is not possible to create such a table in Impala, but
    it can be created directly in Kudu and then loaded as an external table.
    Regression test for IMPALA-5154."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    # An empty range-partition column list yields one unbounded partition.
    partitioning = Partitioning().set_range_partition_columns([])
    name = "%s.one_big_unbounded_partition" % unique_database
    try:
      kudu_client.create_table(name, schema, partitioning=partitioning)
      kudu_table = kudu_client.table(name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (name, props))
      with self.drop_impala_table_after_context(cursor, name):
        cursor.execute("INSERT INTO %s VALUES (1), (2), (3)" % name)
        cursor.execute("SELECT COUNT(*) FROM %s" % name)
        assert cursor.fetchall() == [(3, )]
        # SHOW RANGE PARTITIONS is expected to be rejected for this table.
        try:
          cursor.execute("SHOW RANGE PARTITIONS %s" % name)
          assert False
        except Exception as e:
          assert "AnalysisException: SHOW RANGE PARTITIONS requested but table does "\
              "not have range partitions" in str(e)
    finally:
      # Always clean up the Kudu-side table, even if the assertions fail.
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_column_name_case(self, cursor, kudu_client, unique_database):
"""IMPALA-5286: Tests that an external Kudu table that was created with a column name
containing upper case letters is handled correctly."""
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
table_name = '%s.kudu_external_test' % unique_database
if kudu_client.table_exists(table_name):
kudu_client.delete_table(table_name)
schema_builder = SchemaBuilder()
key_col = 'Key'
schema_builder.add_column(key_col, INT64).nullable(False).primary_key()
schema = schema_builder.build()
partitioning = Partitioning().set_range_partition_columns([key_col])\
.add_range_partition([1], [10])
try:
kudu_client.create_table(table_name, schema, partitioning)
props = "tblproperties('kudu.table_name' = '%s')" % table_name
cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
# Perform a variety of operations on the table.
cursor.execute("insert into %s (kEy) values (5), (1), (4)" % table_name)
cursor.execute("select keY from %s where KeY %% 2 = 0" % table_name)
assert cursor.fetchall() == [(4, )]
cursor.execute("select * from %s order by kEY" % (table_name))
assert cursor.fetchall() == [(1, ), (4, ), (5, )]
# Do a join with a runtime filter targeting the column.
cursor.execute("select count(*) from %s a, %s b where a.key = b.key" %
(table_name, table_name))
assert cursor.fetchall() == [(3, )]
cursor.execute("alter table %s add range partition 11 < values < 20" % table_name)
new_key = "KEY2"
cursor.execute("alter table %s change KEy %s bigint" % (table_name, new_key))
val_col = "vaL"
cursor.execute("alter table %s add columns (%s bigint)" % (table_name, val_col))
cursor.execute("describe %s" % table_name)
results = cursor.fetchall()
# 'describe' should print the column name in lower case.
assert new_key.lower() in results[0]
assert val_col.lower() in results[1]
cursor.execute("alter table %s drop column Val" % table_name);
cursor.execute("describe %s" % table_name)
assert len(cursor.fetchall()) == 1
cursor.execute("alter table %s drop range partition 11 < values < 20" % table_name)
finally:
if kudu_client.table_exists(table_name):
kudu_client.delete_table(table_name)
  @SkipIfKudu.hms_integration_enabled
  def test_conflicting_column_name(self, cursor, kudu_client, unique_database):
    """IMPALA-5283: Tests that loading an external Kudu table that was created with column
    names that differ only in case results in an error."""
    table_name = '%s.kudu_external_test' % unique_database
    if kudu_client.table_exists(table_name):
      kudu_client.delete_table(table_name)
    schema_builder = SchemaBuilder()
    # Two columns whose names collide under Impala's case-insensitive naming.
    col0 = 'col'
    schema_builder.add_column(col0, INT64).nullable(False).primary_key()
    col1 = 'COL'
    schema_builder.add_column(col1, INT64)
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([col0])\
        .add_range_partition([1], [10])
    try:
      kudu_client.create_table(table_name, schema, partitioning)
      props = "tblproperties('kudu.table_name' = '%s')" % table_name
      cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
      assert False, 'create table should have resulted in an exception'
    except Exception as e:
      assert 'Error loading Kudu table: Impala does not support column names that ' \
          + 'differ only in casing' in str(e)
    finally:
      if kudu_client.table_exists(table_name):
        kudu_client.delete_table(table_name)
class TestShowCreateTable(KuduTestSuite):
  """Verifies SHOW CREATE TABLE output for managed/external/synchronized Kudu
  tables, including the HMS-3 translation to external tables."""
  # Column properties SHOW CREATE TABLE prints for a column declared with all
  # defaults (no explicit encoding/compression).
  column_properties = "ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION"
  def assert_show_create_equals(self, cursor, create_sql, show_create_sql,
      do_exact_match=False):
    """Executes 'create_sql' to create a table, then runs "SHOW CREATE TABLE" and checks
    that the output is the same as 'show_create_sql'. 'create_sql' and
    'show_create_sql' can be templates that can be used with str.format(). format()
    will be called with 'table' and 'db' as keyword args. Also, compares HMS-3 specific
    output due to HMS translation. If do_exact_match is True does not manipulate the
    output and compares exactly with the show_create_sql parameter.
    """
    format_args = {"table": self.random_table_name(), "db": cursor.conn.db_name}
    cursor.execute(create_sql.format(**format_args))
    cursor.execute("SHOW CREATE TABLE {table}".format(**format_args))
    output = cursor.fetchall()[0][0]
    if not do_exact_match and HIVE_MAJOR_VERSION > 2:
      # in case of HMS-3 all Kudu tables are translated to external tables with some
      # additional properties. This code below makes sure that we have the expected table
      # properties and the table is external
      # TODO we should move these tests to a query.test file so that we can have better
      # way to compare the output against different hive versions
      assert output.startswith("CREATE EXTERNAL TABLE")
      assert "TBLPROPERTIES ('external.table.purge'='TRUE', " in output
      # We have made sure that the output starts with CREATE EXTERNAL TABLE, now we can
      # change it to "CREATE TABLE" to make it easier to compare rest of the str
      output = output.replace("CREATE EXTERNAL TABLE", "CREATE TABLE")
      # We should also remove the additional tbl property external.table.purge so that we
      # can compare the rest of output
      output = output.replace("TBLPROPERTIES ('external.table.purge'='TRUE', ",
          "TBLPROPERTIES (")
    # dedent() + strip() so the expected template can be written indented here.
    assert output == \
        textwrap.dedent(show_create_sql.format(**format_args)).strip()
  @SkipIfKudu.hms_integration_enabled
  def test_primary_key_and_distribution(self, cursor):
    # TODO: Add case with BLOCK_SIZE
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT PRIMARY KEY)
        PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (c)
        )
        PARTITION BY HASH (c) PARTITIONS 3
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT PRIMARY KEY, d STRING NULL)
        PARTITION BY HASH (c) PARTITIONS 3, RANGE (c)
        (PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
        PARTITION 2 < VALUES) STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          d STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (c)
        )
        PARTITION BY HASH (c) PARTITIONS 3, RANGE (c) (...)
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT ENCODING PLAIN_ENCODING, PRIMARY KEY (c))
        PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING PLAIN_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (c)
        )
        PARTITION BY HASH (c) PARTITIONS 3
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT COMPRESSION LZ4, d STRING, PRIMARY KEY(c, d))
        PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3,
        RANGE (c, d) (PARTITION VALUE = (1, 'aaa'), PARTITION VALUE = (2, 'bbb'))
        STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION LZ4,
          d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (c, d)
        )
        PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3, RANGE (c, d) (...)
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT, d STRING, e INT NULL DEFAULT 10, PRIMARY KEY(c, d))
        PARTITION BY RANGE (c) (PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
        PARTITION 2 < VALUES <= 3, PARTITION 3 < VALUES) STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          e INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT 10,
          PRIMARY KEY (c, d)
        )
        PARTITION BY RANGE (c) (...)
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT PRIMARY KEY) STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (c)
        )
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT COMMENT 'Ab 1@' PRIMARY KEY) STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL {p} COMMENT 'Ab 1@',
          PRIMARY KEY (c)
        )
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, p=self.column_properties,
            kudu_addr=KUDU_MASTER_HOSTS))
  @SkipIfKudu.hms_integration_enabled
  def test_timestamp_default_value(self, cursor):
    create_sql_fmt = """
        CREATE TABLE {table} (c INT, d TIMESTAMP,
        e TIMESTAMP NULL DEFAULT CAST('%s' AS TIMESTAMP),
        PRIMARY KEY(c, d))
        PARTITION BY HASH(c) PARTITIONS 3
        STORED AS KUDU"""
    # Long lines are unfortunate, but extra newlines will break the test.
    show_create_sql_fmt = """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          d TIMESTAMP NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          e TIMESTAMP NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT unix_micros_to_utc_timestamp(%s),
          PRIMARY KEY (c, d)
        )
        PARTITION BY HASH (c) PARTITIONS 3
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS)
    # Sub-microsecond precision is rounded/truncated to the same micros value,
    # so all three inputs print the identical DEFAULT expression.
    self.assert_show_create_equals(cursor,
        create_sql_fmt % ("2009-01-01 00:00:00.000001000"),
        show_create_sql_fmt % ("1230768000000001"))
    self.assert_show_create_equals(cursor,
        create_sql_fmt % ("2009-01-01 00:00:00.000001001"),
        show_create_sql_fmt % ("1230768000000001"))
    self.assert_show_create_equals(cursor,
        create_sql_fmt % ("2009-01-01 00:00:00.000000999"),
        show_create_sql_fmt % ("1230768000000001"))
  @SkipIfKudu.hms_integration_enabled
  def test_external_kudu_table_name_with_show_create(self, cursor, kudu_client,
      unique_database):
    """Check that the generated kudu.table_name tblproperty is present with
    show create table with external Kudu tables.
    """
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    schema_builder.set_primary_keys(["id"])
    partitioning = Partitioning().set_range_partition_columns(["id"])
    schema = schema_builder.build()
    kudu_table_name = self.random_table_name()
    try:
      kudu_client.create_table(kudu_table_name, schema, partitioning)
      kudu_table = kudu_client.table(kudu_table_name)
      table_name_prop = "'kudu.table_name'='%s'" % kudu_table.name
      self.assert_show_create_equals(cursor,
          """
          CREATE EXTERNAL TABLE {{table}} STORED AS KUDU
          TBLPROPERTIES({props})""".format(
              props=table_name_prop),
          """
          CREATE EXTERNAL TABLE {db}.{{table}}
          STORED AS KUDU
          TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}', {kudu_table})""".format(
              db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS,
              kudu_table=table_name_prop), True)
    finally:
      if kudu_client.table_exists(kudu_table_name):
        kudu_client.delete_table(kudu_table_name)
  @SkipIfKudu.hms_integration_enabled
  def test_managed_kudu_table_name_with_show_create(self, cursor):
    """Check that the generated kudu.table_name tblproperty is not present with
    show create table with managed Kudu tables.
    """
    self.assert_show_create_equals(cursor,
        """
        CREATE TABLE {table} (c INT PRIMARY KEY)
        PARTITION BY HASH (c) PARTITIONS 3
        STORED AS KUDU""",
        """
        CREATE TABLE {db}.{{table}} (
          c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (c)
        )
        PARTITION BY HASH (c) PARTITIONS 3
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  def test_synchronized_kudu_table_with_show_create(self, cursor):
    # in this case we do exact match with the provided input since this is specifically
    # creating a synchronized table
    self.assert_show_create_equals(cursor,
        """
        CREATE EXTERNAL TABLE {table} (
          id BIGINT,
          name STRING,
          PRIMARY KEY(id))
        PARTITION BY HASH PARTITIONS 16
        STORED AS KUDU
        TBLPROPERTIES('external.table.purge'='true')""",
        """
        CREATE EXTERNAL TABLE {db}.{{table}} (
          id BIGINT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          name STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (id)
        )
        PARTITION BY HASH (id) PARTITIONS 16
        STORED AS KUDU
        TBLPROPERTIES ('external.table.purge'='true', 'kudu.master_addresses'='{kudu_addr}')"""
        .format(db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS), True)
    self.assert_show_create_equals(cursor,
        """
        CREATE EXTERNAL TABLE {table} (
          id BIGINT PRIMARY KEY,
          name STRING)
        PARTITION BY HASH(id) PARTITIONS 16
        STORED AS KUDU
        TBLPROPERTIES('external.table.purge'='true')""",
        """
        CREATE EXTERNAL TABLE {db}.{{table}} (
          id BIGINT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          name STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
          PRIMARY KEY (id)
        )
        PARTITION BY HASH (id) PARTITIONS 16
        STORED AS KUDU
        TBLPROPERTIES ('external.table.purge'='true', 'kudu.master_addresses'='{kudu_addr}')"""
        .format(db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS), True)
class TestDropDb(KuduTestSuite):
  """Tests DROP DATABASE behavior when the database contains Kudu tables."""
  @SkipIfKudu.hms_integration_enabled
  def test_drop_non_empty_db(self, unique_cursor, kudu_client):
    """Check that an attempt to drop a database will fail if Kudu tables are present
    and that the tables remain.
    """
    db_name = unique_cursor.conn.db_name
    with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      # Switch away so the DROP below is not rejected for being the current db.
      unique_cursor.execute("USE DEFAULT")
      try:
        unique_cursor.execute("DROP DATABASE %s" % db_name)
        assert False
      except Exception as e:
        assert "One or more tables exist" in str(e)
      # The table must still be queryable after the failed DROP.
      unique_cursor.execute("SELECT COUNT(*) FROM %s.%s" % (db_name, impala_table_name))
      assert unique_cursor.fetchall() == [(0, )]
  @SkipIfKudu.hms_integration_enabled
  def test_drop_db_cascade(self, unique_cursor, kudu_client):
    """Check that an attempt to drop a database will succeed even if Kudu tables are
    present and that the managed tables are removed.
    """
    db_name = unique_cursor.conn.db_name
    with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
      # Create an external Kudu table
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      # Create a managed Kudu table
      managed_table_name = self.random_table_name()
      unique_cursor.execute("""
          CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY HASH (a) PARTITIONS 3
          STORED AS KUDU""" % managed_table_name)
      kudu_table_name = "impala::" + db_name + "." + managed_table_name
      assert kudu_client.table_exists(kudu_table_name)
      # Create a table in HDFS
      hdfs_table_name = self.random_table_name()
      unique_cursor.execute("""
          CREATE TABLE %s (a INT) PARTITIONED BY (x INT)""" % (hdfs_table_name))
      unique_cursor.execute("USE DEFAULT")
      unique_cursor.execute("DROP DATABASE %s CASCADE" % db_name)
      unique_cursor.execute("SHOW DATABASES")
      assert (db_name, '') not in unique_cursor.fetchall()
      # CASCADE removes the managed Kudu table but leaves the external one's
      # underlying Kudu table intact.
      assert kudu_client.table_exists(kudu_table.name)
      assert not kudu_client.table_exists(managed_table_name)
class TestImpalaKuduIntegration(KuduTestSuite):
  """Tests Impala's behavior when the underlying Kudu table is modified or
  dropped out-of-band via the Kudu client."""
  @SkipIfKudu.hms_integration_enabled
  def test_replace_kudu_table(self, cursor, kudu_client):
    """Check that an external Kudu table is accessible if the underlying Kudu table is
    modified using the Kudu client.
    """
    # Create an external Kudu table
    col_names = ['a']
    with self.temp_kudu_table(kudu_client, [INT32], col_names=col_names) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      cursor.execute("DESCRIBE %s" % (impala_table_name))
      assert cursor.fetchall() == \
          [("a", "int", "", "true", "false", "", "AUTO_ENCODING",
            "DEFAULT_COMPRESSION", "0")]
      # Drop the underlying Kudu table and replace it with another Kudu table that has
      # the same name but different schema
      kudu_client.delete_table(kudu_table.name)
      assert not kudu_client.table_exists(kudu_table.name)
      new_col_names = ['b', 'c']
      name_parts = kudu_table.name.split(".")
      assert len(name_parts) == 2
      with self.temp_kudu_table(kudu_client, [STRING, STRING], col_names=new_col_names,
          db_name=name_parts[0], name= name_parts[1]) as new_kudu_table:
        assert kudu_client.table_exists(new_kudu_table.name)
        # Refresh the external table and verify that the new schema is loaded from
        # Kudu.
        cursor.execute("REFRESH %s" % (impala_table_name))
        cursor.execute("DESCRIBE %s" % (impala_table_name))
        assert cursor.fetchall() == \
            [("b", "string", "", "true", "false", "", "AUTO_ENCODING",
              "DEFAULT_COMPRESSION", "0"),
             ("c", "string", "", "false", "true", "", "AUTO_ENCODING",
              "DEFAULT_COMPRESSION", "0")]
  @SkipIfKudu.hms_integration_enabled
  def test_delete_external_kudu_table(self, cursor, kudu_client):
    """Check that Impala can recover from the case where the underlying Kudu table of
    an external table is dropped using the Kudu client.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      # Create an external Kudu table
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      cursor.execute("DESCRIBE %s" % (impala_table_name))
      assert cursor.fetchall() == \
          [("a", "int", "", "true", "false", "", "AUTO_ENCODING",
            "DEFAULT_COMPRESSION", "0")]
      # Drop the underlying Kudu table
      kudu_client.delete_table(kudu_table.name)
      assert not kudu_client.table_exists(kudu_table.name)
      err_msg = 'the table does not exist: table_name: "%s"' % (kudu_table.name)
      # NOTE(review): there is no 'assert False' after this REFRESH, so the
      # test also passes if the REFRESH unexpectedly succeeds — confirm
      # whether that is intentional before tightening.
      try:
        cursor.execute("REFRESH %s" % (impala_table_name))
      except Exception as e:
        assert err_msg in str(e)
      # DROP TABLE must succeed even though the Kudu-side table is gone.
      cursor.execute("DROP TABLE %s" % (impala_table_name))
      cursor.execute("SHOW TABLES")
      assert (impala_table_name,) not in cursor.fetchall()
  @SkipIfKudu.hms_integration_enabled
  def test_delete_managed_kudu_table(self, cursor, kudu_client, unique_database):
    """Check that dropping a managed Kudu table works even if the underlying Kudu table
    has been dropped externally."""
    impala_tbl_name = "foo"
    cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY) PARTITION BY HASH (a)
        PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name))
    kudu_tbl_name = KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name)
    assert kudu_client.table_exists(kudu_tbl_name)
    kudu_client.delete_table(kudu_tbl_name)
    assert not kudu_client.table_exists(kudu_tbl_name)
    cursor.execute("DROP TABLE %s.%s" % (unique_database, impala_tbl_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (impala_tbl_name,) not in cursor.fetchall()
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
class TestKuduMemLimits(KuduTestSuite):
  """Runs scans against Kudu under tight mem_limits and verifies each query
  either succeeds or fails cleanly with a 'Memory limit exceeded' error."""
  QUERIES = ["select * from tpch_kudu.lineitem where l_orderkey = -1",
             "select * from tpch_kudu.lineitem where l_commitdate like '%cheese'",
             "select * from tpch_kudu.lineitem limit 90"]
  # The value indicates the minimum memory requirements for the queries above, the first
  # memory limit corresponds to the first query
  QUERY_MEM_LIMITS = [1, 1, 10]
  @pytest.mark.execute_serially
  @pytest.mark.parametrize("mem_limit", [1, 10, 0])
  def test_low_mem_limit_low_selectivity_scan(self, cursor, mem_limit, vector):
    """Tests that the queries specified in this test suite run under the given
    memory limits."""
    # Use items() rather than the Python-2-only iteritems() so this works on
    # both Python 2 and Python 3 (iteritems() raises AttributeError on 3.x).
    exec_options = dict((k, str(v)) for k, v
        in vector.get_value('exec_option').items())
    exec_options['mem_limit'] = "{0}m".format(mem_limit)
    for i, q in enumerate(self.QUERIES):
      try:
        cursor.execute(q, configuration=exec_options)
        cursor.fetchall()
      except Exception as e:
        # A failure is only acceptable when running below the query's minimum
        # memory requirement, and only with a memory-limit error.
        if mem_limit > self.QUERY_MEM_LIMITS[i]:
          raise
        assert "Memory limit exceeded" in str(e)
    # IMPALA-4654: Validate the fix for a bug where LimitReached() wasn't respected in
    # the KuduScanner and the limit query above would result in a fragment running an
    # additional minute. This ensures that the num fragments 'in flight' reaches 0 in
    # less time than IMPALA-4654 was reproducing (~60sec) but yet still enough time that
    # this test won't be flaky.
    verifiers = [MetricVerifier(i.service)
                 for i in ImpalaCluster.get_e2e_test_cluster().impalads]
    for v in verifiers:
      v.wait_for_metric("impala-server.num-fragments-in-flight", 0, timeout=30)
@SkipIfHive2.create_external_kudu_table
class TestCreateSynchronizedTable(KuduTestSuite):
  """Tests 'synchronized' Kudu tables: external tables created with
  'external.table.purge'='true', which behave like managed tables."""
  def test_create_synchronized_table(self, cursor, kudu_client, unique_database):
    """
    Creates a synchronized Kudu table and makes sure that the statement does not fail.
    """
    table_name = self.random_table_name()
    # create a external kudu table with external.table.purge=true
    cursor.execute("""
      CREATE EXTERNAL TABLE %s.%s (
        id int PRIMARY KEY,
        name string)
      PARTITION BY HASH PARTITIONS 8
      STORED AS KUDU
      TBLPROPERTIES ('external.table.purge'='true')
    """ % (unique_database, table_name))
    # make sure that the table was created
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (table_name,) in cursor.fetchall()
    # make sure that the kudu table was created with default name
    assert kudu_client.table_exists(self.to_kudu_table_name(unique_database, table_name))
    # make sure that the external.table.purge property can be changed
    cursor.execute("ALTER TABLE %s.%s set TBLPROPERTIES ("
                   "'external.table.purge'='FALSE')" % (unique_database, table_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (table_name,) in cursor.fetchall()
    cursor.execute("ALTER TABLE %s.%s set TBLPROPERTIES ("
                   "'external.table.purge'='TRUE')" % (unique_database, table_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (table_name,) in cursor.fetchall()
    # make sure that table can be renamed
    new_table_name = self.random_table_name()
    cursor.execute("ALTER TABLE %s.%s rename to %s.%s" %
                   (unique_database, table_name, unique_database, new_table_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (new_table_name,) in cursor.fetchall()
    # make sure that the kudu table was created with default name
    assert kudu_client.table_exists(
      self.to_kudu_table_name(unique_database, new_table_name))
    # now make sure that table disappears after we remove it
    cursor.execute("DROP TABLE %s.%s" % (unique_database, new_table_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (new_table_name,) not in cursor.fetchall()
    # purge=true means the DROP also deletes the Kudu-side table.
    assert not kudu_client.table_exists(
      self.to_kudu_table_name(unique_database, new_table_name))
  def test_invalid_sync_table_stmts(self, cursor, kudu_client, unique_database):
    """
    Test makes sure that a invalid way to create a synchronized table is erroring out
    """
    table_name = self.random_table_name()
    try:
      cursor.execute("""
        CREATE EXTERNAL TABLE %s.%s (
          a int PRIMARY KEY)
        PARTITION BY HASH PARTITIONS 8
        STORED AS KUDU
        TBLPROPERTIES ('external.table.purge'='false')
      """ % (unique_database, table_name))
      assert False,\
          "Create table statement with external.table.purge=False should error out"
    except Exception as e:
      # We throw this exception since the analyzer checks for properties one by one.
      # This is the first property that it checks for an external table
      assert "Table property kudu.table_name must be specified when " \
             "creating an external Kudu table" in str(e)
    try:
      # missing external.table.purge in TBLPROPERTIES
      cursor.execute("""
        CREATE EXTERNAL TABLE %s.%s (
          a int PRIMARY KEY)
        PARTITION BY HASH PARTITIONS 8
        STORED AS KUDU
        TBLPROPERTIES ('FOO'='BAR')
      """ % (unique_database, table_name))
      assert False, \
          "Create external table statement must include external.table.purge property"
    except Exception as e:
      # We throw this exception since the analyzer checks for properties one by one.
      # This is the first property that it checks for an external table
      assert "Table property kudu.table_name must be specified when " \
             "creating an external Kudu table" in str(e)
    try:
      # Trying to create a managed table with external.purge.table property in it
      cursor.execute("""
        CREATE TABLE %s.%s (
          a int PRIMARY KEY)
        PARTITION BY HASH PARTITIONS 8
        STORED AS KUDU
        TBLPROPERTIES ('external.table.purge'='true')
      """ % (unique_database, table_name))
      assert False, \
          "Managed table creation with external.table.purge property must be disallowed"
    except Exception as e:
      assert "Table property 'external.table.purge' cannot be set to true " \
             "with an managed Kudu table." in str(e)
    # TODO should we block this?
    # A managed table with purge explicitly 'False' is currently accepted.
    cursor.execute("""
      CREATE TABLE %s.%s (
        a int PRIMARY KEY)
      PARTITION BY HASH PARTITIONS 8
      STORED AS KUDU
      TBLPROPERTIES ('external.table.purge'='False')""" % (unique_database, table_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (table_name,) in cursor.fetchall()
  def test_sync_tbl_with_kudu_table(self, cursor, kudu_client, unique_database):
    """
    Test tries to create a synchronized table with an existing Kudu table name and
    makes sure it fails.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      table_name = self.random_table_name()
      try:
        cursor.execute("""
          CREATE EXTERNAL TABLE %s.%s (
            a int PRIMARY KEY)
          PARTITION BY HASH PARTITIONS 8
          STORED AS KUDU
          TBLPROPERTIES('external.table.purge'='true', 'kudu.table_name' = '%s')"""
          % (unique_database, table_name,
             self.get_kudu_table_base_name(kudu_table.name)))
        assert False, "External tables with external.purge.table property must fail " \
                      "if the kudu table already exists"
      except Exception as e:
        assert "Not allowed to set 'kudu.table_name' manually for" \
               " synchronized Kudu tables" in str(e)
|
sslproxy_test.py | """Test routines to generate dummy certificates."""
import BaseHTTPServer
import os
import shutil
import signal
import socket
import tempfile
import threading
import time
import unittest

import certutils
import sslproxy
class Client(object):
  """Small TLS client used by the tests to connect to the dummy server."""

  def __init__(self, ca_cert_path, verify_cb, port, host_name='foo.com',
               host='localhost'):
    # Where to connect and how to verify the server's certificate.
    self.ca_cert_path = ca_cert_path
    self.verify_cb = verify_cb
    self.host = host
    self.port = port
    self.host_name = host_name
    self.connection = None

  def run_request(self):
    """Connect with SNI set to host_name, send a blank request, then close."""
    ssl_context = certutils.get_ssl_context()
    # Demand a certificate from the peer and route it through verify_cb.
    ssl_context.set_verify(certutils.VERIFY_PEER, self.verify_cb)
    ssl_context.use_certificate_file(self.ca_cert_path)
    ssl_context.load_verify_locations(self.ca_cert_path)
    raw_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection = certutils.get_ssl_connection(ssl_context, raw_socket)
    self.connection = connection
    connection.connect((self.host, self.port))
    connection.set_tlsext_host_name(self.host_name)
    try:
      connection.send('\r\n\r\n')
    finally:
      # Always shut the TLS session down cleanly, even if send() raised.
      connection.shutdown()
      connection.close()
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Minimal handler: reads the request line and sends no response."""
  protocol_version = 'HTTP/1.1'  # override BaseHTTPServer setting
  def handle_one_request(self):
    """Handle a single HTTP request."""
    # 65537 matches BaseHTTPRequestHandler's own read limit (64KB + 1).
    self.raw_requestline = self.rfile.readline(65537)
class WrappedErrorHandler(sslproxy.SslHandshakeHandler, Handler):
  """Wraps handler to verify expected sslproxy errors are being raised."""
  def setup(self):
    Handler.setup(self)
    try:
      sslproxy.SslHandshakeHandler.setup(self)
    except certutils.Error:
      # Record the failure on the server instead of propagating it, so the
      # test can assert the handshake failed. NOTE: stores the exception
      # *class*, not the raised instance.
      self.server.error_function = certutils.Error
  def finish(self):
    # Tear down both bases, SSL layer first (reverse of setup order).
    sslproxy.SslHandshakeHandler.finish(self)
    Handler.finish(self)
class DummyArchive(object):
  """Stand-in archive that mints a per-host certificate from a root CA."""

  def __init__(self, cert_str):
    # PEM string of the root CA used to sign generated certificates.
    self.root_ca_cert_str = cert_str

  def get_certificate(self, host):
    """Return a certificate for |host| signed by the stored root CA."""
    root_ca = self.root_ca_cert_str
    return certutils.generate_cert(root_ca, '', host)
class DummyFetch(object):
  """Stand-in for an http archive fetch object as consumed by sslproxy."""
  def __init__(self, cert_str):
    # sslproxy looks up server certificates via http_archive.get_certificate().
    self.http_archive = DummyArchive(cert_str)
class Server(BaseHTTPServer.HTTPServer):
"""SSL server."""
def __init__(self, ca_cert_path, use_error_handler=False, port=0,
host='localhost'):
self.ca_cert_path = ca_cert_path
with open(ca_cert_path, 'r') as ca_file:
ca_cert_str = ca_file.read()
self.http_archive_fetch = DummyFetch(ca_cert_str)
if use_error_handler:
self.HANDLER = WrappedErrorHandler
else:
self.HANDLER = sslproxy.wrap_handler(Handler)
try:
BaseHTTPServer.HTTPServer.__init__(self, (host, port), self.HANDLER)
except Exception, e:
raise RuntimeError('Could not start HTTPSServer on port %d: %s'
% (port, e))
def __enter__(self):
thread = threading.Thread(target=self.serve_forever)
thread.daemon = True
thread.start()
return self
def cleanup(self):
try:
self.shutdown()
except KeyboardInterrupt:
pass
def __exit__(self, type_, value_, traceback_):
self.cleanup()
class TestClient(unittest.TestCase):
  """End-to-end tests of the sslproxy handshake against a dummy SSL server."""
  _temp_dir = None

  def setUp(self):
    self._temp_dir = tempfile.mkdtemp(prefix='sslproxy_', dir='/tmp')
    # Use os.path.join: mkdtemp() returns a path with no trailing separator,
    # so the previous "self._temp_dir + 'testCA.pem'" concatenation created
    # sibling files *outside* the temp dir (e.g. /tmp/sslproxy_XXtestCA.pem)
    # that tearDown's rmtree never cleaned up.
    self.ca_cert_path = os.path.join(self._temp_dir, 'testCA.pem')
    self.cert_path = os.path.join(self._temp_dir, 'testCA-cert.cer')
    self.wrong_ca_cert_path = os.path.join(self._temp_dir, 'wrong.pem')
    self.wrong_cert_path = os.path.join(self._temp_dir, 'wrong-cert.cer')
    # Write both pem and cer files for certificates
    # NOTE(review): both calls write ca_cert_path; the second was probably
    # intended for wrong_ca_cert_path. Left as-is because test_wrong_cert
    # currently relies on wrong_cert_path not existing — confirm before fixing.
    certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
                                  cert_path=self.ca_cert_path)
    certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
                                  cert_path=self.ca_cert_path)

  def tearDown(self):
    # Remove the temp dir and every certificate written into it.
    if self._temp_dir:
      shutil.rmtree(self._temp_dir)

  def verify_cb(self, conn, cert, errnum, depth, ok):
    """A callback that verifies the certificate authentication worked.

    Args:
      conn: Connection object
      cert: x509 object
      errnum: possible error number
      depth: error depth
      ok: 1 if the authentication worked 0 if it didnt.
    Returns:
      1 or 0 depending on if the verification worked
    """
    self.assertFalse(cert.has_expired())
    self.assertGreater(time.strftime('%Y%m%d%H%M%SZ', time.gmtime()),
                       cert.get_notBefore())
    return ok

  def test_no_host(self):
    # An empty SNI host name must make the handshake fail.
    with Server(self.ca_cert_path) as server:
      c = Client(self.cert_path, self.verify_cb, server.server_port, '')
      self.assertRaises(certutils.Error, c.run_request)

  def test_client_connection(self):
    # Any non-empty host name should succeed: certs are minted per host.
    with Server(self.ca_cert_path) as server:
      c = Client(self.cert_path, self.verify_cb, server.server_port, 'foo.com')
      c.run_request()
      c = Client(self.cert_path, self.verify_cb, server.server_port,
                 'random.host')
      c.run_request()

  def test_wrong_cert(self):
    # Using a certificate path the server's CA did not issue must fail.
    with Server(self.ca_cert_path, True) as server:
      c = Client(self.wrong_cert_path, self.verify_cb, server.server_port,
                 'foo.com')
      self.assertRaises(certutils.Error, c.run_request)
# Script entry point: run the unittest test runner.
if __name__ == '__main__':
  signal.signal(signal.SIGINT, signal.SIG_DFL)  # Exit on Ctrl-C
  unittest.main()
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
class StatusBarButton(QPushButton):
    """Flat, fixed-size icon button used in the status bar.

    Wraps a zero-argument callback `func` so it can be triggered either by a
    mouse click or by pressing Return while the button has focus.
    """
    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Return:
            self.func()
        else:
            # Bug fix: previously all other keys were silently swallowed,
            # breaking QPushButton's default keyboard handling (e.g. Space
            # activation and focus navigation). Delegate to the base class.
            super().keyPressEvent(e)
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
    """Main wallet window; one instance per open wallet file."""
    # Qt signals used to marshal events from network/worker threads back
    # onto the GUI thread (Qt queues cross-thread signal emissions).
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    new_fx_quotes_signal = pyqtSignal()
    new_fx_history_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)  # (event name, args tuple)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the main window for *wallet*.

        Creates all tabs, wires menus/shortcuts, registers network
        callbacks and finally loads the wallet. *gui_object* is the
        application-wide GUI object providing config, daemon, tray, app
        and the shared timer.
        """
        QMainWindow.__init__(self)
        # References to application-wide services.
        self.gui_object = gui_object
        self.config = config = gui_object.config # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.setup_exception_hook()
        self.network = gui_object.daemon.network # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # Per-window state flags.
        self.cleaned_up = False
        self.is_max = False
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        # Display settings; fall back to the default unit if the stored
        # decimal point does not map to a known base unit.
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))
        self.completions = QStringListModel()
        # Build all tabs; the optional ones are only added when enabled in config.
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(QIcon(":icons/electrum.png"))
        self.init_menubar()
        # weakref.proxy so shortcut lambdas don't keep the tab widget alive.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i binds the loop variable now (avoids the late-binding closure pitfall).
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)
        # update fee slider in case we missed the callback
        self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
    def on_history(self, b):
        """Callback for 'on_history' network events: invalidate the cached
        fiat prices and defer the GUI refresh to the Qt thread via signal."""
        self.wallet.clear_coin_price_cache()
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        """Install the crash-reporter exception hook for this window."""
        Exception_Hook(self)
    def on_fx_history(self):
        """GUI-thread handler: redraw history and address list after new
        fiat exchange-rate history arrived."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_quotes(self, b):
        """Callback for 'on_quotes' network events; defers to the GUI thread."""
        self.new_fx_quotes_signal.emit()
    def on_fx_quotes(self):
        """GUI-thread handler for fresh exchange-rate quotes: refresh the
        status bar, the send/receive amount edits and (if needed) history."""
        self.update_status()
        # Refresh edits with the new rate
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_model.refresh('fx_quotes')
        self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        # Most recently pushed window wins; see top_level_window().
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        """Remove *window* from the top-level stack (raises ValueError if absent)."""
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
    def is_hidden(self):
        """Return True when the window is minimised or not shown at all."""
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        """Unhide the window and raise it above other windows."""
        self.show()
        self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
    def on_network(self, event, *args):
        """Dispatch a network callback. GUI-affecting events are forwarded to
        the GUI thread via network_signal (this callback is presumably not
        invoked on the GUI thread — see the 'Handle in GUI thread' branch)."""
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.print_error("unexpected network message:", event, args)
    def on_network_qt(self, event, args=None):
        """GUI-thread half of the network event pipeline (receives the events
        on_network() forwarded through network_signal)."""
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
            # The fee histogram affects the history tab's mempool estimates.
            self.history_model.on_fee_histogram()
        else:
            self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log the wallet path and give plugins a chance to clean up."""
        if self.wallet:
            self.print_error('close_wallet', self.wallet.storage.path)
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet):
        """Finish window setup for *wallet*: start its task thread, refresh
        all views, restore geometry and show (or hide) the window."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.tabs.show()
        self.init_geometry()
        # Start minimised to tray when configured and a tray icon exists.
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
    def watching_only_changed(self):
        """Refresh the window title and menu availability to reflect the
        wallet's type and watching-only status."""
        name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
        title = '%s %s - %s' % (name, ELECTRUM_VERSION,
                                self.wallet.basename())
        extra = [self.wallet.storage.get('wallet_type', '?')]
        if self.wallet.is_watching_only():
            extra.append(_('watching only'))
        title += ' [%s]'% ', '.join(extra)
        self.setWindowTitle(title)
        # Enable/show only the actions this wallet type supports.
        self.password_menu.setEnabled(self.wallet.may_have_password())
        self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
        self.import_address_menu.setVisible(self.wallet.can_import_address())
        self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Pop a warning dialog when the wallet cannot spend (watching-only)."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Bitcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Information'))
    def open_wallet(self):
        """Prompt for a wallet file and open it in a window (via gui_object)."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if not filename:
            # User cancelled the dialog.
            return
        self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
    def new_wallet(self):
        """Create/restore a wallet: pick an unused filename in the wallet
        folder and launch the install wizard for it."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename = get_new_wallet_name(wallet_folder)
        full_path = os.path.join(wallet_folder, filename)
        self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the full menu bar (File/Wallet/View/Tools/Help) and keep
        references to the actions that get toggled elsewhere (password,
        seed, import/export, recently-open)."""
        menubar = QMenuBar()

        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)

        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
        wallet_menu.addSeparator()
        # These actions are enabled/hidden per wallet type in
        # watching_only_changed().
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()

        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))

        def add_toggle_action(view_menu, tab):
            # Menu entry text mirrors the tab's current visibility.
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))

        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)

        tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are all reserved keywords in macOS using this as work around
        tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)

        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)

        self.setMenuBar(menubar)
    def donate_to_server(self):
        """Pay to the current server's donation address via a bitcoin: URI,
        or show an error when the server advertises none."""
        d = self.network.get_donation_address()
        if d:
            host = self.network.get_parameters().host
            self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
        else:
            self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the standard About dialog with version and blurb."""
        QMessageBox.about(self, "Electrum",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_report_bug(self):
        """Show bug-reporting instructions pointing at the GitHub tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
    def notify_transactions(self):
        """Drain the queue of incoming transactions and show tray
        notifications, rate-limited to one burst per 20 seconds and only
        once the wallet is synced."""
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.print_error("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if is_relevant:
                    total_amount += v
            self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if is_relevant:
                    self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
    def notify(self, message):
        """Show *message* as a system-tray notification, if a tray exists."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
            except TypeError:
                # Older Qt: fall back to the standard information icon.
                self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
    def timer_actions(self):
        """Periodic housekeeping, driven by gui_object.timer."""
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        # update fee
        if self.require_fee_update:
            self.do_update_fee()
            self.require_fee_update = False
        self.notify_transactions()
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format satoshi amount *x* using the window's unit/padding settings."""
        return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
    def format_fee_rate(self, fee_rate):
        """Format *fee_rate* (given per 1000 bytes) as a 'sat/byte' string."""
        return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
    def get_decimal_point(self):
        """Return the configured decimal point (selects the base unit)."""
        return self.decimal_point
    def base_unit(self):
        """Return the display unit name (e.g. derived from decimal_point)."""
        return decimal_point_to_base_unit_name(self.decimal_point)
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a BTC amount edit and its fiat twin in sync.

        Typing in either edit updates the other via the current exchange
        rate; the `follows` flag suppresses the resulting programmatic
        textChanged so the two edits don't feed back into each other.
        fee_e may be None; when present, window.update_fee() is invoked
        after fiat-driven BTC changes.
        """
        def edit_changed(edit):
            if edit.follows:
                # Programmatic update from the twin edit: ignore.
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status-bar balance text/icon and the tray tooltip
        from the current network and wallet state."""
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = QIcon(":icons/status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                text = _("Synchronizing...")
                icon = QIcon(":icons/status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = QIcon(":icons/status_lagging%s.png"%fork_str)
            else:
                c, u, x = self.wallet.get_balance()
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = QIcon(":icons/status_connected%s.png"%fork_str)
                else:
                    icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = QIcon(":icons/status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        self.status_button.setIcon( icon )
    def update_wallet(self):
        """Refresh the status bar; refresh the tabs too when the wallet is
        up to date or there is no (connected) network."""
        self.update_status()
        if self.wallet.up_to_date or not self.network or not self.network.is_connected():
            self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
    def create_history_tab(self):
        """Build the History tab: model + list view with its filter toolbar."""
        self.history_model = HistoryModel(self)
        self.history_list = l = HistoryList(self, self.history_model)
        self.history_model.set_view(self.history_list)
        l.searchable_list = l
        toolbar = l.create_toolbar(self.config)
        # Restore the toolbar's last visibility state from the config.
        toolbar_shown = self.config.get('show_toolbar_history', False)
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
    def show_address(self, addr):
        """Open the modal address-details dialog for *addr*."""
        from . import address_dialog
        d = address_dialog.AddressDialog(self, addr)
        d.exec_()
    def show_transaction(self, tx, tx_desc = None):
        '''Open the transaction dialog for tx.
        tx_desc is set only for txs created in the Send tab'''
        show_transaction(tx, self, tx_desc)
    def create_receive_tab(self):
        """Build the Receive tab: address/description/amount form, expiry
        selector, QR preview and the saved-requests list."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_address_e = ButtonsLineEdit()
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
        self.receive_address_label = HelpLabel(_('Receiving address'), msg)
        self.receive_address_e.textChanged.connect(self.update_receive_qr)
        self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
        grid.addWidget(self.receive_address_label, 0, 0)
        grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 1, 0)
        grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 2, 0)
        grid.addWidget(self.receive_amount_e, 2, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        # Fiat twin of the amount edit; hidden unless the FX rate is enabled.
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        self.expires_combo = QComboBox()
        self.expires_combo.addItems([i[0] for i in expiration_values])
        self.expires_combo.setCurrentIndex(3)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
            _('The bitcoin address never expires and will always be part of this electrum wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
        grid.addWidget(self.expires_combo, 3, 1)
        # Read-only label shown in place of the combo for saved requests.
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 3, 1)
        self.save_request_button = QPushButton(_('Save'))
        self.save_request_button.clicked.connect(self.save_payment_request)
        self.new_request_button = QPushButton(_('New'))
        self.new_request_button.clicked.connect(self.new_payment_request)
        self.receive_qr = QRCodeWidget(fixedSize=200)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.save_request_button)
        buttons.addWidget(self.new_request_button)
        grid.addLayout(buttons, 4, 1, 1, 2)
        self.receive_requests_label = QLabel(_('Requests'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addWidget(self.receive_qr)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_payment_request(self, addr):
    """Remove the saved payment request for *addr* and refresh the receive UI."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the ``bitcoin:`` URI for the payment request stored at *addr*,
    appending the request's timestamp, expiry and (when present) the
    requestor name with its base58-encoded signature."""
    request = self.wallet.receive_requests[addr]
    label = self.wallet.labels.get(addr, '')
    uri = util.create_URI(addr, request['amount'], label)
    timestamp = request.get('time')
    if timestamp:
        uri += "&time=%d" % timestamp
    expiry = request.get('exp')
    if expiry:
        uri += "&exp=%d" % expiry
    if request.get('name') and request.get('sig'):
        encoded_sig = bitcoin.base_encode(bfh(request.get('sig')), base=58)
        uri += "&name=" + request['name'] + "&sig=" + encoded_sig
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request stored for *addr* with the configured alias key.

    Signing only proceeds when an 'alias' is configured, its info has been
    resolved (self.alias_info is set), and the alias address belongs to this
    wallet. May prompt for the wallet password; returns silently if the user
    cancels or signing fails (after showing the error).
    """
    alias = self.config.get('alias')
    alias_privkey = None  # NOTE(review): assigned but never used below
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # User cancelled the password prompt: leave unsigned.
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                # Alias address is not ours: we cannot sign with its key.
                return
def save_payment_request(self):
    """Create and persist a payment request from the receive form.

    Requires at least a message or an amount; shows an error and returns
    False otherwise. On success the request is signed (if an alias is
    configured) and the request/address lists are refreshed either way.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    i = self.expires_combo.currentIndex()
    # Index directly into the (label, seconds) pairs instead of building a
    # throwaway list with map() on every call.
    expiration = expiration_values[i][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # Refresh views whether or not adding succeeded.
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR view and a copy/close button."""
    dialog = WindowModalDialog(self, title)
    msg_label = QLabel(msg)
    msg_label.setWordWrap(True)
    qr_edit = ShowQRTextEdit(text=data)
    layout = QVBoxLayout()
    layout.addWidget(msg_label)
    layout.addWidget(qr_edit)
    layout.addLayout(Buttons(CopyCloseButton(qr_edit.text, self.app, dialog)))
    dialog.setLayout(layout)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the payment request for *addr* to a BIP70 file chosen by the user."""
    req = self.wallet.receive_requests.get(addr)
    serialized = paymentrequest.serialize_request(req).SerializeToString()
    default_name = req['id'] + '.bip70'
    filename = self.getSaveFileName(_("Select where to save your payment request"), default_name, "*.bip70")
    if not filename:
        return
    with open(filename, "wb+") as f:
        f.write(util.to_bytes(serialized))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Pick (or create) an unused address and prime the receive form for it.

    Non-deterministic wallets cannot grow, so the user is only informed.
    For deterministic wallets that ran out of unused addresses, the user is
    warned before an address beyond the gap limit is created.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    # Reset the expiry widgets to their editable state for the new request.
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Show *addr* in the receive tab, clearing the message and amount fields."""
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.receive_address_e.setText(addr)
def clear_receive_tab(self):
    """Reset the receive tab to a fresh receiving address and empty fields."""
    try:
        address = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as exc:
        self.show_error(str(exc))
        address = ''
    self.receive_address_e.setText(address)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR window, remembering its geometry across toggles."""
    from . import qrwindow
    if not self.qr_window:
        # First use: create the window and remember where it opened.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif self.qr_window.isVisible():
        # Hide it, saving geometry so the next toggle restores position.
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    else:
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    send_index = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(send_index)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    receive_index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(receive_index)
def receive_at(self, addr):
    """Open the receive tab pre-filled with *addr*; invalid addresses are ignored."""
    if bitcoin.is_address(addr):
        self.show_receive_tab()
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the receive QR code (and QR window, if open) from the form fields."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    have_content = (amount is not None) or (message != "")
    self.save_request_button.setEnabled(have_content)
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    qr_win = self.qr_window
    if qr_win and qr_win.isVisible():
        qr_win.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
    """Cache the message describing the *num_satoshis_added* sats of fee rounding."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build and return the Send tab widget.

    Lays out the pay-to / description / amount rows, the fee controls
    (slider plus advanced feerate/size/fee boxes), the action buttons and
    the invoice list, and wires up the signal handlers that keep the fee
    widgets mutually consistent.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)

    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)

    # Autocomplete recipients from the contact list.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)

    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)

    # "From" row: shown only when coins are manually selected (see set_pay_from).
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])

    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)

    # Fiat mirror of the amount field (hidden unless exchange rates enabled).
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))

    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)

    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)

    def fee_cb(dyn, pos, fee_rate):
        # Slider moved: persist the chosen fee level/rate, mirror it in the
        # feerate box, and recompute the transaction fee.
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)

        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)

        self.fee_slider.activate()
        self.spend_max() if self.is_max else self.update_fee()

    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)

    def on_fee_or_feerate(edit_changed, editing_finished):
        # Keep the absolute-fee and feerate boxes mutually exclusive:
        # editing one "freezes" it and unfreezes the other.
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()

    class TxSizeLabel(QLabel):
        # Read-only label showing the estimated tx size in the fee row.
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')

    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())

    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))

    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))

    def feerounding_onclick():
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        QMessageBox.information(self, 'Fee rounding', text)

    self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)

    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)

    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)

    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)

    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)

    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)

    # Advanced fee boxes are hidden unless enabled in preferences.
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)

    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)

    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)

    def reset_max(text):
        # Any manual amount entry cancels "Max" mode.
        self.is_max = False
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)

    def entry_changed():
        # Recolor amount/fee/feerate fields: red = not enough funds,
        # blue = auto-filled value, default = user-entered.
        text = ""

        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT

        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _( "Not enough funds" )
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'

        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE

        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())

    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)

    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)

    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Enter 'Max' mode (spend the whole balance) and recompute the fee,
    unless a plugin aborts the send."""
    if run_hook('abort_send', self):
        return
    self.is_max = True
    self.do_update_fee()
def update_fee(self):
    """Request an asynchronous fee recalculation.

    Only raises a flag; presumably a timer elsewhere polls it and calls
    do_update_fee() — the consumer is outside this chunk (TODO confirm).
    """
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the recipient from the Pay-To field, or a dummy wallet address
    (used for fee estimation before a real recipient is entered)."""
    recipient = self.payto_e.get_recipient()
    return recipient or (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.

    Builds a draft transaction from the current form state, then updates the
    size, fee and feerate widgets and the fee-rounding indicator. In 'Max'
    mode the amount field is back-filled from the draft's output value.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.is_max else self.amount_e.get_amount()
    if amount is None:
        # Nothing to estimate: clear auto-filled fee and any error state.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
    else:
        fee_estimator = self.get_send_fee_estimator()
        outputs = self.payto_e.get_outputs(self.is_max)
        if not outputs:
            # No parsed recipient yet: estimate against a dummy output.
            _type, addr = self.get_payto_or_dummy()
            outputs = [TxOutput(_type, addr, amount)]
        is_sweep = bool(self.tx_external_keypairs)
        make_tx = lambda fee_est: \
            self.wallet.make_unsigned_transaction(
                self.get_coins(), outputs, self.config,
                fixed_fee=fee_est, is_sweep=is_sweep)
        try:
            tx = make_tx(fee_estimator)
            self.not_enough_funds = False
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            if not freeze_fee:
                self.fee_e.setAmount(None)
            if not freeze_feerate:
                self.feerate_e.setAmount(None)
            self.feerounding_icon.setVisible(False)

            if isinstance(e, NotEnoughFunds):
                self.not_enough_funds = True
            elif isinstance(e, NoDynamicFeeEstimates):
                # No fee estimates yet: still try to show the tx size.
                try:
                    tx = make_tx(0)
                    size = tx.estimated_size()
                    self.size_e.setAmount(size)
                except BaseException:
                    pass
            return
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            return

        size = tx.estimated_size()
        self.size_e.setAmount(size)

        fee = tx.get_fee()
        fee = None if self.not_enough_funds else fee

        # Displayed fee/fee_rate values are set according to user input.
        # Due to rounding or dropping dust in CoinChooser,
        # actual fees often differ somewhat.
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate is not None:
                displayed_feerate = quantize_feerate(displayed_feerate)
            else:
                # fallback to actual fee
                displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)

        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(int(feerounding))
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(abs(feerounding) >= 1)

        if self.is_max:
            # Back-fill the amount with the spendable total minus plugin fees.
            amount = tx.output_value()
            __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
            amount_after_all_fees = amount - x_fee_amount
            self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Remove *item* from the manual coin-selection list and refresh."""
    idx = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(idx)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context menu for the coin list: offers removal of the clicked row."""
    clicked = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(clicked))
    menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
    """Replace the manual coin selection with a copy of *coins* and redraw."""
    self.pay_from = list(coins)
    self.redraw_from_list()
def redraw_from_list(self):
    """Rebuild the "From" coin list; the row and label hide when empty."""
    self.from_list.clear()
    hidden = not self.pay_from
    self.from_label.setHidden(hidden)
    self.from_list.setHidden(hidden)

    def describe(coin):
        # "txid[:10]...txid[-10:]:n<TAB>address"
        txid = coin.get('prevout_hash')
        outpoint = txid[0:10] + '...' + txid[-10:] + ":%d" % coin.get('prevout_n')
        return outpoint + u'\t' + "%s" % coin.get('address')

    for coin in self.pay_from:
        self.from_list.addTopLevelItem(
            QTreeWidgetItem([describe(coin), self.format_amount(coin['value'])]))
def get_contact_payto(self, key):
    """Return the pay-to string for contact *key*: "label <key>" for plain addresses."""
    _type, label = self.contacts.get(key)
    if _type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the pay-to autocompletion entries from the contact book."""
    entries = [self.get_contact_payto(k) for k in self.contacts.keys()]
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Loop until a correct password is entered or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue

        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """Truthy when the user has manually pinned an absolute fee in the fee box."""
    fee_e = self.fee_e
    return fee_e.isVisible() and fee_e.isModified() \
        and (fee_e.text() or fee_e.hasFocus())
def is_send_feerate_frozen(self):
    """Truthy when the user has manually pinned a feerate in the feerate box."""
    feerate_e = self.feerate_e
    return feerate_e.isVisible() and feerate_e.isModified() \
        and (feerate_e.text() or feerate_e.hasFocus())
def get_send_fee_estimator(self):
    """Return the user's fee constraint: an absolute fee (int), a callable
    estimator built from a pinned feerate, or None for automatic fees."""
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()  # sat/byte feerate
        feerate = 0 if feerate is None else feerate * 1000  # sat/kilobyte feerate
        return partial(simple_config.SimpleConfig.estimate_fee_for_feerate, feerate)
    return None
def read_send_tab(self):
    """Collect and validate the send form.

    Returns (outputs, fee_estimator, label, coins) on success, or None when
    validation fails (an error/warning dialog is shown in that case).
    """
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return
    label = self.message_e.text()

    if self.payment_request:
        # Outputs come from the verified payment request, not the form.
        outputs = self.payment_request.get_outputs()
    else:
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return
        outputs = self.payto_e.get_outputs(self.is_max)

        if self.payto_e.is_alias and self.payto_e.validated is False:
            # Alias resolved but failed the DNSSEC check: warn before sending.
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return

    if not outputs:
        self.show_error(_('No outputs'))
        return

    for o in outputs:
        if o.address is None:
            self.show_error(_('Bitcoin Address is None'))
            return
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Bitcoin Address'))
            return
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return

    fee_estimator = self.get_send_fee_estimator()
    coins = self.get_coins()
    return outputs, fee_estimator, label, coins
def do_preview(self):
    """Run the send flow in preview mode: show the tx instead of broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Validate the send form, build the transaction, confirm it with the
    user, then sign and (unless *preview*) broadcast it."""
    if run_hook('abort_send', self):
        return
    r = self.read_send_tab()
    if not r:
        return
    outputs, fee_estimator, tx_desc, coins = r
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        # Show the error but re-raise: this indicates wallet corruption.
        self.show_error(str(e))
        raise
    except BaseException as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return

    amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()

    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)

    # Refuse fees below the server's relay fee: such a tx would not propagate.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return

    if preview:
        self.show_transaction(tx, tx_desc)
        return

    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return

    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]

    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )

    confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > confirm_rate * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))

    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return

    def sign_done(success):
        if success:
            if not tx.is_complete():
                # Partially signed (e.g. multisig/watch-only): show instead
                # of broadcasting.
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    """Sign *tx* then invoke *callback(success)*.

    The @protected decorator prompts for (and validates) the wallet password
    before this body runs, passing it in as the 'password' argument.
    """
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # A plugin may wrap the success handler (e.g. two-factor signing).
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast *tx* on a worker thread, then report the result in the GUI.

    When a payment request is active, it is also marked paid and the BIP70
    payment ACK exchange is performed.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except Exception as e:
            status, msg = False, repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            # Send the BIP70 Payment message and wait (bounded) for the ACK.
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg

    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))

    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                # On success msg is the txid; label it and reset the form.
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                parent.show_error(msg)

    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Modal list-choice dialog; returns the selected index, or None on cancel.

    Needed by QtHandler for hardware wallets.
    """
    dialog = WindowModalDialog(self.top_level_window())
    choice_layout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(choice_layout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if dialog.exec_():
        return choice_layout.selected_index()
    return None
def lock_amount(self, b):
    """Freeze (b=True) or unfreeze the amount field; Max is usable only while unfrozen."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Put the send tab into read-only 'payment request' mode while the PR loads."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in [self.payto_e, self.message_e]:
        field.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Drop invoice *key* from storage and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Handle a payment request that verified OK: register it as an invoice
    and fill the send form from it (or bail out if it was already paid)."""
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Color the pay-to field by expiry state.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Report a failed payment request, then reset the send tab."""
    self.show_message(self.payment_request.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Receive a payment request (possibly off the GUI thread) and dispatch
    the matching Qt signal depending on whether it verifies."""
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    signal = self.payment_request_ok_signal if verified else self.payment_request_error_signal
    signal.emit()
def pay_to_URI(self, URI):
    """Populate the send tab from a BIP21 bitcoin: *URI*, or start an async
    payment-request fetch when the URI points at one."""
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except BaseException as e:
        self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
        return
    self.show_send_tab()
    if out.get('r') or (out.get('name') and out.get('sig')):
        # Payment-request URI: on_pr takes over once it is fetched.
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the entire send tab to its pristine state."""
    self.is_max = False
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    editable_fields = [self.payto_e, self.message_e, self.amount_e,
                       self.fiat_send_e, self.fee_e, self.feerate_e]
    for field in editable_fields:
        field.setText('')
        field.setFrozen(False)
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
    """Freeze or unfreeze *addrs* in the wallet and refresh dependent views."""
    self.wallet.set_frozen_state(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap list widget *l* (plus optional *toolbar*) in a zero-margin tab page."""
    w = QWidget()
    w.searchable_list = l
    layout = QVBoxLayout()
    w.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return w
def create_addresses_tab(self):
    """Build the Addresses tab, restoring toolbar visibility from config."""
    from .address_list import AddressList
    self.address_list = addr_list = AddressList(self)
    toolbar = addr_list.create_toolbar(self.config)
    addr_list.show_toolbar(self.config.get('show_toolbar_addresses', False))
    return self.create_list_tab(addr_list, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = utxo_list = UTXOList(self)
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = contact_list = ContactList(self)
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """After user confirmation, delete *addr* from the wallet and refresh views."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    self.wallet.delete_address(addr)
    self.need_update.set()  # history, addresses, coins
    self.clear_receive_tab()
def get_coins(self):
    """Return the manually selected coins if any, else all spendable wallet coins."""
    return self.pay_from or self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Start a send restricted to *coins* (e.g. from the coins tab context menu)."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the pay-to field into multi-output mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    instructions = [
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.')
    ]
    self.show_message('\n'.join(instructions), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the pay-to field from contact *labels*; multiple contacts become
    a pay-to-many list with zero amounts."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
    else:
        lines = [payto + ", 0" for payto in paytos]
        self.payto_e.setText("\n".join(lines))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or rename a contact; returns True on success, False for a bad address."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """After user confirmation, remove all *labels* from the contact book."""
    joined = " + ".join(labels)
    if not self.question(_("Remove {} from your list of contacts?").format(joined)):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up invoice *key*, re-verify its signature, and show its details."""
    invoice = self.invoices.get(key)
    if invoice is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    invoice.verify(self.contacts)
    self.show_pr_details(invoice)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*,
    offering export to a BIP70 file and deletion of the invoice."""
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # Save the raw BIP70 payment request to a file chosen by the user.
        name = str(key) + '.bip70'
        fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)  # drop the useless capture of write()'s return value
        # BUGFIX: translate only the literal part. The original passed the
        # concatenated filename to _(), a dynamic key that can never exist
        # in any translation catalog.
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
    def update_console(self):
        """Refresh the console tab's command history and namespace.

        Exposes the wallet/network/config objects and every public CLI
        command (from commands.Commands) as callables in the console.
        """
        console = self.console
        console.history = self.config.get("console-history",[])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })
        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # Bind the command name now (avoids the late-binding-closure
            # pitfall); password_dialog lets commands prompt when needed.
            return lambda *args: f(method, args, self.password_dialog)
        for m in dir(c):
            # Skip private attributes and names already in the namespace.
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Prompt for an address and a name, then add them as a contact."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(280)
        line2 = QLineEdit()
        line2.setFixedWidth(280)
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact takes (label, address): name first, address second.
            self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show a modal "Wallet Information" dialog: wallet name/type,
        script type, seed availability, keystore type, and — for
        deterministic wallets — the master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        if len(keystore_types) <= 1:
            # Single keystore: show its type inline in the grid.
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                mpk_text.setText(mpk_list[index])
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            # Populate the text box with the first key initially.
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Stop the wallet in the daemon, close this window and unlink the
        wallet file.  Password-gated via the @protected decorator."""
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        self.gui_object.daemon.stop_wallet(wallet_path)
        self.close()
        os.unlink(wallet_path)
        self.show_error(_("Wallet removed: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Decrypt and display the wallet seed (and passphrase, if any).
        Password-gated via the @protected decorator."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(str(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase)
        d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Export and display the private key (and redeem script, if any)
        for *address*.  Password-gated via the @protected decorator."""
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        # First element of the deserialized privkey is the script type.
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with the key for *address*.

        The arguments are the sign/verify dialog's widgets: *address* is a
        QLineEdit, *message* and *signature* are QTextEdits.  The base64
        signature is written back into the *signature* widget.
        """
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        # Message signing is only defined for single-key address types.
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        # Run on the wallet thread so the GUI stays responsive (hardware
        # wallets may need to prompt the user).
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
    def do_verify(self, address, message, signature):
        """Verify the base64 signature in the *signature* widget against
        the text in *message* and the address in *address*; report the
        result in a popup."""
        address = address.text().strip()
        message = message.toPlainText().strip().encode('utf-8')
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        try:
            # This can throw on invalid base64
            sig = base64.b64decode(str(signature.toPlainText()))
            verified = ecc.verify_message_with_address(address, sig, message)
        except Exception as e:
            # Any decode/verification failure counts as a bad signature.
            verified = False
        if verified:
            self.show_message(_("Signature verified"))
        else:
            self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Open the modal Sign/verify Message dialog, optionally pre-filled
        with *address*."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        # The widgets themselves are passed; do_sign/do_verify read them.
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext in *encrypted_e* with the wallet key for
        the pubkey in *pubkey_e*; the plaintext goes into *message_e*.
        Password-gated via the @protected decorator."""
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        # Decrypt on the wallet thread to keep the GUI responsive.
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
    def do_encrypt(self, message_e, pubkey_e, encrypted_e):
        """Encrypt the text in *message_e* to the public key in *pubkey_e*;
        the base64 ciphertext goes into *encrypted_e*."""
        message = message_e.toPlainText()
        message = message.encode('utf-8')
        try:
            public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.show_warning(_('Invalid Public key'))
            return
        encrypted = public_key.encrypt_message(message)
        encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Open the modal Encrypt/decrypt Message dialog; when *address* is
        given, its public key is pre-filled."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)
        encrypted_e = QTextEdit()
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
    def read_tx_from_qrcode(self):
        """Scan a QR code with the configured camera.  A bitcoin: URI fills
        the send tab; anything else is treated as a base43-encoded raw
        transaction and opened in the transaction dialog."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(str(e))
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if str(data).startswith("bitcoin:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            data = bh2u(bitcoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(e))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
    def read_tx_from_file(self):
        """Let the user pick a .txn file and parse it into a Transaction.
        Returns None if the user cancels or the file cannot be read."""
        fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
        if not fileName:
            return
        try:
            with open(fileName, "r") as f:
                file_content = f.read()
        except (ValueError, IOError, os.error) as reason:
            self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
            return
        return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
    def do_process_from_file(self):
        """Load a transaction from a .txn file and open it in the tx dialog."""
        tx = self.read_tx_from_file()
        if tx:
            self.show_transaction(tx)
    def do_process_from_txid(self):
        """Ask for a txid, fetch the raw transaction from the network and
        open it in the transaction dialog."""
        from electrum import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                # Blocking call on the network thread, bounded by a 10 s timeout.
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
                return
            tx = transaction.Transaction(raw_tx)
            self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Export every private key in the wallet to a CSV or JSON file.

        Keys are derived on a background thread while a modal dialog shows
        progress; Qt signals marshal updates back to the GUI thread.  The
        shared flags ``done``/``cancelled`` coordinate thread shutdown.
        Password-gated via the @protected decorator.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        # Export stays disabled until all keys have been derived.
        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # Background worker: derive one key at a time, signalling
            # progress; stops early if the dialog is closed.
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)[0]
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            # GUI-thread handler: render the finished key list.
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            # Disconnect signals only if the worker is still mid-run.
            nonlocal done
            nonlocal cancelled
            if not done:
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(str(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
    def do_import_labels(self):
        """Import wallet labels from a user-chosen JSON file and refresh."""
        def import_labels(path):
            def _validate(data):
                return data # TODO
            def import_labels_assign(data):
                for key, value in data.items():
                    self.wallet.set_label(key, value)
            import_meta(path, _validate, import_labels_assign)
        def on_import():
            # Trigger a GUI refresh once the import completes.
            self.need_update.set()
        import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
    def sweep_key_dialog(self):
        """Sweep external private keys: ask for WIF keys and a destination
        address, prepare the sweep inputs and pre-fill the send tab."""
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        # Prefer an unused address; fall back for wallet types without
        # get_receiving_addresses, then to any address.
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        # Sweep stays disabled until both fields validate.
        button.setEnabled(False)
        def get_address():
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk():
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text)
        f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(f)
        address_e.textChanged.connect(f)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        try:
            coins, keypairs = sweep_preparations(get_pk(), self.network)
        except Exception as e:  # FIXME too broad...
            #traceback.print_exc(file=sys.stderr)
            self.show_message(str(e))
            return
        # Fill the send tab: spend all swept coins to the chosen address.
        self.do_clear()
        self.tx_external_keypairs = keypairs
        self.spend_coins(coins)
        self.payto_e.setText(addr)
        self.spend_max()
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
        self.warn_if_watching_only()
    def _do_import(self, title, header_layout, func):
        """Generic import helper: prompt with a multi-line text dialog,
        split the input on whitespace, pass the list to *func* (which
        returns (good_inputs, bad_inputs)) and report the outcome, showing
        at most 10 entries of each kind."""
        text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10: msg += '\n...'
            self.show_message(_("The following addresses were added")
                              + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10: msg += '\n...'
            self.show_error(_("The following inputs could not be imported")
                            + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
    @protected
    def do_import_privkey(self, password):
        """Prompt for WIF private keys and import them into the wallet.
        Password-gated via the @protected decorator."""
        if not self.wallet.can_import_privkey():
            return
        title = _('Import private keys')
        header_layout = QHBoxLayout()
        header_layout.addWidget(QLabel(_("Enter private keys")+':'))
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
    def clean_up(self):
        """Tear down this wallet window's state before it is destroyed.

        Called exactly once from closeEvent(): stops the wallet thread,
        detaches network callbacks, persists geometry and console history,
        and unregisters the window from the shared gui_object.
        """
        self.wallet.thread.stop()
        if self.network:
            self.network.unregister_callback(self.on_network)
            self.network.unregister_callback(self.on_quotes)
            self.network.unregister_callback(self.on_history)
        self.config.set_key("is_maximized", self.isMaximized())
        if not self.isMaximized():
            # Only remember the geometry of a normal (non-maximized) window.
            g = self.geometry()
            self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                                  g.width(),g.height()])
        # Keep only the last 50 console entries; True forces a config save.
        self.config.set_key("console-history", self.console.history[-50:],
                            True)
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        self.gui_object.timer.timeout.disconnect(self.timer_actions)
        self.gui_object.close_window(self)
    def plugins_dialog(self):
        """Show a modal dialog listing every plugin with an enable
        checkbox, an optional per-plugin settings widget and a help
        button built from the plugin description."""
        self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
        plugins = self.gui_object.plugins
        vbox = QVBoxLayout(d)
        # plugins
        scroll = QScrollArea()
        scroll.setEnabled(True)
        scroll.setWidgetResizable(True)
        scroll.setMinimumSize(400,250)
        vbox.addWidget(scroll)
        w = QWidget()
        scroll.setWidget(w)
        w.setMinimumHeight(plugins.count() * 35)
        grid = QGridLayout()
        grid.setColumnStretch(0,1)
        w.setLayout(grid)
        settings_widgets = {}
        def enable_settings_widget(p, name, i):
            # Lazily create the plugin's settings widget the first time the
            # plugin is loaded; afterwards only toggle its enabled state.
            widget = settings_widgets.get(name)
            if not widget and p and p.requires_settings():
                widget = settings_widgets[name] = p.settings_widget(d)
                grid.addWidget(widget, i, 1)
            if widget:
                widget.setEnabled(bool(p and p.is_enabled()))
        def do_toggle(cb, name, i):
            # plugins.toggle() returns the plugin instance if now loaded,
            # or a falsy value if it was unloaded.
            p = plugins.toggle(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            if descr.get('registers_keystore'):
                # Plugins that register a keystore are not toggled from
                # this dialog.
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                # A broken plugin description must not break the dialog.
                self.print_msg("error: cannot display plugin", name)
                traceback.print_exc(file=sys.stdout)
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
|
draw.py | #!/usr/bin/python3
"""MathDraw - a virtual whiteboard
with batteries included.
"""
# Controls:
# lmb =paint
# rmb =erase
# mmb =cycle colors
# arrow keys =move canvas
# t =text input
# T =cli input
# return =finish text input
# double lmb =cycle colors
import os, subprocess, sys # Using ps2pdf
import socket
import string
import threading
import time
import time # Filename
import tkinter
from server import PORT, MAX_OFFSET
from threading import Thread
WIDTH = 1280
HEIGHT = 720
class MathClient():
    """Tkinter whiteboard client.

    Draws on a local canvas and mirrors every stroke, erase and text
    event to a MathServer over a line-based TCP protocol:
    'd:'=draw segment, 'e:'=erase, 't:'=text, 'c:'=change notice.
    """
    # Drawing palette; `num` indexes the currently selected color.
    black = "#050505"
    red = "#BB0000"
    green = "#009900"
    blue = "#0000BB"
    num = 0
    color = [black, red, green, blue]
    colorName = ["black", "red", "green", "blue"]
    def __init__(self):
        self.host = socket.gethostname()
        self.server = None          # local MathServer, if we had to start one
        self.textaccum = ""         # characters typed while in text mode
        self.listen = False         # True while capturing keystrokes as text
        self.textpos = (0, 0)       # widget coords where text input started
        self.last = (0, 0)          # previous paint point (canvas coords)
        self.delta = (0, 0)         # last movement vector, used for smoothing
        self.pos = [0, 0]           # absolute scroll offset in pixels
        self.useLast = False        # whether `last` is valid for this stroke
        self.follow = False         # follow-mode flag (HUD display only)
        self._connect()
        self._tkinter()
    def _connect(self):
        # Connect to the host named in $MATHDRAW, or start a local server.
        try:
            # look for env $MATHDRAW
            self.host = os.environ["MATHDRAW"]
        except:
            # TODO starting server when one is already up
            # locally on port n-1
            from server import MathServer
            # if not told to connect to remote
            # start local server
            self.server = MathServer()
            Thread(target=self.server.start, daemon=True).start()
        self.title = "MathDraw 5 - {}".format(self.host)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # The server may listen anywhere in PORT..PORT+MAX_OFFSET; try each.
        for i in range(MAX_OFFSET+1):
            try:
                self.sock.connect((self.host, PORT+i))
                break
            except:
                continue
        self.sfile = self.sock.makefile()
        # Handshake: the server is expected to answer with 'accept\n'.
        answ = self.sfile.readline()
        if answ != 'accept\n':
            print("didn't receive an accept message")
            print("received:\n{}\n... instead".format(answ))
    def _tkinter(self):
        # Build the Tk window, key bindings and the drawing canvas.
        self.tk = tkinter.Tk(className="mathdraw")
        self.tk.title(self.title)
        self.tk.bind("<Left>", lambda e: self._scroll('left'))
        self.tk.bind("<Right>", lambda e: self._scroll('right'))
        self.tk.bind("<Up>", lambda e: self._scroll('up'))
        self.tk.bind("<Down>", lambda e: self._scroll('down'))
        self.tk.bind("t", self.write)
        self.tk.bind("f", self.followToggle)
        self.tk.bind("T", self.cmdInput)
        self.tk.bind("<Return>", self.enter)
        self.tk.bind("<Key>", self.listenT)
        self.tk.bind("<BackSpace>", self.removeT)
        # self.tk.bind("D", self.plotting)
        self.canv = tkinter.Canvas(self.tk, width=WIDTH, height=HEIGHT, background="#fff")
        self.canv.pack()
        self.canv.bind("<Button-1>", self.paint)
        self.canv.bind("<B1-Motion>", self.paint)
        self.canv.bind("c", self.cycle)
        self.canv.bind("<Button-2>", self.cycle)
        self.canv.bind("<ButtonRelease-1>", self.release)
        self.canv.bind("<Leave>", self.release)
        self.canv.bind("<B3-Motion>", self.erase)
        self._update()
    def start(self):
        # Blocks until the window is closed.
        tkinter.mainloop()
    def _cx(self, x, f=int):
        # Widget x-coordinate -> canvas x-coordinate, converted with f.
        return f(self.canv.canvasx(x))
    def _cy(self, y, f=int):
        # Widget y-coordinate -> canvas y-coordinate, converted with f.
        return f(self.canv.canvasy(y))
    def _c(self, event, f=int):
        # Event position in canvas coordinates.
        return (f(self._cx(event.x, f)), f(self._cy(event.y, f)))
    def followToggle(self, event):
        # 'f' key: toggle follow mode, unless we are capturing text.
        if self.listen:
            self.listenT(event)
            return
        self.follow = not self.follow
        self._update()
    def paint(self, event):
        # Draw a smoothed segment from the previous point to the current
        # one and mirror it to the server.
        (x, y) = self._c(event, float)
        (lx, ly) = self.last
        if self.useLast:
            # Midpoint smoothing using the previous movement vector.
            x = (x + lx+self.delta[0]) / 2.0
            y = (y + ly+self.delta[1]) / 2.0
            self._paint(lx, ly, x, y, self.num % len(self.color))
            self.sock.send('d:{:.0f}:{:.0f}:{:.0f}:{:.0f}:{}\n'.format(lx, ly, x, y, self.num % len(self.color)).encode('ascii'))
            self.delta = (x-lx, y-ly)
        else:
            # First point of a stroke: nothing to draw yet.
            self.useLast = True
            self.delta = (0, 0)
        self.last = (x, y)
    def _paint(self, x1, y1, x2, y2, n):
        # Draw one segment locally in palette color n.
        c = self.color[n]
        self.canv.create_line(x1, y1, x2, y2, fill=c, width=2)
        self.canv.create_oval(x1-1,y1-1,x1+1,y1+1, fill=c, width=0)
    def cycle(self, event):
        # Select the next palette color and show it in the title bar.
        self.num = (self.num + 1) % len(self.color)
        self.tk.title(self.title + " " + self.color[self.num])
        self._update()
    def release(self, event):
        # End of a stroke: the next paint() starts a fresh segment.
        self.useLast = False
    def _scroll(self, direction):
        # Move the view by whole pages and track the absolute offset.
        dist = 2
        dx = 0
        dy = 0
        if direction == 0 or direction == 'up':
            self.canv.yview_scroll(-dist, "pages")
            dy = -HEIGHT
        elif direction == 1 or direction == 'right':
            self.canv.xview_scroll( dist, "pages")
            dx = WIDTH
        elif direction == 2 or direction == 'down':
            self.canv.yview_scroll( dist, "pages")
            dy = HEIGHT
        elif direction == 3 or direction == 'left':
            self.canv.xview_scroll(-dist, "pages")
            dx = -WIDTH
        self.pos[0] += dx
        self.pos[1] -= dy
        self._update()
    def _change(self, wx, wy, s):
        # Draw the current page position [wx, wy] in the top-left corner.
        pos = "[{}, {}]".format(wx, wy)
        self._blockErase(0, 0, 6 * len(pos) + 8, 20)
        self.canv.create_text(self._cx(s), self._cy(s), text=pos, anchor="nw", fill="#000")
    def _blockErase(self, x, y, width, height):
        # Cover a rectangle of the HUD area with white before redrawing it.
        self.canv.create_rectangle(self._cx(x), self._cy(y), self._cx(x+width), self._cy(y+height), fill="#FFF", width=0)
    def _update(self):
        # Redraw the HUD: page position, color name, and follow flag.
        space = 4
        self._change(int(self.pos[0]/WIDTH), int(self.pos[1]/HEIGHT), space)
        self._blockErase(0, 20, 40, 20)
        self.canv.create_text(self._cx(space), self._cy(space+20), text=self.colorName[self.num], anchor="nw", fill=self.color[self.num])
        self._blockErase(0, 40, 40, 20)
        if self.follow:
            self.canv.create_text(self._cx(space), self._cy(space+40), text="follow", anchor="nw", fill="#000")
    def erase(self, event):
        # Right-button drag: erase locally and notify the server.
        x = self._cx(event.x)
        y = self._cy(event.y)
        self.sock.send('e:{}:{}\n'.format(x,y).encode('ascii'))
        self._erase(x, y)
    def _erase(self, x, y):
        # Paint a white disc over the given canvas point.
        s = 20
        self.canv.create_oval(x - s, y - s, x + s, y + s, fill="white", outline="white")
    def write(self, event):
        # 't' key: start capturing keystrokes as text at the mouse position.
        if self.listen:
            self.listenT(event)
            return
        print("Listening to Text\n -> ", end="")
        sys.stdout.flush()
        self.listen = True
        self.textpos = (event.x, event.y)
    def enter(self, event):
        # Return key: finish text input and place the accumulated text.
        if self.listen:
            self.listen = False
            self.writeOut()
    def writeOut(self):
        # Place the accumulated text locally and send it to the server.
        x = self._cx(self.textpos[0])
        y = self._cy(self.textpos[1])
        self._writeOut(x, y, self.textaccum)
        self.sock.send('t:{}:{}:{}\n'.format(x, y, self.textaccum).encode('ascii'))
        self.textaccum = ""
        print("\nText written")
    def _writeOut(self, x, y, t):
        # Render text at canvas coordinates (x, y).
        self.canv.create_text(x, y, text=t, font="\"Times New Roman\" 18")
    def listenT(self, event):
        # Accumulate printable characters while in text mode; echo them to
        # the terminal with ANSI highlighting.
        if self.listen and event.char in string.printable:
            self.textaccum += event.char
            print("\033[1024D -> \033[34;4m", end="")
            print(self.textaccum, end="\033[0m")
            sys.stdout.flush()
        else:
            self.textaccum = ""
    def removeT(self, event):
        # Backspace: drop the last accumulated character.
        self.textaccum = self.textaccum[:-1]
        print("\033[1D \033[1D", end="")
        sys.stdout.flush()
    def cmdInput(self, event):
        # 'T' key: read a line of text from the terminal instead of the GUI.
        if self.listen:
            self.listenT(event)
            return
        t = str(input("Text:"))
        x = self._cx(event.x)
        y = self._cy(event.y)
        self.sock.send('t:{}:{}:{}\n'.format(x, y, t).encode('ascii'))
        self._writeOut(x, y, t)
    def plotting(self, event):
        # Placeholder: plotting is not implemented.
        print("\033[31;1mplotting not implemented\033[0m")
    def server_communication(self):
        # Start the background thread that applies remote events.
        Thread(target=self._sock_receive).start()
    def _sock_receive(self):
        # Receive loop: parse one event per line and apply it to the canvas.
        try:
            while True:
                msg = self.sfile.readline()[:-1]
                if len(msg) == 0:
                    # An empty read means the connection was closed.
                    raise Exception(" <- server down")
                mstr = msg.split(":")[1:]
                mdat = []
                if msg[0] == 't':
                    # The text payload (last field) must not be int-converted.
                    mdat = list(map(int, mstr[:-1]))
                else:
                    mdat = list(map(int, mstr))
                if msg[0] == 'e':
                    #e:x:y
                    self._erase(mdat[0], mdat[1])
                elif msg[0] == 'd':
                    #d:x1:y1:x2:y2:num
                    self._paint(mdat[0], mdat[1], mdat[2], mdat[3], mdat[4])
                elif msg[0] == 't':
                    #t:x:y:text
                    self._writeOut(mdat[0], mdat[1], mstr[2])
                elif msg[0] == 'c':
                    print(" <- change to [{}, {}] received".format(mdat[0], mdat[1]))
                else:
                    print("unknown server response")
        except Exception as e:
            print(e)
            print("return client receive")
            return
# TODO implement MathClient._servercom(...)
# that checks for BrokenPipeError and tries to reconnect
def main():
    """Run the MathDraw client until the window closes, then tell the
    server we are leaving."""
    client = MathClient()
    try:
        client.server_communication()
        client.start()
    except KeyboardInterrupt:
        pass
    except Exception:
        # Previously a bare `except: pass`; keep the best-effort shutdown
        # but at least surface the error for debugging.
        import traceback
        traceback.print_exc()
    try:
        client.sock.send(b'close\n')
    except OSError:
        # Socket already dead (e.g. connect failed earlier); nothing to do.
        pass
    time.sleep(0.5)
# Only start a client when run as a script, not on import.
if __name__ == '__main__':
    main()
|
3.py | from multiprocessing import Process
import time
import os
from functools import wraps
start_time = time.time()
def timefn(fn):
    """Decorator that prints how long each call to *fn* takes.

    The wrapped function's return value is passed through unchanged.
    """
    @wraps(fn)
    def measure_time(*args, **kwargs):
        started = time.time()
        outcome = fn(*args, **kwargs)
        elapsed = time.time() - started
        print(f"@timefn: {fn.__name__} took {elapsed} seconds")
        return outcome
    return measure_time
@timefn
def count(cnt):
    """Print this process's PID next to each counter value up to *cnt*."""
    pid = os.getpid()
    for step in range(cnt):
        print("Process Id :", pid, " -- ", step)
if __name__ == '__main__':
    # Four workers, each counting to 100000 in its own process.
    num_arr = [100000, 100000, 100000, 100000]
    procs = []
    for index, number in enumerate(num_arr):
        proc = Process(target=count, args=(number,))
        procs.append(proc)
        proc.start()
    # Wait for every worker before reporting the total elapsed time.
    for proc in procs:
        proc.join()
    print(" --- %s seconds" % (time.time() - start_time))
utils.py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Utils function for the parallel training.
This is an experimental interface that is subject to change and/or deletion.
"""
from multiprocessing import Process
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore.common.tensor import Tensor
from mindspore.train.callback import Callback
from mindspore.train.summary import SummaryRecord
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.communication.management import get_rank, get_group_size, create_group
from mindspore.nn.learning_rate_schedule import LearningRateSchedule, PolynomialDecayLR, WarmUpLR, CosineDecayLR
import numpy as np
# Multitype graph mapping (grad, divisor) -> scaled squared-norm term.
_get_square_sum = C.MultitypeFuncGraph("_get_square_sum")
@_get_square_sum.register("Tensor", "Number")
def _get_square_sum_func(grad, value):
    """Return sum(grad**2) / value as a shape-(1,) float32 tensor."""
    norm = P.ReduceSum(False)(F.square(grad), ()) / value
    norm = F.expand_dims(F.cast(norm, mstype.float32), 0)
    return norm
# Multitype graph scaling each gradient by clip_norm / global_norm.
_apply_global_norm = C.MultitypeFuncGraph("apply_global_norm")
@_apply_global_norm.register("Tensor", "Tensor", "Tensor")
def _apply_global_norm_func(clip_norm, global_norm, grad):
    """Rescale one gradient tensor by clip_norm / global_norm."""
    grad = grad * clip_norm / global_norm
    return grad
def _get_model_parallel_group(mp):
    """
    Calculate the communication group of model parallel dim in one pipeline stage
    """
    rank = get_rank()
    stage_nums = auto_parallel_context().get_pipeline_stages()
    per_stage_device_nums = get_group_size() // stage_nums
    stage_id = rank // per_stage_device_nums
    local_stage_rank_id = rank % per_stage_device_nums
    index = local_stage_rank_id // mp
    # All ranks of this group share the same stage and index offset.
    base = index * mp + stage_id * per_stage_device_nums
    rank_list = [base + offset for offset in range(mp)]
    rank_list_str = "-".join(str(r) for r in rank_list)
    return rank_list, rank_list_str
def _get_pipeline_group():
    """
    Calculate the communication group between all pipeline stages
    """
    rank = get_rank()
    stage_nums = auto_parallel_context().get_pipeline_stages()
    per_stage_device_nums = get_group_size() // stage_nums
    local_stage_rank_id = rank % per_stage_device_nums
    # One rank per stage, all at the same local position inside a stage.
    rank_list = [local_stage_rank_id + stage * per_stage_device_nums
                 for stage in range(stage_nums)]
    rank_list_str = "-".join(str(r) for r in rank_list)
    return rank_list, rank_list_str
class _GlobalNorm(nn.Cell):
    """
    Calculate the global norm value of given tensors
    """
    def __init__(self, params, config):
        super(_GlobalNorm, self).__init__()
        self.hyper_map = C.HyperMap()
        # Pipeline parallelism needs two all-reduces in construct(): one
        # across this stage's model-parallel group, one across stages.
        self.is_pipeline = (config.pipeline_stage > 1)
        if self.is_pipeline:
            group_size = config.mp
            group_list, group_name = _get_model_parallel_group(config.mp)
            create_group(group_name, group_list)
            self.allreduce = P.AllReduce(group=group_name)
            pipeline_group_list, pipeline_group_name = _get_pipeline_group()
            create_group(pipeline_group_name, pipeline_group_list)
            self.allreduce2 = P.AllReduce(group=pipeline_group_name)
        else:
            group_size = get_group_size()
        if config.vocab_emb_dp:
            self.allreduce_filter = tuple("projection.bias" not in x.name and "layernorm" not in x.name
                                          and "embedding_table" not in x.name for x in params)
        else:
            self.allreduce_filter = tuple("projection.bias" not in x.name and "layernorm" not in x.name
                                          and "position_embedding.embedding_table" not in x.name for x in params)
        self.allreduce_group_size = ()
        self.init_params(params, config, group_size)
    def init_params(self, params, config, group_size):
        """Build the per-parameter divisor tuple used by construct().

        Each entry divides a parameter's squared-norm contribution —
        presumably so replicated (non-sharded) parameters are not
        over-counted after the all-reduce; TODO confirm against the
        model's sharding layout.
        """
        for x in params:
            if "uniter.encoder" in x.name:
                if "dense" in x.name and "weight" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (1.0,)
                elif "projection" in x.name and "weight" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (1.0,)
                elif "wi" in x.name or "wo" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (1.0,)
                elif "dense" in x.name and "bias" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (config.dp * 1.0,)
                else:
                    self.allreduce_group_size = self.allreduce_group_size + (group_size * 1.0,)
            elif "txt_output" in x.name or "img_output" in x.name:
                if "weight" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (config.dp * 1.0,)
                elif "dense" in x.name and "bias" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (config.dp * 1.0,)
                elif "mapping" in x.name and "bias" in x.name:
                    self.allreduce_group_size = self.allreduce_group_size + (config.dp * 1.0,)
                else:
                    self.allreduce_group_size = self.allreduce_group_size + (group_size * 1.0,)
            else:
                self.allreduce_group_size = self.allreduce_group_size + (group_size * 1.0,)
    def construct(self, grads):
        """Calculate global norm construct"""
        square_sum = self.hyper_map(_get_square_sum, grads, self.allreduce_group_size)
        square_reduce_sum = F.addn(square_sum)
        if self.is_pipeline:
            # Two-step reduction: within the stage's model-parallel group,
            # then across pipeline stages.
            stage_square_reduce_sum = self.allreduce(square_reduce_sum)
            global_square_reduce_sum = self.allreduce2(stage_square_reduce_sum)
            global_norms = F.sqrt(global_square_reduce_sum)
        else:
            global_norms = F.sqrt(P.AllReduce()(square_reduce_sum))
        return global_norms
class _ClipByGlobalNorm(nn.Cell):
    """
    Clip grads by global norm
    """
    def __init__(self, params, parallel_config, clip_norm=1.0):
        super(_ClipByGlobalNorm, self).__init__()
        # According to the parallel mode, enabling the parallel global norm or not
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        self.global_norm = _GlobalNorm(params, parallel_config)
        self.clip_norm = Tensor([clip_norm], mstype.float32)
        self.hyper_map = C.HyperMap()
    def construct(self, grads):
        """Clip grads by global norm construct"""
        global_norm_value = self.global_norm(grads)
        # Only scale down: if the norm is below clip_norm, substitute
        # clip_norm so the per-grad factor clip_norm/global_norm is 1.
        cond = P.GreaterEqual()(global_norm_value, self.clip_norm)
        global_norm = F.select(cond, global_norm_value, self.clip_norm)
        grads = self.hyper_map(F.partial(_apply_global_norm, self.clip_norm, global_norm), grads)
        return grads
class LearningRate(LearningRateSchedule):
    """
    Learning-rate scheduler: optional linear warmup followed by either
    polynomial or cosine decay.

    Args:
        start_learning_rate: peak LR reached at the end of warmup.
        end_learning_rate: final LR after decay.
        warmup_steps: number of warmup steps (0 disables warmup).
        decay_steps: number of steps over which the LR decays.
        power: exponent of the polynomial decay (unused when use_cosine).
        use_cosine: use cosine decay instead of polynomial decay.
    """
    def __init__(self,
                 start_learning_rate,
                 end_learning_rate,
                 warmup_steps,
                 decay_steps,
                 power=1.0,
                 use_cosine=True):
        super(LearningRate, self).__init__()
        self.warmup_flag = False
        if warmup_steps > 0:
            self.warmup_flag = True
            self.warmup_lr = WarmUpLR(start_learning_rate, warmup_steps)
        self.decay_lr = PolynomialDecayLR(start_learning_rate, end_learning_rate, decay_steps, power)
        self.cosine_decay_lr = CosineDecayLR(end_learning_rate, start_learning_rate, decay_steps)
        self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
        self.greater = P.Greater()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.cast = P.Cast()
        self.use_cosine = use_cosine
    def construct(self, global_step):
        """Return the learning rate for `global_step`."""
        if not self.use_cosine:
            decay_lr = self.decay_lr(global_step)
        else:
            decay_lr = self.cosine_decay_lr(global_step)
        if self.warmup_flag:
            # Select warmup LR while global_step < warmup_steps, decay after.
            is_warmup = self.cast(self.greater(self.warmup_steps, global_step), mstype.float32)
            warmup_lr = self.warmup_lr(global_step)
            lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr
        else:
            lr = decay_lr
        return lr
class LossSummaryCallback(Callback):
    """
    Callback that writes per-task losses to a SummaryRecord and
    periodically copies the summary directory to an OBS bucket.

    Args:
        summary_dir: local directory for the summary files.
        local_rank: rank id of this process (stored; not used here).
        has_trained_epoch / has_trained_step: offsets when resuming.
        bucket: OBS destination for the synced summary files.
        syn_times: sync to the bucket every `syn_times` steps.
    """
    def __init__(self, summary_dir, local_rank=0, has_trained_epoch=0, has_trained_step=0,
                 bucket='obs://mindspore-file/loss_file/summary/', syn_times=100):
        import moxing as mox
        # Bug fix: `mox` was a function-local name, so syn_files() raised
        # NameError when it tried to use it. Keep a reference on self.
        self.mox = mox
        self._summary_dir = summary_dir
        self.local_rank = local_rank
        self.has_trained_epoch = has_trained_epoch
        self.has_trained_step = has_trained_step
        self.bucket = bucket
        self.syn_times = syn_times
        # Maps the task id found in net_outputs[3] to a scalar tag name.
        self.id2task = {
            0: 'mlmThree',
            1: 'mrcThree',
            2: 'mrfrThree',
            3: 'mafrThree',
            4: 'macThree',
            5: "itmThree",
            6: 'mrctThree',
            7: "tdThree",
            8: "idThree"
        }
        if not mox.file.exists(self.bucket):
            print("++++++++++Creating summary bueckt dir {}".format(self.bucket))
            mox.file.make_dirs(self.bucket)
        print("++++++++++entering")
        self.summary_record = SummaryRecord(self._summary_dir)
    def step_end(self, run_context):
        """
        Record the loss scale and the current task's loss at the end of
        each step; sync the summary dir to the bucket every `syn_times`
        steps.
        """
        cb_params = run_context.original_args()
        cur_step = cb_params.cur_step_num + self.has_trained_step
        print("++++++++++writing")
        # net_outputs layout per the tags below: [0]=loss, [2]=scale,
        # [3]=task-id tensor.
        task_id = cb_params.net_outputs[3].asnumpy()[0]
        self.summary_record.add_value('scalar', 'scale', cb_params.net_outputs[2])
        self.summary_record.add_value('scalar', '{}'.format(self.id2task[task_id]), cb_params.net_outputs[0])
        self.summary_record.record(cur_step)
        print("writing finished...", cur_step, self.syn_times)
        if cur_step % self.syn_times == 0:
            print("++++++++++Copying summary to the bueckets start", flush=True)
            self.summary_record.flush()
            self.syn_files()
            print("++++++++++Copying summary to the bueckets ends", flush=True)
    def syn_files(self):
        """Copy the summary dir to the bucket in a background process."""
        process = Process(target=self.mox.file.copy_parallel, args=(self._summary_dir, self.bucket), name="file_sync")
        process.start()
class LossSummaryCallbackLocal(Callback):
    """
    LossSummaryCallbackLocal

    Local-only variant: records the loss to a SummaryRecord every step,
    without syncing anything to a remote bucket.
    """
    def __init__(self, summary_dir, local_rank=0, has_trained_epoch=0, has_trained_step=0):
        self._summary_dir = summary_dir
        self.local_rank = local_rank
        self.has_trained_epoch = has_trained_epoch
        self.has_trained_step = has_trained_step
        # Task-id -> tag mapping; not read in this class, kept for parity
        # with LossSummaryCallback.
        self.id2task = {
            0: 'mlmThree',
            1: 'mrcThree',
            2: 'mrfrThree',
            3: 'mafrThree',
            4: 'macThree',
            5: "itmThree",
            6: 'mrctThree',
            7: "tdThree",
            8: "idThree"
        }
        print("++++++++++entering")
        self.summary_record = SummaryRecord(self._summary_dir)
    def step_end(self, run_context):
        """
        step_end

        Record net_outputs[0] under the 'loss' tag for this step.
        """
        cb_params = run_context.original_args()
        cur_step = cb_params.cur_step_num + self.has_trained_step
        # Write the scalar loss for this (offset-adjusted) step.
        print("++++++++++writing")
        self.summary_record.add_value('scalar', 'loss', cb_params.net_outputs[0])
        self.summary_record.record(cur_step)
|
util.py | # -*- coding: utf-8 -*-
"""
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import inspect
import json
import logging
import os
import re
import sys
import traceback
from platform import architecture
from threading import Thread
from uuid import uuid4
import webview
from .js import api, npo, dom, event, drag
_token = uuid4().hex
default_html = """
<!doctype html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1.0, user-scalable=0">
</head>
<body></body>
</html>
"""
logger = logging.getLogger('pywebview')
class WebViewException(Exception):
    """Exception type raised by pywebview."""
    pass
def get_app_root():
    """Return the file root of the application.

    Resolution order: a PyInstaller one-file bundle (``sys._MEIPASS``),
    then — under pytest — the directory of the test target on the command
    line, otherwise the directory of the running script.
    """
    # PyInstaller one-file bundles unpack to a temp dir recorded in _MEIPASS.
    bundle_dir = getattr(sys, '_MEIPASS', None)
    if bundle_dir is not None:
        return bundle_dir

    if 'pytest' in sys.modules:
        # Derive the root from the last existing test target on the command
        # line (arguments look like "tests/test_x.py::test_case").
        for arg in reversed(sys.argv):
            candidate = os.path.realpath(arg.split('::')[0])
            if os.path.exists(candidate):
                return candidate if os.path.isdir(candidate) else os.path.dirname(candidate)
    else:
        return os.path.dirname(os.path.realpath(sys.argv[0]))
def abspath(path):
    """Return *path* made absolute relative to the application root.

    Absolute inputs are only normalized; relative inputs are joined onto
    ``get_app_root()`` first.
    """
    path = os.fspath(path)
    if os.path.isabs(path):
        return os.path.normpath(path)
    return os.path.normpath(os.path.join(get_app_root(), path))
def base_uri(relative_path=''):
    """Return a file:// URI for *relative_path* under the application root.

    Works both in development and in a PyInstaller bundle. Raises
    ValueError if the resolved root does not exist on disk.
    """
    root = get_app_root()
    if not os.path.exists(root):
        raise ValueError('Path %s does not exist' % root)
    return 'file://%s' % os.path.join(root, relative_path)
def convert_string(string):
    """Coerce *string* to the native text type (unicode on Python 2)."""
    if sys.version >= '3':
        return str(string)
    return unicode(string)
def parse_file_type(file_type):
    """Split a file-filter string into its parts.

    *file_type* must look like ``'description (*.ext1;*.ext2)'`` as required
    by the file filter in ``create_file_dialog``. Returns a
    ``(description, extensions)`` tuple; raises ValueError on any other
    shape.
    """
    valid_file_filter = r'^([\w ]+)\((\*(?:\.(?:\w+|\*))*(?:;\*\.\w+)*)\)$'
    match = re.search(valid_file_filter, file_type)
    if not match:
        raise ValueError('{0} is not a valid file filter'.format(file_type))
    description = match.group(1).rstrip()
    extensions = match.group(2)
    return description, extensions
def parse_api_js(window, platform, uid=''):
    """Build the bootstrap JavaScript injected into a window.

    Concatenates the helper sources (npo/event/api/dom/drag) and fills the
    API stub with the security token, platform name, window uid and the
    list of Python functions exposed to JS.
    """

    def get_args(f):
        # getfullargspec is Python 3; fall back to getargspec on Python 2.
        try:
            spec = inspect.getfullargspec(f)
        except AttributeError:
            spec = inspect.getargspec(f)
        return list(spec.args)

    def generate_func():
        # Public bound methods of the js_api object; drop the leading 'self'.
        functions = {}
        if window._js_api:
            for name in dir(window._js_api):
                attr = getattr(window._js_api, name)
                if inspect.ismethod(attr) and not name.startswith('_'):
                    functions[name] = get_args(attr)[1:]
        # Functions registered via window.expose() take precedence.
        for name, f in window._functions.items():
            functions[name] = get_args(f)
        return [{'func': name, 'params': params} for name, params in functions.items()]

    try:
        func_list = generate_func()
    except Exception as e:
        # Exposed-function discovery must never break window creation.
        logger.exception(e)
        func_list = []

    js_code = npo.src + event.src + api.src % (_token, platform, uid, func_list) + dom.src + drag.src % webview.DRAG_REGION_SELECTOR
    return js_code
def js_bridge_call(window, func_name, param, value_id):
    """Dispatch a JS->Python bridge call on *window*.

    Looks up *func_name* among explicitly exposed functions, then on the
    window's js_api object, runs it on a worker thread, and writes the
    JSON-encoded result (or error) back into
    ``window.pywebview._returnValues[func_name][value_id]`` via JS.
    """
    def _call():
        # NOTE: `func` and `func_params` are closed over from the enclosing
        # scope; they are assigned below *before* the thread is started.
        try:
            result = func(*func_params.values())
            # Escape backslashes and single quotes so the JSON survives
            # embedding in a single-quoted JS string literal.
            result = json.dumps(result).replace('\\', '\\\\').replace('\'', '\\\'')
            code = 'window.pywebview._returnValues["{0}"]["{1}"] = {{value: \'{2}\'}}'.format(func_name, value_id, result)
        except Exception as e:
            # Ship the exception details to JS as an error payload.
            error = {
                'message': str(e),
                'name': type(e).__name__,
                'stack': traceback.format_exc()
            }
            result = json.dumps(error).replace('\\', '\\\\').replace('\'', '\\\'')
            code = 'window.pywebview._returnValues["{0}"]["{1}"] = {{isError: true, value: \'{2}\'}}'.format(func_name, value_id, result)
        window.evaluate_js(code)

    # moveWindow is handled synchronously and returns nothing to JS.
    if func_name == 'moveWindow':
        window.move(*param)
        return

    # Explicitly exposed functions win over js_api attributes; getattr on a
    # None js_api safely yields None.
    func = window._functions.get(func_name) or getattr(window._js_api, func_name, None)

    if func is not None:
        try:
            func_params = param
            t = Thread(target=_call)
            t.start()
        except Exception:
            logger.exception('Error occurred while evaluating function {0}'.format(func_name))
    else:
        logger.error('Function {}() does not exist'.format(func_name))
def escape_string(string):
    """Escape backslashes, double quotes and CR/LF so *string* can be
    embedded inside a double-quoted JS string literal."""
    # Backslash must be escaped first so later escapes are not doubled.
    replacements = (
        ('\\', '\\\\'),
        ('"', r'\"'),
        ('\n', r'\n'),
        ('\r', r'\r'),
    )
    for old, new in replacements:
        string = string.replace(old, new)
    return string
def make_unicode(string):
    """
    Python 2 and 3 compatibility helper: decode a Python 2 byte string to
    Unicode (assuming UTF-8); any other input is returned unchanged.
    :param string: input string
    :return: Unicode string
    """
    if sys.version >= '3':
        # On Python 3 every str is already Unicode.
        return string
    if isinstance(string, str):
        return unicode(string.decode('utf-8'))
    return string
def escape_line_breaks(string):
    """Double the backslash of literal backslash-n / backslash-r escape
    sequences so they survive a later unescaping pass. Real newline
    characters are left untouched."""
    for seq in ('\\n', '\\r'):
        string = string.replace(seq, '\\' + seq)
    return string
def inject_base_uri(content, base_uri):
    """Insert a ``<base href="...">`` tag into the HTML *content*.

    Returns *content* unchanged if a <base> tag is already present.
    Otherwise the tag is placed just after <head>, else after <html>,
    else before <body>, else prepended to the document.
    """
    pattern = r'<%s(?:[\s]+[^>]*|)>'
    base_tag = '<base href="%s">' % base_uri

    if re.search(pattern % 'base', content):
        return content

    # Insert *after* the opening head/html tag when one exists.
    for tag_name in ('head', 'html'):
        match = re.search(pattern % tag_name, content)
        if match:
            tag = match.group()
            return content.replace(tag, tag + base_tag)

    # Last resorts: *before* <body>, or at the very start of the document.
    match = re.search(pattern % 'body', content)
    if match:
        tag = match.group()
        return content.replace(tag, base_tag + tag)

    return base_tag + content
def interop_dll_path(dll_name):
    """Locate *dll_name*, checking the module's lib/ dir, the executable's
    directory, then a PyInstaller one-file bundle. Raises Exception when
    the DLL cannot be found."""
    # Pick the interop DLL matching the interpreter's bitness.
    if dll_name == 'WebBrowserInterop.dll':
        is_64bit = architecture()[0] == '64bit'
        dll_name = 'WebBrowserInterop.x64.dll' if is_64bit else 'WebBrowserInterop.x86.dll'

    candidates = [
        # Unfrozen: lib/ directory next to this module.
        os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib', dll_name),
        # Frozen: same directory as the executable.
        os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), dll_name),
    ]
    # Frozen as onefile: the PyInstaller unpack directory, when present.
    meipass = getattr(sys, '_MEIPASS', None)
    if meipass is not None:
        candidates.append(os.path.join(meipass, dll_name))

    for dll_path in candidates:
        if os.path.exists(dll_path):
            return dll_path

    raise Exception('Cannot find %s' % dll_name)
|
dqn_test.py | """
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
dqn algorithm used for controling the steer to make a vehicle keep lane.
Author:Team Li
"""
import tensorflow as tf
import cv2, math, sys, random, threading
from keep_lane.basic_net.dqn_utils import action_value_net
import RL.rl_utils as rl_tools
try:
sys.path.append('F:\my_project\driving-desicion-in-carla\dist/carla-0.9.4-py3.7-win-amd64.egg')
import carla
except:
raise ImportError('Please check your carla file')
from carla_utils.world_ops import *
from carla_utils.sensor_ops import *
# Command-line configuration: checkpoint location, number of ego vehicles,
# raw/network image geometry, and the size of the discrete steering space.
tf.app.flags.DEFINE_string(
    'checkpoint_dir', '../checkpoint/keep_lane_world/random_init',
    'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_integer(
    'n_egopilots', 1, 'the number of egopilots')
tf.app.flags.DEFINE_integer(
    'img_height', 416,
    'raw image height captured from carla')
tf.app.flags.DEFINE_integer(
    'img_width', 626,
    'raw image width captured from carla')
tf.app.flags.DEFINE_integer(
    'net_img_height', 224,
    'image height of network input')
tf.app.flags.DEFINE_integer(
    'net_img_width', 224,
    'raw image width of network input')
tf.app.flags.DEFINE_integer(
    'n_action', 21,
    'total discrete action in steer')
FLAGS = tf.app.flags.FLAGS

## carla config ##
# Sensor blueprint dictionaries. 'attach_to' is a placeholder filled in
# per-vehicle in __main__ right before each sensor is created.
# Forward-facing camera used as the network's observation.
semantic_camera_config = {'data_type': 'sensor.camera.semantic_segmentation', 'image_size_x': FLAGS.img_width,
                          'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
                          'transform': carla.Transform(carla.Location(x=0.5, z=1.6)),
                          'attach_to': None}
bgr_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': FLAGS.img_width,
                     'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
                     'transform': carla.Transform(carla.Location(x=0.5, z=1.6)),
                     'attach_to': None}
# Chase camera mounted behind/above the vehicle, for visualization only.
spector_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': FLAGS.img_width,
                         'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
                         'transform': carla.Transform(carla.Location(x=-6, z=3.5)),
                         'attach_to': None}
collision_sensor_config = {'data_type': 'sensor.other.collision', 'attach_to': None}
invasion_sensor_config = {'data_type': 'sensor.other.lane_detector', 'attach_to': None}
obstacle_sensor_config = {'data_type': 'sensor.other.obstacle', 'sensor_tick': 0.02,
                          'distance': 3, 'attach_to': None}
def action_index_2_steer(action_index):
    """ change the action index to steer val
    Args:
        action_index: an int between [0, n_action-1]
    Return:
        a steer val in [-1, 1]
    """
    # Linearly map {0, ..., n_action-1} onto [-1, 1].
    span = float(FLAGS.n_action - 1)
    return 2 * action_index / span - 1.
def single_execuate(target, args):
    """ single thread execuate
    Args:
        target: a func
        args: args in target
    """
    # Fire-and-forget: no handle is returned and the thread is not joined.
    worker = threading.Thread(target=target, args=args)
    worker.start()
def check_whether_respawn_actors(world, vehicles):
    """check whether to respawn the static acotors in a frequency

    Runs forever (intended for a daemon thread): every 5 seconds, if more
    than 75% of *vehicles* are static per ``carla_actors_static``, respawn
    them via ``respawn_static_actors``.
    """
    while True:
        time.sleep(5)
        if carla_actors_static(vehicles, bigger_than=0.75):
            # respawn_actor_at(world, vehicles[0], spawn_points[45])
            respawn_static_actors(world, vehicles)
def online_thread(sess):
    """a thread for target nets in DDPG

    Control loop (runs forever, intended for a daemon thread): grab a frame
    from each ego vehicle's camera, run the online action-value net, and
    apply the greedy steering action to each vehicle.

    Relies on module-level globals set up in __main__: ``cameras``,
    ``spectors``, ``egopilots``, ``max_action_index_online`` and
    ``online_img_state``.
    """
    while True:
        ## get current state
        imgs = []
        for camera_sensor, spector in zip(cameras, spectors):
            img = camera_sensor.get()
            img = img[int(FLAGS.img_height*1.8//5):, :, :]  ## corp the ROI (drop the upper ~36% of the frame)
            img = cv2.resize(img, dsize=(FLAGS.net_img_height, FLAGS.net_img_width))
            sp_img = spector.get()
            cv2.imshow('visualization', sp_img)
            imgs.append(img)
        current_img_state = np.array(imgs)
        # Normalize uint8 pixels [0, 255] to [-1, 1] for the network.
        current_img_state = current_img_state*2./255. - 1.

        ## get current action and control the egopilots
        # NOTE(review): sess.run([op]) returns a one-element list holding the
        # whole action array, so zip(egopilots, current_action) pairs the
        # first egopilot with the full array; c_a[0] then picks the first
        # action. This only behaves as intended for n_egopilots == 1 — confirm.
        current_action = sess.run([max_action_index_online], feed_dict={online_img_state: current_img_state})

        ## control the egopilots ##
        i = 0
        for egopilot, c_a in zip(egopilots, current_action):
            ## e-greedy
            current_action[i] = c_a[0]
            steer = action_index_2_steer(c_a[0])
            throttle = 0.5
            brake = 0.
            ego_v = egopilot.get_velocity()
            ego_v = math.sqrt(ego_v.x ** 2 + ego_v.y ** 2 + ego_v.z ** 2)
            if ego_v > 8. and throttle > 0.5:
                throttle = 0.5  ## avoid velocity too big
            ## apply control
            egopilot.apply_control(carla.VehicleControl(throttle=throttle, steer=steer, brake=brake))
            i += 1
        cv2.waitKey(25)
        # time.sleep(0.5) ## sleep for a while, let the action control the egopilots to next state
if __name__ == '__main__':
    # Build the online action-value graph: normalized camera frames in,
    # per-action Q values out; the greedy action is an argmax over them.
    online_img_state = tf.placeholder(shape=[None, FLAGS.net_img_height, FLAGS.net_img_width, 3], dtype=tf.float32)
    act_val_net_online = action_value_net()
    act_val_online, vars_online = act_val_net_online.build_graph(img_state=online_img_state, n_action=FLAGS.n_action, is_training=True,
                                                                 var_scope='online_act_val')
    #########################################
    ## the best action ops in current step ##
    #########################################
    max_action_index_online = tf.argmax(act_val_online, axis=-1)

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    # NOTE(review): `logger` is not defined in this file; presumably it comes
    # from the wildcard `carla_utils` imports above — confirm.
    logger.info('Tensorflow graph bulid success...')
    logger.info('Total trainable parameters:%s' %
                str(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
    ########################### TENSORFLOW GRAPH ######################################

    #### carla world init ####
    client = carla.Client('127.0.0.1', 2000)
    client.set_timeout(10.0)  # seconds
    logger.info('Carla connect success...')
    logger.info('Carla world initing...')
    world = client.get_world()
    destroy_all_actors(world)

    ## spawn vehicles in carla world
    spawn_points = list(world.get_map().get_spawn_points())
    spawn_egopilot_at(world, spawn_points[45])
    # spawn_vehicles(world, n_autopilots=0, n_egopilots=FLAGS.n_egopilots)
    time.sleep(2)  ## sometimes unstale
    autopilots = get_all_autopilots(world)
    egopilots = get_all_egopilots(world)

    # Per-egopilot sensor handles, index-aligned with `egopilots`.
    cameras = []
    spectors = []
    lane_invasions = []
    obj_collisions = []
    # obstacle_aheads = []
    logger.info('Adding some sensors to egopilots...')
    for egopilot in egopilots:
        ## attach a camera to egopilot ##
        # semantic_camera_config['attach_to'] = egopilot
        # semantic_sensor = semantic_camera(world, semantic_camera_config)
        # cameras.append(semantic_sensor)
        bgr_camera_config['attach_to'] = egopilot
        bgr_sensor = bgr_camera(world, bgr_camera_config)
        cameras.append(bgr_sensor)

        # Chase camera, used for on-screen visualization only.
        spector_camera_config['attach_to'] = egopilot
        bgr_sensor = bgr_camera(world, spector_camera_config)
        spectors.append(bgr_sensor)

        ## attach collision sensor to egopilot ##
        collision_sensor_config['attach_to'] = egopilot
        collision_sensor = collision_query(world, collision_sensor_config)
        obj_collisions.append(collision_sensor)

        ## attach line invasion sensor to egopilot ##
        invasion_sensor_config['attach_to'] = egopilot
        lane_invasion_sensor = lane_invasion_query(world, invasion_sensor_config)
        lane_invasions.append(lane_invasion_sensor)

        # ## attach obstacle sensor to egopilot
        # obstacle_sensor_config['attach_to'] = egopilot
        # obstacle_sensor = obstacle_ahead_query(world, obstacle_sensor_config)
        # obstacle_aheads.append(obstacle_sensor)
    logger.info('Adding some sensors to egopilots success')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Evaluation only: a valid checkpoint is mandatory.
        if ckpt:
            logger.info('loading %s...' % str(ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)
            logger.info('Load checkpoint success...')
        else:
            raise ValueError('you must provide ture checkpoint...')

        # Daemon threads: respawn watchdog + control loop; the main thread
        # just keeps the process alive.
        check_t = threading.Thread(target=check_whether_respawn_actors, args=(world, autopilots + egopilots,))
        target_t = threading.Thread(target=online_thread, args=(sess,))
        target_t.daemon = True
        check_t.daemon = True
        check_t.start()
        # # respwan_v_t.start()
        target_t.start()
        # vis_memory_thread()
        while True:
            pass
threadpool.py | import asyncio
import logging
import threading
import time
from queue import Queue
from mmpy_bot.scheduler import default_scheduler
from mmpy_bot.webhook_server import WebHookServer
log = logging.getLogger("mmpy.threadpool")
class ThreadPool(object):
    def __init__(self, num_workers: int):
        """Threadpool class to easily specify a number of worker threads and assign work
        to any of them.

        Arguments:
        - num_workers: int, how many threads to run simultaneously.
        """
        self.num_workers = num_workers
        self.alive = False
        self._queue = Queue()
        self._busy_workers = Queue()
        self._threads = []

    def add_task(self, function, *args):
        """Queue `function(*args)` for execution by the next idle worker."""
        self._queue.put((function, args))

    def get_busy_workers(self):
        """Return how many workers are currently executing a task."""
        return self._busy_workers.qsize()

    def start(self):
        """Mark the pool alive and spawn the worker threads."""
        self.alive = True
        for _ in range(self.num_workers):
            new_worker = threading.Thread(target=self.handle_work)
            self._threads.append(new_worker)
            new_worker.start()

    def stop(self):
        """Signals all threads that they should stop and waits for them to finish."""
        self.alive = False
        # Each blocked worker needs one wake-up task so it can re-check
        # `self.alive` and exit its loop.
        for _ in range(self.num_workers):
            self._queue.put((self._stop_thread, tuple()))

        log.info("Stopping threadpool, waiting for threads...")
        for thread in self._threads:
            thread.join()
        log.info("Threadpool stopped.")

    def _stop_thread(self):
        """Used to stop individual threads."""
        return

    def handle_work(self):
        """Worker loop: execute queued tasks until the pool is stopped."""
        while self.alive:
            # Block until a task is available.
            function, arguments = self._queue.get()
            # Mark this worker busy for the duration of the task.
            self._busy_workers.put(1)
            try:
                function(*arguments)
            except Exception as e:
                log.exception(e)
            self._queue.task_done()
            self._busy_workers.get()

    def start_scheduler_thread(self, trigger_period: float):
        def run_pending():
            # Poll the scheduler once per trigger_period while the pool lives.
            log.info("Scheduler thread started.")
            while self.alive:
                time.sleep(trigger_period)
                default_scheduler.run_pending()
            log.info("Scheduler thread stopped.")

        self.add_task(run_pending)

    def start_webhook_server_thread(self, webhook_server: WebHookServer):
        async def start_server():
            # Run the webhook server until the pool dies, then stop it.
            log.info("Webhook server thread started.")
            await webhook_server.start()
            while self.alive:
                # We just use this to keep the loop running in a non-blocking way
                await asyncio.sleep(0.001)
            await webhook_server.stop()
            log.info("Webhook server thread stopped.")

        self.add_task(asyncio.run, start_server())
|
common_cache_test.py | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that are common to both st.memo and st.singleton"""
import threading
import unittest
from unittest.mock import patch
from parameterized import parameterized
import streamlit as st
from streamlit import report_thread
from streamlit.caching import (
MEMO_CALL_STACK,
SINGLETON_CALL_STACK,
clear_memo_cache,
clear_singleton_cache,
)
# Short aliases for the two experimental cache decorators under test.
memo = st.experimental_memo
singleton = st.experimental_singleton
class CommonCacheTest(unittest.TestCase):
    """Behavioral tests shared by st.experimental_memo and
    st.experimental_singleton; each test is parameterized over both
    decorators."""

    def tearDown(self):
        # Some of these tests reach directly into CALL_STACK data and twiddle it.
        # Reset default values on teardown.
        MEMO_CALL_STACK._cached_func_stack = []
        MEMO_CALL_STACK._suppress_st_function_warning = 0
        SINGLETON_CALL_STACK._cached_func_stack = []
        SINGLETON_CALL_STACK._suppress_st_function_warning = 0

        # Clear caches
        clear_memo_cache()
        clear_singleton_cache()

        # And some tests create widgets, and can result in DuplicateWidgetID
        # errors on subsequent runs.
        ctx = report_thread.get_report_ctx()
        if ctx is not None:
            ctx.widget_ids_this_run.clear()

        super().tearDown()

    @parameterized.expand([("memo", memo), ("singleton", singleton)])
    def test_simple(self, _, cache_decorator):
        """A cached function returns the same value on first and cached calls."""
        @cache_decorator
        def foo():
            return 42

        self.assertEqual(foo(), 42)
        self.assertEqual(foo(), 42)

    @parameterized.expand([("memo", memo), ("singleton", singleton)])
    def test_multiple_int_like_floats(self, _, cache_decorator):
        """Int-like floats (1.0 vs 3.0) must map to distinct cache entries."""
        @cache_decorator
        def foo(x):
            return x

        self.assertEqual(foo(1.0), 1.0)
        self.assertEqual(foo(3.0), 3.0)

    @parameterized.expand([("memo", memo), ("singleton", singleton)])
    def test_return_cached_object(self, _, cache_decorator):
        """If data has been cached, the cache function shouldn't be called."""
        with patch.object(st, "exception") as mock_exception:
            called = [False]

            @cache_decorator
            def f(x):
                called[0] = True
                return x

            self.assertFalse(called[0])
            f(0)
            # First call with a new arg computes.
            self.assertTrue(called[0])

            called = [False]  # Reset called

            f(0)
            # Same arg again: served from cache, no recompute.
            self.assertFalse(called[0])

            f(1)
            # New arg: computes again.
            self.assertTrue(called[0])

            mock_exception.assert_not_called()

    @parameterized.expand([("memo", memo), ("singleton", singleton)])
    def test_mutate_args(self, _, cache_decorator):
        """Mutating an argument inside a memoized function doesn't throw
        an error (but it's probably not a great idea)."""
        with patch.object(st, "exception") as mock_exception:

            @cache_decorator
            def foo(d):
                d["answer"] += 1
                return d["answer"]

            d = {"answer": 0}

            self.assertEqual(foo(d), 1)
            self.assertEqual(foo(d), 2)

            mock_exception.assert_not_called()

    @parameterized.expand([("memo", memo), ("singleton", singleton)])
    def test_ignored_args(self, _, cache_decorator):
        """Args prefixed with _ are not used as part of the cache key."""
        call_count = [0]

        @cache_decorator
        def foo(arg1, _arg2, *args, kwarg1, _kwarg2=None, **kwargs):
            call_count[0] += 1

        foo(1, 2, 3, kwarg1=4, _kwarg2=5, kwarg3=6, _kwarg4=7)
        self.assertEqual([1], call_count)

        # Call foo again, but change the values for _arg2, _kwarg2, and _kwarg4.
        # The call count shouldn't change, because these args will not be part
        # of the hash.
        foo(1, None, 3, kwarg1=4, _kwarg2=None, kwarg3=6, _kwarg4=None)
        self.assertEqual([1], call_count)

        # Changing the value of any other argument will increase the call
        # count. We test each argument type:

        # arg1 (POSITIONAL_OR_KEYWORD)
        foo(None, 2, 3, kwarg1=4, _kwarg2=5, kwarg3=6, _kwarg4=7)
        self.assertEqual([2], call_count)

        # *arg (VAR_POSITIONAL)
        foo(1, 2, None, kwarg1=4, _kwarg2=5, kwarg3=6, _kwarg4=7)
        self.assertEqual([3], call_count)

        # kwarg1 (KEYWORD_ONLY)
        foo(1, 2, 3, kwarg1=None, _kwarg2=5, kwarg3=6, _kwarg4=7)
        self.assertEqual([4], call_count)

        # **kwarg (VAR_KEYWORD)
        foo(1, 2, 3, kwarg1=4, _kwarg2=5, kwarg3=None, _kwarg4=7)
        self.assertEqual([5], call_count)

    @parameterized.expand(
        [
            ("memo", memo, MEMO_CALL_STACK),
            ("singleton", singleton, SINGLETON_CALL_STACK),
        ]
    )
    def test_cached_st_function_warning(self, _, cache_decorator, call_stack):
        """Ensure we properly warn when st.foo functions are called
        inside a cached function.
        """
        with patch.object(call_stack, "_show_cached_st_function_warning") as warning:
            st.text("foo")
            warning.assert_not_called()

            @cache_decorator
            def cached_func():
                st.text("Inside cached func")

            cached_func()
            warning.assert_called_once()

            warning.reset_mock()

            # Make sure everything got reset properly
            st.text("foo")
            warning.assert_not_called()

            # Test warning suppression
            @cache_decorator(suppress_st_warning=True)
            def suppressed_cached_func():
                st.text("No warnings here!")

            suppressed_cached_func()
            warning.assert_not_called()

            # Test nested st.cache functions
            @cache_decorator
            def outer():
                @cache_decorator
                def inner():
                    st.text("Inside nested cached func")

                return inner()

            outer()
            warning.assert_called_once()

            warning.reset_mock()

            # Test st.cache functions that raise errors
            with self.assertRaises(RuntimeError):

                @cache_decorator
                def cached_raise_error():
                    st.text("About to throw")
                    raise RuntimeError("avast!")

                cached_raise_error()

            warning.assert_called_once()
            warning.reset_mock()

            # Make sure everything got reset properly
            st.text("foo")
            warning.assert_not_called()

            # Test st.cache functions with widgets
            @cache_decorator
            def cached_widget():
                st.button("Press me!")

            cached_widget()
            warning.assert_called_once()
            warning.reset_mock()

            # Make sure everything got reset properly
            st.text("foo")
            warning.assert_not_called()

    @parameterized.expand(
        [("memo", MEMO_CALL_STACK), ("singleton", SINGLETON_CALL_STACK)]
    )
    def test_multithreaded_call_stack(self, _, call_stack):
        """CachedFunctionCallStack should work across multiple threads."""

        def get_counter():
            return len(call_stack._cached_func_stack)

        def set_counter(val):
            call_stack._cached_func_stack = ["foo"] * val

        self.assertEqual(0, get_counter())
        set_counter(1)
        self.assertEqual(1, get_counter())

        values_in_thread = []

        def thread_test():
            values_in_thread.append(get_counter())
            set_counter(55)
            values_in_thread.append(get_counter())

        thread = threading.Thread(target=thread_test)
        thread.start()
        thread.join()

        # The spawned thread starts with a fresh (empty) stack.
        self.assertEqual([0, 55], values_in_thread)

        # The other thread should not have modified the main thread
        self.assertEqual(1, get_counter())

    @parameterized.expand(
        [
            ("memo", memo, clear_memo_cache),
            ("singleton", singleton, clear_singleton_cache),
        ]
    )
    def test_clear_all_caches(self, _, cache_decorator, clear_cache_func):
        """Calling a cache's global `clear_all` function should remove all
        items from all caches of the appropriate type.
        """
        foo_vals = []

        @cache_decorator
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @cache_decorator
        def bar(x):
            bar_vals.append(x)
            return x

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2], bar_vals)

        # Clear the cache and access our original values again. They
        # should be recomputed.
        clear_cache_func()

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2, 0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2, 0, 1, 2], bar_vals)

    @parameterized.expand([("memo", memo), ("singleton", singleton)])
    def test_clear_single_cache(self, _, cache_decorator):
        """`func.clear()` empties only that function's cache, not others'."""
        foo_call_count = [0]

        @cache_decorator
        def foo():
            foo_call_count[0] += 1

        bar_call_count = [0]

        @cache_decorator
        def bar():
            bar_call_count[0] += 1

        foo(), foo(), foo()
        bar(), bar(), bar()

        self.assertEqual(1, foo_call_count[0])
        self.assertEqual(1, bar_call_count[0])

        # Clear just foo's cache, and call the functions again.
        foo.clear()

        foo(), foo(), foo()
        bar(), bar(), bar()

        # Foo will have been called a second time, and bar will still
        # have been called just once.
        self.assertEqual(2, foo_call_count[0])
        self.assertEqual(1, bar_call_count[0])
|
mpi.py | """
A work manager which uses MPI to distribute tasks and collect results.
"""
import collections
import logging
import threading
from mpi4py import MPI
from westpa.work_managers import WorkManager, WMFuture
log = logging.getLogger(__name__)
class Task:
    """One schedulable unit of work.

    A task bundles everything a remote worker needs: a unique id (used to
    match the result back to its future), the callable, and its
    positional/keyword arguments.
    """

    def __init__(self, task_id, fn, args, kwargs):
        self.task_id = task_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def __repr__(self):
        return f'<Task {self.task_id}: {self.fn!r}(*{self.args!r}, **{self.kwargs!r})>'
class MPIBase:
    """Shared MPI state for both master and worker ranks: the communicator,
    rank bookkeeping, and the message tags used by the protocol."""

    def __init__(self):
        # Initialize communicator and obtain standard MPI variables
        comm = MPI.COMM_WORLD
        self.comm = comm
        self.rank = comm.Get_rank()
        self.num_procs = comm.Get_size()
        self.name = MPI.Get_processor_name()

        # Define master rank
        self.master_rank = 0

        # Define message tags for task, result, and announce
        self.task_tag = 10
        self.result_tag = 20
        self.announce_tag = 30

        # create an empty message buffer
        # NOTE(review): this is a *local* variable, discarded immediately —
        # probably intended to be `self.messages`. Nothing else reads it;
        # confirm before changing.
        messages = []

    def startup(self):
        # Subclasses (server/client) must provide their own startup.
        raise NotImplementedError

    def shutdown(self):
        # Subclasses must provide their own shutdown.
        raise NotImplementedError

    @property
    def is_master(self):
        '''True if this is the master process for task distribution. This is necessary, e.g., for
        MPI, where all processes start identically and then must branch depending on rank.'''
        if self.rank == self.master_rank:
            return True
        else:
            return False
class MPIWMServer(MPIBase):
    """Master-side work manager: queues submitted tasks, dispatches them to
    idle worker ranks, and matches incoming results to pending futures."""

    def __init__(self):
        super(MPIWMServer, self).__init__()

        # tasks awaiting dispatch
        self.task_queue = collections.deque()

        # MPI destination ranks for tasks; exclude master_rank
        self.task_dest = collections.deque()
        for rank in range(self.num_procs):
            if rank != self.master_rank:
                self.task_dest.append(rank)

        # futures corresponding to tasks
        self.pending_futures = dict()

    def _dispatch_loop(self):
        """Dispatch thread: pair queued tasks with idle worker ranks until a
        'shutdown' announcement is received."""
        comm = self.comm

        while True:
            # Dispatch as many tasks as possible before checking for shutdown
            while self.task_dest:
                try:
                    task = self.task_queue.popleft()
                    task_dest = self.task_dest.popleft()
                except IndexError:
                    # No task queued; go check for announcements.
                    break
                else:
                    comm.send(task, dest = task_dest, tag = self.task_tag )

            # NOTE(review): Iprobe is non-blocking, so this loop busy-waits
            # at full speed between dispatches — confirm that is intended.
            status = MPI.Status()
            comm.Iprobe(self.master_rank, self.announce_tag, status)
            message_tag = status.Get_tag()

            # Check for announcements
            if message_tag == self.announce_tag:
                messages = comm.recv(source = self.master_rank, tag = self.announce_tag)
                if 'shutdown' in messages:
                    log.debug('exiting _dispatch_loop()')
                    return

    def _receive_loop(self):
        """Receive thread: collect results from workers, resolve the matching
        futures, and return the worker to the idle pool, until shutdown."""
        comm = self.comm

        while True:
            # NOTE(review): non-blocking probe — busy-waits like _dispatch_loop.
            status = MPI.Status()
            comm.Iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status)
            message_src = status.Get_source()
            message_tag = status.Get_tag()

            # results are tuples of (task_id, {'result', 'exception'}, value)
            if message_tag == self.result_tag:
                (task_id, result_stat, result_value) = comm.recv(source = message_src, tag = message_tag)

                ft = self.pending_futures.pop(task_id)

                if result_stat == 'exception':
                    ft._set_exception(*result_value)
                    # Check with Matt on what else to do for an exception
                else:
                    ft._set_result(result_value)
                # The sending worker is idle again and may receive new tasks.
                self.task_dest.append(message_src)

            # Check for announcements
            elif message_tag == self.announce_tag:
                messages = comm.recv(source = message_src, tag = message_tag)
                if 'shutdown' in messages:
                    log.debug('exiting _receive_loop()')
                    return

    def _make_append_task(self, fn, args, kwargs):
        """Wrap fn/args/kwargs in a Task, register its future, queue it, and
        return the future."""
        ft = WMFuture()
        task_id = ft.task_id
        task = Task(task_id, fn, args, kwargs)
        self.pending_futures[task_id] = ft
        self.task_queue.append(task)
        return ft

    def submit(self, fn, args=None, kwargs=None):
        """Submit fn(*args, **kwargs) for remote execution; returns a WMFuture."""
        ft = self._make_append_task(fn, args if args is not None else [], kwargs if kwargs is not None else {})
        return ft

    def startup(self):
        """Launch the dispatch and receive loops, each in its own thread."""
        # start up server threads
        server_threads = []

        self._dispatch_thread = threading.Thread(target=self._dispatch_loop)
        self._dispatch_thread.start()
        server_threads.append(self._dispatch_thread)

        self._receive_thread = threading.Thread(target=self._receive_loop)
        self._receive_thread.start()
        server_threads.append(self._receive_thread)

        self.server_threads = server_threads
class MPIClient(MPIBase):
    """Worker-side endpoint: receives tasks from the master rank, executes
    them, and sends results (or exceptions) back."""

    def __init__(self):
        super(MPIClient, self).__init__()

    def _create_worker(self):
        """Worker loop: block on messages from the master rank, run each
        received task, and reply with its result; exit on 'shutdown'."""
        comm = self.comm
        while True:
            # Blocking probe: sleep until the master sends something.
            status = MPI.Status()
            comm.Probe(self.master_rank, MPI.ANY_TAG, status)
            message_src = self.master_rank
            message_tag = status.Get_tag()

            # Check for available task
            if message_tag == self.task_tag:
                task = comm.recv(source = message_src, tag = message_tag)

                try:
                    result_value = task.fn(*task.args, **task.kwargs)
                except Exception as e:
                    # BUG FIX: the original referenced `result_value` here,
                    # which is unbound when task.fn raises, so every failed
                    # task died with a NameError instead of reporting the
                    # real error. Ship the exception itself; the server
                    # unpacks this tuple into WMFuture._set_exception(*value).
                    result_object = (task.task_id, 'exception', (e,))
                else:
                    result_object = (task.task_id, 'result', result_value)

                comm.send(result_object, dest = self.master_rank, tag = self.result_tag)

            # Check for announcements
            if message_tag == self.announce_tag:
                messages = comm.recv(source = message_src, tag = message_tag)
                if 'shutdown' in messages:
                    return

    def startup(self):
        """Start the worker loop in a background thread."""
        # start up client thread
        self._worker_thread = threading.Thread(target=self._create_worker)
        self._worker_thread.start()

    def run(self):
        """Block until the worker thread exits (i.e. after shutdown)."""
        self._worker_thread.join()
class MPIWorkManager(MPIWMServer,MPIClient,WorkManager):
    '''A work manager using MPI.'''

    @classmethod
    def from_environ(cls, wmenv=None):
        # Environment settings are ignored: MPI configuration (rank count,
        # etc.) comes from the MPI runtime itself.
        return cls()

    def __init__(self):
        WorkManager.__init__(self)
        MPIWMServer.__init__(self)
        MPIClient.__init__(self)

    def startup(self):
        # The master rank runs the server (dispatch/receive) threads; every
        # other rank runs a worker loop.
        if self.rank == self.master_rank:
            MPIWMServer.startup(self)
        else:
            MPIClient.startup(self)

    def shutdown(self):
        comm = self.comm
        if self.rank == self.master_rank:
            # send 'shutdown' to client threads
            # NOTE(review): only ranks currently in task_dest (idle workers)
            # are notified; a rank still executing a task is not in the deque
            # and would never receive a shutdown — confirm.
            for x in self.task_dest:
                comm.send('shutdown', dest = x, tag = self.announce_tag )

            # send 'shutdown' to server threads
            # NOTE(review): this sends to rank 0 (ourselves) once per server
            # thread; both server loops probe the announce tag — confirm each
            # message is consumed by exactly one loop, and that the threads
            # are not joined here.
            for thread in self.server_threads:
                comm.send('shutdown', dest = 0, tag = self.announce_tag )

            log.info( "MPIWMServer.shutdown complete" )
|
crypto_util_test.py | """Tests for acme.crypto_util."""
import itertools
import socket
import threading
import time
import unittest
import six
from six.moves import socketserver #type: ignore # pylint: disable=import-error
import josepy as jose
import OpenSSL
from acme import errors
from acme import test_util
class SSLSocketAndProbeSNITest(unittest.TestCase):
    """Tests for acme.crypto_util.SSLSocket/probe_sni."""

    def setUp(self):
        # Serve a single TLS connection on an ephemeral port, presenting the
        # test certificate for the SNI name b'foo'.
        self.cert = test_util.load_comparable_cert('rsa2048_cert.pem')
        key = test_util.load_pyopenssl_private_key('rsa2048_key.pem')
        # pylint: disable=protected-access
        certs = {b'foo': (key, self.cert.wrapped)}

        from acme.crypto_util import SSLSocket

        class _TestServer(socketserver.TCPServer):
            # pylint: disable=too-few-public-methods
            # six.moves.* | pylint: disable=attribute-defined-outside-init,no-init

            def server_bind(self):  # pylint: disable=missing-docstring
                # Wrap the listening socket in SSLSocket so the handshake
                # performs SNI-based certificate selection.
                self.socket = SSLSocket(socket.socket(), certs=certs)
                socketserver.TCPServer.server_bind(self)

        self.server = _TestServer(('', 0), socketserver.BaseRequestHandler)
        self.port = self.server.socket.getsockname()[1]
        # handle_request serves exactly one connection, then returns.
        self.server_thread = threading.Thread(
            # pylint: disable=no-member
            target=self.server.handle_request)
        self.server_thread.start()
        time.sleep(1)  # TODO: avoid race conditions in other way

    def tearDown(self):
        self.server_thread.join()

    def _probe(self, name):
        """Probe the local test server with SNI *name* and wrap the result."""
        from acme.crypto_util import probe_sni
        return jose.ComparableX509(probe_sni(
            name, host='127.0.0.1', port=self.port))

    def test_probe_ok(self):
        self.assertEqual(self.cert, self._probe(b'foo'))

    def test_probe_not_recognized_name(self):
        self.assertRaises(errors.Error, self._probe, b'bar')

    # TODO: py33/py34 tox hangs forever on do_handshake in second probe
    #def probe_connection_error(self):
    #    self._probe(b'foo')
    #    #time.sleep(1) # TODO: avoid race conditions in other way
    #    self.assertRaises(errors.Error, self._probe, b'bar')
class PyOpenSSLCertOrReqAllNamesTest(unittest.TestCase):
    """Test for acme.crypto_util._pyopenssl_cert_or_req_all_names."""

    @classmethod
    def _call(cls, loader, name):
        """Load fixture *name* with *loader* and extract all its names."""
        # pylint: disable=protected-access
        from acme.crypto_util import _pyopenssl_cert_or_req_all_names
        return _pyopenssl_cert_or_req_all_names(loader(name))

    def _call_cert(self, name):
        return self._call(test_util.load_cert, name)

    def test_cert_one_san_no_common(self):
        self.assertEqual(self._call_cert('cert-nocn.der'),
                         ['no-common-name.badssl.com'])

    def test_cert_no_sans_yes_common(self):
        self.assertEqual(self._call_cert('cert.pem'), ['example.com'])

    def test_cert_two_sans_yes_common(self):
        # Common name plus SANs are merged without duplicates.
        self.assertEqual(self._call_cert('cert-san.pem'),
                         ['example.com', 'www.example.com'])
class PyOpenSSLCertOrReqSANTest(unittest.TestCase):
    """Test for acme.crypto_util._pyopenssl_cert_or_req_san."""

    @classmethod
    def _call(cls, loader, name):
        """Load fixture *name* with *loader* and extract its SAN entries."""
        # pylint: disable=protected-access
        from acme.crypto_util import _pyopenssl_cert_or_req_san
        return _pyopenssl_cert_or_req_san(loader(name))

    @classmethod
    def _get_idn_names(cls):
        """Returns expected names from '{cert,csr}-idnsans.pem'."""
        # Greek, Arabic and Mongolian codepoint ranges, chunked into
        # 45-character labels under the '.invalid' TLD.
        chars = [six.unichr(i) for i in itertools.chain(range(0x3c3, 0x400),
                                                        range(0x641, 0x6fc),
                                                        range(0x1820, 0x1877))]
        return [''.join(chars[i: i + 45]) + '.invalid'
                for i in range(0, len(chars), 45)]

    def _call_cert(self, name):
        return self._call(test_util.load_cert, name)

    def _call_csr(self, name):
        return self._call(test_util.load_csr, name)

    def test_cert_no_sans(self):
        self.assertEqual(self._call_cert('cert.pem'), [])

    def test_cert_two_sans(self):
        self.assertEqual(self._call_cert('cert-san.pem'),
                         ['example.com', 'www.example.com'])

    def test_cert_hundred_sans(self):
        self.assertEqual(self._call_cert('cert-100sans.pem'),
                         ['example{0}.com'.format(i) for i in range(1, 101)])

    def test_cert_idn_sans(self):
        self.assertEqual(self._call_cert('cert-idnsans.pem'),
                         self._get_idn_names())

    def test_csr_no_sans(self):
        self.assertEqual(self._call_csr('csr-nosans.pem'), [])

    def test_csr_one_san(self):
        self.assertEqual(self._call_csr('csr.pem'), ['example.com'])

    def test_csr_two_sans(self):
        self.assertEqual(self._call_csr('csr-san.pem'),
                         ['example.com', 'www.example.com'])

    def test_csr_six_sans(self):
        self.assertEqual(self._call_csr('csr-6sans.pem'),
                         ['example.com', 'example.org', 'example.net',
                          'example.info', 'subdomain.example.com',
                          'other.subdomain.example.com'])

    def test_csr_hundred_sans(self):
        self.assertEqual(self._call_csr('csr-100sans.pem'),
                         ['example{0}.com'.format(i) for i in range(1, 101)])

    def test_csr_idn_sans(self):
        self.assertEqual(self._call_csr('csr-idnsans.pem'),
                         self._get_idn_names())

    def test_critical_san(self):
        # SANs marked critical must still be parsed.
        self.assertEqual(self._call_cert('critical-san.pem'),
                         ['chicago-cubs.venafi.example', 'cubs.venafi.example'])
class RandomSnTest(unittest.TestCase):
    """Test for random certificate serial numbers."""

    def setUp(self):
        # One shared RSA key; only the serial numbers should vary.
        self.cert_count = 5
        self.serial_num = []
        self.key = OpenSSL.crypto.PKey()
        self.key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)

    def test_sn_collisions(self):
        """Serials of several generated self-signed certs must not all collide."""
        from acme.crypto_util import gen_ss_cert
        for _ in range(self.cert_count):
            cert = gen_ss_cert(self.key, ['dummy'], force_san=True)
            self.serial_num.append(cert.get_serial_number())
        # assertGreater reports the actual count on failure, unlike
        # assertTrue(len(...) > 1) which only prints "False is not true".
        self.assertGreater(len(set(self.serial_num)), 1)
class MakeCSRTest(unittest.TestCase):
    """Test for acme.crypto_util.make_csr."""

    @classmethod
    def _call_with_key(cls, *args, **kwargs):
        """Generate a fresh 2048-bit RSA key and forward it (PEM) to make_csr."""
        privkey = OpenSSL.crypto.PKey()
        privkey.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
        privkey_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey)
        from acme.crypto_util import make_csr
        return make_csr(privkey_pem, *args, **kwargs)

    def test_make_csr(self):
        csr_pem = self._call_with_key(["a.example", "b.example"])
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn(b'--BEGIN CERTIFICATE REQUEST--', csr_pem)
        self.assertIn(b'--END CERTIFICATE REQUEST--', csr_pem)
        csr = OpenSSL.crypto.load_certificate_request(
            OpenSSL.crypto.FILETYPE_PEM, csr_pem)
        # In pyopenssl 0.13 (used with TOXENV=py26-oldest and py27-oldest), csr
        # objects don't have a get_extensions() method, so we skip this test if
        # the method isn't available.
        if hasattr(csr, 'get_extensions'):
            # assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(len(csr.get_extensions()), 1)
            self.assertEqual(csr.get_extensions()[0].get_data(),
                             OpenSSL.crypto.X509Extension(
                                 b'subjectAltName',
                                 critical=False,
                                 value=b'DNS:a.example, DNS:b.example',
                             ).get_data(),
                             )

    def test_make_csr_must_staple(self):
        csr_pem = self._call_with_key(["a.example"], must_staple=True)
        csr = OpenSSL.crypto.load_certificate_request(
            OpenSSL.crypto.FILETYPE_PEM, csr_pem)
        # In pyopenssl 0.13 (used with TOXENV=py26-oldest and py27-oldest), csr
        # objects don't have a get_extensions() method, so we skip this test if
        # the method isn't available.
        if hasattr(csr, 'get_extensions'):
            self.assertEqual(len(csr.get_extensions()), 2)
            # NOTE: Ideally we would filter by the TLS Feature OID, but
            # OpenSSL.crypto.X509Extension doesn't give us the extension's raw OID,
            # and the shortname field is just "UNDEF"
            must_staple_exts = [e for e in csr.get_extensions()
                                if e.get_data() == b"0\x03\x02\x01\x05"]
            self.assertEqual(len(must_staple_exts), 1,
                             "Expected exactly one Must Staple extension")
# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
|
padding_fifo_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
  """The queue op's node_def records dtype, shape (-1 = unknown), capacity."""
  with ops.Graph().as_default():
    q = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, ((None,),), name="Q")
    self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
    self.assertProtoEquals("""
      name:'Q' op:'PaddingFIFOQueueV2'
      attr { key: 'component_types' value { list { type: DT_FLOAT } } }
      attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
      attr { key: 'capacity' value { i: 10 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
  """Multi-component queues record all dtypes and the shared_name attr."""
  with ops.Graph().as_default():
    q = data_flow_ops.PaddingFIFOQueue(
        5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
        shared_name="foo",
        name="Q")
    self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
    self.assertProtoEquals("""
      name:'Q' op:'PaddingFIFOQueueV2'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list { shape { } shape { } } } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: 'foo' } }
      """, q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
  """Fully-known TensorShapes serialize dim-by-dim into the node_def."""
  with ops.Graph().as_default():
    q = data_flow_ops.PaddingFIFOQueue(
        5, (dtypes_lib.int32, dtypes_lib.float32),
        shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
                tensor_shape.TensorShape([5, 8])),
        name="Q")
    self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
    self.assertProtoEquals("""
      name:'Q' op:'PaddingFIFOQueueV2'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {
        shape { dim { size: 1 }
                dim { size: 1 }
                dim { size: 2 }
                dim { size: 3 } }
        shape { dim { size: 5 }
                dim { size: 8 } }
      } } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def)
def testEnqueue(self):
  """A single enqueue on a scalar-shaped queue runs without error."""
  with self.test_session():
    queue = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    queue.enqueue((10.0,)).run()
def testEnqueueWithShape(self):
  """Enqueue accepts a matching (3, 2) value and rejects a (2, 3) one."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=((3, 2),))
    enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
    enqueue_correct_op.run()
    # Shape mismatch is caught at graph-construction time (ValueError).
    with self.assertRaises(ValueError):
      q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
    self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
  """enqueue_many accepts a batch matching the declared component shapes."""
  with self.test_session():
    queue = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
    scalars = [1, 2, 3, 4]
    pairs = [[1, 1], [2, 2], [3, 3], [4, 4]]
    queue.enqueue_many([scalars, pairs]).run()
    self.assertEqual(4, queue.size().eval())
def testParallelEnqueue(self):
  """Enqueueing from ten threads yields all elements (order unspecified)."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()

    # Run one producer thread for each element in elems.
    def enqueue(enqueue_op):
      sess.run(enqueue_op)

    threads = [
        self.checkedThread(
            target=enqueue, args=(e,)) for e in enqueue_ops
    ]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    # Dequeue every element using a single thread.
    results = []
    for _ in xrange(len(elems)):
      results.append(dequeued_t.eval())
    # Order is thread-dependent, so compare as multisets.
    self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
  """Dequeueing from ten threads yields all elements (order unspecified)."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()

    # Enqueue every element using a single thread.
    for enqueue_op in enqueue_ops:
      enqueue_op.run()

    # Run one consumer thread for each element in elems.
    results = []

    def dequeue():
      results.append(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, results)
def testDequeue(self):
  """Elements come back out one at a time in FIFO order."""
  with self.test_session():
    queue = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    values = [10.0, 20.0, 30.0]
    pending = [queue.enqueue((value,)) for value in values]
    dequeued = queue.dequeue()
    for op in pending:
      op.run()
    for expected in values:
      self.assertEqual([expected], dequeued.eval())
def testEnqueueAndBlockingDequeue(self):
  """A dequeue on an empty queue blocks until a producer enqueues."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()

    def enqueue():
      # The enqueue_ops should run after the dequeue op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:4259
      # https://github.com/imdone/tensorflow/issues/4257
      time.sleep(0.1)
      for enqueue_op in enqueue_ops:
        sess.run(enqueue_op)

    results = []

    def dequeue():
      for _ in xrange(len(elems)):
        results.append(sess.run(dequeued_t))

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    # Single producer + single consumer, so FIFO order is preserved.
    for elem, result in zip(elems, results):
      self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
  """Tuples of (int32, float32) round-trip through the queue in order."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10,
                                       (dtypes_lib.int32, dtypes_lib.float32),
                                       ((), ()))
    elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
    enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
    dequeued_t = q.dequeue()

    for enqueue_op in enqueue_ops:
      enqueue_op.run()

    for i in xrange(len(elems)):
      x_val, y_val = sess.run(dequeued_t)
      x, y = elems[i]
      self.assertEqual([x], x_val)
      self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
  """A freshly constructed queue reports a size of zero."""
  with self.test_session():
    queue = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    self.assertEqual([0], queue.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
  """size() is a scalar that tracks enqueue (+1) and dequeue (-1)."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue()
    size = q.size()
    self.assertEqual([], size.get_shape())

    enqueue_op.run()
    self.assertEqual(1, size.eval())
    dequeued_t.op.run()
    self.assertEqual(0, size.eval())
def testEnqueueMany(self):
  """Running enqueue_many twice queues the batch twice, keeping FIFO order."""
  with self.test_session():
    queue = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    batch = [10.0, 20.0, 30.0, 40.0]
    enqueue = queue.enqueue_many((batch,))
    dequeued = queue.dequeue()
    enqueue.run()
    enqueue.run()
    for step in range(8):
      self.assertEqual([batch[step % 4]], dequeued.eval())
def testEmptyEnqueueMany(self):
  """enqueue_many of a zero-length batch is a no-op: size stays zero."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
        (None, None),))
    empty_t = constant_op.constant(
        [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
    enqueue_op = q.enqueue_many((empty_t,))
    size_t = q.size()

    self.assertEqual([0], size_t.eval())
    enqueue_op.run()
    self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
  """dequeue_many(0) returns an empty tensor whether or not data is queued."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue_many(0)

    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithDynamicShape(self):
  """dequeue_many(0) also works when the component shape is unknown."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=((None,),))
    enqueue_op = q.enqueue(([10.0],))
    dequeued_t = q.dequeue_many(0)

    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
  """dequeue_up_to(0) returns empty even with a dynamic component shape."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=((None,),))
    enqueue_op = q.enqueue(([10.0],))
    dequeued_t = q.dequeue_up_to(0)

    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
  """Constructing without shapes raises: padding requires explicit shapes."""
  with self.test_session():
    with self.assertRaisesRegexp(
        ValueError,
        r"When providing partial shapes, a list of shapes must be provided."):
      data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
                                     None).queue_ref.eval()
def testMultiEnqueueMany(self):
  """Batched multi-component enqueue dequeues per-element tuples in order."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10,
                                       (dtypes_lib.float32, dtypes_lib.int32),
                                       ((), (2,)))
    float_elems = [10.0, 20.0, 30.0, 40.0]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue()

    enqueue_op.run()
    enqueue_op.run()

    for i in range(8):
      float_val, int_val = sess.run(dequeued_t)
      self.assertEqual(float_elems[i % 4], float_val)
      self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
  """Same as testMultiEnqueueMany but with an unknown second-component dim."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(
        10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
    float_elems = [10.0, 20.0, 30.0, 40.0]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue()

    enqueue_op.run()
    enqueue_op.run()

    for i in range(8):
      float_val, int_val = sess.run(dequeued_t)
      self.assertEqual(float_elems[i % 4], float_val)
      self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
  """dequeue_many returns consecutive fixed-size batches in FIFO order."""
  with self.test_session():
    queue = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    values = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    fill = queue.enqueue_many((values,))
    batch = queue.dequeue_many(4)
    fill.run()
    self.assertAllEqual(values[0:4], batch.eval())
    self.assertAllEqual(values[4:8], batch.eval())
def testDequeueUpToNoBlocking(self):
  """dequeue_up_to behaves like dequeue_many when enough elements exist."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(4)

    enqueue_op.run()

    self.assertAllEqual(elems[0:4], dequeued_t.eval())
    self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
  """Batched dequeue of tuples preserves order and static shapes."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(
        10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_many(4)
    dequeued_single_t = q.dequeue()

    enqueue_op.run()

    float_val, int_val = sess.run(dequeued_t)
    self.assertAllEqual(float_elems[0:4], float_val)
    self.assertAllEqual(int_elems[0:4], int_val)
    # With fully-known shapes the returned values match the static shapes.
    self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_t[1].get_shape())

    float_val, int_val = sess.run(dequeued_t)
    self.assertAllEqual(float_elems[4:8], float_val)
    self.assertAllEqual(int_elems[4:8], int_val)

    float_val, int_val = sess.run(dequeued_single_t)
    self.assertAllEqual(float_elems[8], float_val)
    self.assertAllEqual(int_elems[8], int_val)
    self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
  """With a dynamic dim, returned shapes are only compatible, not equal."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(
        10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_many(4)
    dequeued_single_t = q.dequeue()

    enqueue_op.run()

    float_val, int_val = sess.run(dequeued_t)
    self.assertAllEqual(float_elems[0:4], float_val)
    self.assertAllEqual(int_elems[0:4], int_val)
    # Static shape has an unknown dim, so check compatibility instead of
    # exact equality.
    self.assertTrue(
        tensor_shape.TensorShape(float_val.shape).is_compatible_with(
            dequeued_t[0].get_shape()))
    self.assertTrue(
        tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
            1].get_shape()))

    float_val, int_val = sess.run(dequeued_t)
    self.assertAllEqual(float_elems[4:8], float_val)
    self.assertAllEqual(int_elems[4:8], int_val)

    float_val, int_val = sess.run(dequeued_single_t)
    self.assertAllEqual(float_elems[8], float_val)
    self.assertAllEqual(int_elems[8], int_val)
    self.assertTrue(
        tensor_shape.TensorShape(float_val.shape).is_compatible_with(
            dequeued_single_t[0].get_shape()))
    self.assertTrue(
        tensor_shape.TensorShape(int_val.shape).is_compatible_with(
            dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
  """Variable-length elements are padded (b'' / 0) up to the batch max."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(
        10, (dtypes_lib.string, dtypes_lib.int32),
        shapes=((None,), (1, None)))
    str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
                 ["abc", "d", "e", "f"]]

    int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]

    enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]

    dequeued_t = q.dequeue_many(5)
    dequeued_single_t = q.dequeue()

    for enqueue_op in enqueue_ops:
      enqueue_op.run()
    string_val, int_val = sess.run(dequeued_t)

    # The first five elements are padded to the longest in the batch (3).
    self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
                         [b"abc", b"", b""], [b"abc", b"d", b""],
                         [b"abc", b"d", b"e"]], string_val)
    self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
                         [[1, 2, 3]]], int_val)
    self.assertTrue(
        tensor_shape.TensorShape(string_val.shape).is_compatible_with(
            dequeued_t[0].get_shape()))
    self.assertTrue(
        tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
            1].get_shape()))

    # A single dequeue returns the element unpadded.
    string_val, int_val = sess.run(dequeued_single_t)
    self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
    self.assertAllEqual([[1, 2, 3, 4]], int_val)
    self.assertTrue(
        tensor_shape.TensorShape(string_val.shape).is_compatible_with(
            dequeued_single_t[0].get_shape()))
    self.assertTrue(
        tensor_shape.TensorShape(int_val.shape).is_compatible_with(
            dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
  """Same padding behavior as dequeue_many, but via dequeue_up_to."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(
        10, (dtypes_lib.string, dtypes_lib.int32),
        shapes=((None,), (1, None)))
    str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
                 ["abc", "d", "e", "f"]]

    int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]

    enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]

    dequeued_t = q.dequeue_up_to(5)
    dequeued_single_t = q.dequeue()

    for enqueue_op in enqueue_ops:
      enqueue_op.run()
    string_val, int_val = sess.run(dequeued_t)

    # Shorter elements are padded with b'' / 0 to the batch maximum.
    self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
                         [b"abc", b"", b""], [b"abc", b"d", b""],
                         [b"abc", b"d", b"e"]], string_val)
    self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
                         [[1, 2, 3]]], int_val)
    self.assertTrue(
        tensor_shape.TensorShape(string_val.shape).is_compatible_with(
            dequeued_t[0].get_shape()))
    self.assertTrue(
        tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
            1].get_shape()))

    string_val, int_val = sess.run(dequeued_single_t)
    self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
    self.assertAllEqual([[1, 2, 3, 4]], int_val)
    self.assertTrue(
        tensor_shape.TensorShape(string_val.shape).is_compatible_with(
            dequeued_single_t[0].get_shape()))
    self.assertTrue(
        tensor_shape.TensorShape(int_val.shape).is_compatible_with(
            dequeued_single_t[1].get_shape()))
def testHighDimension(self):
  """Rank-4 (4,4,4,4) elements round-trip through enqueue_many/dequeue_many."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
    elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(10)

    enqueue_op.run()
    self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
  """Rank-4 elements also round-trip when two dims are declared unknown."""
  with self.test_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
        (4, None, 4, None),))
    elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(10)

    enqueue_op.run()
    self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
  """Values not matching the declared component shapes raise ValueError."""
  q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
                                     ((), (2,)))

  with self.assertRaises(ValueError):
    q.enqueue(([1, 2], [2, 2]))

  with self.assertRaises(ValueError):
    q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
  """enqueue_many components with differing batch sizes raise ValueError."""
  q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
                                          dtypes_lib.int32), ((), (), ()))

  with self.assertRaises(ValueError):
    q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))

  # A placeholder with unknown batch size does not mask the mismatch
  # between the two known batch sizes.
  with self.assertRaises(ValueError):
    q.enqueue_many(
        ([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))

  with self.assertRaises(ValueError):
    q.enqueue_many(
        (array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
  """Empty lists are converted to the queue's declared component dtypes."""
  q = data_flow_ops.PaddingFIFOQueue(10,
                                     (dtypes_lib.int32, dtypes_lib.float32), (
                                         (), ()))
  enq = q.enqueue_many(([], []))
  self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
  self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
  """Supplying a component with the wrong dtype raises ValueError."""
  q = data_flow_ops.PaddingFIFOQueue(10,
                                     (dtypes_lib.int32, dtypes_lib.float32), (
                                         (), ()))

  with self.assertRaises(ValueError):
    q.enqueue((array_ops.placeholder(dtypes_lib.int32),
               array_ops.placeholder(dtypes_lib.int32)))

  with self.assertRaises(ValueError):
    q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
                    array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
  """A fed value violating the known part of a partial shape fails at run time."""
  with self.test_session() as sess:
    # First dimension of second component is unknown, second
    # dimension must be 3.
    q = data_flow_ops.PaddingFIFOQueue(10,
                                       (dtypes_lib.int32, dtypes_lib.int32), (
                                           (2, 2), (None, 3)))
    elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
    elems_bad = array_ops.placeholder(dtypes_lib.int32)
    enqueue_op = q.enqueue((elems_ok, elems_bad))
    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                 r"Expected \[\?,3\], got \[3,4\]"):
      sess.run([enqueue_op],
               feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
  """enqueue_many of a batch violating a partial shape fails at run time."""
  with self.test_session() as sess:
    # First dimension of second component is unknown, second
    # dimension must be 3.
    q = data_flow_ops.PaddingFIFOQueue(10,
                                       (dtypes_lib.int32, dtypes_lib.int32), (
                                           (2, 2), (None, 3)))
    elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
    elems_bad = array_ops.placeholder(dtypes_lib.int32)
    enqueue_op = q.enqueue_many((elems_ok, elems_bad))
    dequeued_t = q.dequeue_many(2)
    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                 "Shape mismatch in tuple component 1. "
                                 r"Expected \[2,\?,3\], got \[2,3,4\]"):
      sess.run([enqueue_op],
               feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
      dequeued_t.eval()
def testParallelEnqueueMany(self):
  """Ten threads each enqueue the same 100-element batch; all 1000 arrive."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
    elems = [10.0 * x for x in range(100)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(1000)

    # Enqueue 100 items in parallel on 10 threads.
    def enqueue():
      sess.run(enqueue_op)

    threads = [self.checkedThread(target=enqueue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
  """Ten threads each dequeue_many(100); together they drain all 1000."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(100)

    enqueue_op.run()

    # Dequeue 100 items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
  """dequeue_up_to from a closed queue across ten threads drains everything."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    # 101 does not divide 1000 evenly, so some calls return partial batches.
    dequeued_t = q.dequeue_up_to(101)

    enqueue_op.run()
    close_op.run()

    # Dequeue up to 101 items in parallel on 10 threads, from closed queue.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
  """Concurrent producers and consumers on a nearly-full queue stay consistent."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
    initial_elements = [10.0] * 49
    q.enqueue_many((initial_elements,)).run()

    enqueue_op = q.enqueue((20.0,))
    dequeued_t = q.dequeue()

    def enqueue():
      for _ in xrange(100):
        sess.run(enqueue_op)

    def dequeue():
      for _ in xrange(100):
        self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))

    enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
    dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for enqueue_thread in enqueue_threads:
      enqueue_thread.start()
    for dequeue_thread in dequeue_threads:
      dequeue_thread.start()
    for enqueue_thread in enqueue_threads:
      enqueue_thread.join()
    for dequeue_thread in dequeue_threads:
      dequeue_thread.join()

    # Dequeue the initial count of elements to clean up.
    cleanup_elems = q.dequeue_many(49).eval()
    for elem in cleanup_elems:
      self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
  """Randomly mixing enqueue and enqueue_many still delivers 0..249 in order."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
    enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
    enqueue_op = q.enqueue((enqueue_placeholder,))
    enqueuemany_placeholder = array_ops.placeholder(
        dtypes_lib.int32, shape=(None,))
    enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))

    dequeued_t = q.dequeue()
    close_op = q.close()

    def dequeue():
      for i in xrange(250):
        self.assertEqual(i, sess.run(dequeued_t))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()

    elements_enqueued = 0
    while elements_enqueued < 250:
      # With equal probability, run Enqueue or enqueue_many.
      if random.random() > 0.5:
        enqueue_op.run({enqueue_placeholder: elements_enqueued})
        elements_enqueued += 1
      else:
        count = random.randint(0, min(20, 250 - elements_enqueued))
        range_to_enqueue = np.arange(
            elements_enqueued, elements_enqueued + count, dtype=np.int32)
        enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
        elements_enqueued += count

    close_op.run()
    dequeue_thread.join()
    self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
  """Randomly mixing dequeue and dequeue_many consumes 0..249 in order."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
    enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
    dequeued_t = q.dequeue()
    count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
    dequeuemany_t = q.dequeue_many(count_placeholder)

    def enqueue():
      sess.run(enqueue_op)

    enqueue_thread = self.checkedThread(target=enqueue)
    enqueue_thread.start()

    elements_dequeued = 0
    while elements_dequeued < 250:
      # With equal probability, run Dequeue or dequeue_many.
      if random.random() > 0.5:
        self.assertEqual(elements_dequeued, dequeued_t.eval())
        elements_dequeued += 1
      else:
        count = random.randint(0, min(20, 250 - elements_dequeued))
        expected_range = np.arange(
            elements_dequeued, elements_dequeued + count, dtype=np.int32)
        self.assertAllEqual(expected_range,
                            dequeuemany_t.eval({
                                count_placeholder: count
                            }))
        elements_dequeued += count

    q.close().run()
    enqueue_thread.join()
    self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
  """dequeue_many blocks on an empty queue until a batch is enqueued."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3832
      # https://github.com/imdone/tensorflow/issues/3831
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
  """dequeue_up_to blocks on an empty queue until elements are enqueued."""
  with self.test_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3218
      # https://github.com/imdone/tensorflow/issues/3217
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
  """dequeue_many's count can itself come from another queue's dequeue."""
  with self.test_session():
    # Define a first queue that contains integer counts.
    dequeue_counts = [random.randint(1, 10) for _ in range(100)]
    count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
    enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
    total_count = sum(dequeue_counts)

    # Define a second queue that contains total_count elements.
    elems = [random.randint(0, 100) for _ in range(total_count)]
    q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
    enqueue_elems_op = q.enqueue_many((elems,))

    # Define a subgraph that first dequeues a count, then DequeuesMany
    # that number of elements.
    dequeued_t = q.dequeue_many(count_q.dequeue())

    enqueue_counts_op.run()
    enqueue_elems_op.run()

    dequeued_elems = []
    for _ in dequeue_counts:
      dequeued_elems.extend(dequeued_t.eval())
    self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
  def testBlockingDequeueFromClosedQueue(self):
    """A pending dequeue drains a closed queue, then raises OutOfRangeError."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def dequeue():
        for elem in elems:
          self.assertEqual([elem], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3315
      # https://github.com/imdone/tensorflow/issues/3314
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testDequeueUpToFromClosedQueueReturnsRemainder(self):
    """After close, dequeue_up_to returns the partial remainder, not an error."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(3)
      enqueue_op.run()
      def dequeue():
        # First call gets a full batch of 3; second gets the 1 leftover.
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        self.assertAllEqual(elems[3:], sess.run(dequeued_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3780
      # https://github.com/imdone/tensorflow/issues/3777
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueFromClosedEmptyQueue(self):
    """A dequeue blocked on an empty queue fails once the queue is closed."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      close_op = q.close()
      dequeued_t = q.dequeue()
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:4260
      # https://github.com/imdone/tensorflow/issues/4258
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyFromClosedQueue(self):
    """dequeue_many gets one full batch; the next fails after close."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      enqueue_op.run()
      def dequeue():
        self.assertAllEqual(elems, sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3833
      # https://github.com/imdone/tensorflow/issues/3832
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyButNotAllFromClosedQueue(self):
    """A second dequeue_many(3) fails when only 1 element remains after close."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      enqueue_op.run()
      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3222
      # https://github.com/imdone/tensorflow/issues/3221
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
    """Concurrent enqueue/dequeue/close interleave without losing elements."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue()
      def enqueue():
        sess.run(enqueue_op)
      def dequeue():
        self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
        # Second batch cannot be filled once the queue is closed.
        with self.assertRaises(errors_impl.OutOfRangeError):
          sess.run(dequeued_t)
        # The single leftover element is still retrievable.
        self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
      def close():
        sess.run(close_op)
      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3317
      # https://github.com/imdone/tensorflow/issues/3316
      time.sleep(0.1)
      close_thread = self.checkedThread(target=close)
      close_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      close_thread.join()
  def testClosedBlockingDequeueManyRestoresPartialBatch(self):
    """Elements grabbed by a failed dequeue_many are restored in FIFO order."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
                                             dtypes_lib.float32), ((), ()))
      elems_a = [1.0, 2.0, 3.0]
      elems_b = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems_a, elems_b))
      dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
      cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
      close_op = q.close()
      enqueue_op.run()
      def dequeue():
        # Only 3 of the 4 requested elements exist, so this must fail.
        with self.assertRaises(errors_impl.OutOfRangeError):
          sess.run([dequeued_a_t, dequeued_b_t])
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3782
      # https://github.com/imdone/tensorflow/issues/3781
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
      # Test that the elements in the partially-dequeued batch are
      # restored in the correct order.
      for elem_a, elem_b in zip(elems_a, elems_b):
        val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
        self.assertEqual(elem_a, val_a)
        self.assertEqual(elem_b, val_b)
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    """dequeue_many blocked on an empty queue fails once the queue is closed."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:4261
      # https://github.com/imdone/tensorflow/issues/4259
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueUpToFromClosedEmptyQueue(self):
    """dequeue_up_to blocked on an empty queue fails once the queue is closed."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(4)
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3835
      # https://github.com/imdone/tensorflow/issues/3834
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """An enqueue on a full queue blocks until a dequeue makes room."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def blocking_enqueue():
        sess.run(blocking_enqueue_op)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3225
      # https://github.com/imdone/tensorflow/issues/3224
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      # The blocked enqueue has now completed; its value comes out last.
      self.assertEqual([50.0], dequeued_t.eval())
      thread.join()
  def testBlockingEnqueueManyToFullQueue(self):
    """A blocked enqueue_many finishes once dequeues free enough space."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def blocking_enqueue():
        sess.run(blocking_enqueue_op)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3321
      # https://github.com/imdone/tensorflow/issues/3320
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
        time.sleep(0.01)
      self.assertEqual([50.0], dequeued_t.eval())
      self.assertEqual([60.0], dequeued_t.eval())
      # Make sure the thread finishes before exiting.
      thread.join()
  def testBlockingEnqueueBeforeClose(self):
    """An enqueue blocked before close still succeeds once a dequeue frees room."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def blocking_enqueue():
        # Expect the operation to succeed once the dequeue op runs.
        sess.run(blocking_enqueue_op)
      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:3784
      # https://github.com/imdone/tensorflow/issues/3783
      time.sleep(0.1)
      def close():
        sess.run(close_op)
      close_thread = self.checkedThread(target=close)
      close_thread.start()
      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      for elem in [20.0, 30.0, 40.0, 50.0]:
        self.assertEqual(elem, dequeued_t.eval())
      self.assertEqual(0, q.size().eval())
  def testBlockingEnqueueManyBeforeClose(self):
    """An enqueue_many blocked before close completes once space is freed."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def blocking_enqueue():
        sess.run(blocking_enqueue_op)
      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO (mrry): Figure out how to do this without sleeping. id:4262
      # https://github.com/imdone/tensorflow/issues/4260
      time.sleep(0.1)
      def close():
        sess.run(close_op)
      close_thread = self.checkedThread(target=close)
      close_thread.start()
      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      for elem in [20.0, 30.0, 50.0, 60.0]:
        self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
  def testSharedQueueSameSession(self):
    """Two queue handles with the same shared_name observe the same state."""
    with self.test_session():
      q1 = data_flow_ops.PaddingFIFOQueue(
          1, dtypes_lib.float32, ((),), shared_name="shared_queue")
      q1.enqueue((10.0,)).run()
      q2 = data_flow_ops.PaddingFIFOQueue(
          1, dtypes_lib.float32, ((),), shared_name="shared_queue")
      q1_size_t = q1.size()
      q2_size_t = q2.size()
      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])
      # Dequeue via one handle is visible through the other.
      self.assertEqual(q2.dequeue().eval(), [10.0])
      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
      q2.enqueue((20.0,)).run()
      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])
      self.assertEqual(q1.dequeue().eval(), [20.0])
      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
  def testIncompatibleSharedQueueErrors(self):
    """Re-opening a shared queue with mismatched attributes must fail."""
    with self.test_session():
      # Mismatched capacity.
      q_a_1 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, ((),), shared_name="q_a")
      q_a_2 = data_flow_ops.PaddingFIFOQueue(
          15, dtypes_lib.float32, ((),), shared_name="q_a")
      q_a_1.queue_ref.op.run()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.op.run()
      # Mismatched component dtype.
      q_b_1 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, ((),), shared_name="q_b")
      q_b_2 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.int32, ((),), shared_name="q_b")
      q_b_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_b_2.queue_ref.op.run()
      # Scalar shape vs. explicit rank-4 shape.
      q_c_1 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, ((),), shared_name="q_c")
      q_c_2 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
      q_c_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_c_2.queue_ref.op.run()
      # Same mismatch in the other direction.
      q_d_1 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_2 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, ((),), shared_name="q_d")
      q_d_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.op.run()
      # Shapes of equal rank but different dimensions.
      q_e_1 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
      q_e_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.op.run()
      # Different number of components.
      q_f_1 = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, ((),), shared_name="q_f")
      q_f_2 = data_flow_ops.PaddingFIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
          shared_name="q_f")
      q_f_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
  def _blockingDequeue(self, sess, dequeue_op):
    """Run *dequeue_op*, expecting it to be cancelled by session shutdown."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(dequeue_op)
  def _blockingDequeueMany(self, sess, dequeue_many_op):
    """Run *dequeue_many_op*, expecting it to be cancelled by session shutdown."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(dequeue_many_op)
  def _blockingEnqueue(self, sess, enqueue_op):
    """Run *enqueue_op*, expecting it to be cancelled by session shutdown."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(enqueue_op)
  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    """Run *enqueue_many_op*, expecting it to be cancelled by session shutdown."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session cancels every blocked queue operation."""
    with self.test_session() as sess:
      q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)
      q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))
      # Each helper asserts its op fails with a "was cancelled" error.
      threads = [
          self.checkedThread(
              self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(
              self._blockingDequeueMany, args=(sess, dequeue_many_op)),
          self.checkedThread(
              self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(
              self._blockingEnqueueMany, args=(sess, enqueue_many_op))
      ]
      for t in threads:
        t.start()
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
  def testBigEnqueueMany(self):
    """An enqueue_many larger than capacity completes incrementally via dequeues."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()
      enq_done = []
      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)
      for _ in range(3):
        results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)
      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()
      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)
      self.assertAllEqual(elem, results)
  def testBigDequeueMany(self):
    """dequeue_many(4) on a capacity-2 queue completes as enqueues trickle in."""
    with self.test_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
      elem = np.arange(4, dtype=np.int32)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)
      results = []
      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))
      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO (mrry): Figure out how to do this without sleeping. id:3837
        # https://github.com/imdone/tensorflow/issues/3836
        time.sleep(0.1)
        self.assertEqual(len(results), 0)
        sess.run(enq)
      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertAllEqual(elem, results)
  def testDtypes(self):
    """Round-trips random values of every supported dtype through the queue."""
    with self.test_session() as sess:
      dtypes = [
          dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
          dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
          dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
      ]
      shape = (32, 4, 128)
      q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
      input_tuple = []
      for dtype in dtypes:
        np_dtype = dtype.as_numpy_dtype
        np_array = np.random.randint(-10, 10, shape)
        if dtype == dtypes_lib.bool:
          np_array = np_array > 0
        elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
          np_array = np.sqrt(np_array.astype(np_dtype))
        else:
          np_array = np_array.astype(np_dtype)
        input_tuple.append(np_array)
      q.enqueue_many(input_tuple).run()
      output_tuple_t = q.dequeue_many(32)
      output_tuple = sess.run(output_tuple_t)
      for (input_elem, output_elem) in zip(input_tuple, output_tuple):
        self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
class QueueFromListTest(test.TestCase):
  """Shape-merging behavior of QueueBase.from_list over PaddingFIFOQueues."""

  def testQueueFromListShapes(self):
    """from_list merges single-component shapes to their common relaxation."""
    which = constant_op.constant(1)
    def _cmp(expected, *shapes):
      # Builds one queue per candidate shape and checks the merged shape.
      qs = [
          data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
                                         [tensor_shape.TensorShape(s)])
          for s in shapes
      ]
      s_expected = tensor_shape.TensorShape(expected)
      s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
      if s_expected.ndims is None:
        self.assertEqual(s_expected.ndims, s.ndims)
      else:
        self.assertEqual(s_expected.as_list(), s.as_list())
    _cmp(None, [1, None], [None])
    _cmp([None], [1], [2])
    _cmp([1, None], [1, 1], [1, 2])
    _cmp([1, None], [1, 1], [1, None])
    _cmp([None, None], [None, 1], [1, None])
    _cmp([1], [1], [1], [1])
    _cmp([None], [1], [None], [1])
    _cmp(None, [1, None], [1], [1])

  def testQueueFromListShapesMultipleComponents(self):
    """Shape merging is applied per component for multi-component queues."""
    q_u_u = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.float32, dtypes_lib.int32],
        [tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
    q_u_f = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.float32, dtypes_lib.int32],
        [tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
    q_f_f = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.float32, dtypes_lib.int32],
        [tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
    which = constant_op.constant(1)
    s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
                                                [q_u_u, q_u_u, q_u_u]).shapes
    self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
    self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
    s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
                                                [q_u_u, q_u_u, q_u_f]).shapes
    self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
    self.assertEqual([None], s_cmp_2[0].as_list())
    s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
    self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
    self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
  # Run all test cases defined in this module.
  test.main()
|
tcp.py | # -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import msgpack
import socket
import os
import weakref
import time
import traceback
import errno
# Import Salt Libs
import salt.crypt
import salt.utils
import salt.utils.async
import salt.utils.event
import salt.utils.platform
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import errno
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
    '''
    Ensure that TCP keepalives are set for the socket.

    Reads ``tcp_keepalive``, ``tcp_keepalive_idle``, ``tcp_keepalive_cnt``
    and ``tcp_keepalive_intvl`` from ``opts`` and applies whichever socket
    options the running platform exposes. Values <= 0 leave the system
    default in place.
    '''
    if hasattr(socket, 'SO_KEEPALIVE'):
        if opts.get('tcp_keepalive', False):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            if hasattr(socket, 'SOL_TCP'):
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
                    if tcp_keepalive_idle > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPIDLE,
                            int(tcp_keepalive_idle))
                if hasattr(socket, 'TCP_KEEPCNT'):
                    tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
                    if tcp_keepalive_cnt > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPCNT,
                            int(tcp_keepalive_cnt))
                if hasattr(socket, 'TCP_KEEPINTVL'):
                    tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
                    if tcp_keepalive_intvl > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPINTVL,
                            int(tcp_keepalive_intvl))
            if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
                # Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
                # TCP_KEEPINTVL. Instead, it has its own proprietary
                # SIO_KEEPALIVE_VALS.
                tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
                tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
                # Windows doesn't support changing something equivalent to
                # TCP_KEEPCNT.
                if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
                    # Windows defaults may be found by using the link below.
                    # Search for 'KeepAliveTime' and 'KeepAliveInterval'.
                    # https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
                    # If one value is set and the other isn't, we still need
                    # to send both values to SIO_KEEPALIVE_VALS and they both
                    # need to be valid. So in that case, use the Windows
                    # default.
                    if tcp_keepalive_idle <= 0:
                        tcp_keepalive_idle = 7200
                    if tcp_keepalive_intvl <= 0:
                        tcp_keepalive_intvl = 1
                    # The values expected are in milliseconds, so multiply by
                    # 1000.
                    sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
                        1, int(tcp_keepalive_idle * 1000),
                        int(tcp_keepalive_intvl * 1000)))
        else:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
if USE_LOAD_BALANCER:
    class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
        '''
        Raw TCP server which runs in its own process and will listen
        for incoming connections. Each incoming connection will be
        sent via multiprocessing queue to the workers.
        Since the queue is shared amongst workers, only one worker will
        handle a given connection.
        '''
        # TODO: opts!
        # Based on default used in tornado.netutil.bind_sockets()
        backlog = 128

        def __init__(self, opts, socket_queue, log_queue=None):
            """Store opts and the shared queue; the socket is created in run()."""
            super(LoadBalancerServer, self).__init__(log_queue=log_queue)
            self.opts = opts
            self.socket_queue = socket_queue
            self._socket = None

        # __setstate__ and __getstate__ are only used on Windows.
        # We do this so that __init__ will be invoked on Windows in the child
        # process so that a register_after_fork() equivalent will work on
        # Windows.
        def __setstate__(self, state):
            self._is_child = True
            self.__init__(
                state['opts'],
                state['socket_queue'],
                log_queue=state['log_queue']
            )

        def __getstate__(self):
            return {'opts': self.opts,
                    'socket_queue': self.socket_queue,
                    'log_queue': self.log_queue}

        def close(self):
            """Shut down and release the listening socket, if it exists."""
            if self._socket is not None:
                self._socket.shutdown(socket.SHUT_RDWR)
                self._socket.close()
                self._socket = None

        def __del__(self):
            # Best-effort cleanup; close() is a no-op when already closed.
            self.close()

        def run(self):
            '''
            Start the load balancer
            '''
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(1)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
            self._socket.listen(self.backlog)
            while True:
                try:
                    # Wait for a connection to occur since the socket is
                    # blocking.
                    connection, address = self._socket.accept()
                    # Wait for a free slot to be available to put
                    # the connection into.
                    # Sockets are picklable on Windows in Python 3.
                    self.socket_queue.put((connection, address), True, None)
                except socket.error as e:
                    # ECONNABORTED indicates that there was a connection
                    # but it was closed while still in the accept queue.
                    # (observed on FreeBSD).
                    if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
                        continue
                    raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to tcp.
Note: this class returns a singleton
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncTCPReqChannel for {0}'.format(key))
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
else:
log.debug('Re-using AsyncTCPReqChannel for {0}'.format(key))
return obj
@classmethod
def __key(cls, opts, **kwargs):
if 'master_uri' in kwargs:
opts['master_uri'] = kwargs['master_uri']
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
opts['master_uri'],
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get('resolver')
parse = urlparse.urlparse(self.opts['master_uri'])
host, port = parse.netloc.rsplit(':', 1)
self.master_addr = (host, int(port))
self._closing = False
self.message_client = SaltMessageClientPool(self.opts,
args=(self.opts, host, int(port),),
kwargs={'io_loop': self.io_loop, 'resolver': resolver})
def close(self):
if self._closing:
return
self._closing = True
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
key = self.auth.get_keys()
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
'''
@tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
# we may not have always data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
    @tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a request, return a future which will complete when we send the message
        '''
        try:
            # 'clear' requests skip AES entirely; everything else is
            # encrypted with the channel's session key.
            if self.crypt == 'clear':
                ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
            else:
                ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
        except tornado.iostream.StreamClosedError:
            # Convert to 'SaltClientError' so that clients can handle this
            # exception more appropriately.
            raise SaltClientError('Connection to master lost')
        raise tornado.gen.Return(ret)
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
    '''
    Minion-side subscriber channel that receives publications from the
    master over TCP and announces this minion's id on (re-)connect.
    '''
    def __init__(self,
                 opts,
                 **kwargs):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        # 'aes' by default; also consulted by AESPubClientMixin.
        self.crypt = kwargs.get('crypt', 'aes')
        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        self.connected = False
        self._closing = False
        # Flipped after the first successful connect so subsequent
        # connects are treated as reconnects (see connect_callback()).
        self._reconnected = False
        self.event = salt.utils.event.get_event(
            'minion',
            opts=self.opts,
            listen=False
        )
    def close(self):
        # Idempotent teardown; message_client only exists after connect().
        if self._closing:
            return
        self._closing = True
        if hasattr(self, 'message_client'):
            self.message_client.close()
    def __del__(self):
        self.close()
    def _package_load(self, load):
        # Wrap a payload in the transport envelope expected by the master.
        return {
            'enc': self.crypt,
            'load': load,
        }
    @tornado.gen.coroutine
    def send_id(self, tok, force_auth):
        '''
        Send the minion id to the master so that the master may better
        track the connection state of the minion.
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        '''
        load = {'id': self.opts['id'], 'tok': tok}
        @tornado.gen.coroutine
        def _do_transfer():
            msg = self._package_load(self.auth.crypticle.dumps(load))
            package = salt.transport.frame.frame_msg(msg, header=None)
            yield self.message_client.write_to_stream(package)
            raise tornado.gen.Return(True)
        if force_auth or not self.auth.authenticated:
            count = 0
            # A negative 'tcp_authentication_retries' means retry forever.
            while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
                try:
                    yield self.auth.authenticate()
                    break
                except SaltClientError as exc:
                    log.debug(exc)
                    count += 1
        try:
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            # Stale credentials: re-auth once and retry the transfer.
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
    @tornado.gen.coroutine
    def connect_callback(self, result):
        '''
        Invoked by the message client each time the TCP connection is
        (re-)established; announces this minion to the master and fires
        local/master events describing the new connection state.
        '''
        if self._closing:
            return
        # Force re-auth on reconnect since the master
        # may have been restarted
        yield self.send_id(self.tok, self._reconnected)
        self.connected = True
        self.event.fire_event(
            {'master': self.opts['master']},
            '__master_connected'
        )
        if self._reconnected:
            # On reconnects, fire a master event to notify that the minion is
            # available.
            if self.opts.get('__role') == 'syndic':
                data = 'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                )
                tag = salt.utils.event.tagify(
                    [self.opts['id'], 'start'],
                    'syndic'
                )
            else:
                data = 'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                )
                tag = salt.utils.event.tagify(
                    [self.opts['id'], 'start'],
                    'minion'
                )
            load = {'id': self.opts['id'],
                    'cmd': '_minion_event',
                    'pretag': None,
                    'tok': self.tok,
                    'data': data,
                    'tag': tag}
            # The start event travels over the request channel,
            # synchronously via SyncWrapper.
            req_channel = salt.utils.async.SyncWrapper(
                AsyncTCPReqChannel, (self.opts,)
            )
            try:
                req_channel.send(load, timeout=60)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
            except Exception:
                log.info('fire_master failed: {0}'.format(
                    traceback.format_exc())
                )
        else:
            self._reconnected = True
    def disconnect_callback(self):
        '''
        Invoked by the message client when the TCP connection drops.
        '''
        if self._closing:
            return
        self.connected = False
        self.event.fire_event(
            {'master': self.opts['master']},
            '__master_disconnected'
        )
    @tornado.gen.coroutine
    def connect(self):
        '''
        Authenticate with the master and open the publish connection.
        '''
        try:
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
            self.tok = self.auth.gen_token('salt')
            if not self.auth.authenticated:
                yield self.auth.authenticate()
            if self.auth.authenticated:
                self.message_client = SaltMessageClientPool(
                    self.opts,
                    args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
                    kwargs={'io_loop': self.io_loop,
                            'connect_callback': self.connect_callback,
                            'disconnect_callback': self.disconnect_callback})
                yield self.message_client.connect() # wait for the client to be connected
                self.connected = True
        # TODO: better exception handling...
        except KeyboardInterrupt:
            raise
        except Exception as exc:
            if '-|RETRY|-' not in str(exc):
                raise SaltClientError('Unable to sign_in to master: {0}'.format(exc)) # TODO: better error message
    def on_recv(self, callback):
        '''
        Register an on_recv callback
        '''
        if callback is None:
            return self.message_client.on_recv(callback)
        @tornado.gen.coroutine
        def wrap_callback(body):
            # Decrypt/verify via AESPubClientMixin._decode_payload before
            # handing the payload to the caller.
            if not isinstance(body, dict):
                # TODO: For some reason we need to decode here for things
                # to work. Fix this.
                body = msgpack.loads(body)
                if six.PY3:
                    body = salt.transport.frame.decode_embedded_strs(body)
            ret = yield self._decode_payload(body)
            callback(ret)
        return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
    '''
    Master-side request server: accepts minion request connections over
    TCP and dispatches decoded payloads to the daemon's payload handler.
    '''
    # TODO: opts!
    # Listen backlog for the request socket.
    backlog = 5
    def __init__(self, opts):
        salt.transport.server.ReqServerChannel.__init__(self, opts)
        self._socket = None
    @property
    def socket(self):
        # The listening socket, or None before pre_fork()/after close().
        return self._socket
    def close(self):
        # Shut down and release the listening socket; safe to call twice.
        if self._socket is not None:
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
            except socket.error as exc:
                if exc.errno == errno.ENOTCONN:
                    # We may try to shutdown a socket which is already disconnected.
                    # Ignore this condition and continue.
                    pass
                else:
                    raise exc
            self._socket.close()
            self._socket = None
    def __del__(self):
        self.close()
    def pre_fork(self, process_manager):
        '''
        Pre-fork we need to create the zmq router device
        '''
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        if USE_LOAD_BALANCER:
            self.socket_queue = multiprocessing.Queue()
            process_manager.add_process(
                LoadBalancerServer, args=(self.opts, self.socket_queue)
            )
        elif not salt.utils.platform.is_windows():
            # Bind before forking so every worker shares one listening socket.
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(0)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
    def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router
        payload_handler: function to call with your payloads
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.serial = salt.payload.Serial(self.opts)
        if USE_LOAD_BALANCER:
            self.req_server = LoadBalancerWorker(self.socket_queue,
                                                 self.handle_message,
                                                 io_loop=self.io_loop,
                                                 ssl_options=self.opts.get('ssl'))
        else:
            if salt.utils.platform.is_windows():
                # Windows cannot inherit the socket across fork; bind here.
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                _set_tcp_keepalive(self._socket, self.opts)
                self._socket.setblocking(0)
                self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
            self.req_server = SaltMessageServer(self.handle_message,
                                                io_loop=self.io_loop,
                                                ssl_options=self.opts.get('ssl'))
            self.req_server.add_socket(self._socket)
            self._socket.listen(self.backlog)
        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
    @tornado.gen.coroutine
    def handle_message(self, stream, header, payload):
        '''
        Handle incoming messages from underylying tcp streams
        '''
        try:
            try:
                payload = self._decode_payload(payload)
            except Exception:
                stream.write(salt.transport.frame.frame_msg('bad load', header=header))
                raise tornado.gen.Return()
            # TODO helper functions to normalize payload?
            if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
                yield stream.write(salt.transport.frame.frame_msg(
                    'payload and load must be a dict', header=header))
                raise tornado.gen.Return()
            # intercept the "_auth" commands, since the main daemon shouldn't know
            # anything about our key auth
            if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
                yield stream.write(salt.transport.frame.frame_msg(
                    self._auth(payload['load']), header=header))
                raise tornado.gen.Return()
            # TODO: test
            try:
                ret, req_opts = yield self.payload_handler(payload)
            except Exception as e:
                # always attempt to return an error to the minion
                stream.write('Some exception handling minion payload')
                log.error('Some exception handling a payload from minion', exc_info=True)
                stream.close()
                raise tornado.gen.Return()
            # req_opts['fun'] selects how the reply is encrypted on the
            # way back to the minion.
            req_fun = req_opts.get('fun', 'send')
            if req_fun == 'send_clear':
                stream.write(salt.transport.frame.frame_msg(ret, header=header))
            elif req_fun == 'send':
                stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
            elif req_fun == 'send_private':
                stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
                                                                                  req_opts['key'],
                                                                                  req_opts['tgt'],
                                                                                  ), header=header))
            else:
                log.error('Unknown req_fun {0}'.format(req_fun))
                # always attempt to return an error to the minion
                stream.write('Server-side exception handling payload')
                stream.close()
        except tornado.gen.Return:
            raise
        except tornado.iostream.StreamClosedError:
            # Stream was closed. This could happen if the remote side
            # closed the connection on its end (eg in a timeout or shutdown
            # situation).
            log.error('Connection was unexpectedly closed', exc_info=True)
        except Exception as exc: # pylint: disable=broad-except
            # Absorb any other exceptions
            log.error('Unexpected exception occurred: {0}'.format(exc), exc_info=True)
        raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
    '''
    Raw TCP server which will receive all of the TCP streams and re-assemble
    messages that are sent through to us
    '''
    def __init__(self, message_handler, *args, **kwargs):
        super(SaltMessageServer, self).__init__(*args, **kwargs)
        # List of (stream, address) tuples for all connected clients.
        self.clients = []
        # Coroutine invoked as message_handler(stream, header, body) for
        # every re-assembled frame.
        self.message_handler = message_handler

    @tornado.gen.coroutine
    def handle_stream(self, stream, address):
        '''
        Handle incoming streams and add messages to the incoming queue
        '''
        log.trace('Req client {0} connected'.format(address))
        self.clients.append((stream, address))
        unpacker = msgpack.Unpacker()
        try:
            while True:
                wire_bytes = yield stream.read_bytes(4096, partial=True)
                unpacker.feed(wire_bytes)
                # The unpacker yields one dict per complete msgpack frame.
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    header = framed_msg['head']
                    self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
        except tornado.iostream.StreamClosedError:
            log.trace('req client disconnected {0}'.format(address))
            self.clients.remove((stream, address))
        except Exception as e:
            log.trace('other master-side exception: {0}'.format(e))
            self.clients.remove((stream, address))
            stream.close()

    def shutdown(self):
        '''
        Shutdown the whole server
        '''
        # BUGFIX: the previous implementation removed entries from
        # self.clients while iterating over it, which skips every other
        # client and leaves those streams open. Iterate a snapshot and
        # clear the list afterwards instead.
        for client, address in list(self.clients):
            client.close()
        self.clients = []
if USE_LOAD_BALANCER:
    class LoadBalancerWorker(SaltMessageServer):
        '''
        This will receive TCP connections from 'LoadBalancerServer' via
        a multiprocessing queue.
        Since the queue is shared amongst workers, only one worker will handle
        a given connection.
        '''
        def __init__(self, socket_queue, message_handler, *args, **kwargs):
            super(LoadBalancerWorker, self).__init__(
                message_handler, *args, **kwargs)
            self.socket_queue = socket_queue
            # Blocking queue reads must happen off the IO loop, so they run
            # in a dedicated worker thread.
            t = threading.Thread(target=self.socket_queue_thread)
            t.start()
        def socket_queue_thread(self):
            # Pull accepted sockets from the shared queue forever and hand
            # each one to the IO loop for processing.
            try:
                while True:
                    client_socket, address = self.socket_queue.get(True, None)
                    # 'self.io_loop' initialized in super class
                    # 'tornado.tcpserver.TCPServer'.
                    # 'self._handle_connection' defined in same super class.
                    self.io_loop.spawn_callback(
                        self._handle_connection, client_socket, address)
            except (KeyboardInterrupt, SystemExit):
                pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
    '''
    Override _create_stream() in TCPClient to enable keep alive support.
    '''
    def __init__(self, opts, resolver=None, io_loop=None):
        self.opts = opts
        super(TCPClientKeepAlive, self).__init__(
            resolver=resolver, io_loop=io_loop)

    def _create_stream(self, max_buffer_size, af, addr, **kwargs):  # pylint: disable=unused-argument
        '''
        Override _create_stream() in TCPClient.
        Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
        Due to this, use **kwargs to swallow these and any future
        kwargs to maintain compatibility.
        '''
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _set_tcp_keepalive(keepalive_sock, self.opts)
        keepalive_stream = tornado.iostream.IOStream(
            keepalive_sock,
            io_loop=self.io_loop,
            max_buffer_size=max_buffer_size)
        return keepalive_stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
    '''
    Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
    '''
    def __init__(self, opts, args=None, kwargs=None):
        super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)

    def __del__(self):
        self.close()

    def close(self):
        '''
        Close every pooled client and drop the references so they can be
        garbage collected.
        '''
        for message_client in self.message_clients:
            message_client.close()
        self.message_clients = []

    @tornado.gen.coroutine
    def connect(self):
        '''
        Start connecting every pooled client in parallel; resolves once
        all of them are connected.
        '''
        futures = [message_client.connect()
                   for message_client in self.message_clients]
        for future in futures:
            yield future
        raise tornado.gen.Return(None)

    def on_recv(self, *args, **kwargs):
        # Register the callback on every client so replies from any
        # connection are delivered.
        for message_client in self.message_clients:
            message_client.on_recv(*args, **kwargs)

    def send(self, *args, **kwargs):
        # Dispatch to the least-loaded client. min() with a key is O(n);
        # the previous sorted(...)[0] built a full sorted copy (O(n log n))
        # just to take the first element.
        message_client = min(self.message_clients, key=lambda x: len(x.send_queue))
        return message_client.send(*args, **kwargs)

    def write_to_stream(self, *args, **kwargs):
        # Same least-loaded selection as send(), writing raw framed bytes.
        message_client = min(self.message_clients, key=lambda x: len(x.send_queue))
        return message_client._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
    '''
    Low-level message sending client
    '''
    def __init__(self, opts, host, port, io_loop=None, resolver=None,
                 connect_callback=None, disconnect_callback=None):
        self.opts = opts
        self.host = host
        self.port = port
        self.connect_callback = connect_callback
        self.disconnect_callback = disconnect_callback
        self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
        self._tcp_client = TCPClientKeepAlive(
            opts, io_loop=self.io_loop, resolver=resolver)
        # Monotonically increasing message id; wraps before _max_messages.
        self._mid = 1
        self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
        # TODO: max queue size
        self.send_queue = [] # queue of messages to be sent
        self.send_future_map = {} # mapping of request_id -> Future
        self.send_timeout_map = {} # request_id -> timeout_callback
        self._read_until_future = None
        self._on_recv = None
        self._closing = False
        # Kick off the connection immediately; _stream_return() runs for
        # the life of the client dispatching replies to their futures.
        self._connecting_future = self.connect()
        self._stream_return_future = tornado.concurrent.Future()
        self.io_loop.spawn_callback(self._stream_return)
    # TODO: timeout inflight sessions
    def close(self):
        '''
        Tear down the stream, the pending read future and the TCP client.
        Safe to call more than once.
        '''
        if self._closing:
            return
        self._closing = True
        if hasattr(self, '_stream') and not self._stream.closed():
            self._stream.close()
            if self._read_until_future is not None:
                # This will prevent this message from showing up:
                # '[ERROR ] Future exception was never retrieved:
                # StreamClosedError'
                # This happens because the logic is always waiting to read
                # the next message and the associated read future is marked
                # 'StreamClosedError' when the stream is closed.
                self._read_until_future.exc_info()
                if (not self._stream_return_future.done() and
                        self.io_loop != tornado.ioloop.IOLoop.current(
                            instance=False)):
                    # If _stream_return() hasn't completed, it means the IO
                    # Loop is stopped (such as when using
                    # 'salt.utils.async.SyncWrapper'). Ensure that
                    # _stream_return() completes by restarting the IO Loop.
                    # This will prevent potential errors on shutdown.
                    orig_loop = tornado.ioloop.IOLoop.current()
                    self.io_loop.make_current()
                    try:
                        self.io_loop.add_future(
                            self._stream_return_future,
                            lambda future: self.io_loop.stop()
                        )
                        self.io_loop.start()
                    finally:
                        orig_loop.make_current()
        self._tcp_client.close()
        # Clear callback references to allow the object that they belong to
        # to be deleted.
        self.connect_callback = None
        self.disconnect_callback = None
    def __del__(self):
        self.close()
    def connect(self):
        '''
        Ask for this client to reconnect to the origin
        '''
        if hasattr(self, '_connecting_future') and not self._connecting_future.done():
            future = self._connecting_future
        else:
            future = tornado.concurrent.Future()
            self._connecting_future = future
            self.io_loop.add_callback(self._connect)
            # Add the callback only when a new future is created
            if self.connect_callback is not None:
                def handle_future(future):
                    response = future.result()
                    self.io_loop.add_callback(self.connect_callback, response)
                future.add_done_callback(handle_future)
        return future
    # TODO: tcp backoff opts
    @tornado.gen.coroutine
    def _connect(self):
        '''
        Try to connect for the rest of time!
        '''
        while True:
            if self._closing:
                break
            try:
                self._stream = yield self._tcp_client.connect(self.host,
                                                              self.port,
                                                              ssl_options=self.opts.get('ssl'))
                self._connecting_future.set_result(True)
                break
            except Exception as e:
                yield tornado.gen.sleep(1) # TODO: backoff
                #self._connecting_future.set_exception(e)
    @tornado.gen.coroutine
    def _stream_return(self):
        '''
        Long-running reader: waits for a connection, then dispatches each
        framed reply to its pending future (matched on the 'mid' header)
        or to the registered on_recv callback.
        '''
        try:
            while not self._closing and (
                    not self._connecting_future.done() or
                    self._connecting_future.result() is not True):
                yield self._connecting_future
            unpacker = msgpack.Unpacker()
            while not self._closing:
                try:
                    self._read_until_future = self._stream.read_bytes(4096, partial=True)
                    wire_bytes = yield self._read_until_future
                    unpacker.feed(wire_bytes)
                    for framed_msg in unpacker:
                        if six.PY3:
                            framed_msg = salt.transport.frame.decode_embedded_strs(
                                framed_msg
                            )
                        header = framed_msg['head']
                        body = framed_msg['body']
                        message_id = header.get('mid')
                        if message_id in self.send_future_map:
                            self.send_future_map.pop(message_id).set_result(body)
                            self.remove_message_timeout(message_id)
                        else:
                            if self._on_recv is not None:
                                self.io_loop.spawn_callback(self._on_recv, header, body)
                            else:
                                log.error('Got response for message_id {0} that we are not tracking'.format(message_id))
                except tornado.iostream.StreamClosedError as e:
                    log.debug('tcp stream to {0}:{1} closed, unable to recv'.format(self.host, self.port))
                    # Fail every in-flight request so callers can retry.
                    for future in six.itervalues(self.send_future_map):
                        future.set_exception(e)
                    self.send_future_map = {}
                    if self._closing:
                        return
                    if self.disconnect_callback:
                        self.disconnect_callback()
                    # if the last connect finished, then we need to make a new one
                    if self._connecting_future.done():
                        self._connecting_future = self.connect()
                    yield self._connecting_future
                except TypeError:
                    # This is an invalid transport
                    if 'detect_mode' in self.opts:
                        log.info('There was an error trying to use TCP transport; '
                                 'attempting to fallback to another transport')
                    else:
                        raise SaltClientError
                except Exception as e:
                    log.error('Exception parsing response', exc_info=True)
                    for future in six.itervalues(self.send_future_map):
                        future.set_exception(e)
                    self.send_future_map = {}
                    if self._closing:
                        return
                    if self.disconnect_callback:
                        self.disconnect_callback()
                    # if the last connect finished, then we need to make a new one
                    if self._connecting_future.done():
                        self._connecting_future = self.connect()
                    yield self._connecting_future
        finally:
            # Signal close() that this reader has fully exited.
            self._stream_return_future.set_result(True)
    @tornado.gen.coroutine
    def _stream_send(self):
        '''
        Drain the send queue onto the stream; respawned by send() whenever
        the queue goes from empty to non-empty.
        '''
        while not self._connecting_future.done() or self._connecting_future.result() is not True:
            yield self._connecting_future
        while len(self.send_queue) > 0:
            message_id, item = self.send_queue[0]
            try:
                yield self._stream.write(item)
                del self.send_queue[0]
            # if the connection is dead, lets fail this send, and make sure we
            # attempt to reconnect
            except tornado.iostream.StreamClosedError as e:
                if message_id in self.send_future_map:
                    self.send_future_map.pop(message_id).set_exception(e)
                    self.remove_message_timeout(message_id)
                del self.send_queue[0]
                if self._closing:
                    return
                if self.disconnect_callback:
                    self.disconnect_callback()
                # if the last connect finished, then we need to make a new one
                if self._connecting_future.done():
                    self._connecting_future = self.connect()
                yield self._connecting_future
    def _message_id(self):
        '''
        Return the next message id that has no pending future, wrapping
        back to 1 before _max_messages.
        '''
        wrap = False
        while self._mid in self.send_future_map:
            if self._mid >= self._max_messages:
                if wrap:
                    # this shouldn't ever happen, but just in case
                    raise Exception('Unable to find available messageid')
                self._mid = 1
                wrap = True
            else:
                self._mid += 1
        return self._mid
    # TODO: return a message object which takes care of multiplexing?
    def on_recv(self, callback):
        '''
        Register a callback for received messages (that we didn't initiate)
        '''
        if callback is None:
            self._on_recv = callback
        else:
            def wrap_recv(header, body):
                callback(body)
            self._on_recv = wrap_recv
    def remove_message_timeout(self, message_id):
        '''
        Cancel the pending timeout callback for *message_id*, if any.
        '''
        if message_id not in self.send_timeout_map:
            return
        timeout = self.send_timeout_map.pop(message_id)
        self.io_loop.remove_timeout(timeout)
    def timeout_message(self, message_id):
        '''
        Fail the pending future for *message_id* with SaltReqTimeoutError.
        '''
        if message_id in self.send_timeout_map:
            del self.send_timeout_map[message_id]
        if message_id in self.send_future_map:
            self.send_future_map.pop(message_id).set_exception(
                SaltReqTimeoutError('Message timed out')
            )
    def send(self, msg, timeout=None, callback=None, raw=False):
        '''
        Send given message, and return a future
        '''
        message_id = self._message_id()
        header = {'mid': message_id}
        future = tornado.concurrent.Future()
        if callback is not None:
            def handle_future(future):
                response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)
        # Add this future to the mapping
        self.send_future_map[message_id] = future
        if self.opts.get('detect_mode') is True:
            timeout = 1
        if timeout is not None:
            send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
            self.send_timeout_map[message_id] = send_timeout
        # if we don't have a send queue, we need to spawn the callback to do the sending
        if len(self.send_queue) == 0:
            self.io_loop.spawn_callback(self._stream_send)
        self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
        return future
class Subscriber(object):
    '''
    Client object for use with the TCP publisher server
    '''
    def __init__(self, stream, address):
        self.stream = stream
        self.address = address
        # Minion id; filled in by PubServer once the client authenticates.
        self.id_ = None
        self._closing = False
        self._read_until_future = None

    def close(self):
        '''
        Close the underlying stream once; later calls are no-ops.
        '''
        if self._closing:
            return
        self._closing = True
        if self.stream.closed():
            return
        self.stream.close()
        if self._read_until_future is not None:
            # Retrieve the pending read's exception info so tornado does
            # not log 'Future exception was never retrieved:
            # StreamClosedError' for the read in flight when the stream
            # went away.
            self._read_until_future.exc_info()

    def __del__(self):
        self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
    '''
    TCP publisher
    '''
    def __init__(self, opts, io_loop=None):
        super(PubServer, self).__init__(io_loop=io_loop, ssl_options=opts.get('ssl'))
        self.opts = opts
        self._closing = False
        # All connected Subscriber objects.
        self.clients = set()
        self.aes_funcs = salt.master.AESFuncs(self.opts)
        # minion id -> set of Subscriber objects authenticated as that id.
        self.present = {}
        self.presence_events = False
        if self.opts.get('presence_events', False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != 'tcp':
                    tcp_only = False
            if tcp_only:
                # Only when the transport is TCP only, the presence events will
                # be handled here. Otherwise, it will be handled in the
                # 'Maintenance' process.
                self.presence_events = True
        if self.presence_events:
            self.event = salt.utils.event.get_event(
                'master',
                opts=self.opts,
                listen=False
            )
    def close(self):
        # Only flags shutdown; per-client streams are closed on read error.
        if self._closing:
            return
        self._closing = True
    def __del__(self):
        self.close()
    def _add_client_present(self, client):
        # Record *client* under its minion id; when it is a brand new id
        # and presence events are enabled, fire change/present events.
        id_ = client.id_
        if id_ in self.present:
            clients = self.present[id_]
            clients.add(client)
        else:
            self.present[id_] = set([client])
            if self.presence_events:
                data = {'new': [id_],
                        'lost': []}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )
    def _remove_client_present(self, client):
        id_ = client.id_
        if id_ is None or id_ not in self.present:
            # This is possible if _remove_client_present() is invoked
            # before the minion's id is validated.
            return
        clients = self.present[id_]
        if client not in clients:
            # Since _remove_client_present() is potentially called from
            # _stream_read() and/or publish_payload(), it is possible for
            # it to be called twice, in which case we will get here.
            # This is not an abnormal case, so no logging is required.
            return
        clients.remove(client)
        if len(clients) == 0:
            del self.present[id_]
            if self.presence_events:
                data = {'new': [],
                        'lost': [id_]}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )
    @tornado.gen.coroutine
    def _stream_read(self, client):
        '''
        Per-subscriber reader. The only traffic minions send on this
        connection is an AES-encrypted id/token load used to verify them
        for presence tracking.
        '''
        unpacker = msgpack.Unpacker()
        while not self._closing:
            try:
                client._read_until_future = client.stream.read_bytes(4096, partial=True)
                wire_bytes = yield client._read_until_future
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    body = framed_msg['body']
                    if body['enc'] != 'aes':
                        # We only accept 'aes' encoded messages for 'id'
                        continue
                    crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
                    load = crypticle.loads(body['load'])
                    if six.PY3:
                        load = salt.transport.frame.decode_embedded_strs(load)
                    if not self.aes_funcs.verify_minion(load['id'], load['tok']):
                        continue
                    client.id_ = load['id']
                    self._add_client_present(client)
            except tornado.iostream.StreamClosedError as e:
                log.debug('tcp stream to {0} closed, unable to recv'.format(client.address))
                client.close()
                self._remove_client_present(client)
                self.clients.discard(client)
                break
            except Exception as e:
                log.error('Exception parsing response', exc_info=True)
                continue
    def handle_stream(self, stream, address):
        # tornado TCPServer hook: a new subscriber has connected.
        log.trace('Subscriber at {0} connected'.format(address))
        client = Subscriber(stream, address)
        self.clients.add(client)
        self.io_loop.spawn_callback(self._stream_read, client)
    # TODO: ACK the publish through IPC
    @tornado.gen.coroutine
    def publish_payload(self, package, _):
        '''
        Frame and fan a publish out to subscribers; 'topic_lst' restricts
        delivery to the listed minion ids, otherwise every connected
        client receives the payload.
        '''
        log.debug('TCP PubServer sending payload: {0}'.format(package))
        payload = salt.transport.frame.frame_msg(package['payload'])
        to_remove = []
        if 'topic_lst' in package:
            topic_lst = package['topic_lst']
            for topic in topic_lst:
                if topic in self.present:
                    # This will rarely be a list of more than 1 item. It will
                    # be more than 1 item if the minion disconnects from the
                    # master in an unclean manner (eg cable yank), then
                    # restarts and the master is yet to detect the disconnect
                    # via TCP keep-alive.
                    for client in self.present[topic]:
                        try:
                            # Write the packed str
                            f = client.stream.write(payload)
                            self.io_loop.add_future(f, lambda f: True)
                        except tornado.iostream.StreamClosedError:
                            to_remove.append(client)
                else:
                    log.debug('Publish target {0} not connected'.format(topic))
        else:
            for client in self.clients:
                try:
                    # Write the packed str
                    f = client.stream.write(payload)
                    self.io_loop.add_future(f, lambda f: True)
                except tornado.iostream.StreamClosedError:
                    to_remove.append(client)
        # Drop subscribers whose streams died mid-publish.
        for client in to_remove:
            log.debug('Subscriber at {0} has disconnected from publisher'.format(client.address))
            client.close()
            self._remove_client_present(client)
            self.clients.discard(client)
        log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
    '''
    Master-side publish channel: runs PubServer in a daemon process and
    forwards loads to it over a local IPC pull socket.
    '''
    # TODO: opts!
    # Based on default used in tornado.netutil.bind_sockets()
    backlog = 128
    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts) # TODO: in init?
        self.io_loop = None
    def __setstate__(self, state):
        # Restore the shared AES secrets when unpickled into a new process.
        salt.master.SMaster.secrets = state['secrets']
        self.__init__(state['opts'])
    def __getstate__(self):
        return {'opts': self.opts,
                'secrets': salt.master.SMaster.secrets}
    def _publish_daemon(self, log_queue=None):
        '''
        Bind to the interface specified in the configuration file
        '''
        salt.utils.appendproctitle(self.__class__.__name__)
        if log_queue is not None:
            salt.log.setup.set_multiprocessing_logging_queue(log_queue)
            salt.log.setup.setup_multiprocessing_logging(log_queue)
        # Check if io_loop was set outside
        if self.io_loop is None:
            self.io_loop = tornado.ioloop.IOLoop.current()
        # Spin up the publisher
        pub_server = PubServer(self.opts, io_loop=self.io_loop)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(sock, self.opts)
        sock.setblocking(0)
        sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
        sock.listen(self.backlog)
        # pub_server will take ownership of the socket
        pub_server.add_socket(sock)
        # Set up Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        pull_sock = salt.transport.ipc.IPCMessageServer(
            pull_uri,
            io_loop=self.io_loop,
            payload_handler=pub_server.publish_payload,
        )
        # Securely create socket
        log.info('Starting the Salt Puller on {0}'.format(pull_uri))
        old_umask = os.umask(0o177)
        try:
            pull_sock.start()
        finally:
            os.umask(old_umask)
        # run forever
        try:
            self.io_loop.start()
        except (KeyboardInterrupt, SystemExit):
            salt.log.setup.shutdown_multiprocessing_logging()
    def pre_fork(self, process_manager):
        '''
        Do anything necessary pre-fork. Since this is on the master side this will
        primarily be used to create IPC channels and create our daemon process to
        do the actual publishing
        '''
        kwargs = {}
        if salt.utils.platform.is_windows():
            kwargs['log_queue'] = (
                salt.log.setup.get_multiprocessing_logging_queue()
            )
        process_manager.add_process(self._publish_daemon, kwargs=kwargs)
    def publish(self, load):
        '''
        Publish "load" to minions
        '''
        payload = {'enc': 'aes'}
        crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
        payload['load'] = crypticle.dumps(load)
        if self.opts['sign_pub_messages']:
            master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
            log.debug("Signing data packet")
            payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
        # Use the Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        # TODO: switch to the actual async interface
        #pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
        pub_sock = salt.utils.async.SyncWrapper(
            salt.transport.ipc.IPCMessageClient,
            (pull_uri,)
        )
        pub_sock.connect()
        int_payload = {'payload': self.serial.dumps(payload)}
        # add some targeting stuff for lists only (for now)
        if load['tgt_type'] == 'list':
            int_payload['topic_lst'] = load['tgt']
        # Send it over IPC!
        pub_sock.send(int_payload)
|
smc_udp_WPreceiver.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket, threading, time
from contextlib import closing
import rospy
from autoware_msgs.msg import Lane, Waypoint
import struct
from ctypes import *
#class DesiredCommand(Structure):
#class DesiredCommand(LittleEndianStructure):
class WAYPOINT(BigEndianStructure):
    # One waypoint as laid out on the wire (big-endian, byte-packed).
    # x/y/vel are raw integer encodings; the consumer applies factor/offset
    # (0.01 / -21474836.48 for x,y and 1/128 for vel — see the main loop).
    _pack_ = 1
    _fields_ = [
        ('x', c_uint32, 32 ),
        ('y', c_uint32, 32 ),
        ('vel', c_uint16, 16 )
    ]
class RecvWP(BigEndianStructure):
    # Full UDP frame: 4 header bytes followed by a fixed array of 50
    # waypoints. NOTE(review): unlike WAYPOINT this struct does not set
    # _pack_ = 1; all header fields are single bytes so the layout is
    # unaffected today, but confirm before adding wider fields.
    _fields_ = [
        ('ID', c_uint8 ),
        ('RollingCounter', c_uint8 ),
        ('CheckSum', c_uint8 ),
        ('wpcount', c_uint8),
        ('waypoints', WAYPOINT * 50),
    ]
def calc_checksum(d):
    """Checksum over payload *d*. Placeholder: always 0xffff until a real
    algorithm is wired in."""
    return 0xffff
def calc_actual2bin(val, factor=1, offset=0):
    """Encode an engineering value as its raw wire integer:
    int((val - offset) / factor)."""
    scaled = (val - offset) / factor
    return int(scaled)
def calc_bin2actual(val, factor=1, offset=0):
    """Decode a raw wire integer back to engineering units:
    val * factor + offset (inverse of calc_actual2bin)."""
    return offset + val * factor
def publisher():
    """Republish the most recent waypoint list (global wpmsg) at 10 Hz
    on the MABX_target_waypoints topic until ROS shuts down."""
    rate = rospy.Rate(10)
    topic = rospy.Publisher('MABX_target_waypoints', Lane, queue_size=1)
    while not rospy.is_shutdown():
        topic.publish(wpmsg)
        rate.sleep()
udpcount=0
wpmsg = Lane()
rospy.init_node('mabx_wp_receiver', anonymous=True)
thread = threading.Thread(target=publisher)
thread.start()
host = rospy.get_param("~udp_recv_hostname", '127.0.0.1')
#host = rospy.get_param("~udp_recv_hostname", '192.168.50.2')
port = int( rospy.get_param("~udp_recv_port", '51001') )
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rospy.loginfo("starting MABX-WP UDP receiver. hostname:%s, port:%d", host, port)
bufsize=4096
with closing(sock):
sock.bind((host, port))
while True:
data = sock.recv(bufsize)
udpcount += 1
fid = map( lambda x: struct.unpack('>B',x)[0], data)[0]
print map( lambda x: struct.unpack('>B',x)[0], data)
rospy.loginfo( map( lambda x: struct.unpack('>B',x)[0], data) )
if fid == 0x04:
buf = RecvWP.from_buffer_copy(data)
rospy.loginfo("FrameID %x(DesiredCommand) received.", fid)
rospy.loginfo("ID %d", buf.ID)
rospy.loginfo("RollingCounter %d", buf.RollingCounter)
rospy.loginfo("CheckSum %d", buf.CheckSum)
rospy.loginfo("wpcount %d", buf.wpcount)
rospy.loginfo("wp[0].x %0.0f", calc_bin2actual(buf.waypoints[0].x, 0.01, -21474836.48) )
rospy.loginfo("wp[0].y %0.0f", calc_bin2actual(buf.waypoints[0].y, 0.01, -21474836.48) )
rospy.loginfo("wp[0].vel %0.0f", calc_bin2actual(buf.waypoints[0].vel, 1./128, 0) )
wpmsg.header.stamp = rospy.get_rostime()
waypoints = []
for i in range(0,buf.wpcount):
wp = Waypoint()
wp.pose.pose.position.x = calc_actual2bin(buf.waypoints[i].x, 0.01, -21474836.48)
wp.pose.pose.position.y = calc_actual2bin(buf.waypoints[i].x, 0.01, -21474836.48)
wp.twist.twist.linear.x = calc_actual2bin(buf.waypoints[i].vel, 1./128, 0) / 3.6 # m/s
waypoints.append(wp)
print waypoints
wpmsg.waypoints = waypoints
else:
rospy.loginfo("FrameID invalid: %2x", fid)
exit(0)
# ctypes --- Pythonのための外部関数ライブラリ
# https://docs.python.org/ja/2.7/library/ctypes.html
# pythonでsocket通信を勉強しよう
# https://qiita.com/__init__/items/5c89fa5b37b8c5ed32a4
# PythonでバイナリをあつかうためのTips
# https://qiita.com/pashango2/items/5075cb2d9248c7d3b5d4
|
utils.py | """
Executable tools. This module defines the classes :class:`Option` and
:class:`BaseCommand` that allow the development of automatically executable
commands. A utility script ``egglib`` will load this module at runtime
and let the user run any of these commands as if they were independent
programs.
"""
__license__ = """
Copyright 2010-2012 Stephane De Mita, Mathieu Siol
This file is part of EggLib.
EggLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
EggLib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with EggLib. If not, see <http://www.gnu.org/licenses/>.
"""
import abc, sys, os, math, re, StringIO, glob, inspect, tempfile, multiprocessing, random, time, string
import egglib_binding, tools, fitmodel, data, wrappers, simul
########################################################################
class Option:
    """
    Defines the type, default value and requirements of all program
    options (except flags that are dealt with otherwise).
    """
    ####################################################################
    def __init__(self, name, doc, convert=str, default=None, requirements=None):
        """
        *name* is the name of the option, *doc* must be a string
        providing documentation. *convert* should be a function
        taking a string and returning a value of the appropriate type
        (can also be classes such as :class:`int`, :class:`float` or
        *lambda* expressions, providing that they take a string and
        process it the appropriate way), *default* is the default value
        (``None`` means that the option must be specified) and
        *requirements* is a list of requirements, each of them under the
        form of a function taking a possible option value and returning
        ``True`` if the option is valid. By default (``None``), no
        requirement is applied.
        """
        self.name = name
        self.doc = doc
        self.convert = convert
        self.default = default
        # Build a fresh list per instance: the previous signature used a
        # mutable default argument (requirements=[]), which is shared
        # between every call and therefore between every Option that
        # relied on the default.
        self.requirements = [] if requirements is None else requirements
########################################################################
class BaseCommand:
    """
    Abstract base class for executable commands. Several members and
    function have to be overriden to provide all information needed by
    the ``egglib`` script.
    """
    ####################################################################
    # Abstract base class (Python 2 style metaclass declaration).
    __metaclass__ = abc.ABCMeta
    ####################################################################
    def __new__(cls):
        # Initialize the two built-in flags here so they exist even if a
        # subclass overrides __init__ without calling up.
        o = object.__new__(cls)
        o._debug = False
        o._quiet = False
        return o
    ####################################################################
    options = []
    """
    This member must be overriden. List of :class:`~Option` instances.
    """
    flags = []
    """
    This member must be overriden. List of flag, each given as a tuple
    of two strings: ``(label, description)``.
    """
    brief = ""
    """
    This member must be overriden. One-line summary.
    """
    description = ""
    """
    This member must be overriden. Full description.
    """
    ####################################################################
    def get_debug(self): return self._debug
    def set_debug(self, value): self._debug = value
    debug = property(get_debug, set_debug, doc='Flag indicating whether \
full error messages should be returned.')
    ####################################################################
    def get_quiet(self): return self._quiet
    def set_quiet(self, value): self._quiet = value
    quiet = property(get_quiet, set_quiet, doc='Flag indicating whether \
information should be displayed in the standard output stream \
(if *quiet* is ``True``, information is not displayed).')
    ####################################################################
    @classmethod
    def doc(cls):
        """
        Build and return the formatted help text for this command
        (name, brief, description, options and flags).
        """
        # Recover the command name from the class repr; supports both
        # package (egglib.utils) and script (__main__) contexts.
        try:
            name = re.match('<class \'egglib\.utils\.(.+)\'>', str(cls)).group(1)
        except AttributeError:
            try:
                name = re.match('<class \'__main__\.(.+)\'>', str(cls)).group(1)
            except AttributeError:
                raise
        LINE = 72
        brief = cls.brief
        descr = tools.wrap(cls.description, 72, 0)
        string = """%s: %s
%s
General usage:
egglib %s OPTION1=VALUE OPTION2=VALUE ... FLAG1 FLAG2 ...
Options:
""" %(name, brief, descr, cls.__name__)
        # Column width: longest option name plus padding dots.
        L = max([len(option.name) for option in cls.options]) + 2
        for option in cls.options:
            substring = ' '
            substring += (option.name + ' ').ljust(L, '.')
            substring += ' '
            substring += option.doc
            if option.default != None:
                substring += ' (default: `%s`)' %option.default
            else:
                substring += ' (required)'
            string += tools.wrap(substring, LINE, L+5)
            string += '\n'
        string += '\nFlags (inactive by default):\n\n'
        # `quiet` and `debug` are available on every command.
        flags = cls.flags + [('quiet', 'Runs without console output'),
                ('debug', 'Show complete error messages')]
        L = max([len(name) for name,doc in flags]) + 2
        for name,doc in flags:
            name += ' '
            string += tools.wrap(' %s %s' %(name.ljust(L,'.'), doc), LINE, L+5)
            string += '\n'
        return string
    ####################################################################
    def run(self, *fargs, **kwargs):
        """
        Execute commands. *fargs* are flag arguments and *kwargs* are
        keyword arguments.
        """
        fargs = list(fargs)
        # Consume the two universal flags before validating the rest.
        self.quiet = False
        self.debug = False
        if 'quiet' in fargs:
            self.quiet = True
            fargs.remove('quiet')
        if 'debug' in fargs:
            self.debug = True
            fargs.remove('debug')
        kwargs = self._process_arguments(*fargs, **kwargs)
        self._run(*fargs, **kwargs)
    ####################################################################
    @abc.abstractmethod
    def _run(self, kwargs, fargs):
        # To be implemented by concrete commands; receives validated
        # flags and options from run().
        pass
    ####################################################################
    def process_cmdline_arguments(self, arguments):
        """
        Processes *arguments*; returns a ``(fargs, kwargs)`` tuple.
        Don't change anything of the instance (i.e. don't set anything).
        Don't check anything.
        """
        # imports arguments
        kwargs = {}
        fargs = []
        for argument in arguments:
            c = argument.count('=')
            # flag (options without keyword)
            if c==0:
                if argument in fargs:
                    raise ValueError, 'duplicated flag: `%s`' %argument
                fargs.append(argument)
            # normal options KEYWORD=VALUE
            elif c==1:
                key, val = argument.split('=')
                if key in kwargs:
                    raise ValueError, 'duplicated option: `%s`' %key
                kwargs[key] = val
            else:
                raise ValueError, 'invalid argument: `%s`' %argument
        return (fargs, kwargs)
    ####################################################################
    def _process_arguments(self, *fargs, **kwargs):
        """
        Processes *arguments*. Checks that arguments correspond to
        expected arguments and returns a dictionary with converted
        and value-checked *kwargs* and adds to it all other arguments
        (with default values).
        """
        # Reject unknown flags.
        for farg in fargs:
            if farg not in [name for name,doc in self.flags]:
                raise ValueError, 'invalid flag: `%s`' %farg
        # Convert and validate each provided option value.
        for key, val in kwargs.iteritems():
            try:
                option = [option for option in self.options
                    if option.name==key][0]
            except IndexError:
                raise ValueError, 'invalid option: `%s`' %key
            try:
                val = option.convert(val)
            except (ValueError, AttributeError):
                raise ValueError, 'invalid value for option `%s`: `%s`' %(key, val)
            for requirement in option.requirements:
                if not requirement(val):
                    raise ValueError, 'invalid value for option `%s`: `%s`' %(key, val)
            kwargs[key] = val
        # applies default values to keyword arguments (options)
        for option in self.options:
            if option.name not in kwargs:
                if option.default==None:
                    raise IOError, 'argument `%s` must be specified' %option.name
                kwargs[option.name] = option.default
        return kwargs
########################################################################
# Registry of all command classes made available to the egglib script;
# each command class appends itself after its definition.
commands = []
########################################################################
class abc_sample(BaseCommand):
    """
    Command generating random samples from the prior distribution of a
    demographic model and the corresponding sets of summary statistics,
    to be consumed later by `abc_fit` / `abc_compare`.
    """
    brief = 'Generates samples to fit Approximate Bayesian\
Computation models'
    description = 'This command draws a given number of random\
sample from the prior distribution and generates associated set of\
summary statistics. Note that the output file is overwritten without\
prompting.'
    options = [
        Option('dir',
            'Directory containing fasta files',
            str, '.', []),
        Option('ext',
            'Extension of files to import. If an empty string is\
passed (as in `ext=`), only files without extension (without any dot\
in their name) are processed',
            str, 'fas', []),
        Option('params',
            'Name of report file.',
            str, 'abc_sample.txt', []),
        Option('data',
            'name of main output file',
            str, 'abc_sample.out', []),
        Option('model',
            'Demographic model (use option `model?` for more\
information) (must be specified) If an argument is needed, it must be\
given as in the following example: `AM:2` (for the model AM)',
            str, '', []),
        Option('prior',
            'Prior distribution file (use option `prior?` for more\
information) (must be specified)',
            str, '', []),
        Option('stats',
            'Set of summary statistics (use option `stats?` for more\
information) (must be specified). If an argument is needed, it must be\
given as in the following example: `SFS:4` (for the statistic set SFS)',
            str, '', []),
        Option('post',
            'Number of points to sample',
            int, 10000, [lambda x: x>0]),
        Option('seeds',
            'Seeds of the random number generator. They must be\
given as two integer separated by a comma, as in `seeds=12345,67890`.\
By default, the random number generator is seeded from the system\
time',
            lambda x: map(int, x.split(',')), 0,
            [lambda x: len(x)==2]),
        Option('restart',
            'Complete an interrupted run. The arguments are read from\
the file and all other command line arguments are ignored. The argument\
must be the name of a `params` file (or an empty string to disable this\
function). Note that it is currently impossible to restore the random\
number generator status (meaning that the seeds will be lost and that\
the new run will be based on seeds based from system time)',
            str, '', []),
        Option('add_model',
            'The name of a Python module containing a model\
definition. Pass a module name (without dots or dashes), such as "MyModel"\
and create a file "MyModel.py" (with a py extension in addition of the\
module name. The class defining the model must have the same name\
("MyModel")',
            str, '',
            []),
        Option('max_threads',
            'Maximum number of threads to start for parallel\
computations. The maximum number of threads is the number of CPUs\
available. By default (max_threads=0), all CPUs are used',
            int, 0,
            [lambda x: x>=0])
    ]
    ####################################################################
    flags = [
        ('prior?', 'Show instructions for specifying priors'),
        ('model?', 'Show the list of available demographic models'),
        ('stats?', 'Show the list of available sets of summary stats'),
        ('force_positive', 'Forces all values drawn from priors to be >=0')
    ]
    ####################################################################
    def thread(self, howmany, pipe, index):
        # Worker process body: draw `howmany` parameter sets from the
        # prior, simulate data, compute statistics, and stream each
        # result through `pipe` as a (params, stats) string pair.
        try:
            for i in range(howmany):
                ps = self.prior[index].draw()
                ds = self.model.generate(self.config, ps, self.prior[index]._random) # ugly way to get random[i]
                self.stats.compute(ds, self.config)
                pipe.send((str(ps), ' '.join(map(str, self.stats.stats))))
        except BaseException, e:
            # Forward any exception to the parent as a 1-item list (the
            # parent detects the length-1 payload and re-raises).
            pipe.send([e])
        finally:
            pipe.close()
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Main entry point: optionally restore a previous run, validate
        prior/model/stats options, write the params report file and
        drive a pool of worker processes until `post` points have been
        sampled to the data file.
        """
        ### auto-reinitializes from restart file
        if kwargs['restart']!= '':
            if not os.path.isfile(kwargs['restart']):
                raise ValueError, 'cannot restart: %s parameter file not found' %kwargs['restart']
            # import arguments
            arguments_in = [ line.strip() for line in open(kwargs['restart']) ]
            arguments = []
            nloci = None
            fnames = []
            self.config = []
            obs = None
            # import sample configuration data (will not read again fasta files)
            for i in arguments_in:
                # gets number of loci
                m = re.match('number_of_loci=(\d+)', i)
                if m:
                    nloci = int(m.group(1))
                    continue
                # gets a locus configuration
                m = re.match('locus:(.+)=ls:(\d+),ns:(\d+),pop_ns:(.+),outgroup:(\d+)', i)
                if m:
                    fname = m.group(1)
                    ls = int(m.group(2))
                    ns = int(m.group(3))
                    try:
                        ns_pop = map(int, m.group(4).split(','))
                    except ValueError:
                        raise IOError, 'invalid line in %s: %s' %(kwargs['restart'], i.strip())
                    nso = int(m.group(5))
                    fnames.append(fname)
                    self.config.append((ns, ns_pop, nso, ls))
                    continue
                # gets observed values
                m = re.match('observed=(.+)', i)
                if m:
                    try:
                        obs = map(float, m.group(1).split(','))
                    except ValueError:
                        raise IOError, 'invalid line in %s: %s' %(kwargs['restart'], i.strip())
                    obs = ','.join(map(str, obs))
                    continue
                # gets prior
                m = re.match('prior=[\'"](.+)[\'"]', i)
                if m:
                    string = m.group(1)
                    # NOTE(review): str.replace is not in-place, so the
                    # two calls below discard their results -- likely a
                    # latent bug; left unchanged here.
                    string.replace('\r\n', '\n')
                    string.replace('\r', '\n')
                    arguments.append('prior=%%%s' %string)
                    continue
                # ignores the seeds
                if re.match('seeds=(.+)', i): continue
                # ignores the model parameters
                if re.match('parameters=(.+)', i): continue
                # ignores the number of threads
                if re.match('number_of_threads=(.+)', i): continue
                # uses other entries are command line arguments
                arguments.append(i)
            # Sanity checks on what was recovered from the restart file.
            if nloci==None: raise IOError, 'no number of loci given in %s' %kwargs['restart']
            if nloci!=len(self.config): raise IOError, 'incorrect number of loci specified in %s' %kwargs['restart']
            if len(set([i[2] for i in self.config]))!=1: raise IOError, 'inconsistent number of populations in loci specified in %s' %kwargs['restart']
            if obs==None: raise IOError, 'observed values not found in %s' %kwargs['restart']
            # process non-locus-related arguments
            fargs, kwargs = self.process_cmdline_arguments(arguments)
            self.quiet = False
            self.debug = False
            if 'quiet' in fargs:
                self.quiet = True
                fargs.remove('quiet')
            if 'debug' in fargs:
                self.debug = True
                fargs.remove('debug')
            kwargs = self._process_arguments(*fargs, **kwargs)
            kwargs['restart'] = ''
            if not os.path.isfile(kwargs['data']):
                raise ValueError, 'cannot restart: %s results file not found' %kwargs['data']
            restart = True
        else: restart = False
        ### processes the helper functionalities ###
        help_requested = False
        for arg in fargs:
            if arg=='prior?':
                help_requested = True
                self.help_prior()
            elif arg=='model?':
                help_requested = True
                self.help_model()
            elif arg=='stats?':
                help_requested = True
                self.help_stats()
            elif arg=='force_positive':
                pass
            else:
                raise ValueError, 'invalid argument for `abc_sample`: `%s`' %arg
        # if at least one helper was requested, don't actually do anything
        if help_requested:
            return None
        ### computes the number of threads ###
        try:
            np = multiprocessing.cpu_count()
        except NotImplementedError:
            nthreads = 1
        else:
            # Cap at max_threads when it is set and below the CPU count.
            if kwargs['max_threads']!=0 and kwargs['max_threads']<np:
                nthreads = kwargs['max_threads']
            else:
                nthreads = np
        ### creates the random objects (one per thread) ###
        if kwargs['seeds'] == 0:
            random = [egglib_binding.Random()]
        else:
            random = [egglib_binding.Random( *kwargs['seeds'] )]
        # Each extra generator is seeded from the previous one.
        for i in range(1, nthreads):
            random.append(egglib_binding.Random(
                random[-1].irand(999999999), random[-1].irand(999999999)))
        ### processes prior ###
        if kwargs['prior'] == '':
            raise ValueError, '`abc_sample`: prior must be specified'
        self.prior = None
        # if file, imports the file
        if kwargs['prior'][0]=='%':
            priorstring = kwargs['prior'][1:]
            # note: sys takes the argument as `raw`
            # the following is an ugly workaround (fixme)
            priorstring = priorstring.replace(r'\n', '\n')
            priorstring = priorstring.replace(r'\t', '\t')
        else:
            f = open(kwargs['prior'])
            priorstring = f.read()
            f.close()
        # tries all possible prior type
        for Prior in fitmodel.priors:
            tentativeprior = map(Prior, random)
            try:
                [i.parse(priorstring) for i in tentativeprior]
                self.prior = tentativeprior
            except IOError:
                pass
        # checks that one of the prior types fit the file or string
        if self.prior == None:
            raise ValueError, 'invalid prior string'
        # bounds prior if needed
        if 'force_positive' in fargs:
            [i.force_positive() for i in self.prior]
        ### process model ###
        if kwargs['add_model'] != '':
            fitmodel.add_model(kwargs['add_model'])
        if kwargs['model'] == '':
            raise ValueError, '`abc_sample`: model must be specified'
        # checks if recombination is to be included and options
        if ':' in kwargs['model']:
            label, options = kwargs['model'].split(':')
            options = options.split(',')
            try:
                options = map(int, options)
            except:
                raise ValueError, '`abc_sample`: invalid options in model: `%s`' %kwargs['model']
        else:
            label = kwargs['model']
            options = []
        # A trailing R on the model label enables recombination.
        if label[-1]=='R':
            label = label[:-1]
            recomb = True
        else:
            recomb = False
        # identifies the model
        self.model = None
        for Model in fitmodel.models:
            if Model.name == label:
                self.model = Model
                break
        if self.model == None:
            raise ValueError, '`abc_sample`: invalid model: `%s`' %kwargs['model']
        options.append(recomb)
        try:
            self.model = self.model(*options)
        except TypeError:
            raise ValueError, '`abc_sample`: invalid options for model: `%s`' %kwargs['model']
        #### process stats rule ###
        if kwargs['stats'] == '':
            raise ValueError, '`abc_sample`: stats must be specified'
        if ':' in kwargs['stats']:
            label, options = kwargs['stats'].split(':')
            options = options.split(',')
            try:
                options = map(int, options)
            except:
                raise ValueError, '`abc_sample`: invalid options in stats: `%s`' %kwargs['stats']
        else:
            label = kwargs['stats']
            options = []
        self.stats = None
        for Stats in fitmodel.summstats:
            if Stats.name == label:
                self.stats = Stats
                break
        if self.stats == None:
            raise ValueError, '`abc_sample`: invalid stats: `%s`' %kwargs['stats']
        try:
            self.stats = self.stats(*options)
        except TypeError:
            raise ValueError, '`abc_sample`: invalid options for stats: `%s`' %kwargs['stats']
        ### imports data (only if not restart) ###
        if not restart:
            # checking of dir argument:
            if not os.path.isdir(kwargs['dir']):
                raise ValueError, '`abc_sample`: invalid directory path: `%s`' %kwargs['dir']
            # gets list of file names
            pattern = kwargs['dir']+'/*'
            if len(kwargs['ext']): pattern += '.'+kwargs['ext']
            fnames = sorted(glob.glob(pattern))
            # import alignments
            dataset = fitmodel.Dataset()
            for i in fnames:
                align = data.Align(i, groups=True)
                dataset.add(align)
            # sorts alignments
            dataset.sort_aligns()
            # computes observed statistics
            self.config = dataset.config()
            self.stats.compute(dataset, self.config)
            obs = ','.join(map(str, self.stats.stats))
            del dataset
        # needs to draw a parameter to ensure the list of parameters if generated
        # the exported seeds will be after this draw
        ps = self.prior[0].draw()
        ds = self.model.generate(self.config, ps, random[0])
        ### saves parameters ###
        params = open(kwargs['params'], 'w')
        kwargs.update({'quiet': self.quiet, 'debug': self.debug, 'number_of_threads': str(nthreads)})
        for arg, value in kwargs.iteritems():
            if arg=='seeds':
                seeds = int(random[0].seed1()), int(random[0].seed2())
                value = '%d,%d' %seeds
            if arg=='prior':
                value = repr(self.prior[0].str())
            # Booleans are written as bare flags (only when True).
            if isinstance(value, bool):
                if value==True:
                    params.write(arg + '\n')
            else:
                params.write( '%s=%s\n' %(arg, value) )
            if arg=='model':
                params.write('parameters=%s\n' %(','.join(self.model.parameters)))
        params.write( 'number_of_loci=%d\n' %len(self.config))
        for fname, cfg in zip(fnames, self.config):
            params.write( 'locus:%s=ls:%d,ns:%d,pop_ns:%s,outgroup:%d\n' %(fname, cfg[3], cfg[0], ','.join(map(str,cfg[1])),cfg[2]))
        params.write( 'observed=%s\n' %obs )
        params.close()
        ### imports already sampled data
        done = 0
        if restart:
            # draw a paramSample (doesnt matter since the seeds are lost anyway)
            check = len(self.prior[0].draw())
            # counts points and check integrity
            f = open(kwargs['data'])
            cache = ''
            for line in f:
                try:
                    paramsi, statsi = line.split('#')
                except ValueError:
                    raise IOError, 'invalid data file (invalid line): %s' %kwargs['data']
                if len(statsi.split()) != len(obs.split(',')):
                    raise IOError, 'invalid data file (incorrect number of statistics): %s' %kwargs['data']
                if len(paramsi.split()) != check:
                    raise IOError, 'invalid data file (incorrect number of statistics): %s' %kwargs['data']
                done += 1
                cache = line
            f.close()
            # `line` leaks out of the for loop: last line of the file.
            if len(line) and line[-1] != '\n':
                raise IOError, 'invalid data file (last line miss a newline): %s' %kwargs['data']
            fstream = open(kwargs['data'], 'a')
        else:
            fstream = open(kwargs['data'], 'w')
        if not self.quiet:
            updater = tools.Updater(kwargs['post']-done)
            updater.refresh('%d points sampled of %d (%d%%) - $REMAINING left' %(done, kwargs['post'],100*done/kwargs['post']))
        # defines the thread lengths
        min_thread_size = 100
        sizes = []
        cake = kwargs['post']-done
        # Repeatedly hand out 75% of the remaining work split evenly,
        # until the per-thread share falls to min_thread_size.
        while True:
            share = int(0.75*cake/nthreads)
            if share<=min_thread_size: break
            sizes += [share]*nthreads
            cake -= share*nthreads
        if not len(sizes):
            sizes = [min_thread_size] * nthreads
        try:
            # initializes and starts the threads
            threads = []
            pipes = []
            for i in range(nthreads):
                conn1, conn2 = multiprocessing.Pipe(False)
                pipes.append(conn1)
                thread = multiprocessing.Process(
                    target=self.thread, args=(sizes.pop(0), conn2, i))
                thread.start()
                threads.append(thread)
            # main loop
            while True:
                time.sleep(0.01)
                for i in range(nthreads):
                    res = pipes[i].poll(0.1)
                    if not res and not threads[i].is_alive():
                        # Worker exhausted its share: start a new one.
                        if len(sizes): size = sizes.pop(0)
                        else: size = min_thread_size
                        conn1, conn2 = multiprocessing.Pipe(False)
                        pipes[i] = conn1
                        threads[i] = multiprocessing.Process(
                            target=self.thread, args=(size, conn2, i))
                        threads[i].start()
                    else:
                        if done==kwargs['post']: break
                        X = pipes[i].recv()
                        # A length-1 payload is an exception forwarded
                        # by the worker (see thread()).
                        if len(X)==1: raise X[0]
                        fstream.write('%s # %s\n' %X)
                        fstream.flush()
                        done+=1
                        if not self.quiet:
                            updater.refresh('%d points sampled of %d (%d%%) - $REMAINING left' %(
                                done, kwargs['post'], 100*done/kwargs['post']), 1)
                if done==kwargs['post']: break
        except KeyboardInterrupt:
            sys.exit()
        except Exception, e:
            raise
        finally:
            if not self.quiet:
                updater.wipe()
                updater.refresh('%d points sampled - $ELAPSED elapsed' %done, grain=0)
                updater.close()
            # Always reap worker processes, even on error.
            map(multiprocessing.Process.terminate, threads)
    ####################################################################
    def help_prior(self):
        """Print the help text describing how to specify priors."""
        # gets the list of prior types
        priors = [i.name for i in fitmodel.priors]
        print """Prior specification for `abc_sample`
There are two ways of specifying priors: by passing the name of a file
containing a prior specification string, and by passing this string
itself. The prior specification format depends on the prior type and is
given in the documentation of the `fitmodel` module of the EggLib python
package, and examples are given later in this document. Note that the
prior type is automatically detected from the string.
<INSERT HERE>
An example of prior specification for `PriorDiscrete` is:
0.8 0.00;0.05 0.0;0.5
0.1 0.05;0.10 0.0;0.5
0.1 0.00;0.05 0.5;5.0
It specifies an almost flat uniform prior from 0. to 0.1 on the first
axis and from 0. to 5.0 on the second axis, with an increased
probability for values with THETA lesser than 0.05 and ALPHA lesser than
0.5.
An example of prior specification for `PriorDumb` is:
U(0.,0.5) E(0.1)
This prior specifies a flat uniform prior distribution for the first
parameter and an exponential distribution with mean 0.1 for the second
parameter . Note that it is also possible to write the specification for
individual parameters on separated lines.
To pass a file name, use the `prior` option normally, as in:
egglib abc_sample prior=filename
To pass a raw string and avoid that it is mistaken for a file name, use
a % character as below:
egglib abc_sample prior="%0.9 0.00;0.10"
For prior specifications that require more than one line, use the line
separator `\\n` as below:
egglib abc_sample prior="%0.9 0.00;0.05\\n0.1 0.05;0.10"
""".replace('<INSERT HERE>', tools.wrap('Currently available prior types: ' + ', '.join(priors), 72, 0))
    ####################################################################
    def help_model(self):
        """Print the list of available demographic models."""
        # gets the list of models
        print """
Demographic models (with list of parameters) for `abc_sample`:
"""
        for model in fitmodel.models:
            print '=' * (len(model.name)+2)
            print ' %s' %model.name
            print '=' * (len(model.name)+2)
            print
            params = ' '.join(model.parameters)
            print '-' * (len(params)+2)
            print ' %s' %params
            print '-' * (len(params)+2)
            print model.__doc__
    ####################################################################
    def help_stats(self):
        """Print the list of available summary-statistic sets."""
        # gets the list of stats functions
        print """
Sets of summary statistics for `abc_sample`:
"""
        for stats in fitmodel.summstats:
            print '=' * (len(stats.name)+2)
            print ' %s' %stats.name
            print '=' * (len(stats.name)+2)
            print stats.__doc__
commands.append(abc_sample)
########################################################################
#class abc_fix(BaseCommand):
#
# """
# Documentation
# """
#
# ####################################################################
#
# brief = 'Fix the last line of a corrupted ABC sample file.'
#
# description = """This command generates a new file and removes the \
# last line in case it is invalid (truncated)."""
#
# options = [
# Option('input', 'Input ABC sample file', str, None, []),
# Option('output', 'Output ABC sample file', str, None, [])
# ]
#
# ####################################################################
#
# flags = []
#
# ####################################################################
#
# def _run(self, *fargs, **kwargs):
#
# fin = open(kwargs['input'])
# fout = open(kwargs['output'], 'w')
# cache = ''
# while True:
# line = fin.readline()
# if line=='': break # more fixing needed? doesnt it just remove the last line???
# fout.write(cache)
# cache = line
# fout.close()
# fin.close()
########################################################################
class abc_fit(BaseCommand):
    """
    Command performing the rejection-regression ABC fit of samples
    previously generated by `abc_sample`.
    """
    brief = 'Uses samples to fit models using Approximate Bayesian\
Computation'
    description = 'Performs rejection-regression method of Beaumont et\
al. Genetics 2002. Note: ensure that enough samples will pass the\
tolerance threshold.'
    options = [
        Option('input',
            'Name of data file to analyze. The file must be the\
parameter file generated by `abc_sample` (by default:\
`abc_sample.txt`)',
            str, None, []),
        Option('tolerance',
            'Proportion of samples to include in the local region\
(example: a value of 0.05 specifies that the 5% closest samples should\
be used).',
            float, None, [lambda x: x>=0]),
        Option('transform',
            'Data transformation to apply. Accepted values are `none`, `log` and `tan`',
            str, 'none', [lambda x: x in set(['none', 'tan', 'log'])]),
        Option('output',
            'Name of the output file.',
            str, 'abc_fit.out', [])
    ]
    ####################################################################
    flags = []
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Parse the abc_sample config file, run the rejection step on the
        sampled data and the regression step, producing a posterior
        file.
        """
        # imports the data
        params = {}
        f = open(kwargs['input'])
        for line in f:
            if line.count('=') == 1:
                k,v = line.strip().split('=')
                params[k] = v
        # The config file must provide these four entries.
        if 'prior' not in params:
            raise IOError, 'invalid ABC config file: %s' %kwargs['input']
        if 'observed' not in params:
            raise IOError, 'invalid ABC config file: %s' %kwargs['input']
        if 'data' not in params:
            raise IOError, 'invalid ABC config file: %s' %kwargs['input']
        if 'parameters' not in params:
            raise IOError, 'invalid ABC config file: %s' %kwargs['input']
        observed = map(float, params['observed'].split(','))
        # gets number of parameters from the prior string
        prior = None
        for Prior in fitmodel.priors:
            xprior = Prior()
            try:
                # Unquote the prior string and undo the escaping applied
                # when the params file was written.
                sprior = re.search('[\'"](.+)[\'"]', params['prior']).group(1)
                sprior = sprior.replace(r'\r\n', '\n')
                sprior = sprior.replace(r'\n', '\n')
                sprior = sprior.replace(r'\r', '\n')
                sprior = sprior.replace(r'\t', '\t')
            except AttributeError:
                raise IOError, 'invalid prior in this ABC config file: %s' %kwargs['input']
            try: xprior.parse(sprior)
            except fitmodel.PriorParseError:
                continue
            else:
                prior = xprior
                break
        if prior==None: raise IOError, 'invalid prior in this ABC config file: %s' %kwargs['input']
        np = prior.number_of_params()
        if not self.quiet:
            print 'Number of parameters: %d' %np
        param_names = params['parameters'].split(',')
        if len(param_names)!=np:
            raise ValueError, 'abc_fit: number of parameter names given in {0} is incorrect'.format(kwargs['input'])
        # initalizes the ABC instance
        ABC = egglib_binding.ABC()
        ABC.number_of_statistics(len(observed))
        for i,v in enumerate(observed):
            ABC.obs(i,v)
        if not self.quiet:
            print 'Number of statistics: %d' %len(observed)
        # 1st step
        ABC.add_fname(params['data'], np)
        ABC.get_threshold(kwargs['tolerance'])
        if not self.quiet:
            for i,v in enumerate(observed):
                print 'Statistic %d: %f (%f)' %(i+1, v, ABC.sd(i))
            print 'Number of points: %d' %ABC.number_of_samples()
            print 'Euclidean distance threshold: %f' %ABC.threshold()
        # 2nd step (rejection)
        handle, tmpfile = tempfile.mkstemp()
        try:
            os.close(handle)
            acc = ABC.rejection(tmpfile, )
            if not self.quiet:
                print 'Number of accepted points: %d' %acc
            # Map the transform option (validated by Option) to the
            # binding's constants.
            if kwargs['transform']=='none':
                transform = ABC.NONE
            if kwargs['transform']=='tan':
                transform = ABC.TAN
            if kwargs['transform']=='log':
                transform = ABC.LOG
            # 3rd step (regression)
            ABC.regression(tmpfile, kwargs['output'], transform, ' '.join(params['parameters'].split(',')))
            if not self.quiet:
                print 'Posterior file %s generated' %kwargs['output']
        finally:
            # Always remove the temporary rejection file.
            if os.path.isfile(tmpfile): os.remove(tmpfile)
commands.append(abc_fit)
########################################################################
class abc_compare(BaseCommand):
    """
    Command comparing several models fitted with the same summary
    statistics, reporting the proportion of accepted points per model.
    """
    ####################################################################
    brief = 'Compares several models.'
    description = """The same set of summary statistics must have been\
used during simulations. This command expects a list of config files\
that must all present the same statistics but may have been generated\
under different models, or models with differing constraints.\
This command will display the proportion of accepted points from each\
file in the console (and ignore the `quiet` arguments). Ref.: Fagundes\
et al. PNAS 2007."""
    options = [
        Option('input', 'One or several ABC config files, separated by\
commas when more than one.', lambda x: x.split(','), None, []),
        Option('tolerance',
            'Proportion of samples to include in the local region\
(example: a value of 0.05 specifies that the 5% closest samples should\
be used).',
            float, None, [lambda x: x>=0])
    ]
    ####################################################################
    flags = []
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Run a joint rejection over all input data files and print, per
        model, the proportion of accepted points.
        """
        observed = set()
        models = []
        for fname in kwargs['input']:
            params = {}
            f = open(fname)
            for line in f:
                if line.count('=') == 1:
                    k,v = line.strip().split('=')
                    params[k] = v
            if 'observed' not in params:
                raise IOError, 'invalid ABC config file: %s' %fname
            # Collect observed stats as a set to verify consistency.
            observed.add( tuple(map(float, params['observed'].split(','))) )
            if 'data' not in params:
                raise IOError, 'invalid ABC config file: %s' %fname
            if 'parameters' not in params:
                raise IOError, 'invalid ABC config file: %s' %fname
            models.append((params['data'], len(params['parameters'].split(','))))
        # All config files must share the exact same observed stats.
        if len(observed) != 1:
            raise IOError, 'inconsistent observed statistics over posterior files - aborting'
        observed = observed.pop()
        print 'Number of files to process: %d' %len(models)
        handle, tmpfile = tempfile.mkstemp()
        try:
            ABC = egglib_binding.ABC()
            ABC.number_of_statistics(len(observed))
            for i,v in enumerate(observed): ABC.obs(i,v)
            for fname,np in models: ABC.add_fname(fname, np)
            ABC.get_threshold(kwargs['tolerance'])
            print 'Total number of points: %d' %ABC.number_of_samples()
            accept = ABC.rejection(tmpfile, True)
            print 'Number of accepted points: %d' %accept
            if accept==0:
                print ' (You should increase the tolerance.)'
            else:
                print 'Model probabilities:'
                res = [0] * len(models)
                f = open(tmpfile)
                # Each accepted line is tagged with its 1-based source
                # file index as [n]; count acceptances per model.
                for line in f:
                    match = re.match('\[(\d+)\]', line)
                    if not match:
                        raise IOError, 'problem with ABC rejection output'
                    index = int(match.group(1))-1
                    if index <0 or index>=len(res):
                        raise IOError, 'problem with ABC rejection output'
                    res[index] += 1
                T = sum(res)
                if T!=accept:
                    raise IOError, 'problem with ABC rejection output'
                for i, v in enumerate(kwargs['input']):
                    print ' %s\t%f' %(v, 1.*res[i]/T)
        finally:
            # Always remove the temporary rejection file.
            if os.path.isfile(tmpfile): os.remove(tmpfile)
commands.append(abc_compare)
########################################################################
class abc_bin(BaseCommand):
    """
    Discretize a continuous posterior sample.

    Reads the output of `abc_fit`, bins the sample jointly over all
    parameters and writes a file in the `PriorDiscrete` format, which
    can serve as a prior for a further round of simulations.
    """
    ####################################################################
    class _ranges:
        # Converter/holder for the `ranges` option: parses a
        # comma-separated list of `min:max` pairs, one per parameter.
        ################################################################
        def __init__(self, string):
            if not isinstance(string, basestring):
                raise ValueError
            self._items = []
            if len(string):
                bits = string.split(',')
                for bit in bits:
                    value1, value2 = bit.split(':')
                    self._items.append((float(value1), float(value2)))
        ################################################################
        def __str__(self):
            return ','.join(['%f:%f' %(a,b) for a,b in self._items])
        ################################################################
        def __iter__(self):
            # yields (min, max) tuples in parameter order
            for x,y in self._items:
                yield x,y
        ################################################################
        def __len__(self):
            return len(self._items)
    ####################################################################
    brief = 'Binarizes a posterior distribution'
    description = 'Uses the output file of the `abc_fit` command to\
binarize the posterior and generate a "PriorDiscrete"-compatible file.\
The `quiet` argument is ignored.'
    options = [
        Option('input',
            'Name of data file to analyze. The file must be the\
output file generated by `abc_fit` (by default: `abc_fit.out`)',
            str, None, []),
        Option('bins',
            'Number of categories for all parameters. If specified,\
the argument `parambins` overwrites this argument',
            int, 12, [lambda x: x>0]),
        Option('parambins',
            'Specifies specific number of categories for one or more\
parameters. The argument must be a list of integers (separated by\
commas) giving the number of categories for all parameters',
            lambda x: map(int,x.split(',')), [], []),
        Option('ranges',
            'Specifies the prior ranges for one or more parameters.\
The argument must be a list of ranges separated by commas (such as\
`min:max,min:max,min:max,min:max`) giving minimum and maximum value for\
all parameters (if values lie outside of ranges, and error will be\
generated)',
            _ranges, _ranges(''), []),
        Option('output',
            'Name of the output file',
            str, 'abc_bin.out', [])
    ]
    ####################################################################
    flags = [
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Bin the imported posterior and write a `PriorDiscrete` file."""
        params, data = fitmodel.import_posterior(kwargs['input'])
        # creates the binner
        bin = tools.Bin(data)
        # defines numbers of categories (a non-empty `parambins`
        # overrides the global `bins` value, one entry per parameter)
        ncat = [kwargs['bins']] * len(data)
        if len(kwargs['parambins'])!=0:
            if len(kwargs['parambins'])!=len(ncat):
                raise IOError, 'invalid number of items in parambins argument'
            ncat = kwargs['parambins']
        # defines the specified ranges
        ranges = [(None,None)] * len(data)
        if len(kwargs['ranges'])!=0:
            if len(kwargs['ranges'])!=len(ranges):
                raise IOError, 'invalid number of items in ranges argument'
            for i,(value1,value2) in enumerate(kwargs['ranges']):
                if value2<=value1:
                    raise ValueError, 'invalid range for parameter %d' %(i+1)
                # the requested range must contain all observed values
                if value1 > min(data[i]) or value2 < max(data[i]):
                    raise ValueError, 'invalid range for parameter %d [empirical range: %f->%f]' %(i+1, min(data[i]), max(data[i]))
                ranges[i] = (value1, value2)
        # performs binarization: each pass slices every existing bin
        # along one dimension, so bins multiply across parameters
        posterior = [bin]
        for i in range(len(data)):
            if not self.quiet:
                print '-- binarizing dimension %d --' %(i+1)
            posterior = [
                tools.Bin.slice(bin, i, ncat=ncat[i], bot=ranges[i][0],
                    top=ranges[i][1]) for bin in posterior ]
            posterior = reduce(lambda a,b: a+b, posterior, [])
        # prepares the prior formalization (largest bins first)
        posterior.sort(lambda x,y: cmp(len(y), len(x)))
        prior = fitmodel.PriorDiscrete()
        N = sum(map(len, posterior))   # total number of points
        acc = 0   # running total of bin frequencies (should reach ~1)
        c = 0     # number of non-empty bins
        for bin in posterior:
            n = 1. * len(bin) / N
            acc += n
            if n!=0: c+=1
            prior.add(n, *bin.ranges())
        if not self.quiet:
            print 'total frequency: %f (expects 1.)' %acc
            print 'total number of bins: %d' %len(prior)
            print 'number of non-empty bins: %d' %c
        # saves the prior
        f = open(kwargs['output'], 'w')
        f.write(str(prior.str()))
        f.close()
commands.append(abc_bin)
########################################################################
class abc_statsmarg(BaseCommand):
    """
    Print marginal statistics of a fitted posterior.

    For each parameter of an `abc_fit` output file: sample size, mean,
    standard deviation and a set of empirical quantiles; then, when
    there are at least two parameters, their pairwise correlations.
    """
    ####################################################################
    brief = 'Marginal properties of a posterior distribution'
    description = 'Uses the output file of the `abc_fit` command and\
computes properties of the marginal distribution of each parameter.\
Results are displayed in the console. The argument `quiet` is ignored.'
    options = [
        Option('input',
            'Name of data file to analyze. The file must be the\
output file generated by `abc_fit` (by default: `abc_fit.out`)',
            str, None, [])
    ]
    ####################################################################
    flags = [
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Compute and display marginal statistics for each parameter."""
        # imports the data
        params, data = fitmodel.import_posterior(kwargs['input'])
        print 'Analyzing data in %s' %kwargs['input']
        print '------------------------'
        for i in range(len(data)):
            print params[i]
            print '------------------------'
            print ' observations: %d' %len(data[i])
            m, V, sd, se = tools.stats(data[i])
            print ' average: %f' %m
            print ' std deviation: %f' %sd
            values = sorted(data[i])
            # empirical quantile: smallest value such that at least a
            # proportion p of the sorted sample lies at or below it
            def quantile(p):
                return values[int(math.ceil(p*len(values)))-1]
            print '--- quantiles ----------'
            print ' min: %f' %values[0]
            print ' 1%%: %f' %quantile(0.01)
            print ' 5%%: %f' %quantile(0.05)
            print ' 10%%: %f' %quantile(0.10)
            print ' 50%%: %f' %quantile(0.50)
            print ' 90%%: %f' %quantile(0.90)
            print ' 95%%: %f' %quantile(0.95)
            print ' 99%%: %f' %quantile(0.99)
            print ' max: %f' %values[-1]
            print '------------------------'
        # pairwise correlations (only meaningful with >= 2 parameters)
        if len(data)>1:
            print 'Parameter correlation'
            print '------------------------'
            for i in range(len(data)):
                for j in range(i+1, len(data)):
                    print '%s - %s ' %(params[i],params[j])
                    r, r2, a = tools.correl(data[i], data[j])
                    print ' r : %f' %r
                    print ' r2: %f' %r2
                    print ' a : %f' %a
                    print '------------------------'
commands.append(abc_statsmarg)
########################################################################
class abc_statsdisc(BaseCommand):
    """
    Summarize a discretized posterior distribution.

    Reports the number of (non-empty) categories, the MAP estimate
    (midpoint of the single most frequent category) and the bounds of
    a credible interval built from the highest-frequency categories.
    """
    ####################################################################
    brief = 'Properties of a discretized posterior distribution'
    description = 'The posterior must have been discretized using the\
command `abc_bin`. The joint properties of distribution are computed\
and displayed in the console. The argument `quiet` is ignored.'
    options = [
        Option('input',
            'Name of data file to analyze. The file must be of the\
`PriorDiscrete` form as the output file of `abc_bin` (by default:\
`abc_bin.out`), but any `PriorDiscrete` data is supported',
            str, None, []),
        Option('q',
            'Which credible interval to output (by default, the\
bounds of the 95% density set are presented)',
            float, 0.95, [lambda x:x>=0, lambda x:x<=1])
    ]
    ####################################################################
    flags = [
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Load the `PriorDiscrete` file and print its joint properties."""
        # imports the data
        data = fitmodel.PriorDiscrete()
        f = open(kwargs['input'])
        string = f.read()
        f.close()
        try:
            data.parse(string)
        except IOError:
            raise IOError, 'cannot import discrete data from `%s`' %kwargs['input']
        del string
        # computes the number of non-empty classes and of points
        P = 0   # total frequency mass over all categories
        K = 0   # number of non-empty categories
        for p, bounds in data:
            P+=p
            if p>0:
                K+=1
        # output main statistics
        print 'Analyzing data in %s' %kwargs['input']
        print 'Sum of class frequencies: %f' %P
        print 'Number of categories: %d' %len(data)
        print 'Number of non-empty categories: %d' %K
        # finds the best category (MAP)
        # doesn't assume the prior is sorted
        # takes care that no ex-aequo
        best = 0., [None]
        for p, bounds in data:
            if p==best[0]:
                best[1].append(bounds)
            if p>best[0]:
                best = p, [bounds]
        if best[0]==0:
            raise ValueError, 'best posterior category has 0 frequency'
        if len(best[1])>1:
            raise ValueError, 'several categories of the posterior have the maximum frequency %f' %best[0]
        print 'Midpoint of the best category (MAP estimates):'
        for i,(b,t) in enumerate(best[1][0]):
            m = b + 0.5 * (t-b)
            print ' parameter %d: %f' %(i+1, m)
        # finds the bounds of the q density set: accumulate categories
        # from most to least frequent until a proportion q of the total
        # mass is covered, widening the per-parameter limits as we go
        bins = sorted([i for i in data], lambda a,b: cmp(b[0], a[0]))
        acc = 0
        lims = []
        # initialize limits from the most frequent category
        for i in bins[0][1]:
            lims.append(list(i))
        for p, bounds in bins:
            for i,(bot,top) in enumerate(bounds):
                if bot < lims[i][0]:
                    lims[i][0] = bot
                if top > lims[i][1]:
                    lims[i][1] = top
            acc += p
            if acc >= kwargs['q'] * P:
                break
        print 'Limits of the %f credible interval:' %kwargs['q']
        for i,(bot,top) in enumerate(lims):
            print ' parameter %d: %f, %f' %(i+1, bot, top)
commands.append(abc_statsdisc)
########################################################################
class abc_plot1D(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Plots marginal distributions from a discretized posterior'
description = 'The posterior must have been discretized using the\
command `abc_bin`. The command will plot the marginal distribution of\
of either one (specified) or all parameters as png (portable network\
graphics) files. The graphics will be histogram, when the class limits\
will be fully defined by the discretization step accomplished\
previously. The Python module matplotlib is needed to use this command.'
options = [
Option('input',
'Name of data file to analyze. The file must be of the\
`PriorDiscrete` form as the output file of `abc_bin` (by default:\
`abc_bin.out`), but any `PriorDiscrete` data is supported',
str, None, []),
Option('index',
'Which parameter to plot. By default (and with an empty\
argument), all parameters are plotted',
lambda x: int(x), '', [lambda x: x>0]),
Option('params',
'Name(s) to use in the graphic axes. It must match the\
number of parameter(s) to be plotted. By default, the index of\
parameters will be used',
lambda x: x.split(','), []),
Option('root',
'Root name of output files. The template output file\
name is `<root>_<param>.png` where <root> is the value of this argument\
and <param> is the name of the parameter being plotted',
str, 'abc_plot', [])
]
####################################################################
flags = [
]
####################################################################
def _run(self, *fargs, **kwargs):
# checks
try:
from matplotlib import pyplot
except ImportError:
raise ImportError, 'this command requires that your Python installation contains the matplotlib module'
# imports the data
posterior = fitmodel.PriorDiscrete()
f = open(kwargs['input'])
string = f.read()
f.close()
try:
posterior.parse(string)
except IOError:
raise IOError, 'plot1D: cannot import discrete data from `%s`' %kwargs['input']
del string
# determines which parameter(s) to plot
if kwargs['index'] != '':
if kwargs['index'] > posterior.number_of_params():
raise ValueError, 'abc_plot1D: invalid parameter index: %d (max: %d)' %(kwargs['index'], posterior.number_of_params())
indices = [kwargs['index']-1]
else:
indices = range(posterior.number_of_params())
# iterates
if len(kwargs['params'])!=0:
if len(kwargs['params'])!=len(indices):
raise ValueError, 'abc_plot1D: invalid number of parameter labels'
for index in indices:
# collects the data
classes = {}
for p, bounds in posterior:
if p==0.: continue
key = bounds[index]
if key not in classes:
classes[key] = 0.
classes[key] += p
# transforms the data to a list
# sorts and checks that no overlap
classes = [(b,p) for b,p in classes.iteritems()]
classes.sort(lambda x,y: cmp(x[0][0], y[0][0]))
for i in range(1, len(classes)):
if classes[i-1][0][1] > classes[i][0][0]:
raise ValueError, 'posterior in `%s` has overlapping classes' %kwargs['input']
# prepares the plot data
left = [b[0] for b,p in classes]
height = [p for b,p in classes]
width = [b[1]-b[0] for b,p in classes]
# gets label
if len(kwargs['params']):
label = kwargs['params'][index]
else:
label = 'Parameter %d' %(index+1)
# plots and saves
pyplot.bar(left, height, width=width, color='0.2')
pyplot.xlabel(label)
pyplot.ylabel('frequency')
fname = '%s_%s.png' %(kwargs['root'], index+1)
pyplot.savefig(fname)
pyplot.clf()
if not self.quiet:
print ' picture `%s` saved' %fname
commands.append(abc_plot1D)
########################################################################
class abc_psimuls(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Performs posterior simulations'
description = 'This command generates a defined number of a user-\
defined list of statistics for one locus. A different set of parameter\
values is randomly drawn for each repetition. Simulations are\
conditioned on the number(s) of sequences and alignment length(s)\
passed as arguments. The command generates a comma-separated table\
without header that is displayed in the console. `None` denote\
unavailable statistics (such as those that are undefined because of the\
lack of polymorphism). The argument `quiet` is ignored.'
options = [
Option('model',
'Model to use for simulation. This argument corresponds\
to the model specification in the `abc_sample` command',
str, None, []),
Option('prior',
'Distribution of parameters. This argument corresponds to\
the prior specification in the `abc_sample` command. Note that\
binarized posterior files generated by the `abc_bin` command are\
compatible.',
str, None, []),
Option('ns',
'Sample configuration: gives the number of sequence\
sampled in one or more subpopulations. Each value must be an integer\
and, when more than one, values must be separated by commas. Each locus\
must contain at least two samples (in any subpopulation)',
lambda x: map(int, x.split(',')), None,
[lambda x: sum(x)>=2]),
Option('ls',
'Sample configuration: gives the number of sites to\
simulate. The argument must be an integer',
lambda x: int(x), None, [lambda x: x>=1]),
Option('nrepets',
'Number of repetitions to perform',
int, None, [lambda x: x>0]),
Option('stats',
'Labels of the statistics to compute. The statistic names\
correspond to the arguments of the EggLib function `polymorphism` (note\
that some statistics are only available when more than one population\
is defined and/or when EggLib\'s core was linked to the Bio++ libraries\
at compile-time). The statistics are printed to the console in the\
order given by this option, one line per simulation',
lambda x: x.split(','), None, []),
Option('seeds',
'Seeds of the random number generator. They must be\
given as two integer separated by a comma, as in `seeds=12345,67890`.\
By default, the random number generator is seeded from the system\
time',
lambda x: map(int, x.split(',')), 0,
[lambda x: len(x)==2]),
Option('add_model',
'The name of a file containing a model definition',
str, '',
[])
]
####################################################################
flags = [
]
####################################################################
def _run(self, *fargs, **kwargs):
# defines the random object
if kwargs['seeds'] == 0:
random = egglib_binding.Random()
else:
random = egglib_binding.Random( *kwargs['seeds'] )
### processes prior ###
if kwargs['prior'] == '':
raise ValueError, '`abc_sample`: prior must be specified'
prior = None
# if file, imports the file
if kwargs['prior'][0]=='%':
priorstring = kwargs['prior'][1:]
# note: sys takes the argument as `raw`
# the following is an ugly workaround (fixme)
priorstring = priorstring.replace(r'\n', '\n')
priorstring = priorstring.replace(r'\t', '\t')
else:
f = open(kwargs['prior'])
priorstring = f.read()
f.close()
# tries all possible prior type
for Prior in fitmodel.priors:
tentativeprior = Prior(random)
try:
tentativeprior.parse(priorstring)
prior = tentativeprior
except IOError:
pass
# checks that one of the prior types fit the file or string
if prior == None:
raise ValueError, 'invalid prior string'
# checks that one of the prior types fit the file or string
if prior == None:
raise ValueError, 'invalid prior string'
### process model ###
if kwargs['add_model'] != '':
fitmodel.add_model(kwargs['add_model'])
if kwargs['model'] == '':
raise ValueError, '`abc_psimuls`: model must be specified'
# checks if recombination is to be included and options
if ':' in kwargs['model']:
label, options = kwargs['model'].split(':')
options = options.split(',')
try:
options = map(int, options)
except:
raise ValueError, '`abc_psimuls`: invalid options in model: `%s`' %kwargs['model']
else:
label = kwargs['model']
options = []
if label[-1]=='R':
label = label[:-1]
recomb = True
else:
recomb = False
# identifies the model
model = None
for Model in fitmodel.models:
if Model.name == label:
model = Model
break
if model == None:
raise ValueError, '`abc_psimuls`: invalid model: `%s`' %kwargs['model']
options.append(recomb)
try:
model = model(*options)
except TypeError:
raise ValueError, '`abc_psimuls`: invalid options for model: `%s`' %kwargs['model']
# makes a pseudodataset of the configuration list
config = [(sum(kwargs['ns']), kwargs['ns'], 0, kwargs['ls'])]
# performs the required number of simulations
for i in range(kwargs['nrepets']):
# generates an alignment
dataset = model.generate( config, prior.draw(), random )
data,bidon = list(dataset.iterator(config))[0]
# adds the group labels
c=0
for p, ns in enumerate(kwargs['ns']):
for i in range(ns):
data.group(c, p)
c+=1
if len(data)==0:
stats = [None] * len(kwargs['stats'])
else:
pol = data.polymorphism()
stats = []
for stat in kwargs['stats']:
if stat not in pol:
raise ValueError, 'abc_psimuls: invalid statistic label: %s' %stat
X = pol[stat]
if stat in ('thetaW', 'Pi') and pol['S']:
X = X*pol['S']/kwargs['ls']
stats.append(X)
stats = map(str, stats)
print ','.join(stats)
commands.append(abc_psimuls)
########################################################################
class abc_plot2D(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Plots discretized posterior on a two-dimensional plan'
description = 'The posterior must have been discretized using the\
command `abc_bin`. The command will plot the (marginal) distribution of\
of two specified parameters as a png (portable network\ graphics) file.\
The graphics will be a two-dimensional density plot, where the class\
limits will be fully defined by the discretization step accomplished\
previously. The distribution should be called `marginal` if the model\
has more than two parameters and will be the full posterior\
distribution (with all information visible) if the model has two\
parameters.'
options = [
Option('input',
'Name of data file to analyze. The file must be of the\
`PriorDiscrete` form as the output file of `abc_bin` (by default:\
`abc_bin.out`), but any `PriorDiscrete` data is supported',
str, None, []),
Option('index1',
'Index of the parameter to plot on the first axis',
int, None, [lambda x:x>0]),
Option('index2',
'Index of the parameter to plot on the second axis',
int, None, [lambda x:x>0]),
Option('param1',
'Name of the parameter to use as first axis label',
str, '', []),
Option('param2',
'Name of the parameter to use as second axis label',
str, '', []),
Option('output',
'Name of the output file. The default corresponds to\
`abc_plot_PARAM1-PARAM2.png`',
str, '', [])
]
####################################################################
flags = [
('CI', 'displays the 95% credible interval as colored region'),
]
####################################################################
def _run(self, *fargs, **kwargs):
# checks
try:
from matplotlib import pyplot
except ImportError:
raise ImportError, 'this command requires that your Python installation contains the matplotlib module'
# imports the data
posterior = fitmodel.PriorDiscrete()
f = open(kwargs['input'])
string = f.read()
f.close()
try:
posterior.parse(string)
except IOError:
raise IOError, 'cannot import discrete data from `%s`' %kwargs['input']
del string
# determines which parameter(s) to plot
if kwargs['index1'] > posterior.number_of_params():
raise ValueError, 'abc_plot2D: invalid parameter index: %d (max: %d)' %(kwargs['index1'], posterior.number_of_params())
index1 = kwargs['index1']-1
if kwargs['index2'] > posterior.number_of_params():
raise ValueError, 'abc_plot2D: invalid parameter index: %d (max: %d)' %(kwargs['index2'], posterior.number_of_params())
index2 = kwargs['index2']-1
# collects the data
classes = {}
for p, bounds in posterior:
key = ((bounds[index1], bounds[index2]))
if key not in classes:
classes[key] = 0.
classes[key] += p
# extracts limits and checks that joint classes
xbins = sorted(set([i[0] for i in classes]), lambda x,y: cmp(x[0],y[0]))
for i in range(1, len(xbins)):
if xbins[i-1][1]!=xbins[i][0]:
raise ValueError, 'missing categories in `%s`' %kwargs['input']
ybins = sorted(set([i[1] for i in classes]), lambda x,y: cmp(x[0],y[0]))
for i in range(1, len(ybins)):
if ybins[i-1][1]!=ybins[i][0]:
raise ValueError, 'missing categories in `%s`' %kwargs['input']
## creates matrix
# matrix = [[ classes[(x,y)] for y in ybins ] for x in xbins]
# determines color bins
if 'CI' in fargs:
colors = zip([ 0.95, 0.99, 1.01],
[ 'r', 'y', 'w'])
else:
colors = zip([ 0.05, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 1.01],
['0.', '.2', '.4', '.6', '.8', '.9', '.95', '1.0'])
lst = [i for i in classes.items()]
lst.sort(lambda x,y: cmp(y[1], x[1]))
total = sum([v for k,v in lst])
lims = [(i*total,j) for
i,j in colors]
acc = 0
for (k,v) in lst:
acc+= (1.*v/total)
if acc >= lims[0][0]:
del lims[0]
classes[k] = lims[0][1]
# plots
for x1,x2 in xbins:
for y1,y2 in ybins:
k=0
color = classes[((x1,x2),(y1,y2))]
pyplot.broken_barh([(x1, x2-x1)], (y1, y2-y1), color=color)
# saves
if len(kwargs['param1']):
label1 = kwargs['param1']
else:
label1 = 'Parameter %d' %(index1+1)
if len(kwargs['param2']):
label2 = kwargs['param2']
else:
label2 = 'Parameter %d' %(index2+1)
if kwargs['output'] == '':
output = 'abc_plot_%s_%s.png' %(index1+1, index2+1)
else:
output = kwargs['output']
pyplot.xlabel(label1)
pyplot.ylabel(label2)
pyplot.savefig(output)
commands.append(abc_plot2D)
########################################################################
class analyzer(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Extended port of samplestat'
description = 'This command reads `ms` output and computes several\
statistics. Results are presented in the console in a format similar to\
the `samplestat` output, one simulation per line (although the number,\
and idenity and order of statistics are different). To analyse data\
from standard input with default options, you have to type:\
`egglib analyzer input=`. This command always displays results in the\
standard output stream; the `quiet` option is ignored'
options = [
Option('input',
'Name of the `ms` output file to read. By default (empty\
string), data are read from standard input)',
str, '', ''),
Option('config',
'Sample configuration. In case of a structured sample,\
this option gives the number of samples from each population, each\
separated by a comma, as in `config=20,20,18`. For a unique and\
non-subdivised population, a single integer should be passed',
lambda x: map(int, x.split(',')), None,
[lambda x: min(x)>=0]),
Option('mis',
'Misorientation rate (if >0, reverse randomly the\
assignation ancestral/derived with the probability)',
float, 0., [lambda x: x>=0, lambda x: x<=1]),
Option('stats',
'Specifies the list of stats (and the order) to compute.\
The list must be comma-separated and contain only names of valid\
statistics that can be computed from the `ms` data passed. Still,\
invalid statistic will be silently skipped. Refer to the documentation\
of EggLib\'s `Align.polymorphism` method for details about the\
statistics. By default, a pre-defined list of statistics is used',
lambda x: x.split(','), [], [])
]
####################################################################
flags = [
]
####################################################################
def _run(self, *fargs, **kwargs):
# defines the list of statistics
if kwargs['stats'] != []:
stats = kwargs['stats']
else:
stats= ['D', 'tHnew_gene', 'tW_gene', 'H', 'Z', 'He', 'K',
'tH_gene', 'S', 'Pi_gene', 'Fst', 'Gst', 'Hst',
'Kst', 'Snn']
# imports the data
if kwargs['input'] == '':
string = sys.stdin.read()
else:
f = open(kwargs['input'])
string = f.read()
f.close()
# parses the individual simulations
items = string.split('//')
for item in items[1:]:
# converts to Align
item = '//\n' + item.lstrip()
dataMatrix = egglib_binding.Ms.get(item, sum(kwargs['config']))
align = egglib_binding.Convert.align(dataMatrix)
Align = data.Align()
Align._object = align
# applies group labels
if sum(kwargs['config']) != len(Align):
raise ValueError, 'sum of `config` argument doesn\'t match with the lenght of simulations'
acc = 0
for i, ns in enumerate(kwargs['config']):
Align.group(acc, i)
acc+=1
# adds fake outgroup
Align.append('outgroup', 'A'*Align.ls(), 999)
# computes statistics
pol = Align.polymorphism()
results = []
for stat in stats:
if stat in pol:
results.append((stat, str(pol[stat])))
# writes to standard output
print ' '.join(['%s: %s' %i for i in results])
commands.append(analyzer)
########################################################################
class blastgb(BaseCommand):
    """
    Annotate the CDS features of a GenBank record with BLAST hits.

    Runs a BLAST search of every `CDS` feature against a local database
    and appends the best hit name(s) to the feature's `note` qualifier,
    then writes the modified record.
    """
    ####################################################################
    brief = 'Blasts all coding sequences from a GenBank file.'
    description = 'Imports a GenBank record and performs a BLAST search\
of all `CDS` features against a given (local) database. Generates\
another GenBank record with the name of the best hit(s) (separated by\
the // string when more than one) appended to the `note` field.'
    options = [
        Option('input', 'Name of a GenBank file', str, None, []),
        Option('output', 'Name of the output file', str, None, []),
        Option('db',
            'Path of the target database. By default, the database\
should be a fasta-formatted file of nucleotide sequences but flags\
`prot` and `formatted` can control this',
            str, None, []),
        Option('evalue',
            'Expectaction value: expected number of random hits by\
chance alone, depending on the database size. The default value is e^-6\
(therefore much less - and more stringent - than `blastn`\'s default\
value which is 10)',
            float, math.exp(-6), [lambda x: x>=0]),
        Option('nresults',
            'Maximum number of hits to output',
            int, 1, [lambda x: x>=0])
    ]
    ####################################################################
    flags = [
        ('prot', 'Performs protein-against-protein BLAST searches. With\
this flag activated, the database passed through `db` must contain\
protein sequences'),
        ('formatted', 'Pass this flag is the file named by the `db`\
option is a pre-formatted BLAST database (using the `formatdb` command)\
rather than a fasta file. In this case, the base name of the database\
should be passed')
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """BLAST every CDS feature and annotate its `note` qualifier."""
        # import the genbank file
        genBank = data.GenBank(kwargs['input'])
        # prepares the blast database and blast objects
        if 'prot' in fargs:
            mode = 'prot'
        else:
            mode = 'nucl'
        if 'formatted' not in fargs:
            db = wrappers.BLASTdb(data.Container(kwargs['db']), mode)
        else:
            db = kwargs['db']
        blast = wrappers.BLAST()
        # processes all CDS features
        features = [
            feature for feature in genBank if feature.type() == 'CDS' ]
        if not self.quiet:
            updater = tools.Updater(len(features))
        for i,feature in enumerate(features):
            sequence = feature.get_sequence()
            # translates if necessary
            if 'prot' in fargs:
                sequence = tools.translate(sequence)
            # makes blast
            if 'prot' in fargs:
                results = blast.blastp(sequence, db, evalue=kwargs['evalue'])
            else:
                results = blast.blastn(sequence, db, evalue=kwargs['evalue'])
            # crops results
            results = results.values()[0][:kwargs['nresults']]
            if not len(results):
                results = '"No homology found"'
            else:
                results = '"%s"' %' // '.join([j['subject'] for j in results])
            # adds results to the "note" field
            feature.add_qualifier('note', results)
            if not self.quiet:
                updater.refresh(
                    'feature %d of %d remaining: $REMAINING' %(
                        i+1, len(features)))
        if not self.quiet:
            # fixed: `updater` only exists when not quiet; these two
            # calls previously ran unconditionally and raised NameError
            # in quiet mode
            updater.refresh('%d features processed' %len(features), grain=0)
            updater.close()
        # saves the output file
        genBank.write( kwargs['output'] )
commands.append(blastgb)
########################################################################
class clean_seq(BaseCommand):
    """
    Replace ambiguity characters of nucleotide sequences by `N`.

    Every character (compared case-insensitively) that is absent from
    the allowed set given by the `chars` option is replaced by `N`;
    the cleaned sequences are written to the output fasta file.
    """
    ####################################################################
    brief = 'Removes ambiguity characters from nucleotide sequences.'
    description = 'The `quiet` option is ignored.'
    options = [
        Option('input', 'Name of a the input fasta file', str, None, []),
        Option('output', 'Name of the output file', str, None, []),
        Option('chars',
            'A string listing all valid characters. Note that the\
comparisons are case-insensitive.',
            str, 'ACGTN-', [])
    ]
    ####################################################################
    flags = [
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Clean every sequence of the input container and write the result."""
        self._chars = kwargs['chars'].upper()
        container = data.Container(kwargs['input'])
        for i, (n,s,g) in enumerate(container):
            container.sequence(i, self._filter(s))
        container.write(kwargs['output'])
    ####################################################################
    def _filter(self, seq):
        """Return `seq` with characters outside the allowed set replaced by `N`.

        Fixed: builds the result with a single join instead of repeated
        string concatenation, which was quadratic in sequence length.
        """
        chars = self._chars
        return ''.join([i if i.upper() in chars else 'N' for i in seq])
commands.append(clean_seq)
########################################################################
class clean_tree(BaseCommand):
    """
    Strip internal labels and/or branch lengths from a newick tree.
    """
    ####################################################################
    brief = 'Cleans a newick tree.'
    description = 'This command removes internal branch labels and/or\
branch lengths from a newick tree. The `quiet` option is ignored.'
    options = [
        Option('input', 'Name of an input newick file', str, None, []),
        Option('output', 'Name of the output file', str, None, [])
    ]
    ####################################################################
    flags = [
        ('keep_labels', 'Don\'t remove internal branch labels'),
        ('keep_brlens', 'Don\'t remove branch lengths')
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        # resolve the two flags first, then load and rewrite the tree
        keep_labels = 'keep_labels' in fargs
        keep_brlens = 'keep_brlens' in fargs
        tree = data.Tree(kwargs['input'])
        tree.write(fname=kwargs['output'], labels=keep_labels, brlens=keep_brlens)
commands.append(clean_tree)
########################################################################
class codalign(BaseCommand):
    """
    Protein-based alignment of coding (nucleotide) sequences: aligns
    the conceptual translations (or accepts a pre-aligned protein set)
    and back-translates the alignment so that gaps never split codons.
    """
    ####################################################################
    brief = 'Protein-based alignment of coding sequences.'
    description = 'This command accepts codings (nucleotide) sequences\
and perform alignment at the protein level (or accept the corresponding\
protein alignment) and generates a coding sequence alignment guaranteed\
to fit the reading frame (gaps are multiple of three and don\'t split\
codons apart). Note that errors can be generated by the presence of\
stop codons in sequences. By default, this command crops the final stop\
codon of coding sequences. Use the `keepstop` flag to prevent this.'
    options = [
        Option('input', 'Name of an input fasta file', str, None, []),
        Option('output', 'Name of the output file', str, None, []),
        Option('prot',
            'Name of a fasta file containing aligned proteins. The\
proteins sequences should match exactly the conceptual traduction of\
coding sequences. If an empty string is passed (the default), the\
option is ignored and the alignment is performed automatically on\
conceptual translations', str, '', [])
    ]
    ####################################################################
    flags = [
        ('muscle', 'Uses the program `muscle` (default is `clustalw`)'),
        ('keepstop', 'Don\'t crop final stop codon of coding sequences')
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Align coding sequences through their protein translation and
        write the back-aligned nucleotide alignment to the output."""
        # imports cds sequences
        cds = data.Container(kwargs['input'])
        # uses clustal-friendly names
        mapping = cds.encode()
        # removes trailing stop codons
        if 'keepstop' not in fargs:
            for i in range(len(cds)):
                if cds.sequence(i)[-3:].upper() in ('TGA', 'TAA', 'TAG'):
                    cds.sequence(i, cds.sequence(i)[:-3])
        # translates and aligns
        if kwargs['prot']=='':
            prot = tools.translate(cds)
            if 'muscle' not in fargs:
                prot = wrappers.clustal(prot, quiet=self.quiet)
            else:
                prot = wrappers.muscle(prot, quiet=self.quiet)
            # NOTE(review): only `prot` is renamed back here while `cds`
            # keeps the encoded names in this branch — presumably
            # tools.backalign matches sequences by rank rather than by
            # name; confirm, otherwise `cds` should also be renamed.
            prot.rename(mapping)
        # or imports already-aligned sequences
        else:
            prot = data.Align(kwargs['prot'])
            cds.rename(mapping)
        # calls backalign function
        align = tools.backalign(cds, prot)
        # saves
        align.write(kwargs['output'])
commands.append(codalign)
########################################################################
class concat(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Concatenation of sequence alignments.'
description = 'Combines sequence information from fasta-formatted\
sequence alignments. Sequences are concatenated when their names match\
(either exact or partial matches), regardless of the order of\
sequences. When sequences are missing in one of the alignment, they are\
replaced by a stretch of missing data of appropriate length. Spacers of\
missing data can be placed between concatenated alignments, depending\
on option values. The `quiet` option is ignored. By default, the full\
name of sequences is used for comparison. It is possible to restrict\
the comparison to the beginning of the sequence (at a fixed length) or\
using a specified separator character, but not both.'
options = [
Option('input',
'A list of fasta file names. The names must be separated\
by commands, as in `file1,file2,file3`. It is possible to use UNIX\
wild cards (*, ~). File names might be duplicated',
lambda x: x.split(','), None, []),
Option('output', 'Name of the output file', str, None, []),
Option('spacer',
'Gives the length of stretches of missing data to be\
introduced between concatenated alignments. If the argument is an\
integer, the same spacer in introduced between all pairs of consecutive\
alignments. To introduce variable-length spacers, a list of\
comma-separated integers must be passed, and the number of values must\
be equal to the number of alignments minus 1. By default, no spacers\
are inserted.',
lambda x: map(int, str(x).split(',')), 0,
[lambda x: min(x)>=0]),
Option('character',
'Character to use for spacer stretches and for missing\
segments. This argument should be changed to `X` when dealing with\
protein sequences', str, '?', [lambda x: len(x)==1]),
Option('sep',
'Character to use as separator (only characters before\
the first occurrence of the character are considered; the whole string\
is considered if the character is not present in a sequence name)', str,
'', [lambda x: len(x)==1]),
Option('len',
'Maximum number of characters to considered (the rest of\
the string is discarded)', int, -1, [lambda x: x>0])
]
####################################################################
flags = [
('partial', 'The comparison of sequence names is performed only\
over the length of the shortest name, such as `anaco` and `anaconda`\
are held as identical, and concatenated under the name `anaconda`'),
('case', 'Ignore case for comparison (all names are converted\
to lower case)')
]
####################################################################
def _run(self, *fargs, **kwargs):
fnames = []
for name in kwargs['input']:
fnames += glob.glob(name)
if kwargs['len']!=-1 and kwargs['sep']!='':
raise ValueError, 'cannot specified both `len` and `sep` arguments'
aligns = [data.Align(i) for i in fnames]
for align in aligns:
for i in align:
if 'case' in fargs:
i.name = i.name.lower()
if kwargs['len']!=-1:
i.name = i.name[:kwargs['len']]
if kwargs['sep']!='':
i.name = i.name.split(kwargs['sep'])[0]
spacer = kwargs['spacer']
if spacer==0: spacer = [0]
if len(spacer)==1: spacer *= (len(aligns)-1)
if not self.quiet:
print 'Processing %d files...' %len(aligns)
result = tools.concat(aligns=aligns, spacer=spacer,
ch=kwargs['character'], strict='partial' not in fargs)
result.write(kwargs['output'])
if not self.quiet:
print 'Number of input alignments: %d' %len(aligns)
print 'Minimum number of sequences in input: %d' %min(map(len, aligns))
print 'Maximum number of sequences in input: %d' %max(map(len, aligns))
print 'Final number of sequences: %d' %len(result)
print 'List of names in the final output file:\n %s' %tools.wrap(', '.join(result.names()), 70, 4)
print 'Minimum alignment length in input: %d' %(min(map(data.Align.ls, aligns)))
print 'Minimum alignment length in input: %d' %(max(map(data.Align.ls, aligns)))
print 'Final alignment length: %d' %(result.ls())
print 'Result went to `%s`' %(kwargs['output'])
commands.append(concat)
########################################################################
class concatgb(BaseCommand):
    """
    End-to-end concatenation of two GenBank records, shifting the
    features of the second record past the first one and an optional
    spacer.
    """
    ####################################################################
    brief = 'Concatenation of GenBank records.'
    description = 'The `quiet` option is ignored.'
    options = [
        Option('file1', 'First GenBank record', str, None, []),
        # fixed copy-paste error: this option was described as "First"
        Option('file2', 'Second GenBank record', str, None, []),
        Option('output', 'Name of the output file', str, None, []),
        Option('spacer',
            'Number of characters to insert between records',
            int, 0, [lambda x: x>=0]),
        Option('character',
            'Character to use for the spacer ',
            str, 'N', [lambda x: len(x)==1]),
    ]
    ####################################################################
    flags = [
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Append file2 (features shifted) after file1 and write out."""
        # imports the input sequences
        gb1 = data.GenBank(kwargs['file1'])
        gb2 = data.GenBank(kwargs['file2'])
        # connects and shifts all gb2's features
        for feature in gb2:
            copy = feature.copy(gb1)
            copy.shift( len(gb1) + kwargs['spacer'] )
            gb1.add_feature(copy)
        # adds spacer and gb2's sequence to gb1's sequence
        gb1.set_sequence( gb1.get_sequence()
            + ''.join([kwargs['character']] * kwargs['spacer'])
            + gb2.get_sequence())
        # outputs
        gb1.write(kwargs['output'])
commands.append(concatgb)
########################################################################
class consensus(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Builds consensus of sequences with matching names.'
description = 'From a nucleotide sequence alignment, the consensus\
of all pairs of sequences that share the same prefix is computed, and\
only unique names are exported. By default, names `spam_a001`,\
`spam_b145`, as well as `spam` are considered as unique and merged.\
The resulting sequence will be named `spam`. More information is\
available in the documentation of the C++ class `Consensus`.'
options = [
Option('input', 'Nucleotide sequence alignment file', str, None, []),
Option('output', 'File name for results', str, None, []),
Option('separator', 'Character used to separate the common\
prefix from variable part of sequence names', str, '_',
[lambda x: len(x)==1]),
Option('missing',
'Character intepreted as missing data (always ignored)',
str, '?', [lambda x: len(x)==1]),
Option('inconsistency',
'Character used to identify inconsistencies in the\
`conservative` mode',
str, 'Z', [lambda x: len(x)==1])
]
####################################################################
flags = [
('conservative', 'Conservative mode of consensus: all\
differences between two sequences are considered as inconsistencies and\
are marked by the `Z` character (by default)')
]
####################################################################
def _run(self, *fargs, **kwargs):
align = data.Align(kwargs['input'])
consensus = egglib_binding.Consensus()
consensus.setDisagreement(kwargs['inconsistency'])
consensus.setMissing(kwargs['missing'])
result = data.Align()
result._object = consensus.consensus(align._object,
kwargs['separator'], ('conservative' not in fargs))
if not self.quiet:
print 'number of sequences: %d -> %d' %(len(align),len(result))
print 'number of ambiguous positions: %d' %sum(consensus.ambiguousPositions())
print 'number of inconsistent positions: %d' %sum(map(len,consensus.inconsistentPositions()))
result.write(kwargs['output'])
commands.append(consensus)
########################################################################
class cprimers(BaseCommand):
    """
    Designs consensus (possibly degenerate) PCR primers from a
    nucleotide alignment, maps them on a reference sequence and writes
    a primer list, a pair list and an annotated GenBank record.
    """
    ####################################################################
    brief = 'Finds consensus primers.'
    description = 'Generates consensus primers (degenerated if needed)\
from a nucleotide sequence alignment. Ideally, expects a coding\
sequence alignment as `input` from which the primers will be designed\
and an annotated sequence as `gbin` containing the full sequence with\
introns which will be used to select only primers contained in exons\
and filter the primers overlapping splicing sites out. Generates three\
output files where `output` is an optional base name passed as option:\
`output.list.txt`, `output.pairs.txt` and `output.primers.gb`. The\
first file contains a list of the generated primers, the second\
contains a list of the generated pairs and the last one present the\
reference sequence with annotations showing the position of all\
primers.'
    options = [
        Option('input', 'Nucleotide sequence alignment file', str, None, []),
        Option('output', 'Base file name for results', str, 'cprimers', []),
        Option('gbin', 'Reference genbank file (if empty, the first\
sequence of the alignment will be used', str, '', []),
        Option('ndeg',
            'Maximum number of degenerate positions allowed, per pair',
            int, 3, [lambda x: x>=0]),
        Option('liml',
            'Left limit of the selected region (based on the\
reference sequence, not the alignment)', int, 1, [lambda x: x>=0]),
        Option('limr',
            'Right limit of the selected region (based on the\
reference sequence, not the alignment) (`-1` means the end of the\
sequence)', int, -1, [lambda x: x>=0 or x==-1]),
        Option('clean_ends',
            'Number of clean positions (without degenerated bases)\
at the end of primers', int, 3, [lambda x: x>=0]),
        Option('nseq',
            'Number of sequences to include (the default, 0,\
corresponds to all)', int, 0, [lambda x: x>=0])
    ]
    ####################################################################
    flags = [
        ('no_check', 'Don\'t check for primer dimerization and other\
primer pair problems')
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Design, filter and map primers; write the three output files
        described in the class description."""
        # imports fasta
        align= data.Align(kwargs['input'])
        if not self.quiet:
            print '# file: '+kwargs['input']
            print '# %d sequences' %len(align)
        # selects subset of sequences
        if kwargs['nseq']==0:
            nseq= len(align)
        else:
            nseq = kwargs['nseq']
        align= align.slice(0, nseq)
        # generates consensus
        consensus= align.consensus()
        if not self.quiet:
            L = sum([consensus.count(i) for i in 'ACGT'])
            print '# consensus length: %d' %len(consensus)
            print '# non-ambiguous bases: %d' %L
        # Primer3 expects N rather than alignment gaps
        consensus = consensus.replace('-', 'N')
        # imports the reference sequence
        if kwargs['gbin']!='':
            ref= data.GenBank(kwargs['gbin'])
        else:
            # no annotated reference given: fall back to the first
            # aligned sequence (ungapped), so splicing sites are unknown
            ref= data.GenBank()
            sequence = align.sequence(0).replace('-','').replace('?','')
            ref.set_sequence( sequence )
            if not self.quiet:
                print '# used %s as reference sequence' %align.name(0)
                print '# warning: primers may overlap splicing sites'
        # generates primers
        params = {}
        params['PRIMER_MAX_NS_ACCEPTED'] = kwargs['ndeg']
        params['PRIMER_MIN_TM']= 54.
        params['PRIMER_OPT_TM']= 55.
        params['PRIMER_MAX_TM']= 55.
        primers= wrappers.Primer3(consensus, **params)
        a = primers.find_primers()
        if not self.quiet:
            print '# primer3 found %d forward and %d reverse primers' %tuple(a)
        primers.find_pairs()
        if 'no_check' not in fargs:
            primers.check_pairs()
        if not self.quiet:
            print '# pairing yields %d pairs' %len(primers.pairs())
        # cleaning ambiguity at ends
        primers.clean_primer_ends(kwargs['clean_ends'])
        a = primers.clean_pair_ends(kwargs['clean_ends'])
        if not self.quiet:
            print '# drops to %d after end cleaning' %a
        # removes primers with too many amb characters
        pairs1 = []
        for i in primers.pairs():
            concat = i['F']['seq']+i['R']['seq']
            valid = sum([concat.count(j) for j in 'ACGT'])
            amb = len(concat)-valid
            if amb <= kwargs['ndeg']:
                pairs1.append(i)
        if not self.quiet:
            print '# %d pairs with maximum %d degenerated positions' %(len(pairs1), kwargs['ndeg'])
        # locates primers
        sequence = ref.get_sequence()
        # user limits are 1-based and inclusive; convert to 0-based
        liml = kwargs['liml'] -1
        limr = kwargs['limr']
        if limr == -1:
            limr = len(sequence)-1
        else:
            limr -= 1
        # caches of already-located primer sequences (None = not found)
        fprimers={}
        rprimers={}
        pairs2 = []
        for i in pairs1:
            if i['F']['seq'] not in fprimers:
                a = tools.locate(sequence, i['F']['seq'], start=liml, stop=limr)
                fprimers[i['F']['seq']] = a
            else:
                a = fprimers[i['F']['seq']]
            if i['R']['seq'] not in rprimers:
                # reverse primer is located as its reverse complement
                seq = tools.rc(i['R']['seq'])
                b = tools.locate(sequence, seq, start=liml, stop=limr)
                rprimers[i['R']['seq']] = b
            else:
                b = rprimers[i['R']['seq']]
            if a != None and b != None:
                i['startpos'] = a
                i['endpos'] = b + len(i['R']['seq']) - 1
                pairs2.append(i)
        if not self.quiet:
            print '# %d pairs could be mapped on the gene (between\
positions %d and %d)' %(len(pairs2), liml+1, limr+1)
        # output individual primers
        s = StringIO.StringIO()
        pairs2.sort(lambda x,y: cmp(x['startpos'], y['startpos']))
        s.write('label\tstart\tend\tsize\tsequence\tQ\tTm\tGC%\tANY\tEND\n')
        # re-use the dicts to map each primer sequence to one pair
        fprimers = {}
        rprimers = {}
        for pair in pairs2:
            fprimers[ pair['F']['seq'] ] = pair
            rprimers[ pair['R']['seq'] ] = pair
        # n = number of digits needed for zero-padded labels
        n = len(str(len(fprimers)))
        for i,k in enumerate(sorted(fprimers, lambda x,y: cmp(
                fprimers[x]['startpos'], fprimers[y]['startpos']))):
            v = fprimers[k]
            fprimers[k]['Flabel'] = '%sF' %(str(i+1).rjust(n+1,'0'))
            s.write(fprimers[k]['Flabel'] + '\t')
            s.write('%d\t' %(v['startpos']))
            s.write(str(v['startpos']+len(v['F']['seq'])-1) + '\t')
            s.write(str(len(v['F']['seq'])) + '\t')
            s.write(v['F']['seq'] + '\t')
            s.write(str(v['F']['Q']) + '\t')
            s.write(str(v['F']['Tm']) + '\t')
            s.write(str(v['F']['GC%']) + '\t')
            s.write(str(v['F']['ANY']) + '\t')
            s.write(str(v['F']['END']) + '\n')
        # NOTE(review): the label width below is computed from
        # len(fprimers); it looks like len(rprimers) was intended —
        # confirm before changing (the two counts can differ).
        n = len(str(len(fprimers)))
        for i,k in enumerate(sorted(rprimers, lambda x,y: cmp(
                rprimers[x]['endpos'], rprimers[y]['endpos']))):
            v = rprimers[k]
            rprimers[k]['Rlabel'] = '%sR' %(str(i+1).rjust(n+1,'0'))
            s.write(rprimers[k]['Rlabel'] + '\t')
            s.write(str(v['endpos']-len(v['R']['seq'])+1) + '\t')
            s.write('%d\t' %(v['endpos']))
            s.write(str(len(v['R']['seq'])) + '\t')
            s.write(v['R']['seq'] + '\t')
            s.write(str(v['R']['Q']) + '\t')
            s.write(str(v['R']['Tm']) + '\t')
            s.write(str(v['R']['GC%']) + '\t')
            s.write(str(v['R']['ANY']) + '\t')
            s.write(str(v['R']['END']) + '\n')
        f = open(kwargs['output']+'.list.txt', 'w')
        f.write(s.getvalue())
        f.close()
        # output pairs
        s = StringIO.StringIO()
        s.write('forward\treverse\tstart\tend\tsize\t(mean)Q\tTm\t(mean)GC%\t(mean)ANY\t(mean)END\n')
        # pairs are reported by increasing mean penalty (Q)
        for i in sorted(pairs2, lambda x,y: cmp((x['F']['Q']+x['R']['Q'])/2.,
                (y['F']['Q']+y['R']['Q'])/2.)):
            s.write(fprimers[i['F']['seq']]['Flabel']+'\t')
            s.write(rprimers[i['R']['seq']]['Rlabel']+'\t')
            s.write(str(i['startpos'])+'\t')
            s.write(str(i['endpos'])+'\t')
            s.write(str(i['endpos']-i['startpos']+1)+'\t')
            s.write(str((i['F']['Q']+i['R']['Q'])/2.)+'\t')
            s.write(str(i['F']['Tm'])+' '+str(i['R']['Tm'])+'\t')
            s.write(str((i['F']['GC%']+i['R']['GC%'])/2.)+'\t')
            s.write(str((i['F']['ANY']+i['R']['ANY'])/2.)+'\t')
            s.write(str((i['F']['END']+i['R']['END'])/2.)+'\n')
        f = open(kwargs['output']+'.pairs.txt', 'w')
        f.write(s.getvalue())
        f.close()
        # adds the primers as features in the gb
        for i in sorted(pairs2, lambda x,y: cmp((x['F']['Q']+x['R']['Q'])/2.,
                (y['F']['Q']+y['R']['Q'])/2.)):
            locationF = data.GenBankFeatureLocation()
            locationF.addBaseRange(i['startpos'],
                i['startpos']+len(i['F']['seq'])-1)
            locationR = data.GenBankFeatureLocation()
            locationR.addBaseRange(i['endpos']-len(i['R']['seq'])+1,
                i['endpos'])
            locationR.setComplement()
            featureF = data.GenBankFeature(ref)
            featureF.set(
                type='primer_bind',
                location=locationF,
                label=fprimers[i['F']['seq']]['Flabel'],
                note=i['F']['seq'],
                color='2'
            )
            featureR = data.GenBankFeature(ref)
            featureR.set(
                type='primer_bind',
                location=locationR,
                label=rprimers[i['R']['seq']]['Rlabel'],
                note=i['R']['seq'],
                color='2'
            )
            ref.add_feature(featureF)
            ref.add_feature(featureR)
        # writes the gb
        ref.write(kwargs['output']+'.primers.gb')
commands.append(cprimers)
########################################################################
class extract(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Extract specified ranges of an alignment.'
description = 'The command reads a fasta alignment, and generates\
another fasta alignment consisting of one or several ranges of\
positions.'
options = [
Option('input', 'Nucleotide sequence alignment file', str, None, []),
Option('output', 'File name for results', str, None, []),
Option('ranges', 'List of positions or ranges to extract,\
separated by commas. Each item of the list can come as a unique integer\
(for a unique position) or as an expression `start-stop` to extract the\
positions `start` to `stop` (both included). It is possible to mix both\
forms, as in `ranges=1-200,225,250,280,300-800` where `225`, for\
example, is strictly equivalent to `225-225`',
lambda x: map(lambda y: map(int, y.split('-')), x.split(',')),
None, []) ]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
# imports alignment
align = data.Align(kwargs['input'])
# the ranges must be converted manually
positions = []
for pos in kwargs['ranges']:
if len(pos)==1:
positions.append(pos[0]-1)
bounds = pos[0]-1, pos[0]
elif len(pos)==2:
positions += range(pos[0]-1, pos[1])
bounds = pos[0]-1, pos[1]
else:
raise ValueError, 'invalid `ranges` argument (expect a single position or two bounds)'
if bounds[0] < 0:
raise ValueError, 'negative bound value in `ranges` argument'
if bounds[1] > align.ls():
raise ValueError, 'a bound value of `ranges` is over the end of the alignment'
# extracts sequences
extracted = align.extract(positions)
# exports the resulting alignment
extracted.write(kwargs['output'])
commands.append(extract)
########################################################################
class extract_clade(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Extracts the sequences corresponding to a tree clade.'
description = 'The command takes a phylogenetic tree and a fasta\
file containing the corresponding sequences (aligned or not). The\
smallest clade containing all specified names will be extracted as\
another fasta file. By default, clades encompassing the root (which\
would be paraphyletic groups under the assumption that the tree is\
rooted) are exported as well; use the flag `monophyletic` to prevent\
this behaviour. Note that the root (or base of the tree) is never\
returned.'
options = [
Option('sequences', 'Fasta file containing sequences', str, None, []),
Option('tree', 'Newick file containing tree', str, None, []),
Option('output', 'Name of resulting fasta file', str, None, []),
Option('names', 'Name of a least one leaf of the tree (separated\
by commas when more than one). The command supports lists containing\
repeated leaves',
lambda x: x.split(','), None, [lambda x: len(x)>=1]),
Option('threshold', 'Minimum value the node must have as label\
to be returned (only positive values are supproted). Nodes that have a\
label not convertible to float and those whose label is inferior than\
threshold are not returned. By default (-1), this criterion is not\
applied at all (all nodes are returned when they contain the requested\
names). This is different than 0 (then, only nodes that have a number\
as label can be returned.',
float, -1, [lambda x: x>=0 or x==-1]),
Option('minimum', 'Smallest number of descending leaves a clade\
must have to be returned. Clades with less nodes are ignored',
int, 2, [lambda x: x>=2]) ]
####################################################################
flags = [
('monophyletic', 'Consider only monophyletic clades (assuming\
the tree is rooted'),
('exact', 'The clade must contain exactly (rather than `at\
least`) the number of leaves given by the `minimum` option')
]
####################################################################
def _run(self, *fargs, **kwargs):
# imports data
fasta = data.Container(kwargs['sequences'])
tre = data.Tree(kwargs['tree'])
kwargs['names']
# finds the good node
leaves = None
for node in tre:
# checks that label is sufficient
if kwargs['threshold']!=-1:
try:
label = int(node.get_label())
except (TypeError, ValueError):
continue
if label < kwargs['threshold']:
continue
# collect the list(s) of leaves
leaves1 = node.leaves_down()
bleaves1 = list(leaves1)
if 'monophyletic' in fargs:
leaves2 = None
bleaves2 = None
else:
leaves2 = node.leaves_up()
bleaves2 = list(leaves2)
# checks all the leaves are in either of the leaves list
for name in kwargs['names']:
if bleaves1 != None:
if name not in bleaves1:
bleaves1 = None
else:
bleaves1.remove(name)
if bleaves2 != None:
if name not in bleaves2:
bleaves2 = None
else:
bleaves2.remove(name)
if bleaves1==None and bleaves2==None:
break
# checks one or both lists are good
if bleaves1 != None:
if leaves == None or len(leaves1)<len(leaves):
if 'exact' in fargs:
good = len(leaves1)==kwargs['minimum']
else:
good = len(leaves1)>=kwargs['minimum']
if good:
leaves = leaves1
if bleaves2 != None:
if leaves == None or len(leaves2)<len(leaves):
if 'exact' in fargs:
good = len(leaves2)==kwargs['minimum']
else:
good = len(leaves2)>=kwargs['minimum']
if good:
leaves = leaves2
# checks
if leaves==None:
raise ValueError, 'cannot find the specified node'
# imports the sequences
result = data.Container()
for i in sorted(leaves):
x = fasta.find(i)
if x==None:
raise ValueError, '%s not in %s' %(i, kwargs['sequences'])
result.append( *fasta[x] )
# exports data
result.write(kwargs['output'])
if not self.quiet:
print '%d sequences exported' %len(result)
commands.append(extract_clade)
########################################################################
class family(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Finds homologs of a gene family using BLAST.'
description = 'This command uses all sequences from a fasta file of\
source sequences to blast against a database and reports (in a fasta\
file) all sequences of the target database that produce a significant\
hit with any of the source sequences. To use this command, you need to\
have the NCBI BLAST+ package installed. You need a fasta file of\
protein of nucleotide sequences. You need a target database (from which\
sequences should be extracted) as a fasta files.'
options = [
Option('input', 'Fasta file containing source sequences', str, None, []),
Option('target', 'Fasta file containing target database', str, None, []),
Option('output', 'Name of resulting fasta file', str, None, []),
Option('mode', 'Program to use: `blastn` for nucleotide source\
against nucleotide database, `blastp` for protein source against\
protein database, `blastx` for (translated) nucleotide source against\
protein database, `tblastn` for protein source against (translated)\
nucleotide database, `tblastx` for (translated) nucleotide source\
against (translated) nucleotide database',
str, 'blastn', [lambda x: x in set(['blastn', 'blastp',
'blastx', 'tblastn', 'tblastx'])]),
Option('evalue', 'Maximum threshold to report hits. The\
parameter used is E-value, that is for a given BLAST hit the\
theoretical probability of obtaining such hit by chance alogn, given\
the length of the database. It can be necessary to decrease this\
parameter to obtain results',
float, math.exp(-6), [lambda x: x>=0]) ]
##############################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
# configuring blast dabase
if not self.quiet:
print '# importing database from `%s`' %kwargs['target']
target = data.Container(kwargs['target'])
if not self.quiet:
print ' >`%s` contains %d sequences' %(kwargs['target'], len(target))
if kwargs['mode'] in set(['blastp', 'blastx']):
type = 'prot'
else:
type = 'nucl'
db = wrappers.BLASTdb(target, type)
if not self.quiet:
print ' >target database ready'
# imports source data
if not self.quiet:
print '# importing source sequences from `%s`' %kwargs['input']
input = data.Container(kwargs['input'])
if not self.quiet:
print ' >`%s` contains %d sequences' %(kwargs['input'], len(input))
# replace all invalid characters by Ns
if kwargs['mode'] in ['blastn', 'blastx', 'tblastx']:
valid = set('ACGT')
invalid = 'N'
elif kwargs['mode'] in ['blastp', 'tblastn']:
valid = set('ARNDCEQGHILKMFPSTWYV')
invalid = 'X'
for i in range(len(input)):
for j in range(input.ls(i)):
if input.get(i,j).upper() not in valid:
input.set(i,j,invalid)
else: raise ValueError, 'invalid mode: {0}'.format(kwargs['mode'])
# blast phase
if not self.quiet:
print '# blasting...'
blast = wrappers.BLAST()
results = blast._search(kwargs['mode'], input, db, evalue=kwargs['evalue'])
if not self.quiet:
print ' >blast finished'
print (tools.wrap(' >number of hits per sequence: '+
', '.join(map(str,[len(results[n]) for n,s,g in
input])), 72, 3))
hits = set()
for i in results:
hits.update( [j['subject'] for j in results[i]] )
if not self.quiet:
print ' >total number of hits: %d' %len(hits)
# creates the final file
final = data.Container()
for i in sorted(hits):
final.append( i, target.sequenceByName(i) )
final.write(kwargs['output'])
if not self.quiet:
print '# %s successfully saved' %(kwargs['output'])
commands.append(family)
########################################################################
class fasta2mase(BaseCommand):
    """
    Converts a fasta alignment to the mase format.
    """
    ####################################################################
    brief = 'Converts a fasta alignment to the mase format.'
    description = 'The `quiet` argument is ignored.'
    options = [
        Option('input', 'Name of a fasta-formatted alignment', str, None, []),
        Option('output', 'Name of resulting mase file', str, None, []) ]
    ##############################################################
    flags = [ ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Load the alignment, convert it and write the mase file."""
        align = data.Align(kwargs['input'])
        mase = tools.Mase(align)
        # `with` guarantees the handle is closed even if the write fails
        with open(kwargs['output'], 'w') as f:
            f.write(str(mase))
commands.append(fasta2mase)
########################################################################
class fasta2nexus(BaseCommand):
    """
    Converts a fasta alignment to the NEXUS format.
    """
    ####################################################################
    brief = 'Converts a fasta alignment to the NEXUS format.'
    description = 'The `quiet` argument is ignored.'
    options = [
        Option('input', 'Name of a fasta-formatted alignment', str, None, []),
        Option('output', 'Name of resulting NEXUS file', str, None, []),
        Option('type', '`nucl` for nucleotides or `prot` for proteins',
            str, None, [lambda x: x in set(['nucl', 'prot'])]) ]
    ##############################################################
    flags = [ ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Load the alignment and write its NEXUS representation."""
        align = data.Align(kwargs['input'])
        # `with` guarantees the handle is closed even if the write fails
        with open(kwargs['output'], 'w') as f:
            # nexus() takes a boolean: True for protein data
            f.write(align.nexus(kwargs['type']=='prot'))
commands.append(fasta2nexus)
########################################################################
class fasta2phyml(BaseCommand):
    """
    Converts a fasta alignment to the `phyml` format (PHYLIP variant
    accepted by PAML and PHYML).
    """
    ####################################################################
    brief = 'Converts a fasta alignment to the `phyml` format.'
    description = 'The so-called `phyml` format is a modification of the\
PHYLIP file format suitable for importing data to the programs PAML\
and PHYML. The `quiet` argument is ignored.'
    options = [
        Option('input', 'Name of a fasta-formatted alignment', str, None, []),
        Option('output', 'Name of resulting `phyml` file', str, None, []) ]
    ####################################################################
    flags = [ ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """Load the alignment and write its `phyml` representation."""
        align = data.Align(kwargs['input'])
        # `with` guarantees the handle is closed even if the write fails
        with open(kwargs['output'], 'w') as f:
            f.write(align.phyml())
commands.append(fasta2phyml)
########################################################################
class fg2gb(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Generates a GenBank record from fgenesh output.'
description = 'The command requires the sequence of the annotated\
regions as a fasta file and the fgenesh output as a separate text file.\
Obviously, all features must fit in the sequence length. A GenBank file\
incorporate the information of predicted genes as `gene`, and `CDS`\
annotation features.'
options = [
Option('seq', 'File with fasta-formatted sequence', str, None, []),
Option('ann', 'File with fgenesh output', str, None, []),
Option('output', 'Name of the resulting GenBank file', str, None, []) ]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
# imports sequence
sequence = data.Container(kwargs['seq'])
if len(sequence)!=1:
raise ValueError, '%s should contain exactly one fasta-formatted sequence' %kwargs['seq']
name, sequence, group = sequence[0]
if not self.quiet:
print 'imported sequence `%s`' %name
# creates gb
gb = data.GenBank()
gb.title = name
gb.set_sequence(sequence)
if not self.quiet:
print 'sequence of length %d imported' %len(sequence)
# imports annotation
annotations = tools.get_fgenesh(kwargs['ann'])
# imports annotations
cds = 0
gene = 0
for ann in annotations:
feature = data.GenBankFeature(gb)
location = data.GenBankFeatureLocation()
for range in ann['pos']:
location.addBaseRange(*range)
del ann['pos']
if ann['strand']=='minus':
location.setComplement()
del ann['strand']
if ann['type'] == 'CDS':
cds += 1
type = 'CDS'
elif ann['type'] == 'gene':
gene += 1
type = 'gene'
else:
raise ValueError, 'invalid feature type: `%s`' %ann['type']
del ann['type']
feature.set(type, location, **ann)
gb.add_feature(feature)
if not self.quiet:
print '%d gene and %d CDS features imported' %(gene, cds)
# export gb
gb.write(kwargs['output'])
if not self.quiet:
print '`%s` created' %kwargs['output']
commands.append(fg2gb)
########################################################################
class gb2fas(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Converts GenBank records to fasta.'
description = 'The command takes one or more GenBank records and\
generates a single fasta file. Each GenBank record can be multiple\
(contain multiple entries). Each sequence is named after the title of\
the GenBank entry (disregarding the file name).'
options = [
Option('input', 'One or more GenBank file names, separated by\
commas when more than one', lambda x: x.split(','), None, []),
Option('output', 'Name of the fasta file to generate', str, None, []) ]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
fasta = data.Container()
for i in kwargs['input']:
if not self.quiet:
print 'processing: `%s`' %i
f = open(i)
s = f.read()
f.close()
s = s.split('//')
if len(s)<2:
raise ValueError, 'invalid GenBank file: `%s`' %i
for j in s[:-1]:
gb = data.GenBank(string=j+'//')
fasta.append(gb.title, gb.get_sequence())
if not self.quiet:
print 'number of sequences imported: %d' %len(fasta)
fasta.write(kwargs['output'])
commands.append(gb2fas)
########################################################################
class infos(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Displays basic information from fasta files.'
description = 'The commands displays the number of sequences and\
alignment length (length of the longest sequences for unaligned sets\
of sequences) for all fasta files passed. The `quiet` option is\
ignored.'
options = [
Option('input', 'One or more fasta file names, separated by\
commas when more than one', lambda x: x.split(','), None, []) ]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
for fname in kwargs['input']:
container = data.Container(fname)
print fname
print ' ... %d sequence%s' %(len(container),
{True:'s',False:''}[len(container)>1])
if len(container)==0:
print ' ... empty file'
elif len(container)==1:
print ' ... single sequence'
print ' ... length: %d' %container.ls(0)
elif container.isEqual():
print ' ... alignment'
print ' ... length: %d' %container.ls(0)
else :
print ' ... not an alignment'
print ' ... max length: %d' %max([container.ls(i)
for i in range(len(container))])
commands.append(infos)
########################################################################
class interLD(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Computes linkage disequilibrium statistics between two loci.'
description = 'Computes association statistics between two\
alignments. It is required that both sequence alignments contain the\
exact same list of sequence names (duplicates are not supported) and\
there should be at least four sequences in each alignment. In the\
definition of statistics, an allele is a haplotype as determnined by\
the method Align.polymorphism(). The frequency of allele i at one locus\
is Pi, the frequency of the combination i,j (i at locus 1 and j at\
locus 2) is Pij. For a given pair of alleles i,j (i at locus 1 and j at\
locus j), Dij is Pij - PiPj. D\'ij is Dij/Dijmax if Dij>=0 and\
Dij/Dijmin if Dij<0, where Dijmax is min(Pi(1-Pj), (1-Pi)Pj) and Dijmin\
is min(PiPj, (1-Pi)(1-Pj)). To obtain the complete LD estimates both\
measures are averaged over all allele pairs as Dijtot = sum(PiPj|Dij|)\
of all i,j pairs).'
options = [
Option('align1', 'First alignment', str, None, []),
Option('align2', 'Second alignment', str, None, []),
Option('permus', 'Number of permutations to perform. If the\
value is larger than 0, the distribution of linkage statistics is\
computed by randomly shuffle the sequences of one of the alignments.',
int, 0, [lambda x: x>=0]),
Option('output', 'Name of output file', str, 'interLD.txt', [])]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
# imports alignments
align1 = data.Align(kwargs['align1'], groups=True)
align2 = data.Align(kwargs['align2'], groups=True)
# checks that names are correct
if align1.contains_duplicates():
raise ValueError, 'error: `%s` contains duplicates' %kwargs['align1']
if align2.contains_duplicates():
raise ValueError, 'error: `%s` contains duplicates' %kwargs['align2']
if set(align1.names()) != set(align2.names()):
raise ValueError, 'error: sequence names from `%s` and `%s` don\'t match' %(kwargs['align1'], kwargs['align2'])
if len(align1)<4:
raise ValueError, 'error: alignments must contain at least 4 sequences'
# computes LD
ns1, ns2, S1, S2, K1, K2, D, Dp = tools.LD(align1, align2, False)
output = open(kwargs['output'], 'w')
output.write('\tLocus 1\tLocus2\n')
output.write('File:\t%s\t%s\n' %(kwargs['align1'], kwargs['align2']))
output.write('ns:\t%d\t%d\n' %(ns1, ns2))
output.write('S:\t%d\t%d\n' %(K1, S2))
output.write('K:\t%d\t%d\n' %(S1, K2))
output.write('\n')
output.write('D:\t%f\n' %D)
output.write('D\':\t%f\n' %Dp)
# performs the test
if kwargs['permus']>0:
P1 = 0.
P2 = 0.
if not self.quiet:
print 'performing permutations'
updater=tools.Updater(kwargs['permus'])
for i in range(kwargs['permus']):
a, b, c, d, e, f, Di, Dpi = tools.LD(align1, align2, True)
if Di>=D: P1+=1
if Dpi>=Dp: P2+=1
if not self.quiet:
updater.refresh()
if not self.quiet:
updater.close()
P1 /= kwargs['permus']
P2 /= kwargs['permus']
output.write('\nP-values from %d iterations:\n')
output.write('D:\t%f\n' %P1)
output.write('D\':\t%f\n' %P2)
output.close()
commands.append(interLD)
########################################################################
class matcher(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Finds homologous regions between two sequences.'
description = 'This command performs a `bl2seq` search using the\
first sequence as query and the second sequence as target. It then\
produces a genbank record containing the first sequence with\
annotation features indicating the positions of the hits with the\
second sequence. The *long* sequence should not contain gaps.'
options = [
Option('long', 'Fasta file containing the first sequence (this\
sequence must be a nucleotide sequence)', str, None, []),
Option('short', 'Fasta file containing the sequence sequence\
depending on the `mode` option value, this sequence must be either a\
nucleotide sequence or a protein sequence)', str, None, []),
Option('output', 'Name of resulting GenBank file', str, 'matcher.gb', []),
Option('mode', 'Program to use: `blastn` for nucleotide source\
against nucleotide database, `blastx` for (translated) nucleotide\
source against protein database, `tblastx` for (translated) nucleotide\
source against (translated) nucleotide database',
str, 'blastn', [lambda x: x in set(['blastn', 'blastx',
'tblastn', 'tblastx'])]),
Option('evalue',
'Expectaction value: expected number of random hits by\
chance alone, depending on the database size. The default value is e^-6\
(therefore much less - and more stringent - than `blastn`\'s default\
value which is 10)',
float, math.exp(-6), [lambda x: x>=0]) ]
####################################################################
def _run(self, *fargs, **kwargs):
# imports sequences
fas1 = data.Container(kwargs['long'], groups=True)
fas2 = data.Container(kwargs['short'], groups=True)
# checks that files look normal
if len(fas1) != 1:
raise ValueError, '%s must contain exactly one sequence' %kwargs['long']
if len(fas2) != 1:
raise ValueError, '%s must contain exactly one sequence' %kwargs['sort']
# performs the blast search
bl2seq = wrappers.BL2SEQ()
res = bl2seq._search(kwargs['mode'], fas1.sequence(0),
fas2.sequence(0), evalue=kwargs['evalue'])
# collects hit positions
pos = []
for i in res:
loc = sorted([i['qstart'], i['qend']])
minus = ((i['qend']>i['qstart'] and i['send']<i['sstart']) or
(i['qend']<i['qstart'] and i['send']>i['sstart']))
pos.append((loc,minus))
pos.sort(lambda x,y: cmp(x[0][0], y[0][0]))
# removes redundant hits
i = 0
while i<len(pos):
flag = False
for j in range(len(pos)):
if (i!=j and pos[i][0][0]>=pos[j][0][0]
and pos[i][0][1]<=pos[j][0][1]):
flag = True
break
if flag:
del pos[i]
else:
i+=1
# small feedbacks
if not self.quiet:
print 'number of hits: %d' %len(pos)
# initializes GenBank record
gb = data.GenBank()
gb.set_sequence(fas1.sequence(0))
gb.title = fas1.name(0)
# adds hits as features
for (start,stop),minus in pos:
loc = data.GenBankFeatureLocation()
loc.addBaseRange(start,stop)
if minus:
loc.setComplement()
feat = data.GenBankFeature(gb)
feat.set('misc_feature', loc, note='bl2seq hit', color='2')
gb.add_feature(feat)
# saves
gb.write(kwargs['output'])
commands.append(matcher)
########################################################################
class names(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Lists sequence names from a fasta file.'
description = 'The order of names is preserved. The `quiet` flag is\
ignored. By default, one name is displayed per line.'
options = [
Option('input', 'Name of fasta-formatted sequence file', str, None, []) ]
####################################################################
flags = [ ('wrap', 'Displays several sequence names per line.\
Activate this flag when sequence names are short and don\'t contain spaces!') ]
####################################################################
def _run(self, *fargs, **kwargs):
fas = data.Container(kwargs['input'])
if 'wrap' in fargs:
print tools.wrap(', '.join(fas.names()), 72, 0)
else:
for name in fas.names():
print name
commands.append(names)
########################################################################
class rename(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Rename sequences according to a replacement list.'
description = 'The replacements must be given in a text file. It is\
not necessary to specify all names of the fasta file. The command does\
not require either that all replacements of the list are performed. If\
present, group labels are preserved and are not considered (they should\
not be included in the replacement list). If leading or trailing spaces\
are present in either old or new names, they will be removed.'
options = [
Option('input', 'Name of fasta-formatted sequence file', str, None, []),
Option('list', 'Name of a text file giving the list of\
replacements to perform. Each replacement must take one line and give\
the old name and the new name, in that order, separated by a tabulation',
str, None, []),
Option('output', 'Name of output file', str, None, []) ]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
# imports data
input = data.Align(kwargs['input'], groups=True)
rule = {}
f = open(kwargs['list'])
for line in f:
bits = line.split('\t')
if len(bits) != 2:
raise IOError, 'error: invalid line in %s: %s' %(kwargs['list'], line.strip())
old = bits[0].strip()
new = bits[1].strip()
if bits[0] in rule:
raise IOError, 'error: name %s is duplicated in %s' %(old, kwargs['list'])
rule[old] = new
f.close()
# performing replacements
output = data.Align()
cnt = 0
for n,s,g in input:
if n in rule:
output.append(rule[n], s, g)
cnt+=1
if not self.quiet:
print '%s -> %s' %(n, rule[n])
else:
output.append(n, s, g)
if not self.quiet:
print '%d replacements performed' %cnt
# saves
output.write(kwargs['output'])
commands.append(rename)
########################################################################
class truncate(BaseCommand):
    """
    Documentation
    """
    ####################################################################
    brief = 'Truncates sequence names.'
    description = 'The user can specify a separator or a number of\
characters (`length`) or both. By default (if neither argument\
`separator` or `length` is specified), nothing is done. If both actions\
are requested, they are always performed in the order: first\
`separator`, then `length`. If present, group labels are preserved and\
are not considered.'
    options = [
        Option('input', 'Name of fasta-formatted sequence file', str, None, []),
        Option('output', 'Name of output file', str, None, []),
        Option('separator', 'The separator can be a single character or\
a string. Whenever it occurs in a sequence name, everything right of\
its first occurrence (as well as the separator itself) will be\
removed. The default (an empty string) means that this criterion is not\
applied', str, '', []),
        Option('length', 'Maximum length of names. The default (an\
empty string) means that this criterion is not applied',
            int, 0, [lambda x: x>=0]) ]
    ####################################################################
    flags = [ ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Truncates every sequence name (separator first, then length)
        and writes the renamed sequences to the output file.
        """
        # imports data
        input = data.Align(kwargs['input'], groups=True)
        # performing replacements
        output = data.Align()
        for n,s,g in input:
            new = n
            if kwargs['separator'] != '':
                pos = n.find(kwargs['separator'])
                # BUGFIX: str.find() returns -1 (not None) when the
                # separator is absent; the previous `pos != None` test
                # was always true and silently chopped the last
                # character off names lacking the separator
                if pos != -1:
                    new = n[:pos]
            if kwargs['length'] != 0:
                new = new[:kwargs['length']]
            if not self.quiet:
                sys.stdout.write('%s -> %s\n' %(n,new))
            output.append(new, s, g)
        # saves
        output.write(kwargs['output'])
commands.append(truncate)
########################################################################
class reroot(BaseCommand):
    """
    Documentation
    """
    ####################################################################
    brief = 'Changes the orientation of a newick tree.'
    description = 'This command doesn\'t actually root (or reroot) the\
tree; the original tree must not be rooted (it must have a trifurcation\
at the root, and resulting tree will be likewise (only the\
representation will be altered to present the outgroup as one of the\
basal groups. A list of leaves representing a monophyletic group of the\
current tree (without encompassing the root) must be passed. The\
`quiet` argument is ignored. By default, the command uses the midpoint\
method.'
    options = [
        Option('input', 'Name of newick-formatted tree file', str, None, []),
        Option('output', 'Name of output file', str, None, []),
        Option('outgroup', 'List of leaves constituting the outgroup,\
separated by commas when more than one. It is possible to place the\
list in a file (one per line) and pass the name of the file (say,\
`fname`) using the `@` prefix, as in `outgroup=@fname` (there must be\
exactly one item and no comma separator in that case). By default\
(empty string) the command uses the midpoint method',
            lambda x: x.split(','), '', []) ]
    ####################################################################
    flags = [ ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Reorients the input tree, either around the provided outgroup
        or (by default) using the midpoint method.
        """
        outgroup = kwargs['outgroup']
        # a single item starting with `@` names a file of leaf names
        if len(outgroup)==1 and outgroup[0][:1] == '@':
            f = open(outgroup[0][1:])
            lines = f.readlines()
            f.close()
            outgroup = [item.strip() for item in lines if len(item.strip())]
            kwargs['outgroup'] = outgroup
        tree = data.Tree(kwargs['input'])
        if not len(outgroup):
            tree.midroot()
        else:
            node = tree.findMonophyleticGroup(outgroup)
            # the group must exist and hang from a single ascendant
            if node == None or len(node.ascendants()) != 1:
                raise ValueError('cannot reroot: invalid outgroup')
            tree.reoriente(node.ascendants()[0])
        tree.write(kwargs['output'])
commands.append(reroot)
########################################################################
class select(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Select a given list of sequences from a fasta file.'
description = 'The names should not include any group label (`@0`,\
`@1`, `@999` etc. tags) is they are present in the file (group labels\
are ignored). When a name is duplicated in the file (whether the\
different duplicates bear different group label or not), they are all\
exported to the output file. It is required that all names passed are\
found at least once. Sequences are exported in the order as they appear\
in the passed list.'
options = [
Option('input', 'Name of fasta-formatted file', str, None, []),
Option('output', 'Name of the output file', str, None, []),
Option('list', 'List of names of sequences that should be\
selected, separated by commas when more than one. It is possible to\
place the list of names in a file (one per line) and pass the name of\
the file (say, `fname`) using the `@` prefix, as in `list=@fname`\
(there must be exactly one item and no comma separator in that case)',
lambda x: x.split(','), '', []) ]
####################################################################
flags = [ ]
####################################################################
def _run(self, *fargs, **kwargs):
if len(kwargs['list'])==1 and kwargs['list'][0][:1] == '@':
f = open(kwargs['list'][0][1:])
kwargs['list'] = [i.strip() for i in f.readlines() if len(i.strip())]
f.close()
input = data.Container(kwargs['input'], groups=True)
output = data.Container()
for name in kwargs['list']:
found = False
for n,s,g in input:
if n==name:
found = True
output.append(n,s,g)
if not found:
raise ValueError, 'sequence %s not found in %s' %(name, kwargs['input'])
if not self.quiet:
print '%d names requested, %s sequences found' %(len(kwargs['list']), len(output))
output.write(kwargs['output'])
commands.append(select)
########################################################################
class sprimers(BaseCommand):
    """
    Documentation
    """
    ####################################################################
    brief = 'Design copy-specific PCR primers from an alignment.'
    description = 'This command designs PCR primers that a specific to\
genes from a sequence alignment (and are as unlikely as possible to\
amplify other genes from the alignment). Primers are generated using\
PRIMER3. Next, they are filtered according to several criteria. The\
preferred primers must be close to the end of sequences (by default),\
with low homology to other sequences of the alignment. A BLAST search\
is performed and primers whose 3\' end matches any other sequence and\
excluded. Finally, a pair check is performed using PRIMER3. The\
corresponding programs must be available.'
    options = [
        Option ('input', 'Name of the input fasta alignment', str, None, []),
        Option ('output', 'Name of the output file', str, 'sprimers.csv', []),
        Option ('sizemin', 'Mininal product size', int, 70, [lambda x: x>50]),
        Option ('sizemax', 'Maximal product size', int, 150, [lambda x: x>50]),
        Option ('minTm', 'Minimal annealing temperature', float, 58, [lambda x: x>20]),
        Option ('optTm', 'Optimal annealing temperature', float, 60, [lambda x: x>20]),
        Option ('maxTm', 'Maximal annealing temperature', float, 62, [lambda x: x>20]),
        Option ('minGc', 'Minimal GC content percentage', float, 30, [lambda x: x>0]),
        Option ('optGc', 'Optimal GC content percentage', float, 50, [lambda x: x>0]),
        Option ('maxGc', 'Maximal GC content percentage', float, 80, [lambda x: x>0]),
        Option ('numAmb', 'Maximal number of degenerate bases in primers', int, 0,[lambda x: x>=0]),
        Option ('filter1', 'Pre-selection filter (before BLAST) as a maximal number of pairs to process', int, 5000, [lambda x: x>0]),
        Option ('filter2', 'Pre-selection filter (after BLAST) as a maximal number of pairs to process', int, 100, [lambda x: x>0]),
        Option ('threshold1', 'Maximum homology score to other genes (a real number between 0. and 1.)', float, 0.75, [lambda x: x<=1 and x>=0]),
        Option ('threshold2', 'Maximum homology score to other regions of the same gene (a real number between 0. and 1.)', float, 0.50, [lambda x: x<=1 and x>=0]),
        Option ('show', 'How many pairs to export in the output file', int, 10,[lambda x: x>0]),
    ]
    ####################################################################
    flags = [
        ('selection', 'Restrict the primer search to one or more\
sequences of the alignment. The user should tag the names of selected\
sequences with labels such as @1 (any number larger to or equal to 1\
is allowed)'),
        ('prefer_end', 'Prefer pairs closer the end of genes')
    ]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Designs, filters (position score, BLAST specificity, PRIMER3
        pair check) and exports primer pairs for each (selected)
        sequence of the alignment.
        """
        align = data.Align(kwargs['input'], groups=True)
        output = open(kwargs['output'], 'w')
        # self.db holds the BLAST database handle so that blprimer()
        # can reuse it; cleared in the finally clause below
        self.db = None
        try:
            # write output file header
            output.write('gene,start,end,size,Fname,Fseq,FTm,FANY,FEND,FQ,FGC%,FBlastScore,FAutoBlastScore,Rname,Rseq,RTm,RANY,REND,RQ,RGC%,RBlastScore,RAutoBlastScore\n')
            c=0
            # prepare blast database
            # (gaps are stripped and `?` converted to N first)
            self.container = data.Container()
            for n,s,g in align:
                s = s.translate(string.maketrans('?', 'N'), '-')
                self.container.append(n, s, g)
            self.db = wrappers.BLASTdb(self.container, 'nucl')
            for n,s,g in self.container:
                # applies to a subset of sample only (if required)
                if 'selection' in fargs and g==0:
                    continue
                if not self.quiet:
                    print n
                primers= wrappers.Primer3(
                    s,
                    PRIMER_MIN_TM=kwargs['minTm'],
                    PRIMER_OPT_TM=kwargs['optTm'],
                    PRIMER_MAX_TM=kwargs['maxTm'],
                    PRIMER_MIN_GC=kwargs['minGc'],
                    PRIMER_OPT_GC_PERCENT=kwargs['optGc'],
                    PRIMER_MAX_GC=kwargs['maxGc'],
                    PRIMER_MAX_NS_ACCEPTED=kwargs['numAmb']
                )
                # searches primers
                nf, nr = primers.find_primers()
                np = primers.find_pairs(kwargs['sizemin'], kwargs['sizemax'])
                if not self.quiet:
                    print '    %d forward, %d reverse primers' %(nf, nr)
                    print '    %d pairs' %np,
                    sys.stdout.flush()
                # score primers and sort
                pairs = primers.pairs()
                for i,v in enumerate(pairs):
                    # locate each primer on the (ungapped) sequence;
                    # the reverse primer is searched as its reverse
                    # complement
                    a = tools.locate(s, v['F']['seq'])
                    b = tools.locate(s, tools.rc(v['R']['seq']))
                    if (a==None):
                        raise RuntimeError, 'sprimers: this primer was not found: {0}'.format(v['F']['seq'])
                    if (b==None):
                        print tools.rc(v['R']['seq'])
                        print s
                        raise RuntimeError, 'sprimers: this primer was not found: {0}'.format(v['R']['seq'])
                    b += len(v['R']['seq'])-1
                    pairs[i]['coord'] = (a,b)
                    # position score of the product midpoint (only
                    # counted when the `prefer_end` flag is set)
                    pairs[i]['pscore'] = ('prefer_end' in fargs)*self.pscore(len(s), a+(b-a)/2.)
                # the pairs closest of the end are ranked first
                pairs.sort(cmp= lambda x,y: cmp(y['pscore'], x['pscore']))
                # pre-selects primers
                # (combined PRIMER3 quality + position score, ascending)
                pairs.sort(lambda x,y: cmp(
                    x['F']['Q']+x['R']['Q']+(1-x['pscore']),
                    y['F']['Q']+y['R']['Q']+(1-y['pscore']))
                )
                pairs = pairs[:kwargs['filter1']]
                if not self.quiet:
                    print '-> %d preselected (max %d)' %(len(pairs), kwargs['filter1'])
                # selects based on blast
                # blast_results caches scores per primer sequence so a
                # primer shared by several pairs is blasted only once
                blast_results= {}
                accepted= []
                if not self.quiet:
                    updater = tools.Updater(len(pairs))
                for i in pairs:
                    if not i['F']['seq'] in blast_results:
                        blast_results[i['F']['seq']] = self.blprimer(i['F']['seq'], n)
                    if not i['R']['seq'] in blast_results:
                        blast_results[i['R']['seq']] = self.blprimer(i['R']['seq'], n)
                    fscores = blast_results[i['F']['seq']]
                    rscores = blast_results[i['R']['seq']]
                    # keep the pair if the averaged allo- and auto-
                    # homology scores pass both thresholds
                    if ((fscores[0]+rscores[0])/2. <= kwargs['threshold1'] and
                        (fscores[1]+rscores[1])/2. <= kwargs['threshold2']):
                            i['fscores']= fscores
                            i['rscores']= rscores
                            accepted.append(i)
                            if len(accepted) == kwargs['filter2']:
                                break
                    if not self.quiet:
                        updater.refresh('    blasting %d/%d $ELAPSED (unknown; max: $REMAINING)' %(len(accepted),kwargs['filter2']))
                if not self.quiet:
                    updater.refresh('    blasting done')
                    updater.close()
                pairs = accepted
                # rank by auto-homology first, then allo-homology
                # (the second, stable sort takes precedence)
                pairs.sort(lambda x,y: cmp((x['fscores'][1]+x['rscores'][1]), (y['fscores'][1]+y['rscores'][1])))
                pairs.sort(lambda x,y: cmp((x['fscores'][0]+x['rscores'][0]), (y['fscores'][0]+y['rscores'][0])))
                if not self.quiet:
                    print '    %d primer pairs validated (max %d)' %(len(accepted), kwargs['filter2'])
                # check
                # replace the wrapper's pair list in place so that
                # check_pairs() operates on the filtered set
                del primers.pairs()[:]
                primers.pairs().extend(pairs)
                primers.check_pairs()
                if not self.quiet:
                    print '    Final pairs: %d reduced to %d' %(len(primers.pairs()), min(kwargs['show'], len(primers.pairs())))
                pairs = primers.pairs()[:kwargs['show']]
                # outputting
                d=0
                for i in pairs:
                    d += 1
                    output.write('%s,%d,%d,%d' %(n,i['start'],i['end'],i['size']))
                    output.write(',%s,%s,%f,%f,%f,%f,%f,%f,%f' %(
                        '%s_%sF' %(n,str(d).rjust(2,'0')),
                        i['F']['seq'],
                        i['F']['Tm'],
                        i['F']['ANY'],
                        i['F']['END'],
                        i['F']['Q'],
                        i['F']['GC%'],
                        i['fscores'][0],
                        i['fscores'][1]))
                    output.write(',%s,%s,%f,%f,%f,%f,%f,%f,%f\n' %(
                        '%s_%sR' %(n,str(d).rjust(2,'0')),
                        i['R']['seq'],
                        i['R']['Tm'],
                        i['R']['ANY'],
                        i['R']['END'],
                        i['R']['Q'],
                        i['R']['GC%'],
                        i['rscores'][0],
                        i['rscores'][1]))
                    output.flush()
                c+=1
        finally:
            output.close()
            if self.db: del self.db
        if not self.quiet:
            print 'finished'
    ####################################################################
    def pscore(self, length, position):
        """
        Computes a position score
        """
        # logistic ramp over the relative position: close to 0 near the
        # start of the sequence, close to 1 near the end
        return 1/(1+math.exp(5-10.*position/length))
    ####################################################################
    def blprimer(self, primer, current):
        """
        Blasts a primer
        """
        bl = wrappers.BLAST()
        # permissive settings so that short, imperfect matches are found
        hits = bl.blastn(primer, self.db, evalue=1000,
                    word_size=7, penalty=-1, gapopen=1000)
        # split hits between the primer's own gene (`current`) and the
        # other genes of the alignment
        allo_hits = []
        auto_hits = []
        # NOTE(review): assumes bl.blastn() keys results by query name,
        # with '' for an unnamed query -- confirm against wrappers.BLAST
        for i in hits['']:
            if i['subject'] == current:
                auto_hits.append(i)
            else:
                allo_hits.append(i)
        for i in auto_hits+allo_hits:
            (a,b)= sorted([i['qstart'], i['qend']])
            (c,d)= sorted([i['hstart'], i['hend']])
            # extend the subject window so it covers the full primer
            c -= a
            d += (len(primer)-1)-b
            seq = self.container.sequenceByName(i['subject'])
            hit = seq[max(0,c):d+1]
            # reverse-complement when exactly one of the two ranges is
            # reversed
            if ((i['qstart']>i['qend'] or i['hstart']>i['hend']) and
                not (i['qstart']>i['qend'] and i['hstart']>i['hend'])):
                    hit= tools.rc(hit)
            # pad with N where the window falls outside the subject
            for j in range(c,0):
                hit='N'+hit
            for j in range(len(seq)-1,d):
                hit=hit+'N'
            # position-weighted identity: matches near the 3' end of
            # the primer weigh more (see pscore); normalized by the
            # maximum possible score
            score=0.
            mscore=0.
            for j in range(len(primer)):
                if (primer[j]==hit[j]):
                    score += self.pscore(len(primer), j)
                mscore += self.pscore(len(primer), j)
            score /= mscore
            i['score'] = score
        auto_hits.sort(lambda x,y: cmp(y['score'], x['score']))
        allo_hits.sort(lambda x,y: cmp(y['score'], x['score']))
        try:
            X = allo_hits[0]['score']
        except IndexError:
            X = 0.
        try:
            # index 1 presumably skips the best self-hit (the primer's
            # own location) -- TODO confirm
            Y = auto_hits[1]['score']
        except IndexError:
            Y = 0.
        return X,Y
commands.append(sprimers)
########################################################################
class staden2fasta(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Converts a STADEN GAP4 dump file to fasta.'
description = 'The file must have been generated using the command\
`dump contig to file` of the GAP4 contig editor. This command will\
generate a fasta alignment file, padding sequences with `?` whenever\
necessary.'
options = [
Option('input', 'Input dump file', str, []),
Option('output', 'Output fasta alignment file', str, []),
Option('consensus', 'Defines what should be done with the\
sequence named `CONSENSUS`. Three values are possible: `remove`:\
removes the `CONSENSUS` sequence (if it is present); `keep`: keeps the\
`CONSENSUS` sequence (if it is present) and `only`: removes all other\
sequences and keeps only the `CONSENSUS` sequence (it must be present)',
str, 'remove', [lambda x: x in set(['remove', 'keep', 'only'])])
]
####################################################################
flags = []
####################################################################
def _run(self, *fargs, **kwargs):
f = open(kwargs['input'])
string = f.read()
f.close()
try:
staden = egglib_binding.Staden.parse(string, False)
except IOError:
raise IOError, 'cannot read GAP4 dump file from %s' %kwargs['input']
align = data.Align()
for i in range(staden.ns()):
align.append(staden.name(i), staden.sequence(i))
if kwargs['consensus'] == 'remove':
i = align.find('CONSENSUS')
if i != None:
del align[i]
if kwargs['consensus'] == 'only':
i = align.find('CONSENSUS')
if i == None:
raise ValueError, '`CONSENSUS` is not in %s' %kwargs['input']
CONSENSUS = align[i]
align.clear()
align.append(*CONSENSUS)
if not self.quiet:
print 'number of sequences: %d' %align.ns()
print 'length of alignment: %d' %align.ls()
align.write(kwargs['output'])
commands.append(staden2fasta)
########################################################################
class translate(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Translates coding sequences to protein sequences.'
description = 'The `quiet` argument is ignored.'
options = [
Option('input', 'Input fasta file', str, '', []),
Option('output', 'Output fasta file', str, '', []),
Option('code', 'Genetic code specification. Should be an integer\
among the available values. Use the flag `codes` to display the list of\
avalaible genetic codes. The default corresponds to the standard code',
int, 1, [])
]
####################################################################
flags = [('codes', 'displays available genetic codes')]
####################################################################
def _run(self, *fargs, **kwargs):
if 'codes' in fargs:
print 'Available codes:'
for i, short, long in tools.GeneticCodes.codes():
print str(i).rjust(3)+':', tools.wrap(long, 72, 5)
else:
if kwargs['input']=='':
raise ValueError, 'error: input argument must be given'
if kwargs['output']=='':
raise ValueError, 'error: output argument must be given'
fasta = data.Container(kwargs['input'])
fasta = tools.translate(fasta, kwargs['code'])
fasta.write(kwargs['output'])
commands.append(translate)
########################################################################
class ungap(BaseCommand):
"""
Documentation
"""
####################################################################
brief = 'Removes gaps from a sequence alignment.'
description = 'This command either removes all gaps from an\
alignment (break the alignment) or removes alignment positions (column)\
where the frequency of gaps is larger than a given threshold.'
options = [
Option('input', 'Input fasta file', str, None, []),
Option('output', 'Output fasta file', str, None, []),
Option('threshold', 'Proportion giving the threshold frequency\
for removing gaps. All sites for which the frequency of gaps is equal\
to or larger than the specified values will be removed. A value of 0\
will remove all sites and a value of 1 will remove only columns\
consisting of gaps only. If the flag `all` is used, the value of this\
option is ignored',
float, 1, [lambda x: x<=1. and x>=0.])
]
####################################################################
flags = [('all', 'Removes all gaps of the alignment - regardless of\
the value of the `threshold` argument. The output sequences will not\
be aligned anymore (save for special cases)'),
('triplets', 'Removes only complete triplets (the alignment length\
must be a multiple of 3)')]
####################################################################
def _run(self, *fargs, **kwargs):
align = data.Align(kwargs['input'])
if not self.quiet:
print 'alignment length: %d' %align.ls()
if 'all' in fargs:
result = tools.ungap_all(align)
if not self.quiet:
print 'longest sequence: %d' %max([result.ls(i) for i in range(len(result))])
else:
if 'triplets' in fargs:
result = tools.ungap_triplets(align, kwargs['threshold'])
else:
result = tools.ungap(align, kwargs['threshold'])
if not self.quiet:
print 'new alignment length: %d' %result.ls()
result.write(kwargs['output'])
commands.append(ungap)
########################################################################
class winphyml(BaseCommand):
    """
    Sliding-window tree-likelihood command; see :attr:`brief` and
    :attr:`description` for the user-level documentation.
    """
    ####################################################################
    brief = 'Computes tree likelihood along a sliding window.'
    description = 'This command runs a sliding window along a sequence\
    alignment. For each window, it computes the likelihood of the\
    maximum-likelihood tree along as well as the likelihood of a given set\
    of trees. It can detect regions of a sequence that support a given tree\
    rather than an other. The command expects nucleotide sequences.'
    options = [
        Option('input', 'Input fasta file', str, None, []),
        Option('trees', 'Input newick file containing one or more trees',
            str, None, []),
        Option('output', 'Main output file name', str, 'winphyml.csv', []),
        Option('wsize', 'Window size', int, 500, [lambda x: x>=10]),
        Option('wstep', 'Window step length', int, 100, [lambda x: x>=1]),
    ]
    ####################################################################
    flags = [('savetrees', 'Saves the maximum-likelihood tree for each\
    windows. Each window tree will be saved as a file named\
    `<base>_<start>_<end>.tre` when `base` is the name of the main output\
    file minus the extension if there is one, and `start` and `end` are the\
    limits of the window. With default values, the trees will be saved as\
    `winphyml_1_200`, `winphyml_21_220`, etc.')]
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Run PhyML on successive windows of the input alignment and
        write one CSV row per window (window bounds, ML likelihood,
        then the likelihood of each user-supplied tree).
        """
        # import data
        align = data.Align(kwargs['input'], groups=True)
        f = open(kwargs['trees'])
        string = f.read().strip()
        f.close()
        trees = [data.Tree(string=bit+';') for bit in string.split(';')[:-1]]
        if not self.quiet:
            print('sequence data: %d sequences - %d sites' % (align.ns(), align.ls()))
            print('number of input trees: %d' % len(trees))
        # check that the leaf names of each tree match the alignment
        names = sorted(align.names())
        for tree in trees:
            if sorted(tree.all_leaves()) != names:
                print('')
                print(tree.all_leaves())
                print(names)
                raise ValueError('error: the names of the alignment don\'t match those of all trees')
        # initializes output file
        output = open(kwargs['output'], 'w')
        output.write(','.join(['Wstart', 'Wend','Best tree']+
            ['Tree %i' %(i+1) for i in range(len(trees))])+'\n')
        # defines window bounds (start positions advance by wstep)
        bounds = []
        i = 0
        while i < align.ls():
            bounds.append((i, i+kwargs['wsize']))
            i += kwargs['wstep']
        if not self.quiet:
            # bug fix: this message used to be printed even in quiet mode
            print('%d windows will be processed' % len(bounds))
            updater = tools.Updater(len(bounds))
        # loops over windows
        pos = kwargs['output'].rfind('.')
        # bug fix: when the output name has no extension, rfind() returns
        # -1 and the old slice silently dropped the name's last character
        root = kwargs['output'][:pos] if pos != -1 else kwargs['output']
        c = 0
        for start, end in bounds:
            c += 1
            if not self.quiet:
                updater.refresh('window %d: %d - %d [->%d] $ELAPSED\
                [$REMAINING]' %(c,start,end,align.ls()), increment=0, grain=-1)
            window = align.extract(start, end)
            ml_tree, lk0 = wrappers.phyml(window, model='GTR', rates=1,
                boot=0, quiet=True)
            if 'savetrees' in fargs:
                ml_tree.write('%s_%d_%d.tre' % (root, start+1, end+1))
            lk1 = []
            for tree in trees:
                tree, lki = wrappers.phyml(window, topo=tree, model='GTR',
                    rates=1, boot=0, quiet=True)
                lk1.append(lki)
            output.write(','.join(map(str, [start,end,lk0] + lk1)) + '\n')
            output.flush()
            if not self.quiet:
                updater.increment(1)
        # finishes
        if not self.quiet:
            updater.refresh('done', grain=0)
            updater.close()
        output.close()
commands.append(winphyml)
########################################################################
class phyml(BaseCommand):
    """
    Maximum-likelihood tree-reconstruction command (PhyML wrapper with
    optional bootstrap); see :attr:`brief` and :attr:`description`.
    """
    ####################################################################
    brief = 'Performs maximum-likelihood phylogenetic reconstruction.'
    description = 'This command reconstructs a phylogenetic tree and,\
    optionnally, performs bootstrap repetitions. Crashes occurring during\
    the bootstrap procedure due to estimation problems are ignored,\
    allowing to complete the run. The substitution model name implies data\
    type (HKY85, JC69, K80, F81, F84, TN93 and GTR imply nucleotides,\
    others imply amino acids). `LG` is default for amino acids in the\
    stand-alone `phyml` software.'
    options = [
        Option('input', 'Input alignment fasta file', str, None, []),
        Option('output', 'Output newick tree file', str, None, []),
        Option('boot_trees', 'Output newick tree file for bootstrap\
    trees (by default, no file)', str, '', []),
        Option('boot', 'Number of bootstrap repetions', int, 0,
            [lambda x: x>=0]),
        Option('model', 'substitution model name', str, 'HKY85', []),
        Option('rates', 'number of gamma substitution rate categories',
            int, 1, [lambda x: x>0]),
        Option('search', 'tree topology search operation option (`NNI`,\
    `SPR` (slower) or `BEST`', str, 'NNI', [lambda x: x in ['NNI', 'SPR',
            'BEST']]),
        Option('start', 'name of a file containing the tree topology to\
    use as starting tree (by default, use a distance tree)', str, '', []),
        Option('recover', 'name of a file containing already generated\
    bootstrap trees - must contain the same taxa and can be the same as\
    `boot_trees` (ignored if `boot` is 0)', str, '', [])
    ]
    ####################################################################
    flags = []
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Build the main ML tree, optionally run (or recover) bootstrap
        repetitions, annotate node frequencies and write the result.
        """
        # imports data
        align = data.Align(kwargs['input'], groups=True)
        if kwargs['start'] != '':
            start = data.Tree(kwargs['start'])
            start.clean_edge_lengths()
            start.clean_internal_labels()
            if sorted(start.all_leaves()) != sorted(align.names()):
                raise ValueError('the starting tree must have the same taxon names as the input alignment')
        else:
            start = None
        # builds main tree; let PhyML be verbose only when no bootstrap
        # is requested (bootstrap progress is reported here instead)
        if self.quiet == False and kwargs['boot']==0:
            quiet = False
        else:
            quiet = True
        main_tree, lk = wrappers.phyml(align,
            model=kwargs['model'],
            search=kwargs['search'],
            rates=kwargs['rates'],
            boot=0,
            start=start,
            quiet=quiet)
        # performs bootstrap
        if kwargs['boot'] > 0:
            trees = []
            if kwargs['recover'] != '':
                f = open(kwargs['recover'])
                for line in f:
                    tree = data.Tree(string=line)
                    if sorted(tree.all_leaves()) != sorted(main_tree.all_leaves()):
                        raise ValueError('the leaves in the recovered bootstrap tree do not correspond to the alignment!')
                    trees.append(tree)
                f.close()
            if kwargs['boot_trees'] != '':
                # note: recovered trees are re-written, so recover and
                # boot_trees may legally name the same file
                f = open(kwargs['boot_trees'], 'w')
                for tree in trees:
                    f.write(str(tree) + '\n')
                    f.flush()
            random = egglib_binding.Random()
            while len(trees) < kwargs['boot']:
                if not self.quiet:
                    print('bootstrap %d of %d...' % (len(trees)+1, kwargs['boot']))
                l = align.ls()
                # NOTE(review): if irand(n) samples 0..n-1, irand(l-1)
                # can never draw the last site -- confirm irand() bounds
                sites = [random.irand(l-1) for i in range(l)]
                sites.sort()
                boot = data.Align()
                for seq in align:
                    boot.append(seq.name, ''.join([seq.sequence[i] for i in sites]))
                try:
                    tree, lk = wrappers.phyml(boot,
                        model=kwargs['model'],
                        search=kwargs['search'],
                        rates=kwargs['rates'],
                        boot=0,
                        quiet=quiet)
                except RuntimeError:
                    # estimation crashes during bootstrap are deliberately
                    # ignored so the run can complete (see description)
                    if not self.quiet:
                        print(' .. failed! ignoring...')
                    continue
                if kwargs['boot_trees'] != '':
                    f.write(str(tree) + '\n')
                    f.flush()
                trees.append(tree)
            if kwargs['boot_trees'] != '':
                f.close()
            main_tree.frequency_nodes(trees)
        main_tree.write(kwargs['output'])
        if not self.quiet:
            print('done')
commands.append(phyml)
########################################################################
class diffscan(BaseCommand):
    """
    Fst-outlier scan (Beaumont & Nichols 1996 FDIST-style approach);
    see :attr:`brief` and :attr:`description` for details.
    """
    ####################################################################
    brief = 'Scans loci for signatures of adaptive differentiation.'
    description = 'This command applies the method of Beaumont and\
    Nichols 1996 (Proceedings R Soc. Biol. Sci. 263:1619-1626). It uses a\
    large number of loci to estimate the genome-wide between-population\
    index of fixation based on Weir and Cockerham 1984 (Evolution\
    18:1358-1370; equation 10). The fixation index is called theta in Weir\
    and Cockerham but we retain the Fst terminology here.\
    \n\
    The coalescent migration rate 4Nm (here called M) is estimated as:\n\
    \n\
    M = (1-Fst)*(d-1)/(Fst*d),\n\
    \n\
    assuming an island model where `d` is the number of populations in the\
    system. Coalescent simulations are performed assuming the number of\
    populations, the actual set of samples and assuming a single mutation\
    per locus.\n\
    \n\
    Input file:\n\
    \n\
    The input file is a string of one or more loci. Each locus is\
    represented by populations (demes in Weir and Cockerham). There must\
    be at least two populations. The number of populations must be\
    consistent over loci. Note that white lines are ignored throughout the\
    file and can be used as separators but are not required and need not to\
    be homogeneously used. Spaces and tabs can also be used to align the\
    file and are ignored when they occur at the beginning and end of lines.\n\
    \n\
    Comments:\n\
    \n\
    Comments are lines starting with with a hash (`#`) symbol. White\
    spaces before the hash are ignored. Comments cannot be placed at the\
    end of lines containing data.\n\
    \n\
    Loci:\n\
    \n\
    Loci take a single line each. The type of the locus is given by\
    reserved symbols. `$` denotes reference loci (they will be used for\
    computing genome-wide parameters and tested) and `%` denotes candidate\
    loci (which will be skipped for estimating genome-wide parameters).\
    Type symbol must appear before data. An optional label can be placed\
    before the symbol. Labels are used to name the locus (by default, an\
    automatic label based on its rank is be applied). The same label might\
    be used several times and labelled and unlabelled loci might be mixed.\
    Labels cannot start by a hash (`#`) symbol, otherwise the line is taken\
    as a comment. Labels cannot contain the dollar (`$`) and percent (`%`)\
    symbols. The general structure is therefore: `[label]$ data` for\
    reference loci and `[label]% data` for test loci. See definition of\
    data and example below.\n\
    \n\
    Locus data:\n\
    \n\
    Locus data is given by pairs of allele counts, one for each\
    population. The number of populations must be the same across loci.\
    The alleles are provided in an arbitrary order.\
    Counts for both alleles must be provided as two integer values\
    separated by a comma (`,`). Population (ie, pairs counts) are\
    separated from each other and from the type symbol by any number of\
    spaces (tabs are also supported). Frequencies must be 0 or more.\
    Unsampled populations (`0,0`) are allowed. (If a population is\
    missing for all loci, better use the `k` option to specify the real\
    number of populations). The first and second alleles are equivalent (no\
    orientation) but they must be the same across all populations of a\
    given locus.\n\
    \n\
    An example of the input file is provided below. This data set comprises\
    two reference loci and one locus to be used for testing only, a total\
    of five populations with varying sample size.\n\
    \n\
    # A comment\n\
    Reference locus 1 $ 10,4 4,1 5,0 12,1 6,3\n\
    Reference locus 2 $ 4,5 3,2 2,4 4,6 3,5\n\
    Test locus 1 % 15,1 8,0 3,1 2,12 0,10\n\
    \n\
    The command will first perform the number of requested simulations,\
    unless the argument `load_simuls` is set. In this case, simulations will\
    be imported from a text files containing He, Fst values (one pair per\
    line). In this case, simulations parameters (`simuls` and `step`) will\
    be ignored. The option `save_simuls` (inactive by default) allows to\
    save simulations and to import them in a following run, eg for trying\
    out different values of the binarization factor.\n\
    \n\
    The final file contains, for all loci, its He and Fst value, and the\
    p-value derived from the distribution.'
    options = [
        Option('input', 'Genotypes iput file (see description above)', str, None, []),
        Option('output', 'Test results output file', str, None, []),
        Option('plot', 'Graphical output file (requires matplotlib)', str, '', []),
        Option('k', 'Number of populations. By default (0), the number\
        of populations in the data set is used. If specified, the value must be\
        at least equal to the number of populations in the data set', int, 0, [lambda x: x>=0]),
        Option('simuls', 'Number of simulation rounds', int, 10000, []),
        Option('step', 'Number of simulation between screen update', int, 100, []),
        Option('bins', 'Number of bins in the [0,0.5] heterozygosity range', int, 12, [lambda x: x>1]),
        Option('save_simuls', 'Name of file where to save simulations (default: no save)', str, '', []),
        Option('load_simuls', 'Name of file where to import simulations (this will skip simulations; default: perform simulations)', str, '', [])
    ]
    ####################################################################
    flags = []
    ####################################################################
    def _run(self, *fargs, **kwargs):
        """
        Pipeline driver: parse input, compute per-locus/global stats,
        simulate (or load) the null distribution, bin it, optionally
        plot, then test each locus.
        """
        if not self.quiet: print '<diffscan>'
        # imports data
        self.parse(kwargs['input'])
        # checks number of populations
        if not self.quiet:
            print '> Number of populations: {0}'.format(self.k)
        if kwargs['k']!=0:
            if kwargs['k'] < self.k:
                raise ValueError, 'Argument `k` is too small!'
            self.k = kwargs['k']
            # NOTE(review): this print is not guarded by self.quiet,
            # unlike every other feedback message in this class
            print '> Actual number of populations: {0}'.format(self.k)
        # computes fstatistics
        self.stats()
        # determines all available sampling schemes (per-population
        # sample sizes of each reference locus)
        self.sam = []
        for locus, ref in zip(self.loci, self.types):
            if ref==False:
                continue
            self.sam.append(map(sum, locus))
        if len(self.sam) != self.n1:
            raise RuntimeError, 'Internal error'
        # performs simulations if needed
        if kwargs['load_simuls'] == '':
            self.simulate(kwargs['simuls'], kwargs['step'], kwargs['save_simuls'])
        else:
            self.import_simuls(kwargs['load_simuls'])
        # discretizes
        self.discr(kwargs['bins'])
        # plot if needed
        if kwargs['plot'] != '': self.plot(kwargs['plot'])
        # perform tests
        self.test(kwargs['output'])
    ####################################################################
    def parse(self, fname):
        """
        Read the locus file into self.types / self.labels / self.loci
        and set self.k (number of populations), self.n1 (reference
        loci) and self.n2 (test loci).
        """
        self.data = []
        self.k = set()
        self.nb_comments = 0
        self.line_c = 0
        self.n1 = 0
        self.n2 = 0
        if not self.quiet:
            print '> Reading data from: {0}'.format(fname)
        f = open(fname)
        for line in f:
            self.line_c += 1
            # remove leading and trailing white spaces
            line = line.strip()
            # skip comments
            # NOTE(review): a blank line makes line[0] raise IndexError,
            # although the description says white lines are ignored
            if line[0] == '#':
                self.nb_comments += 1
                continue
            # determine type, extract data
            dollar = line.count('$')
            percen = line.count('%')
            if dollar>1: raise ValueError, 'Invalid format: line {0} contains more than one $'.format(self.line_c)
            # NOTE(review): the message below says "$" but this check is
            # about "%" -- copy/paste error in the error text
            if percen>1: raise ValueError, 'Invalid format: line {0} contains more than one $'.format(self.line_c)
            if dollar+percen>1: raise ValueError, 'Invalid format: line {0} contains both $ and %'.format(self.line_c)
            if dollar+percen==0: raise ValueError, 'Invalid format: line {0} contains none of $ and % and is not comment'.format(self.line_c)
            if dollar==1:
                symb = '$'
                reference = True
                self.n1 += 1
            elif percen==1:
                symb = '%'
                reference = False
                self.n2 += 1
            else: symb = None
            # unlabelled locus: the type symbol is the first character
            if symb == line[0]:
                label = None
                data = line[1:]
            else:
                line = line.split(symb)
                if len(line) != 2:
                    raise ValueError, 'Invalid format: line {0} does not have a valid structure'.format(self.line_c)
                label = line[0].strip()
                data= line[1].strip()
            self.data.append([reference, label, []])
            # processes populations (each 'a,b' pair of allele counts)
            data = data.split()
            k = 0
            for pop in data:
                k += 1
                pop = pop.split(',')
                if len(pop) != 2:
                    raise ValueError, 'Invalid format: line {0}, population {1} does not contain two items'.format(self.line_c, k)
                try:
                    freq = map(int, pop)
                except ValueError:
                    raise ValueError, 'Invalid format: line {0}, population {1} - does not contain integers'.format(self.line_c, k)
                self.data[-1][2].append(freq)
            self.k.add(k)
        f.close()
        # general checking
        if len(self.k) == 0:
            raise ValueError, 'Invalid format: no locus defined'
        if len(self.k) != 1:
            raise ValueError, 'Invalid format: inconsistent number of populations'
        self.k = self.k.pop()
        if self.k < 2:
            raise ValueError, 'Invalid format: unsufficient number of populations'
        if len(self.data)!=self.n1+self.n2:
            raise ValueError, 'Invalid format: internal error'
        if len(self.data)==0:
            raise ValueError, 'Invalid format: empty dataset'
        if self.n1 == 0:
            raise ValueError, 'Not enough reference loci: cannot continue'
        # feedback
        if not self.quiet:
            print '> Number of read lines (including comments): {0}'.format(self.line_c)
            print '> Number of reference loci: {0}'.format(self.n1)
            print '> Number of test loci: {0}'.format(self.n2)
            print '> Total number of loci: {0}'.format(self.n1 + self.n2)
        # reformat data
        self.types, self.labels, self.loci = map(list, zip(*self.data))
        del self.data
        # generate default names, zero-padded so they sort naturally
        L = len(str(len(self.loci)))
        for i in range(len(self.loci)):
            if self.labels[i]==None:
                self.labels[i] = 'locus{0}'.format(str(i+1).rjust(L, '0'))
    ####################################################################
    def stats(self):
        """
        Compute per-locus allele frequency p, He and Fst, plus the
        genome-wide Fst (from reference loci only) and the derived
        island-model migration rate self.M.
        """
        self.locus_Fst = []
        self.locus_He = []
        self.locus_p = []
        T1 = 0.
        T2 = 0.
        for reference, locus in zip(self.types, self.loci):
            hfs = egglib_binding.HFStatistics()
            n = sum(map(sum,locus))
            p = 0
            hfs.reserve( n )
            for pop, (A, B) in enumerate(locus):
                p += B
                for j in range(A):
                    hfs.loadIndividual(0, pop)
                for j in range(B):
                    hfs.loadIndividual(1, pop)
            p = 1.*p/n
            self.locus_p.append(p)
            self.locus_He.append(2*p*(1-p))
            t1 = hfs.T1()
            t2 = hfs.T2()
            if t2>0: self.locus_Fst.append(t1/t2)
            else: self.locus_Fst.append(None)
            # only reference loci contribute to the genome-wide estimate
            if reference==True:
                T1 += t1
                T2 += t2
        # computes F-stat
        if T2==0:
            raise ValueError, 'Cannot estimate Fst: no variance'
        Fst = T1/T2
        # deduce M (island-model 4Nm, see class description)
        if Fst == 0:
            raise ValueError, 'Cannot estimate M: Fst is null'
        self.M = (1.-Fst)*(self.k-1)/(Fst*self.k)
        # feedback
        if not self.quiet:
            print '> Estimated Fst: {0}'.format(Fst)
            print '> Estimated M: {0}'.format(self.M)
        # NOTE(review): M was already computed (and printed) above even
        # when Fst is negative; the run only aborts here
        if Fst < 0:
            raise ValueError, 'Fst is negative - cannot continue'
    ####################################################################
    def simulate(self, nrepet, step, fname):
        """
        Fill self.distri with nrepet simulated (He, Fst) pairs using
        coalescent simulations (one mutation per locus), optionally
        mirroring each pair to the file fname.
        """
        self.distri = []
        if not self.quiet:
            print '> Performing {0} simulations now'.format(nrepet)
        if fname != '':
            print '> Saving simulations to {0}'.format(fname)
            f = open(fname, 'w')
        while len(self.distri)<nrepet:
            # pick a sampling scheme and complete with unsample populations
            # NOTE(review): extend() mutates the chosen list inside
            # self.sam in place (harmless after the first pick, since the
            # list then already has length self.k)
            sam = random.choice(self.sam)
            sam.extend([0] * (self.k - len(sam)))
            if len(sam) != self.k:
                raise RuntimeError, 'Internal error'
            # makes <step> simulations with these parameters
            ps = simul.CoalesceParamSet(sam, M=self.M)
            mu = simul.CoalesceFiniteAlleleMutator(0)
            mu.fixedNumberOfMutation(1)
            dms = simul.coalesce(ps, mu, step, convert=False)
            for dm in dms:
                # generate the simulated table and analyze
                if sum(sam) != dm.numberOfSequences():
                    raise RuntimeError, 'Internal error'
                hfs = egglib_binding.HFStatistics()
                hfs.reserve(sum(sam))
                p = 0
                for i in range(dm.numberOfSequences()):
                    allele = dm.get(i, 0)
                    label = dm.populationLabel(i)
                    hfs.loadIndividual(allele, label)
                    p += allele
                p = 1.*p/sum(sam)
                He = 2 * p * (1-p)
                Fst = hfs.theta()
                self.distri.append((He, Fst))
                if fname != '':
                    f.write('{0} {1}\n'.format(He, Fst))
                if len(self.distri)==nrepet:
                    break
            if not self.quiet:
                print ' --- {0} simulations of {1} ---'.format(len(self.distri), nrepet)
        if fname != '':
            f.close()
        if not self.quiet:
            print '> Simulation finished'
    ####################################################################
    def import_simuls(self, fname):
        """
        Load previously saved (He, Fst) pairs from fname into
        self.distri instead of simulating.
        """
        if not self.quiet:
            print '> Loading simulations from {0}'.format(fname)
        self.distri = []
        f = open(fname)
        c = 0
        for line in f:
            c += 1
            bits = line.split()
            if len(bits) != 2:
                raise IOError, 'Cannot read {0}: invalid number of tokens line {1}'.format(fname, c)
            try:
                a, b = map(float, bits)
            except ValueError:
                # NOTE(review): message is garbled and says "integers"
                # although the tokens are parsed as floats
                raise IOError, 'Cannot read {0}: invalid tokens are line {1} are not integers'.format(fname, c)
            self.distri.append((a,b))
        f.close()
        if not self.quiet:
            print '> {0} simulated imported'.format(c)
    ####################################################################
    def discr(self, bins):
        """
        Bin the simulated distribution by He over [0, 0.5], record per-bin
        median Fst, and extract the 1/5/50/95/99% quantile curves into
        self.L1..self.L5 (only for sufficiently populated bins).
        """
        p = 0.01, 0.05, 0.5, 0.95, 0.99
        if not self.quiet:
            print '> Binarizing'
        # binarizes the distribution
        self.distri.sort(lambda x,y: cmp(x[0],y[0]))
        self.bins = []
        c = 0
        step = 0.5/bins
        while len(self.distri)>0:
            self.bins.append(((c, c + step/2., c+step), [None, []]))
            while len(self.distri)>0 and self.distri[0][0] <= c + step:
                self.bins[-1][1][1].append(self.distri.pop(0)[1])
            # finds median Fst
            # NOTE(review): the Fst list is NOT sorted at this point
            # (it is sorted only later, per bin), so this picks a
            # positional midpoint of unsorted values, not a true median;
            # also, for even L the conventional middle elements are
            # L//2-1 and L//2, not L//2 and L//2+1 -- confirm intent
            L = len(self.bins[-1][1][1])
            if L >= 100:
                if L%2==1:
                    self.bins[-1][1][0] = self.bins[-1][1][1][L//2]
                else:
                    a = self.bins[-1][1][1][L//2]
                    b = self.bins[-1][1][1][L//2+1]
                    self.bins[-1][1][0] = a+(b-a)/2
            c += step
        # computes quantiles of each vector
        self.L1 = []
        self.L2 = []
        self.L3 = []
        self.L4 = []
        self.L5 = []
        for (beg, mid, end), (median,dist) in self.bins:
            dist.sort()
            L = len(dist)
            q = [i*L for i in p]
            q[0] = int(math.floor(q[0]))
            q[1] = int(math.floor(q[1]))
            q[2] = int(math.floor(q[2]))
            q[3] = int(math.ceil(q[3]))
            q[4] = int(math.ceil(q[4]))
            if L >= 1000: self.L1.append((mid, dist[q[0]]))
            if L >= 200: self.L2.append((mid, dist[q[1]]))
            if L >= 20: self.L3.append((mid, dist[q[2]]))
            if L >= 200: self.L4.append((mid, dist[q[3]]))
            if L >= 1000: self.L5.append((mid, dist[q[4]]))
    ####################################################################
    def plot(self, fname):
        """
        Plot the quantile envelopes and the observed loci (squares for
        reference loci, circles for test loci) to the file fname.
        """
        if not self.quiet:
            print '> Plotting to {0}'.format(fname)
        try:
            from matplotlib import pyplot
        except ImportError:
            raise ValueError, 'Cannot plot: the matplotlib module is not available'
        # plot quantile lines
        if len(self.L1):
            x, y = zip(*self.L1)
            pyplot.plot(x, y, 'k:')
        if len(self.L2):
            x, y = zip(*self.L2)
            pyplot.plot(x, y, 'k--')
        if len(self.L3):
            x, y = zip(*self.L3)
            pyplot.plot(x, y, 'k-')
        if len(self.L4):
            x, y = zip(*self.L4)
            pyplot.plot(x, y, 'k--')
        if len(self.L5):
            x, y = zip(*self.L5)
            pyplot.plot(x, y, 'k:')
        # plot points
        x0 = [i for (i,j) in zip(self.locus_He, self.types) if j]
        x1 = [i for (i,j) in zip(self.locus_He, self.types) if not j]
        y0 = [i for (i,j) in zip(self.locus_Fst, self.types) if j]
        y1 = [i for (i,j) in zip(self.locus_Fst, self.types) if not j]
        pyplot.plot(x0, y0, 's', mfc='None', mec='b')
        pyplot.plot(x1, y1, 'o', mfc='None', mec='r')
        pyplot.xlabel('He')
        pyplot.ylabel('Fst')
        pyplot.savefig(fname)
        pyplot.clf()
    ####################################################################
    def test(self, fname):
        """
        Compute a two-sided empirical p-value for each locus against
        its He bin's Fst distribution and write a TSV report to fname.
        """
        if not self.quiet:
            print '> Performing tests'
        f = open(fname, 'w')
        f.write('Locus\tp\tHe\tFst\tp-value\n')
        c1 = 0
        c2 = 0
        t = 0
        for typ, label, p, He, Fst in zip(self.types, self.labels,
                self.locus_p, self.locus_He, self.locus_Fst):
            if Fst==None:
                f.write('{0}\t{1}\t{2}\tNA\tNA\n'.format(label, p, He))
                continue
            # spot right category
            for (beg, mid, end), (median, data) in self.bins:
                # bins with fewer than 100 simulated values abort the
                # search (bins are scanned in increasing He order)
                if len(data)<100:
                    P = 'NA'
                    break
                if He >= beg and He <= end:
                    # symmetric interval around the bin median; the
                    # p-value counts values outside [lims[0], lims[1]]
                    lims = [None,None]
                    if Fst < median:
                        lims[0] = Fst
                        lims[1] = median + (median-Fst)
                    else:
                        lims[0] = median - (Fst-median)
                        lims[1] = Fst
                    P = 0
                    c = 0
                    while data[c]<=lims[0]:
                        c+=1
                        P+=1
                    c = -1
                    while data[c]>=lims[1]:
                        c-=1
                        P+=1
                    P = 1. * P / len(data)
                    t += 1
                    if typ:
                        if P<0.05: c1+=1
                        if P<0.01: c2+=1
                    break
            else:
                raise ValueError, 'Internal error - cannot identify bin for {0}'.format(He)
            f.write('{0}\t{1}\t{2}\t{3}\t{4}\n'.format(label, p, He, Fst, P))
        if not self.quiet:
            print '> Number of loci: {0}'.format(len(self.labels))
            print '> Number of loci tested: {0}'.format(t)
            if t>0:
                print '> Reference loci significant at 5%: {0} ({1:.2%})'.format(c1,1.*c1/t)
                print '> Reference loci significant at 1%: {0} ({1:.2%})'.format(c2,1.*c2/t)
        f.close()
    ####################################################################
commands.append(diffscan)
########################################################################
dcommands = dict(zip([i.__name__ for i in commands], commands))
########################################################################
def execute(*fargs, **fkwargs):
    """
    Execute utils commands. This functions takes arguments to specify
    the command name and its options. There must be at least one
    positional argument: the first positional argument gives the
    function name and other (optional) positional arguments give the
    command flags to be activated. The keyword arguments give the
    command options. Flag, option names but also option values should be
    string (option values will be converted automatically). In case
    options are of a simple type (int, float), they can be passed as
    such. But options that are described as a string presenting a list
    of values separated by commas CANNOT be passed as a list using the
    function. If there is no argument whatsoever, arguments will be read
    from :data:`sys.argv`. In this case, the first argument will be
    ignored; the second argument must be the command name; and keyword
    arguments must be passed under the form ``key=value`` where *key* is
    the option name.
    For example, running the command::
        egglib ungap input=file1.fas output=file2.fas threshold=0.5 triplets
    is the equivalent of calling the function::
        >>> egglib.utils.execute('ungap', 'triplets', input='file1.fas', output='file2.fas', threshold=0.5)
    where *threshold* can also take the string ``"0.5"``.
    .. versionchanged 2.1.0:: Accepts arguments in order to be run from
    python scripts.
    """
    global commands
    # explicit call: first positional arg is the command, keyword args
    # are re-serialized to 'key=value' command-line tokens
    if len(fargs):
        command = fargs[0]
        args = list(fargs[1:]) + [str(i)+'='+str(fkwargs[i]) for i in fkwargs]
    else:
        if len(sys.argv)==1: command = None
        else: command = sys.argv[1]
        args = sys.argv[2:]
    # if no argument at all, print script doc
    if command is None:
        from . import version
        commandstring = 'Available commands: %s' % ', '.join(sorted(dcommands))
        commandstring = tools.wrap(commandstring, 72, 8)
        print(__license__)
        print("""Version number: %s
Usage:
    egglib <command> [<options> [debug] [quiet]]
%s""" %(version, commandstring))
        return
    else:
        if command not in dcommands:
            sys.stderr.write('error - command not available\n')
        else:
            cmd = dcommands[command]
            # runs a particular command
            # if command requested without argument, prints specific doc
            if len(args)==0:
                print(cmd.doc())
            # runs the command
            else:
                obj = cmd()
                flags, options = obj.process_cmdline_arguments(args)
                try:
                    obj.run(*flags, **options)
                # 'except ... as e' replaces the Python-2-only comma form
                except Exception as e:
                    # extract the bare exception name from the Python 2
                    # repr of builtin exception classes; on interpreters
                    # where the pattern does not match, s stays empty
                    a = re.match('<type \'exceptions.(.+)\'>',str(e.__class__))
                    if a: s= '[%s] ' %a.group(1)
                    else: s=''
                    sys.stderr.write( 'An error occurred: %s%s\n' %(s,str(e)))
                    # in debug mode re-raise to get the full traceback
                    if obj.debug: raise
                    sys.exit(-1)
########################################################################
# script entry point: dispatch based on sys.argv
if __name__ == '__main__':
    execute()
|
lisp-etr.py | #-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import str
import lisp
import lispconfig
import socket
import select
import threading
import time
import struct
from subprocess import getoutput
import os
try:
import pytun
except:
pytun = None
#endtry
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-etr process.
#
# Periodic and one-shot threading.Timer handles driving Map-Register
# and Info-Request transmission (created lazily by the command handlers).
lisp_register_timer = None
lisp_trigger_register_timer = None
lisp_etr_info_timer = None
# Sockets used by this process (initialized elsewhere in this module).
lisp_ephem_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ipc_listen_socket = None
lisp_send_sockets = [None, None, None]
lisp_raw_socket = None
lisp_l2_socket = None
lisp_mac_header = None
LISP_MAP_REGISTER_INTERVAL = 60 # In units of seconds
#
# Test mode. Allows a batch of database-mapping commands to be read from
# lisp.config before any Map-Registers are sent. When an EID 'eid-done' is
# found (which is placed as the last database-mapping command in lisp.config),
# then lisp_build_map_register() is called via the 5-second delay timer.
#
lisp_etr_test_mode = (os.getenv("LISP_ETR_TEST_MODE") != None)
lisp_seen_eid_done = False
#------------------------------------------------------------------------------
#
# lisp_etr_map_server_command
#
# Configure a Map-Server and trigger ETR functionality.
#
def lisp_etr_map_server_command(kv_pair):
    global lisp_trigger_register_timer
    global lisp_etr_info_timer
    # Parse/store the map-server clause; returns the map-server entry
    # (NOTE(review): presumably None on failure -- confirm in lispconfig).
    ms = lispconfig.lisp_map_server_command(kv_pair)
    #
    # Trigger a Info-Request if we are doing NAT-traversal if this is the
    # first Map-Server..
    #
    first_ms = (len(lisp.lisp_map_servers_list) == 1)
    if (first_ms):
        # Re-fetch the stored entry and schedule a one-shot Info-Request
        # 2 seconds from now.
        ms = list(lisp.lisp_map_servers_list.values())[0]
        lisp_etr_info_timer = threading.Timer(2, lisp_etr_process_info_timer,
            [ms.map_server])
        lisp_etr_info_timer.start()
    else:
        #
        # Trigger Map-Register to newly configured Map-Server.
        #
        # Do not trigger Map-Register if NAT-traveral is configured. We may not
        # have the global RLOC yet from Info-Replies. When the Info-Reply comes
        # in we do trigger Map-Registers to all map-servers.
        #
        if (lisp.lisp_nat_traversal): return
        if (ms and len(lisp.lisp_db_list) > 0):
            lisp_build_map_register(lisp_send_sockets, None, None, ms, False)
        #endif
    #endif
    #
    # Do not start the trigger timer if we are in test-mode. We may already
    # be sending a huge list of Map-Registers after "eid-done".
    #
    if (lisp_etr_test_mode and lisp_seen_eid_done): return
    #
    # Handle case where "lisp database-mapping" comes before "lisp map-server"
    # in configuration file. We have to start periodic timer.
    #
    if (len(lisp.lisp_db_list) > 0):
        # Only one pending trigger at a time.
        if (lisp_trigger_register_timer != None): return
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
#enddef
#
# lisp_etr_database_mapping_command
#
# This function supports adding additional RLOCs to a database-mapping entry
# that already exists.
#
def lisp_etr_database_mapping_command(kv_pair):
    global lisp_register_timer, lisp_trigger_register_timer
    global lisp_send_sockets, lisp_seen_eid_done
    # NOTE(review): declared global but never read or assigned in this
    # function -- likely leftover; confirm against the rest of the file.
    global lisp_seen_eid_done_count
    #
    # This is to fix an issue with the same set of database-mappings being
    # sent a second time. Only in test-mode we don't want to dup process for
    # large numbers of entries.
    #
    if (lisp_seen_eid_done): return
    lispconfig.lisp_database_mapping_command(kv_pair, lisp_ephem_port,
        (lisp_etr_test_mode == False))
    #
    # Trigger Map-Register when all databaase-mappings are configured.
    #
    # Do not trigger Map-Register if NAT-traveral is configured. We may not
    # have the global RLOC yet from Info-Replies. When the Info-Reply comes
    # in we do trigger Map-Registers to all map-servers.
    #
    if (lisp.lisp_nat_traversal): return
    if (lisp_trigger_register_timer != None): return
    #
    # Wait until a large set of database-mapping commands are processed
    # before sending the first set of Map-Registers. Used in test mode only.
    #
    if (lisp_etr_test_mode):
        db_size = len(lisp.lisp_db_list)
        # Progress feedback every 1000 entries.
        if (db_size % 1000 == 0):
            lisp.fprint("{} database-mappings processed".format(db_size))
        #endif
        # The sentinel entry is a distinguished-name EID "eid-done" placed
        # last in lisp.config; until it is seen, keep accumulating.
        db = lisp.lisp_db_list[-1]
        if (db.eid.is_dist_name() == False): return
        if (db.eid.address != "eid-done"): return
        lisp_seen_eid_done = True
        lisp.fprint("Finished batch of {} database-mappings".format(db_size))
        # Fire the register pass immediately (0-second timer keeps it off
        # this thread).
        t = threading.Timer(0, lisp_process_register_timer,
            [lisp_send_sockets])
        lisp_register_timer = t
        lisp_register_timer.start()
        return
    #endif
    if (len(lisp.lisp_map_servers_list) > 0):
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
#enddef
#
# lisp_etr_show_command
#
# Show ETR configured map-servers and database-mappings.
#
def lisp_etr_show_command(clause):
    # Build and return the HTML status page for the ETR ('clause' is the
    # show-command clause; unused here).
    #
    # Show local found RLOCs.
    #
    output = lispconfig.lisp_show_myrlocs("")
    #
    # Show decapsulation stats.
    #
    output = lispconfig.lisp_show_decap_stats(output, "ETR")
    #
    # Show configured map-servers.
    #
    dns_suffix = lisp.lisp_decent_dns_suffix
    if (dns_suffix == None):
        dns_suffix = ":"
    else:
        dns_suffix = " (dns-suffix '{}'):".format(dns_suffix)
    #endif
    hover = "{} configured map-servers".format(len(lisp.lisp_map_servers_list))
    title = "LISP-ETR Configured Map-Servers{}".format(dns_suffix)
    title = lisp.lisp_span(title, hover)
    # NOTE(review): the hover legend documents P, M, N but the flags
    # column below also emits R (refresh-registrations).
    hover = ("P = proxy-reply requested, M = merge-registrations " + \
        "requested, N = Map-Notify requested")
    reg_title = lisp.lisp_span("Registration<br>flags", hover)
    output += lispconfig.lisp_table_header(title, "Address", "Auth-Type",
        "xTR-ID", "Site-ID", reg_title, "Map-Registers<br>Sent",
        "Map-Notifies<br>Received")
    for ms in list(lisp.lisp_map_servers_list.values()):
        ms.resolve_dns_name()
        ms_name = "" if ms.ms_name == "all" else ms.ms_name + "<br>"
        addr_str = ms_name + ms.map_server.print_address_no_iid()
        if (ms.dns_name): addr_str += "<br>" + ms.dns_name
        xtr_id = "0x" + lisp.lisp_hex_string(ms.xtr_id)
        # Uppercase letter means the feature is enabled, lowercase means
        # disabled.
        flags = "{}-{}-{}-{}".format("P" if ms.proxy_reply else "p",
            "M" if ms.merge_registrations else "m",
            "N" if ms.want_map_notify else "n",
            "R" if ms.refresh_registrations else "r")
        registers_sent = ms.map_registers_sent + \
            ms.map_registers_multicast_sent
        output += lispconfig.lisp_table_row(addr_str,
            "sha1" if (ms.alg_id == lisp.LISP_SHA_1_96_ALG_ID) else "sha2",
            xtr_id, ms.site_id, flags, registers_sent,
            ms.map_notifies_received)
    #endfor
    output += lispconfig.lisp_table_footer()
    #
    # Show database-mappings configured.
    #
    output = lispconfig.lisp_show_db_list("ETR", output)
    #
    # Show ELP configuration, if it exists.
    #
    if (len(lisp.lisp_elp_list) != 0):
        output = lispconfig.lisp_show_elp_list(output)
    #endif
    #
    # Show RLE configuration, if it exists.
    #
    if (len(lisp.lisp_rle_list) != 0):
        output = lispconfig.lisp_show_rle_list(output)
    #endif
    #
    # Show JSON configuration, if it exists.
    #
    if (len(lisp.lisp_json_list) != 0):
        output = lispconfig.lisp_show_json_list(output)
    #endif
    #
    # Show group-mappings, if they exist.
    #
    if (len(lisp.lisp_group_mapping_list) != 0):
        title = "Configured Group Mappings:"
        output += lispconfig.lisp_table_header(title, "Name", "Group Prefix",
            "Sources", "Use MS")
        for gm in list(lisp.lisp_group_mapping_list.values()):
            sources = ""
            for s in gm.sources: sources += s + ", "
            if (sources == ""):
                sources = "*"
            else:
                # drop the trailing ", "
                sources = sources[0:-2]
            #endif
            output += lispconfig.lisp_table_row(gm.group_name,
                gm.group_prefix.print_prefix(), sources, gm.use_ms_name)
        #endfor
        output += lispconfig.lisp_table_footer()
    #endif
    return(output)
#enddef
#
# lisp_etr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_etr_show_keys_command(parameter):
    """Return the ETR "show keys" output.

    Thin wrapper around lispconfig.lisp_show_crypto_list(); 'parameter' is
    supplied by the command dispatcher and is not used here.
    """
    keys_output = lispconfig.lisp_show_crypto_list("ETR")
    return(keys_output)
#enddef
#
# lisp_group_mapping_command
#
# Process the "lisp group-mapping" command clause.
#
def lisp_group_mapping_command(kv_pairs):
    """Process the "lisp group-mapping" command clause.

    kv_pairs maps command keywords to their configured values. Builds a
    lisp.lisp_group_mapping from the parsed values and adds it to the
    group-mapping store via add_group().

    Note: 'group_name' is only bound when a "group-name" keyword is present;
    the command parser is expected to always supply it.
    """
    sources = []
    group_prefix = None
    rle_address = None
    ms_name = "all"

    for keyword, value in list(kv_pairs.items()):
        if (keyword == "group-name"):
            group_name = value
        elif (keyword == "group-prefix"):
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.store_prefix(value)
        elif (keyword == "instance-id"):
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.instance_id = int(value)
        elif (keyword == "ms-name"):

            #
            # Value is a list; the map-server name is its first element.
            #
            ms_name = value[0]
        elif (keyword == "address"):

            #
            # Collect all non-empty source addresses.
            #
            for source in value:
                if (source != ""): sources.append(source)
            #endfor
        elif (keyword == "rle-address"):
            if (rle_address == None):
                rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            rle_address.store_address(value)
        #endif
    #endfor

    gm = lisp.lisp_group_mapping(group_name, ms_name, group_prefix, sources,
        rle_address)
    gm.add_group()
    return
#enddef
#
# lisp_build_map_register_records
#
# Build EID and RLOC records to be inserted in a Map-Register message.
#
def lisp_build_map_register_records(quiet, db, eid, group, ttl):
    """Build EID and RLOC records to be inserted in a Map-Register message.

    quiet: when True, suppress per-record debug logging.
    db:    database-mapping entry supplying the RLOC-set and ms-name.
    eid:   EID (unicast, or source of an (S,G)) to register.
    group: group address copied into each EID-record's group field.
    ttl:   record TTL placed in each EID-record.

    Returns (eid_records, count): the packed byte-string of EID-records
    (each followed by its RLOC-records) and the number of EID-records
    built — one for eid.instance_id plus one per entry in eid.iid_list.
    """

    #
    # Don't include RTR-list if there is no NAT in the path but nat-traversal
    # is configured and NAT in path is tested. When there is a NAT, include
    # all RTRs if lisp_register_all_rtrs is configured. Otherwise, if the
    # array element is None, then the RTR is down and should be excluded in
    # the list to register.
    #
    rtr_list = {}
    for rloc_entry in db.rloc_set:
        if (rloc_entry.translated_rloc.is_null()): continue

        for rtr_str in lisp.lisp_rtr_list:
            rtr = lisp.lisp_rtr_list[rtr_str]
            if (lisp.lisp_register_all_rtrs == False and rtr == None):
                lisp.lprint(" Exclude unreachable RTR {}".format( \
                    lisp.red(rtr_str, False)))
                continue
            #endif
            if (rtr == None): continue
            rtr_list[rtr_str] = rtr
        #endfor

        #
        # One RLOC-entry with a translated RLOC is enough to collect the
        # RTR-list; stop after the first such entry.
        #
        break
    #endfor

    count = 0
    eid_records = b""

    #
    # Emit one EID-record per instance-ID the EID is registered under.
    #
    for iid in [eid.instance_id] + eid.iid_list:
        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = len(db.rloc_set) + len(rtr_list)
        eid_record.authoritative = True
        eid_record.record_ttl = ttl
        eid_record.eid.copy_address(eid)
        eid_record.eid.instance_id = iid
        eid_record.eid.iid_list = []
        eid_record.group.copy_address(group)
        eid_records += eid_record.encode()
        if (not quiet):
            prefix_str = lisp.lisp_print_eid_tuple(eid, group)
            decent_index = ""
            if (lisp.lisp_decent_pull_xtr_configured()):
                decent_index = lisp.lisp_get_decent_index(eid)
                decent_index = lisp.bold(str(decent_index), False)
                decent_index = ", decent-index {}".format(decent_index)
            #endif
            lisp.lprint(" EID-prefix {} for ms-name '{}'{}".format( \
                lisp.green(prefix_str, False), db.use_ms_name, decent_index))
            eid_record.print_record(" ", False)
        #endif

        #
        # Append one RLOC-record per configured RLOC in the database-mapping.
        #
        for rloc_entry in db.rloc_set:
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.store_rloc_entry(rloc_entry)
            rloc_record.local_bit = rloc_entry.rloc.is_local()
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record(" ")
        #endfor

        #
        # If we are doing NAT-traversal, include an RLOC-record for each
        # reachable RTR (at priority 254) collected above.
        #
        for rtr in list(rtr_list.values()):
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record(" RTR ")
        #endfor

        #
        # Return to caller number of EID records written to returned buffer.
        #
        count += 1
    #endfor
    return(eid_records, count)
#enddef
#
# lisp_build_map_register
#
# From each configured "database-mapping" command, register mappings to
# configured map-servers.
#
def lisp_build_map_register(lisp_sockets, ttl, eid_only, ms_only, refresh):
    """Build and send Map-Registers for configured "database-mapping" entries.

    lisp_sockets: sockets handed to lisp.lisp_send_map_register().
    ttl:      record TTL; None selects lisp.LISP_REGISTER_TTL.
    eid_only: when not None, register only this single EID.
    ms_only:  when not None, register only with this map-server.
    refresh:  value of the Map-Register refresh flag, applied on
              map-servers configured with refresh-registrations.
    """

    #
    # No database-mapping entries.
    #
    if (eid_only != None):
        db_list_len = 1
    else:
        db_list_len = lisp.lisp_db_list_length()
        if (db_list_len == 0): return
    #endif

    # NOTE(review): both branches print the identical message; the
    # test-mode branch looks like it was meant to differ. Behavior kept.
    if (lisp_etr_test_mode):
        lisp.fprint("Build Map-Register for {} database-mapping entries". \
            format(db_list_len))
    else:
        lisp.fprint("Build Map-Register for {} database-mapping entries". \
            format(db_list_len))
    #endif

    #
    # Set boolean if "decentralized-pull-xtr-[modulus,dns-suffix]" configured.
    #
    decent = lisp.lisp_decent_pull_xtr_configured()

    #
    # Go quiet with debug output when there are a lot of EID-records.
    #
    quiet = (db_list_len > 12)

    # ms_list maps a map-server (or decent DNS) name to a list of
    # [eid_records-bytes, record-count] batches destined for it.
    ms_list = {}
    if (decent):

        #
        # If "decentralized-pull-xtr-[modulus,dns-suffix]" is configured,
        # decide which map-server this EID belongs to (and is registered
        # with).
        #
        for db in lisp.lisp_db_list:
            eid = db.eid if db.group.is_null() else db.group
            dns_name = lisp.lisp_get_decent_dns_name(eid)
            ms_list[dns_name] = []
        #endfor
    else:

        #
        # Set up each map-server name so we can decide which EID-prefixes go
        # to which map-servers. [0] is eid_records and [1] is count.
        #
        for ms in list(lisp.lisp_map_servers_list.values()):
            if (ms_only != None and ms != ms_only): continue
            ms_list[ms.ms_name] = []
        #endfor
    #endif

    #
    # Create data structure instances to build Map-Register message.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    map_register.use_ttl_for_timeout = True
    if (ttl == None): ttl = lisp.LISP_REGISTER_TTL

    #
    # Traverse the database-mapping associative array. Batches are capped
    # at 20 records or 'mtu' bytes of EID-records each.
    #
    mtu = 65000 if (lisp_etr_test_mode) else 1100
    for db in lisp.lisp_db_list:
        if (decent):
            ms_dns_name = lisp.lisp_get_decent_dns_name(db.eid)
        else:
            ms_dns_name = db.use_ms_name
        #endif

        #
        # Is db entry associated with a map-server name that is not
        # configured?
        #
        if (ms_dns_name not in ms_list): continue

        # Use the last (open) batch for this map-server name, or start one.
        msl = ms_list[ms_dns_name]
        if (msl == []):
            msl = [b"", 0]
            ms_list[ms_dns_name].append(msl)
        else:
            msl = ms_list[ms_dns_name][-1]
        #endif

        #
        # If dynamic-EIDs are discovered, add each of them to EID-records,
        # unless, we are doing a trigger in which case a single dynamic-EID
        # is built into an EID-record.
        #
        # Otherwise, add static EID-prefixes into EID-records, unless a single
        # one is triggered.
        #
        eid_records = b""
        if (db.dynamic_eid_configured()):
            for dyn_eid in list(db.dynamic_eids.values()):
                eid = dyn_eid.dynamic_eid
                if (eid_only == None or eid_only.is_exact_match(eid)):
                    records, count = lisp_build_map_register_records(quiet, db,
                        eid, db.group, ttl)
                    eid_records += records
                    msl[1] += count
                #endif
            #endfor
        else:
            if (eid_only == None):
                # Non-zero caller TTL is overridden by the per-db
                # register-ttl configuration.
                if (ttl != 0): ttl = db.register_ttl
                eid_records, count = lisp_build_map_register_records(quiet, db,
                    db.eid, db.group, ttl)
                msl[1] += count
            #endif
        #endif

        #
        # Add EID-records to correct map-server name set. Start a new batch
        # when this one reaches 20 records or exceeds the MTU budget.
        #
        msl[0] += eid_records
        if (msl[1] == 20 or len(msl[0]) > mtu):
            msl = [b"", 0]
            ms_list[ms_dns_name].append(msl)
        #endif
    #endfor

    #
    # Send Map-Register to each configured map-server.
    #
    sleep_time = .500 if (lisp_etr_test_mode) else .001
    count = 0
    for ms in list(lisp.lisp_map_servers_list.values()):
        if (ms_only != None and ms != ms_only): continue

        ms_dns_name = ms.dns_name if decent else ms.ms_name
        if (ms_dns_name not in ms_list): continue

        for msl in ms_list[ms_dns_name]:

            #
            # Build map-server specific fields.
            #
            map_register.record_count = msl[1]
            if (map_register.record_count == 0): continue

            map_register.nonce += 1
            map_register.alg_id = ms.alg_id
            map_register.key_id = ms.key_id
            map_register.proxy_reply_requested = ms.proxy_reply
            map_register.merge_register_requested = ms.merge_registrations
            map_register.map_notify_requested = ms.want_map_notify
            map_register.xtr_id = ms.xtr_id
            map_register.site_id = ms.site_id
            map_register.encrypt_bit = (ms.ekey != None)
            if (ms.refresh_registrations):
                map_register.map_register_refresh = refresh
            #endif
            if (ms.ekey != None): map_register.encryption_key_id = ms.ekey_id

            packet = map_register.encode()
            map_register.print_map_register()

            #
            # Append EID-records and encode xtr-ID and site-ID at end of
            # Map-Register.
            #
            trailer = map_register.encode_xtr_id(b"")
            eid_records = msl[0]
            packet = packet + eid_records + trailer

            ms.map_registers_sent += 1
            lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)

            # In test mode, back off the inter-packet delay every 100 sends.
            count += 1
            if (count % 100 == 0 and lisp_etr_test_mode):
                sleep_time += .1
                lisp.fprint("Sent {} Map-Registers, ipd {}".format(count,
                    sleep_time))
            #endif
            time.sleep(sleep_time)
        #endfor

        if (lisp_etr_test_mode):
            lisp.fprint("Sent total {} Map-Registers".format(count))
        #endif

        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()

        #
        # Exit loop if we are triggering a Map-Register to a single
        # Map-Server.
        #
        if (ms_only != None and ms == ms_only): break
    #endfor
    return
#enddef
#
# lisp_etr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# than sending periodic Map-Registers, as well as less than the NAT timeout
# value, which is usually one minute.
#
def lisp_etr_process_info_timer(ms):
    """Timer callback: send periodic Info-Request messages.

    ms: a single map-server to target, or None to cover all (this is also
    the argument used when the timer re-arms itself below).

    Runs on a threading.Timer thread; re-arms itself every
    lisp.LISP_INFO_INTERVAL seconds.
    """
    global lisp_etr_info_timer
    global lisp_ephem_socket

    lisp.lisp_set_exception()

    #
    # Build Info-Request messages if we have any private RLOCs in database-
    # mappings.
    #
    sockets = [lisp_ephem_socket, lisp_ephem_socket, lisp_ipc_listen_socket]
    lisp.lisp_build_info_requests(sockets, ms, lisp.LISP_CTRL_PORT)

    #
    # Build Info-Request for RTRs so we can open up NAT state so RTRs
    # can encapsulate to us when ETR is behind NAT. Private RTR addresses
    # are skipped unless LISP_RTR_BEHIND_NAT is set in the environment.
    #
    allow_private = (os.getenv("LISP_RTR_BEHIND_NAT") == None)
    for rtr in list(lisp.lisp_rtr_list.values()):
        if (rtr == None): continue
        if (rtr.is_private_address() and allow_private == False):
            r = lisp.red(rtr.print_address_no_iid(), False)
            lisp.lprint("Skip over RTR private address {}".format(r))
            continue
        #endif
        lisp.lisp_build_info_requests(sockets, rtr, lisp.LISP_DATA_PORT)
    #endfor

    #
    # Restart periodic timer. For some reason only this timer has to be
    # canceled. Found on while testing NAT-traversal on rasp-pi in Jul 2015.
    #
    lisp_etr_info_timer.cancel()
    lisp_etr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
        lisp_etr_process_info_timer, [None])
    lisp_etr_info_timer.start()
    return
#enddef
#
# lisp_process_register_timer
#
# Time to send a periodic Map-Register.
#
def lisp_process_register_timer(lisp_sockets):
    """Timer callback: send a periodic Map-Register for all databases.

    lisp_sockets: sockets used to transmit the Map-Register messages.

    Runs on a threading.Timer thread; re-arms itself every
    LISP_MAP_REGISTER_INTERVAL seconds and clears any pending one-shot
    trigger timer.
    """
    global lisp_register_timer, lisp_trigger_register_timer
    global lisp_ephem_socket

    lisp.lisp_set_exception()

    #
    # Build and send Map-Register.
    #
    lisp_build_map_register(lisp_sockets, None, None, None, True)

    #
    # If we are are doing L2-overlays, then register as a join of the
    # broadcast MAC address.
    #
    if (lisp.lisp_l2_overlay):
        entry = [ None, "ffff-ffff-ffff", True ]
        lisp_send_multicast_map_register(lisp_sockets, [entry])
    #endif

    #
    # If trigger timer called this function, clear it out and only use it
    # when a new map-server of database-mapping is configured.
    #
    if (lisp_trigger_register_timer != None):
        lisp_trigger_register_timer.cancel()
        lisp_trigger_register_timer = None
    #endif

    #
    # Restart periodic timer.
    #
    if (lisp_register_timer): lisp_register_timer.cancel()
    lisp_register_timer = threading.Timer(LISP_MAP_REGISTER_INTERVAL,
        lisp_process_register_timer, [lisp_send_sockets])
    lisp_register_timer.start()
    return
#enddef
#
# lisp_send_multicast_map_register
#
# Build a Map-Register message with a Multicast Info Type LCAF as an EID-record
# for each entry in the 'entries' array. And build an RLOC-record as an RLE
# describing this ETR as the RLOC to be used for replication.
#
# The entries is an array of (source, group, joinleave) tuples.
#
def lisp_send_multicast_map_register(lisp_sockets, entries):
    """Build and send Map-Registers for multicast (S,G) entries.

    lisp_sockets: sockets handed to lisp.lisp_send_map_register().
    entries:      array of (source, group, joinleave) tuples; joinleave
                  True means join (register with full TTL), False means
                  leave (register with TTL 0).

    Each entry becomes a Multicast Info Type LCAF EID-record with an
    RLE RLOC-record describing this ETR as the replication target.
    """
    length = len(entries)
    if (length == 0): return

    #
    # Determine the group AFI from the first entry's group address.
    #
    afi = None
    if (entries[0][1].find(":") != -1): afi = lisp.LISP_AFI_IPV6
    if (entries[0][1].find(".") != -1): afi = lisp.LISP_AFI_IPV4
    if (entries[0][1].find("-") != -1): afi = lisp.LISP_AFI_MAC
    if (afi == None):
        lisp.lprint("lisp_send_multicast_map_register() invalid group address")
        return
    #endif

    #
    # Find all (*,G) entries in entries array and replace with (S,G) entries
    # from lisp_group_mapping_list. The comment to avoid the source check
    # is there so we can build a g_entry that can validate against group
    # mappings. Have to fix to allow different sources for the same G when
    # (S,G) is reported.
    #
    g_entries = []
    for source, group, joinleave in entries:
#       if (source != None): continue
        g_entries.append([group, joinleave])
    #endfor

    decent = lisp.lisp_decent_pull_xtr_configured()
    ms_list = {}
    entries = []
    for group, joinleave in g_entries:
        ms_gm = lisp.lisp_lookup_group(group)
        if (ms_gm == None):
            lisp.lprint("No group-mapping for {}, could be underlay group". \
                format(group))
            continue
        #endif

        lisp.lprint("Use group-mapping '{}' {} for group {}".format( \
            ms_gm.group_name, ms_gm.group_prefix.print_prefix(), group))

        iid = ms_gm.group_prefix.instance_id
        ms_name = ms_gm.use_ms_name
        rle = ms_gm.rle_address

        #
        # To obtain decent-index for a group address, just use group address
        # and no source as part of hash. Because an ITR does not know if (*,G)
        # or (S,G) is registered with the mapping system
        #
        key = ms_name
        if (decent):
            key = lisp.lisp_get_decent_dns_name_from_str(iid, group)
            ms_list[key] = [b"", 0]
        #endif

        # No configured sources means register as (0.0.0.0, G).
        if (len(ms_gm.sources) == 0):
            entries.append(["0.0.0.0", group, iid, key, rle, joinleave])
            continue
        #endif

        for s in ms_gm.sources:
            ms_list[key] = [b"", 0]
            entries.append([s, group, iid, key, rle, joinleave])
        #endfor
    #endfor

    length = len(entries)
    if (length == 0): return

    lisp.lprint("Build Map-Register for {} multicast entries".format(length))

    #
    # Build RLE node for RLOC-record encoding. If behind a NAT, we need to
    # insert a global address as the RLE node address. We will do that in
    # the entries for loop.
    #
    rle_node = lisp.lisp_rle_node()
    rle_node.level = 128
    translated_rloc = lisp.lisp_get_any_translated_rloc()
    rle = lisp.lisp_rle("")
    rle.rle_nodes.append(rle_node)

    #
    # Set up each map-server name so we can decide which EID-prefixes go
    # to which map-servers. [0] is eid_records and [1] is count. The ms_list
    # is already setup for when pull-based decent is used.
    #
    if (decent == False):
        for ms in list(lisp.lisp_map_servers_list.values()):
            ms_list[ms.ms_name] = [b"", 0]
        #endfor
    #endif

    rloc_name = None
    if (lisp.lisp_nat_traversal): rloc_name = lisp.lisp_hostname

    #
    # Count number of RTRs reachable so we know allocation count.
    #
    rtr_count = 0
    for rtr in list(lisp.lisp_rtr_list.values()):
        if (rtr == None): continue
        rtr_count += 1
    #endfor

    #
    # Run through multicast entry array.
    #
    eid_records = b""
    for source, group, iid, ms_dns_name, rle_addr, joinleave in entries:

        #
        # Is db entry associated with a map-server name that is not configured?
        #
        if (ms_dns_name not in ms_list): continue

        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = 1 + rtr_count
        eid_record.authoritative = True
        eid_record.record_ttl = lisp.LISP_REGISTER_TTL if joinleave else 0
        eid_record.eid = lisp.lisp_address(afi, source, 0, iid)
        if (eid_record.eid.address == 0): eid_record.eid.mask_len = 0
        eid_record.group = lisp.lisp_address(afi, group, 0, iid)
        if (eid_record.group.is_mac_broadcast() and \
            eid_record.eid.address == 0): eid_record.eid.mask_len = 0

        decent_index = ""
        ms_name = ""
        if (lisp.lisp_decent_pull_xtr_configured()):
            decent_index = lisp.lisp_get_decent_index(eid_record.group)
            decent_index = lisp.bold(str(decent_index), False)
            decent_index = "with decent-index {}".format(decent_index)
        else:
            decent_index = "for ms-name '{}'".format(ms_dns_name)
        #endif
        eid_str = lisp.green(eid_record.print_eid_tuple(), False)
        lisp.lprint(" EID-prefix {} {}{}".format(eid_str, ms_name,
            decent_index))

        eid_records += eid_record.encode()
        eid_record.print_record(" ", False)
        ms_list[ms_dns_name][1] += 1

        #
        # Build our RLOC entry.
        #
        rloc_record = lisp.lisp_rloc_record()
        rloc_record.rloc_name = rloc_name

        #
        # Decide on RLE address. Have NAT-traversal take precedent, otherwise
        # use configured RLE in group-mapping. If one wasn't configured use
        # lisp_myrlocs IPv4 address.
        #
        if (translated_rloc != None):
            rle_node.address = translated_rloc
        elif (rle_addr != None):
            rle_node.address = rle_addr
        else:
            rle_node.address = rle_addr = lisp.lisp_myrlocs[0]
        #endif

        rloc_record.rle = rle
        rloc_record.local_bit = True
        rloc_record.reach_bit = True
        rloc_record.priority = 255
        rloc_record.weight = 0
        rloc_record.mpriority = 1
        rloc_record.mweight = 100
        eid_records += rloc_record.encode()
        rloc_record.print_record(" ")

        #
        # If we are doing NAT-traversal, include a set of RTR RLOCs with
        # priority 254.
        #
        for rtr in list(lisp.lisp_rtr_list.values()):
            if (rtr == None): continue
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            rloc_record.print_record(" RTR ")
        #endfor

        #
        # Add EID-records to correct map-server name set.
        #
        ms_list[ms_dns_name][0] += eid_records
    #endfor

    #
    # Build map-server independent fields.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    map_register.proxy_reply_requested = True
    map_register.map_notify_requested = False
    map_register.merge_register_requested = True

    #
    # Send Map-Register to each configured map-server.
    #
    for ms in list(lisp.lisp_map_servers_list.values()):
        key = ms.dns_name if decent else ms.ms_name

        #
        # Get EID-records from correct map-server name set.
        #
        if (key not in ms_list): continue

        #
        # Build map-server specific fields.
        #
        map_register.record_count = ms_list[key][1]
        if (map_register.record_count == 0): continue

        map_register.nonce += 1
        map_register.alg_id = ms.alg_id

        #
        # Bug fix: this previously overwrote alg_id with ms.key_id and
        # never set key_id, so the authentication key-ID was wrong in the
        # encoded Map-Register (compare lisp_build_map_register()).
        #
        map_register.key_id = ms.key_id
        map_register.xtr_id = ms.xtr_id
        map_register.site_id = ms.site_id
        map_register.encrypt_bit = (ms.ekey != None)
        packet = map_register.encode()
        map_register.print_map_register()

        #
        # Append EID-records and encode xtr-ID and site-ID at end of
        # Map-Register.
        #
        trailer = map_register.encode_xtr_id(b"")
        packet = packet + eid_records + trailer

        ms.map_registers_multicast_sent += 1
        lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)

        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()

        #
        # Go build more EID-records.
        #
        time.sleep(.001)
    #endfor
    return
#enddef
#
# lisp_etr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulated it.
#
def lisp_etr_data_plane(parms, not_used, packet):
    """pcap callback: decap a LISP-encapsulated packet and forward it.

    parms:    [device-name, raw-socket] pair supplied at capture setup.
    not_used: unused pcap callback argument.
    packet:   captured frame, including link-layer/pseudo headers.

    Handles IGMP snooping, RLOC-probe Map-Requests, IPv4 reassembly,
    decent-push Map-Register loopback, and finally bridges (L2) or
    forwards (L3) the decapsulated packet.
    """
    global lisp_ipc_listen_socket, lisp_send_sockets

    device = parms[0]
    lisp_raw_socket = parms[1]

    #
    # Jump over MAC header if packet received on interface. There is a 4-byte
    # internal header in any case (loopback interfaces will have a 4 byte
    # header)..
    #
    if (lisp.lisp_is_macos() == False):
        offset = 4 if device == "lo0" else 16
        packet = packet[offset::]
    #endif

    #
    # Check IGMP packet (IP protocol 2). A non-bool result is a list of
    # (S,G) join/leave entries to register.
    #
    protocol = struct.unpack("B", packet[9:10])[0]
    if (protocol == 2):
        entries = lisp.lisp_process_igmp_packet(packet)
        if (type(entries) != bool):
            lisp_send_multicast_map_register(lisp_send_sockets, entries)
            return
        #endif
    #endif

    #
    # Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
    #
    orig_packet = packet
    packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 0)
    if (orig_packet != packet):
        if (source == None): return
        lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
        return
    #endif

    #
    # First check if we are assembling IPv4 fragments. Do this only when
    # not doing NAT-traversal. Otherwise, the kernel will do it when we
    # receive the same packet on a raw socket (in lisp_etr_nat_data_plane()).
    #
    if (struct.unpack("B", packet[0:1])[0] & 0xf0 == 0x40):
        sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
        if (lisp.lisp_nat_traversal and sport == lisp.LISP_DATA_PORT): return
        packet = lisp.lisp_reassemble(packet)
        if (packet == None): return
    #endif

    packet = lisp.lisp_packet(packet)
    status = packet.decode(True, lisp_ipc_listen_socket, lisp.lisp_decap_stats)
    if (status == None): return

    #
    # Print some useful header fields.
    #
    packet.print_packet("Receive", True)

    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    # NOTE(review): 'sport' is only bound in the IPv4-fragment check above;
    # for a non-IPv4 outer header this branch would raise — presumably
    # decent-push loopback is always IPv4 here. TODO confirm.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        source = packet.inner_source.print_address_no_iid()
        packet.strip_outer_headers()
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif

    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply. The inner LISP header begins at offset 20+16+28=64
    # (outer-IPv4 + UDP-outer-LISP + inner-IPv4-UDP).
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        inner_ip = packet.packet[36::]
        inner_lisp = inner_ip[28::]
        ttl = -1
        if (lisp.lisp_is_rloc_probe_request(inner_lisp[0:1])):
            ttl = struct.unpack("B", inner_ip[8:9])[0] - 1
        #endif
        source = packet.outer_source.print_address_no_iid()
        lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
        return
    #endif

    #
    # Packets are arriving on pcap interface. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif

    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))

    #
    # Strip outer headers and start inner header forwarding logic.
    #
    packet.strip_outer_headers()
    f_or_b = lisp.bold("Forward", False)

    #
    # Process inner header (checksum and decrement ttl).
    #
    igmp = False
    L2 = packet.inner_dest.is_mac()
    if (L2):
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        f_or_b = lisp.bold("Bridge", False)
    elif (packet.inner_version == 4):
        igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        if (igmp):
            entries = lisp.lisp_process_igmp_packet(packet.packet)
            if (type(entries) != bool):
                lisp_send_multicast_map_register(lisp_send_sockets, entries)
                return
            #endif
        #endif
        packet.inner_ttl = packet.outer_ttl
    elif (packet.inner_version == 6):
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    else:
        lisp.dprint("Cannot parse inner packet header")
        return
    #endif

    #
    # Check if database-mapping exists for our local destination. When the
    # destination is a multicast address, check if the source is our EID.
    # That means we sent to a group we are members of. If using an RTR,
    # it can't tell since the source RLOC could be rewritten by a NAT so
    # the ETR must process the packet. If it decaps, the ITR on this system
    # will pcap it and encap again. This will happen until the TTL reaches 0.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
            return
        #endif
    else:
        if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
            lisp.dprint("Discard echoed multicast packet (through NAT)")
            return
        #endif
    #endif

    #
    # If this is a trace packet, lisp_trace_append() will swap addresses
    # and send packet back to source. We have no app to forward this decap'ed
    # packet to, so return.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, ed="decap") == False): return
    #endif

    #
    # We are going to forward or bridge the decapsulated packet.
    #
    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format(f_or_b, \
        lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))

    #
    # If we are decapsulating a MAC frame, then use the L2 socket where
    # the MAC header is already in packet.
    #
    if (L2):
        packet.bridge_l2_packet(packet.inner_dest, db)
        return
    #endif

    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header.
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif

    #
    # Default to global raw socket otherwise get socket based on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket

    #
    # Send out.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_etr_nat_data_plane
#
# Packet came in on a destination ephemeral port from a source port of 4341.
# That is a RTR encapsulated this packet that is coming through a NAT device.
#
# The packet has the outer IP and UDP headers stripped so the first byte of
# this supplied data packet has the LISP data header on it.
#
def lisp_etr_nat_data_plane(lisp_raw_socket, packet, source):
    """Process a kernel-decapsulated LISP packet received through a NAT.

    lisp_raw_socket: raw socket used to forward the inner packet.
    packet:          data starting at the LISP data header (outer IP/UDP
                     already stripped by the kernel; source port was 4341).
    source:          outer source RLOC address string.
    """
    global lisp_ipc_listen_socket, lisp_send_sockets

    #
    # Decode LISP header.
    #
    lisp_header = packet
    packet = lisp.lisp_packet(packet[8::])
    if (packet.lisp_header.decode(lisp_header) == False): return

    #
    # Store outer source RLOC address so if we are doing lisp-crypto across
    # NAT-traversal, we can find the decryption key.
    #
    packet.outer_source = lisp.lisp_address(lisp.LISP_AFI_IPV4, source,
        lisp.LISP_IPV4_HOST_MASK_LEN, 0)

    status = packet.decode(False, lisp_ipc_listen_socket,
        lisp.lisp_decap_stats)
    if (status == None): return

    #
    # Special case to log packets with no outer header but are considered
    # decapsulated when coming through NATs. Since packets are sent from
    # source port 4341, the kernel will strip outer header, so we don't have
    # outer header context in lisp_packet().
    #
    if (lisp.lisp_flow_logging): packet.log_flow(False)

    packet.print_packet("Kernel-decap", False)
    lisp.dprint(packet.lisp_header.print_header(" "))

    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        sport = packet.udp_sport
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif

    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply.
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        inner_ip = packet.packet
        inner_lisp = inner_ip[28::]
        ttl = -1
        if (lisp.lisp_is_rloc_probe_request(inner_lisp[0:1])):
            ttl = struct.unpack("B", inner_ip[8:9])[0] - 1
        #endif
        lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
        return
    #endif

    #
    # Packets are arriving on ephemeral socket. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif

    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))

    #
    # Check if database-mapping exists for our local destination. When the
    # destination is a multicast address, check if the source is our EID.
    # That means we sent to a group we are members of. If using an RTR,
    # it can't tell since the source RLOC could be rewritten by a NAT so
    # the ETR must process the packet. If it decaps, the ITR on this system
    # will pcap it and encap again. This will happen until the TTL reaches 0.
    #
    # NOTE(review): unlike lisp_etr_data_plane(), the no-database-mapping
    # case here does NOT return — the packet is still forwarded below.
    # Looks intentional (best-effort through NAT) but verify.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
        #endif
    else:
        if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
            lisp.dprint("Discard echoed multicast packet")
            return
        #endif
    #endif

    #
    # If this is a trace packet, lisp_trace_append() will swap addresses
    # and send packet back to source. We have no app to forward this decap'ed
    # packet to, so return.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, ed="decap") == False): return
    #endif

    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format( \
        lisp.bold("NAT-Forward", False), lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))

    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif

    #
    # Default to global raw socket otherwise get socket based on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket

    #
    # Send out on raw socket.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_register_ipv6_group_entries
#
# Find an IPv6 group-mapping and send a Map-Register for each configured IPv6
# source for the IPv6 group-prefix found.
#
def lisp_register_ipv6_group_entries(group, joinleave):
    """Register each configured IPv6 source for 'group'.

    Looks up the group-mapping for the supplied IPv6 group and sends a
    Map-Register containing one (S, group, joinleave) entry per source
    configured in the mapping. No-op when no mapping exists.
    """
    gm = lisp.lisp_lookup_group(group)
    if (gm == None): return

    sg_entries = [[source, group, joinleave] for source in gm.sources]
    lisp_send_multicast_map_register(lisp_send_sockets, sg_entries)
    return
#enddef
#
# lisp_etr_join_leave_process
#
# Look at file-system to see if there is a join or leave to be done. This
# function will send joins in the form of building an IP/IGMPv2 packet to
# be passed to lisp_process_igmp_packet(). The groups that are joined are
# ones found as filenames in the current directory as "join-<group>". The
# IGMP Reports will be sent to lisp_process_igmp_packet() every 30 seconds.
#
# For right now, if the group address is IPv6, send a Map-Register directly.
# We will get to MLD support later.
#
# This is used for testing and not meant for production deployment.
#
def lisp_etr_join_leave_process():
    """Poll the file-system for "join-<group>"/"leave-<group>" files.

    For each IPv4 group found, synthesizes an IP/IGMPv2 Report (or Leave)
    and feeds it to lisp.lisp_process_igmp_packet(); for IPv6 groups a
    Map-Register is sent directly. Runs forever on its own thread; used
    for testing, not production.
    """
    global lisp_send_sockets

    lisp.lisp_set_exception()

    #
    # Prebuilt 24-byte IPv4 header (with router-alert option) for an IGMP
    # packet destined to 224.0.0.251; the IGMP type and group are appended
    # per group below.
    #
    swap = socket.htonl
    ipigmp = [swap(0x46000020), swap(0x9fe60000), swap(0x0102d7cc),
        swap(0x0acfc15a), swap(0xe00000fb), swap(0x94040000)]
    packet = b""
    for l in ipigmp: packet += struct.pack("I", l)

    #
    # Look for files in current directory for "join-<group>" and then send
    # an IGMPv2 report to ourselves.
    #
    while (True):
        groups = getoutput("ls join-*").replace("join-", "")
        groups = groups.split("\n")
        for group in groups:
            if (lisp.lisp_valid_address_format("address", group) == False):
                continue
            #endif

            ipv6 = (group.find(":") != -1)

            #
            # Check if we are leaving group.
            #
            leavejoin = os.path.exists("leave-{}".format(group))

            lisp.lprint("Internal {} group {}".format( \
                "leaving" if leavejoin else "joining", group))

            #
            # Set IGMP message to Report or Leave. Then add group.
            #
            if (ipv6):
                if (group.lower().find("ff02:") != -1):
                    lisp.lprint("Suppress registration for link-local groups")
                    continue
                #endif
                lisp_register_ipv6_group_entries(group, (leavejoin == False))
            else:

                # 0x17 = IGMPv2 Leave, 0x16 = IGMPv2 Membership Report.
                send_packet = packet
                if (leavejoin):
                    send_packet += struct.pack("I", swap(0x17000000))
                else:
                    send_packet += struct.pack("I", swap(0x16000000))
                #endif

                # Append the group address in network byte-order.
                octet = group.split(".")
                value = int(octet[0]) << 24
                value += int(octet[1]) << 16
                value += int(octet[2]) << 8
                value += int(octet[3])
                send_packet += struct.pack("I", swap(value))

                sg = lisp.lisp_process_igmp_packet(send_packet)
                if (type(sg) != bool):
                    lisp_send_multicast_map_register(lisp_send_sockets, sg)
                #endif
                time.sleep(.100)
            #endif
        #endfor
        time.sleep(10)
    #endwhile
    return
#enddef
#
# lisp_etr_process
#
# This thread is for receiving encapsulated LISP packets address to destination
# port 4341. As well as IGMP reports. The IGMP reports can be captured on
# Ubuntu and Fedora but not on MacOS. The former supports IGMPv3 and the
# latter supports IGMPv2 if we listen on "en0".
#
def lisp_etr_process():
    lisp.lisp_set_exception()
    if (lisp.lisp_myrlocs[0] == None): return
    #
    # Find all multicast RLEs so we can receive packets on underlay multicast
    # groups.
    #
    rles = lisp.lisp_get_all_multicast_rles()
    #
    # We need to listen on en0 when doing IGMP testing on MacOS.
    #
    device = "any"
    # device = "en0" if lisp.lisp_is_macos() else "any"
    # device = "lo0" if lisp.lisp_is_macos() else "any"
    #
    # BPF filter: IGMP (proto 2), or packets addressed to any local
    # address/RLE that are LISP data (udp 4341/8472/4789), LISP control on
    # 4342 whose first payload byte is 0x12 (NOTE(review): confirm which
    # control type this selects), or IPv4 UDP fragments (more-fragments set
    # or nonzero fragment offset).
    #
    pfilter = "(proto 2) or "
    pfilter += "((dst host "
    for addr in lisp.lisp_get_all_addresses() + rles:
        pfilter += "{} or ".format(addr)
    #endfor
    pfilter = pfilter[0:-4]
    pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
    pfilter += "(udp src port 4341) or "
    pfilter += "(udp dst port 4342 and ip[28] == 0x12) or "
    pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
        "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"
    lisp.lprint("Capturing packets for: '{}' on device {}".format(pfilter,
        device))
    #
    # Enter receive loop. pcappy (py2) runs its own callback loop; pcapy
    # (py3) is polled here, skipping zero-length (timeout) reads.
    #
    if (lisp.lisp_is_python2()):
        import pcappy
        pcap = pcappy.open_live(device, 1600, 0, 100)
        pcap.filter = pfilter
        pcap.loop(-1, lisp_etr_data_plane, [device, lisp_raw_socket])
    #endif
    if (lisp.lisp_is_python3()):
        import pcapy
        pcap = pcapy.open_live(device, 1600, 0, 100)
        pcap.setfilter(pfilter)
        while(True):
            header, packet = pcap.next()
            if (len(packet) == 0): continue
            lisp_etr_data_plane([device, lisp_raw_socket], None, packet)
        #endwhile
    #endif
    return
#enddef
#
# lisp_etr_startup
#
# Initialize this LISP ETR process. This function returns no values.
#
def lisp_etr_startup():
    global lisp_ipc_listen_socket
    global lisp_ephem_socket
    global lisp_send_sockets
    global lisp_raw_socket
    global lisp_l2_socket
    global lisp_mac_header
    lisp.lisp_i_am("etr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ETR starting up")
    #
    # Get local address for source RLOC for encapsulation. Fail startup if
    # no local addresses can be determined.
    #
    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if (lisp.lisp_get_local_addresses() == False): return(False)
    #
    # Prebuild MAC header for lisp_l2_socket sending. Disabled code in favor
    # of using pytun. See below.
    #
    # m = list(lisp.lisp_mymacs.keys())[0]
    # mac = ""
    # for i in range(0, 12, 2): mac += chr(int(m[i:i+2], 16))
    # lisp_mac_header = mac + mac + "\x86\xdd"
    # lisp.dprint("Built MAC header for L2 socket:",
    #     lisp.lisp_format_packet(lisp_mac_header))
    #
    # Used for listening for Info-Replies for NAT-traversal support. A
    # multicast TTL is set so IGMP-related sends can leave the host.
    #
    s = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_port))
    s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    lisp_ephem_socket = s
    #
    # Open network send socket and internal listen socket.
    #
    lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-etr")
    lisp_send_sockets[0] = lisp_ephem_socket
    lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    lisp_send_sockets[2] = lisp_ipc_listen_socket
    #
    # Open up raw socket so we can send with IP headers after decapsulation.
    # There is a special case where the RTR's lisp_send_sockets array is of
    # size 4 since we need to pass the raw socket through the lisp.py module
    # to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
    # The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array size of 3.
    #
    lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    lisp_send_sockets.append(lisp_raw_socket)
    #
    # Open a L2 socket so when we decapsulate and have to route an IPv6
    # packet, we have the kernel receive a MAC frame on the loopback interface.
    # We do this because there is no IP_HDRINCL for IPv6 raw sockets.
    #
    # Disabling this code in favor of using a tuntap tun interface via the
    # pytun module. See code right below.
    #
    # if ("PF_PACKET" in dir(socket)):
    #     interface = "lo" if ("lo" in lisp.lisp_myinterfaces.keys()) else \
    #         "lo0" if ("lo0" in lisp.lisp_myinterfaces.keys()) else None
    #     if (interface != None):
    #         lisp_l2_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
    #         lisp_l2_socket.bind(("lo", 0x86dd))
    #     #endif
    # #endif
    #
    # Setup tuntap tunnel interface so when we decap IPv6 packets, we can
    # send to kernel to route them. The 4-byte tun prefix carries ethertype
    # 0x86dd (IPv6). Failure to create the device is logged, not fatal.
    #
    if (pytun != None):
        lisp_mac_header = b'\x00\x00\x86\xdd'
        device = "lispers.net"
        try:
            lisp_l2_socket = pytun.TunTapDevice(flags=pytun.IFF_TUN,
                name=device)
            os.system("ip link set dev {} up".format(device))
        except:
            lisp.lprint("Cannot create tuntap interface")
        #endtry
    #endif
    #
    # Start thread to listen on data socket.
    #
    threading.Thread(target=lisp_etr_process, args=[]).start()
    #
    # Test code to force IGMPv2 joins and leaves on an airplane. ;-)
    #
    threading.Thread(target=lisp_etr_join_leave_process, args=[]).start()
    return(True)
#enddef
#
# lisp_etr_shutdown
#
# Shut down this process.
#
def lisp_etr_shutdown():
    global lisp_register_timer
    global lisp_etr_info_timer
    #
    # Stop the periodic Map-Register and Info-Request timer threads if they
    # are running.
    #
    for timer in [lisp_register_timer, lisp_etr_info_timer]:
        if (timer): timer.cancel()
    #endfor
    #
    # Release the network send sockets and the internal IPC listen socket.
    #
    lisp.lisp_close_socket(lisp_send_sockets[0], "")
    lisp.lisp_close_socket(lisp_send_sockets[1], "")
    lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-etr")
    return
#enddef
#
# lisp_etr_discover_eid
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "learn%<eid-string>%<interface-name>"
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_etr_discover_eid(ipc):
    # IPC format: "learn%<eid-string>%<interface-name>"; an interface of
    # "None" means the EID went away.
    ipc = ipc.split("%")
    eid_str = ipc[1]
    interface = ipc[2]
    if (interface == "None"): interface = None
    eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    eid.store_address(eid_str)
    #
    # Do database-mapping lookup.
    #
    db = lisp.lisp_db_for_lookups.lookup_cache(eid, False)
    if (db == None or db.dynamic_eid_configured() == False):
        lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}". \
            format(lisp.green(eid_str, False)))
        return
    #endif
    #
    # Do logic checks. That is do not remove an entry if it is not there and
    # don't try to add an entry if it is already cached.
    #
    dyn_eid = None
    if (eid_str in db.dynamic_eids): dyn_eid = db.dynamic_eids[eid_str]
    if (dyn_eid == None and interface == None):
        lisp.lprint("ITR/ETR state mismatch for {}".format( \
            lisp.green(eid_str, False)))
        return
    #endif
    #
    # Check if ITR is changing the interface to the same interface, meaning
    # it is confused. Otherwise, the IPC is an interface change. Don't register
    # in this case.
    #
    if (dyn_eid and interface):
        if (dyn_eid.interface == interface):
            lisp.lprint("ITR sent redundant IPC for {}".format( \
                lisp.green(eid_str, False)))
        else:
            lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format( \
                lisp.green(eid_str, False), dyn_eid.interface, interface))
            dyn_eid.interface = interface
        #endif
        return
    #endif
    #
    # Add new entry and register it.
    #
    if (interface):
        dyn_eid = lisp.lisp_dynamic_eid()
        dyn_eid.dynamic_eid.copy_address(eid)
        dyn_eid.interface = interface
        dyn_eid.get_timeout(interface)
        db.dynamic_eids[eid_str] = dyn_eid
        reg = lisp.bold("Registering", False)
        eid_str = lisp.bold(eid_str, False)
        lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(reg,
            lisp.green(eid_str, False), interface, dyn_eid.timeout))
        lisp_build_map_register(lisp_send_sockets, None, eid, None, False)
        #
        # Add /32 to routing table (not supported on MacOS).
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route add {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
        return
    #endif
    #
    # Remove existing entry and deregister it.
    #
    if (eid_str in db.dynamic_eids):
        interface = db.dynamic_eids[eid_str].interface
        dereg = lisp.bold("Deregistering", False)
        lisp.lprint("{} dynamic-EID {}".format(dereg,
            lisp.green(eid_str, False)))
        lisp_build_map_register(lisp_send_sockets, 0, eid, None, False)
        db.dynamic_eids.pop(eid_str)
        #
        # Delete /32 from routing table (not supported on MacOS).
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route delete {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
    #endif
    return
#enddef
#
# lisp_etr_process_rtr_updown
#
# Process IPC message from lisp-itr. It is telling the lisp-etr process if
# RLOC-probing has determined if the RTR has gone up or down. And therefore
# if it should be registered to the mapping system.
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_etr_process_rtr_updown(ipc):
    #
    # When we register all RTRs regardless of reachability, the IPC is a
    # no-op. IPC format: "rtr%<rtr-address-string>%<up-or-down>".
    #
    if (lisp.lisp_register_all_rtrs): return
    opcode, rtr_str, status = ipc.split("%")
    if (rtr_str not in lisp.lisp_rtr_list): return
    lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
        lisp.red(rtr_str, False), lisp.bold(status, False)))
    #
    # A down RTR is recorded as None so it is not registered; an up RTR
    # gets a fresh address entry. (Removed a dead read of the old entry --
    # the previous value was never used.)
    #
    if (status == "down"):
        lisp.lisp_rtr_list[rtr_str] = None
        return
    #endif
    rtr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rtr_str, 32, 0)
    lisp.lisp_rtr_list[rtr_str] = rtr
    return
#enddef
#
# lisp_etr_process_nonce_ipc
#
# Process an nonce IPC message from the ITR. It wants to know when a nonce
# is echoed from a remote ITR.
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_etr_process_nonce_ipc(ipc):
    #
    # IPC format is "nonce%<opcode>%<rloc-string>%<nonce-in-hex>". Opcode
    # "R" records a request-nonce that was sent, "E" an echo-nonce.
    #
    x, opcode, rloc_str, nonce_str = ipc.split("%")
    nonce = int(nonce_str, 16)
    #
    # Find per-RLOC echo-nonce state, creating it on first use.
    #
    state = lisp.lisp_get_echo_nonce(None, rloc_str)
    if (state == None): state = lisp.lisp_echo_nonce(rloc_str)
    if (opcode == "R"):
        state.request_nonce_sent = nonce
        lisp.lprint("Waiting for echo-nonce 0x{} from {}".format( \
            lisp.lisp_hex_string(nonce), lisp.red(state.rloc_str, False)))
    elif (opcode == "E"):
        state.echo_nonce_sent = nonce
        lisp.lprint("Sent echo-nonce 0x{} to {}".format( \
            lisp.lisp_hex_string(nonce), lisp.red(state.rloc_str, False)))
    #endif
    return
#enddef
#
# ETR commands processed by this process.
#
#
# Table mapping each config clause to [handler-function, {keyword:
# [value-spec...]}]. NOTE(review): the exact meaning of the per-keyword
# lists (leading bool, ranges, enums) is defined by lispconfig -- confirm
# against lispconfig.py before changing entries.
#
lisp_etr_commands = {
    "lisp xtr-parameters" : [lispconfig.lisp_xtr_command, {
        "rloc-probing" : [True, "yes", "no"],
        "nonce-echoing" : [True, "yes", "no"],
        "data-plane-security" : [True, "yes", "no"],
        "data-plane-logging" : [True, "yes", "no"],
        "frame-logging" : [True, "yes", "no"],
        "flow-logging" : [True, "yes", "no"],
        "nat-traversal" : [True, "yes", "no"],
        "checkpoint-map-cache" : [True, "yes", "no"],
        "ipc-data-plane" : [True, "yes", "no"],
        "decentralized-push-xtr" : [True, "yes", "no"],
        "decentralized-pull-xtr-modulus" : [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix" : [True],
        "register-reachable-rtrs" : [True, "yes", "no"],
        "program-hardware" : [True, "yes", "no"] }],
    "lisp interface" : [lispconfig.lisp_interface_command, {
        "interface-name" : [True],
        "device" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "dynamic-eid" : [True],
        "dynamic-eid-device" : [True],
        "lisp-nat" : [True, "yes", "no"],
        "dynamic-eid-timeout" : [True, 0, 0xff] }],
    "lisp map-server" : [lisp_etr_map_server_command, {
        "ms-name" : [True],
        "address" : [True],
        "dns-name" : [True],
        "authentication-type" : [False, "sha1", "sha2"],
        "authentication-key" : [False],
        "encryption-key" : [False],
        "proxy-reply" : [False, "yes", "no"],
        "want-map-notify" : [False, "yes", "no"],
        "merge-registrations" : [False, "yes", "no"],
        "refresh-registrations" : [False, "yes", "no"],
        "site-id" : [False, 1, 0xffffffffffffffff] }],
    "lisp database-mapping" : [lisp_etr_database_mapping_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "secondary-instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "dynamic-eid" : [True, "yes", "no"],
        "signature-eid" : [True, "yes", "no"],
        "register-ttl" : [True, 1, 0xffffffff],
        "rloc" : [],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "address" : [True],
        "interface" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],
    "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
        "elp-name" : [False],
        "elp-node" : [],
        "address" : [True],
        "probe" : [True, "yes", "no"],
        "strict" : [True, "yes", "no"],
        "eid" : [True, "yes", "no"] }],
    "lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
        "rle-name" : [False],
        "rle-node" : [],
        "address" : [True],
        "level" : [True, 0, 255] }],
    "lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
        "geo-name" : [False],
        "geo-tag" : [False] }],
    "lisp json" : [lispconfig.lisp_json_command, {
        "json-name" : [False],
        "json-string" : [False] }],
    "lisp group-mapping" : [lisp_group_mapping_command, {
        "group-name" : [False],
        "ms-name" : [True],
        "group-prefix" : [False],
        "instance-id" : [True, 0, 0xffffffff],
        "rle-address" : [False],
        "sources" : [],
        "address" : [True] }],
    "show database-mapping" : [lisp_etr_show_command, { }],
    "show etr-keys" : [lisp_etr_show_keys_command, {}],
    "show etr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_etr_startup() == False):
    lisp.lprint("lisp_etr_startup() failed")
    lisp.lisp_print_banner("ETR abnormal exit")
    exit(1)
#endif
#
# Main event loop: block in select() on the ephemeral (network) socket and
# the internal IPC socket, then dispatch whatever arrived.
#
socket_list = [lisp_ephem_socket, lisp_ipc_listen_socket]
while (True):
    try: ready_list, w, x = select.select(socket_list, [], [])
    except: break
    #
    # Process Info-Reply messages received on ephemeral port.
    #
    if (lisp_ephem_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ephem_socket, False)
        # Exit the event loop when lisp_receive() returns an empty source.
        if (source == ""): break
        if (port == lisp.LISP_DATA_PORT):
            lisp_etr_nat_data_plane(lisp_raw_socket, packet, source)
        else:
            # RLOC-probes arrive via the pcap thread; drop duplicates here.
            if (lisp.lisp_is_rloc_probe_request(packet[0:1])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            send_register = lisp.lisp_parse_packet(lisp_send_sockets, packet,
                source, port)
            #
            # Info-Reply from map-server has new RTR-list, trigger a
            # Map-Register and a Info-Request to the RTR.
            #
            if (send_register):
                lisp_etr_info_timer = threading.Timer(0,
                    lisp_etr_process_info_timer, [None])
                lisp_etr_info_timer.start()
                lisp_register_timer = threading.Timer(0,
                    lisp_process_register_timer, [lisp_send_sockets])
                lisp_register_timer.start()
            #endif
        #endif
    #endif
    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        # Exit the event loop when lisp_receive() returns an empty source.
        if (source == ""): break
        if (opcode == "command"):
            # Commands are keyed by a "<keyword>%" prefix in the payload.
            packet = packet.decode()
            if (packet.find("learn%") != -1):
                lisp_etr_discover_eid(packet)
            elif (packet.find("nonce%") != -1):
                lisp_etr_process_nonce_ipc(packet)
            elif (packet.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(packet)
            elif (packet.find("rtr%") != -1):
                lisp_etr_process_rtr_updown(packet)
            elif (packet.find("stats%") != -1):
                packet = packet.split("%")[-1]
                lisp.lisp_process_data_plane_decap_stats(packet, None)
            else:
                lispconfig.lisp_process_command(lisp_ipc_listen_socket,
                    opcode, packet, "lisp-etr", [lisp_etr_commands])
            #endif
        elif (opcode == "api"):
            packet = packet.decode()
            lisp.lisp_process_api("lisp-etr", lisp_ipc_listen_socket, packet)
        else:
            if (lisp.lisp_is_rloc_probe_request(packet[0:1])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile
lisp_etr_shutdown()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
rdd.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
__all__ = ["RDD"]
def portable_hash(x):
    """
    This function returns consistent hash code for builtin types, especially
    for None and tuple with None.

    The algorithm is similar to that one used by CPython 2.7

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    # Bug fix: the original compared version *strings* ('3.10' < '3.3' is
    # True lexicographically), which silently disabled this guard on
    # Python 3.10+. Compare version tuples instead.
    if sys.version_info >= (3, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")

    if x is None:
        return 0
    if isinstance(x, tuple):
        # Tuple hashing mirrors CPython 2.7's tupleobject.c combine step,
        # masked to sys.maxsize to stay in platform word range.
        h = 0x345678
        for i in x:
            h ^= portable_hash(i)
            h *= 1000003
            h &= sys.maxsize
        h ^= len(x)
        if h == -1:
            h = -2
        return int(h)
    return hash(x)
class BoundedFloat(float):
    """
    A float produced by an approximate job, carrying the confidence level
    and the low/high bounds of the estimate as attributes.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """

    def __new__(cls, mean, confidence, low, high):
        # The float value itself is the mean; the bounds ride along as
        # plain attributes on the instance.
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
    # Generator: connect to localhost:port and yield the items that
    # *serializer* deserializes from the stream.
    sock = None
    # Support for both IPv4 and IPv6.
    # On most of IPv6-ready systems, IPv6 will take precedence.
    for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = socket.socket(af, socktype, proto)
        try:
            sock.settimeout(3)
            sock.connect(sa)
        except socket.error:
            # This family/address failed; reset and try the next result.
            sock.close()
            sock = None
            continue
        # First successful connection wins.
        break
    if not sock:
        raise Exception("could not open socket")
    try:
        rf = sock.makefile("rb", 65536)
        for item in serializer.load_stream(rf):
            yield item
    finally:
        # Runs on exhaustion AND when the consumer closes the generator
        # early, so the socket is never leaked.
        sock.close()
def ignore_unicode_prefix(f):
    """
    Ignore the 'u' prefix of string in doc tests, to make it works
    in both python 2 and 3. Returns *f* itself; the docstring is
    rewritten in place.
    """
    if sys.version >= '3' and f.__doc__ is not None:
        # The representation of unicode string in Python 3 does not have
        # prefix 'u', so remove the prefix 'u' for doc tests. The None
        # guard fixes a crash when decorating a function with no docstring.
        literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
        f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
    return f
class Partitioner(object):
    """
    Maps a key to a partition id by applying ``partitionFunc`` and taking
    the result modulo ``numPartitions``.
    """

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        # Two partitioners agree iff they split into the same number of
        # partitions using the same function object.
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
    """
    A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
    Represents an immutable, partitioned collection of elements that can be
    operated on in parallel. Wraps a JVM RDD handle (``_jrdd``) together
    with the serializer used to decode its elements on the Python side.
    """
    def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
        self._jrdd = jrdd                  # underlying JVM RDD handle
        self.is_cached = False             # set True by cache()/persist()
        self.is_checkpointed = False       # set True by checkpoint()
        self.ctx = ctx                     # owning SparkContext
        self._jrdd_deserializer = jrdd_deserializer  # decodes elements from the JVM
        self._id = jrdd.id()               # unique id within the SparkContext
        self.partitioner = None            # known Partitioner, if any
    def _pickled(self):
        # Re-encode this RDD with the default batched pickle serializer.
        return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
    def id(self):
        """
        A unique ID for this RDD (within its SparkContext).
        """
        # Captured from the JVM RDD at construction time.
        return self._id
    def __repr__(self):
        # Delegate to the JVM RDD's toString for the debug representation.
        return self._jrdd.toString()
    def __getnewargs__(self):
        # This method is called when attempting to pickle an RDD, which is always an error:
        # RDDs are driver-side handles and must never be shipped inside tasks.
        raise Exception(
            "It appears that you are attempting to broadcast an RDD or reference an RDD from an "
            "action or transformation. RDD transformations and actions can only be invoked by the "
            "driver, not inside of other transformations; for example, "
            "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
            "transformation and count action cannot be performed inside of the rdd1.map "
            "transformation. For more information, see SPARK-5063."
        )
    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        # Read-only alias for self.ctx.
        return self.ctx
    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).
        Returns self so calls can be chained.
        """
        self.is_cached = True
        self.persist(StorageLevel.MEMORY_ONLY)
        return self
    def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
        """
        Set this RDD's storage level to persist its values across operations
        after the first time it is computed. This can only be used to assign
        a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified defaults to (C{MEMORY_ONLY}).
        Returns self so calls can be chained.
        >>> rdd = sc.parallelize(["b", "a", "c"])
        >>> rdd.persist().is_cached
        True
        """
        self.is_cached = True
        # Translate the Python StorageLevel into its JVM counterpart before
        # persisting the underlying JVM RDD.
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jrdd.persist(javaStorageLevel)
        return self
    def unpersist(self):
        """
        Mark the RDD as non-persistent, and remove all blocks for it from
        memory and disk. Returns self so calls can be chained.
        """
        self.is_cached = False
        self._jrdd.unpersist()
        return self
    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        # Checkpointing is handled entirely by the JVM RDD.
        self._jrdd.rdd().checkpoint()
    def isCheckpointed(self):
        """
        Return whether this RDD has been checkpointed or not
        """
        # Ask the JVM RDD rather than trusting the local flag.
        return self._jrdd.rdd().isCheckpointed()
    def getCheckpointFile(self):
        """
        Gets the name of the file to which this RDD was checkpointed.
        Returns None (implicitly) when the RDD was never checkpointed.
        """
        # The JVM side returns an Option-like object; unwrap when defined.
        checkpointFile = self._jrdd.rdd().getCheckpointFile()
        if checkpointFile.isDefined():
            return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.
        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        # All map-like transformations in this class funnel through here.
        return PipelinedRDD(self, f, preservesPartitioning)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Deprecated: use mapPartitionsWithIndex instead.
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.
        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        # Kept for API compatibility: warn, then delegate.
        warnings.warn("mapPartitionsWithSplit is deprecated; "
                      "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
        return self.mapPartitionsWithIndex(f, preservesPartitioning)
    def getNumPartitions(self):
        """
        Returns the number of partitions in RDD
        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> rdd.getNumPartitions()
        2
        """
        # Ask the JVM RDD for its partition array size.
        return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
    def sample(self, withReplacement, fraction, seed=None):
        """
        Return a sampled subset of this RDD.
        :param withReplacement: can elements be sampled multiple times (replaced when sampled out)
        :param fraction: expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be [0, 1]
            with replacement: expected number of times each element is chosen; fraction must be >= 0
        :param seed: seed for the random number generator
        >>> rdd = sc.parallelize(range(100), 4)
        >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
        True
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        # Per-partition sampling logic lives in RDDSampler.
        return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
    def randomSplit(self, weights, seed=None):
        """
        Randomly splits this RDD with the provided weights.
        :param weights: weights for splits, will be normalized if they don't sum to 1
        :param seed: random seed
        :return: split RDDs in a list
        >>> rdd = sc.parallelize(range(500), 1)
        >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
        >>> len(rdd1.collect() + rdd2.collect())
        500
        >>> 150 < rdd1.count() < 250
        True
        >>> 250 < rdd2.count() < 350
        True
        """
        # Build cumulative normalized weights: [0.0, w1, w1+w2, ..., 1.0].
        s = float(sum(weights))
        cweights = [0.0]
        for w in weights:
            cweights.append(cweights[-1] + w / s)
        if seed is None:
            seed = random.randint(0, 2 ** 32 - 1)
        # Adjacent cumulative weights [lb, ub) define each split; every
        # split shares the same seed -- see RDDRangeSampler for how the
        # per-element draws partition the data.
        return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
                for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed=None):
        """
        Return a fixed-size sampled subset of this RDD.
        Note that this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        numStDev = 10.0
        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []
        initialCount = self.count()
        if initialCount == 0:
            return []
        rand = random.Random(seed)
        if (not withReplacement) and num >= initialCount:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples
        # Guard against a request so large that the oversampling below
        # could overflow the platform's integer range.
        maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
        if num > maxSampleSize:
            raise ValueError(
                "Sample size cannot be greater than %d." % maxSampleSize)
        fraction = RDD._computeFractionForSampleSize(
            num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()
        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxsize)
            samples = self.sample(withReplacement, fraction, seed).collect()
        rand.shuffle(samples)
        return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
    def union(self, other):
        """
        Return the union of this RDD and another one.
        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                      self._jrdd_deserializer)
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                      self.ctx.serializer)
        # Preserve the partitioner only when both inputs share it and the
        # union kept the same partition count.
        if (self.partitioner == other.partitioner and
                self.getNumPartitions() == rdd.getNumPartitions()):
            rdd.partitioner = self.partitioner
        return rdd
def intersection(self, other):
    """
    Return the intersection of this RDD and another one.  The output
    contains no duplicate elements, even if the inputs did.

    Note that this method performs a shuffle internally.

    >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
    >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
    >>> rdd1.intersection(rdd2).collect()
    [1, 2, 3]
    """
    keyed_self = self.map(lambda value: (value, None))
    keyed_other = other.map(lambda value: (value, None))
    grouped = keyed_self.cogroup(keyed_other)
    # Keep only keys whose groups are non-empty on both sides.
    return grouped.filter(lambda kv: all(kv[1])).keys()
def _reserialize(self, serializer=None):
    # Return an RDD whose wire format matches `serializer` (the context's
    # default when omitted).  No-op when the format already matches.
    target = serializer or self.ctx.serializer
    if self._jrdd_deserializer == target:
        return self
    reserialized = self.map(lambda x: x, preservesPartitioning=True)
    reserialized._jrdd_deserializer = target
    return reserialized
def __add__(self, other):
    """
    Return the union of this RDD and another one.

    :param other: another RDD; any other type raises TypeError

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> (rdd + rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if not isinstance(other, RDD):
        # A bare `raise TypeError` gave callers no clue what was wrong;
        # include the offending type.  Still a TypeError, so existing
        # `except TypeError` handlers keep working.
        raise TypeError(
            "can only add another RDD, got %s" % type(other).__name__)
    return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                       ascending=True, keyfunc=lambda x: x):
    """
    Repartition the RDD according to the given partitioner and, within each resulting partition,
    sort records by their keys.

    :param numPartitions: target partition count (defaults to the default
        reduce parallelism)
    :param partitionFunc: function mapping a key to a partition index
    :param ascending: sort direction within each partition
    :param keyfunc: function extracting the comparison key from each record's key

    >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
    >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
    >>> rdd2.glom().collect()
    [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()

    # Memory budget for the external (spilling) sort, read from Spark conf
    # with a 512 MB default.
    memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # Leave 10% headroom below the configured limit before spilling.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))

    return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
    """
    Sorts this RDD, which is assumed to consist of (key, value) pairs.
    # noqa

    :param ascending: sort order; descending when False
    :param numPartitions: number of output partitions (default reduce
        parallelism when None)
    :param keyfunc: function applied to each key before comparison

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortByKey().first()
    ('1', 3)
    >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
    >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
    >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
    [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    memory = self._memory_limit()
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # External sort spills to disk past ~90% of the memory budget.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

    if numPartitions == 1:
        # Single output partition: no range partitioning needed, just
        # coalesce (if necessary) and sort locally.
        if self.getNumPartitions() > 1:
            self = self.coalesce(1)
        return self.mapPartitions(sortPartition, True)

    # first compute the boundary of each part via sampling: we want to partition
    # the key-space into bins such that the bins have roughly the same
    # number of (key, value) pairs falling into them
    rddSize = self.count()
    if not rddSize:
        return self  # empty RDD
    maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
    fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
    samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
    samples = sorted(samples, key=keyfunc)

    # we have numPartitions many parts but one of the them has
    # an implicit boundary
    bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
              for i in range(0, numPartitions - 1)]

    def rangePartitioner(k):
        # Route the key to the bin its boundaries enclose; reverse the
        # index for descending order.
        p = bisect.bisect_left(bounds, keyfunc(k))
        if ascending:
            return p
        else:
            return numPartitions - 1 - p

    return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
    """
    Sorts this RDD by the given keyfunc

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
    [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    """
    # Key each element, sort by that key, then strip the key back off.
    keyed = self.keyBy(keyfunc)
    ordered = keyed.sortByKey(ascending, numPartitions)
    return ordered.values()
def glom(self):
    """
    Return an RDD created by coalescing all elements within each partition
    into a list.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> sorted(rdd.glom().collect())
    [[1, 2], [3, 4]]
    """
    # Each partition becomes a single element: the list of its contents.
    return self.mapPartitions(lambda iterator: [list(iterator)])
def cartesian(self, other):
    """
    Return the Cartesian product of this RDD and another one: the RDD of
    all pairs C{(a, b)} where C{a} is in C{self} and C{b} is in C{other}.

    >>> rdd = sc.parallelize([1, 2])
    >>> sorted(rdd.cartesian(rdd).collect())
    [(1, 1), (1, 2), (2, 1), (2, 2)]
    """
    # Due to batching, we can't use the Java cartesian method; a dedicated
    # deserializer unpacks the two paired streams instead.
    deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                         other._jrdd_deserializer)
    return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
    """
    Return an RDD of items grouped by the result of applying C{f} to each.

    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    keyed = self.map(lambda item: (f(item), item))
    return keyed.groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
    """
    Return an RDD created by piping elements to a forked external process.

    >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
    [u'1', u'2', u'', u'3']

    :param command: shell command; tokenized with shlex and run without a shell
    :param env: environment variables for the child process (empty dict by default)
    :param checkCode: whether or not to check the return value of the shell command.
    """
    if env is None:
        env = dict()

    def func(iterator):
        pipe = Popen(
            shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

        def pipe_objs(out):
            # Feed elements to the child's stdin from a separate thread so
            # that reading stdout below cannot deadlock on full OS pipe
            # buffers.  One element per line, UTF-8 encoded.
            for obj in iterator:
                s = str(obj).rstrip('\n') + '\n'
                out.write(s.encode('utf-8'))
            out.close()
        Thread(target=pipe_objs, args=[pipe.stdin]).start()

        def check_return_code():
            # Generator that yields nothing: chained after stdout so the
            # exit status is checked only after all output is consumed.
            pipe.wait()
            if checkCode and pipe.returncode:
                raise Exception("Pipe function `%s' exited "
                                "with error code %d" % (command, pipe.returncode))
            else:
                for i in range(0):
                    yield i
        return (x.rstrip(b'\n').decode('utf-8') for x in
                chain(iter(pipe.stdout.readline, b''), check_return_code()))
    return self.mapPartitions(func)
def foreach(self, f):
    """
    Applies a function to all elements of this RDD.

    >>> def f(x): print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
    """
    def applyToPartition(iterator):
        for element in iterator:
            f(element)
        return iter([])
    # count() forces evaluation; the mapped RDD itself yields nothing.
    self.mapPartitions(applyToPartition).count()
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.

    >>> def f(iterator):
    ...     for x in iterator:
    ...         print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    def run(partition):
        result = f(partition)
        # If f returned an iterable, drain it through count() below;
        # otherwise contribute nothing.
        try:
            return iter(result)
        except TypeError:
            return iter([])
    self.mapPartitions(run).count()  # Force evaluation
def collect(self):
    """
    Return a list that contains all of the elements in this RDD.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.
    """
    with SCCallSiteSync(self.context) as css:
        # The JVM serves the collected partitions over a local socket;
        # _load_from_socket deserializes them back into Python objects.
        port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator. Currently reduces partitions locally.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    >>> sc.parallelize([]).reduce(add)
    Traceback (most recent call last):
        ...
    ValueError: Can not reduce() empty RDD
    """
    def reducePartition(iterator):
        it = iter(iterator)
        # Seed with the first element, fold the rest in; empty partitions
        # yield nothing at all.
        for head in it:
            yield reduce(f, it, head)
            return
    partials = self.mapPartitions(reducePartition).collect()
    if not partials:
        raise ValueError("Can not reduce() empty RDD")
    return reduce(f, partials)
def treeReduce(self, f, depth=2):
    """
    Reduces the elements of this RDD in a multi-level tree pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeReduce(add)
    -5
    >>> rdd.treeReduce(add, 1)
    -5
    >>> rdd.treeReduce(add, 2)
    -5
    >>> rdd.treeReduce(add, 5)
    -5
    >>> rdd.treeReduce(add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

    # Tag real elements (value, False); the zero value carries True in the
    # flag slot so dummy/empty contributions can be recognised and skipped.
    zeroValue = None, True

    def combine(left, right):
        if left[1]:
            return right
        if right[1]:
            return left
        return f(left[0], right[0]), False

    tagged = self.map(lambda value: (value, False))
    result, isDummy = tagged.treeAggregate(zeroValue, combine, combine, depth)
    if isDummy:
        raise ValueError("Cannot reduce empty RDD.")
    return result
def fold(self, zeroValue, op):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given associative function and a neutral "zero value."

    The function C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.

    This behaves somewhat differently from fold operations implemented
    for non-distributed collections in functional languages like Scala:
    the fold is applied per partition and the partial results are folded
    together, possibly in a different order than a sequential fold, so for
    non-commutative functions the result may differ.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
    15
    """
    def foldPartition(iterator):
        accumulated = zeroValue
        for element in iterator:
            accumulated = op(accumulated, element)
        yield accumulated

    # Collecting here guarantees each partition worked on its own copy of
    # zeroValue, separate from the copy used by the final reduce below.
    partials = self.mapPartitions(foldPartition).collect()
    return reduce(op, partials, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using the given combine functions and a neutral "zero
    value."

    The functions C{op(t1, t2)} may modify C{t1} and return it as the
    result value to avoid object allocation, but must not modify C{t2}.

    The first function (seqOp) can return a different result type, U, than
    the type of this RDD. Thus, we need one operation for merging a T into
    an U and one operation for merging two U

    >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
    >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
    >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
    (10, 4)
    >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
    (0, 0)
    """
    def aggregatePartition(iterator):
        accumulated = zeroValue
        for element in iterator:
            accumulated = seqOp(accumulated, element)
        yield accumulated

    # Collecting here guarantees each partition worked on its own copy of
    # zeroValue, separate from the copy used by the final reduce below.
    partials = self.mapPartitions(aggregatePartition).collect()
    return reduce(combOp, partials, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
    """
    Aggregates the elements of this RDD in a multi-level tree
    pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeAggregate(0, add, add)
    -5
    >>> rdd.treeAggregate(0, add, add, 1)
    -5
    >>> rdd.treeAggregate(0, add, add, 2)
    -5
    >>> rdd.treeAggregate(0, add, add, 5)
    -5
    >>> rdd.treeAggregate(0, add, add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
    if self.getNumPartitions() == 0:
        return zeroValue

    def aggregatePartition(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc

    partiallyAggregated = self.mapPartitions(aggregatePartition)
    numPartitions = partiallyAggregated.getNumPartitions()
    # Fan-in per tree level; at least 2, so the loop always terminates.
    scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
    # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
    # aggregation.
    while numPartitions > scale + numPartitions / scale:
        numPartitions /= scale  # NOTE: true division -- numPartitions becomes a float here
        curNumPartitions = int(numPartitions)

        def mapPartition(i, iterator):
            # Spread existing partitions across the reduced key space.
            for obj in iterator:
                yield (i % curNumPartitions, obj)

        partiallyAggregated = partiallyAggregated \
            .mapPartitionsWithIndex(mapPartition) \
            .reduceByKey(combOp, curNumPartitions) \
            .values()

    return partiallyAggregated.reduce(combOp)
def max(self, key=None):
    """
    Find the maximum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    >>> rdd.max()
    43.0
    >>> rdd.max(key=str)
    5.0
    """
    if key is not None:
        return self.reduce(lambda a, b: max(a, b, key=key))
    return self.reduce(max)
def min(self, key=None):
    """
    Find the minimum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
    >>> rdd.min()
    2.0
    >>> rdd.min(key=str)
    10.0
    """
    if key is not None:
        return self.reduce(lambda a, b: min(a, b, key=key))
    return self.reduce(min)
def sum(self):
    """
    Add up the elements in this RDD.

    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    # Sum each partition locally, then fold the partial sums together.
    partial_sums = self.mapPartitions(lambda it: [sum(it)])
    return partial_sums.fold(0, operator.add)
def count(self):
    """
    Return the number of elements in this RDD.

    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    def countInPartition(iterator):
        size = 0
        for _ in iterator:
            size += 1
        yield size
    return self.mapPartitions(countInPartition).sum()
def stats(self):
    """
    Return a L{StatCounter} object that captures the mean, variance
    and count of the RDD's elements in one operation.
    """
    # One StatCounter per partition, merged pairwise on the driver.
    per_partition = self.mapPartitions(lambda it: [StatCounter(it)])
    return per_partition.reduce(lambda left, right: left.mergeStats(right))
def histogram(self, buckets):
    """
    Compute a histogram using the provided buckets. The buckets
    are all open to the right except for the last which is closed.
    e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
    which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
    and 50 we would have a histogram of 1,0,1.

    If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
    this can be switched from an O(log n) inseration to O(1) per
    element (where n is the number of buckets).

    Buckets must be sorted, not contain any duplicates, and have
    at least two elements.

    If `buckets` is a number, it will generate buckets which are
    evenly spaced between the minimum and maximum of the RDD. For
    example, if the min value is 0 and the max is 100, given `buckets`
    as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
    be at least 1. An exception is raised if the RDD contains infinity.
    If the elements in the RDD do not vary (max == min), a single bucket
    will be used.

    The return value is a tuple of buckets and histogram.

    >>> rdd = sc.parallelize(range(51))
    >>> rdd.histogram(2)
    ([0, 25, 50], [25, 26])
    >>> rdd.histogram([0, 5, 25, 50])
    ([0, 5, 25, 50], [5, 20, 26])
    >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
    ([0, 15, 30, 45, 60], [15, 15, 15, 6])
    >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
    >>> rdd.histogram(("a", "b", "c"))
    (('a', 'b', 'c'), [2, 2])
    """
    if isinstance(buckets, int):
        # Case 1: a bucket *count* -- derive evenly spaced boundaries
        # from the RDD's min and max.
        if buckets < 1:
            raise ValueError("number of buckets must be >= 1")

        # filter out non-comparable elements
        def comparable(x):
            if x is None:
                return False
            if type(x) is float and isnan(x):
                return False
            return True

        filtered = self.filter(comparable)

        # faster than stats()
        def minmax(a, b):
            return min(a[0], b[0]), max(a[1], b[1])
        try:
            minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
        except TypeError as e:
            # reduce() raises TypeError mentioning "empty" on an empty RDD;
            # translate that into a clearer ValueError.
            if " empty " in str(e):
                raise ValueError("can not generate buckets from empty RDD")
            raise

        if minv == maxv or buckets == 1:
            # Degenerate case: all values identical (or one bucket asked
            # for) -- a single bucket holds everything.
            return [minv, maxv], [filtered.count()]

        try:
            inc = (maxv - minv) / buckets
        except TypeError:
            raise TypeError("Can not generate buckets with non-number in RDD")

        if isinf(inc):
            raise ValueError("Can not generate buckets with infinite value")

        # keep them as integer if possible
        inc = int(inc)
        if inc * buckets != maxv - minv:
            inc = (maxv - minv) * 1.0 / buckets

        buckets = [i * inc + minv for i in range(buckets)]
        buckets.append(maxv)  # fix accumulated error
        even = True

    elif isinstance(buckets, (list, tuple)):
        # Case 2: explicit boundaries -- validate, then detect whether
        # they are evenly spaced (enables O(1) bucket lookup).
        if len(buckets) < 2:
            raise ValueError("buckets should have more than one value")

        if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
            raise ValueError("can not have None or NaN in buckets")

        if sorted(buckets) != list(buckets):
            raise ValueError("buckets should be sorted")

        if len(set(buckets)) != len(buckets):
            raise ValueError("buckets should not contain duplicated values")

        minv = buckets[0]
        maxv = buckets[-1]
        even = False
        inc = None
        try:
            steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
        except TypeError:
            pass  # objects in buckets do not support '-'
        else:
            if max(steps) - min(steps) < 1e-10:  # handle precision errors
                even = True
                inc = (maxv - minv) / (len(buckets) - 1)

    else:
        raise TypeError("buckets should be a list or tuple or number(int or long)")

    def histogram(iterator):
        # One counter per boundary; the last two are merged below so the
        # final bucket is closed on the right.
        counters = [0] * len(buckets)
        for i in iterator:
            if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                continue
            t = (int((i - minv) / inc) if even
                 else bisect.bisect_right(buckets, i) - 1)
            counters[t] += 1
        # add last two together
        last = counters.pop()
        counters[-1] += last
        return [counters]

    def mergeCounters(a, b):
        return [i + j for i, j in zip(a, b)]

    return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
    """
    Compute the mean of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).mean()
    2.0
    """
    summary = self.stats()
    return summary.mean()
def variance(self):
    """
    Compute the variance of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).variance()
    0.666...
    """
    summary = self.stats()
    return summary.variance()
def stdev(self):
    """
    Compute the standard deviation of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).stdev()
    0.816...
    """
    summary = self.stats()
    return summary.stdev()
def sampleStdev(self):
    """
    Compute the sample standard deviation of this RDD's elements (which
    corrects for bias in estimating the standard deviation by dividing by
    N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleStdev()
    1.0
    """
    summary = self.stats()
    return summary.sampleStdev()
def sampleVariance(self):
    """
    Compute the sample variance of this RDD's elements (which corrects
    for bias in estimating the variance by dividing by N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleVariance()
    1.0
    """
    summary = self.stats()
    return summary.sampleVariance()
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.

    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def tallyPartition(iterator):
        tally = defaultdict(int)
        for item in iterator:
            tally[item] += 1
        yield tally

    def combineTallies(left, right):
        # `left` is a defaultdict, so unseen keys start at 0.
        for value, count in right.items():
            left[value] += count
        return left

    return self.mapPartitions(tallyPartition).reduce(combineTallies)
def top(self, num, key=None):
    """
    Get the top N elements from a RDD.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.
    Note: It returns the list sorted in descending order.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
    [6, 5]
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
    [4, 3, 2]
    """
    def largestPerPartition(iterator):
        yield heapq.nlargest(num, iterator, key=key)

    def mergeTops(left, right):
        return heapq.nlargest(num, left + right, key=key)

    return self.mapPartitions(largestPerPartition).reduce(mergeTops)
def takeOrdered(self, num, key=None):
    """
    Get the N elements from a RDD ordered in ascending order or as
    specified by the optional key function.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
    [1, 2, 3, 4, 5, 6]
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
    [10, 9, 7, 6, 5, 4]
    """
    def smallestPerPartition(iterator):
        return [heapq.nsmallest(num, iterator, key)]

    def mergeBottoms(left, right):
        return heapq.nsmallest(num, left + right, key)

    return self.mapPartitions(smallestPerPartition).reduce(mergeBottoms)
def take(self, num):
    """
    Take the first num elements of the RDD.

    It works by first scanning one partition, and use the results from
    that partition to estimate the number of additional partitions needed
    to satisfy the limit.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.

    Translated from the Scala implementation in RDD#take().

    :param num: number of elements to return

    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
    [91, 92, 93]
    """
    items = []
    totalParts = self.getNumPartitions()
    partsScanned = 0

    while len(items) < num and partsScanned < totalParts:
        # The number of partitions to try in this iteration.
        # It is ok for this number to be greater than totalParts because
        # we actually cap it at totalParts in runJob.
        numPartsToTry = 1
        if partsScanned > 0:
            # If we didn't find any rows after the previous iteration,
            # quadruple and retry.  Otherwise, interpolate the number of
            # partitions we need to try, but overestimate it by 50%.
            # We also cap the estimation in the end.
            if len(items) == 0:
                numPartsToTry = partsScanned * 4
            else:
                # the first paramter of max is >=1 whenever partsScanned >= 2
                numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

        left = num - len(items)

        def takeUpToNumLeft(iterator):
            # Pull at most `left` rows from this partition; the bare
            # next() ends the generator when the partition runs dry.
            iterator = iter(iterator)
            taken = 0
            while taken < left:
                yield next(iterator)
                taken += 1

        p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
        res = self.context.runJob(self, takeUpToNumLeft, p)

        items += res
        partsScanned += numPartsToTry
    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    >>> sc.parallelize([2, 3, 4]).first()
    2
    >>> sc.parallelize([]).first()
    Traceback (most recent call last):
        ...
    ValueError: RDD is empty
    """
    found = self.take(1)
    if not found:
        raise ValueError("RDD is empty")
    return found[0]
def isEmpty(self):
    """
    Returns true if and only if the RDD contains no elements at all. Note that an RDD
    may be empty even when it has at least 1 partition.

    >>> sc.parallelize([]).isEmpty()
    True
    >>> sc.parallelize([1]).isEmpty()
    False
    """
    if self.getNumPartitions() == 0:
        return True
    return len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    pickledRDD = self._pickled()
    # Final flag True selects the new (mapreduce) API -- the old-API
    # variant saveAsHadoopDataset passes False here.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                           keyConverter=None, valueConverter=None, conf=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop job configuration, passed in as a dict (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # The JVM side merges jconf on top of the SparkContext's base Hadoop
    # conf and performs the actual save.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
                                                   outputFormatClass,
                                                   keyClass, valueClass,
                                                   keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    pickledRDD = self._pickled()
    # Final flag False selects the old (mapred) API -- the new-API
    # variant saveAsNewAPIHadoopDataset passes True here.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                     keyConverter=None, valueConverter=None, conf=None,
                     compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: (None by default)
    :param compressionCodecClass: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # The JVM side merges jconf on top of the SparkContext's base Hadoop
    # conf and performs the actual save.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
                                             outputFormatClass,
                                             keyClass, valueClass,
                                             keyConverter, valueConverter,
                                             jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
    RDD's key and value types. The mechanism is as follows:

    1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
    2. Keys and values of this Java RDD are converted to Writables and written out.

    :param path: path to sequence file
    :param compressionCodecClass: (None by default)
    """
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
                                               path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
    """
    Save this RDD as a SequenceFile of serialized objects. The serializer
    used is L{pyspark.serializers.PickleSerializer}, default batch size
    is 10.

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
    >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
    ['1', '2', 'rdd', 'spark']
    """
    # batchSize == 0 means "let the serializer pick batch sizes adaptively".
    if batchSize == 0:
        serializer = AutoBatchedSerializer(PickleSerializer())
    else:
        serializer = BatchedSerializer(PickleSerializer(), batchSize)
    self._reserialize(serializer)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
    """
    Save this RDD as a text file, using string representations of elements.

    @param path: path to text file
    @param compressionCodecClass: (None by default) string i.e.
        "org.apache.hadoop.io.compress.GzipCodec"

    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

    Empty lines are tolerated when saving to text files.

    >>> tempFile2 = NamedTemporaryFile(delete=True)
    >>> tempFile2.close()
    >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
    >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
    '\\n\\n\\nbar\\nfoo\\n'

    Using compressionCodecClass

    >>> tempFile3 = NamedTemporaryFile(delete=True)
    >>> tempFile3.close()
    >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
    >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
    >>> from fileinput import input, hook_compressed
    >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
    >>> b''.join(result).decode('utf-8')
    u'bar\\nfoo\\n'
    """
    def func(split, iterator):
        # Coerce every element to UTF-8 encoded bytes before handing the
        # partition to the JVM writer.  NOTE(review): relies on a
        # `unicode` name from a Py2/3 compat alias defined elsewhere in
        # the module -- confirm it is in scope on Python 3.
        for x in iterator:
            if not isinstance(x, (unicode, bytes)):
                x = unicode(x)
            if isinstance(x, unicode):
                x = x.encode("utf-8")
            yield x
    keyed = self.mapPartitionsWithIndex(func)
    # Elements are already raw bytes at this point; presumably
    # _bypass_serializer skips Python-side serialization framing -- confirm.
    keyed._bypass_serializer = True
    if compressionCodecClass:
        compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
    else:
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    Note that this method should only be used if the resulting data is expected
    to be small, as all the data is loaded into the driver's memory.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    pairs = self.collect()
    return dict(pairs)
def keys(self):
    """
    Return an RDD with the keys of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
    >>> m.collect()
    [1, 3]
    """
    return self.map(lambda pair: pair[0])
def values(self):
    """
    Return an RDD with the values of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
    >>> m.collect()
    [2, 4]
    """
    return self.map(lambda pair: pair[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative and commutative reduce function.

    Merging also happens locally on each mapper before results are sent
    to a reducer, similarly to a "combiner" in MapReduce.  Output is
    partitioned with C{numPartitions} partitions, or the default
    parallelism level if C{numPartitions} is not specified.  Default
    partitioner is hash-partition.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    # combineByKey with an identity "createCombiner" and func for both
    # merge steps is exactly a keyed reduce.
    identity = lambda value: value
    return self.combineByKey(identity, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative and commutative reduce function, but
    return the results immediately to the master as a dictionary.

    Merging also happens locally on each mapper before results are sent to
    a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    def reducePartition(iterator):
        merged = {}
        for key, value in iterator:
            merged[key] = func(merged[key], value) if key in merged else value
        yield merged

    def combineMaps(left, right):
        for key, value in right.items():
            left[key] = func(left[key], value) if key in left else value
        return left

    return self.mapPartitions(reducePartition).reduce(combineMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
    def fullOuterJoin(self, other, numPartitions=None):
        """
        Perform a full outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in C{other} have key k.
        Similarly, for each element (k, w) in C{other}, the resulting RDD will
        either contain all pairs (k, (v, w)) for v in C{self}, or the pair
        (k, (None, w)) if no elements in C{self} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("c", 8)])
        >>> sorted(x.fullOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
        """
        return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because the builtin hash of None
# differs across machines.
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.

        :param numPartitions: number of output partitions; ``None`` means
            the default reduce parallelism
        :param partitionFunc: maps a key to an int; a pair goes to
            partition ``partitionFunc(key) % numPartitions``
        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> len(set(sets[0]).intersection(set(sets[1])))
        0
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        partitioner = Partitioner(numPartitions, partitionFunc)
        # Already partitioned the right way: repartitioning would be a no-op.
        if self.partitioner == partitioner:
            return self
        # Transferring O(n) objects to Java is too expensive.
        # Instead, we'll form the hash buckets in Python,
        # transferring O(numPartitions) objects to Java.
        # Each object is a (splitNumber, [objects]) pair.
        # In order to avoid too huge objects, the objects are
        # grouped into chunks.
        outputSerializer = self.ctx._unbatched_serializer
        # Spill threshold: half of the configured Python worker memory.
        limit = (_parse_memory(self.ctx._conf.get(
            "spark.python.worker.memory", "512m")) / 2)
        def add_shuffle_key(split, iterator):
            # Bucket this partition's records by destination partition and
            # emit alternating (packed split id, serialized chunk) values
            # for the JVM side to reassemble.
            buckets = defaultdict(list)
            c, batch = 0, min(10 * numPartitions, 1000)
            for k, v in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
                c += 1
                # check used memory and avg size of chunk of objects
                if (c % 1000 == 0 and get_used_memory() > limit
                        or c > batch):
                    n, size = len(buckets), 0
                    for split in list(buckets.keys()):
                        yield pack_long(split)
                        d = outputSerializer.dumps(buckets[split])
                        del buckets[split]
                        yield d
                        size += len(d)
                    avg = int(size / n) >> 20
                    # let 1M < avg < 10M
                    if avg < 1:
                        batch *= 1.5
                    elif avg > 10:
                        batch = max(int(batch / 1.5), 1)
                    c = 0
            # Flush whatever is still buffered at the end of the partition.
            for split, items in buckets.items():
                yield pack_long(split)
                yield outputSerializer.dumps(items)
        keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
        keyed._bypass_serializer = True
        with SCCallSiteSync(self.context) as css:
            pairRDD = self.ctx._jvm.PairwiseRDD(
                keyed._jrdd.rdd()).asJavaPairRDD()
            jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                           id(partitionFunc))
        jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        rdd.partitioner = partitioner
        return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
    def zip(self, other):
        """
        Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
        that the two RDDs have the same number of partitions and the same
        number of elements in each partition (e.g. one was made through
        a map on the other).
        >>> x = sc.parallelize(range(0,5))
        >>> y = sc.parallelize(range(1000, 1005))
        >>> x.zip(y).collect()
        [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
        """
        def get_batch_size(ser):
            if isinstance(ser, BatchedSerializer):
                return ser.batchSize
            return 1  # not batched
        def batch_as(rdd, batchSize):
            return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
        my_batch = get_batch_size(self._jrdd_deserializer)
        other_batch = get_batch_size(other._jrdd_deserializer)
        # Both sides must use the same batch size, or the JVM-side zip would
        # pair mismatched batches; rebatch both when they differ or are unset.
        if my_batch != other_batch or not my_batch:
            # use the smallest batchSize for both of them
            batchSize = min(my_batch, other_batch)
            if batchSize <= 0:
                # auto batched or unlimited
                batchSize = 100
            other = batch_as(other, batchSize)
            self = batch_as(self, batchSize)
        if self.getNumPartitions() != other.getNumPartitions():
            raise ValueError("Can only zip with RDD which has the same number of partitions")
        # There will be an Exception in JVM if there are different number
        # of items in each partitions.
        pairRDD = self._jrdd.zip(other._jrdd)
        deserializer = PairDeserializer(self._jrdd_deserializer,
                                        other._jrdd_deserializer)
        return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partitions.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
    """
    Serialize a command for a PythonRDD and gather the broadcast state
    that must travel with it.

    Returns (pickled_command, broadcast_vars, environment, python_includes).
    """
    # the serialized command will be compressed by broadcast
    serializer = CloudPickleSerializer()
    pickled_command = serializer.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # Large commands ship as a broadcast variable instead; the broadcast
        # shares the created PythonRDD's life cycle.
        broadcast = sc.broadcast(pickled_command)
        pickled_command = serializer.dumps(broadcast)
    broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
    sc._pickled_broadcast_vars.clear()
    return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """
    Wrap a Python function and its (de)serializers into a JVM-side
    PythonFunction, ready to be handed to a PythonRDD constructor.
    """
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
    """
    An RDD whose Python transformation is fused ("pipelined") with its
    predecessor's, so one pass over the data applies both functions.
    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """
    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse with the previous Python transformation: compose the two
            # functions and keep pointing at the stage's original JVM RDD.
            prev_func = prev.func
            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None  # lazily built by the _jrdd property below
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None
    def getNumPartitions(self):
        # The partition count is inherited from the stage's source JVM RDD.
        return self._prev_jrdd.partitions().size()
    @property
    def _jrdd(self):
        # Build (and cache) the JVM-side PythonRDD on first access.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning)
        self._jrdd_val = python_rdd.asJavaRDD()
        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val
    def id(self):
        # Forces creation of the JVM RDD in order to obtain its id.
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id
    def _is_pipelinable(self):
        # Caching/checkpointing materializes this RDD, so later
        # transformations must not be fused into it.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    import sys
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    try:
        # try/finally so the SparkContext is stopped even if doctest raises.
        (failure_count, test_count) = doctest.testmod(
            globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        globs['sc'].stop()
    if failure_count:
        # sys.exit rather than the site builtin `exit`, which is not
        # guaranteed to be available in all interpreters.
        sys.exit(-1)
# Run the module's doctest suite when executed directly.
if __name__ == "__main__":
    _test()
|
test_insert.py | import pytest
from pymilvus import DataType, ParamError, BaseException
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
# Per-test timeout (seconds) applied to the insert operations below.
ADD_TIMEOUT = 60
# Prefix used to generate unique collection names for this module.
uid = "test_insert"
# Default vector field names (float and binary) shared by the tests.
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
# A minimal top-10 L2 vector query reused by the search-after-insert tests.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
                                     "params": {"nprobe": 10}}}}
        ]
    }
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
logging.getLogger().info(request.param)
return request.param
    @pytest.fixture(
        scope="function",
        params=gen_single_filter_fields()
    )
    def get_filter_field(self, request):
        # Parametrized over every single scalar (filter) field schema.
        yield request.param
    @pytest.fixture(
        scope="function",
        params=gen_single_vector_fields()
    )
    def get_vector_field(self, request):
        # Parametrized over every single vector field schema.
        yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_empty_entity(self, connect, collection):
'''
target: test insert with empty entity list
method: set empty entity list as insert method params
expected: raises a ParamError exception
'''
entities = []
with pytest.raises(ParamError) as e:
connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_None(self, connect, collection):
'''
target: test insert with None
method: set None as insert method params
expected: raises a ParamError
'''
entity = None
with pytest.raises(Exception) as e:
connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_collection_not_existed(self, connect):
'''
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: raise a BaseException
'''
collection_name = gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
'''
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
'''
target: test delete collection after insert entities
method: insert entities and drop collection
expected: has_collection false
'''
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
'''
target: test drop collection after insert entities for a while
method: insert entities, sleep, and delete collection
expected: has_collection false
'''
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.flush([collection])
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
'''
target: test build index insert after entities
method: insert entities and build index
expected: no error raised
'''
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
'''
target: test build index insert after vector
method: insert entities and build index
expected: no error raised
'''
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_search(self, connect, collection):
        '''
        target: test search entity after inserting entities
        method: insert entities, flush, load, and search collection
        expected: search returns default_top_k hits
        '''
        result = connect.insert(collection, default_entities)
        connect.flush([collection])
        # Collection must be loaded before a search can run against it.
        connect.load_collection(collection)
        res = connect.search(collection, default_single_query)
        assert len(res[0]) == default_top_k
    @pytest.mark.tags(CaseLabel.L2)
    # NOTE: leading underscore disables this test (pytest will not collect it).
    def _test_insert_segment_row_count(self, connect, collection):
        # Insert one entity more than the segment row limit so the data must
        # spill into a second segment.
        nb = default_segment_row_limit + 1
        result = connect.insert(collection, gen_entities(nb))
        connect.flush([collection])
        assert len(result.primary_keys) == nb
        stats = connect.get_collection_stats(collection)
        assert len(stats['partitions'][0]['segments']) == 2
        # One full segment plus one segment holding the single overflow row.
        for segment in stats['partitions'][0]['segments']:
            assert segment['row_count'] in [default_segment_row_limit, 1]
    @pytest.fixture(
        scope="function",
        params=[
            1,
            2000
        ],
    )
    def insert_count(self, request):
        # Parametrizes tests over a single-entity and a bulk insert size.
        yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
'''
target: test insert entities in collection, use customize ids
method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
expected: the length of ids and the collection row count
'''
nb = insert_count
ids = [i for i in range(nb)]
entities = gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, use customize the same ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the length of ids and the collection row count
'''
nb = insert_count
ids = [1 for i in range(nb)]
entities = gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, insert entities into id with ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = gen_entities_by_fields(fields["fields"], nb, default_dim, ids)
logging.getLogger().info(entities)
result = connect.insert(collection_name, entities)
assert result.primary_keys == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_ids_not_match(self, connect, id_collection, insert_count):
        '''
        target: test insert entities in collection without ids
        method: create id_collection and insert entities without the id field
        expected: exception raised
        '''
        nb = insert_count
        with pytest.raises(Exception) as e:
            entities = gen_entities(nb)
            # Drop the primary-key field; insert into an id_collection must fail.
            del entities[0]
            connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customize ids first, and then use no ids
expected: BaseException raised
'''
ids = [i for i in range(default_nb)]
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
connect.insert(id_collection, entities)
with pytest.raises(Exception) as e:
del entities[0]
connect.insert(id_collection, entities)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_not_ids(self, connect, id_collection):
        '''
        target: check the result of insert without ids into an id collection
        method: insert entities with the id field removed
        expected: error raised
        '''
        entities = copy.deepcopy(default_entities)
        # Remove the primary-key field before inserting.
        del entities[0]
        with pytest.raises(Exception) as e:
            connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
'''
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
'''
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entity = copy.deepcopy(default_entity)
entity[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entity)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_partition(self, connect, collection):
        '''
        target: test insert entities in collection created before
        method: create collection and insert entities in it, with the partition_name param
        expected: the collection row count equals to nb
        '''
        connect.create_partition(collection, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        assert connect.has_partition(collection, default_tag)
        connect.flush([collection])
        # Collection-level stats include the rows inserted into the partition.
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
expected: the collection row count equals to nq
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = gen_entities(default_nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities, partition_name=default_tag)
assert result.primary_keys == ids
logging.getLogger().info(connect.describe_collection(id_collection))
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_default_partition(self, connect, collection):
        '''
        target: test insert entities into default partition
        method: insert into the collection naming the default partition explicitly
        expected: the collection row count equals to nb
        '''
        result = connect.insert(collection, default_entities, partition_name=default_partition_name)
        assert len(result.primary_keys) == default_nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == default_nb
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_partition_not_existed(self, connect, collection):
        '''
        target: test insert entities in collection created before
        method: insert entities with a partition_name that was never created
        expected: error raised
        '''
        # Random tag name that no partition was created for.
        tag = gen_unique_str()
        with pytest.raises(Exception) as e:
            connect.insert(collection, default_entities, partition_name=tag)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_partition_repeatedly(self, connect, collection):
        '''
        target: test insert entities in collection created before
        method: create collection and insert entities in it repeatedly, with the partition_name param
        expected: the collection row count equals twice nb
        '''
        connect.create_partition(collection, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        connect.flush([collection])
        # Two identical batches: expect double the row count.
        res = connect.get_collection_stats(collection)
        assert res[row_count] == 2 * default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_dim_not_matched(self, connect, collection):
        '''
        target: test insert entities, the vector dimension is not equal to the collection dimension
        method: the entities dimension is half of the collection dimension, check the status
        expected: error raised
        '''
        # Vectors with half the collection's dimension.
        vectors = gen_vectors(default_nb, int(default_dim) // 2)
        insert_entities = copy.deepcopy(default_entities)
        # Last field is the vector field of the generated entities.
        insert_entities[-1]["values"] = vectors
        with pytest.raises(Exception) as e:
            connect.insert(collection, insert_entities)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_name_not_match(self, connect, collection):
        '''
        target: test insert entities, with the entity field name updated
        method: rename the "int64" field before inserting
        expected: error raised
        '''
        tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_field_type_not_match(self, connect, collection):
        '''
        target: test insert entities, with the entity field type updated
        method: change the "int64" field's declared type to FLOAT before inserting
        expected: error raised
        '''
        tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_field_value_not_match(self, connect, collection):
        '''
        target: test insert entities, with the entity field value updated
        method: put a string value ('s') into the FLOAT field
        expected: error raised
        '''
        tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_more(self, connect, collection):
        '''
        target: test insert entities, with more fields than collection schema
        method: add an extra scalar field to the entity
        expected: error raised
        '''
        tmp_entity = add_field(copy.deepcopy(default_entity))
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_vector_more(self, connect, collection):
        '''
        target: test insert entities, with more fields than collection schema
        method: add an extra vector field to the entity
        expected: error raised
        '''
        tmp_entity = add_vector_field(default_nb, default_dim)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_less(self, connect, collection):
        '''
        target: test insert entities, with less fields than collection schema
        method: remove a scalar field from the entity
        expected: error raised
        '''
        tmp_entity = remove_field(copy.deepcopy(default_entity))
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_vector_less(self, connect, collection):
        '''
        target: test insert entities, with less fields than collection schema
        method: remove the vector field from the entity
        expected: error raised
        '''
        tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_no_field_vector_value(self, connect, collection):
        '''
        target: test insert entities, with no vector field value
        method: remove the "values" key of the vector field
        expected: error raised
        '''
        tmp_entity = copy.deepcopy(default_entity)
        # Last field is the vector field; strip its payload only.
        del tmp_entity[-1]["values"]
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_no_field_vector_type(self, connect, collection):
        '''
        target: test insert entities, with no vector field type
        method: remove the "type" key of the vector field
        expected: error raised
        '''
        tmp_entity = copy.deepcopy(default_entity)
        del tmp_entity[-1]["type"]
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_no_field_vector_name(self, connect, collection):
        '''
        target: test insert entities, with no vector field name
        method: remove the "name" key of the vector field
        expected: error raised
        '''
        tmp_entity = copy.deepcopy(default_entity)
        del tmp_entity[-1]["name"]
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it(idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
result = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
    # TODO: unable to set config
    @pytest.mark.tags(CaseLabel.L2)
    # NOTE: leading underscore disables this test (pytest will not collect it).
    def _test_insert_disable_auto_flush(self, connect, collection):
        '''
        target: test insert entities, with disable autoflush
        method: disable autoflush and insert, get entity
        expected: the count is equal to 0
        '''
        delete_nums = 500
        disable_flush(connect)
        result = connect.insert(collection, default_entities)
        ids = result.primary_keys
        res = connect.get_entity_by_id(collection, ids[:delete_nums])
        # NOTE(review): with auto-flush disabled the lookup should return
        # placeholders — but asserting both len(res) == delete_nums and
        # res[0] is None looks inconsistent with the docstring ("count equal
        # to 0"); verify intent before re-enabling this test.
        assert len(res) == delete_nums
        assert res[0] is None
class TestInsertBinary:
    """Insert tests against binary-vector collections (JACCARD metric)."""
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_binary_index(self, request):
        # Force the metric supported by binary indexes in these tests.
        request.param["metric_type"] = "JACCARD"
        return request.param

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_entities(self, connect, binary_collection):
        '''
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        '''
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_partition(self, connect, binary_collection):
        '''
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        '''
        connect.create_partition(binary_collection, default_tag)
        result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="issue 7027")
    def test_insert_binary_multi_times(self, connect, binary_collection):
        '''
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi times, then flush once
        expected: the collection row count equals to nb
        '''
        # One single-entity insert per iteration; flush only at the end.
        for i in range(default_nb):
            result = connect.insert(binary_collection, default_binary_entity)
            assert len(result.primary_keys) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised; described index matches the requested one
        '''
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        index = connect.describe_index(binary_collection, "")
        # create_target_index mutates get_binary_index into the expected form.
        create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test build index after inserting binary vectors
        method: insert vectors and build index
        expected: no error raised; described index matches the requested one
        '''
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_search(self, connect, binary_collection):
        '''
        target: test search after inserting binary vectors
        method: insert vectors, flush, load, and search collection
        expected: search returns default_top_k hits
        '''
        result = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
                                        metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        logging.getLogger().debug(res)
        assert len(res[0]) == default_top_k
class TestInsertAsync:
    """Tests for the asynchronous (_async=True) insert path."""
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # Async insert is not supported over the HTTP handler.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        yield request.param

    def check_status(self, result):
        # Callback used where the async call is expected to fail/timeout.
        logging.getLogger().info("In callback check status")
        assert not result

    def check_result(self, result):
        # Callback used where the async call is expected to succeed.
        logging.getLogger().info("In callback check results")
        assert result

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async(self, connect, collection, insert_count):
        '''
        target: test async insert
        method: insert entities with _async=True and wait on the future
        expected: length of returned primary keys equals the insert count
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True)
        ids = future.result().primary_keys
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_false(self, connect, collection, insert_count):
        '''
        target: test insert with _async explicitly disabled
        method: insert entities with _async=False (synchronous result object)
        expected: length of returned primary keys equals the insert count
        '''
        nb = insert_count
        result = connect.insert(collection, gen_entities(nb), _async=False)
        # ids = future.result()
        connect.flush([collection])
        assert len(result.primary_keys) == nb

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_callback(self, connect, collection, insert_count):
        '''
        target: test async insert with a success callback
        method: insert with _async=True and _callback, then wait on the future
        expected: length of returned primary keys equals the insert count
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        future.done()
        ids = future.result().primary_keys
        assert len(ids) == nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_long(self, connect, collection):
        '''
        target: test async insert with a large batch
        method: insert 50000 entities with _async=True and a success callback
        expected: primary-key count and collection stats match the batch size
        '''
        nb = 50000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        result = future.result()
        assert len(result.primary_keys) == nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats[row_count] == nb

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_callback_timeout(self, connect, collection):
        '''
        target: test async insert that exceeds its timeout
        method: insert a very large batch with timeout=1
        expected: waiting on the future raises
        '''
        nb = 100000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception) as e:
            result = future.result()

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_invalid_params(self, connect):
        '''
        target: test async insert into a non-existent collection
        method: insert with _async=True into a collection that was never created
        expected: waiting on the future raises
        '''
        collection_new = gen_unique_str()
        future = connect.insert(collection_new, default_entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            result = future.result()

    # 1339
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        '''
        target: test async insert with empty entities
        method: insert an empty entity list with _async=True
        expected: waiting on the future raises
        '''
        entities = []
        future = connect.insert(collection, entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
    The following cases are used to test `insert` function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_multi_collections(self, connect):
        '''
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count
        '''
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            result = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(result.primary_keys) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        # Clean up every collection created by this test.
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        '''
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.drop_collection(collection)
        result = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(result.primary_keys) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection_name, default_entity)
        assert len(result.primary_keys) == 1
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            # create_target_index mutates get_simple_index into the expected form.
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1 for a while
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_entity_insert_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        # `collection` is empty, so the search must return no hits.
        res = connect.search(collection, default_single_query)
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2 a while
        method: search collection, sleep, and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        assert len(res[0]) == 0

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    # NOTE: leading underscore disables this test (pytest will not collect it).
    def _test_insert_entity_during_release_collection(self, connect, collection):
        '''
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        '''
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        def release(collection):
            # BUGFIX: the worker previously took no parameters while the
            # Thread was started with args=(collection,), which raised a
            # TypeError inside the thread.  Accept the argument explicitly.
            connect.release_collection(collection)

        t = threading.Thread(target=release, args=(collection,))
        t.start()
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting vectors with invalid parameters
    (collection names, tags, field names/types/values, ids).
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        # Same invalid id repeated for the whole batch.
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        # None is treated as "no partition specified" and must succeed;
        # every other invalid string must raise.
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=tag_name)
        else:
            connect.insert(collection, default_entity, partition_name=tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        field_value = get_field_int_value
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        tmp_entity = copy.deepcopy(default_entity)
        # Corrupt a single component of the first vector.
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
    """
    Negative insert tests for binary-vector collections: every fixture
    supplies an invalid name/type/value and insert is expected to raise.
    """

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_ints())
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_vectors())
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        """An invalid name for the int64 field must make insert raise."""
        bad_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        """An invalid value substituted for the int64 field type must make insert raise."""
        bad_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        """Corrupting the first vector of a single entity must make insert raise."""
        bad_entity = copy.deepcopy(default_binary_entity)
        bad_entity[-1]["values"][0] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        bad_ids = [get_entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, bad_ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        """An invalid type substituted on the int64 field must make insert raise."""
        bad_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        """Corrupting one vector inside the entity batch must make insert raise."""
        bad_entities = copy.deepcopy(default_binary_entities)
        bad_entities[-1]["values"][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, bad_entities)
|
igmpTest.py |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from twisted.internet import defer
from nose.tools import *
from nose.twistedtools import reactor, deferred
from scapy.all import *
from select import select as socket_select
import time, monotonic
import os
import random
import threading
from IGMP import *
from McastTraffic import *
from Stats import Stats
from OnosCtrl import OnosCtrl
from OltConfig import OltConfig
from Channels import IgmpChannel
from CordLogger import CordLogger
from CordTestConfig import setup_module, teardown_module
from CordTestUtils import log_test
log_test.setLevel('INFO')
class IGMPTestState:
    """Tracks per-group send/receive statistics for one IGMP test run.

    group_map maps each group address to a (tx_stats, rx_stats) pair of
    Stats objects; state is a join/leave toggle bit.
    """

    def __init__(self, groups=None, df=None, state=0):
        # Fix: the original used a mutable default argument (groups=[]),
        # which would be shared across instances; use None instead.
        self.df = df            # deferred used to signal test completion
        self.state = state      # join/leave toggle bit
        self.counter = 0        # number of update() calls
        self.groups = [] if groups is None else groups
        self.group_map = {}     ##create a send/recv count map
        for g in self.groups:
            self.group_map[g] = (Stats(), Stats())

    def update(self, group, tx=0, rx=0, t=0):
        """Record one tx or rx event for *group*; unknown groups are ignored."""
        self.counter += 1
        index = 0 if rx == 0 else 1
        v = tx if rx == 0 else rx
        # Fix: 'in' instead of Py2-only dict.has_key (works on Py2 and Py3).
        if group in self.group_map:
            self.group_map[group][index].update(packets=v, t=t)

    def update_state(self):
        """Flip the join/leave state bit."""
        self.state = self.state ^ 1
class igmp_exchange(CordLogger):
    """IGMP join/leave/query conformance tests driven over veth interfaces."""
    V_INF1 = 'veth0'                    # default subscriber (join) interface
    MGROUP1 = '239.1.2.3'               # multicast test group 1
    MGROUP2 = '239.2.2.3'               # multicast test group 2
    MINVALIDGROUP1 = '255.255.255.255'  # address outside the 224/4 multicast range
    MINVALIDGROUP2 = '239.255.255.255'
    MMACGROUP1 = "01:00:5e:01:02:03"    # multicast MAC corresponding to MGROUP1
    MMACGROUP2 = "01:00:5e:02:02:03"    # multicast MAC corresponding to MGROUP2
    IGMP_DST_MAC = "01:00:5e:00:00:16"  # multicast MAC for IP_DST (224.0.0.22)
    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
    IP_SRC = '1.2.3.4'
    IP_DST = '224.0.0.22'               # IGMPv3 membership report destination
    NEGATIVE_TRAFFIC_STATUS = 1         # 1 = no unexpected traffic seen, 2 = traffic seen
    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
    igmp_ip = IP(dst = IP_DST)
    IGMP_TEST_TIMEOUT = 5               # seconds
    IGMP_QUERY_TIMEOUT = 60             # seconds to wait for a periodic query
    MCAST_TRAFFIC_TIMEOUT = 20          # seconds of multicast traffic per test
    PORT_TX_DEFAULT = 2                 # port used to transmit multicast traffic
    PORT_RX_DEFAULT = 1                 # port used to receive (subscriber side)
    max_packets = 100
    app = 'org.opencord.igmp'           # ONOS IGMP application id
    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
    ROVER_TEST_TIMEOUT = 300 #3600*86
    ROVER_TIMEOUT = (ROVER_TEST_TIMEOUT - 100)
    ROVER_JOIN_TIMEOUT = 60
    VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
@classmethod
def setUpClass(cls):
    # Load the OLT port map and, when VOLTHA is not in the loop, push the
    # device driver and OLT configuration to ONOS before any test runs.
    cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
    cls.port_map, _ = cls.olt.olt_port_map()
    if cls.VOLTHA_ENABLED is False:
        OnosCtrl.config_device_driver()
        OnosCtrl.cord_olt_config(cls.olt)
    time.sleep(2)  # allow ONOS time to apply the configuration
@classmethod
def tearDownClass(cls):
    # Restore the default OVS device driver unless VOLTHA owns the device.
    if cls.VOLTHA_ENABLED is False:
        OnosCtrl.config_device_driver(driver = 'ovs')
def setUp(self):
    ''' Activate the igmp app'''
    # Per-test setup: activate the ONOS IGMP app and prepare the channel
    # helper used later to program the CORD port table.
    super(igmp_exchange, self).setUp()
    self.onos_ctrl = OnosCtrl(self.app)
    self.onos_ctrl.activate()
    self.igmp_channel = IgmpChannel()
def tearDown(self):
    # No extra teardown beyond the CordLogger base class.
    super(igmp_exchange, self).tearDown()
def onos_load_config(self, config):
    # Push a JSON network-config blob to ONOS; fail the test on error.
    log_test.info('onos load config is %s'%config)
    status, code = OnosCtrl.config(config)
    if status is False:
        log_test.info('JSON request returned status %d' %code)
        assert_equal(status, True)
    time.sleep(2)  # give ONOS time to digest the config
def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
    # NOTE(review): this early return deliberately disables the SSM
    # translate programming below; everything after it is dead code kept
    # for reference. (src_list's mutable default is never mutated here.)
    return
    ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
    ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
    if flag: #to maintain seperate group-source pair.
        # One-to-one pairing: groups[i] maps to src_list[i].
        for i in range(len(groups)):
            d = {}
            d['source'] = src_list[i] or '0.0.0.0'
            d['group'] = groups[i]
            ssm_xlate_list.append(d)
    else:
        # Cross product: every group paired with every source.
        for g in groups:
            for s in src_list:
                d = {}
                d['source'] = s or '0.0.0.0'
                d['group'] = g
                ssm_xlate_list.append(d)
    self.onos_load_config(ssm_dict)
    cord_port_map = {}
    for g in groups:
        cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
    self.igmp_channel.cord_port_table_load(cord_port_map)
    time.sleep(2)
def mcast_ip_range(self, start_ip='224.0.1.0', end_ip='224.0.1.100'):
    """Return the inclusive list of dotted-quad addresses from start_ip to end_ip.

    An octet reaching 255 is carried into the next octet, so .255 addresses
    never appear in the result.
    """
    cursor = [int(octet) for octet in start_ip.split(".")]
    limit = [int(octet) for octet in end_ip.split(".")]
    addresses = [start_ip]
    while cursor != limit:
        cursor[3] += 1
        # Propagate carries from the last octet upward.
        for pos in (3, 2, 1):
            if cursor[pos] == 255:
                cursor[pos] = 0
                cursor[pos - 1] += 1
        addresses.append(".".join(str(o) for o in cursor))
    return addresses
def random_mcast_ip(self, start_ip='224.0.1.0', end_ip='224.0.1.100'):
    """Return one random address drawn from the start_ip..end_ip range."""
    cursor = [int(octet) for octet in start_ip.split(".")]
    limit = [int(octet) for octet in end_ip.split(".")]
    candidates = [start_ip]
    while cursor != limit:
        cursor[3] += 1
        # Carry octets that hit 255, as in mcast_ip_range.
        for pos in (3, 2, 1):
            if cursor[pos] == 255:
                cursor[pos] = 0
                cursor[pos - 1] += 1
        candidates.append(".".join(str(o) for o in cursor))
    return random.choice(candidates)
def source_ip_range(self, start_ip='10.10.0.1', end_ip='10.10.0.100'):
    """Return the inclusive list of unicast source addresses start_ip..end_ip."""
    cursor = [int(octet) for octet in start_ip.split(".")]
    limit = [int(octet) for octet in end_ip.split(".")]
    addresses = [start_ip]
    while cursor != limit:
        cursor[3] += 1
        # Carry octets that hit 255, as in mcast_ip_range.
        for pos in (3, 2, 1):
            if cursor[pos] == 255:
                cursor[pos] = 0
                cursor[pos - 1] += 1
        addresses.append(".".join(str(o) for o in cursor))
    return addresses
def randomsourceip(self, start_ip='10.10.0.1', end_ip='10.10.0.100'):
    """Return one random unicast source address from start_ip..end_ip."""
    cursor = [int(octet) for octet in start_ip.split(".")]
    limit = [int(octet) for octet in end_ip.split(".")]
    candidates = [start_ip]
    while cursor != limit:
        cursor[3] += 1
        # Carry octets that hit 255, as in source_ip_range.
        for pos in (3, 2, 1):
            if cursor[pos] == 255:
                cursor[pos] = 0
                cursor[pos - 1] += 1
        candidates.append(".".join(str(o) for o in cursor))
    return random.choice(candidates)
def get_igmp_intf(self):
    """Return the veth interface for this test instance.

    Indexes the OLT port map via the TEST_INSTANCE environment variable,
    skipping the uplink port; falls back to 'veth0'.
    """
    inst = os.getenv('TEST_INSTANCE', None)
    if not inst:
        return 'veth0'
    inst = int(inst) + 1
    if inst >= self.port_map['uplink']:
        inst += 1  # skip over the uplink port number
    # Fix: 'in' instead of Py2-only dict.has_key (works on Py2 and Py3).
    if inst in self.port_map:
        return self.port_map[inst]
    return 'veth0'
def igmp_verify_join(self, igmpStateList):
    """Assert that every group saw both transmitted and received packets."""
    tx_state, rx_state = igmpStateList
    for group in tx_state.groups:
        # Sender side must have counted at least one packet per group...
        assert_greater(tx_state.group_map[group][0].count, 0)
        # ...and the receiver side as well.
        rx_stats = rx_state.group_map[group][1]
        assert_greater(rx_stats.count, 0)
        log_test.info('Receive stats %s for group %s' %(rx_stats, group))
    log_test.info('IGMP test verification success')
def igmp_verify_leave(self, igmpStateList, leave_groups):
    """Assert traffic flowed for joined groups and none for left groups."""
    tx_state = igmpStateList[0]
    rx_state = igmpStateList[1]
    for group in tx_state.groups:
        sent = tx_state.group_map[group][0].count
        received = rx_state.group_map[group][1].count
        assert_greater(sent, 0)
        if group not in leave_groups:
            log_test.info('Received %d packets for group %s' %(received, group))
    # Groups that were left must have received nothing at all.
    for group in leave_groups:
        assert_equal(rx_state.group_map[group][1].count, 0)
    log_test.info('IGMP test verification success')
def mcast_traffic_timer(self):
    # Timer callback: stop counting received multicast packets.
    log_test.info('MCAST traffic timer expiry')
    self.mcastTraffic.stopReceives()
def send_mcast_cb(self, send_state):
    # McastTraffic callback: count one transmitted packet per group.
    for g in send_state.groups:
        send_state.update(g, tx = 1)
    return 0
##Runs in the context of twisted reactor thread
def igmp_recv(self, igmpState):
    """Poll the receive socket once and record a received multicast packet.

    The traffic payload carries the sender's monotonic timestamp, so the
    receive latency can be derived from it.
    """
    s = socket_select([self.recv_socket], [], [], 1.0)
    if self.recv_socket in s[0]:
        p = self.recv_socket.recv()
        try:
            send_time = float(p.payload.load)
            recv_time = monotonic.monotonic()
        # Fix: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are not swallowed; malformed payloads are just logged.
        except Exception:
            log_test.info('Unexpected Payload received: %s' %p.payload.load)
            return 0
        #log_test.info( 'Recv in %.6f secs' %(recv_time - send_time))
        igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
    return 0
def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
                   ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
    # Build and send one IGMPv3 membership report carrying a group record
    # (of record_type) per group, then optionally sleep for delay seconds.
    if ssm_load is True:
        self.onos_ssm_table_load(groups, src_list)
    igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
                  gaddr=self.IP_DST)
    for g in groups:
        gr = IGMPv3gr(rtype= record_type, mcaddr=g)
        gr.sources = src_list
        igmp.grps.append(gr)
    if ip_pkt is None:
        ip_pkt = self.igmp_eth/self.igmp_ip
    pkt = ip_pkt/igmp
    IGMPv3.fixup(pkt)  # recompute lengths/checksums after adding records
    sendp(pkt, iface=iface)
    if delay != 0:
        time.sleep(delay)
def send_igmp_join_recvQuery(self, groups, rec_queryCount = None, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
    """Send an IGMPv3 join and wait for a query packet, printing its summary.

    rec_queryCount of None waits for a single query; otherwise the periodic
    path is taken (both currently use srp1 - see the commented alternative).
    """
    self.onos_ssm_table_load(groups, src_list)
    igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
                  gaddr=self.IP_DST)
    for g in groups:
        gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
        gr.sources = src_list  # fix: a duplicated copy of this assignment was removed
        igmp.grps.append(gr)
    if ip_pkt is None:
        ip_pkt = self.igmp_eth/self.igmp_ip
    pkt = ip_pkt/igmp
    IGMPv3.fixup(pkt)
    if rec_queryCount is None:  # fix: 'is None' instead of '== None'
        log_test.info('Sending IGMP join for group %s and waiting for one query packet and printing the packet' %groups)
        resp = srp1(pkt, iface=iface)
    else:
        log_test.info('Sending IGMP join for group %s and waiting for periodic query packets and printing one packet' %groups)
        resp = srp1(pkt, iface=iface)
        # resp = srp1(pkt, iface=iface) if rec_queryCount else srp3(pkt, iface=iface)
    resp[0].summary()
    log_test.info('Sent IGMP join for group %s and received a query packet and printing packet' %groups)
    if delay != 0:
        time.sleep(delay)
def send_igmp_leave(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
    """Send an IGMPv3 leave (CHANGE_TO_EXCLUDE records) for the given groups."""
    log_test.info('entering into igmp leave function')
    report = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
                    gaddr=self.IP_DST)
    for group in groups:
        record = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=group)
        record.sources = src_list
        report.grps.append(record)
    header = ip_pkt if ip_pkt is not None else self.igmp_eth/self.igmp_ip
    frame = header/report
    IGMPv3.fixup(frame)
    sendp(frame, iface = iface)
    if delay:
        time.sleep(delay)
def send_igmp_leave_listening_group_specific_query(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
    # Send an IGMPv3 leave (CHANGE_TO_EXCLUDE) and block until the router's
    # group-specific query arrives, printing a summary of the reply.
    igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
                  gaddr=self.IP_DST)
    for g in groups:
        gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
        gr.sources = src_list
        igmp.grps.append(gr)
    if ip_pkt is None:
        ip_pkt = self.igmp_eth/self.igmp_ip
    pkt = ip_pkt/igmp
    IGMPv3.fixup(pkt)
    log_test.info('Sending IGMP leave for group %s and waiting for one group specific query packet and printing the packet' %groups)
    resp = srp1(pkt, iface=iface)
    resp[0].summary()
    log_test.info('Sent IGMP leave for group %s and received a group specific query packet and printing packet' %groups)
    if delay != 0:
        time.sleep(delay)
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
def test_igmp_join_verify_traffic(self):
    # Join MGROUP1, stream multicast traffic, and verify packets are both
    # sent and received before the traffic timer stops the exchange.
    groups = [self.MGROUP1, self.MGROUP1]
    self.onos_ssm_table_load(groups)
    df = defer.Deferred()
    igmpState = IGMPTestState(groups = groups, df = df)
    igmpStateRecv = IGMPTestState(groups = groups, df = df)
    igmpStateList = (igmpState, igmpStateRecv)
    tx_intf = self.port_map[self.PORT_TX_DEFAULT]
    rx_intf = self.port_map[self.PORT_RX_DEFAULT]
    mcastTraffic = McastTraffic(groups, iface= tx_intf, cb = self.send_mcast_cb, arg = igmpState)
    self.df = df
    self.mcastTraffic = mcastTraffic
    self.recv_socket = L3PacketSocket(iface = rx_intf, type = ETH_P_IP)
    def igmp_srp_task(stateList):
        # Reschedules itself on the reactor until the traffic timer fires,
        # then leaves the groups, verifies the stats and completes df.
        igmpSendState, igmpRecvState = stateList
        if not mcastTraffic.isRecvStopped():
            self.igmp_recv(igmpRecvState)
            reactor.callLater(0, igmp_srp_task, stateList)
        else:
            self.mcastTraffic.stop()
            #log_test.info('Sending IGMP leave for groups: %s' %groups)
            self.send_igmp_leave(groups, iface = rx_intf, delay = 2)
            self.recv_socket.close()
            self.igmp_verify_join(stateList)
            self.df.callback(0)
    self.send_igmp_join(groups, iface = rx_intf)
    mcastTraffic.start()
    self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
    reactor.callLater(0, igmp_srp_task, igmpStateList)
    return df
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
def test_igmp_leave_verify_traffic(self):
    """Join a group, leave it, then verify no more traffic is delivered.

    Fix: a second IGMPTestState instance that was created and immediately
    discarded has been removed as dead code.
    """
    groups = [self.MGROUP1]
    leave_groups = [self.MGROUP1]
    self.onos_ssm_table_load(groups)
    df = defer.Deferred()
    igmpState = IGMPTestState(groups = groups, df = df)
    tx_intf = self.port_map[self.PORT_TX_DEFAULT]
    rx_intf = self.port_map[self.PORT_RX_DEFAULT]
    mcastTraffic = McastTraffic(groups, iface= tx_intf, cb = self.send_mcast_cb,
                                arg = igmpState)
    self.df = df
    self.mcastTraffic = mcastTraffic
    self.recv_socket = L3PacketSocket(iface = rx_intf, type = ETH_P_IP)
    mcastTraffic.start()
    self.send_igmp_join(groups, iface = rx_intf)
    time.sleep(5)
    self.send_igmp_leave(leave_groups, delay = 3, iface = rx_intf)
    time.sleep(10)
    join_state = IGMPTestState(groups = leave_groups)
    # Expect status 1: no multicast traffic after the leave.
    status = self.igmp_not_recv_task(rx_intf, leave_groups, join_state)
    log_test.info('verified status for igmp recv task %s'%status)
    assert status == 1 , 'EXPECTED RESULT'
    self.df.callback(0)
    return df
@deferred(timeout=100)
def test_igmp_leave_join_loop(self):
    # Alternate joins and leaves for a random-sized prefix of self.groups,
    # MAX_TEST_ITERATIONS times, pacing each round on the reactor.
    self.groups = ['226.0.1.1', '227.0.0.1', '228.0.0.1', '229.0.0.1', '230.0.0.1' ]
    self.src_list = ['3.4.5.6', '7.8.9.10']
    self.onos_ssm_table_load(self.groups,src_list=self.src_list)
    df = defer.Deferred()
    self.df = df
    self.iterations = 0
    self.num_groups = len(self.groups)
    self.MAX_TEST_ITERATIONS = 10
    rx_intf = self.port_map[self.PORT_RX_DEFAULT]
    def igmp_srp_task(v):
        # v toggles between 1 (join a random prefix) and 0 (leave it).
        if self.iterations < self.MAX_TEST_ITERATIONS:
            if v == 1:
                ##join test
                self.num_groups = random.randint(0, len(self.groups))
                self.send_igmp_join(self.groups[:self.num_groups],
                                    src_list = self.src_list,
                                    iface = rx_intf, delay = 0)
            else:
                self.send_igmp_leave(self.groups[:self.num_groups],
                                     src_list = self.src_list,
                                     iface = rx_intf, delay = 0)
            self.iterations += 1
            v ^= 1
            # Pace the next round proportionally to the group count.
            reactor.callLater(1.0 + 0.5*self.num_groups,
                              igmp_srp_task, v)
        else:
            self.df.callback(0)
    reactor.callLater(0, igmp_srp_task, 1)
    return df
def igmp_join_task(self, intf, groups, state, src_list = ['1.2.3.4']):
    # Thread target: send one IGMPv3 INCLUDE join for all groups, stamping
    # each group's tx Stats with the send time (for latency measurement).
    #self.onos_ssm_table_load(groups, src_list)
    igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
                  gaddr=self.IP_DST)
    for g in groups:
        gr = IGMPv3gr(rtype = IGMP_V3_GR_TYPE_INCLUDE, mcaddr = g)
        gr.sources = src_list
        igmp.grps.append(gr)
    for g in groups:
        state.group_map[g][0].update(1, t = monotonic.monotonic())
    pkt = self.igmp_eth/self.igmp_ip/igmp
    IGMPv3.fixup(pkt)
    sendp(pkt, iface=intf)
    log_test.debug('Returning from join task')
def igmp_recv_task(self, intf, groups, join_state):
    # Thread target: block until at least one packet has been seen for
    # every group, then log the join-to-first-packet latency per group.
    recv_socket = L3PacketSocket(iface = intf, type = ETH_P_IP)
    group_map = {}
    for g in groups:
        group_map[g] = [0,0]  # [seen flag, first-receive timestamp]
    log_test.info('Verifying join interface should receive multicast data')
    while True:
        p = recv_socket.recv()
        if p.dst in groups and group_map[p.dst][0] == 0:
            group_map[p.dst][0] += 1
            group_map[p.dst][1] = monotonic.monotonic()
        c = 0
        for g in groups:
            c += group_map[g][0]
        if c == len(groups):
            break
    for g in groups:
        join_start = join_state.group_map[g][0].start
        # NOTE(review): assumes Stats.start is in microseconds while
        # monotonic() returns seconds - confirm against the Stats class.
        recv_time = group_map[g][1] * 1000000
        delta = (recv_time - join_start)
        log_test.info('Join for group %s received in %.3f usecs' %
                      (g, delta))
    recv_socket.close()
    log_test.debug('Returning from recv task')
def igmp_not_recv_task(self, intf, groups, join_state):
    # Sniff for up to 3 seconds; returns 1 when no multicast traffic for
    # the groups was seen (expected after a leave), 2 when traffic leaked.
    log_test.info('Entering igmp not recv task loop')
    recv_socket = L2Socket(iface = intf, type = ETH_P_IP)
    group_map = {}
    for g in groups:
        group_map[g] = [0,0]
    log_test.info('Verifying join interface, should not receive any multicast data')
    self.NEGATIVE_TRAFFIC_STATUS = 1
    def igmp_recv_cb(pkt):
        # Any matching packet flips the status to 2 (traffic observed).
        log_test.info('Multicast packet %s received for left groups %s' %(pkt[IP].dst, groups))
        self.NEGATIVE_TRAFFIC_STATUS = 2
    sniff(prn = igmp_recv_cb, count = 1, lfilter = lambda p: IP in p and p[IP].dst in groups,
          timeout = 3, opened_socket = recv_socket)
    recv_socket.close()
    return self.NEGATIVE_TRAFFIC_STATUS
def group_latency_check(self, groups):
    # Measure join latency: start traffic, then run the join and receive
    # tasks in parallel threads and wait for both to finish.
    tasks = []
    self.send_igmp_leave(groups = groups)  # start from a known left state
    join_state = IGMPTestState(groups = groups)
    tasks.append(threading.Thread(target=self.igmp_join_task, args = ('veth0', groups, join_state,)))
    traffic_state = IGMPTestState(groups = groups)
    mcast_traffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                 arg = traffic_state)
    mcast_traffic.start()
    tasks.append(threading.Thread(target=self.igmp_recv_task, args = ('veth0', groups, join_state)))
    for t in tasks:
        t.start()
    for t in tasks:
        t.join()
    mcast_traffic.stop()
    self.send_igmp_leave(groups = groups)
    return
@deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
def test_igmp_1group_join_latency(self):
    """Measure join latency for a single multicast group."""
    df = defer.Deferred()
    def run_check():
        self.group_latency_check(['239.0.1.1'])
        df.callback(0)
    reactor.callLater(0, run_check)
    return df
@deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
def test_igmp_2group_join_latency(self):
    """Measure join latency for two groups.

    NOTE(review): both entries are MGROUP1 in the original; MGROUP2 may
    have been intended - behaviour preserved here.
    """
    df = defer.Deferred()
    def run_check():
        self.group_latency_check([self.MGROUP1, self.MGROUP1])
        df.callback(0)
    reactor.callLater(0, run_check)
    return df
@deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
def test_igmp_Ngroup_join_latency(self):
    """Measure join latency across four groups.

    NOTE(review): 240.x/241.x/242.x lie outside the 224/4 multicast range -
    confirm these addresses are intended. Behaviour preserved here.
    """
    df = defer.Deferred()
    def run_check():
        self.group_latency_check(['239.0.1.1', '240.0.1.1', '241.0.1.1', '242.0.1.1'])
        df.callback(0)
    reactor.callLater(0, run_check)
    return df
def test_igmp_join_rover_all(self):
    # Sweep a small range of group addresses (224.0.0.1 .. 224.0.0.10),
    # sending a join for every address whose last octet is non-zero.
    s = (224 << 24) | 1
    #e = (225 << 24) | (255 << 16) | (255 << 16) | 255
    e = (224 << 24) | 10
    for i in xrange(s, e+1):  # xrange: this module targets Python 2
        if i&0xff:
            ip = '%d.%d.%d.%d'%((i>>24)&0xff, (i>>16)&0xff, (i>>8)&0xff, i&0xff)
            self.send_igmp_join([ip], delay = 0)
@deferred(timeout=ROVER_TEST_TIMEOUT)
def test_igmp_join_rover(self):
    # Continuously rove joins across a channel range for ROVER_TIMEOUT
    # seconds, counting how many joins were sent.
    df = defer.Deferred()
    iface = self.get_igmp_intf()
    self.df = df
    self.count = 0       # joins sent so far
    self.timeout = 0     # elapsed time, in ROVER_JOIN_TIMEOUT steps
    self.complete = False
    def igmp_join_timer():
        # Periodic bookkeeping; flips self.complete when time is up.
        self.timeout += self.ROVER_JOIN_TIMEOUT
        log_test.info('IGMP joins sent: %d' %self.count)
        if self.timeout >= self.ROVER_TIMEOUT:
            self.complete = True
        reactor.callLater(self.ROVER_JOIN_TIMEOUT, igmp_join_timer)
    reactor.callLater(self.ROVER_JOIN_TIMEOUT, igmp_join_timer)
    self.start_channel = (224 << 24) | 1
    self.end_channel = (224 << 24) | 200 #(225 << 24) | (255 << 16) | (255 << 16) | 255
    self.current_channel = self.start_channel
    def igmp_join_rover(self):
        # Send a join for the current channel and reschedule immediately,
        # wrapping back to start_channel at the end of the range.
        #e = (224 << 24) | 10
        chan = self.current_channel
        self.current_channel += 1
        if self.current_channel >= self.end_channel:
            chan = self.current_channel = self.start_channel
        if chan&0xff:
            ip = '%d.%d.%d.%d'%((chan>>24)&0xff, (chan>>16)&0xff, (chan>>8)&0xff, chan&0xff)
            self.send_igmp_join([ip], delay = 0, ssm_load = False, iface = iface)
        self.count += 1
        if self.complete == True:
            log_test.info('%d IGMP joins sent in %d seconds over %s' %(self.count, self.timeout, iface))
            self.df.callback(0)
        else:
            reactor.callLater(0, igmp_join_rover, self)
    reactor.callLater(0, igmp_join_rover, self)
    return df
@deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
def test_igmp_query(self):
    # Wait IGMP_QUERY_TIMEOUT seconds, then expect a general query
    # (destination 224.0.0.1) to arrive on veth0.
    groups = ['224.0.0.1'] ##igmp query group
    self.onos_ssm_table_load(groups)
    df = defer.Deferred()
    self.df = df
    self.recv_socket = L2Socket(iface = 'veth0', type = ETH_P_IP)
    def igmp_query_timeout():
        def igmp_query_cb(pkt):
            log_test.info('received igmp query packet is %s'%pkt.show())
            log_test.info('Got IGMP query packet from %s for %s' %(pkt[IP].src, pkt[IP].dst))
            assert_equal(pkt[IP].dst, '224.0.0.1')
        sniff(prn = igmp_query_cb, count=1, lfilter = lambda p: IP in p and p[IP].dst in groups,
              opened_socket = self.recv_socket)
        self.recv_socket.close()
        self.df.callback(0)
    #self.send_igmp_join(groups)
    self.test_timer = reactor.callLater(self.IGMP_QUERY_TIMEOUT, igmp_query_timeout)
    return df
def igmp_send_joins_different_groups_srclist(self, groups, sources, intf = V_INF1, delay = 2, ip_src = None):
    # Join two groups, each with its own source list.
    # NOTE(review): the first join uses the IGMP report MAC/IP while the
    # second uses the group's multicast MAC and group IP - confirm this
    # asymmetry is intentional.
    g1 = groups[0]
    g2 = groups[1]
    sourcelist1 = sources[0]
    sourcelist2 = sources[1]
    eth = Ether(dst = self.IGMP_DST_MAC,type = ETH_P_IP)
    ip = IP(dst = self.IP_DST)
    log_test.info('Sending join message for the group %s' %g1)
    self.send_igmp_join((g1,), src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
    eth = Ether(dst = self.MMACGROUP2, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
    ip = IP(dst = g2)
    log_test.info('Sending join message for group %s' %g2)
    self.send_igmp_join((g2,), src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
    log_test.info('Done with igmp_send_joins_different_groups_srclist')
def igmp_send_joins_different_groups_srclist_wait_query_packets(self, groups, sources, intf = V_INF1, delay = 2, ip_src = None, query_group1 = None, query_group2 = None):
    """Join two groups with distinct source lists; for the group(s) selected
    by query_group1/query_group2, also wait for a query packet after the join.

    Fix: strings are compared with '==' rather than 'is' (identity
    comparison of str literals only worked via CPython interning and
    raises SyntaxWarning on Python 3.8+).
    """
    g1 = groups[0]
    g2 = groups[1]
    sourcelist1 = sources[0]
    sourcelist2 = sources[1]
    eth = Ether(dst = self.MMACGROUP1, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
    src_ip = ip_src or self.IP_SRC
    ip = IP(dst = g1, src = src_ip)
    if query_group1 == 'group1':
        log_test.info('Sending join message for the group %s and waiting for a query packet on join interface' %g1)
        self.send_igmp_join_recvQuery((g1,), None, src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
    else:
        log_test.info('Sending join message for the group %s' %g1)
        self.send_igmp_join((g1,), src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
    eth = Ether(dst = self.MMACGROUP2, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
    ip = IP(dst = g2, src = src_ip)
    if query_group2 == 'group2':
        log_test.info('Sending join message for the group %s and waiting for a query packet on join interface' %g2)
        self.send_igmp_join_recvQuery((g2,), None, src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
    else:
        log_test.info('Sending join message for group %s' %g2)
        self.send_igmp_join((g2,), src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
def igmp_joins_leave(self,groups,src_list,again_join = False, df = None):
    """Join two groups, verify traffic, leave the second group and verify
    its traffic stops; optionally re-join it and verify traffic resumes.

    Fixes: the second traffic stream's dst_mac was derived from groups1[0]
    (copy/paste) - it now uses groups2[0]; two IGMPTestState instances that
    were created and immediately discarded have been removed as dead code.
    """
    groups1 = [groups[0]]
    groups2 = [groups[1]]
    src1 = [src_list[0]]
    src2 = [src_list[1]]
    self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                  (src1, src2), intf = self.V_INF1, delay = 2)
    src_ip = src1[0]
    if df is None:
        df = defer.Deferred()
    igmpState1 = IGMPTestState(groups = groups1, df = df)
    igmpState2 = IGMPTestState(groups = groups2, df = df)
    dst_mac = self.iptomac(groups1[0])
    mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                 src_ip = src_ip, cb = self.send_mcast_cb,
                                 arg = igmpState1)
    src_ip = src2[0]
    dst_mac = self.iptomac(groups2[0])  # fix: was groups1[0], wrong MAC for groups2 traffic
    mcastTraffic2 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                 src_ip = src_ip, cb = self.send_mcast_cb,
                                 arg = igmpState2)
    mcastTraffic1.start()
    mcastTraffic2.start()
    join_state1 = IGMPTestState(groups = groups1)
    join_state2 = IGMPTestState(groups = groups2)
    self.igmp_recv_task(self.V_INF1, groups1, join_state1)
    log_test.info('Interface is receiving multicast groups %s' %groups1)
    self.igmp_recv_task(self.V_INF1, groups2, join_state2)
    log_test.info('Interface is receiving multicast groups %s' %groups2)
    log_test.info('Interface is sending leave message for groups %s now' %groups2)
    self.send_igmp_leave(groups = groups2, src_list = src2, iface = self.V_INF1, delay = 2)
    self.igmp_recv_task(self.V_INF1, groups1, join_state1)
    target4 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state2)
    assert target4 == 1, 'EXPECTED FAILURE'
    if again_join:
        dst_mac = '01:00:5e:02:02:03'
        ip_dst = '239.2.2.3'
        eth = Ether(dst = dst_mac, type = ETH_P_IP)
        ip = IP(dst = ip_dst)
        log_test.info('Interface sending join message again for the groups %s' %groups2)
        self.send_igmp_join(groups2, src_list = [src_ip], ip_pkt = eth/ip, iface = self.V_INF1, delay = 2)
        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
        log_test.info('Interface is receiving multicast groups %s again' %groups2)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        log_test.info('Interface is still receiving from multicast groups %s' %groups1)
    else:
        log_test.info('Ended test case')
    mcastTraffic1.stop()
    mcastTraffic2.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_2joins_1leave(self):
    """Two joins followed by one leave; traffic for the left group must stop."""
    df = defer.Deferred()
    def run_scenario():
        mcast_groups = ['234.2.3.4','236.8.7.9']
        mcast_sources = ['2.3.4.5','5.4.3.2']
        self.onos_ssm_table_load(mcast_groups,src_list = mcast_sources)
        self.igmp_joins_leave(mcast_groups,mcast_sources,again_join = False, df = df)
        df.callback(0)
    reactor.callLater(0, run_scenario)
    return df
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+25)
def test_igmp_2joins_1leave_and_join_again(self):
    """Two joins, one leave, then re-join; traffic must resume afterwards."""
    df = defer.Deferred()
    def run_scenario():
        mcast_groups = ['234.2.3.4','236.8.7.9']
        mcast_sources = ['2.3.4.5','5.4.3.2']
        self.onos_ssm_table_load(mcast_groups,src_list = mcast_sources)
        self.igmp_joins_leave(mcast_groups,mcast_sources,again_join = True, df = df)
        df.callback(0)
    reactor.callLater(0, run_scenario)
    return df
def igmp_not_in_src_list(self, df = None):
    # Join groups with specific source lists, then stream traffic from a
    # source NOT in any list and check the delivery status.
    groups1 = (self.MGROUP1,)
    groups2 = (self.MGROUP2,)
    self.onos_ssm_table_load(groups1 + groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','2.2.2.2', '5.5.5.5'])
    self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                  (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                  intf = self.V_INF1, delay = 2)
    src_ip = '6.6.6.6'  # deliberately outside both joined source lists
    dst_mac = self.iptomac(groups1[0])
    if df is None:
        df = defer.Deferred()
    igmpState1 = IGMPTestState(groups = groups1, df = df)
    IGMPTestState(groups = groups1, df = df)  # NOTE(review): instance discarded; looks like dead code
    mcastTraffic1 = McastTraffic(groups1, iface = 'veth2', dst_mac = dst_mac,
                                 src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
    mcastTraffic1.start()
    join_state1 = IGMPTestState(groups = groups1)
    log_test.info('Interface should not receive from multicast groups %s from an interface, which is expected' %groups1)
    # NOTE(review): status 2 means traffic WAS observed - this assertion and
    # the surrounding log messages appear to disagree; confirm intent.
    target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
    assert target1 == 2, 'EXPECTED FAILURE'
    log_test.info('Interface is not receiving traffic from multicast groups %s, working as expected' %groups1)
    mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_not_in_src_list(self):
    """Traffic from a source outside the joined source lists."""
    df = defer.Deferred()
    def run_scenario():
        self.igmp_not_in_src_list(df = df)
        df.callback(0)
    reactor.callLater(0, run_scenario)
    return df
def igmp_change_to_exclude_src_list(self, df = None):
    # Join with INCLUDE sources, verify traffic, then send a leave
    # (CHANGE_TO_EXCLUDE) for one source and check delivery afterwards.
    groups1 = [self.random_mcast_ip()]
    groups2 = [self.random_mcast_ip()]
    self.onos_ssm_table_load(groups1 + groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','2.2.2.2', '5.5.5.5'])
    self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                  (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                  intf = self.V_INF1, delay = 2)
    src_ip = '2.2.2.2'
    dst_mac=self.iptomac(groups1[0])
    if df is None:
        df = defer.Deferred()
    igmpState1 = IGMPTestState(groups = groups1, df = df)
    IGMPTestState(groups = groups1, df = df)  # NOTE(review): instance discarded; looks like dead code
    mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                 src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
    mcastTraffic1.start()
    join_state1 = IGMPTestState(groups = groups1)
    self.igmp_recv_task(self.V_INF1, groups1, join_state1)
    self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2'], iface = self.V_INF1, delay =2)
    # NOTE(review): status 2 means traffic was still observed - confirm the
    # expected outcome of this assertion.
    target2 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
    assert target2 == 2, 'EXPECTED FAILURE'
    log_test.info('Interface is not receiving traffic from multicast groups %s after sending CHANGE_TO_EXCLUDE' %groups1)
    mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
def test_igmp_change_to_exclude_src_list(self):
    """CHANGE_TO_EXCLUDE must change delivery for the excluded source."""
    df = defer.Deferred()
    def run_scenario():
        self.igmp_change_to_exclude_src_list(df = df)
        df.callback(0)
    reactor.callLater(0, run_scenario)
    return df
def igmp_include_to_allow_src_list(self, df = None):
    # Join with one INCLUDE source, verify traffic, then ALLOW_NEW_SOURCES
    # for a second source and verify its traffic is also delivered.
    groups1 = [self.random_mcast_ip()] #(self.MGROUP1,)
    self.onos_ssm_table_load(groups1,src_list = ['4.4.4.4','6.6.6.6'])
    self.send_igmp_join(groups = groups1, src_list = ['4.4.4.4'],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                        iface = self.V_INF1)
    src_ip = '4.4.4.4'
    if df is None:
        df = defer.Deferred()
    igmpState1 = IGMPTestState(groups = groups1, df = df)
    IGMPTestState(groups = groups1, df = df)  # NOTE(review): instance discarded; looks like dead code
    mcastTraffic1 = McastTraffic(groups1, iface= 'veth2',src_ip = src_ip,
                                 cb = self.send_mcast_cb, arg = igmpState1)
    mcastTraffic1.start()
    join_state1 = IGMPTestState(groups = groups1)
    self.igmp_recv_task(self.V_INF1, groups1, join_state1)
    mcastTraffic1.stop()
    mcastTraffic2 = McastTraffic(groups1, iface= 'veth2',src_ip = '6.6.6.6',
                                 cb = self.send_mcast_cb, arg = igmpState1)
    self.send_igmp_join(groups = groups1, src_list = ['6.6.6.6'],record_type = IGMP_V3_GR_TYPE_ALLOW_NEW,
                        iface = self.V_INF1)
    mcastTraffic2.start()
    self.igmp_recv_task(self.V_INF1, groups1, join_state1)
    mcastTraffic2.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+30)
def test_igmp_include_to_allow_src_list(self):
    """ALLOW_NEW_SOURCES after an INCLUDE join must admit the new source."""
    df = defer.Deferred()
    def run_scenario():
        self.igmp_include_to_allow_src_list(df = df)
        df.callback(0)
    reactor.callLater(0, run_scenario)
    return df
    def igmp_include_to_block_src_list(self, df = None):
        """IGMPv3 INCLUDE -> BLOCK_OLD_SOURCES transition.

        Joins a random group with sources 4.4.4.4 and 6.6.6.6 (INCLUDE),
        verifies traffic from 6.6.6.6 is received, then sends BLOCK_OLD for
        6.6.6.6 and checks that traffic from it stops.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = [self.random_mcast_ip()] #groups1 = (self.MGROUP1,)
        self.onos_ssm_table_load(groups1,src_list = ['4.4.4.4','6.6.6.6'])
        self.send_igmp_join(groups = groups1, src_list = ['4.4.4.4','6.6.6.6'],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1)
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2',src_ip = '6.6.6.6',
                                     cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        mcastTraffic1.stop()
        self.send_igmp_join(groups = groups1, src_list = ['6.6.6.6'],record_type = IGMP_V3_GR_TYPE_BLOCK_OLD,
                             iface = self.V_INF1)
        mcastTraffic2 = McastTraffic(groups1, iface= 'veth2',src_ip = '6.6.6.6',
                                     cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic2.start()
        # Marked 'EXPECTED FAILURE': this check documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is still receiving traffic from old multicast group %s even after we send block for source list' %groups1)
        mcastTraffic2.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+30)
def test_igmp_include_to_block_src_list(self):
df = defer.Deferred()
def igmp_include_to_block_src_list():
self.igmp_include_to_block_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_include_to_block_src_list)
return df
    def igmp_change_to_include_src_list(self, df = None):
        """IGMPv3 CHANGE_TO_INCLUDE transition.

        Starts in EXCLUDE (via leave), verifies traffic from an excluded
        source is not received, then sends an INCLUDE join for the same
        source list and verifies traffic flows again.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = [self.random_mcast_ip()]
        src_list = ['4.4.4.4','6.6.6.6']
        self.onos_ssm_table_load(groups1,src_list = src_list)
        # Leave == EXCLUDE(src_list): traffic from these sources is filtered.
        self.send_igmp_leave(groups = groups1, src_list = src_list,
                             iface = self.V_INF1, delay = 2)
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2',src_ip = src_list[0],
                                     cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        mcastTraffic1.stop()
        # CHANGE_TO_INCLUDE: the same sources are now requested.
        self.send_igmp_join(groups = groups1, src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1)
        mcastTraffic2 = McastTraffic(groups1, iface= 'veth2',src_ip = src_list[1],
                                     cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic2.start()
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        mcastTraffic2.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
def test_igmp_change_to_include_src_list(self):
df = defer.Deferred()
def igmp_change_to_include_src_list():
self.igmp_change_to_include_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_change_to_include_src_list)
return df
    # This test case is failing because a group in INCLUDE mode receives multicast traffic from any of the sources.
    def igmp_exclude_to_allow_src_list(self, df = None):
        """IGMPv3 EXCLUDE -> ALLOW_NEW_SOURCES transition.

        Excludes sources 2.2.2.2/3.3.3.3/4.4.4.4 for group 1, confirms no
        traffic from 2.2.2.2, then sends ALLOW_NEW joins for other sources
        and re-checks that 2.2.2.2 traffic is still filtered.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','6.6.6.6', '7.7.7.7', '8.8.8.8','5.5.5.5'])
        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
                             iface = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                      (['6.6.6.6', '7.7.7.7', '8.8.8.8'], ['6.6.6.6', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        # The excluded source 2.2.2.2 must still be filtered after ALLOW_NEW.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
def test_igmp_exclude_to_allow_src_list(self):
df = defer.Deferred()
def igmp_exclude_to_allow_src_list():
self.igmp_exclude_to_allow_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_exclude_to_allow_src_list)
return df
    def igmp_exclude_to_block_src_list(self, df = None):
        """IGMPv3 EXCLUDE -> BLOCK_OLD_SOURCES transition.

        Excludes a source list for group 1, verifies traffic from 2.2.2.2 is
        filtered, then widens the exclusion with another leave and re-checks.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','7.7.7.7','5.5.5.5'])
        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
                             iface = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        # Extend the exclude list with 5.5.5.5 and 7.7.7.7 (block old sources).
        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '7.7.7.7'],
                             iface = self.V_INF1, delay = 2)
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
def test_igmp_exclude_to_block_src_list(self):
df = defer.Deferred()
def igmp_exclude_to_block_src_list():
self.igmp_exclude_to_block_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_exclude_to_block_src_list)
return df
    # This test case is failing because a group in INCLUDE mode receives traffic from other sources as well.
    def igmp_new_src_list(self, df = None):
        """Add a new source to an existing INCLUDE list.

        Joins group 1 with sources 2/3/4, confirms traffic from 6.6.6.6 is
        not received, then re-joins with 6.6.6.6 added and verifies traffic
        from the new source is received.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','5.5.5.5','6.6.6.6'])
        self.igmp_send_joins_different_groups_srclist(groups1+groups2,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        # 6.6.6.6 is not in the joined source list yet.
        src_ip = '6.6.6.6'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        # Re-join with 6.6.6.6 added to the source list for group 1.
        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                      (['2.2.2.2', '6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s after sending join with new source list' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
def test_igmp_new_src_list(self):
df = defer.Deferred()
def igmp_new_src_list():
self.igmp_new_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_new_src_list)
return df
    def igmp_block_old_src_list(self, df = None):
        """Remove (block) an old source from an INCLUDE list.

        Joins group 2 with sources including 5.5.5.5, verifies its traffic
        arrives, then re-joins without 5.5.5.5 and checks the traffic stops.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','5.5.5.5','6.6.6.6','7.7.7.7'])
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s' %groups2)
        # Re-join with 5.5.5.5 dropped from group 2's source list.
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '7.7.7.7']),
                                                      intf = self.V_INF1, delay = 2)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target2 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target2 == 2, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s after sending join with block old source list' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_block_old_src_list(self):
df = defer.Deferred()
def igmp_block_old_src_list():
self.igmp_block_old_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_block_old_src_list)
return df
    def igmp_include_empty_src_list(self, df = None):
        """INCLUDE join with an empty source list for group 2.

        An INCLUDE join listing no real sources ('0') should result in no
        traffic being forwarded for that group.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        # '0' stands in for an empty source list for group 2.
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['0']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target1==1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent join with source list is empty' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_include_empty_src_list(self):
## '''Disabling this test as scapy IGMP doesn't work with empty source lists'''
df = defer.Deferred()
def igmp_include_empty_src_list():
self.igmp_include_empty_src_list(df = df)
df.callback(0)
reactor.callLater(0, igmp_include_empty_src_list)
return df
    def igmp_exclude_empty_src_list(self, df = None):
        """EXCLUDE (leave) with an empty source list for group 2.

        Excluding nothing ('0') means all sources are allowed, so traffic
        from 5.5.5.5 should still be received.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups2 = (self.MGROUP2,)
        self.send_igmp_leave(groups = groups2, src_list = ['0'], iface = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Interface is receiving multicast groups %s' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_exclude_empty_src_list(self):
df = defer.Deferred()
def igmp_exclude_empty_src_list():
self.igmp_exclude_empty_src_list()
df.callback(0)
reactor.callLater(0, igmp_exclude_empty_src_list)
return df
    def igmp_join_sourceip_0_0_0_0(self, df = None):
        """Join sent with IGMP packet source IP 0.0.0.0.

        A join whose IP source address is 0.0.0.0 should still be honoured,
        so traffic for group 2 is expected to be received.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        # Send the joins with an all-zero source IP in the IGMP packet.
        ip_src = '0.0.0.0'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s when we sent join with source IP is 0.0.0.0' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_join_sourceip_0_0_0_0(self):
df = defer.Deferred()
def igmp_join_sourceip_0_0_0_0():
self.igmp_join_sourceip_0_0_0_0(df = df)
df.callback(0)
reactor.callLater(0, igmp_join_sourceip_0_0_0_0)
return df
    def igmp_invalid_join_packet(self, df = None):
        """Join for an invalid multicast group address.

        Group 2 uses self.MINVALIDGROUP1; traffic for it must not be
        forwarded to the joining interface.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MINVALIDGROUP1,)
        groups = groups1 + groups2
        ip_src = '1.1.1.1'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target1==1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent invalid join packet ' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_invalid_join_packet(self):
df = defer.Deferred()
def igmp_invalid_join_packet():
self.igmp_invalid_join_packet(df = df)
df.callback(0)
reactor.callLater(0, igmp_invalid_join_packet)
return df
    def igmp_join_data_receiving_during_subscriber_link_toggle(self, df = None):
        """Traffic continuity across a subscriber-link down/up toggle.

        Joins group 2, verifies traffic, then brings self.V_INF1 down for
        ~10 s via ifconfig, brings it back up, and verifies traffic resumes.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        ip_src = '1.1.1.1'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups, before bring down the self.V_INF1=%s ' %self.V_INF1)
        # Toggle the subscriber interface: down, wait 10 s, up again.
        os.system('ifconfig '+self.V_INF1+' down')
        log_test.info(' the self.V_INF1 %s is down now  ' %self.V_INF1)
        os.system('ifconfig '+self.V_INF1)
        time.sleep(10)
        os.system('ifconfig '+self.V_INF1+' up')
        os.system('ifconfig '+self.V_INF1)
        log_test.info(' the self.V_INF1 %s is up now  ' %self.V_INF1)
        # Traffic must be received again after the link comes back.
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s when we nterface up after down  ' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_join_data_received_during_subscriber_link_toggle(self):
df = defer.Deferred()
def igmp_join_data_received_during_subscriber_link_toggle():
self.igmp_join_data_received_during_subscriber_link_toggle(df = df)
df.callback(0)
reactor.callLater(0, igmp_join_data_received_during_subscriber_link_toggle)
return df
    def igmp_join_data_received_during_channel_distributor_link_toggle(self, df = None):
        """Traffic continuity across a channel-distributor link toggle.

        Two traffic streams (veth2 -> group 1, veth3 -> group 2) are started;
        veth2 is toggled down/up while verifying group 1 traffic stops and
        group 2 traffic continues.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        ip_src = '1.1.1.1'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5', '6.6.6.6']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac1 = '01:00:5e:01:02:03'
        dst_mac2 = '01:00:5e:02:02:03'
        src_ip2 = '5.5.5.5'
        src_ip1 = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        igmpState2 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): results discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac1,
                                     src_ip = src_ip1, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic2 = McastTraffic(groups2, iface= 'veth3', dst_mac = dst_mac2,
                                     src_ip = src_ip2, cb = self.send_mcast_cb, arg = igmpState2)
        mcastTraffic1.start()
        mcastTraffic2.start()
        join_state1 = IGMPTestState(groups = groups1)
        join_state2 = IGMPTestState(groups = groups2)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
        mcastTraffic1.stop()
        # Toggle the distributor-side interface veth2: down, wait, up.
        os.system('ifconfig '+'veth2'+' down')
        os.system('ifconfig '+'veth2')
        time.sleep(10)
        self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1==1, 'EXPECTED FAILURE'
        os.system('ifconfig '+'veth2'+' up')
        os.system('ifconfig '+'veth2')
        time.sleep(10)
        mcastTraffic1.start()
        self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        # Group 2 (veth3 stream) should have been unaffected throughout.
        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
        mcastTraffic2.stop()
    ## This test case is failing to receive multicast data traffic from different channel interfaces. TO-DO
    ###### TO DO scenario #######
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+60)
def test_igmp_join_data_received_during_channel_distributors_link_toggle(self):
df = defer.Deferred()
def igmp_join_data_receiving_during_channel_distributor_link_toggle():
self.igmp_join_data_received_during_channel_distributor_link_toggle(df = df)
df.callback(0)
reactor.callLater(0, igmp_join_data_receiving_during_channel_distributor_link_toggle)
return df
    def igmp_invalidClassD_IP_join_packet(self, df = None):
        """Join for an invalid (non-class-D) group address.

        Group 2 uses self.MINVALIDGROUP2; traffic for it must not be
        forwarded to the joining interface.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MINVALIDGROUP2,)
        groups = groups1 + groups2
        ip_src = '1.1.1.1'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target1==1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent invalid join packet ' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_invalid_class_d_ip_for_join_packet(self):
df = defer.Deferred()
def igmp_invalidClass_D_IP_join_packet():
self.igmp_invalidClassD_IP_join_packet(df = df)
df.callback(0)
reactor.callLater(0, igmp_invalidClass_D_IP_join_packet)
return df
    def igmp_invalidClassD_IP_as_srclistIP_join_packet(self, df = None):
        """Join whose source list contains a class-D (multicast) address.

        Group 2 is joined with source 239.5.5.5, which is not a valid
        unicast source; traffic must not be forwarded.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        ip_src = '1.1.1.1'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['239.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target1==1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent invalid join packet ' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
def test_igmp_invalid_class_d_ip_as_srclist_ip_for_join_packet(self):
df = defer.Deferred()
def igmp_invalidClassD_IP_as_srclistIP_join_packet():
self.igmp_invalidClassD_IP_as_srclistIP_join_packet(df = df)
df.callback(0)
reactor.callLater(0, igmp_invalidClassD_IP_as_srclistIP_join_packet)
return df
    def igmp_general_query_recv_packet(self, df = None):
        """Membership longevity under periodic general queries.

        Joins group 2 and samples reception at ~100/150/180/190 s to verify
        the membership is refreshed by general-query responses; the final
        check (marked EXPECTED FAILURE) documents eventual expiry behavior.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        ip_src = '1.1.1.1'
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
        ip_src = self.IP_SRC
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        log_test.info('Started delay to verify multicast data taraffic for group %s is received or not for 180 sec ' %groups2)
        # Sample reception at increasing elapsed times (100/150/180/190 s).
        time.sleep(100)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Verified that multicast data for group %s is received after 100 sec ' %groups2)
        time.sleep(50)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Verified that multicast data for group %s is received after 150 sec ' %groups2)
        time.sleep(30)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Verified that multicast data for group %s is received after 180 sec ' %groups2)
        time.sleep(10)
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Verified that multicast data for group %s is received after 190 sec ' %groups2)
        # Sender side must still be transmitting.
        target3 = mcastTraffic1.isRecvStopped()
        assert target3==False, 'EXPECTED FAILURE'
        log_test.info('Verified that multicast data for a group %s is still transmitting from a data interface' %groups2)
        log_test.info('Now checking join interface is receiving a multicast data for group %s after 190 sec' %groups2)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target1==1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving multicast data for group %s' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+250)
def test_igmp_general_query_received_traffic(self):
df = defer.Deferred()
def igmp_general_query_recv_packet():
self.igmp_general_query_recv_packet(df = df)
df.callback(0)
reactor.callLater(0, igmp_general_query_recv_packet)
return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+100)
    def test_igmp_query_received_on_joining_interface(self):
        """Join, wait for a general query on the joining interface, then
        verify send/receive state and leave behavior.

        igmp_srp_task polls until the traffic generator reports its receiver
        stopped, then tears down the socket, checks the leave, and fires df.
        """
        groups = ['224.0.1.10', '225.0.0.10']
        leave_groups = ['224.0.1.10']
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        igmpStateRecv = IGMPTestState(groups = groups, df = df)
        igmpStateList = (igmpState, igmpStateRecv)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                    arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def igmp_srp_task(stateList):
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                # Keep draining packets and re-schedule ourselves.
                self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                # Traffic done: clean up and complete the deferred.
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_leave(stateList, leave_groups)
                self.df.callback(0)
        log_test.info('Sending join packet and expect to receive on general query packet after 60 sec for multicast %s ' %groups)
        # Blocks until a general query is observed after the join.
        self.send_igmp_join_recvQuery(groups)
        log_test.info('Received a general query packet for multicast %s group on joing interface and sending traffic' %groups)
        mcastTraffic.start()
        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
        reactor.callLater(0, igmp_srp_task, igmpStateList)
        return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
    def test_igmp_for_periodic_query_received_on_joining_interface(self):
        """Join and wait for multiple (3) periodic general queries.

        NOTE(review): igmp_srp_task is defined but never scheduled here, and
        mcastTraffic is never started — the deferred appears to complete only
        via the decorator timeout path; confirm this is intentional.
        """
        groups = ['224.0.1.10', '225.0.0.10']
        leave_groups = ['224.0.1.10']
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                    arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def igmp_srp_task(stateList):
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_leave(stateList, leave_groups)
                self.df.callback(0)
        # Blocks until 3 periodic general queries have been received.
        self.send_igmp_join_recvQuery(groups,3)
        return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
    def test_igmp_for_periodic_query_received_and_checking_entry_deleted(self):
        """After 3 periodic general queries (with no re-join), send traffic
        and verify the group entry behavior via the receive loop.
        """
        groups = ['224.0.1.10', '225.0.0.10']
        leave_groups = ['224.0.1.10']
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        igmpStateRecv = IGMPTestState(groups = groups, df = df)
        igmpStateList = (igmpState, igmpStateRecv)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                    arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def igmp_srp_task(stateList):
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                # Keep draining packets and re-schedule ourselves.
                self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                # Traffic done: clean up and complete the deferred.
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_leave(stateList, leave_groups)
                self.df.callback(0)
        # Blocks until 3 periodic general queries have been received.
        self.send_igmp_join_recvQuery(groups,3)
        log_test.info('Received periodic general query packets for multicast %s, now checking entry is deleted from tabel by sending traffic for that group' %groups)
        mcastTraffic.start()
        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
        reactor.callLater(0, igmp_srp_task, igmpStateList)
        return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
    def test_igmp_member_query_interval_and_expiry_for_rejoining_interface(self):
        """After 3 periodic general queries, re-join and verify traffic is
        received again on the joining interface.
        """
        groups = ['224.0.1.10', '225.0.0.10']
        leave_groups = ['224.0.1.10']
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        igmpStateRecv = IGMPTestState(groups = groups, df = df)
        igmpStateList = (igmpState, igmpStateRecv)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                    arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def igmp_srp_task(stateList):
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                # Keep draining packets and re-schedule ourselves.
                self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                # Traffic done: clean up and complete the deferred.
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_leave(stateList, leave_groups)
                self.df.callback(0)
        # Blocks until 3 periodic general queries have been received.
        self.send_igmp_join_recvQuery(groups,3)
        log_test.info('Received periodic general query packets for multicast %s, now sending join packet again and verifying traffic for that group is received or not on joining interface' %groups)
        # Re-join before starting traffic and the receive loop.
        self.send_igmp_join(groups)
        mcastTraffic.start()
        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
        reactor.callLater(0, igmp_srp_task, igmpStateList)
        return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+50)
    def test_igmp_leave_received_group_and_source_specific_query(self):
        """Join, then leave and expect a group/source-specific query.

        NOTE(review): igmp_srp_task is defined but never scheduled and
        mcastTraffic is never started — the deferred appears to complete only
        via the decorator timeout path; confirm this is intentional.
        """
        groups = ['224.0.1.10', '225.0.0.10']
        leave_groups = ['224.0.1.10']
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                    arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def igmp_srp_task(stateList):
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_leave(stateList, leave_groups)
                self.df.callback(0)
        self.send_igmp_join(groups)
        # Leave and wait for the group/source-specific query.
        self.send_igmp_leave_listening_group_specific_query(leave_groups, delay = 3)
        return df
def igmp_change_to_exclude_src_list_check_for_group_source_specific_query(self, df = None):
groups1 = (self.MGROUP1,)
groups2 = (self.MGROUP2,)
self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
(['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
intf = self.V_INF1, delay = 2)
dst_mac = '01:00:5e:01:02:03'
src_ip = '2.2.2.2'
if df is None:
df = defer.Deferred()
igmpState1 = IGMPTestState(groups = groups1, df = df)
IGMPTestState(groups = groups1, df = df)
mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
mcastTraffic1.start()
join_state1 = IGMPTestState(groups = groups1)
self.igmp_recv_task(self.V_INF1, groups1, join_state1)
self.send_igmp_leave_listening_group_specific_query(groups = groups1, src_list = ['2.2.2.2'], iface = self.V_INF1, delay =2)
time.sleep(10)
target2 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
assert target2 == 1, 'EXPECTED FAILURE'
log_test.info('Interface is not receiving traffic from multicast groups %s after sending CHANGE_TO_EXCLUDE' %groups2)
mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+60)
def test_igmp_change_to_exclude_src_list_and_check_for_group_source_specific_query(self):
df = defer.Deferred()
def igmp_change_to_exclude_src_list_check_for_group_source_specific_query():
self.igmp_change_to_exclude_src_list_check_for_group_source_specific_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_change_to_exclude_src_list_check_for_group_source_specific_query)
return df
    def igmp_change_to_include_src_list_check_for_general_query(self, df = None):
        """CHANGE_TO_INCLUDE followed by a general-query check.

        Starts in EXCLUDE (leave), verifies traffic is filtered, then sends
        INCLUDE joins while waiting for general-query packets and verifies
        traffic resumes.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
                             iface = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # Marked 'EXPECTED FAILURE': documents a known defect.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        # CHANGE_TO_INCLUDE while waiting for general-query packets on group 1.
        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['6.6.6.6', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2,query_group1 = 'group1', query_group2 = None)
        time.sleep(10)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s after send Change to include message' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
def test_igmp_change_to_include_src_list_and_check_for_general_query(self):
df = defer.Deferred()
def igmp_change_to_include_src_list_check_for_general_query():
self.igmp_change_to_include_src_list_check_for_general_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_change_to_include_src_list_check_for_general_query)
return df
    def igmp_allow_new_src_list_check_for_general_query(self, df = None):
        """ALLOW_NEW_SOURCES followed by a general-query check.

        Joins group 1 without source 6.6.6.6, confirms its traffic is not
        received, then re-joins with 6.6.6.6 added while waiting for general
        queries and verifies the traffic arrives.
        :param df: twisted Deferred supplied by the test wrapper.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.igmp_send_joins_different_groups_srclist(groups1+groups2,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        # 6.6.6.6 is not in the joined source list yet.
        src_ip = '6.6.6.6'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        # NOTE(review): result discarded; confirm no needed side effects.
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        # ALLOW_NEW (6.6.6.6 added) while waiting for general-query packets.
        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2, (['2.2.2.2', '6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s after sending join with new source list' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
def test_igmp_allow_new_src_list_and_check_for_general_query(self):
df = defer.Deferred()
def igmp_allow_new_src_list_check_for_general_query():
self.igmp_allow_new_src_list_check_for_general_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_allow_new_src_list_check_for_general_query)
return df
    def igmp_block_old_src_list_check_for_group_source_specific_query(self, df = None):
        """Scenario: join two groups with source lists containing the traffic
        source, verify traffic is received, then re-join with that source
        removed (block old sources) while waiting for the group/source
        specific query, and verify the traffic stops.

        df: optional twisted Deferred handed through to IGMPTestState.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        groups = groups1 + groups2
        self.igmp_send_joins_different_groups_srclist(groups,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:02:02:03'
        src_ip = '5.5.5.5'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups2, df = df)
        IGMPTestState(groups = groups2, df = df)
        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups2)
        # 5.5.5.5 is in the group2 source list, so traffic should flow.
        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
        log_test.info('Interface is receiving traffic from multicast groups %s' %groups2)
        # Re-join with 5.5.5.5 removed from the group2 list (block old source).
        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups,
                                                                         (['6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '7.7.7.7']),
                                                                         intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
        # igmp_not_recv_task returns 1 when no traffic was received.
        target2 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
        assert target2 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s after sending join with block old source list' %groups2)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+90)
def test_igmp_block_old_src_list_and_check_for_group_source_specific_query(self):
df = defer.Deferred()
def igmp_block_old_src_list_check_for_group_source_specific_query():
self.igmp_block_old_src_list_check_for_group_source_specific_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_block_old_src_list_check_for_group_source_specific_query)
return df
    def igmp_include_to_allow_src_list_check_for_general_query(self, df = None):
        """Scenario: INCLUDE join whose source list contains the traffic
        source, verify reception, then extend the source list (allow) while
        waiting for the general query, and verify reception continues.

        df: optional twisted Deferred handed through to IGMPTestState.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        # Allow an extra source (6.6.6.6) for group1 while waiting for the
        # general query; traffic from 2.2.2.2 must keep flowing.
        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,(['2.2.2.2', '3.3.3.3', '4.4.4.4', '6.6.6.6'], ['2.2.2.2', '5.5.5.5']), intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
def test_igmp_include_to_allow_src_list_and_check_for_general_query(self):
df = defer.Deferred()
def igmp_include_to_allow_src_list_check_for_general_query():
self.igmp_include_to_allow_src_list_check_for_general_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_include_to_allow_src_list_check_for_general_query)
return df
    def igmp_include_to_block_src_list_check_for_group_source_specific_query(self, df = None):
        """Scenario: INCLUDE join containing the traffic source, verify
        reception, then send a leave/block for *unrelated* sources while
        listening for the group-specific query; reception must continue.

        df: optional twisted Deferred handed through to IGMPTestState.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
                                                      intf = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        # Block 6.6.6.6/7.7.7.7 — the active source 2.2.2.2 is unaffected.
        self.send_igmp_leave_listening_group_specific_query(groups = groups1, src_list = ['6.6.6.6','7.7.7.7'],
                                                            iface = self.V_INF1, delay = 2)
        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
def test_igmp_include_to_block_src_list_and_check_for_group_source_specific_query(self):
df = defer.Deferred()
def igmp_include_to_block_src_list_check_for_group_source_specific_query():
self.igmp_include_to_block_src_list_check_for_group_source_specific_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_include_to_block_src_list_check_for_group_source_specific_query)
return df
    def igmp_exclude_to_allow_src_list_check_for_general_query(self, df = None):
        """Scenario: exclude the traffic source via a leave, verify no
        reception, then join with a different allowed source list while
        waiting for the general query, and verify traffic from the excluded
        source is still not received.

        df: optional twisted Deferred handed through to IGMPTestState.
        """
        groups1 = (self.MGROUP1,)
        groups2 = (self.MGROUP2,)
        # Leave for sources including the traffic source 2.2.2.2.
        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
                             iface = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # igmp_not_recv_task returns 1 when no traffic was received.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        # Join with a source list that still does not contain 2.2.2.2.
        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
                                                                         (['6.6.6.6', '7.7.7.7', '8.8.8.8'], ['6.6.6.6', '5.5.5.5']), intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+90)
def test_igmp_exclude_to_allow_src_list_and_check_for_general_query(self):
df = defer.Deferred()
def igmp_exclude_to_allow_src_list_check_for_general_query():
self.igmp_exclude_to_allow_src_list_check_for_general_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_exclude_to_allow_src_list_check_for_general_query)
return df
    def igmp_exclude_to_block_src_list_check_for_group_source_specific_query(self, df = None):
        """Scenario: exclude the traffic source via a leave, verify no
        reception, then block an extended source list while listening for the
        group-specific query, and verify traffic is still not received.

        df: optional twisted Deferred handed through to IGMPTestState.
        """
        groups1 = (self.MGROUP1,)
        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
                             iface = self.V_INF1, delay = 2)
        dst_mac = '01:00:5e:01:02:03'
        src_ip = '2.2.2.2'
        if df is None:
            df = defer.Deferred()
        igmpState1 = IGMPTestState(groups = groups1, df = df)
        IGMPTestState(groups = groups1, df = df)
        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
        mcastTraffic1.start()
        join_state1 = IGMPTestState(groups = groups1)
        # igmp_not_recv_task returns 1 when no traffic was received.
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        # Block an extended source list (still containing 2.2.2.2).
        self.send_igmp_leave_listening_group_specific_query(groups = groups1,
                                                            src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '7.7.7.7'],
                                                            iface = self.V_INF1, delay = 2)
        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
        assert target1 == 1, 'EXPECTED FAILURE'
        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
        mcastTraffic1.stop()
@deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
def test_igmp_exclude_to_block_src_list_and_check_for_group_source_specific_query(self):
df = defer.Deferred()
def igmp_exclude_to_block_src_list_check_for_group_source_specific_query():
self.igmp_exclude_to_block_src_list_check_for_group_source_specific_query(df = df)
df.callback(0)
reactor.callLater(0, igmp_exclude_to_block_src_list_check_for_group_source_specific_query)
return df
def iptomac(self, mcast_ip):
mcast_mac = '01:00:5e:'
octets = mcast_ip.split('.')
second_oct = int(octets[1]) & 127
third_oct = int(octets[2])
fourth_oct = int(octets[3])
mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
return mcast_mac
def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
dst_mac = self.iptomac(group)
eth = Ether(dst= dst_mac)
ip = IP(dst=group,src=source)
data = repr(monotonic.monotonic())
sendp(eth/ip/data,count=20, iface = intf)
def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
self.success = False
def recv_task():
def igmp_recv_cb(pkt):
#log_test.info('received multicast data packet is %s'%pkt.show())
log_test.info('multicast data received for group %s from source %s'%(group,source))
self.success = True
sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
t = threading.Thread(target = recv_task)
t.start()
self.send_multicast_data_traffic(group,source=source)
t.join()
return self.success
    def test_igmp_include_exclude_modes(self):
        """INCLUDE join must forward traffic from listed sources; EXCLUDE join
        must drop traffic from the excluded sources."""
        groups = ['224.2.3.4','230.5.6.7']
        src_list = ['2.2.2.2','3.3.3.3']
        self.onos_ssm_table_load(groups, src_list=src_list)
        self.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 2)
        self.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                             iface = self.V_INF1, delay = 2)
        # INCLUDE group: traffic from a listed source must arrive.
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        # EXCLUDE group: traffic from an excluded source must not arrive.
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)
    def test_igmp_allow_new_source_mode(self):
        """After an ALLOW_NEW_SOURCES record, traffic from both the original
        and the newly allowed source must be received."""
        group = ['224.8.9.3']
        src_list = ['2.2.2.2','3.3.3.3']
        #dst_mac = self.iptomac(group[0])
        self.onos_ssm_table_load(group, src_list)
        # NOTE(review): src_list is passed a bare string here (elsewhere a
        # list) — confirm send_igmp_join accepts both forms.
        self.send_igmp_join(groups = group, src_list = src_list[0],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic(group[0], intf=self.V_INF1,source = src_list[0])
        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
        self.send_igmp_join(groups = group, src_list = src_list[1],record_type = IGMP_V3_GR_TYPE_ALLOW_NEW,
                             iface = self.V_INF1, delay = 1)
        for src in src_list:
            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1, source=src)
            assert_equal(status,True) # expecting igmp data traffic from both sources
    def test_igmp_include_to_exclude_mode_change(self):
        """After changing the group record from INCLUDE to EXCLUDE, traffic
        from the excluded sources must stop."""
        group = ['224.2.3.4']
        src_list = ['2.2.2.2','3.3.3.3']
        self.onos_ssm_table_load(group, src_list)
        # NOTE(review): src_list is passed a bare string here (elsewhere a
        # list) — confirm send_igmp_join accepts both forms.
        self.send_igmp_join(groups = group, src_list = src_list[0],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source= src_list[0])
        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
        self.send_igmp_join(groups = group, src_list = src_list[1],record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                             iface = self.V_INF1, delay = 1)
        for src in src_list:
            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source= src)
            assert_equal(status,False) # traffic from both sources must stop after EXCLUDE
    def test_igmp_exclude_to_include_mode_change(self):
        """EXCLUDE drops the source's traffic; switching the same source back
        to INCLUDE must restore it."""
        group = ['224.2.3.4']
        src = ['2.2.2.2']
        self.onos_ssm_table_load(group, src)
        self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
        assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
        self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source = src[0])
        assert_equal(status,True) # traffic restored after switching to INCLUDE
#this test case wotks properly if the snooping device(ONOS) have multicast router connected.
def test_igmp_to_include_mode_with_null_source(self):
groups = ['224.2.3.4','230.7.9.8']
src = ['192.168.12.34']
dst_mac = []
dst_mac.append(self.iptomac(groups[0]))
dst_mac.append(self.iptomac(groups[1]))
self.onos_ssm_table_load(groups, src)
self.send_igmp_join(groups = groups, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 1)
for grp in groups:
status = self.verify_igmp_data_traffic(grp,intf=self.V_INF1,source= src[0])
assert_equal(status,True) # not expecting igmp data traffic from source src_list[0]
#sending leave packet for group groups[1]
self.send_igmp_join(groups = [groups[1]], src_list = [],record_type = IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE,
iface = self.V_INF1, delay = 1)
for grp in groups:
status = self.verify_igmp_data_traffic(grp,intf=self.V_INF1,source= src[0])
if grp is groups[0]:
assert_equal(status,True) # expecting igmp data traffic to group groups[0]
else:
assert_equal(status,False) # not expecting igmp data traffic to group groups[1]
    def test_igmp_to_include_mode(self):
        """CHANGE_TO_INCLUDE with an extended source list must deliver traffic
        from every listed source."""
        group = ['229.9.3.6']
        src_list = ['192.168.12.34','192.18.1.34']
        self.onos_ssm_table_load(group, src_list)
        self.send_igmp_join(groups = group, src_list = [src_list[0]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
        self.send_igmp_join(groups = group, src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        for src in src_list:
            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source= src)
            assert_equal(status,True) # expecting igmp data traffic from every listed source
#this test case passed only if mulitcast router connected to ONOS.
def test_igmp_blocking_old_source_mode(self):
group = ['224.2.3.4']
src_list = ['2.2.2.2','3.3.3.3']
self.onos_ssm_table_load(group, src_list)
self.send_igmp_join(groups = group, src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 1)
for src in src_list:
status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1, source=src)
assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
self.send_igmp_join(groups = group, src_list = [src_list[1]],record_type = IGMP_V3_GR_TYPE_BLOCK_OLD,
iface = self.V_INF1, delay = 1)
for src in src_list:
status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1, source=src)
if src is src_list[0]:
assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
else:
assert_equal(status,False) # not expecting igmp data traffic from source src_list[1]
    def test_igmp_multiple_joins_and_data_verification_with_100_groups(self):
        """Join 100 random unique (group, source) pairs and verify data
        traffic for each pair."""
        groups = []
        sources = []
        count = 1
        mcastips = self.mcast_ip_range(start_ip = '226.0.0.1',end_ip = '226.0.5.254')
        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.5.254')
        # Draw random pairs until 100 unique groups are collected
        # (a duplicate group is simply re-drawn).
        while count<=100:
            group = random.choice(mcastips)
            source = random.choice(sourceips)
            if group in groups:
                pass
            else:
                log_test.info('group and source are %s and %s'%(group,source))
                groups.append(group)
                sources.append(source)
                count += 1
        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
        for i in range(100):
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, True)
            log_test.info('data received for group %s from source %s'%(groups[i],sources[i]))
    def test_igmp_multiple_joins_with_data_verification_and_leaving_100_groups(self):
        """Join 100 random unique (group, source) pairs, verify data for each,
        then leave each group via CHANGE_TO_EXCLUDE and verify traffic stops."""
        groups = []
        sources = []
        count = 1
        mcastips = self.mcast_ip_range(start_ip = '226.0.0.1',end_ip = '226.0.5.254')
        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.5.254')
        # Draw random pairs until 100 unique groups are collected.
        while count<=100:
            group = random.choice(mcastips)
            source = random.choice(sourceips)
            if group in groups:
                pass
            else:
                log_test.info('group and source are %s and %s'%(group,source))
                groups.append(group)
                sources.append(source)
                count += 1
        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
        for i in range(100):
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, True)
            log_test.info('data received for group %s from source %s'%(groups[i],sources[i]))
            # Leave: CHANGE_TO_EXCLUDE for the joined source must stop the flow.
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
                                 iface = self.V_INF1, delay = 1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, False)
            log_test.info("data not received for group %s from source %s after changing group mode to 'TO-EXCLUDE' mode"%(groups[i],sources[i]))
def test_igmp_group_source_for_only_config_with_1000_entries(self):
groups = []
sources = []
count = 1
mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = '229.0.50.254')
sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.50.254')
while count<=1000:
group = random.choice(mcastips)
source = random.choice(sourceips)
if group in groups:
pass
else:
log_test.info('group and source are %s and %s'%(group,source))
groups.append(group)
sources.append(source)
count += 1
self.onos_ssm_table_load(groups,src_list=sources,flag=True)
    def test_igmp_from_exclude_to_include_mode_with_100_groups(self):
        """For 100 random unique pairs: an EXCLUDE join must drop the source's
        traffic, and a following INCLUDE join must restore it."""
        groups = []
        sources = []
        count = 1
        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = '229.0.10.254')
        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.10.254')
        # Draw random pairs until 100 unique groups are collected.
        while count<=100:
            group = random.choice(mcastips)
            source = random.choice(sourceips)
            if group in groups:
                pass
            else:
                log_test.info('group and source are %s and %s'%(group,source))
                groups.append(group)
                sources.append(source)
                count += 1
        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
        for i in range(100):
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, False)
            log_test.info('data not received for group %s from source %s as expected'%(groups[i],sources[i]))
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, True)
            log_test.info("data received for group %s from source %s after changing group mode to 'TO-INCLUDE' mode"%(groups[i],sources[i]))
    def test_igmp_with_multiple_joins_and_data_verify_with_1000_groups(self):
        """Scale test: join 1000 random unique (group, source) pairs and
        verify data traffic for each pair."""
        groups = []
        sources = []
        count = 1
        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = '229.0.30.254')
        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.30.254')
        # Draw random pairs until 1000 unique groups are collected.
        while count<=1000:
            group = random.choice(mcastips)
            source = random.choice(sourceips)
            if group in groups:
                pass
            else:
                log_test.info('group and source are %s and %s'%(group,source))
                groups.append(group)
                sources.append(source)
                count += 1
        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
        for i in range(1000):
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, True)
            log_test.info('data received for group %s from source %s - %d'%(groups[i],sources[i],i))
    def test_igmp_with_multiple_joins_and_data_verify_with_5000_groups(self):
        """Scale test: join 5000 random unique (group, source) pairs and
        verify data traffic for each pair."""
        groups = []
        sources = []
        count = 1
        mcastips = self.mcast_ip_range(start_ip = '231.39.19.121',end_ip = '231.40.30.25')
        sourceips = self.source_ip_range(start_ip = '192.168.56.43',end_ip = '192.169.110.30')
        # Draw random pairs until 5000 unique groups are collected.
        while count<=5000:
            group = random.choice(mcastips)
            source = random.choice(sourceips)
            if group in groups:
                pass
            else:
                log_test.info('group and source are %s and %s'%(group,source))
                groups.append(group)
                sources.append(source)
                count += 1
        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
        for i in range(5000):
            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1)
            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
            assert_equal(status, True)
            log_test.info('data received for group %s from source %s - %d'%(groups[i],sources[i],i))
"""def test_igmp_join_from_multiple_infts(self):
groups = ['229.9.3.6','234.20.56.2']
src_list = ['192.168.12.34','192.18.1.34']
self.onos_ssm_table_load(groups, src_list=src_list)
self.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = 'veth0')
self.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = 'veth2')
status = self.verify_igmp_data_traffic(groups[0],intf='veth0',source=src_list[0])
assert_equal(status,True)
status = self.verify_igmp_data_traffic(groups[1],intf='veth2',source=src_list[1])
assert_equal(status,True) # not expecting igmp data traffic from source src_list[0]
"""
    def test_igmp_send_data_to_non_registered_group(self):
        """Data for a group absent from the ssm-translate table must not be
        forwarded, even after a join for that group."""
        group = ['224.2.3.4']
        src = ['2.2.2.2']
        self.onos_ssm_table_load(group,src_list= src)
        # Join a group (239.0.0.1) that was never registered; only 224.2.3.4 was.
        self.send_igmp_join(groups = ['239.0.0.1'], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic('239.0.0.1',intf=self.V_INF1,source=src[0])
        assert_equal(status,False) # unregistered group: no traffic expected
    def test_igmp_traffic_verification_for_registered_group_with_no_join_sent(self):
        """A registered group for which no join was sent must not receive data
        traffic."""
        group = ['227.12.3.40']
        src = ['190.4.19.67']
        self.onos_ssm_table_load(group,src_list= src)
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
        assert_equal(status,False) # no join sent, so no traffic expected
    def test_igmp_toggling_app_activation(self):
        """Traffic must flow while the igmp app is active, stop when it is
        deactivated, and resume after re-activation."""
        group = [self.random_mcast_ip()]
        src = [self.randomsourceip()]
        self.onos_ssm_table_load(group,src_list= src)
        self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1)
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
        log_test.info('Multicast traffic received for group %s from source %s before the app is deactivated'%(group[0],src[0]))
        # Deactivate the app: forwarding must stop.
        self.onos_ctrl.deactivate()
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
        assert_equal(status,False) #not expecting igmp data traffic from source src_list[0]
        log_test.info('Multicast traffic not received for group %s from source %s after the app is deactivated'%(group[0],src[0]))
        # Re-activate: forwarding must resume.
        self.onos_ctrl.activate()
        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
        log_test.info('Multicast traffic received for group %s from source %s the app is re-activated'%(group[0],src[0]))
def test_igmp_with_mismatch_for_dst_ip_and_mac_in_data_packets(self):
group = ['228.18.19.29']
source = [self.randomsourceip()]
self.onos_ssm_table_load(group,src_list= source)
self.send_igmp_join(groups = group, src_list = source,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1)
dst_mac = '01:00:5e:0A:12:09'
eth = Ether(dst= dst_mac)
ip = IP(dst=group[0],src=source[0])
data = repr(monotonic.monotonic())
pkt = (eth/ip/data)
log_test.info('Multicast traffic packet %s'%pkt.show())
self.success = False
def recv_task():
def igmp_recv_cb(pkt):
#log_test.info('received multicast data packet is %s'%pkt.show())
log_test.info('multicast data received for group %s from source %s'%(group[0],source[0]))
self.success = True
sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group[0] and p[IP].src == source[0], count=1,timeout = 2, iface='veth0')
t = threading.Thread(target = recv_task)
t.start()
sendp(eth/ip/data,count=20, iface = 'veth2')
t.join()
assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
    # Known failure: ONOS registers a unicast ip also as an igmp join.
    def test_igmp_registering_invalid_group(self):
        """Load ssm-translate config with a non-multicast group address
        (218.18.19.29 is outside 224/4) and verify no data traffic is
        forwarded for it after a join."""
        groups = ['218.18.19.29']
        source = [self.randomsourceip()]
        ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
        ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
        # Build one translate entry per (group, source) combination.
        for g in groups:
            for s in source:
                d = {}
                d['source'] = s or '0.0.0.0'
                d['group'] = g
                ssm_xlate_list.append(d)
        log_test.info('onos load config is %s'%ssm_dict)
        status, code = OnosCtrl.config(ssm_dict)
        self.send_igmp_join(groups, src_list = source, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                             iface = self.V_INF1, delay = 1)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1, source=source[0])
        assert_equal(status,False) # invalid (unicast) group: no traffic expected
    def test_igmp_registering_invalid_source(self):
        """Loading ssm-translate config with invalid source addresses (a
        multicast ip, and an address with an out-of-range octet 257) must be
        rejected by ONOS."""
        groups = [self.random_mcast_ip()]
        sources = ['224.10.28.34','193.73.219.257']
        ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
        ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
        # Build one translate entry per (group, source) combination.
        for g in groups:
            for s in sources:
                d = {}
                d['source'] = s or '0.0.0.0'
                d['group'] = g
                ssm_xlate_list.append(d)
        log_test.info('onos load config is %s'%ssm_dict)
        status, code = OnosCtrl.config(ssm_dict)
        assert_equal(status,False) # config with invalid sources must be rejected
|
server.py | import os
import sys
import threading
from collections import defaultdict
from enum import Enum
from itertools import groupby
import yaml
from dotenv import load_dotenv
from flask import Flask, jsonify, redirect, render_template, request, send_file
from werkzeug.utils import secure_filename
# Load environment configuration from a .env file (if present).
load_dotenv()
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
if os.getenv("LOCAL_REPO") == 'True':
    # Load credentialdigger from local repo instead of pip
    sys.path.insert(0, os.path.join(APP_ROOT, '..'))
from backend import PgUiClient, SqliteUiClient  # noqa
# NOTE(review): Flask('__name__') passes the literal string "__name__",
# not the module name __name__ — confirm this is intended.
app = Flask('__name__', static_folder=os.path.join(APP_ROOT, './res'),
            template_folder=os.path.join(APP_ROOT, './templates'))
# Uploaded rule files are stored next to the backend package.
app.config['UPLOAD_FOLDER'] = os.path.join(APP_ROOT, './backend')
app.config['DEBUG'] = True  # Remove this line in production
# Select the database client: Postgres when USE_PG=True, otherwise sqlite.
if os.getenv('USE_PG') == 'True':
    app.logger.info('Use Postgres Client')
    c = PgUiClient(dbname=os.getenv('POSTGRES_DB'),
                   dbuser=os.getenv('POSTGRES_USER'),
                   dbpassword=os.getenv('POSTGRES_PASSWORD'),
                   dbhost=os.getenv('DBHOST'),
                   dbport=os.getenv('DBPORT'))
else:
    app.logger.info('Use Sqlite Client')
    c = SqliteUiClient(path=os.path.join(APP_ROOT, './data.db'))
# Ensure the default rule set is loaded at startup.
c.add_rules_from_file(os.path.join(APP_ROOT, './backend/rules.yml'))
# ################### UTILS ####################
def _get_active_scans():
    """Return the repo urls currently being scanned.

    Scan threads are named 'credentialdigger@<repo_url>' (see scan_repo), so
    the url is recovered from the thread name.
    """
    return [t.name.split("@")[1]
            for t in threading.enumerate()
            if t.name.startswith("credentialdigger")]
def _get_rules():
    """Fetch all rules and index them by id; also collect their categories.

    Returned as a dict keyed by rule id (rule ids may be sparse, so a list
    indexed by id would risk IndexError) plus the set of categories.
    """
    rules = c.get_rules()
    rulesdict = {rule['id']: rule for rule in rules}
    cat = {rule['category'] for rule in rules}
    return rulesdict, cat
# ################### ROUTES ####################
@app.route('/')
def root():
    """Home page: repository list with aggregate discovery and rule counts."""
    repos = c.get_repos()
    # Total num of discoveries
    tot_discoveries = c.get_discoveries_count()
    rulesdict, cat = _get_rules()
    return render_template('repos.html',
                           tot_discoveries=tot_discoveries,
                           len_repos=len(repos),
                           len_rules=len(rulesdict),
                           categories=list(cat))
@app.route('/files', methods=['GET'])
def files():
    """Per-file discoveries view for the repository given by ?url=."""
    # Get all the discoveries of this repository
    url = request.args.get('url')
    rulesdict, cat = _get_rules()  # rulesdict unused; only categories are rendered
    discoveries_count = c.get_discoveries_count(repo_url=url)
    # A repo is 'scanning' when a credentialdigger thread exists for its url.
    active_scans = _get_active_scans()
    scanning = url in active_scans
    return render_template('discoveries/files.html',
                           url=url,
                           discoveries_count=discoveries_count,
                           scanning=scanning,
                           categories=list(cat))
@app.route('/discoveries', methods=['GET'])
def discoveries():
    """Discoveries view: per-file when ?file= is given, else whole repository."""
    # Get all the discoveries of this repository
    url = request.args.get('url')
    file = request.args.get('file')
    rulesdict, cat = _get_rules()  # rulesdict unused; only categories are rendered
    discoveries_count = c.get_discoveries_count(repo_url=url, file_name=file)
    # A repo is 'scanning' when a credentialdigger thread exists for its url.
    active_scans = _get_active_scans()
    scanning = url in active_scans
    if file:
        return render_template('discoveries/file.html',
                               url=url,
                               file=file,
                               discoveries_count=discoveries_count,
                               scanning=scanning,
                               categories=list(cat))
    else:
        return render_template('discoveries/discoveries.html',
                               url=url,
                               discoveries_count=discoveries_count,
                               scanning=scanning,
                               categories=list(cat))
@app.route('/rules')
def rules():
    """Render the rule management page with every stored rule."""
    return render_template('rules.html', rules=c.get_rules())
@app.route('/delete_repo', methods=['POST'])
def delete_repo():
    """Delete a repository; form fields are forwarded verbatim to the client."""
    c.delete_repo(**request.values)
    return redirect('/')
@app.route('/add_rule', methods=['POST'])
def add_rule():
    """Add a rule; form fields are forwarded verbatim to the client."""
    c.add_rule(**request.values)
    return redirect('/rules')
@app.route('/delete_rule', methods=['POST'])
def delete_rule():
    """Delete a rule; form fields are forwarded verbatim to the client."""
    c.delete_rule(**request.values)
    return redirect('/rules')
@app.route('/upload_rule', methods=['POST'])
def upload_rule():
    """Save an uploaded rules file into the upload folder and import its rules."""
    uploaded = request.files['filename']
    # secure_filename strips path components from the client-supplied name.
    dest = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(uploaded.filename))
    uploaded.save(dest)
    c.add_rules_from_file(dest)
    return redirect('/rules')
@app.route('/download_rule')
def download_rule():
    """Export every rule (regex, category, description) as a YAML attachment."""
    exported = defaultdict(list)
    for rule in c.get_rules():
        exported['rules'].append({
            'regex': rule['regex'],
            'category': rule['category'],
            'description': rule['description']
        })
    # The export is materialized on disk and then streamed to the browser.
    outpath = os.path.join(APP_ROOT, './backend/Downloadrules.yml')
    with open(outpath, 'w') as fh:
        yaml.dump(dict(exported), fh)
    return send_file(outpath, as_attachment=True)
# ################### JSON APIs ####################
@app.route('/scan_repo', methods=['POST'])
def scan_repo():
    """Start a repository scan in a background thread.

    Returns 401 when the supplied git token cannot access the repository,
    otherwise 200 immediately while the scan keeps running in its thread.
    """
    # Get scan properties
    repo_link = request.form['repolink'].strip()
    rules_to_use = request.form.get('rule_to_use')
    use_snippet_model = request.form.get('snippetModel')
    use_path_model = request.form.get('pathModel')
    # If the form does not contain the 'Force' checkbox,
    # then 'forceScan' will be set to False; thus, ignored.
    force_scan = request.form.get('forceScan') == 'force'
    git_token = request.form.get('gitToken')
    if not c.check_connection(repo_link, git_token):
        return f'Git token not valid for repository {repo_link}', 401
    # Set up models
    models = []
    if use_path_model == 'path':
        models.append('PathModel')
    if use_snippet_model == 'snippet':
        models.append('SnippetModel')
    # Scan
    args = {
        "repo_url": repo_link,
        "models": models,
        "force": force_scan,
        "git_token": git_token
    }
    if rules_to_use != 'all':
        args["category"] = rules_to_use
    # The thread name encodes the repo url so _get_active_scans can find it.
    thread = threading.Thread(
        name=f"credentialdigger@{repo_link}", target=c.scan, kwargs=args)
    thread.start()
    return 'OK', 200
@app.route('/get_repos')
def get_repos():
    """JSON list of repos, each annotated with its discovery count and
    whether a scan is currently running for it."""
    scanning_now = _get_active_scans()
    repos = c.get_repos()
    for repo in repos:
        repo['lendiscoveries'] = c.get_discoveries_count(repo['url'])
        repo['scan_active'] = repo['url'] in scanning_now
    return jsonify(repos)
@app.route('/get_files', methods=['GET'])
def get_files():
    """JSON per-file discovery summary for the repository given by ?url=."""
    # Get all the discoveries of this repository
    url = request.args.get('url')
    files = c.get_files_summary(url)
    return jsonify(files)
@app.route('/get_discoveries', methods=['GET'])
def get_discoveries():
    """DataTables server-side endpoint: grouped discoveries for a repo/file.

    Reads DataTables query parameters (search, paging, ordering), fetches a
    page of discoveries, annotates each with its rule category, then groups
    rows by (snippet, category, state) for display.
    """
    # Get all the discoveries of this repository
    url = request.args.get('url')
    file_name = request.args.get('file')
    # Global search box value; empty string means "no filter"
    where = request.args['search[value]']
    where = where if len(where) > 0 else None
    limit = int(request.args['length'])
    offset = int(request.args['start'])
    # Map the ordered column index back to its field name
    order_by_index = request.args['order[0][column]']
    order_by = request.args[f'columns[{order_by_index}][data]']
    order_direction = request.args['order[0][dir]']
    # Determine the state filter value
    # Walk the column descriptors until the 'state' column is found and read
    # its per-column search value; 'all' or empty disables the filter.
    col_index = 0
    state_filter = None
    while f'columns[{col_index}][data]' in request.args:
        if request.args[f'columns[{col_index}][data]'] == 'state':
            state_filter = request.args[f'columns[{col_index}][search][value]']
            if len(state_filter) == 0 or state_filter == 'all':
                state_filter = None
            break
        col_index += 1
    discoveries_count, discoveries = c.get_discoveries(
        repo_url=url, file_name=file_name, state_filter=state_filter,
        where=where, limit=limit, offset=offset, order_by=order_by,
        order_direction=order_direction)
    # Add the category to each discovery
    rulesdict, cat = _get_rules()
    categories_found = set()
    for discovery in discoveries:
        if discovery['rule_id']:
            discovery['category'] = rulesdict[discovery['rule_id']]['category']
        else:
            # The rule was removed after this discovery was recorded
            discovery['category'] = '(rule deleted)'
        categories_found.add(discovery['category'])
    # Build the response json
    # Enum gives the states a stable numeric order for sorting/grouping.
    class States(Enum):
        new = 0
        false_positive = 1
        addressing = 2
        not_relevant = 3
    # groupby() requires its input sorted by the same key it groups on
    discoveries = sorted(discoveries, key=lambda i: (
        i["snippet"], i["category"], States[i["state"]].value))
    response = {
        "recordsTotal": discoveries_count,
        "recordsFiltered": discoveries_count,
        "data": sorted([
            {
                "snippet": keys[0],
                "category": keys[1],
                "state": States(keys[2]).name,
                "occurrences": [
                    {
                        "file_name": i["file_name"],
                        "line_number": i["line_number"],
                        "commit_id": i["commit_id"],
                        "id": i["id"]
                    } for i in list(values)
                ],
            }
            for keys, values in groupby(
                discoveries, lambda i: (i["snippet"], i["category"], States[i["state"]].value))
        # NOTE(review): States[i[order_by]] is only valid when order_by is
        # 'state'; ordering by any other column would raise KeyError here —
        # confirm the UI restricts server-side ordering to the state column.
        ], key=lambda i: States[i[order_by]].value, reverse=order_direction == 'desc')
    }
    return jsonify(response)
@app.route('/get_scan_status')
def get_scan_status():
    """Report whether a scan is currently running for the given repo url."""
    repo_url = request.args.get('url')
    return jsonify({"scanning": repo_url in _get_active_scans()})
@app.route('/update_discovery_group', methods=['POST'])
def update_discovery_group():
    """Bulk-update the triage state of a group of discoveries.

    Reads the new state and the (url, file, snippet) group selector from the
    POST form and delegates to the client; returns 500 when the update fails.
    """
    state = request.form.get('state')
    url = request.form.get('url')
    file = request.form.get('file')
    snippet = request.form.get('snippet')
    response = c.update_discovery_group(state, url, file, snippet)
    if response is False:
        # Fixed typo in the user-facing message ("updatating" -> "updating")
        return 'Error in updating the discovery group', 500
    return 'OK', 200
app.run(host='0.0.0.0', port=5000)
|
perf2.py | # perf2.py
# nb short request per second
# Benchmark: count short request/response round-trips per second.
import time
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('', 25000))

n = 0

from threading import Thread

def monitor():
    """Print and reset the request counter once per second."""
    global n
    while True:
        time.sleep(1)
        print("{0} req/s".format(n))
        n = 0

# daemon=True in the constructor replaces the deprecated Thread.setDaemon()
# call (deprecated since Python 3.10); the monitor must not keep the
# process alive.
th = Thread(target=monitor, daemon=True)
th.start()

# Hammer the echo server with 1-byte requests forever.
while True:
    sock.send(b'1')
    sock.recv(100)
    n += 1
|
controller.py | """
Game mode - controller:
Game - Abstract Game Class
SingleGame - LocalSingleGame
"""
import tkinter
from abc import abstractmethod
from threading import Thread
from typing import Callable, Dict, Tuple
from rules import Rule
from model import Manager
from player import LocalPlayer, Player
from view import Board
from error import InvalidGridError, SwapRequest, GameEnded, RuleException, GameWon, InvalidPosition, SettedGridError
class Game:
    """Abstract game controller: owns the board model, the rule and the two
    players, and drives the turn loop via exceptions raised by the rule."""

    def __init__(self, grids: int, size: int,
                 players: Dict[bool, str], rule: Rule) -> None:
        """Initialize a new Game.

        grids: board grid count; size: UI size; players: color -> name
        mapping (consumed by subclasses when building Player objects);
        rule: callable enforcing the game's rules on every move.
        """
        self._tkroot = tkinter.Tk()
        self._game = Manager(grids)
        self._size, self._grids = size, grids
        # Make players
        # _curplayer is only declared here; subclasses must assign it
        # (e.g. in start()) before the turn loop runs.
        self._curplayer: Player
        self._players: Dict[bool, Player] = dict()
        self._rule = rule

    @property
    def player(self) -> Player:
        """Return current player"""
        return self._curplayer

    def toggle(self) -> None:
        """Toggle game player"""
        # bool(player) is the player's color; negating it selects the opponent
        self._curplayer = self._players[not bool(self._curplayer)]
        self.player.active()

    @abstractmethod
    def swap(self, request: SwapRequest, callbacks: Dict[bool, Callable]) -> None:
        """Swap player handler"""
        ...

    def click(self, row: int, column: int) -> None:
        """Click handler function: play a piece at (row, column) and check
        it against the rule.

        Raises GameEnded, GameWon, InvalidPosition or SwapRequest (and any
        grid errors from the Manager) for the caller (gaming) to handle.
        """
        # Check if game already over
        if self._game.ended:
            raise GameEnded("Game has already ended!")
        # Play piece for looking winner
        # If rule said is invalid, cancel this operation
        self._game[row, column] = bool(self.player)
        situation = self._game.find(row, column)
        # Check rule
        try:
            self._rule((row, column), self._game.steps, situation)
        except GameWon as error:
            # The winning piece stays on the board; mark the game over.
            self._game.end()
            raise
        except InvalidPosition as error:
            # Roll the piece back and tell the player why it was rejected.
            self._game[row, column] = None
            self.player.announce(error.title, error.msg)
            raise
        except SwapRequest as error:
            # The piece stays; the caller decides how to run the swap.
            raise

    def restart(self) -> None:
        """Restart handler function"""
        self._game.reset()
        oldplayer = self._curplayer
        self._curplayer = self._players[True]
        # (-1, -1) appears to be a sentinel that unblocks/clears the old
        # player's pending event — confirm against Player.handler.
        oldplayer.handler(-1, -1)
        self.player.active()

    def gaming(self) -> None:
        """Game logistic: run the turn loop until the game ends or the
        player's event stream is exhausted."""
        while position := self.player.event:
            row, column = position
            try:
                self.click(row, column)
            except GameEnded:
                break
            except SettedGridError:
                continue
            except InvalidGridError:
                continue
            except GameWon as error:
                self.player.play(row, column)
                self.player.win(error.pieces)
                break
            # When player swapping dont change
            except SwapRequest as request:
                self.player.play(row, column)
                self.swap(request, {
                    True: self.player.active, # If swapped dont toggle
                    False: self.toggle # If not swapped toggle
                })
                continue
            # For General Rule check exception dont play piece
            except RuleException as _error:
                continue
            self.player.play(row, column)
            self.toggle()
        # Restore resources
        ...

    @abstractmethod
    def start(self) -> None:
        """Start game"""
        ...
class LocalGame(Game):
    """Local two-player game driven by a tkinter Board."""

    def __init__(self, grids: int, size: int,
                 players: Dict[bool, str], rule: Rule) -> None:
        """Initialize a new local game and wire up its tkinter UI."""
        super().__init__(grids, size, players, rule)
        # Initialize tkUI
        self._board = Board(self._tkroot, self._size, self._grids)
        self._board.click = self.click
        self._board.restart = self.restart
        self._board.fundo = self.undo
        # Initialize gamer
        for color, name in players.items():
            self._players[color] = LocalPlayer(name, color, self._board)

    def undo(self) -> Tuple[int, int]:
        """Undo the last step and hand the turn back to the other player."""
        x, y = self._game.undo()
        # (-1, -1) sentinel clears the current player's pending event
        self.player.handler(-1, -1)
        self.toggle()
        return x, y

    def swap(self, request: SwapRequest, callbacks: Dict[bool, Callable]) -> None:
        """Swap handler for Local Game using tkinter"""
        labels = request.hint
        options = request.options
        title = request.SwapSelectionPanelTitle
        # Wrap all callback handlers; bind each option's function as a
        # default argument so the closure does not capture the loop
        # variable late.
        _options: Dict[Tuple[str, ...], Callable[[], bool]] = dict()
        for key in options:
            _options[key] = lambda func=options[key]: func(self._players)
        self._board.selpanel(title, labels, _options, callbacks)

    def start(self) -> None:
        """Start Local Game with UI settings etc."""
        # Bind sente player
        self._curplayer = self._players[True]
        self.player.active()
        # Draw UI and run tk mainloop in thread
        self._board.draw()
        # name/daemon go to the constructor instead of the deprecated
        # Thread.setName()/Thread.setDaemon() calls (deprecated since
        # Python 3.10).
        thread = Thread(target=self.gaming, name="Gaming", daemon=True)
        thread.start()
        # Mainloop
        self._tkroot.mainloop()
|
faces.py |
#!git clone https://github.com/26medias/keras-face-toolbox.git
#!mv keras-face-toolbox/models models
#!mv keras-face-toolbox/utils utils
#!rm -r keras-face-toolbox
#!gdown https://drive.google.com/uc?id=1H37LER8mRRI4q_nxpS3uQz3DcGHkTrNU
#!mv lresnet100e_ir_keras.h5 models/verifier/insightface/lresnet100e_ir_keras.h5
#!pip install git+https://github.com/rcmalli/keras-vggface.git
#!pip show keras-vggface
#!pip install matplotlib
#!pip install mtcnn
#!pip install bs4
#!pip install selenium
# Example:
#from databuilder.faces import builder
#faceBuilder = builder()
#faces, landmarks, segmentations = faceBuilder.videoToFaces(filename)
from IPython.display import HTML, display
import time
import requests
import ntpath
import cv2
import math
import os, sys
from matplotlib import pyplot
from PIL import Image
import numpy as np
from numpy import asarray
import scipy
from scipy.spatial.distance import cosine
from mtcnn.mtcnn import MTCNN
import keras_vggface
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input
import glob
import mtcnn
from pathlib import Path
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from scipy.cluster import hierarchy
from bs4 import BeautifulSoup
from selenium import webdriver
import csv
from detector import face_detector
from parser import face_parser
from utils.visualize import show_parsing_with_annos
from threading import Thread
import threading
class pystack:
    """Run queued (fn, args) jobs in concurrent thread batches.

    At most ``stackSize`` jobs run at a time. Adding a job while idle
    triggers processing immediately; jobs added while a batch runs are
    picked up by a later batch.
    """

    def __init__(self, stackSize=5):
        self.started = False        # True while batches are being processed
        self.stackSize = stackSize  # max number of concurrent threads
        self.stack = []             # pending (fn, args) jobs

    def add(self, fn, args):
        """Queue a job; start processing if not already running."""
        self.stack.append((fn, args))
        if self.started is False:
            self.start()

    def start(self):
        """Drain the queue, then clear the running flag."""
        self.started = True
        self.processNextStack()
        print("Done.")
        self.started = False

    def processNextStack(self):
        """Run queued jobs batch by batch until the queue is empty."""
        # Iterative rather than recursive: a long queue no longer risks
        # hitting the interpreter recursion limit.
        while len(self.stack) != 0:
            batch = self.stack[0:self.stackSize]
            self.stack = self.stack[self.stackSize:]
            # Start the whole batch first, THEN join. The original called
            # t.start(); t.join() inside the loop, which ran the jobs one
            # at a time and defeated the point of threading.
            threads = [threading.Thread(target=fn, args=args)
                       for fn, args in batch]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            print("All threads done!")
        return False
class builder():
    """Build a face dataset (faces, landmarks, segmentations) from videos."""

    def __init__(self, VIDEO_QUALITY="720", FRAME_PERCENTAGE=40, DIR_VIDEOS="Videos", DIR_FACES="Faces", OUTPUT_SIZE=224):
        # The variables
        self.VIDEO_QUALITY = VIDEO_QUALITY # The trailer quality we'll download: 480, 720 or 1080
        self.FRAME_PERCENTAGE = FRAME_PERCENTAGE # from 0.1 to 100: The percentage of frames that will be analyzed in the video
        self.DIR_VIDEOS = DIR_VIDEOS
        self.DIR_FACES = DIR_FACES
        self.OUTPUT_SIZE = OUTPUT_SIZE
        # 0o755 (octal), not 755: the original passed decimal 755, which
        # os.mkdir interprets as the bogus mode 0o1363.
        if not os.path.isdir(self.DIR_VIDEOS):
            os.mkdir(self.DIR_VIDEOS, 0o755)
        if not os.path.isdir(self.DIR_FACES):
            os.mkdir(self.DIR_FACES, 0o755)
        # Create the detector, using default weights
        print("Creating the detector model")
        self.detector = MTCNN()
        # Create a vggface model
        print("Creating the face embedding model")
        self.embedding_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')
        # Create a face detector
        print("Creating the face detector model")
        self.fd = face_detector.FaceAlignmentDetector(lmd_weights_path="/content/face-alignment/2DFAN-4_keras.h5")
        # Create a face parser (segmentation)
        print("Creating the face segmentation model")
        self.prs = face_parser.FaceParser()
        print("Face dataset builder ready")
# The methods
# ===========
# Colab progress bar
def progress(self, value, max=100):
return HTML('<progress value="{value}" max="{max}" style="width: 50%"> {value}</progress>'.format(value=value, max=max))
# Convert a value from one range to another
def rangeConvert(self, x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
# Get the directory of a filename
def getDir(self, filename):
p = Path(filename);
return p.parts[len(p.parts)-2]
# Dowload a video from a url
def downloadFile(self, url):
print("Downloading ", url)
filename = self.DIR_VIDEOS+"/"+ntpath.basename(url)
if os.path.exists(filename):
return filename
myfile = requests.get(url)
open(filename, 'wb').write(myfile.content)
print(filename," downloaded.")
return filename
# Resize an image
def resize_image(self, im, max_size=768):
if np.max(im.shape) > max_size:
ratio = max_size / np.max(im.shape)
print(f"Resize image to ({str(int(im.shape[1]*ratio))}, {str(int(im.shape[0]*ratio))}).")
return cv2.resize(im, (0,0), fx=ratio, fy=ratio)
return im
    def imageFilesToGrid(self, directory, outputFilename):
        """Tile every .jpg in ``directory`` into one square grid image saved
        at ``outputFilename``; returns False when fewer than 4 images exist."""
        filenames = glob.glob(directory+'/*.jpg')
        #print(directory, ": ", len(filenames), " images")
        if len(filenames) < 4:
            return False
        result_figsize_resolution = 10 # 1 = 100px
        images_count = len(filenames)
        # Calculate the grid size:
        # smallest square grid that fits every image
        grid_size = math.ceil(math.sqrt(images_count))
        # Create plt plot:
        fig, axes = pyplot.subplots(grid_size, grid_size, figsize=(result_figsize_resolution, result_figsize_resolution))
        current_file_number = 0
        for image_filename in filenames:
            x_position = current_file_number % grid_size
            y_position = current_file_number // grid_size
            plt_image = pyplot.imread(image_filename)
            # NOTE(review): axes is indexed [x, y], i.e. images fill the grid
            # column-by-column (transposed w.r.t. the usual [row, col]); the
            # output is still a complete grid, just ordered down columns.
            axes[x_position, y_position].imshow(plt_image)
            current_file_number += 1
        # remove all margins so the faces fill the figure
        pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
        pyplot.savefig(outputFilename)
        #pyplot.show()
def exportImageGrids(self, directory, outputDirectory):
print("Exporting image grids...")
dirs = os.listdir(directory)
dirs.sort()
ndirs = len(dirs)
for n,dir in enumerate(dirs):
if dir is not "ALL":
self.imageFilesToGrid(directory+"/"+dir, outputDirectory+"/"+dir+".jpg");
self.progress(n, ndirs)
# Extract the faces from an image, return an array of numpy faces
    def extractFacesFromImage(self, pixels, required_size=(224, 224), limit=50):
        """Detect faces in ``pixels`` with MTCNN and return resized crops.

        Returns a list of face arrays. When limit==1 the first face array is
        returned directly, or False when no face could be extracted.
        """
        results = self.detector.detect_faces(pixels)
        faces = []
        errors = 0
        for i,faceData in enumerate(results):
            # Stop collecting once more than ``limit`` faces were gathered
            if len(faces) > limit:
                break
            x1, y1, width, height = faceData['box']
            x2, y2 = x1 + width, y1 + height
            # extract the face
            face = pixels[y1:y2, x1:x2]
            # resize pixels to the model size
            try:
                image = Image.fromarray(face)
                image = image.resize(required_size)
                face_array = asarray(image)
                faces.append(face_array)
                if limit==1:
                    return face_array
            except:
                # Count crops that could not be converted/resized (e.g. an
                # empty crop from an out-of-bounds detector box)
                errors+=1
        if limit==1 and len(faces)==0:
            return False
        return faces;
# Extract the faces from an image, return an array of numpy faces & landmarks
    def extractFacesAndLandmarksFromImage(self, pixels, required_size=(224, 224), limit=50):
        """Detect faces with the FaceAlignmentDetector; return square resized
        crops plus landmarks remapped into crop coordinates.

        Returns (faces, landmarks). When limit==1 the first face array is
        returned alone, or False when no face could be extracted.
        """
        rw, rh = required_size
        results, landmarks = self.fd.detect_face(pixels, with_landmarks=True)
        nResults = len(results)
        faces = []
        errors = 0
        for i,bbox in enumerate(results):
            if len(faces) > limit:
                break
            # Get the face
            x0, y0, x1, y1, score = bbox
            # Find the center of the face
            w = x1-x0
            h = y1-y0
            xCenter = x0+int(w/2)
            yCenter = y0+int(h/2)
            # Grow the short side so the crop becomes square about the center
            if w>h:
                y0 = yCenter-int(w/2)
                y1 = yCenter+int(w/2)
            if h>w:
                x0 = xCenter-int(h/2)
                x1 = xCenter+int(h/2)
            x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
            # NOTE(review): slicing is pixels[x0:x1, y0:y1] — this assumes
            # the detector reports coordinates in (row, col) order; confirm
            # against FaceAlignmentDetector's bbox convention.
            face = pixels[x0:x1, y0:y1, :]
            # Recalculate the landmarks coordinates
            # (shift into the crop, then rescale to required_size)
            for li in range(len(landmarks[i])):
                landmark = landmarks[i][li]
                lx, ly = landmark
                landmarks[i][li] = (self.rangeConvert(lx-x0, 0, face.shape[1], 0, rw), self.rangeConvert(ly-y0, 0, face.shape[0], 0, rh))
            # Resize pixels to the model size
            try:
                image = Image.fromarray(face)
                image = image.resize(required_size)
                face_array = asarray(image)
                faces.append(face_array)
                if limit==1:
                    # NOTE(review): returns the face WITHOUT its landmarks,
                    # unlike the normal (faces, landmarks) return — confirm
                    # callers expect this.
                    return face_array
            except:
                # crop could not be converted/resized (e.g. empty slice)
                errors+=1
        if limit==1 and len(faces)==0:
            return False
        return faces, landmarks
# Extract the faces from an image, return an array of numpy faces & landmarks
    def extractFacesLandmarksAndSegmentationFromImage(self, pixels, required_size=(224, 224), limit=50):
        """Like extractFacesAndLandmarksFromImage, but also runs the face
        parser on each resized crop.

        Returns (faces, landmarks, segmentations). When limit==1 the first
        face array is returned alone, or False when none could be extracted.
        """
        rw, rh = required_size
        results, landmarks = self.fd.detect_face(pixels, with_landmarks=True)
        nResults = len(results)
        faces = []
        segmentations = []
        errors = 0
        for i,bbox in enumerate(results):
            if len(faces) > limit:
                break
            # Get the face
            x0, y0, x1, y1, score = bbox
            # Find the center of the face
            w = x1-x0
            h = y1-y0
            xCenter = x0+int(w/2)
            yCenter = y0+int(h/2)
            # Grow the short side so the crop becomes square about the center
            if w>h:
                y0 = yCenter-int(w/2)
                y1 = yCenter+int(w/2)
            if h>w:
                x0 = xCenter-int(h/2)
                x1 = xCenter+int(h/2)
            x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
            # NOTE(review): assumes (row, col) bbox order — see the sibling
            # extractFacesAndLandmarksFromImage.
            face = pixels[x0:x1, y0:y1, :]
            # Recalculate the landmarks coordinates
            # (shift into the crop, then rescale to required_size)
            for li in range(len(landmarks[i])):
                landmark = landmarks[i][li]
                lx, ly = landmark
                landmarks[i][li] = (self.rangeConvert(lx-x0, 0, face.shape[1], 0, rw), self.rangeConvert(ly-y0, 0, face.shape[0], 0, rh))
            # Resize pixels to the model size
            try:
                image = Image.fromarray(face)
                image = image.resize(required_size)
                face_array = asarray(image)
                faces.append(face_array)
                # Get the segmentation on the resized image
                segmentation = self.prs.parse_face(face_array)
                segmentations.append(segmentation)
                if limit==1:
                    # NOTE(review): returns the face WITHOUT landmarks or
                    # segmentation, unlike the normal return — confirm callers.
                    return face_array
            except:
                # crop could not be converted/resized/parsed
                errors+=1
        if limit==1 and len(faces)==0:
            return False
        return faces, landmarks, segmentations
# Export the frames out of a video at a specific fps
def videoToFaces(self, filename, maxFrame=0):
print("Extracting faces from the video frames...")
basename = os.path.splitext(ntpath.basename(filename))[0]
#print("basename:", basename)
cap = cv2.VideoCapture(filename)
# Get the video's FPS
fps = cap.get(cv2.CAP_PROP_FPS)
nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
processFrames = int(nframes*self.FRAME_PERCENTAGE/100)
skipFrame = int(nframes/processFrames)
print(basename, "fps:", fps, "skipFrame:",skipFrame,"Frames:", str(processFrames)+"/"+str(nframes))
out = display(self.progress(0, processFrames), display_id=True)
i = 0
c = 0
faces = []
landmarks = []
segmentations = []
while(cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
i+=1
if maxFrame>0 and i > maxFrame:
break;
#print(i, "-", i % skipFrame)
if (i % skipFrame == 0):
c+=1
#print("Checking faces in frame #"+str(i))
#frameFaces = self.extractFacesFromImage(frame)
frameFaces, frameLandmarks, frameSegmentations = self.extractFacesLandmarksAndSegmentationFromImage(frame, required_size=(self.OUTPUT_SIZE, self.OUTPUT_SIZE))
out.update(self.progress(c, processFrames))
for nf, f in enumerate(frameFaces):
faces.append(f)
landmarks.append(frameLandmarks[nf])
segmentations.append(frameSegmentations[nf])
else:
continue
#cv2.imwrite(DIR_IMAGES+"/"+basename+'/'+str(round((i-1)/fps,2))+'sec.jpg',frame)
cap.release()
cv2.destroyAllWindows()
print(basename, " processed.")
print(processFrames,"/",nframes," frames analyzed.")
print(len(faces), " faces found.")
return faces, landmarks, segmentations
# Show a few images
def showImages(self, images, width=4):
fig = pyplot.figure(figsize=(width, math.ceil(len(images)/width)))
for i in range(len(images)):
pyplot.subplot(width, math.ceil(len(images)/width), i+1)
pyplot.imshow(images[i])
pyplot.axis('off')
pyplot.savefig('preview.png')
pyplot.show()
# Save an array of images to files
def saveImages(self, images, dest, names=False, prefix="", showProgress=True):
if not os.path.isdir(dest):
os.mkdir(dest, 755);
nImages = len(images)
if showProgress is True:
print("Saving ",nImages," images to ", dest)
out = display(self.progress(0, nImages), display_id=True)
filenames = []
for n, image in enumerate(images):
if names is False:
filename = dest+"/"+prefix+('{:04d}'.format(n))+'.jpg'
else:
filename = dest+"/"+prefix+str(names[n])+'.jpg'
cv2.imwrite(filename, image)
filenames.append(filename)
if showProgress is True:
out.update(self.progress(n, nImages))
return filenames
# Save Numpy Arrays to files
def saveNpArrays(self, npArrays, dest, names=False, prefix="", showProgress=True):
if not os.path.isdir(dest):
os.mkdir(dest, 755);
nArrays = len(npArrays)
if showProgress is True:
print("Saving ",nArrays," numpy arrays to ", dest)
out = display(self.progress(0, nArrays), display_id=True)
filenames = []
for n, npArray in enumerate(npArrays):
if names is False:
filename = dest+"/"+prefix+('{:04d}'.format(n))+'.npy'
else:
filename = dest+"/"+prefix+str(names[n])+'.npy'
np.save(filename, npArray)
filenames.append(filename)
if showProgress is True:
out.update(self.progress(n, nArrays))
return filenames
# Extract faces and calculate face embeddings for a list of photo files
    def get_embeddings(self, faces):
        """Return VGGFace (resnet50) embeddings for a list of face images."""
        print("Calculating the embeddings...")
        # convert into an array of samples
        samples = []
        for img in faces:
            # normalize every crop to the model's fixed 224x224 input
            samples.append(cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC))
        samples = asarray(samples, 'float32')
        # prepare the face for the model, e.g. center pixels
        samples = preprocess_input(samples, version=2)
        # perform prediction
        embeddings = self.embedding_model.predict(samples)
        return embeddings
# Determine if a candidate face is a match for a known face
def is_match(self, known_embedding, candidate_embedding, threshold=0.5):
# calculate distance between embeddings
score = cosine(known_embedding, candidate_embedding)
return score >= threshold
# Cluster the faces by cosine distance
    def clusterFaces(self, faces, embeddings, landmarks, segmentations, minFaces=2):
        """Greedily cluster faces by mean cosine distance of their embeddings.

        A face joins the closest existing group when the mean distance to
        that group's embeddings is <= 0.5, otherwise it seeds a new group.
        Groups with fewer than ``minFaces`` members are dropped.
        """
        groups = [] # Array of dict {faces:[], embeddings: []}
        nFaces = len(faces)
        print("Clustering ",nFaces," faces...")
        out = display(self.progress(0, nFaces), display_id=True)
        # For each faces
        for n, face in enumerate(faces):
            out.update(self.progress(n, nFaces))
            if len(groups)==0:
                # The very first face seeds the first group
                groups.append({
                    "faces": [face],
                    "names": [n],
                    "embeddings": [embeddings[n]],
                    "landmarks": [landmarks[n]],
                    "segmentations": [segmentations[n]]
                })
            else:
                # Not the first face, match it against all the groups, see if the average of cosine distance match an existing face
                scores = [] # array of dict {group: n, embeddings: []}
                for g, group in enumerate(groups):
                    groupScores = []
                    for embedding in group["embeddings"]:
                        groupScores.append(cosine(embedding, embeddings[n]))
                    score = np.mean(groupScores)
                    scores.append({
                        "group": g,
                        "score": score
                    })
                # Sort the scores for each group by lowest score, check if that score is below the threshold
                scores = sorted(scores, key = lambda i: i["score"], reverse=False)
                if scores[0]["score"] <= 0.5:
                    # Add to the existing group the face matches
                    groups[scores[0]["group"]]["landmarks"].append(landmarks[n])
                    groups[scores[0]["group"]]["embeddings"].append(embeddings[n])
                    groups[scores[0]["group"]]["segmentations"].append(segmentations[n])
                    groups[scores[0]["group"]]["faces"].append(face)
                    groups[scores[0]["group"]]["names"].append(n)
                    #print("[Matched] face #", n, " to group #", scores[0]["group"], "score:", scores[0]["score"])
                else:
                    # No group is close enough: start a new one
                    groups.append({
                        "faces": [face],
                        "names": [n],
                        "embeddings": [embeddings[n]],
                        "landmarks": [landmarks[n]],
                        "segmentations": [segmentations[n]]
                    })
                    #print("[New face] face #", n, " / Best score:", scores[0]["score"])
        # Filter out the groups that don't have enough faces
        return [item for item in groups if len(item["faces"]) >= minFaces]
        #return groups;
# Cluster all the faces from a remote video
def clusterFacesOnVideo(self, url):
print("Processing ", url);
# Download the video
videoFilename = self.downloadFile(url)
# Get the directories name for that video
# /Faces/[dirname]/Faces
# /Faces/[dirname]/Embeddings
# /Faces/[dirname]/Landmarks
# /Faces/[dirname]/Segmentations
# /Faces/[dirname]/Previews
dirname = os.path.splitext(ntpath.basename(videoFilename))[0]
dirClustered = self.DIR_FACES+"/"+dirname
dirFaces = dirClustered+"/Faces/"
dirEmbeddings = dirClustered+"/Embeddings/"
dirLandmarks = dirClustered+"/Landmarks/"
dirSegmentations = dirClustered+"/Segmentations/"
dirPreviews = dirClustered+"/Previews/"
if os.path.exists(dirPreviews):
# Video already processed, go to the next one
print("Video already processed.")
#return False
# Create the directories
if not os.path.isdir(dirClustered):
os.mkdir(dirClustered, 755);
if not os.path.isdir(dirFaces):
os.mkdir(dirFaces, 755);
if not os.path.isdir(dirEmbeddings):
os.mkdir(dirEmbeddings, 755);
if not os.path.isdir(dirLandmarks):
os.mkdir(dirLandmarks, 755);
if not os.path.isdir(dirSegmentations):
os.mkdir(dirSegmentations, 755);
if not os.path.isdir(dirPreviews):
os.mkdir(dirPreviews, 755);
# Open a CSV to save the datasets
with open(dirClustered+"/"+dirname+".csv", "w") as csvfile:
fieldnames = ["video_name", "face_group", "image_filename", "embeddings_filename", "landmarks_filename", "segmentations_filename"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# Find the faces on the video
faces, landmarks, segmentations = self.videoToFaces(videoFilename)
nFaces = len(faces)
print(nFaces," faces detected")
# Get the embedding for all the faces
embeddings = self.get_embeddings(faces)
# Cluster the faces using cosine distance
clusters = self.clusterFaces(faces, embeddings, landmarks, segmentations, minFaces=5)
nClusters = len(clusters)
# Export each face group
print("Saving ",nClusters," face clusters...")
for n, group in enumerate(clusters):
ngImg = len(group["faces"])
ngEbd = len(group["embeddings"])
ngldk = len(group["landmarks"])
# Save the face as an image
images_filenames = self.saveImages(group["faces"], dirFaces+('{:04d}'.format(n)), showProgress=False)
# Save the embedding as a numpy array
embeddings_filenames = self.saveNpArrays(group["embeddings"], dirEmbeddings+('{:04d}'.format(n)), showProgress=False)
# Save the landmarks as a numpy array
landmarks_filenames = self.saveNpArrays(group["landmarks"], dirLandmarks+('{:04d}'.format(n)), showProgress=False)
# Save the segmentations as a numpy array
segmentations_filenames = self.saveNpArrays(group["segmentations"], dirSegmentations+('{:04d}'.format(n)), showProgress=False)
# Update the CSV
for i, image_filename in enumerate(images_filenames):
writer.writerow({
"video_name": dirname,
"face_group": n,
"image_filename": image_filename,
"embeddings_filename": embeddings_filenames[i],
"landmarks_filename": landmarks_filenames[i],
"segmentations_filename": segmentations_filenames[i]
})
# Build grids to show each face groups
self.exportImageGrids(dirFaces, dirPreviews)
def clusterFacesFromVideos(self, urls):
nUrls = len(urls)
for n,url in enumerate(urls):
self.clusterFacesOnVideo(url)
def fetchAllHDVideos(self, url):
response = requests.get(url)
soup = BeautifulSoup(response.content, "html5lib")
links = soup.find_all('a')
videos = []
for tag in links:
link = tag.get('href', None)
if link is not None and 'h'+str(self.VIDEO_QUALITY)+'p' in link:
videos.append(link)
return videos
class threadedBuilder():
    """Wrapper that processes every video of a site, one thread per video."""

    def __init__(self, VIDEO_QUALITY="720", FRAME_PERCENTAGE=40, DIR_VIDEOS="Videos", DIR_FACES="Faces"):
        # The variables
        self.VIDEO_QUALITY = VIDEO_QUALITY # The trailer quality we'll download: 480, 720 or 1080
        self.FRAME_PERCENTAGE = FRAME_PERCENTAGE # from 0.1 to 100: The percentage of frames that will be analyzed in the video
        self.DIR_VIDEOS = DIR_VIDEOS
        self.DIR_FACES = DIR_FACES

    def fetchAllHDVideos(self, url):
        """Scrape ``url`` for links matching the configured quality marker."""
        response = requests.get(url)
        soup = BeautifulSoup(response.content, "html5lib")
        links = soup.find_all('a')
        videos = []
        for tag in links:
            link = tag.get('href', None)
            if link is not None and 'h'+str(self.VIDEO_QUALITY)+'p' in link:
                videos.append(link)
        return videos

    def processVideo(self, url):
        """Build and run a dataset builder for one video url."""
        datasetBuilder = builder(FRAME_PERCENTAGE=2)
        datasetBuilder.clusterFacesFromVideos([url])

    def process(self, website):
        """Fetch every video link from ``website`` and process each in its
        own thread."""
        videos = self.fetchAllHDVideos(website)
        print("videos", videos)
        for video in videos:
            print("video", video)
            # Bug fix: the original passed args=(self, video), which handed
            # ``self`` to the bound method twice and raised TypeError inside
            # the worker thread. Bound methods already carry ``self``.
            Thread(target=self.processVideo, args=(video,)).start()
|
run_dispatcher.py | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from multiprocessing import Process
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
    """Return the per-node queue name for a fanout broadcast queue.

    The original called common_name.encode('utf8'), producing ``bytes`` and
    making the ``bytes + str`` concatenation raise TypeError on Python 3.
    Queue names are plain text, so everything stays ``str``.
    """
    return common_name + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
    help = 'Launch the task dispatcher'

    def add_arguments(self, parser):
        """Register the dispatcher's control-plane flags."""
        parser.add_argument('--status', dest='status', action='store_true',
                            help='print the internal state of any running dispatchers')
        parser.add_argument('--running', dest='running', action='store_true',
                            help='print the UUIDs of any tasked managed by this dispatcher')
        parser.add_argument('--reload', dest='reload', action='store_true',
                            help=('cause the dispatcher to recycle all of its worker processes;'
                                  'running jobs will run to completion first'))

    def beat(self):
        """Run a celery beat scheduler in this process.

        The scheduler kills itself when the parent dispatcher process dies.
        """
        from celery import app
        from celery.beat import PersistentScheduler
        from celery.apps import beat

        class AWXScheduler(PersistentScheduler):

            def __init__(self, *args, **kwargs):
                # remember the dispatcher's PID so orphaning is detectable
                self.ppid = os.getppid()
                super(AWXScheduler, self).__init__(*args, **kwargs)

            def setup_schedule(self):
                super(AWXScheduler, self).setup_schedule()
                self.update_from_dict(settings.CELERYBEAT_SCHEDULE)

            def tick(self, *args, **kwargs):
                if os.getppid() != self.ppid:
                    # if the parent PID changes, this process has been orphaned
                    # via e.g., segfault or sigkill, we should exit too
                    raise SystemExit()
                return super(AWXScheduler, self).tick(*args, **kwargs)

            def apply_async(self, entry, publisher, **kwargs):
                # Route scheduled tasks through our own dispatch system
                # instead of celery's; mimic celery's result interface.
                task = TaskWorker.resolve_callable(entry.task)
                result, queue = task.apply_async()

                class TaskResult(object):
                    id = result['uuid']

                return TaskResult()

        app = app.App()
        app.conf.BROKER_URL = settings.BROKER_URL
        app.conf.CELERY_TASK_RESULT_EXPIRES = False
        beat.Beat(
            30,
            app,
            schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
        ).run()

    def handle(self, *arg, **options):
        """Entry point: service a control flag or start the dispatcher."""
        if options.get('status'):
            # print() function instead of the Python 2 print statement,
            # which is a SyntaxError on Python 3.
            print(Control('dispatcher').status())
            return
        if options.get('running'):
            print(Control('dispatcher').running())
            return
        if options.get('reload'):
            return Control('dispatcher').control({'control': 'reload'})

        # It's important to close these because we're _about_ to fork, and we
        # don't want the forked processes to inherit the open sockets
        # for the DB and memcached connections (that way lies race conditions)
        django_connection.close()
        django_cache.close()
        beat = Process(target=self.beat)
        beat.daemon = True
        beat.start()

        # clean up orphaned jobs left over from a previous run
        reaper.reap()
        consumer = None
        with Connection(settings.BROKER_URL) as conn:
            try:
                bcast = 'tower_broadcast_all'
                queues = [
                    Queue(q, Exchange(q), routing_key=q)
                    for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
                ]
                queues.append(
                    Queue(
                        construct_bcast_queue_name(bcast),
                        exchange=Exchange(bcast, type='fanout'),
                        routing_key=bcast,
                        reply=True
                    )
                )
                consumer = AWXConsumer(
                    'dispatcher',
                    conn,
                    TaskWorker(),
                    queues,
                    AutoscalePool(min_workers=4)
                )
                consumer.run()
            except KeyboardInterrupt:
                logger.debug('Terminating Task Dispatcher')
                if consumer:
                    consumer.stop()
|
managed_event_loop.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio import AbstractEventLoop, new_event_loop, run_coroutine_threadsafe
from concurrent.futures import Future
from threading import Thread, Lock
from typing import ContextManager, Generic, TypeVar, Optional, Callable
_T = TypeVar("_T")
class _Lazy(Generic[_T]):
    """Thread-safe, lazily constructed single-value holder."""

    _Factory = Callable[[], _T]

    _lock: Lock
    _factory: _Factory
    _impl: Optional[_T]

    def __init__(self, factory: _Factory):
        self._lock = Lock()
        self._factory = factory
        self._impl = None

    def get(self) -> _T:
        """Build the value on first call; return the cached one afterwards."""
        with self._lock:
            value = self._impl
            if value is None:
                value = self._factory()
                self._impl = value
            return value
class _ManagedEventLoopImpl(ContextManager):
_loop: AbstractEventLoop
_thread: Thread
def __init__(self, name=None):
self._loop = new_event_loop()
self._thread = Thread(
target=lambda: self._loop.run_forever(), name=name, daemon=True
)
def __enter__(self):
self._thread.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._loop.call_soon_threadsafe(self._loop.stop)
self._thread.join()
def submit(self, coro) -> Future:
return run_coroutine_threadsafe(coro, self._loop)
# TODO(user): Remove when underlying issue is fixed.
# This is a workaround for https://github.com/grpc/grpc/issues/25364, a grpc
# issue which prevents grpc-asyncio working with multiple event loops in the
# same process. This workaround enables multiple topic publishing as well as
# publish/subscribe from the same process, but does not enable use with other
# grpc-asyncio clients. Once this issue is fixed, roll back the PR which
# introduced this to return to a single event loop per client for isolation.
_global_event_loop: _Lazy[_ManagedEventLoopImpl] = _Lazy(
lambda: _ManagedEventLoopImpl(name="PubSubLiteEventLoopThread").__enter__()
)
class ManagedEventLoop(ContextManager):
    """Facade over the single, shared, process-wide event loop.

    All instances delegate to the same `_global_event_loop`; entering and
    exiting are deliberate no-ops because the shared loop must outlive any
    individual client (grpc-asyncio workaround).
    """

    def __init__(self, name=None):
        # `name` is accepted for interface compatibility but unused: the
        # shared loop's thread was already named at creation.
        self._loop = _global_event_loop.get()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to tear down; the shared loop is never stopped here.
        pass

    def submit(self, coro) -> Future:
        """Schedule *coro* on the shared loop; returns a concurrent Future."""
        return self._loop.submit(coro)
|
_trimesh.py | from __future__ import division
import collections
import logging
import threading
import pyglet
import trimesh.viewer
from .. import model as model_module
# Quiet trimesh's internal logging: surface errors only.
logger = logging.getLogger('trimesh')
logger.setLevel(logging.ERROR)
class TrimeshSceneViewer(trimesh.viewer.SceneViewer):
    """Non-blocking trimesh scene viewer driven by a background pyglet loop.

    The pyglet app runs on a daemon thread (see `show`); `self.lock`
    serializes scene mutations against the draw callback. `_redraw` marks
    the scene dirty so link transforms are refreshed on the next draw.
    """

    def __init__(self, resolution=None):
        if resolution is None:
            resolution = (640, 480)
        # link_id (str(id(link))) -> Link, in insertion order
        self._links = collections.OrderedDict()
        self._redraw = True
        # Poll for redraws at ~30 Hz.
        pyglet.clock.schedule_interval(self.on_update, 1 / 30)
        self.scene = trimesh.Scene()
        # Base-class __init__ is deferred until show(); kwargs stored here.
        self._kwargs = dict(
            scene=self.scene,
            resolution=resolution,
            offset_lines=False,
            start_loop=False,
        )
        self.lock = threading.Lock()

    def show(self):
        """Start the viewer window and pyglet event loop on a daemon thread."""
        self.thread = threading.Thread(target=self._init_and_start_app)
        self.thread.daemon = True  # terminate when main thread exit
        self.thread.start()

    def _init_and_start_app(self):
        # The parent __init__ creates GL resources, so it must run on the
        # same thread as the pyglet app loop, under the scene lock.
        with self.lock:
            super(TrimeshSceneViewer, self).__init__(**self._kwargs)
        pyglet.app.run()

    def redraw(self):
        """Mark the scene dirty; transforms are reapplied on next draw."""
        self._redraw = True

    def on_update(self, dt):
        self.on_draw()

    def on_draw(self):
        if not self._redraw:
            # Clean scene: just refresh vertex buffers and draw.
            with self.lock:
                self._update_vertex_list()
                super(TrimeshSceneViewer, self).on_draw()
            return
        with self.lock:
            self._update_vertex_list()
            # apply latest angle-vector
            for link_id, link in self._links.items():
                link.update(force=True)
                transform = link.worldcoords().T()
                self.scene.graph.update(link_id, matrix=transform)
            super(TrimeshSceneViewer, self).on_draw()
        self._redraw = False

    # Any user interaction invalidates the cached draw state.
    def on_mouse_press(self, *args, **kwargs):
        self._redraw = True
        return super(TrimeshSceneViewer, self).on_mouse_press(*args, **kwargs)

    def on_mouse_drag(self, *args, **kwargs):
        self._redraw = True
        return super(TrimeshSceneViewer, self).on_mouse_drag(*args, **kwargs)

    def on_mouse_scroll(self, *args, **kwargs):
        self._redraw = True
        return super(TrimeshSceneViewer, self).on_mouse_scroll(*args, **kwargs)

    def on_key_press(self, *args, **kwargs):
        self._redraw = True
        return super(TrimeshSceneViewer, self).on_key_press(*args, **kwargs)

    def on_resize(self, *args, **kwargs):
        self._redraw = True
        return super(TrimeshSceneViewer, self).on_resize(*args, **kwargs)

    def _add_link(self, link):
        """Add one Link's visual mesh to the scene, then recurse to children."""
        assert isinstance(link, model_module.Link)
        with self.lock:
            link_id = str(id(link))
            if link_id in self._links:
                return
            transform = link.worldcoords().T()
            self.scene.add_geometry(
                geometry=link.visual_mesh,
                node_name=link_id,
                geom_name=link_id,
                transform=transform,
            )
            self._links[link_id] = link
        for child_link in link._child_links:
            self._add_link(child_link)

    def add(self, geometry):
        """Add a Link or every link of a CascadedLink to the scene."""
        if isinstance(geometry, model_module.Link):
            links = [geometry]
        elif isinstance(geometry, model_module.CascadedLink):
            links = geometry.link_list
        else:
            raise TypeError('geometry must be Link or CascadedLink')
        for link in links:
            self._add_link(link)
        self._redraw = True

    def delete(self, geometry):
        """Remove a Link or CascadedLink's geometry from the scene."""
        if isinstance(geometry, model_module.Link):
            links = [geometry]
        elif isinstance(geometry, model_module.CascadedLink):
            links = geometry.link_list
        else:
            raise TypeError('geometry must be Link or CascadedLink')
        with self.lock:
            for link in links:
                link_id = str(id(link))
                if link_id not in self._links:
                    continue
                self.scene.delete_geometry(link_id)
                self._links.pop(link_id)
        self.cleanup_geometries()
        self._redraw = True

    def set_camera(self, *args, **kwargs):
        with self.lock:
            self.scene.set_camera(*args, **kwargs)
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # seconds to back off after a failed/invalid RPC response
MAX_NONCE = 1000000L  # initial nonce scan range per getwork (Python 2 long)
settings = {}  # key=value config parsed in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind (Python 2, httplib, basic auth)."""
    OBJID = 1
    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # blocking connection, 30 s timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.

        Returns the 'result' member on success, the 'error' member on an
        RPC-level error, or None on a transport/decoding failure.
        """
        # NOTE: += on the class attribute creates a per-instance counter
        # shadowing BitcoinRPC.OBJID after the first call.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        # With data, submits a solved work unit; without, fetches new work.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate x to an unsigned 32-bit value.
    return x & 0xffffffffL
def bytereverse(x):
    # Swap the byte order of a 32-bit word (endianness flip).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap each 32-bit word in place; len(in_buf) must be a multiple of 4.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of 4-byte words; len(in_buf) must be a multiple of 4.
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single getwork miner: scans nonces over double-SHA256 block headers."""
    def __init__(self, id):
        self.id = id
        # Nonce scan range; retuned each iteration to target 'scantime' secs.
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan nonces for a share; returns (hashes_done, nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce back into the work data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # nonce occupies hex chars 152..160 of the original work data
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """One fetch/scan/(submit) cycle; retunes max_nonce for 'scantime'."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # scale next scan range so one pass takes ~settings['scantime'] secs
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Connect to the RPC server and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's fetch/scan/submit loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Read key=value config file, ignoring comments and malformed lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for any settings not present in the config.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9581
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Coerce numeric settings from the raw config strings.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Launch one miner per configured thread (actually processes).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
ami.py | import logging
import time
from importlib import import_module
from threading import Thread
import numpy as np
from ophyd.device import Device, Component as Cpt, Staged
from ophyd.signal import Signal
from ophyd.status import Status
from ophyd.utils.errors import ReadOnlyError
from toolz.itertoolz import partition
from .ext_scripts import hutch_name, get_ami_proxy
logger = logging.getLogger(__name__)
# Default l3t filter file path, parameterized by hutch name.
L3T_DEFAULT = '/reg/neh/operator/{}opr/l3t/amifil.l3t'
# Set uninitialized globals for style-checker
pyami = None              # the pyami module, imported lazily
pyami_connected = None    # whether pyami.connect succeeded
ami_proxy = None          # hostname or group for the pyami connection
l3t_file = None           # path written by pyami.set_l3t
monitor_det = None        # default normalization/filtering AmiDet
last_filter_string = None # most recent filter set via set_pyami_filter
# Define default starting values. Can also use to reset module.
def _reset_globals():
    """Restore this module's pyami-related globals to their defaults."""
    globals().update(
        pyami=None,
        pyami_connected=False,
        ami_proxy=None,
        l3t_file=None,
        monitor_det=None,
        last_filter_string=None,
    )
# Initialize module state on import.
_reset_globals()
def auto_setup_pyami():
    """
    Does a best-guess at the ami configuration, if it has not yet been setup.
    The steps are:
    1. check hutch name
    2. determine ami proxy and register it
    3. setup default l3t file
    4. makes sure pyami is imported and connected to the ami proxy
    This will be called the first time pyami is needed. We don't import at the
    top of this file because we need to be able to import this file even if
    pyami isn't in the environment, which is semi-frequent.
    """
    if None in (ami_proxy, l3t_file):
        # This fails if not on nfs, so only do if 100% needed
        hutch = hutch_name()
        if ami_proxy is None:
            proxy = get_ami_proxy(hutch)
            set_pyami_proxy(proxy)
        if l3t_file is None:
            set_l3t_file(L3T_DEFAULT.format(hutch))
    if pyami is None:
        logger.debug('importing pyami')
        # Stored via globals() so the module-level name is rebound.
        globals()['pyami'] = import_module('pyami')
    if not pyami_connected:
        logger.debug('initializing pyami')
        try:
            pyami.connect(ami_proxy)
            globals()['pyami_connected'] = True
        except Exception:
            # Mark disconnected so the next call retries, then re-raise.
            globals()['pyami_connected'] = False
            raise
def set_pyami_proxy(proxy):
    """
    Pick the hostname or group to use for the pyami connection.
    Parameters
    ----------
    proxy: ``str`` or ``int``
        Either the server name or group number
    """
    global ami_proxy
    ami_proxy = proxy
def set_l3t_file(l3t_file):
    """
    Pick the file to write out for the l3t trigger
    Parameters
    ----------
    l3t_file: ``str``
        Full file path
    """
    # globals() is required here: the parameter shadows the module global.
    globals()['l3t_file'] = l3t_file
def set_monitor_det(det):
    """
    Designate one `AmiDet` as the monitor.
    The monitor det is the default normalization detector and the default
    filtering detector when no detector is provided.
    Parameters
    ----------
    det: `AmiDet` or ``bool``
        The detector to set as the monitor. Alternatively, pass in ``False`` to
        disable the monitor det.
    """
    global monitor_det
    # Any falsey value (e.g. False) clears the monitor.
    monitor_det = det or None
def set_pyami_filter(*args, event_codes=None, operator='&', or_bykik=True):
    """
    Set up the l3t filters.
    These connect through pyami to call set_l3t or clear_l3t. The function
    takes in arbitrary dets whose prefixes are the ami names, along with low
    and highs.
    Event codes are handled as a special case, since you always want high vs
    low.
    .. note::
        By default this will treat bykik at an l3t pass! This is so you don't
        lose your off shots when the l3t trigger is in veto mode. You can
        disable this with ``or_bykik=False``, but this will remain the default
        behavior for backwards compatibility and to prevent someone from losing
        shots that they wanted in the data.
    Parameters
    ----------
    *args: (`AmiDet`, ``float``, ``float``) n times
        A sequence of (detector, low, high), which create filters that make
        sure the detector is between low and high. You can omit the first
        `AmiDet` as a shorthand for the current monitor, assuming a monitor has
        been set with `Daq.set_monitor` or `set_monitor_det`.
    event_codes: ``list``, optional
        A list of event codes to include in the filter. l3pass will be when the
        event code is present.
    operator: ``str``, optional
        The operator for combining the detector ranges and event codes. This
        can either be ``|`` to ``or`` the conditions together, so l3pass will
        happen if any filter passes, or it can be left at the default ``&`` to
        ``and`` the conditions together, so l3pass will only happen if all
        filters pass.
    or_bykik: ``bool``, optional
        True by default, appends an ``or`` condition that marks l3t pass when
        we see the bykik event code. This makes sure the off shots make it into
        the data if we're in l3t veto mode.
    """
    auto_setup_pyami()
    fstr = dets_filter(*args, event_codes=event_codes, operator=operator,
                       or_bykik=or_bykik)
    if fstr is not None:
        pyami.set_l3t(fstr, l3t_file)
    else:
        # No filters at all: clear any existing l3t filter.
        pyami.clear_l3t()
    global last_filter_string
    last_filter_string = fstr
def dets_filter(*args, event_codes=None, operator='&', or_bykik=True):
    """
    Return valid l3t/pyami filter strings in a useful format.
    The function takes in arbitrary dets whose prefixes are the ami names,
    along with low and highs. Event codes are handled as a special case, since
    you always want high vs low.
    .. note::
        By default this will treat bykik at an l3t pass! This is so you don't
        lose your off shots when the l3t trigger is in veto mode. You can
        disable this with ``or_bykik=False``, but this will remain the default
        behavior for backwards compatibility and to prevent someone from losing
        shots that they wanted in the data.
    Parameters
    ----------
    *args: (`AmiDet`, ``float``, ``float``) n times
        A sequence of (detector, low, high), which create filters that make
        sure the detector is between low and high. You can omit the first
        `AmiDet` as a shorthand for the current monitor, assuming a monitor has
        been set with `Daq.set_monitor` or `set_monitor_det`.
    event_codes: ``list``, optional
        A list of event codes to include in the filter. l3pass will be when the
        event code is present.
    operator: ``str``, optional
        The operator for combining the detector ranges and event codes. This
        can either be ``|`` to ``or`` the conditions together, so l3pass will
        happen if any filter passes, or it can be left at the default ``&`` to
        ``and`` the conditions together, so l3pass will only happen if all
        filters pass.
    or_bykik: ``bool``, optional
        True by default, appends an ``or`` condition that marks l3t pass when
        we see the bykik event code. This makes sure the off shots make it into
        the data if we're in l3t veto mode.
    Returns
    -------
    filter_string: ``str``
        A valid filter string for `AmiDet` or for ``pyami.set_l3t``
    """
    filter_strings = []
    if len(args) % 3 == 2:
        # One arg missing, add the monitor det as first arg
        if monitor_det is None:
            # BUG FIX: the original passed two positional args to
            # RuntimeError (trailing comma instead of implicit string
            # concatenation), so the message rendered as a tuple; also
            # fixed the "recieve" typo.
            raise RuntimeError('Did not receive args multiple of 3, but '
                               'monitor_det is not set. Aborting.')
        else:
            args = [monitor_det] + list(args)
    for det, lower, upper in partition(3, args):
        if isinstance(det, str):
            ami_name = det
        elif isinstance(det, AmiDet):
            ami_name = det.prefix
        else:
            raise TypeError('Must use AmiDet or string for filtering!')
        filter_strings.append(basic_filter(ami_name, lower, upper))
    if event_codes is not None:
        for code in event_codes:
            filter_strings.append(evr_filter(code))
    if len(filter_strings) == 0:
        return None
    else:
        base = concat_filter_strings(filter_strings, operator=operator)
        if or_bykik:
            # 162 is the bykik event code; always let those shots pass.
            bykik = evr_filter(162)
            return concat_filter_strings([base, bykik], operator='|')
        else:
            return base
def basic_filter(ami_name, lower, upper):
    """
    Helper function for creating an ami filter string.
    Parameters
    ----------
    ami_name: ``str``
        The name of the value in ami
    lower: ``float``
        The lower bound for the value to pass
    upper: ``float``
        The upper bound for the value to pass
    Returns
    -------
    filter_string: ``str``
    """
    return '{lo}<{name}<{hi}'.format(lo=lower, name=ami_name, hi=upper)
def evr_filter(event_code):
    """
    Helper function that creates a filter for a certain event code.
    Parameters
    ----------
    event_code: ``int``
        The event code to create a filter for
    Returns
    -------
    filter_string: ``str``
    """
    # Build the per-event-code ami name and bound it between 0.1 and 2.
    return basic_filter('DAQ:EVR:Evt{}'.format(event_code), 0.1, 2)
def concat_filter_strings(filter_strings, operator='&'):
    """
    Helper function to combine ami filter strings
    Parameters
    ----------
    filter_strings: ``list``
        The valid filter strings to combine
    operator: ``str``
        The operator to place between the filter strings. This can either be
        ``&`` or ``|``, for ``and`` or ``or`` respectively.
    """
    if not filter_strings:
        raise ValueError('filter_strings must have at least one element')
    if len(filter_strings) == 1:
        return filter_strings[0]
    # Parenthesize each term, then join: (a)&(b)&(c)
    return operator.join('({})'.format(term) for term in filter_strings)
class AmiDet(Device):
    """
    Detector that gets data from pyami scalars.
    The data will be in the form of an accumulated mean, rms, and number
    of entries used in the calculations. The raw data is not available via
    pyami.
    This only supports scalars. The array features are known to crash both the
    python session and active ami clients, so don't use them.
    Parameters
    ----------
    prefix: ``str``
        The ami name to use to retrieve the data.
    name: ``str``, required keyword
        The shorter name to use to label the data.
    filter_string: ``str``, optional
        If provided, we'll filter the incoming data using this filter string.
        If omitted or None, we'll use the last set_l3t string.
        If False, but not None, we'll do no filtering at all. This includes the
        empty string.
    min_duration: ``float``, optional
        If provided, we'll wait this many seconds before declaring the
        acquisition as complete. Otherwise, we'll stop acquiring on read.
    normalize: ``bool`` or ``AmiDet``, optional
        Determines the normalization behavior of this detector. The default is
        ``True``, which means normalize to the current ``monitor_det``. See
        `set_monitor_det`. ``False`` means do not normalize. You can also pass
        in any other detector to normalize against something that is not the
        ``monitor_det``.
    """
    # Normalized statistics (equal to raw when no monitor is in use)
    mean = Cpt(Signal, value=0., kind='hinted')
    err = Cpt(Signal, value=0., kind='hinted')
    entries = Cpt(Signal, value=0, kind='hinted')
    # Unnormalized statistics straight from pyami
    mean_raw = Cpt(Signal, value=0., kind='normal')
    err_raw = Cpt(Signal, value=0., kind='normal')
    # Statistics of the monitor det used for normalization
    mean_mon = Cpt(Signal, value=0., kind='normal')
    err_mon = Cpt(Signal, value=0., kind='normal')
    entries_mon = Cpt(Signal, value=0., kind='normal')
    mon_prefix = Cpt(Signal, value='', kind='normal')
    rms = Cpt(Signal, value=0., kind='omitted')
    def __init__(self, prefix, *, name, filter_string=None, min_duration=0,
                 normalize=True):
        auto_setup_pyami()
        self._entry = None
        self._monitor = None
        self.filter_string = filter_string
        self.min_duration = min_duration
        self.normalize = normalize
        super().__init__(prefix, name=name)
    def stage(self):
        """
        Called early in a bluesky scan to initialize the pyami.Entry object.
        Note that pyami.Entry objects begin accumulating data immediately.
        This will be when the filter_string is used to determine how to filter
        the pyami data. Setting the filter_string after stage is called will
        have no effect.
        Internally this creates a new pyami.Entry object. These objects start
        accumulating data immediately.
        """
        if self.filter_string is None and last_filter_string is not None:
            # Fall back to the module-wide l3t filter
            self._entry = pyami.Entry(self.prefix, 'Scalar',
                                      last_filter_string)
        elif self.filter_string:
            self._entry = pyami.Entry(self.prefix, 'Scalar',
                                      self.filter_string)
        else:
            # filter_string is falsey but not None: no filtering at all
            self._entry = pyami.Entry(self.prefix, 'Scalar')
        if self.normalize:
            if isinstance(self.normalize, AmiDet):
                self._monitor = self.normalize
            else:
                # normalize=True means use the module-level monitor det
                self._monitor = monitor_det
        if self._monitor is not None:
            self.mon_prefix.put(self._monitor.prefix)
        return super().stage()
    def unstage(self):
        """
        Called late in a bluesky scan to remove the pyami.Entry object and the
        monitor.
        """
        self._entry = None
        if self._monitor is not None and self._monitor is not self:
            self._monitor.unstage()
            unstaged = super().unstage() + [self._monitor]
        else:
            unstaged = super().unstage()
        self._monitor = None
        self.mon_prefix.put('')
        return unstaged
    def trigger(self):
        """
        Called during a bluesky scan to clear the accumulated pyami data.
        This must be done because the pyami.Entry objects continually
        accumulate data forever. You can stop it by deleting the objects
        as in `unstage`, and you can clear it here to at least start from a
        clean slate.
        If min_duration is zero, this will return a status already marked done
        and successful. Otherwise, this will return a status that will be
        marked done after min_duration seconds.
        If there is a normalization detector in use and it has not been staged,
        it will be staged during the first trigger in a scan.
        """
        if self._entry is None:
            raise RuntimeError('AmiDet %s(%s) was never staged!', self.name,
                               self.prefix)
        if self._monitor is not None and self._monitor is not self:
            if self._monitor._staged != Staged.yes:
                # Re-stage the monitor so it accumulates alongside us
                self._monitor.unstage()
                self._monitor.stage()
            monitor_status = self._monitor.trigger()
        else:
            monitor_status = None
        self._entry.clear()
        if self.min_duration:
            # Finish the status from a timer thread after min_duration secs
            def inner(duration, status):
                time.sleep(duration)
                status._finished()
            status = Status(obj=self)
            Thread(target=inner, args=(self.min_duration, status)).start()
        else:
            status = Status(obj=self, done=True, success=True)
        if monitor_status is None:
            return status
        else:
            return status & monitor_status
    def get(self, *args, **kwargs):
        self._get_data()
        return super().get(*args, **kwargs)
    def read(self, *args, **kwargs):
        self._get_data()
        return super().read(*args, **kwargs)
    def _get_data(self):
        """
        Helper function that stuffs ami data into this device's signals.
        Pulls the accumulated statistics from the pyami entry, derives the
        standard error, and applies monitor normalization when configured.
        """
        if self._entry is None:
            raise RuntimeError('Must stage AmiDet to begin accumulating data')
        data = self._entry.get()
        self.mean_raw.put(data['mean'])
        self.rms.put(data['rms'])
        self.entries.put(data['entries'])
        # Calculate the standard error because old python did
        if data['entries']:
            data['err'] = data['rms']/np.sqrt(data['entries'])
        else:
            data['err'] = 0
        self.err_raw.put(data['err'])
        # Error propagation for the normalized ratio det/mon
        def adj_error(det_mean, det_err, mon_mean, mon_err):
            return det_err/mon_mean + mon_err * (det_mean/mon_mean)**2
        if self._monitor is None:
            # No normalization: hinted values are the raw values
            self.mean.put(data['mean'])
            self.err.put(data['err'])
            self.mean_mon.put(0)
            self.err_mon.put(0)
            self.entries_mon.put(0)
        elif self._monitor is self:
            # Self-normalization: mean is 1 by construction
            self.mean.put(1)
            if data['mean'] == 0:
                self.err.put(np.nan)
            else:
                self.err.put(adj_error(data['mean'], data['err'],
                                       data['mean'], data['err']))
            self.mean_mon.put(data['mean'])
            self.err_mon.put(data['err'])
            self.entries_mon.put(data['entries'])
        else:
            mon_data = self._monitor.get()
            if mon_data.mean_raw == 0:
                # Avoid division by zero; flag with NaN
                self.mean.put(np.nan)
                self.err.put(np.nan)
            else:
                self.mean.put(data['mean']/mon_data.mean_raw)
                self.err.put(adj_error(data['mean'], data['err'],
                                       mon_data.mean_raw,
                                       mon_data.err_raw))
            self.mean_mon.put(mon_data.mean_raw)
            self.err_mon.put(mon_data.err_raw)
            self.entries_mon.put(mon_data.entries)
    def put(self, *args, **kwargs):
        raise ReadOnlyError('AmiDet is read-only')
    def set_det_filter(self, *args, event_codes=None, operator='&'):
        """
        Set the filter on this detector only.
        This lets you override the l3t filter for a single AmiDet. Call with
        no arguments to revert to the last l3t filter. Call with a simple
        ``False`` to disable filtering on this detector. Call as you would to
        set the l3t filter to setup a normal filtering override.
        Parameters
        ----------
        *args: (``AmiDet``, ``float``, ``float``) n times
            A sequence of (detector, low, high), which create filters that make
            sure the detector is between low and high. If instead, the first
            argument is ``False``, we'll disable filtering on this detector.
        event_codes: ``list``, optional
            A list of event codes to include in the filter. l3pass will be when
            the event code is present.
        operator: ``str``, optional
            The operator for combining the detector ranges and event codes.
            This can either be ``|`` to ``or`` the conditions together, so
            l3pass will happen if any filter passes, or it can be left at the
            default ``&`` to ``and`` the conditions together, so l3pass will
            only happen if all filters pass.
        """
        if len(args) == 1 and not args[0]:
            self.filter_string = False
        else:
            self.filter_string = dets_filter(*args, event_codes=event_codes,
                                             operator=operator)
|
Chembl_loader.py | import os.path as osp
import multiprocessing as mp
import gzip
import torch
from torch_geometric.data import Dataset, Data
# from chemreader.readers import Smiles
from rdkit import Chem
import numpy as np
from tqdm import tqdm
from util import get_filtered_fingerprint
class ChemBLFP(Dataset):
# allowable node and edge features in contextPred
allowable_features = {
"possible_atomic_num_list": list(range(1, 119)),
"possible_formal_charge_list": [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
"possible_chirality_list": [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER,
],
"possible_hybridization_list": [
Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2,
Chem.rdchem.HybridizationType.UNSPECIFIED,
],
"possible_numH_list": [0, 1, 2, 3, 4, 5, 6, 7, 8],
"possible_implicit_valence_list": [0, 1, 2, 3, 4, 5, 6],
"possible_degree_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"possible_bonds": [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC,
],
"possible_aromatic_list": [True, False],
"possible_bond_dirs": [ # only for double bond stereo information
Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT,
],
}
def __init__(
self,
root=None,
transform=None,
pre_transform=None,
n_workers=4,
atom_feat_format="contextpred",
scale="full",
):
""" Dataset class for ChemBL dataset.
Args:
root (str): path to the dataset root directory
transform (callable): a callable to transform the data on the fly
pre_transform (callable): a callable to transform the data during processing
n_workers (int): number of workers for multiprocessing
atom_feat_format (str): "contextpred" or "sim_atom_type". The
"sim_atom_type" format has simpler atom types comparing to "contextpred"
format.
scale (str): the scale of the dataset. "filtered" or "full". "full" has
1785415 chemical compounds. "filtered" has 430709 chemical compounds.
"""
if root is None:
root = osp.join("data", "ChemBL")
self.n_workers = n_workers
assert atom_feat_format in [
"contextpred",
"sim_atom_type",
], f"{atom_feat_format} should be in ['contextpred', 'sim_atom_type']"
assert scale in [
"full",
"filtered",
], f"{scale} should be in ['full', 'filtered']"
self.atom_feat_format = atom_feat_format
self.scale = scale
super().__init__(root, transform, pre_transform)
@property
def raw_dir(self):
return self.root
@property
def processed_dir(self):
name = "_".join([self.atom_feat_format, self.scale, "processed"])
return osp.join(self.root, name)
@property
def raw_file_names(self):
if self.scale == "filtered":
return ["smiles.csv"]
else:
return ["chembl_25.csv.gz"]
@property
def processed_file_names(self):
if self.scale == "filtered":
return ["data_1.pt", "data_2.pt", "data_430000.pt"]
else:
return ["data_1.pt", "data_2.pt", "data_1780000.pt"]
def download(self):
""" Get raw data and save to raw directory.
"""
pass
def save_data(self, q):
""" Save graphs in q to data.pt files.
"""
while 1:
data = q.get()
if data == "END":
break
graph, label, idx = data
graph.y = label
graph.id = idx
torch.save(graph, osp.join(self.processed_dir, f"data_{idx}.pt"))
print(
"graph #{} saved to data_{}.pt{}".format(idx, idx, " " * 40), end="\r"
)
def create_graph(self, smi, idx, q):
from chemreader.readers import Smiles
try:
graph = Smiles(smi).to_graph(sparse=True, pyg=True)
except AttributeError:
return
fp = get_filtered_fingerprint(smi)
label = torch.tensor(list(fp), dtype=torch.long)[None, :]
q.put((graph, label, idx))
def _create_contextpred_graph(self, smi, idx, q):
"""
Converts rdkit mol object to graph Data object required by the pytorch
geometric package. NB: Uses simplified atom and bond features, and represent
as indices
:param mol: rdkit mol object
:return: graph data object with the attributes: x, edge_index, edge_attr
"""
# atoms
# num_atom_features = 6 # atom type, chirality tag
atom_features_list = []
mol = Chem.MolFromSmiles(smi)
if mol is None:
return
for atom in mol.GetAtoms():
atom_feature = (
[
self.allowable_features["possible_atomic_num_list"].index(
atom.GetAtomicNum()
)
]
+ [
self.allowable_features["possible_degree_list"].index(
atom.GetDegree()
)
]
+ [
self.allowable_features["possible_formal_charge_list"].index(
atom.GetFormalCharge()
)
]
+ [
self.allowable_features["possible_hybridization_list"].index(
atom.GetHybridization()
)
]
+ [
self.allowable_features["possible_aromatic_list"].index(
atom.GetIsAromatic()
)
]
+ [
self.allowable_features["possible_chirality_list"].index(
atom.GetChiralTag()
)
]
)
atom_features_list.append(atom_feature)
x = torch.tensor(np.array(atom_features_list), dtype=torch.long)
# bonds
num_bond_features = 2 # bond type, bond direction
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = [
self.allowable_features["possible_bonds"].index(bond.GetBondType())
] + [
self.allowable_features["possible_bond_dirs"].index(
bond.GetBondDir()
)
]
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format, shape [2, num_edges]
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
# data.edge_attr: Edge feature matrix, shape [num_edges, num_edge_features]
edge_attr = torch.tensor(np.array(edge_features_list), dtype=torch.long)
else: # mol has no bonds
edge_index = torch.empty((2, 0), dtype=torch.long)
edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)
data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
fp = get_filtered_fingerprint(smi)
label = torch.tensor(list(fp), dtype=torch.long)[None, :]
return q.put((data, label, idx))
def process(self):
""" The method converting SMILES and labels to graphs.
"""
# init Queue
manager = mp.Manager()
q = manager.Queue(maxsize=self.n_workers * 2)
# init listener
writer = mp.Process(target=self.save_data, args=[q])
writer.start()
# init pool
pool = mp.Pool(self.n_workers)
# init SMILES generator
data = self._get_data()
pb = tqdm(data, total=self.len(), desc="Load tasks: ")
# main loop
if not self.atom_feat_format:
worker = self.create_graph
else:
worker = self._create_contextpred_graph
for i, smi in enumerate(pb):
pool.apply_async(worker, args=[smi, i, q])
# finish the tasks
pool.close()
pool.join()
q.put("END")
writer.join()
def len(self):
    """Dataset size — PyTorch-Geometric style alias for ``__len__``."""
    return self.__len__()
def _get_len(self):
    """Hard-coded sample counts: filtered subset vs. the full ChEMBL dump."""
    return 430710 if self.scale == "filtered" else 1785415
def __len__(self):
    """Return the number of samples, caching the value after the first call."""
    if not hasattr(self, "_data_len"):
        # compute once; every later call hits the cached attribute
        self._data_len = self._get_len()
    return self._data_len
def get(self, idx):
    """Load one processed graph (``data_{idx}.pt``) from ``self.processed_dir``.

    :param idx: integer sample index.
    :return: the torch-saved object for that index.
    """
    if idx == 604838:  # this molecule is not convertable to rdkit Mol
        # Redirect to the neighbouring sample, so callers receive a
        # duplicate of 604839 instead of a missing-file error.
        idx = 604839
    data = torch.load(osp.join(self.processed_dir, f"data_{idx}.pt"))
    return data
def _get_data(self):
    """Lazily yield SMILES strings from the raw input file.

    For the "filtered" scale the raw file is a plain-text file with one
    SMILES per line; otherwise it is a gzipped CSV whose first column is
    the SMILES string (header row skipped).

    Yields:
        str: one SMILES string per molecule.
    """
    if self.scale == "filtered":
        with open(self.raw_paths[0]) as f:
            # Iterate the file object lazily instead of readlines(): the
            # full dump is ~1.7M lines and need not be held in memory.
            for smiles in f:
                yield smiles.strip()
    else:
        with gzip.open(self.raw_paths[0]) as f:
            for line in f:
                # decode once per line (the original decoded twice)
                decoded = line.decode()
                # skip header
                if decoded.startswith("smiles"):
                    continue
                yield decoded.split(",")[0]
if __name__ == "__main__":
    # CLI entry point: build (or load) the ChemBLFP dataset with the given
    # options and print the first sample as a quick smoke test.
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("--root", type=str, default=None)
    parser.add_argument("--workers", type=int, default=4)
    parser.add_argument("--scale", type=str, default="full")
    parser.add_argument("--atom-feat-format", type=str, default="contextpred")
    args = parser.parse_args()
    chembl = ChemBLFP(
        root=args.root,
        n_workers=args.workers,
        scale=args.scale,
        atom_feat_format=args.atom_feat_format,
    )
    print(chembl[0])
|
waiting_animation_thread.py | import os
from threading import Thread, Event
class WaitingAnimationThread:
    """Console spinner shown while ReuBERT is processing a question.

    Runs a daemon thread that repeatedly redraws a mirrored "wave"
    pattern around :attr:`MESSAGE` on the current terminal line until
    :meth:`join` is called.
    """

    MESSAGE = ":~$ ReuBERT is reading and understanding text to answer your question. $~:"

    def __init__(self):
        # Event used to ask the animation loop to stop (set by join()).
        self.stopping_event = Event()
        self.sleep_interval = 0.07  # seconds between animation frames
        self.thread = Thread(target=self.run, daemon=True)

    def run(self):
        """Animation loop executed by the worker thread."""
        all_cols, chars, half_cols = self._get_columns_settings()
        i = 0
        # is_set() replaces the deprecated camelCase isSet() alias.
        while not self.stopping_event.is_set():
            self._print_animation_frame(chars, half_cols, i)
            i += 1
        self._erase_animation_on_exit(all_cols)

    def _get_columns_settings(self):
        """Return (terminal width, animation chars, wave width per side)."""
        try:
            _, all_cols = os.popen('stty size', 'r').read().split()
            all_cols = int(all_cols)
        except Exception:
            all_cols = 120  # Assumed default terminal size (e.g. no tty).
        chars = self._get_animation_pattern()
        cols = all_cols - len(WaitingAnimationThread.MESSAGE)
        half_cols = int(cols / 2)
        return all_cols, chars, half_cols

    def _get_animation_pattern(self):
        """Characters cycled to draw the wave."""
        chars = "_,.-'¯ "
        return chars

    def _print_animation_frame(self, chars, half_cols, i):
        """Draw one frame: two mirrored scrolling waves around MESSAGE."""
        left = "".join(chars[(i + j) % len(chars)] for j in range(half_cols))
        right = "".join(chars[(i - j) % len(chars)] for j in range(half_cols))
        print("\r" + left + WaitingAnimationThread.MESSAGE + right, end="\r")
        # Sleep via the event so join() can interrupt the pause instantly.
        self.stopping_event.wait(self.sleep_interval)

    def _erase_animation_on_exit(self, all_cols):
        """Overwrite the animation line with blanks."""
        print(" " * all_cols, end="\r")

    def start(self):
        """Start the animation in its daemon thread."""
        self.thread.start()

    def join(self):
        """Signal the animation to stop and wait for the thread to exit."""
        self.stopping_event.set()
        self.thread.join()
|
mock_static_file_server.py | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
import http.server
import socketserver
from threading import Thread
import logging
log = logging.getLogger(__name__)
PORT = 8999
def serve(port=PORT):
    '''Serves test XML files over HTTP.

    Starts a daemon thread running a SimpleHTTPRequestHandler rooted at the
    tests' ``data-samples`` directory. If the socket cannot be bound (e.g.
    the port is already taken) the error is reported and no server starts.

    :param port: TCP port to bind on all interfaces (default ``PORT``).
    '''
    # Make sure we serve from the tests' XML directory
    os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'data-samples'))
    Handler = http.server.SimpleHTTPRequestHandler

    class TestServer(socketserver.TCPServer):
        # Allow quick restarts without "address already in use" errors.
        allow_reuse_address = True

    skip_connection = False
    try:
        httpd = TestServer(("", port), Handler)
    except Exception as e:
        print('Serve error {}'.format(e))
        skip_connection = True
    if skip_connection is False:
        # BUGFIX: the original built a tuple ('Serving ...', port) and
        # printed/logged its repr; format a proper message instead.
        info = 'Serving test HTTP server at port {}'.format(port)
        print(info)
        log.info(info)
        httpd_thread = Thread(target=httpd.serve_forever)
        # daemon attribute replaces the deprecated setDaemon() call.
        httpd_thread.daemon = True
        httpd_thread.start()
|
Player1.py | # encoding: utf-8
import os
from pygame import freetype
__author__ = "Yoann Berenguer"
__credits__ = ["Yoann Berenguer"]
__version__ = "1.0.0"
__maintainer__ = "Yoann Berenguer"
__email__ = "yoyoberenguer@hotmail.com"
import random
import socket
import _pickle as cpickle
import threading
import time
import copyreg
try:
import pygame
except ImportError:
print("\n<Pygame> library is missing on your system."
"\nTry: \n C:\\pip install pygame on a window command prompt.")
raise SystemExit
try:
import lz4.frame
except ImportError:
print("\n<lz4> library is missing on your system."
"\nTry: \n C:\\pip install lz4 on a window command prompt.")
raise SystemExit
try:
from Textures import *
from Sounds import BLUE_LASER_SOUND, RED_LASER_SOUND, EXPLOSION_SOUND, IMPACT, IMPACT1, IMPACT_SHORT
from Backgrounds import Background
from Asteroids import Asteroid
from MessageSender import SpriteClient
from Transports import Transport
from CreateHalo import PlayerHalo
from SoundServer import SoundControl
from TextureTools import *
from NetworkBroadcast import Broadcast, EventAttr, StaticSprite, AnimatedSprite, SoundAttr, BlendSprite, \
DeleteSpriteCommand
from Explosions import Explosion
from LayerModifiedClass import LayeredUpdatesModified
import GLOBAL
from GLOBAL import GL
from ShootingStars import ShootingStar
from AfterBurners import AfterBurner
from End import PlayerLost, PlayerWin
from PlayerScore import DisplayScore
from CosmicDust import COSMIC_DUST_ARRAY, create_dust, display_dust
from Gems import MakeGems
from LifeBar import HorizontalBar, ShowLifeBar
from Dialogs import DialogBox
except ImportError:
print("\nOne or more game libraries is missing on your system."
"\nDownload the source code from:\n"
"https://github.com/yoyoberenguer/MultiplayerGameEngine.git")
raise SystemExit
# socket.setdefaulttimeout(0)
def unserialize_event(is_set) -> threading.Event:
    """
    Rebuild a threading.Event from its pickled state.

    Accepts either a bool (the state produced by ``serialize_event``, which
    reduces an event to ``e.is_set()``) or a ``threading.Event`` instance.

    NOTE: for an Event instance the returned event is ALWAYS set, because
    any Event object is truthy — this historical quirk is preserved (and
    exercised by the doctests below) for backward compatibility.

    :param is_set: bool flag or threading event
    :return: a new threading.Event, set when *is_set* is truthy.

    >>> event_ = threading.Event()
    >>> u_event = unserialize_event(event_)
    >>> assert isinstance(u_event, threading.Event)
    >>> event_.set()
    >>> u_event = unserialize_event(event_)
    >>> u_event.is_set()
    True
    >>> event_.clear()
    >>> u_event = unserialize_event(event_)
    >>> u_event.is_set()
    True
    >>> unserialize_event(True).is_set()
    True
    >>> unserialize_event(False).is_set()
    False
    """
    # BUGFIX: the original asserted isinstance(is_set, threading.Event),
    # but serialize_event() pickles a bool — so actually unpickling a
    # pickled Event would have failed. Accept both. Also use a plain
    # message: `assert cond, print(...)` always yields a None message.
    assert isinstance(is_set, (bool, threading.Event)), \
        "Positional argument <is_set> is type %s , expecting bool or threading.Event." % type(is_set)
    event_ = threading.Event()
    if is_set:
        event_.set()
    return event_
def serialize_event(e: threading.Event) -> tuple:
    """
    Reduce a threading.Event for pickling (see ``copyreg.pickle`` below).

    :param e: threading event
    :return: tuple ``(<function unserialize_event>, (bool,))`` — the
        standard ``__reduce__`` form: a callable plus its argument tuple.

    >>> event_ = threading.Event()
    >>> s = serialize_event(event_)
    >>> assert isinstance(s, tuple)
    >>> u, v = list(s)
    >>> assert isinstance(u, type(unserialize_event))
    >>> assert v[0] == False
    >>> event_ = threading.Event()
    >>> event_.set()
    >>> s = serialize_event(event_)
    >>> assert isinstance(s, tuple)
    >>> u, v = list(s)
    >>> assert isinstance(u, type(unserialize_event))
    >>> assert v[0] == True
    """
    # Plain assert message — `assert cond, print(...)` yields None.
    assert isinstance(e, threading.Event), \
        "Positional argument <e> is type %s , expecting threading.Event." % type(e)
    # is_set() replaces the deprecated camelCase isSet() alias.
    return unserialize_event, (e.is_set(),)


# Register the reducer so threading.Event instances can be pickled
# (needed when game state is broadcast to the network client).
copyreg.pickle(threading.Event, serialize_event)
class LaserImpact(pygame.sprite.Sprite):
    """Absorption/impact animation spawned where a laser hits an object.

    Class attributes ``containers`` (sprite group) and ``images`` (texture
    list or single surface) MUST be assigned before instantiation.
    Each instance is mirrored to the network client via ``Broadcast``.
    """
    containers = None
    images = None

    def __init__(self, gl_, pos_, parent_, timing_=8, blend_=None, layer_=0):
        """
        Create an impact sprite effect (absorption effect) where the laser is colliding.
        :param gl_: class GL (contains all the game global variables)
        :param pos_: tuple of the impact position (x:int, y:int)
        :param parent_: parent object (MirroredPlayer1Class class instance)
        :param timing_: integer; refreshing time in milliseconds (default 16ms is 60FPS)
        :param blend_: integer; blend effect to apply to the sprite, default pygame.BLEND_RGB_ADD = 0
        :param layer_: integer < 0; represent the sprite layer. default = 0
        """
        assert isinstance(pos_, tuple), \
            "Positional argument <pos_> is type %s , expecting tuple." % type(pos_)
        # assert isinstance(parent_, Asteroid), \
        #     "Positional argument <parent_> is type %s ,
        #     expecting class MirroredPlayer1Class instance." % type(parent_)
        assert isinstance(timing_, int), \
            "Positional argument <timing_> is type %s , expecting integer." % type(timing_)
        if blend_ is None:
            raise ValueError('Blend should not be unset!')
        else:
            assert isinstance(blend_, int), \
                "Positional argument <blend_> is type %s , expecting integer." % type(blend_)
        assert isinstance(layer_, int), \
            "Positional argument <layer_> is type %s , expecting integer." % type(layer_)
        if self.containers is None:
            raise ValueError('LaserImpact.containers is not initialised.\nMake sure to assign the containers to'
                             ' a pygame group prior instantiation.\ne.g: LaserImpact.containers = '
                             'pygame.sprite.Group()')
        if self.images is None:
            raise ValueError("LaserImpact.images is not initialised.\nMake sure to assign a texture to "
                             "prior instantiation.\ne.g: LaserImpact.images = 'P1_SURFACE'")
        if timing_ < 0:
            raise ValueError('Positional argument timing_ cannot be < 0')
        pygame.sprite.Sprite.__init__(self, self.containers)
        # Move the sprite onto its own layer when the group supports layers.
        if isinstance(self.containers, pygame.sprite.LayeredUpdates):
            if layer_:
                self.containers.change_layer(self, layer_)
        assert isinstance(self.images, (list, pygame.Surface))
        self.image = self.images[0] if isinstance(self.images, list) else self.images
        self.rect = self.image.get_rect(center=pos_)
        self.timing = timing_
        self.dt = 0          # time accumulator (ms)
        self.index = 0       # current animation frame
        self.gl = gl_
        self.blend = blend_
        self.layer = layer_
        self.length = len(self.images) - 1
        self.parent_ = parent_
        self.surface_name = 'IMPACT_LASER'
        self.id_ = id(self)
        # Network mirror: register this sprite so the client can track it.
        self.impact_object = Broadcast(self.make_object())
        Broadcast.add_object_id(self.id_)

    def delete_object(self) -> DeleteSpriteCommand:
        """
        Send a command to kill an object on client side.
        :return: DetectCollisionSprite object
        """
        return DeleteSpriteCommand(frame_=self.gl.FRAME, to_delete_={self.id_: self.surface_name})

    def make_object(self) -> AnimatedSprite:
        """
        Create an AnimatedSprite message object (see NetworkBroadcast library)
        :return: AnimatedSprite instance
        """
        # Only attributes self.gl.FRAME change, self.rect and self.index are changing over the time.
        return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.surface_name,
                              layer_=self.layer, blend_=self.blend, rect_=self.rect,
                              index_=self.index)

    def quit(self) -> None:
        # Unregister from the network, tell the client to delete the mirror,
        # then remove the sprite from every local group.
        Broadcast.remove_object_id(self.id_)
        obj = Broadcast(self.delete_object())
        obj.queue()
        self.kill()

    def update(self):
        # Advance the animation only when the refresh interval has elapsed.
        if self.dt > self.timing:
            if self.rect.colliderect(self.gl.SCREENRECT):
                if isinstance(self.images, list):
                    self.image = self.images[self.index % self.length]
                # follow the parent object with half of its speed
                self.rect.move_ip(self.parent_.speed // 2)
                self.index += 1
                if self.index > self.length:
                    self.quit()
                self.dt = 0
            else:
                # off-screen -> destroy locally and on the client
                self.quit()
        # Broadcast position/frame to the client while the sprite is visible.
        if self.rect.colliderect(self.gl.SCREENRECT):
            self.impact_object.update({'frame': self.gl.FRAME, 'rect': self.rect, 'index': self.index})
            self.impact_object.queue()
        self.dt += self.gl.TIME_PASSED_SECONDS
class Direction(pygame.sprite.Sprite):
    """Animated directional indicator drawn above the player.

    Scrolls two mirrored copies of the ``LIGHT`` texture towards each
    other while the parent sprite is alive; kills itself otherwise.
    """
    images = LIGHT
    containers = None

    def __init__(self, gl_, parent_, timing_, layer_=0):
        """
        :param gl_: class GL (game global variables)
        :param parent_: sprite whose liveness controls this indicator
        :param timing_: integer; refresh interval in milliseconds
        :param layer_: integer; sprite layer (default 0)
        """
        self.layer = layer_
        pygame.sprite.Sprite.__init__(self, self.containers)
        if isinstance(gl_.All, pygame.sprite.LayeredUpdates):
            if layer_:
                gl_.All.change_layer(self, layer_)
        self.image = Direction.images
        # Work surface holding the two scrolling arrow halves (252 x 74).
        self.surface = pygame.Surface((202 + 50, 74), pygame.SRCALPHA).convert_alpha()
        w, h = gl_.SCREENRECT.size
        w2, h2 = w // 2, h // 2
        # Centre horizontally, 150px above the screen centre.
        self.rect = self.image.get_rect(
            topleft=(w2 - self.surface.get_width() // 2, h2 - self.surface.get_height() // 2 - 150))
        self.timing = timing_
        self.parent = parent_
        self.gl = gl_
        # Pixel arrays scrolled in opposite directions each frame; the left
        # array is the right one reversed along the first axis.
        self.arrow_right_array = pygame.surfarray.pixels3d(self.image)
        self.arrow_left_array = self.arrow_right_array[::-1]
        self.dt = 0
        self.blend = pygame.BLEND_RGBA_ADD
        self.id_ = id(self)

    def update(self) -> None:
        if self.parent.alive():
            if self.dt > self.timing:
                # Rebuild the work surface and scroll both halves one step.
                self.surface = pygame.Surface((202 + 50, 74), pygame.SRCALPHA).convert_alpha()
                image_right, self.arrow_right_array = scroll_surface(self.arrow_right_array, 0, 1)
                image_left, self.arrow_left_array = scroll_surface(self.arrow_left_array, 0, -1)
                self.surface.blit(image_left, (0, 0))
                self.surface.blit(image_right, (self.surface.get_width() - image_right.get_width(), 0))
                self.image = self.surface
            # NOTE(review): dt is never reset here, so after the first
            # interval the animation advances every call — confirm intended.
            self.dt += self.gl.TIME_PASSED_SECONDS
        else:
            # Parent gone -> remove the indicator.
            self.kill()
class DisplayAmountDamage(pygame.sprite.Sprite):
    """Floating yellow text showing damage dealt; disappears after 30 frames."""
    # NOTE: bound at class-definition time, unlike the other sprite classes
    # whose containers are assigned just before instantiation.
    containers = GL.All

    def __init__(self, gl_, text_, pos_, timing_, layer_=0):
        """
        :param gl_: class GL (game global variables)
        :param text_: string; damage amount to display
        :param pos_: tuple (x, y); centre of the text
        :param timing_: integer; refresh interval in milliseconds
        :param layer_: integer; sprite layer (default 0)
        """
        self.layer = layer_
        pygame.sprite.Sprite.__init__(self, self.containers)
        if isinstance(gl_.All, pygame.sprite.LayeredUpdates):
            if layer_:
                gl_.All.change_layer(self, layer_)
        self.image, self.rect = ARCADE_FONT.render(text_,
                                                   fgcolor=pygame.Color(255, 255, 0), bgcolor=None)
        self.image_copy = self.image.copy()
        self.rect.center = pos_
        self.timing = timing_
        self.gl = gl_
        self.dt = 0
        self.blend = 0  # pygame.BLEND_RGBA_ADD
        self.id_ = id(self)
        # Frame at which the text was spawned; lifetime is 30 frames.
        self.start = self.gl.FRAME
        # self.dx = 0
        # self.pos = pos_
        # self.w, self.h = self.image.get_size()

    def update(self) -> None:
        if self.dt > self.timing:
            if self.gl.FRAME - self.start > 30:
                self.kill()
            else:
                # Disabled grow-while-fading effect, kept for reference:
                # self.image = pygame.transform.smoothscale(
                #     self.image_copy, (int(self.w + self.dx), int(self.h + self.dx)))
                # self.rect = self.image.get_rect(center=self.pos)
                # self.dx += 0.5
                ...
        self.dt += self.gl.TIME_PASSED_SECONDS
class Light(pygame.sprite.Sprite):
    """One-shot light-burst animation played at a fixed position.

    Steps through the ``LIGHT`` frame sequence once and removes itself
    from all sprite groups when the sequence is exhausted.
    """
    images = LIGHT
    containers = None

    def __init__(self, gl_, pos_, timing_, layer_=0):
        """
        :param gl_: class GL (game global variables)
        :param pos_: tuple (x, y); centre of the effect
        :param timing_: integer; refresh interval in milliseconds
        :param layer_: integer; sprite layer (default 0)
        """
        self.layer = layer_
        pygame.sprite.Sprite.__init__(self, self.containers)
        if isinstance(gl_.All, pygame.sprite.LayeredUpdates) and layer_:
            gl_.All.change_layer(self, layer_)
        self.gl = gl_
        self.pos = pos_
        self.timing = timing_
        self.images = Light.images
        self.image = Light.images[0]
        self.rect = self.image.get_rect(center=pos_)
        # last frame index shown before the sprite destroys itself
        self.dim = len(self.images) - 2
        self.index = 0
        self.dt = 0
        self.blend = pygame.BLEND_RGBA_ADD
        self.id_ = id(self)

    def update(self) -> None:
        # Advance one animation frame when the refresh interval elapsed.
        if self.dt > self.timing:
            frame = self.index
            self.image = self.images[frame]
            self.rect = self.image.get_rect(center=self.pos)
            if frame > self.dim:
                # sequence exhausted -> drop the sprite from every group
                self.kill()
            self.index = frame + 1
        self.dt += self.gl.TIME_PASSED_SECONDS
class Flare(pygame.sprite.Sprite):
    """Short-lived lens-flare animation played at a fixed position.

    Plays the ``FLARE`` frame sequence once and removes itself from all
    sprite groups when it runs out of frames.
    """
    images = FLARE
    containers = None

    def __init__(self, gl_, pos_, timing_, layer_=0):
        """
        :param gl_: class GL (game global variables)
        :param pos_: tuple (x, y); centre of the effect
        :param timing_: integer; refresh interval in milliseconds
        :param layer_: integer; sprite layer (default 0)
        """
        self.layer = layer_
        pygame.sprite.Sprite.__init__(self, self.containers)
        if isinstance(gl_.All, pygame.sprite.LayeredUpdates) and layer_:
            gl_.All.change_layer(self, layer_)
        self.gl = gl_
        self.timing = timing_
        self.images = Flare.images
        self.image = Flare.images[0]
        self.rect = self.image.get_rect(center=pos_)
        # last frame index shown before the sprite destroys itself
        self.dim = len(self.images) - 2
        self.index = 0
        self.dt = 0
        self.blend = pygame.BLEND_RGBA_ADD
        self.id_ = id(self)

    def update(self) -> None:
        # Show the next frame once the refresh interval has elapsed.
        if self.dt > self.timing:
            frame = self.index
            self.image = self.images[frame]
            if frame > self.dim:
                # sequence exhausted -> drop the sprite from every group
                self.kill()
            self.index = frame + 1
        self.dt += self.gl.TIME_PASSED_SECONDS
class Shot(pygame.sprite.Sprite):
    """Laser shot fired by Player1, mirrored to the network client.

    Class attributes ``containers`` and ``images`` MUST be assigned before
    instantiation. ``last_shot``/``shooting`` implement a 10-frame reload
    gate shared by all shots.
    """
    containers = None
    images = None
    last_shot = 0       # frame number of the most recent shot
    shooting = False    # True while the reload interval is running
    mask = None  # image mask for perfect collision detection

    def __init__(self, parent_, pos_, gl_, timing_=0, layer_=-1, surface_name_=''):
        """
        Create a sprite shoot
        :param parent_: parent object class Player
        :param pos_: tuple (x:int, y:int); sprite shoot position (start position)
        :param gl_: Constants (class GL)
        :param timing_: integer; sprite refreshing time FPS, e.g 16ms is 60FPS
        :param layer_: integer; sprite layer (default 0 top layer)
        :param surface_name_: string; surface name e.g 'BLUE_LASER'; surface = eval(BLUE_LASER')
        """
        assert isinstance(parent_, Player1), \
            "Positional argument <parent_> is type %s , expecting class MirroredPlayer1Class instance." % type(parent_)
        assert isinstance(pos_, tuple), \
            "Positional argument <pos_> is type %s , expecting tuple." % type(pos_)
        assert isinstance(timing_, int), \
            "Positional argument <timing_> is type %s , expecting integer." % type(timing_)
        assert isinstance(layer_, int), \
            "Positional argument <layer_> is type %s , expecting integer." % type(layer_)
        assert isinstance(surface_name_, str), \
            "Positional argument <surface_name_> is type %s , " \
            "expecting python string." % type(surface_name_)
        if self.containers is None:
            raise ValueError('Shot.containers is not initialised.\nMake sure to assign the containers to'
                             ' a pygame group prior instantiation.\ne.g: Shot.containers = pygame.sprite.Group()')
        if self.images is None:
            raise ValueError("Shot.images is not initialised.\nMake sure to assign a texture to "
                             "prior instantiation.\ne.g: Shot.images = 'P1_SURFACE'")
        if timing_ < 0:
            raise ValueError('Positional argument timing_ cannot be < 0')
        self.layer = layer_
        pygame.sprite.Sprite.__init__(self, self.containers)
        if isinstance(gl_.All, pygame.sprite.LayeredUpdates):
            if layer_:
                gl_.All.change_layer(self, layer_)
        self.images = Shot.images
        self.image = self.images[0] if isinstance(self.images, list) else self.images
        self.mask_ = Shot.mask
        # Straight-up trajectory, 35px per update tick.
        self.speed = pygame.math.Vector2(0, -35)
        self.timing = timing_
        self.pos = pos_
        self.gl = gl_
        self.position = pygame.math.Vector2(*self.pos)
        self.rect = self.image.get_rect(center=self.pos)
        self.dt = 0
        self.blend = pygame.BLEND_RGBA_ADD
        self.index = 0
        self.parent = parent_
        self.surface_name = surface_name_
        self.id_ = id(self)
        if Shot.shooting and self.is_reloading(self.gl.FRAME):
            # Still reloading -> discard this shot immediately.
            self.kill()
        else:
            # Restart the laser sound and broadcast it to the client.
            self.gl.MIXER.stop_object(id(BLUE_LASER_SOUND))
            self.gl.MIXER.play(sound_=BLUE_LASER_SOUND, loop_=False, priority_=0, volume_=1.0,
                               fade_out_ms=0, panning_=True, name_='BLUE_LASER_SOUND', x_=self.rect.centerx,
                               object_id_=id(BLUE_LASER_SOUND), screenrect_=self.gl.SCREENRECT)
            self.sound_object = Broadcast(self.make_sound_object('BLUE_LASER_SOUND'))
            self.sound_object.play()
            # NOTE(review): uses the global FRAME, not self.gl.FRAME —
            # confirm both counters are kept in sync.
            Shot.last_shot = FRAME
            Shot.shooting = True
            # Create a network object
            self.shot_object = Broadcast(self.make_object())
            self.shot_object.queue()
            Broadcast.add_object_id(self.id_)

    def delete_object(self) -> DeleteSpriteCommand:
        """
        Send a command to kill an object on client side.
        :return: DetectCollisionSprite object
        """
        return DeleteSpriteCommand(frame_=self.gl.FRAME, to_delete_={self.id_: self.surface_name})

    def make_sound_object(self, sound_name_: str) -> SoundAttr:
        """
        Create a sound object for network broadcasting.
        :param sound_name_: string; representing the texture to use e.g 'BLUE_LASER_SOUND"
        :return: Sound object, SoundAttr instance.
        """
        assert isinstance(sound_name_, str), \
            "Positional argument <sound_name_> is type %s , expecting python string." % type(sound_name_)
        if sound_name_ not in globals():
            raise NameError('Sound %s is not define.' % sound_name_)
        return SoundAttr(frame_=self.gl.FRAME, id_=self.id_, sound_name_=sound_name_, rect_=self.rect)

    def make_object(self) -> StaticSprite:
        """
        Create a StaticSprite message object (see NetworkBroadcast library)
        :return: StaticSprite object
        """
        # Only attributes self.gl.FRAME change and self.rect are changing over the time.
        return StaticSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.surface_name,
                            layer_=self.layer, blend_=self.blend, rect_=self.rect)

    @staticmethod
    def is_reloading(frame_: int) -> bool:
        """
        Check if the player is shooting or reloading.
        Compare the actual FRAME number to the latest Shot frame number.
        Shot.last_shot default value is set to 0 during instantiation.
        Reloading time is hard encoded to 10 frames interval.
        When the player is ready to shoot, the shooting flag is set to false, otherwise stays True
        :frame_: integer; must be > 0 (Actual frame number)
        :return: True or False. True player is reloading, False player is ready to shoot

        >>> Shot.shooting = True
        >>> Shot.last_shot = 0
        >>> Shot.is_reloading(9)
        True
        >>> assert Shot.shooting is True
        >>> Shot.is_reloading(10)
        False
        >>> Shot.is_reloading(-1)
        Traceback (most recent call last):
        ...
        ValueError: frame_ must be >= 0
        """
        assert isinstance(frame_, int), \
            "argument frame_ should be integer got %s" % type(frame_)
        if not frame_ >= 0:
            raise ValueError("frame_ must be >= 0")
        assert hasattr(Shot, 'last_shot'), 'Class Shot is missing attribute <last_shot>.'
        assert hasattr(Shot, 'shooting'), 'Class Shot is missing attribute <shooting>.'
        assert isinstance(Shot.last_shot, int), \
            "last_shot variable should be integer got %s" % type(Shot.last_shot)
        assert frame_ >= Shot.last_shot, \
            "Game constant frame_ value:%s should be > or equal to %s " % (frame_, Shot.last_shot)
        if frame_ - Shot.last_shot < 10:
            # still reloading
            return True
        else:
            # ready to shoot
            Shot.shooting = False
            return False

    def collide(self, rect_: pygame.Rect, object_, damage_) -> None:
        """
        Create a laser impact sprite
        :param rect_: pygqme.Rect object
        :param object_: object colliding with the rectangle
        :param damage_: damage quantity
        :return: None
        """
        assert isinstance(rect_, pygame.Rect), \
            'Positional argument rect_ should be a <pygame.Rect> type got %s ' % type(rect_)
        assert object_ is not None, 'Positional argument object_ cannot be None'
        # if sprite belongs to any group(s)
        if self.alive():
            # Spawn the impact, flare, light and damage-text effects, then
            # remove this shot (locally and on the client).
            LaserImpact.containers = self.gl.All
            LaserImpact.images = IMPACT_LASER
            LaserImpact(gl_=self.gl, pos_=rect_.topleft, parent_=object_,
                        timing_=8, blend_=pygame.BLEND_RGBA_ADD, layer_=0)
            Flare.containers = self.gl.All
            Flare.images = FLARE
            Flare(gl_=self.gl, pos_=self.rect.center, timing_=8, layer_=0)
            Light.containers = self.gl.All
            Light.images = LIGHT
            Light(gl_=self.gl, pos_=self.rect.center, timing_=8, layer_=0)
            DisplayAmountDamage.containers = self.gl.All
            DisplayAmountDamage(self.gl, str(damage_), pos_=self.rect.center, timing_=8, layer_=0)
            self.quit()

    def quit(self) -> None:
        # Unregister from the network, tell the client to delete the mirror,
        # then remove the sprite from every local group.
        Broadcast.remove_object_id(self.id_)
        obj = Broadcast(self.delete_object())
        obj.queue()
        self.kill()

    def update(self) -> None:
        """
        Update shot sprites whereabouts.
        sprite position is given by its rectangle.
        The position changed by incrementing the position with its speed vector (self.speed)
        if the sprite belongs to the screen dimensions, a message is broadcast to the client
        :return: None
        """
        if self.dt > self.timing:
            if self.gl.SCREENRECT.colliderect(self.rect):
                # Move the laser sprite
                if self.images != IMPACT_LASER:
                    self.position += self.speed
                    self.rect.center = (self.position.x, self.position.y)
                if self.rect.colliderect(self.gl.SCREENRECT):
                    self.shot_object.update({'frame': self.gl.FRAME, 'rect': self.rect})
                    self.shot_object.queue()
                self.dt = 0
            else:
                # off-screen -> destroy locally and on the client
                self.quit()
        else:
            self.dt += self.gl.TIME_PASSED_SECONDS
class Player1(pygame.sprite.Sprite):
    """Server-side player 1 spaceship: input handling, collisions, engines,
    and per-frame network broadcast of its position/life to the client.

    Class attributes ``containers`` (sprite group) and ``image`` (texture)
    MUST be assigned before instantiation.
    """
    containers = None
    image = None

    def __init__(self, gl_, timing_=8, pos_: tuple = (0, 0), layer_=0):
        """
        :param gl_: Game constants (GL class) see GLOBAL library for more details
        :param timing_: integer; default 15ms (60 FPS)
        :param pos_: tuple; (x:int, y:int) representing player 1 position (default x=0, y=0)
        :param layer_: Sprite layer used by player 1
        """
        assert isinstance(timing_, int), \
            "Positional argument <timing_> is type %s , expecting integer." % type(timing_)
        assert isinstance(pos_, tuple), \
            "Positional argument <pos_> is type %s , expecting tuple." % type(pos_)
        assert isinstance(layer_, int), \
            "Positional argument <layer_> is type %s , expecting integer." % type(layer_)
        if self.containers is None:
            raise ValueError(
                'MirroredPlayer1Class.containers is not initialised.\nMake sure to assign the containers to'
                ' a pygame group prior instantiation.\ne.g: MirroredPlayer1Class.containers = pygame.sprite.Group()')
        if self.image is None:
            raise ValueError("MirroredPlayer1Class.image is not initialised.\nMake sure to assign a texture to "
                             "prior instantiation.\ne.g: MirroredPlayer1Class.image = 'P1_SURFACE'")
        pygame.sprite.Sprite.__init__(self, self.containers)
        assert isinstance(Player1.image, (pygame.Surface, list)), \
            "image is not a pygame.Surface or a list, got %s instead" % type(Player1.image)
        if timing_ < 0:
            raise ValueError('Positional argument timing_ cannot be < 0')
        self.image = Player1.image
        self.image_copy = self.image.copy()
        self.rect = self.image.get_rect(center=pos_)
        self.timing = timing_
        self.surface_name = 'P1_SURFACE'
        self.gl = gl_
        self.dt = 0
        self.speed = 600
        self.layer = layer_
        self.blend = 0
        self.shooting = False
        self.previous_pos = pygame.math.Vector2()  # previous position
        self.life = 200  # player's life
        self.max_life = 200  # maximum life
        self.eng_right = self.right_engine()  # instance for right engine
        self.eng_left = self.left_engine()  # instance for left engine
        # todo test if convert_alpha otherwise this is useless
        self.mask = pygame.mask.from_surface(self.image)  # Image have to be convert_alpha compatible
        self.damage = 800  # -> gives 800 hit point of damage after collision
        self.id_ = id(self)
        # Network mirrors: position/life broadcast plus both impact sounds.
        self.player_object = Broadcast(self.make_object())
        self.impact_sound_object = Broadcast(self.make_sound_object('IMPACT'))
        self.impact_sound_object_short = Broadcast(self.make_sound_object('IMPACT_SHORT'))
        self.update_score = self.gl.P1_SCORE.score_update
        Broadcast.add_object_id(self.id_)

    def delete_object(self) -> DeleteSpriteCommand:
        """
        Send a command to kill an object on client side.
        :return: DetectCollisionSprite object
        """
        return DeleteSpriteCommand(frame_=self.gl.FRAME, to_delete_={self.id_: self.surface_name})

    def make_sound_object(self, sound_name_: str) -> SoundAttr:
        """
        Create a sound object for network broadcasting.
        :param sound_name_: string; representing the texture to use e.g 'BLUE_LASER_SOUND"
        :return: Sound object, SoundAttr instance.
        """
        assert isinstance(sound_name_, str), \
            'Positional argument sound_name_ is not a string type, got %s ' % type(sound_name_)
        if sound_name_ not in globals():
            raise NameError('Sound %s is not define.' % sound_name_)
        return SoundAttr(frame_=self.gl.FRAME, id_=self.id_, sound_name_=sound_name_, rect_=self.rect)

    def make_object(self) -> StaticSprite:
        """
        Create a sprite object for network broadcast similar to MirroredPlayer1Class
        :return: StaticSprite object (see NetworkBroadcast library for more details)
        """
        # Only attributes self.gl.FRAME, self.rect are changing over the time.
        return StaticSprite(
            frame_=self.gl.FRAME, id_=self.id_, surface_=self.surface_name,
            layer_=self.layer, blend_=self.blend, rect_=self.rect, life=self.life, damage=self.damage)

    def player_lost(self):
        # Show the "lost" screen sprite group.
        self.gl.All.add(LOST)

    def explode(self) -> None:
        """
        Player explosion sprites and halo
        :return: None
        """
        if self.alive():
            Explosion.images = PLAYER_EXPLOSION1
            Explosion(self, self.rect.center,
                      self.gl, 8, self.layer, texture_name_='PLAYER_EXPLOSION1')
            PlayerHalo.images = HALO_SPRITE13
            PlayerHalo.containers = self.gl.All
            PlayerHalo(texture_name_='HALO_SPRITE13', object_=self, timing_=8)
            # self.player_lost()
            self.quit()

    def collide(self, damage_: int) -> None:
        """
        Player1 collide with object, transfer the damage and play the collision sound locally
        if life < 1, trigger player1 explosion.
        :param damage_: integer; must be > 0 (total damage transferred to the player after collision.)
        :return: None
        """
        assert isinstance(damage_, int), \
            'Positional argument damage_ is not an int, got %s instead.' % type(damage_)
        assert damage_ > 0, 'damage_ argument should be > 0 '
        if self.alive():
            self.life -= damage_
            if damage_ > 10:
                # player loud impact sound locally
                self.gl.MIXER.play(sound_=IMPACT, loop_=False, priority_=0,
                                   volume_=1.0, fade_out_ms=0, panning_=True,
                                   name_='IMPACT', x_=self.rect.centerx,
                                   object_id_=id(IMPACT),
                                   screenrect_=self.gl.SCREENRECT)
                # Broadcast loud impact sound
                self.impact_sound_object.play()
            else:
                # player short impact sound locally (for small object collision)
                self.gl.MIXER.play(sound_=IMPACT_SHORT, loop_=False, priority_=0,
                                   volume_=1.0, fade_out_ms=0, panning_=True,
                                   name_='IMPACT', x_=self.rect.centerx,
                                   object_id_=id(IMPACT),
                                   screenrect_=self.gl.SCREENRECT)
                # Broadcast short impact sound to client
                self.impact_sound_object_short.play()

    """
    def hit(self, damage_: int) -> None:
        #Transfer damage to the player after being hit.
        #:param damage_: integer > 0, damage transfer to the player
        #:return: None
        assert isinstance(damage_, int), \
            'Positional argument damage_ is not an int, got %s instead.' % type(damage_)
        assert damage_ > 0, 'damage_ argument should be > 0 '
        if self.alive():
            self.life -= damage_
    """

    def left_engine(self) -> AfterBurner:
        """
        Create a sprite for the left engine
        :return: AfterBurner instance
        """
        if EXHAUST:
            AfterBurner.images = EXHAUST
        else:
            raise NameError('EXHAUST is not defined.')
        return AfterBurner(self, self.gl, (-22, 38),
                           0, pygame.BLEND_RGB_ADD, self.layer - 1, texture_name_='EXHAUST')

    def right_engine(self) -> AfterBurner:
        """
        Create a sprite for the right engine
        :return: AfterBurner instance
        """
        if EXHAUST:
            AfterBurner.images = EXHAUST
        else:
            raise NameError('EXHAUST is not defined.')
        return AfterBurner(self, self.gl, (22, 38),
                           0, pygame.BLEND_RGB_ADD, self.layer - 1, texture_name_='EXHAUST')

    def get_centre(self) -> tuple:
        """
        Get MirroredPlayer1Class position.
        :return: tuple representing MirroredPlayer1Class rect centre
        """
        return self.rect.center

    def disruption(self) -> None:
        """
        Create an electric effect on MirroredPlayer1Class hull.
        :return: None
        """
        if 'DISRUPTION' in globals():
            if isinstance(DISRUPTION, list):
                # NOTE(review): relies on the global FRAME counter, not
                # self.gl.FRAME — confirm both are kept in sync.
                index = (FRAME >> 1) % len(DISRUPTION) - 1
            else:
                raise ValueError('DISRUPTION is not a list, got %s instead.' % type(DISRUPTION))
        else:
            raise NameError('DISRUPTION is not defined.')
        self.image.blit(DISRUPTION[index], (-20, -20), special_flags=pygame.BLEND_RGB_ADD)
        # self.disruption_object.update({'frame': self.gl.FRAME, 'surface': 'DISRUPTION',
        #                                'rect': self.rect, 'index': index})
        # self.disruption_object.show(self.disruption_object)
        # self.disruption_object.queue()

    def shooting_effect(self) -> pygame.Surface:
        """
        Apply a special effect on the aircraft hull when firing.
        :return: pygame.Surface
        """
        if 'GRID' in globals():
            self.image.blit(GRID, (0, 0), special_flags=pygame.BLEND_RGB_ADD)
        else:
            raise NameError('GRID is not defined.')
        return self.image

    def contain_sprite(self, move_: tuple) -> bool:
        """
        Check if the player can move toward the screen edges
        Return True if the movement is allowed else False
        :param move_: Tuple; player movement
        :return: True if the movement is allowed else return False
        """
        rect_copy = self.rect.copy()
        rect_copy.x += move_[0]
        rect_copy.y += move_[1]
        if self.gl.SCREENRECT.contains(rect_copy):
            return True
        else:
            return False

    def quit(self) -> None:
        # Unregister from the network, tell the client to delete the mirror,
        # then remove the sprite from every local group.
        Broadcast.remove_object_id(self.id_)
        obj = Broadcast(self.delete_object())
        obj.queue()
        self.kill()

    def update(self) -> None:
        """
        Update MirroredPlayer1Class sprite
        :return: None
        """
        self.rect.clamp_ip(self.gl.SCREENRECT)
        # Inside the 60 FPS area
        if self.dt > self.timing:
            # Restore the clean hull before re-applying per-frame effects.
            self.image = self.image_copy.copy()
            if self.life < 1:
                self.explode()
            displacement = self.speed * self.gl.SPEED_FACTOR
            if self.gl.KEYS[pygame.K_UP]:
                if self.contain_sprite((0, -displacement)):
                    self.rect.move_ip(0, -displacement)
            if self.gl.KEYS[pygame.K_DOWN]:
                if self.contain_sprite((0, displacement)):
                    self.rect.move_ip(0, displacement)
            if self.gl.KEYS[pygame.K_LEFT]:
                if self.contain_sprite((-displacement, 0)):
                    self.rect.move_ip(-displacement, 0)
            if self.gl.KEYS[pygame.K_RIGHT]:
                if self.contain_sprite((displacement, 0)):
                    self.rect.move_ip(displacement, 0)
            # if self.gl.JOYSTICK is not None and self.gl.JOYSTICK.PRESENT:
            #     x, y = self.gl.JOYSTICK.axes_status[0]
            if self.gl.KEYS[pygame.K_SPACE]:
                self.shooting_effect()
                Shot(self, self.rect.center, self.gl, 0,
                     self.layer - 1, surface_name_='BLUE_LASER')
            # NOTE(review): `joystick` and `JL3` are module-level globals not
            # visible in this file's imports — confirm where they are defined.
            if joystick is not None:
                self.rect.move_ip(JL3.x * self.gl.SPEED_FACTOR * self.speed,
                                  JL3.y * self.gl.SPEED_FACTOR * self.speed)
            # Jitter by one pixel when idle so the client always sees motion.
            if self.previous_pos == self.rect.center:
                self.rect.centerx += random.randint(-1, 1)
                self.rect.centery += random.randint(-1, 1)
            # Intro: drift upward during the first 100 frames.
            if self.gl.FRAME < 100:
                self.rect.centery -= 7
            self.previous_pos = self.rect.center
            if self.alive():
                # Broadcast the spaceship position every frames
                self.player_object.update({'frame': self.gl.FRAME,
                                           'rect': self.rect,
                                           'life': self.life})
                self.player_object.queue()
            self.dt = 0
        else:
            self.dt += self.gl.TIME_PASSED_SECONDS
        # Outside the 60FPS area
        # Below code will be processed every frames
        # !UPDATE the <follower> sprites with the new player position.
        self.eng_left.update()
        self.eng_right.update()
        self.disruption()
class MirroredPlayer2Class(pygame.sprite.Sprite):
    """Server-side mirror of the remote Player 2 sprite, rebuilt from the
    attribute bundle received over the network."""

    def __init__(self, sprite_):
        """
        Create an instance of Player2 on the server
        :param sprite_: object containing all attributes

        >>> import pygame
        >>> pygame.init()
        (8, 0)
        >>> SCREENRECT = pygame.Rect(0, 0, 800, 1024)
        >>> screen = pygame.display.set_mode(SCREENRECT.size, pygame.HWSURFACE, 32)
        >>> from Textures import P1_SURFACE, DISRUPTION
        >>> attributes = {'rect': pygame.Rect(0, 0, 0, 0),
        ...     'image':eval('P1_SURFACE'), 'blend':0, 'layer':-1, 'id_':35555,
        ...     'frame':0, 'damage': 800, 'life': 200, 'surface':'P1_SURFACE'}
        >>> sprite__ = pygame.sprite.Sprite()
        >>> for attr, value in attributes.items():
        ...     setattr(sprite__, attr, value)
        >>> spr = MirroredPlayer2Class(sprite__)
        >>> print(spr.surface)
        P1_SURFACE
        """
        assert sprite_ is not None, 'Positional argument sprite__ is None.'
        attributes = ['rect', 'image', 'blend', 'layer', 'id_', 'frame', 'damage', 'life', 'surface']
        for attr in attributes:
            assert hasattr(sprite_, attr),\
                'Positional argument sprite__ is missing attribute %s ' % attr
        pygame.sprite.Sprite.__init__(self)
        self.rect = sprite_.rect
        self.image = sprite_.image
        # Pristine copy restored every frame before re-applying effects.
        self.image_copy = sprite_.image.copy()
        self.blend = sprite_.blend
        self.layer = sprite_.layer
        self.id_ = sprite_.id_
        self.frame = sprite_.frame
        self.surface = sprite_.surface
        self.damage = sprite_.damage
        # NOTE(review): 'life' is required on sprite_ but never copied onto
        # this mirror — confirm whether that is intentional.
        self.gl = GL

    def disruption(self) -> None:
        # Electric-hull overlay; silently skipped when the FRAME counter or
        # the DISRUPTION texture list is not available at module level.
        if globals().__contains__('FRAME') and globals().__contains__('DISRUPTION'):
            index = (FRAME >> 1) % len(DISRUPTION) - 1
            self.image.blit(DISRUPTION[index], (-20, -20), special_flags=pygame.BLEND_RGB_ADD)

    def update(self) -> None:
        if self.image is None:
            raise ValueError('Cannot copy() NoneType.')
        # Reset to the clean texture, then re-apply the disruption overlay.
        self.image = self.image_copy.copy()
        self.disruption()
class P2Shot(pygame.sprite.Sprite):
    """Local mirror of a Player-2 laser shot received over the network."""

    # Attributes the broadcast sprite must carry to build a valid shot.
    _REQUIRED = ('rect', 'image', 'blend', 'layer', 'id_', 'surface')

    def __init__(self, gl_, sprite_, timing_=0):
        """
        :param gl_: class GL (Constants)
        :param sprite_: object containing all the original sprite attributes
        :param timing_: integer > 0 representing the refreshing time.
        """
        assert isinstance(gl_, type(GL)), \
            "Positional argument <gl_> is type %s , expecting class GL instance." % type(gl_)
        assert sprite_ is not None, 'Positional argument sprite__ is None.'
        for attr in self._REQUIRED:
            assert hasattr(sprite_, attr), \
                'Positional argument sprite__ is missing attribute %s ' % attr
        if timing_ < 0:
            raise ValueError('argument timing_ must be > 0')
        pygame.sprite.Sprite.__init__(self)
        # Mirror the broadcast sprite's attributes onto this instance.
        for attr in self._REQUIRED:
            setattr(self, attr, getattr(sprite_, attr))
        self.gl = gl_
        self.timing = timing_

    def collide(self, rect_, object_) -> None:
        """Spawn a laser-impact effect where this shot hit *object_*.

        :param rect_: pygame.Rect type
        :param object_: sprite that was hit (passed as the impact parent)
        :return: None
        """
        if not hasattr(GL, 'All'):
            raise AttributeError('Class GL missing attribute All.')
        LaserImpact.containers = GL.All
        if not IMPACT_LASER:
            raise NameError('IMPACT_LASER is not define.')
        LaserImpact.images = IMPACT_LASER
        LaserImpact(gl_=self.gl, pos_=rect_.topleft, parent_=object_,
                    timing_=8, blend_=pygame.BLEND_RGBA_ADD, layer_=0)

    def update(self) -> None:
        """No-op: shot mirrors are repositioned by the network thread."""
        ...
class SpriteServer(threading.Thread):
    """TCP server thread receiving Player 2's sprite broadcasts.

    Accepts a single client connection, then loops: read a compressed
    pickled message into a reusable buffer, echo it back, decompress it,
    and rebuild GL.NetGroupAll so the remote sprites can be drawn locally.
    Socket or decode failures raise the SPRITE_SERVER_STOP / P1CS_STOP
    flags so both network threads wind down.
    """

    def __init__(self,
                 gl_,  # Global variables class
                 host_: str,  # host address (string)
                 port_: int,  # port value (integer)
                 ):
        """
        :param gl_: class GL
        :param host_: string; ip address
        :param port_: integer; port to use
        """
        assert isinstance(host_, str), \
            "Positional argument <host_> is type %s , expecting string." % type(host_)
        assert isinstance(port_, int), \
            "Positional argument <port_> is type %s , expecting integer" % type(port_)
        threading.Thread.__init__(self)
        self.gl = gl_
        self.gl.SPRITE_SERVER_STOP = False
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error as error:
            print('\n[-]SpriteServer - ERROR : %s %s' % (error, time.ctime()))
            gl_.P1CS_STOP = True
            self.gl.SPRITE_SERVER_STOP = True
        try:
            self.sock.bind((host_, port_))
            self.sock.listen(1)
        except socket.error as error:
            print('\n[-]SpriteServer - ERROR : %s %s' % (error, time.ctime()))
            gl_.P1CS_STOP = True
            self.gl.SPRITE_SERVER_STOP = True
        self.buf = self.gl.BUFFER
        self.total_bytes = 0
        # Reusable receive buffer: recvfrom_into writes in place.
        self.view = memoryview(bytearray(self.buf))

    def run(self):
        # Accept a connection. The socket must be bound to
        # an address and listening for connections.
        # The return value is a pair (conn, address) where
        # conn is a new socket object usable to send and receive
        # data on the connection, and address is the address
        # bound to the socket on the other end of the connection.
        connection = None
        try:
            # The thread will be stopped here until first connection
            connection, client_address = self.sock.accept()
        except socket.error as error:
            print("\n[-]SpriteServer - Lost connection with Player 2 ...")
            print("\n[-]SpriteServer - ERROR %s %s" % (error, time.ctime()))
            self.gl.P1CS_STOP = True
        while not self.gl.P1CS_STOP and not self.gl.SPRITE_SERVER_STOP:
            # try:
            while not self.gl.P1CS_STOP and not self.gl.SPRITE_SERVER_STOP:
                # Receive data from the socket, writing it into buffer instead
                # of creating a new string. The return value is a pair (nbytes, address)
                # where nbytes is the number of bytes received and address is the address
                # of the socket sending the data.
                try:
                    nbytes, sender = connection.recvfrom_into(self.view, self.buf)
                except socket.error as error:
                    print("\n[-]SpriteServer - Lost connection with Player 2 ...")
                    print("\n[-]SpriteServer - ERROR %s %s" % (error, time.ctime()))
                    # signal to kill both threads SpriteServer and SpriteClient
                    # todo : Player 2 is now deconnected from the server and should not be
                    #  display on the server display, create a method to kill the sprite of Player 2
                    self.gl.SPRITE_SERVER_STOP = True
                    nbytes = 0
                buffer = self.view.tobytes()[:nbytes]
                try:
                    # Echo the payload back (acts as an acknowledgement).
                    connection.sendall(self.view.tobytes()[:nbytes])
                except ConnectionResetError as error:
                    print("\n[-]SpriteServer - Lost connection with Player 2 ...")
                    print("\n[-]SpriteServer - ERROR %s %s" % (error, time.ctime()))
                    # todo : Player 2 is now deconnected from the server and should not be
                    #  display on the server display, create a method to kill the sprite of Player 2
                    # signal to kill both threads SpriteServer and SpriteClient
                    self.gl.SPRITE_SERVER_STOP = True
                try:
                    # Decompress the data frame
                    decompress_data = lz4.frame.decompress(buffer)
                    data = cpickle.loads(decompress_data)
                except Exception:
                    # The decompression error can also happen when
                    # the bytes stream sent is larger than the buffer size.
                    # todo : Player 2 is now deconnected from the server and should not be
                    #  display on the server display, create a method to kill the sprite of Player 2
                    # signal to kill both threads SpriteServer and SpriteClient
                    self.gl.SPRITE_SERVER_STOP = True
                    self.gl.SPRITE_CLIENT_STOP = True
                    data = None
                # todo check if self.gl.NetGroupAll.empty() is faster
                # self.gl.NetGroupAll = LayeredUpdatesModified()
                data_set = set()
                if isinstance(data, list):
                    for sprite_ in data:
                        # print(GL.FRAME, sprite__.id_, sprite__.surface if hasattr(sprite__, "surface") else None)
                        if isinstance(sprite_, set):
                            continue
                        if hasattr(sprite_, 'event'):
                            continue
                        elif hasattr(sprite_, 'sound_name'):
                            try:
                                sound = eval(sprite_.sound_name)
                            except NameError:
                                raise NameError("\n[-]SpriteServer - Sound effect "
                                                "'%s' does not exist " % sprite_.sound_name)
                            # self.gl.MIXER.stop_object(id(sound))
                            # play the sound locally
                            self.gl.MIXER.play(sound_=sound, loop_=False, priority_=0,
                                               volume_=1.0, fade_out_ms=0, panning_=True,
                                               name_=sprite_.sound_name, x_=sprite_.rect.centerx,
                                               object_id_=id(sound),
                                               screenrect_=self.gl.SCREENRECT)
                            continue
                        # DELETE
                        elif hasattr(sprite_, 'to_delete'):
                            # todo need to implement here
                            ...
                        else:
                            assert hasattr(sprite_, 'surface'), "\nBroadcast message is missing <surface> attribute."
                            try:
                                sprite_.image = eval(sprite_.surface)  # load surface
                            except (NameError, AttributeError):
                                raise RuntimeError("\n[-]SpriteServer - Surface "
                                                   "'%s' does not exist " % sprite_.surface)
                            if isinstance(sprite_.image, list):
                                sprite_.image = sprite_.image[sprite_.index % len(sprite_.image) - 1]
                            # --- Apply transformation ---
                            # Apply transformation to texture rotation/scale and
                            # store the transformation inside a buffer
                            # Check if the texture has been already transformed and use
                            # the buffer transformation instead (for best performance).
                            if hasattr(sprite_, 'rotation'):
                                if sprite_.rotation is not None and sprite_.rotation != 0:
                                    if sprite_.id_ in self.gl.XTRANS_ROTATION.keys():
                                        cache_image_, cache_rotation_ = self.gl.XTRANS_ROTATION[sprite_.id_]
                                        if cache_rotation_ == sprite_.rotation:
                                            # BUG FIX: the cache stores an
                                            # (image, rotation) tuple; the
                                            # previous code assigned the whole
                                            # tuple to sprite_.image instead of
                                            # the cached image.
                                            sprite_.image = cache_image_
                                        else:
                                            sprite_.image = pygame.transform.rotate(
                                                sprite_.image, sprite_.rotation)
                                            self.gl.XTRANS_ROTATION.update(
                                                {sprite_.id_: (sprite_.image, sprite_.rotation)})
                                    else:
                                        sprite_.image = pygame.transform.rotate(
                                            sprite_.image, sprite_.rotation)
                                        self.gl.XTRANS_ROTATION.update(
                                            {sprite_.id_: (sprite_.image, sprite_.rotation)})
                            if hasattr(sprite_, 'scale'):
                                if sprite_.scale != 1:
                                    if sprite_.id_ in self.gl.XTRANS_SCALE.keys():
                                        sprite_.image = self.gl.XTRANS_SCALE[sprite_.id_]
                                    else:
                                        sprite_.image = pygame.transform.scale(sprite_.image, (
                                            int(sprite_.image.get_size()[0] * sprite_.scale),
                                            int(sprite_.image.get_size()[1] * sprite_.scale)))
                                        self.gl.XTRANS_SCALE.update({sprite_.id_: sprite_.image})
                        s = None
                        # find Player 2
                        if sprite_.surface == 'P2_SURFACE':
                            s = MirroredPlayer2Class(sprite_)
                        # find player 2 shots
                        elif sprite_.surface == "RED_LASER":
                            s = P2Shot(self.gl, sprite_, 16)
                        # generic sprite that doesn't have
                        # to be instantiated with specific methods
                        else:
                            # Generic sprite (without methods)
                            s = pygame.sprite.Sprite()
                        s.frame = sprite_.frame
                        s.rect = sprite_.rect
                        s.surface = sprite_.surface
                        s.image = sprite_.image
                        s.blend = sprite_.blend
                        s.layer = sprite_.layer
                        s.id_ = sprite_.id_
                        if hasattr(sprite_, 'life'):
                            s.life = sprite_.life
                        if hasattr(sprite_, 'damage'):
                            s.damage = sprite_.damage
                        # Add broadcast sprites to DATA_SET (reset every time a message from client is received).
                        # DATA_SET contains all sprites sent by the client for a specific frame number.
                        # The DATA_SET cannot contains duplicates. The id attribute (memory location)
                        # is used as unique identification number to store sprites in the DATA_SET.
                        # The element in data set represent all active (alive) sprites display on the
                        # client side (before client collision detection).
                        data_set.add(sprite_.id_)
                        # Add the sprite in self.gl.NetGroupAll (if not already in the group) or
                        # update position and texture.
                        # NetGroupAll, will be used in the main loop (locally) to display
                        # all the sprites broadcast from a specific frame number.
                        # If a sprite is not added to that group, it will be ignored
                        # and not display on the client side.
                        if s is not None and len(self.gl.NetGroupAll) > 0:
                            has_ = False
                            for sprites in self.gl.NetGroupAll:
                                if sprites.id_ == s.id_:
                                    has_ = True
                                    sprites.rect = s.rect
                                    sprites.image = sprite_.image
                                    sprites.frame = sprite_.frame
                                    break
                            if not has_:
                                self.gl.NetGroupAll.add(s)
                        else:
                            self.gl.NetGroupAll.add(s)
                # Compare NetGroupAll group to DATA_SET and delete sprite(s)
                # accordingly. Sprites in NetGroupAll and not in DATA_SET will
                # be killed and remove from every groups they are belonging to.
                # When a sprite is deleted, the transformation/scale buffer associated
                # to it will be deleted (using its id).
                for spr_ in self.gl.NetGroupAll:
                    if spr_.id_ not in data_set:
                        spr_.kill()
                        if spr_.id_ in self.gl.XTRANS_SCALE.keys():
                            self.gl.XTRANS_SCALE.pop(spr_.id_)
                        if spr_.id_ in self.gl.XTRANS_ROTATION.keys():
                            self.gl.XTRANS_ROTATION.pop(spr_.id_)
                # Reload original texture
                # for pair in modified_surface.items():
                #     globals()[pair[0]] = pair[1]
                buffer = b''
                # data fully received breaking the loop, clear the buffer
                break
            # pygame.time.wait(1)
        """
        except Exception as error:
            print('\n[-]SpriteServer - ERROR @ frame: %s : %s %s' % (FRAME, error, time.ctime()))
        finally:
            # Clean up the connection
            if 'connection' in globals() and connection is not None:
                connection.close()
        """
        print('\n[-]SpriteServer is now terminated...')
def force_quit(host_: str, port_: int) -> None:
    """
    function used for terminating SERVER/ CLIENT threads listening (blocking socket)

    Connects to the given endpoint and sends a pickled b"QUIT" payload so
    the blocked accept()/recv() wakes up and the thread can exit.
    :param host_: string; ip address
    :param port_: integer; port to use
    :return: None
    """
    assert isinstance(host_, str), \
        "Positional argument <host_> is type %s , expecting string." % type(host_)
    assert isinstance(port_, int), \
        "Positional argument <port_> is type %s , expecting integer." % type(port_)
    # todo create assert ( port should be > 1024)
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host_, port_))
        data = cpickle.dumps(b"QUIT")
        sock.sendall(data)
        print("\n[+]Termination signal sent to SpriteServer...")
    except Exception as error:
        print("\n[-]Cannot send termination signal to SpriteServer...")
        print("\n[-]ERROR %s " % error)
    finally:
        # BUG FIX: 'sock' is a local variable, so the previous check
        # `'sock' in globals()` was always False and the socket was
        # never closed (resource leak). Check the local binding instead.
        if isinstance(sock, socket.socket):
            sock.close()
def collision_detection():
    """Run one frame of server-side collision checks.

    Scans GL.NetGroupAll for the mirrored Player 2 and its shots, then
    tests P1, P1's shots, P2, P2's shots and the transport T1 against
    the asteroid group, dispatching damage via each sprite's
    collide()/hit() methods. Uses the module globals GL, P1 and T1.
    """
    p2_shots = pygame.sprite.Group()
    p2 = None
    # todo:
    # Below code can be simplify by adding single sprite e.g (P1, p2, T1) to other groups in order
    # to create a single iteration using pygame.sprite.groupcollide().
    # Each time P1, p2 and T1 are checking collisions 3 times with the entire group GL.Asteroid.
    # Result to be checked with profiling.
    for sprite_ in GL.NetGroupAll:
        # detect player sprite
        if hasattr(sprite_, 'surface'):
            if sprite_.surface == "P2_SURFACE":
                p2 = sprite_
                continue
            # detect player 2 shots and add them to a specific
            # group p2_shots. p2_shots has to be destroyed before
            # leaving the function.
            elif sprite_.surface == "RED_LASER":
                p2_shots.add(sprite_)
    """
    mygroup = pygame.sprite.Group()
    if GL.PLAYER_SHOTS is not None:
        mygroup.add(GL.PLAYER_SHOTS)
    if p2 is not None:
        mygroup.add(p2)
    if P1 is not None:
        mygroup.add(P1)
    if p2_shots is not None:
        mygroup.add(p2_shots)
    if T1 is not None:
        mygroup.add(T1)
    for players_sprite, asteroids in pygame.sprite.groupcollide(mygroup, GL.ASTEROID, 0, 0).items():
        print(players_sprite, asteroid)
    """
    if P1 is not None and P1.alive():
        # Player 1 collision with asteroid
        # Use collision mask for collision detection
        # It is compulsory to have sprite textures with alpha transparency information
        # in order to check for collision otherwise the collision will be ignored.
        collision = pygame.sprite.spritecollideany(P1, GL.ASTEROID, collided=pygame.sprite.collide_mask)
        if collision is not None:
            P1.collide(collision.damage)
            if hasattr(collision, 'collide'):
                collision.collide(P1, P1.damage)
            else:
                print(type(collision))
                raise AttributeError
        # Player 1 shots collision with asteroids
        for shots, asteroids in pygame.sprite.groupcollide(
                GL.PLAYER_SHOTS, GL.ASTEROID, 0, 0, collided=pygame.sprite.collide_mask).items():
            if asteroids is not None:
                for aster in asteroids:
                    if hasattr(aster, 'hit'):
                        aster.hit(P1, 100)
                        new_rect = shots.rect.clamp(aster.rect)  # todo need to make sure shots is not a list
                        shots.collide(rect_=new_rect, object_=aster, damage_=100)
                    else:
                        print(type(aster))
                        raise AttributeError
    # Player 2 shots colliding with asteroid
    if p2 is not None and p2.alive():
        for shots, asteroids in pygame.sprite.groupcollide(
                p2_shots, GL.ASTEROID, 0, 0).items():  # ,collided=pygame.sprite.collide_mask).items():
            if asteroids is not None:
                for aster in asteroids:
                    if hasattr(aster, 'hit'):
                        aster.hit(None, 100)
                        new_rect = shots.rect.clamp(aster.rect)  # todo need to make sure shots is not a list
                        # shots.collide(rect_=new_rect, object_=aster)
                    else:
                        print(type(aster))
                        raise AttributeError
            # Remove the mirrored shot sprite now that it has hit.
            for spr in GL.NetGroupAll:
                if spr.id_ == shots.id_:
                    spr.kill()
    # Use collision mask for collision detection
    # Check collision between Player 2 and asteroids
    if p2 is not None and p2.alive():
        collision = pygame.sprite.spritecollideany(p2, GL.ASTEROID, collided=pygame.sprite.collide_mask)
        if collision is not None:
            # Cannot send damage to player 2, this
            # is done on the remote host in the collision detection section
            # Pass only damage to the asteroid
            collision.collide(p2.damage)
    # MirroredTransportClass collision with asteroid
    if T1 is not None and T1.alive():
        # todo check collision masks
        collision = pygame.sprite.spritecollideany(T1, GL.ASTEROID, collided=pygame.sprite.collide_mask)
        if collision is not None:
            # transfer damage to transport
            T1.collide(collision.damage)
            # transfer damage to asteroid.
            if hasattr(collision, 'collide'):
                collision.collide(T1, T1.damage)
            else:
                print(type(collision))
                raise AttributeError
    # Tear down the temporary shot group before leaving.
    p2_shots.remove()
    p2_shots.empty()
    del p2_shots
    """
    mygroup.remove()
    mygroup.empty()
    del mygroup
    """
def window():
    """Display the 'Waiting for connection...' splash screen.

    Blocks (at ~70 FPS) until GL.CONNECTION becomes True, spawning a
    fading Square sprite every frame for the background animation.
    Relies on the module globals SCREENRECT, screen, BACKGROUND,
    FRAMEBORDER and FRAME.
    """
    scrw_half = SCREENRECT.w >> 1
    scrh_half = SCREENRECT.h >> 1
    w, h = FRAMEBORDER.get_size()
    screen.blit(BACKGROUND, (0, 0))
    screen.blit(FRAMEBORDER, (scrw_half - (w >> 1), scrh_half - (h >> 1)))
    font = freetype.Font(os.path.join('Assets\\Fonts\\', 'ARCADE_R.ttf'), size=15)
    # NOTE(review): FRAME here is presumably a Surface texture (it has
    # .copy()/.get_width()); the same name is later rebound to an int
    # frame counter in the main script — confirm and consider renaming.
    frame_ = FRAME.copy()
    rect = font.get_rect("Waiting for connection...")
    # Center the message inside the frame surface.
    font.render_to(frame_, ((frame_.get_width() - rect.w) // 2,
                            (frame_.get_height() - rect.h) // 2),
                   "Waiting for connection...",
                   fgcolor=pygame.Color(255, 255, 255), size=15)
    screen.blit(frame_, (scrw_half - (w >> 1) + 20, scrh_half - (h >> 1) + 40))
    clock = pygame.time.Clock()
    frame = 0
    while GL.CONNECTION is False:
        screen.blit(BACKGROUND, (0, 0))
        pygame.event.pump()
        # Spawn a new fading square each frame (self-registers in GL.All).
        Square()
        GL.All.update()
        GL.All.draw(screen)
        screen.blit(frame_, (scrw_half - (w >> 1) + 20, scrh_half - (h >> 1) + 40))
        GL.TIME_PASSED_SECONDS = clock.tick(70)
        frame += 1
        pygame.display.flip()
class Square(pygame.sprite.Sprite):
    """Fading translucent rectangle used on the waiting screen.

    Spawns at a random position with a random size and fades out a step
    on every update until it removes itself from all groups.
    """

    def __init__(self):
        self.layer = -1
        # Registers itself with the global sprite group at construction.
        pygame.sprite.Sprite.__init__(self, GL.All)
        if isinstance(GL.All, pygame.sprite.LayeredUpdates) and self.layer:
            GL.All.change_layer(self, self.layer)
        # randint call order (width, height, then x, y) is preserved so
        # the random sequence matches the original implementation.
        width, height = randint(200, 500), randint(200, 500)
        self.image = pygame.Surface((width, height))
        self.image.fill((10, 15, 25, 15))
        self.image.convert(32, pygame.RLEACCEL)
        centre = (randint(0, SCREENRECT.w), randint(0, SCREENRECT.h))
        self.rect = self.image.get_rect(center=centre)
        self.dt = 0
        self.blend = pygame.BLEND_RGBA_ADD
        self.i = 128

    def update(self):
        """Fade one step; self-destruct once fully transparent."""
        self.image.set_alpha(self.i)
        self.i -= 10
        if self.i < 0:
            self.kill()
if __name__ == '__main__':
    RECT = pygame.sprite.Group()
    import doctest
    doctest.testmod()
    # Loopback addresses by default; swap for the LAN addresses below
    # when playing across two machines.
    SERVER = '127.0.0.1'
    CLIENT = '127.0.0.1'
    # SERVER = '192.168.0.1'
    # CLIENT = '192.168.0.4'
    position = (-800, 0)
    DRIVER = 'windib'  # 'windib' | 'directx'
    os.environ['SDL_VIDEODRIVER'] = DRIVER
    os.environ['SDL_VIDEO_WINDOW_POS'] = str(position[0]) + "," + str(position[1])
    SCREENRECT = pygame.Rect(0, 0, 800, 1024)
    GL.SCREENRECT = SCREENRECT
    screen = pygame.display.set_mode(SCREENRECT.size, pygame.HWSURFACE, 32)
    GL.SCREEN = screen
    pygame.display.set_caption('PLAYER 1')
    # *********************************************************************
    # JOYSTICK
    joystick_count = pygame.joystick.get_count()
    if joystick_count > 0:
        joystick = pygame.joystick.Joystick(0)
        joystick.init()
    else:
        joystick = None
    GL.JOYSTICK = joystick
    # ********************************************************************
    # NETWORK SERVER / CLIENT
    # SpriteServer -> receive client(s) positions
    # 1) Start the Server to receive client(s) position(s)
    # If no connection is made, the thread will remains listening/running
    # in the background, except if an error is raised.
    server = SpriteServer(GL, SERVER, 1025)
    server.start()
    # SpriteClient -> forward all sprites positions
    # 2) Start the Client to send all sprites positions to client(s)
    client = SpriteClient(gl_=GL, host_=CLIENT, port_=1024)
    client.start()
    # Killing threads if no client connected
    if not client.is_alive() or GL.CONNECTION is False:
        print('No player detected')
        GL.SPRITE_CLIENT_STOP = True
        GL.SPRITE_SERVER_STOP = True
        force_quit(SERVER, 1025)
    # Block on the splash screen until a client connects.
    window()
# *********************************************************************
GL.All = LayeredUpdatesModified()
GL.ASTEROID = pygame.sprite.Group()
GL.PLAYER_SHOTS = pygame.sprite.Group()
GL.TRANSPORT = pygame.sprite.GroupSingle()
GL.BACKGROUND = pygame.sprite.Group()
Player1.image = P1_SURFACE
Player1.containers = GL.All
Shot.images = BLUE_LASER
Shot.containers = GL.All, GL.PLAYER_SHOTS
Shot.mask = pygame.mask.from_surface(BLUE_LASER)
Explosion.containers = GL.All
Background.containers = GL.All, GL.BACKGROUND
AfterBurner.containers = GL.All
Asteroid.containers = GL.All, GL.ASTEROID
Asteroid.image = DEIMOS
Transport.image = TRANSPORT
Transport.containers = GL.All, GL.TRANSPORT
Background.image = BACK1_S
B1 = Background(vector_=pygame.math.Vector2(0, 1),
position_=pygame.math.Vector2(x=0, y=-1024),
gl_=GL, layer_=-8, event_name_='BACK1_S')
Background.image = BACK2_S
B2 = Background(vector_=pygame.math.Vector2(0, 1),
position_=pygame.math.Vector2(x=0, y=0),
gl_=GL, layer_=-8, event_name_='BACK2_S')
Background.image = BACK3
B3 = Background(vector_=pygame.math.Vector2(0, 1),
position_=pygame.math.Vector2(x=0, y=-12288),
gl_=GL, layer_=-8, event_name_='BACK3')
Background.image = CL1
B4 = Background(vector_=pygame.math.Vector2(0, 1.2),
position_=pygame.math.Vector2(x=0, y=-480),
gl_=GL, layer_=-7, blend_=pygame.BLEND_RGB_ADD, event_name_='CL1')
Background.image = CL2
B5 = Background(vector_=pygame.math.Vector2(0, 1.2),
position_=pygame.math.Vector2(x=randint(0, 800), y=200),
gl_=GL, layer_=-7, blend_=pygame.BLEND_RGB_ADD, event_name_='CL2')
Background.image = BLUE_PLANET
B6 = Background(vector_=pygame.math.Vector2(0, 1.4),
position_=pygame.math.Vector2(x=randint(0, 800), y=200),
gl_=GL, layer_=-6, blend_=pygame.BLEND_RGBA_MAX, event_name_='BLUE_PLANET')
Background.image = STATION
B7 = Background(vector_=pygame.math.Vector2(0, 1),
position_=pygame.math.Vector2(x=80, y=-12096),
gl_=GL, layer_=-5, event_name_='STATION')
Background.image = NEBULA2
B8 = Background(vector_=pygame.math.Vector2(0, 2),
position_=pygame.math.Vector2(x=0, y=0),
gl_=GL, layer_=-7, blend_=pygame.BLEND_RGBA_ADD, event_name_='NEBULA1')
Background.image = NEBULA1
B9 = Background(vector_=pygame.math.Vector2(0, 2),
position_=pygame.math.Vector2(x=0, y=-1024),
gl_=GL, layer_=-7, blend_=pygame.BLEND_RGBA_ADD, event_name_='NEBULA2')
deletable = [B1, B2, B4, B5, B6, B8, B9]
ShootingStar.containers = GL.All
ShootingStar.image = SHOOTING_STAR
DisplayScore.containers = GL.All
DisplayScore.images = pygame.Surface((10, 10))
GL.P1_SCORE = DisplayScore(gl_=GL, timing_=8)
MakeGems.containers = GL.All
P1 = Player1(GL, 0, (screen.get_size()[0] // 2 - 150, SCREENRECT.h)) # screen.get_size()[1] // 2))
# P1 = None
T1 = Transport(gl_=GL, timing_=8,
pos_=(SCREENRECT.w >> 1, SCREENRECT.h + 200), surface_name_='TRANSPORT', layer_=-2)
ShowLifeBar.containers = GL.All
ShowLifeBar(gl_=GL, player_=P1, left_gradient_=pygame.Color(0, 7, 255, 0),
right_gradient=pygame.Color(120, 255, 255, 0), pos_=(5, 5), timing_=8, scan_=True)
ShowLifeBar(gl_=GL, player_=T1, left_gradient_=pygame.Color(255, 7, 15, 0),
right_gradient=pygame.Color(12, 12, 255, 0),
pos_=(SCREENRECT.w // 2 + 120, 5), timing_=8, scan_=True)
PlayerLost.containers = GL.All
PlayerLost.DIALOGBOX_READOUT_RED = DIALOGBOX_READOUT_RED
PlayerLost.SKULL = SKULL
PlayerWin.containers = GL.All
PlayerWin.DIALOGBOX_READOUT_RED = DIALOGBOX_READOUT_RED
PlayerWin.SKULL = SKULL
font_ = freetype.Font('Assets\\Fonts\\Gtek Technology.ttf', size=14)
ARCADE_FONT = freetype.Font(os.path.join('Assets\\Fonts\\', 'ARCADE_R.ttf'), size=9)
ARCADE_FONT.antialiased = True
LOST = PlayerLost(gl_=GL, font_=font_, image_=FINAL_MISSION.copy(), layer_=1)
WIN = None
GL.All.remove(LOST)
GL.TIME_PASSED_SECONDS = 0
clock = pygame.time.Clock()
GL.STOP_GAME = False
FRAME = 0
GL.FRAME = 0
GL.MIXER = SoundControl(30)
f = open('P1_log.txt', 'w')
text_size = 120
half_frame = 0
pygame.mixer.music.load('Assets\\MUSIC_1.mp3')
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play()
# DIALOGBOX
FRAMEBORDER.blit(FRAMESURFACE, (10, 15))
DIALOG = FRAMEBORDER
DialogBox.containers = GL.All
DialogBox.images = DIALOG
DialogBox.character = NAMIKO
DialogBox.voice_modulation = VOICE_MODULATION
DialogBox.readout = DIALOGBOX_READOUT
FONT = freetype.Font('C:\\Windows\\Fonts\\Arial.ttf')
FONT.antialiased = False
DialogBox.FONT = FONT
DialogBox.text = ["Protect the transport and reach out ", "Altera the green planet outside the", "asteroid belt.",
"There are huge asteroids ahead, focus ", "and dodge them carefully.", "Have fun and good luck.",
" ", "Over and out!", "Masako"]
im = pygame.image.load("Assets\\icon_glareFx_blue.png").convert()
DialogBox.scan_image = pygame.image.load("Assets\\icon_glareFx_blue.png").convert()
DialogBox.scan_image.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
TIME_PASSED_SECONDS = 0
FRAME = 0
GL.FRAME = FRAME
masako = DialogBox(gl_=GL, location_=(-DIALOG.get_width(), 150),
speed_=0, layer_=0, voice_=True, scan_=True, start_=0, direction_='RIGHT',
text_color_=pygame.Color(149, 119, 236, 245), fadein_=500, fadeout_=1000)
cobra = pygame.image.load('Assets\\Cobra.png').convert()
cobra.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
cobra = pygame.transform.smoothscale(cobra, (100, 170))
DialogBox.character = [cobra, cobra]
DialogBox.text = ["Don't worry, it won't take long", "before I wreck everything.", " "]
DialogBox.images = DIALOG
DialogBox.scan_image = pygame.image.load("Assets\\icon_glareFx_red.png").convert()
DialogBox.scan_image.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
cob = DialogBox(gl_=GL, location_=(SCREENRECT.w + DialogBox.images.get_width(), 650),
speed_=0, layer_=-3, voice_=True, scan_=True, start_=500, direction_='LEFT',
text_color_=pygame.Color(249, 254, 56, 245), fadein_=500, fadeout_=1100)
    # Main game loop: one iteration per rendered frame (~70 FPS cap).
    while not GL.STOP_GAME:
        # pygame.event.pump()
        # print('Server frame # %s vector1 %s vector2 %s' % (FRAME, vector1, vector2))
        # Send an event to the client triggering the next frame
        GL.NEXT_FRAME.set()  # set the event
        event_obj = EventAttr(event_=GL.NEXT_FRAME, frame_=GL.FRAME)
        Broadcast(event_obj).next_frame()
        # Create cosmic dust
        if GL.FRAME % 10 == 0:
            if len(COSMIC_DUST_ARRAY) < 15:
                create_dust(GL)
        # Asteroid shows up at frame 1200
        if 1250 < FRAME < 6500:
            if len(GL.ASTEROID) < 15:
                for _ in range(15):
                    asteroid = random.choices(['DEIMOS', 'EPIMET'])[0]
                    scale = random.uniform(0.1, 0.5)
                    rotation = random.randint(0, 360)
                    Asteroid.image = pygame.transform.rotozoom(eval(asteroid).copy(), rotation, scale)
                    GL.ASTEROID.add(Asteroid(asteroid_name_=asteroid, gl_=GL, blend_=0, rotation_=rotation,
                                             scale_=scale, timing_=15, layer_=-2))
        # One-off boss asteroid with a direction arrow at frame 2048.
        if FRAME == 2048:
            Asteroid.image = MAJOR_ASTEROID
            major_asteroid = Asteroid(asteroid_name_='MAJOR_ASTEROID',
                                      gl_=GL, blend_=0, rotation_=0,
                                      scale_=1, timing_=15, layer_=-3)
            GL.ASTEROID.add(major_asteroid)
            Direction.images = ARROW_RIGHT
            Direction.containers = GL.All
            Direction(gl_=GL, parent_=major_asteroid, timing_=15, layer_=0)
        if joystick is not None:
            JL3 = pygame.math.Vector2(joystick.get_axis(0), joystick.get_axis(1))
        for event in pygame.event.get():
            keys = pygame.key.get_pressed()
            GL.KEYS = keys
            if event.type == pygame.QUIT:
                print('Quitting')
                GL.STOP_GAME = True
            if keys[pygame.K_ESCAPE]:
                GL.STOP_GAME = True
            if keys[pygame.K_F8]:
                pygame.image.save(screen, 'P1_screenshot.png')
            if event.type == pygame.MOUSEMOTION:
                GL.MOUSE_POS = pygame.math.Vector2(event.pos)
        # ~0.7% chance per frame of spawning a shooting star.
        if random.randint(0, 1000) > 992:
            shoot = ShootingStar(gl_=GL, layer_=-4, timing_=8, surface_name_='SHOOTING_STAR')
        # update sprites positions and add sprites transformation.
        # At this stage no sprites are display onto the screen.
        # If any of your sprite class is bliting directly onto the variable screen,
        # then it might be override by the method GL.All.draw(screen) below.
        GL.All.update()
        # Always display the group GL.All first has it contains the background surfaces
        # Any sprite attached to the group GL.All and blit directly to the screen surface
        # will be override by the network sprite if sprites occupy the same location..
        # Ideally all sprites should be on the same group in order to draw them ordered by
        # their layer number.
        GL.All.draw(screen)
        # Draw the network sprite above the background
        if GL.CONNECTION:
            GL.NetGroupAll.update()  # -> run all the update method
            GL.NetGroupAll.draw(screen)
        # *************************************************************
        # Draw here all the other sprites that does not belongs to
        # common groups (GL.All & GL.NetGroupAll).
        # Sprite blit last onto the display are at the top layer.
        # Be aware that any surface(s) blit with blend attribute will
        # also blend with the entire sprite scene (blending with
        # sprites from all layers)
        # e.g Drawing GUI and life/energy sprite bars, screen bullet impacts
        # special effects, final score, ending screen and text inputs etc.
        # Update the sound Controller
        GL.MIXER.update()
        collision_detection()
        # Rebuild the inventory of live sprite ids for the client.
        Broadcast.live_object_inventory = set()
        for sprite_ in GL.All:
            if hasattr(sprite_, 'id_'):
                Broadcast.live_object_inventory.add(sprite_.id_)
        Broadcast.MessageQueue.append(Broadcast.live_object_inventory)
        # Authorize Player 1 to send data to the client.
        # Allowing to send only one set of data every frame.
        # The clear method is used by the class SpriteClient right after receiving the thread Event
        # We are sending the network messages right after the collision detection to make sure the
        # the client will receive the most accurate sprite status (killed, alive, life quantity etc)
        GL.SIGNAL.set()
        # Uncomment below to display the transport fly zone
        # half = SCREENRECT.w >> 1
        # safe_zone = pygame.Rect(half - 200, half, 400, SCREENRECT.bottom - half)
        # pygame.draw.rect(screen, pygame.Color(255, 0, 0, 0), safe_zone, 1)
        # dust particles (show on the top of all other sprites)
        if len(COSMIC_DUST_ARRAY) > 0:
            display_dust(GL)
        # screen.blit(LIFE_HUD, (0, 0))
        # Display the message get ready
        if FRAME < 200:
            if FRAME % 2 == 0:
                half_frame += 1
            # Shrinking "get ready" title, clamped to size 35.
            size__ = max(35, text_size - half_frame if FRAME < text_size else 35)
            rect1 = font_.get_rect("get ready", style=freetype.STYLE_NORMAL,
                                   size=size__)
            font_.render_to(screen, ((SCREENRECT.w >> 1) - (rect1.w >> 1), (SCREENRECT.h >> 1)),
                            "get ready", fgcolor=pygame.Color(255, 244, 78),
                            size=size__)
        # Delete background sprite when not needed
        if FRAME > 10240:
            if not B4.rect.colliderect(SCREENRECT):
                if B4 is not None and B4.alive():
                    B4.kill()
            if not B5.rect.colliderect(SCREENRECT):
                if B5 is not None and B5.alive():
                    B5.kill()
            if not B6.rect.colliderect(SCREENRECT):
                if B6 is not None and B6.alive():
                    B6.kill()
        # NOTE(review): this branch is unreachable — 11864 > 10240, so any
        # FRAME satisfying it already took the `if` branch above. Confirm
        # whether `elif FRAME > 11864` was meant to be a nested check.
        elif FRAME > 11864:
            B1.kill()
        # Delete background sprite when not needed
        if len(deletable) > 0:
            if FRAME > 12288:
                for del_sprite in deletable:
                    if del_sprite is not None:
                        del_sprite.kill()
        GL.TIME_PASSED_SECONDS = clock.tick(70)
        GL.SPEED_FACTOR = GL.TIME_PASSED_SECONDS / 1000
        GL.FPS.append(clock.get_fps())
        masako.time_passed = GL.TIME_PASSED_SECONDS
        pygame.display.flip()
        FRAME += 1
        GL.FRAME = FRAME
        # print(len(Broadcast.MessageQueue), len(GL.ASTEROID), len(GL.All), len(Broadcast.live_object_inventory))
        """
        # logging entries
        if not f.closed:
            f.write('\nSENDING ' + str(GL.FRAME))
            for element in Broadcast.MessageQueue:
                if hasattr(element, 'to_delete'):
                    for key, value in element.to_delete.items():
                        f.write('\nid ' + str(key) + ' surface ' + str(value))
            f.write('\nLIVE ' + str(Broadcast.live_object_inventory))
        """
        # Very import !! Make sure to empty the
        # network list before next frame
        Broadcast.empty()
        # Delayed display of the LOST overlay: wait 5 s then add it.
        def my_timer():
            timer = time.time()
            while time.time() - timer < 5:
                time.sleep(0.01)
            GL.All.add(LOST)
        # Check if Player 1 is still alive otherwise display 'mission fail'
        if not GL.All.has(P1):
            if not GL.All.has(LOST):
                t = threading.Thread(target=my_timer, args=())
                t.start()
        else:
            if FRAME > 12288 and P1.alive():
                if WIN is None:
                    WIN = PlayerWin(gl_=GL, player_=P1,
                                    font_=font_, image_=FINAL_MISSION.copy(), layer_=1, score_=GL.P1_SCORE.score)
        # Save data into a log file
        # print(GL.FRAME)
        """
        # logging entries
        for r in GL.NetGroupAll:
            if not f.closed:
                f.write('\n NETGROUPALL ' + str(GL.FRAME) + " ")
                f.write(' Surface: ' + str(r.surface) if hasattr(r, 'surface') else str(r))
                f.write(' Rect: ' + str(r.rect) if hasattr(r, 'rect') else '')
                f.write(' id: ' + str(r.id_))
        for r in GL.All:
            if not f.closed:
                f.write('\n GL.All ' + str(GL.FRAME) + ' ')
                f.write(' Surface: ' + str(r.surface) if hasattr(r, 'surface') else str(r))
                f.write(' Rect: ' + str(r.rect) if hasattr(r, 'rect') else '')
                f.write(' id: ' + str(r.id_) if hasattr(r, 'id_') else '')
        """
    # --- Tear-down: stop network threads, show stats, quit pygame ---
    f.close()
    GL.SPRITE_CLIENT_STOP = True
    GL.SPRITE_SERVER_STOP = True
    force_quit(SERVER, 1025)
    import matplotlib.pyplot as plt
    plt.title("FPS ")
    # plt.plot(GL.BYTES_SENT)
    plt.plot(GL.FPS)
    plt.draw()
    plt.show()
    plt.title("BYTES RECEIVED")
    plt.plot(GL.BYTES_RECEIVED)
    plt.draw()
    plt.show()
    pygame.quit()
|
ex7.py | #!/usr/bin/env python
from net_system.models import NetworkDevice, Credentials
from netmiko import ConnectHandler
import django, datetime, multiprocessing
def show_arp(device):
    """Connect to *device* over SSH via netmiko and print its ARP table.

    Parameters
    ----------
    device : NetworkDevice
        Django model instance carrying device_type, ip_address, port and
        a related Credentials object.
    """
    remote_conn = ConnectHandler(device_type=device.device_type,
                                 ip=device.ip_address,
                                 username=device.credentials.username,
                                 password=device.credentials.password,
                                 port=device.port)
    # Python 2 print statements; the first line is a centered banner.
    print device.device_name.center(80, '-')
    print remote_conn.send_command('show arp')
    # NOTE(review): the SSH session is never disconnected -- presumably
    # fine for a short-lived worker process, but confirm.
def main():
    """Collect the ARP table of every NetworkDevice in parallel.

    Spawns one multiprocessing.Process per device, waits for all of them,
    then prints the total elapsed time.
    """
    django.setup()  # initialise the Django ORM before touching models
    start = datetime.datetime.now()
    procs = []
    for device in NetworkDevice.objects.all():
        proc = multiprocessing.Process(target=show_arp, args=(device,))
        proc.start()
        procs.append(proc)
    # Wait for every worker before reporting the timing.
    for proc in procs:
        proc.join()
    print "Elapsed time: {}".format(datetime.datetime.now() - start)

if __name__ == "__main__":
    main()
|
test_cli.py | import os
import json
import pytest
import textwrap
from unittest.mock import patch
from multiprocessing import Process, Event
from pymem.cli import main, format_output
from click.testing import CliRunner
@pytest.fixture
def cli():
    """Provide a Click CliRunner for invoking the pymem CLI in-process."""
    return CliRunner()
def test_invalid_pid(cli):
    """The CLI must exit with code 1 when the target process is missing."""
    target_pid = os.getpid()
    with patch("pymem.cli.check_process_exist") as exist_check:
        exist_check.return_value = False
        outcome = cli.invoke(main, [str(target_pid)])
        assert outcome.exit_code == 1
        assert outcome.output == f"Process(pid={target_pid}) is not found.\n"
def test_no_debugger(cli):
    """The CLI must exit with code 1 when no debugger binary is found."""
    target_pid = os.getpid()
    with patch("distutils.spawn.find_executable") as finder:
        finder.return_value = ""
        outcome = cli.invoke(main, [str(target_pid)])
        assert outcome.exit_code == 1
        assert outcome.output == "Could not find debugger in your bin path.\n"
def test_print(cli):
    """Happy path: the CLI emits a JSON report for a live pid.

    A real child process is spawned so the CLI sees a valid pid; every
    debugger-backed collector is patched out with canned data.
    """
    def sleep_until_wake(e):
        # Child blocks until the parent sets the event.
        e.wait()

    e = Event()
    p = Process(target=sleep_until_wake, args=(e,))
    p.start()
    pid = p.pid
    with patch("distutils.spawn.find_executable") as mock_find_executable:
        mock_find_executable.return_value = "/usr/bin/gdb"
        with patch("pymem.cli.get_objects") as mock_get_objects:
            mock_get_objects.return_value = [
                {
                    "type": "<class 'abc.ABCMeta'>",
                    "count": 91,
                    "total_size": "88.88 KiB",
                }
            ]
            with patch("pymem.cli.get_garbages") as mock_get_garbages:
                mock_get_garbages.return_value = {"count": 0, "objects": []}
                with patch("pymem.cli.get_malloc_stats") as mock_get_malloc_stats:
                    mock_get_malloc_stats.return_value = {"arenas_allocated_total": 1048}
                    result = cli.invoke(main, [str(pid)])
    # make sure subprocess exit, before throwing AssertionError.
    e.set()
    p.join()
    assert result.exit_code == 0
    data = json.loads(result.output)
    assert data["objects"] == [
        {
            "type": "<class 'abc.ABCMeta'>",
            "count": 91,
            "total_size": "88.88 KiB",
        }
    ]
    assert data["garbages"] == {"count": 0, "objects": []}
    # summary comes from live process inspection, so only its presence is
    # asserted, not its contents.
    assert "summary" in data
    assert data["malloc_stats"] == {"arenas_allocated_total": 1048}
def test_format_output():
    """format_output must render the JSON report as aligned text tables."""
    # Input mirrors the structure produced by the CLI's JSON mode.
    text = r"""{
        "objects": [
            {
                "type": "<class 'list'>",
                "count": 5797,
                "total_size": "6.24 MiB"
            },
            {
                "type": "<class 'str'>",
                "count": 26988,
                "total_size": "3.21 MiB"
            }
        ],
        "garbages": {
            "count": 1,
            "objects": ["<__main__.A at 0x7f7a8d781b50>"]
        },
        "malloc_stats": {
            "arenas_allocated_total": 1725,
            "arenas_reclaimed": 1661,
            "arenas_highwater_mark": 73,
            "arenas_allocated_current": 64,
            "bytes_in_allocated_blocks": 15942032,
            "bytes_in_available_blocks": 127776,
            "bytes_lost_to_pool_headers": 192528,
            "bytes_lost_to_quantization": 166720,
            "bytes_lost_to_arena_alignment": 0
        },
        "summary": {
            "private": "39.28 MiB",
            "shared": "41.82 MiB",
            "total": "81.10 MiB",
            "swap": "0.00 MiB"
        }
    }"""
    data = json.loads(text)
    output = format_output(data, "text")
    # Expected "text" rendering; dedent strips the common leading indent.
    expect = textwrap.dedent("""\
        summary:
        +---------+-----------+
        | private | 39.28 MiB |
        | shared  | 41.82 MiB |
        | total   | 81.10 MiB |
        | swap    | 0.00 MiB  |
        +---------+-----------+
        garbages(total: 1):
        +--------------------------------+
        | <__main__.A at 0x7f7a8d781b50> |
        +--------------------------------+
        malloc stats:
        +-------------------------------+----------+
        | arenas_allocated_total        | 1725     |
        | arenas_reclaimed              | 1661     |
        | arenas_highwater_mark         | 73       |
        | arenas_allocated_current      | 64       |
        | bytes_in_allocated_blocks     | 15942032 |
        | bytes_in_available_blocks     | 127776   |
        | bytes_lost_to_pool_headers    | 192528   |
        | bytes_lost_to_quantization    | 166720   |
        | bytes_lost_to_arena_alignment | 0        |
        +-------------------------------+----------+
        objects:
        +----------------+-------+------------+
        | type           | count | total_size |
        +----------------+-------+------------+
        | <class 'list'> | 5797  | 6.24 MiB   |
        | <class 'str'>  | 26988 | 3.21 MiB   |
        +----------------+-------+------------+
        """)
    assert output == expect
|
start_host.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Start host."""
from __future__ import print_function
from builtins import range
from builtins import str
from future import standard_library
standard_library.install_aliases()
import os
import shutil
import socket
import subprocess
import sys
import threading
import time
MNT_DIR = '/mnt/scratch0'
SRC_DIR = os.path.join(MNT_DIR, 'clusterfuzz')
BOT_BASEDIR = os.path.join(MNT_DIR, 'bots')
NUM_WORKERS_PER_HOST = int(os.environ['NUM_WORKERS_PER_HOST'])
def setup_environment():
    """Set up host environment.

    Points the untrusted-worker environment variables at the scratch mount
    and ensures the per-bot base directory exists.
    """
    os.environ['QUEUE_OVERRIDE'] = 'LINUX_UNTRUSTED'
    os.environ['WORKER_ROOT_DIR'] = os.path.join(MNT_DIR, 'clusterfuzz')
    os.environ['WORKER_BOT_TMPDIR'] = os.path.join(MNT_DIR, 'tmp')
    # NOTE(review): check-then-create is racy, but this runs once per host
    # start so a concurrent creator is presumably impossible -- confirm.
    if not os.path.exists(BOT_BASEDIR):
        os.mkdir(BOT_BASEDIR)
def start_bot_instance(instance_num):
    """Set up an isolated bot directory and run the bot in a restart loop.

    Creates a per-bot ROOT_DIR/TMPDIR for bot number *instance_num*,
    copies a pristine source tree into it, then restarts run.py forever.
    Never returns; intended as a thread target.
    """
    env = os.environ.copy()

    host_name = os.getenv('HOSTNAME', socket.gethostname())
    bot_name = '%s-%d' % (host_name, instance_num)

    env['BOT_NAME'] = bot_name
    env['HOST_INSTANCE_NAME'] = host_name
    env['HOST_INSTANCE_NUM'] = str(instance_num)

    bot_directory = os.path.join(BOT_BASEDIR, bot_name)
    bot_root_directory = os.path.join(bot_directory, 'clusterfuzz')
    tmp_directory = os.path.join(bot_directory, 'tmp')

    if not os.path.exists(bot_directory):
        os.mkdir(bot_directory)
        os.mkdir(tmp_directory)

    env['ROOT_DIR'] = bot_root_directory
    env['BOT_TMPDIR'] = tmp_directory
    env['PYTHONPATH'] = os.path.join(bot_root_directory, 'src')

    # Start from a fresh copy of the source tree on every host start.
    if os.path.exists(bot_root_directory):
        shutil.rmtree(bot_root_directory)
    shutil.copytree(SRC_DIR, bot_root_directory)

    while True:
        # BUGFIX: the original command used '2>&1 > console.txt', which
        # redirects stderr to the *inherited* stdout and only stdout to the
        # file. '> console.txt 2>&1' captures both streams in console.txt.
        bot_proc = subprocess.Popen(
            sys.executable + ' src/python/bot/startup/run.py > console.txt 2>&1',
            shell=True,
            env=env,
            cwd=bot_root_directory)
        bot_proc.wait()
        print('Instance %i exited.' % instance_num, file=sys.stderr)
def main():
    """Launch NUM_WORKERS_PER_HOST bot threads, then block forever."""
    setup_environment()

    for i in range(NUM_WORKERS_PER_HOST):
        print('Starting bot %i.' % i)
        thread = threading.Thread(target=start_bot_instance, args=(i,))
        thread.start()

    while True:
        # sleep forever
        time.sleep(1000)

if __name__ == '__main__':
    main()
|
test_cgo_engine.py | import json
import os
import sys
import threading
import unittest
import logging
import time
import torch
from pathlib import Path
from nni.retiarii.execution.cgo_engine import CGOExecutionEngine
from nni.retiarii.execution.logical_optimizer.logical_plan import LogicalPlan
from nni.retiarii.execution.logical_optimizer.opt_dedup_input import DedupInputOptimizer
from nni.retiarii.codegen import model_to_pytorch_script
from nni.retiarii import Model, Node
from nni.retiarii import Model, submit_models
from nni.retiarii.codegen import model_to_pytorch_script
from nni.retiarii.integration import RetiariiAdvisor
from nni.retiarii.trainer import PyTorchImageClassificationTrainer, PyTorchMultiModelTrainer
from nni.retiarii.utils import import_
def _load_mnist(n_models: int = 1):
    """Load the converted MNIST model, optionally forked n_models times.

    Returns a single Model when n_models == 1, otherwise a list holding
    the original model followed by n_models - 1 forks.
    """
    model_path = Path(__file__).parent / 'converted_mnist_pytorch.json'
    with open(model_path) as fp:
        base_model = Model._load(json.load(fp))
    if n_models == 1:
        return base_model
    return [base_model] + [base_model.fork() for _ in range(n_models - 1)]
@unittest.skip('Skipped in this version')
class CGOEngineTest(unittest.TestCase):
    """End-to-end test of the CGO execution engine (currently skipped)."""

    def test_submit_models(self):
        """Submit two MNIST models and relay trial metrics to the advisor."""
        os.environ['CGO'] = 'true'
        os.makedirs('generated', exist_ok=True)
        from nni.runtime import protocol, platform
        import nni.runtime.platform.test as tt
        protocol._out_file = open('generated/debug_protocol_out_file.py', 'wb')
        protocol._in_file = open('generated/debug_protocol_out_file.py', 'rb')

        models = _load_mnist(2)
        advisor = RetiariiAdvisor()
        submit_models(*models)

        # The trial only runs when two CUDA devices are available.
        if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
            cmd, data = protocol.receive()
            params = json.loads(data)
            params['parameters']['training_kwargs']['max_steps'] = 100
            tt.init_params(params)

            # BUGFIX: pass the callable itself. The original code wrote
            # target=CGOExecutionEngine.trial_execute_graph(), which ran the
            # trial synchronously on this thread and handed its *return
            # value* to Thread(target=...).
            trial_thread = threading.Thread(
                target=CGOExecutionEngine.trial_execute_graph)
            trial_thread.start()

            # Poll for metrics until the trial thread finishes, forwarding
            # each new metric to the advisor exactly once.
            last_metric = None
            while True:
                time.sleep(1)
                if tt._last_metric:
                    metric = tt.get_last_metric()
                    if metric == last_metric:
                        continue
                    advisor.handle_report_metric_data(metric)
                    last_metric = metric
                if not trial_thread.is_alive():
                    break
            trial_thread.join()

            advisor.stopping = True
            advisor.default_worker.join()
            advisor.assessor_worker.join()


if __name__ == '__main__':
    unittest.main()
|
bldc_monitor.py | '''
bldc_monitor.py
Distributed under MIT License
Copyright (c) 2020 Haoze Zhang | Brown Engineering
'''
import os
import time
import threading
import yaml
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scope import Scope
from messenger import Messenger
import cmd_pb2
import info_pb2
cmdFilePath = "cmd.yaml"
def populateCmd(cmdData, mid):
    """Build a Cmd protobuf with id *mid* from the parsed YAML mapping."""
    container = cmd_pb2.Cmd()
    container.id = mid
    pid = container.pid_cmd
    # Copy every pid_cmd field from the YAML dict onto the message.
    for field, value in cmdData["pid_cmd"].items():
        setattr(pid, field, value)
    return container
def loadCmdFile(messenger):
    """Poll cmd.yaml and send a Cmd message whenever the file changes.

    Runs forever at a 0.5 s poll interval; intended as a daemon-thread
    target.
    """
    lastModTime = os.path.getmtime(cmdFilePath)
    mid = 0  # monotonically increasing message id
    while True:
        if os.path.getmtime(cmdFilePath) > lastModTime:
            with open(cmdFilePath, "r") as f:
                # NOTE(review): yaml.Loader permits arbitrary object
                # construction; acceptable for a local operator-edited
                # file, but never point this at untrusted input
                # (yaml.safe_load would be the safe choice).
                cmd = yaml.load(f, Loader=yaml.Loader)
            cmdContainer = populateCmd(cmd, mid)
            messenger.send(cmdContainer)
            print(f"Command sent:\n{cmdContainer}")
            lastModTime = os.path.getmtime(cmdFilePath)
            mid += 1
        time.sleep(0.5)
def signalSource(messenger):
    """Receive Info messages and stream them into the two scopes.

    Runs forever; intended as a daemon-thread target. Updates the speed
    and control-effort scopes plus the floating value annotations.
    """
    scopespd = Scope.scopes[0]; scopeact = Scope.scopes[1]
    # Floating right-edge annotations tracking the latest values.
    spdAnno = scopespd.ax.annotate(
        '0.00', xy=(1, 0), va='center', ha="left",
        xycoords=('axes fraction',"data"),
        bbox=dict(boxstyle="round,pad=0.1", fc="#ADD6FF", ec="none", alpha=0.7))
    uAnno = scopeact.ax.annotate(
        '0.00', xy=(1, 0), va='center', ha="left",
        xycoords=('axes fraction',"data"),
        bbox=dict(boxstyle="round,pad=0.1", fc="#ADD6FF", ec="none", alpha=0.7))
    scopespd.ax.grid(True)
    scopeact.ax.set_ymargin(0.1)
    scopeact.canvasFlag = True
    while True:
        msg = messenger.receive()
        # assumes msg.timestamp is in 10 ms ticks (/100 -> seconds) --
        # TODO confirm against the firmware side.
        scopespd.addData("y", msg.timestamp/100.0, msg.pid_info.y_real)
        scopeact.addData("u", msg.timestamp/100.0, msg.pid_info.x)
        spdAnno.set_y(msg.pid_info.y_real)
        spdAnno.set_text("{:.1f}".format(msg.pid_info.y_real))
        uAnno.set_y(msg.pid_info.x)
        uAnno.set_text("{:.2f}".format(msg.pid_info.x))
def main():
    """Build the monitor UI, start the I/O threads, and show the plot."""
    fig = plt.figure()
    fig.canvas.set_window_title("BLDC Monitor")

    axspd = plt.subplot2grid((2, 1), (0, 0))
    axact = plt.subplot2grid((2, 1), (1, 0), sharex=axspd)
    axspd.set_ylabel("Speed [Hz]")
    axact.set_ylabel("Control Effort [%]")  # fixed label typo "Controll"
    axact.set_xlabel("Time [s]")
    Scope(axspd, {"ylim": (-5, 80)})
    Scope(axact)

    # Keep a reference so the animation is not garbage-collected.
    anim = animation.FuncAnimation(fig, Scope.updateAll, interval=100)

    with Messenger(info_pb2.Info, True) as messenger:
        # Daemon threads so they do not keep the process alive once the
        # plot window is closed; Thread.setDaemon() is deprecated in
        # favor of the .daemon attribute.
        receiver = threading.Thread(target=signalSource, args=(messenger,))
        receiver.daemon = True
        receiver.start()

        watcher = threading.Thread(target=loadCmdFile, args=(messenger,))
        watcher.daemon = True
        watcher.start()

        plt.show()


if __name__ == "__main__":
    main()
|
collective_ops_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
  """Thin namespace exposing the v1 collective op entry points."""
  all_reduce = _collective_ops.all_reduce
  all_gather = _collective_ops.all_gather
class CollectiveOpsV2(object):
  """v2 collective ops; scalar parameters are converted to tensors."""

  @staticmethod
  def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
    # Wrap the Python ints in identity ops so the v2 kernels receive
    # tensor inputs.
    group_size = array_ops.identity(group_size)
    group_key = array_ops.identity(group_key)
    instance_key = array_ops.identity(instance_key)
    return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
                                         *args, **kwargs)

  @staticmethod
  def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
    group_size = array_ops.identity(group_size)
    group_key = array_ops.identity(group_key)
    instance_key = array_ops.identity(instance_key)
    return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
                                         *args, **kwargs)
# CPU always uses RING; GPU runs both RING and NCCL and requires 2 GPUs.
device_combination = (
    combinations.combine(device='CPU', communication='RING', required_gpus=0) +
    combinations.combine(
        device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
@combinations.generate(
    combinations.times(
        combinations.combine(
            collective_ops=[
                combinations.NamedObject('v1', CollectiveOpsV1),
                combinations.NamedObject('v2', CollectiveOpsV2)
            ],
            mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
  """Basic functional tests for v1/v2 all-reduce and all-gather."""

  def setUp(self):
    # Reset the eager context so each test gets a fresh collective executor.
    _setup_context()
    super().setUp()

  def testReduce(self, collective_ops, device, communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device

    @def_function.function
    def run_all_reduce_1device():
      with ops.device(dev0):
        in_value = constant_op.constant([1.])
        group_size = 1
        group_key = 1
        instance_key = 1
        return collective_ops.all_reduce(
            in_value,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    @def_function.function
    def run_all_reduce_2devices():
      in_value = constant_op.constant([1.])
      group_size = 2
      group_key = 2
      instance_key = 2
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_reduce(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_reduce(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      return collectives

    # Reducing [1.] over one device is the identity; over two it sums to 2.
    self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
    for result in run_all_reduce_2devices():
      self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)

  def testGather(self, collective_ops, device, communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device

    @def_function.function
    def run_all_gather_1device():
      with ops.device(dev0):
        in_value = constant_op.constant([1.])
        group_size = 1
        group_key = 1
        instance_key = 1
        return collective_ops.all_gather(
            in_value,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    @def_function.function
    def run_all_gather_2devices():
      in_value = constant_op.constant([1.])
      group_size = 2
      group_key = 2
      instance_key = 2
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_gather(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_gather(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      return collectives

    # Gathering [1.] over two devices concatenates to [1., 1.].
    self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
    for result in run_all_gather_2devices():
      self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)

  def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
                                         communication):
    if device == 'GPU' and context.num_gpus() < 4:
      self.skipTest('not enough GPU')
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    dev2 = '/device:%s:2' % device
    dev3 = '/device:%s:3' % device

    @def_function.function
    def run_all_reduce_4devices_same_instance_key():
      # Use a common instance key for both groups.
      instance_key = 0
      # We will create 2 groups each with 2 devices.
      group_size = 2
      # Group 0 comprises dev0 and dev1.
      group0_key = 0
      # Group 1 comprises dev2 and dev3.
      group1_key = 1
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(1.), group_size, group0_key, instance_key))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(2.), group_size, group0_key, instance_key))
      with ops.device(dev2):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(3.), group_size, group1_key, instance_key))
      with ops.device(dev3):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(4.), group_size, group1_key, instance_key))
      return collectives

    # Group 0 sums 1+2, group 1 sums 3+4, despite the shared instance key.
    results = run_all_reduce_4devices_same_instance_key()
    self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)

  def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
    if communication == 'NCCL':
      self.skipTest('b/170672646: it crashes with NCCL and group size one')
    dev0 = '/device:%s:0' % device

    group_size = 1
    group_key = 100
    instance_key = 100
    in_value = [1., 2., 3., 4.]
    in_tensor = constant_op.constant(in_value)

    # With group size one both ops must return the input unchanged.
    with ops.device(dev0):
      reduced_tensor = collective_ops.all_reduce(
          in_tensor,
          group_size,
          group_key,
          instance_key,
          communication_hint=communication)
    self.assertAllEqual(in_value, reduced_tensor.numpy())

    with ops.device(dev0):
      gathered_tensor = collective_ops.all_gather(
          in_tensor,
          group_size,
          group_key,
          instance_key,
          communication_hint=communication)
    self.assertAllEqual(in_value, gathered_tensor.numpy())

  def testMultipleGroups(self, collective_ops, device, communication):
    if device == 'GPU' and context.num_gpus() < 4:
      self.skipTest('not enough GPU')
    num_elements = 4

    @def_function.function
    def run_all_reduce(group_size, group_key):
      instance_key = group_key
      input_value = [float(group_key) for i in range(num_elements)]
      collectives = []
      for device_idx in range(group_size):
        with ops.device('/{}:{}'.format(device, device_idx)):
          input_tensor = constant_op.constant(input_value)
          collectives.append(
              collective_ops.all_reduce(
                  input_tensor,
                  group_size,
                  group_key,
                  instance_key,
                  communication_hint=communication))
      return collectives

    def run_and_assert(group_size, group_key):
      # Each device contributes group_key, so the sum is key * size.
      for reduced_tensor in run_all_reduce(group_size, group_key):
        self.assertAllEqual(
            [float(group_key) * group_size for i in range(num_elements)],
            reduced_tensor.numpy())

    run_and_assert(group_size=2, group_key=1)
    run_and_assert(group_size=3, group_key=2)
@combinations.generate(
    combinations.times(
        combinations.combine(
            collective_op=[
                combinations.NamedObject('all_reduce',
                                         CollectiveOpsV1.all_reduce),
                combinations.NamedObject('all_reduce_v2',
                                         CollectiveOpsV2.all_reduce),
                combinations.NamedObject('all_gather',
                                         CollectiveOpsV1.all_gather),
                combinations.NamedObject('all_gather_v2',
                                         CollectiveOpsV2.all_gather),
            ],
            mode='eager'), device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
  """Tests that aborting collective ops unblocks and poisons pending ops."""

  def setUp(self):
    # Reset the eager context so each test gets a fresh collective executor.
    _setup_context()
    super().setUp()

  def testAbortGroupParamsResolution(self, collective_op, device,
                                     communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    def abort_fn():
      # Give the collective below time to block on group resolution.
      time.sleep(2)
      context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

    t = threading.Thread(target=abort_fn)
    t.start()

    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      # This hangs on params resolution since we're only launching one
      # collective for a group size of 2.
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # After abortion, subsequent collectives should fail immediately.
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    t.join()
    # Reset the context in order to reset the collective executor.
    _setup_context()

    # After reset non-NCCL collectives should work.
    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    def_function.function(collective_fn)()

  def testAbortInstanceParamsResolution(self, collective_op, device,
                                        communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    # First perform a normal all-reduce to complete the group resolution.
    def_function.function(collective_fn)()

    def abort_fn():
      # Give the collective below time to block on instance resolution.
      time.sleep(2)
      context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

    t = threading.Thread(target=abort_fn)
    t.start()

    # Use a different instance key to trigger another instance resolution.
    instance_key = 101
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      # This hangs on params resolution since we're only launching one
      # collective for a group size of 2.
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # After abortion, subsequent collectives should fail immediately.
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    context._reset_context()  # pylint: disable=protected-access
    t.join()

    # Reset the context in order to reset the collective executor.
    _setup_context()
    # After reset non-NCCL collectives should work.
    def_function.function(collective_fn)()

  def testAbortCommunication(self, collective_op, device, communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    # First perform a normal collective to finish resolution.
    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    def_function.function(collective_fn)()

    # Launch a collective that hangs, and abort the collective executor after
    # the launch.
    def abort_fn():
      time.sleep(2)
      context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

    t = threading.Thread(target=abort_fn)
    t.start()

    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # After abortion, subsequent collectives should fail immediately.
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # Reset the context in order to reset the collective executor.
    t.join()
    _setup_context()
    def_function.function(collective_fn)()

  def testOpErrorNotAbort(self, collective_op, device, communication):
    # Do not abort if there's no active collective ops. There could be
    # exceptions like EOF which we expect users to catch, aborting collective
    # ops on all op errors intervenes with this workflow.
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    dataset = dataset_ops.Dataset.from_tensors([1.])

    @def_function.function
    def collective_fn(in_tensor):
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    @def_function.function
    def f():
      iterator = iter(dataset)
      collective_fn(next(iterator))
      # This next(iterator) should raise EOF.
      collective_fn(next(iterator))

    with self.assertRaises(errors.OutOfRangeError):
      f()
    # Collectives must still work after the (caught) EOF error.
    collective_fn(constant_op.constant([1.]))

  def testOpErrorAbort(self, collective_op, device, communication):
    # Abort collective ops if there're active collective ops at the time of an
    # op error. This is due to the inability to cancel collective ops, and op
    # errors may cause running collective ops to hang.
    dev0 = '/device:%s:0' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])
    # Make the dataset sleep a while so that the collective is being executed
    # when the EOF happens.
    dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
        dataset_testing.sleep(sleep_microseconds=200))

    @def_function.function
    def f():
      # Launch a collective op that won't be able to finish to test abortion
      # when other ops error.
      with ops.device(dev0):
        ret = collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)
      iterator = iter(dataset)
      next(iterator)
      # This should raise EOF.
      next(iterator)
      return ret

    with self.assertRaises(errors.OutOfRangeError):
      f()
    # Now collective ops is aborted, subsequent collective ops should fail with
    # the previous error.
    with self.assertRaises(errors.CancelledError):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)
@combinations.generate(
    combinations.times(
        combinations.combine(
            collective_op=[
                combinations.NamedObject('all_reduce',
                                         CollectiveOpsV1.all_reduce),
                combinations.NamedObject('all_reduce_v2',
                                         CollectiveOpsV2.all_reduce),
                combinations.NamedObject('all_gather',
                                         CollectiveOpsV1.all_gather),
                combinations.NamedObject('all_gather_v2',
                                         CollectiveOpsV2.all_gather),
            ],
            mode='eager'), device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
  """Tests for the collective timeout parameter."""

  def setUp(self):
    # Reset the eager context so each test gets a fresh collective executor.
    _setup_context()
    super().setUp()

  def testTimeout(self, collective_op, device, communication):
    timeout = 1.5

    @def_function.function
    def run(group_size, reported_group_size=None):
      group_key = 20
      instance_key = 30
      tensor = [1., 2., 3., 4.]
      results = []
      if reported_group_size is None:
        reported_group_size = group_size
      for i in range(group_size):
        with ops.device('/{}:{}'.format(device, i)):
          input_data = constant_op.constant(tensor)
          result = collective_op(
              input_data,
              group_size=reported_group_size,
              group_key=group_key,
              instance_key=instance_key,
              communication_hint=communication,
              timeout=timeout)
          results.append(result)
      return results

    run(2, 2)

    # Launching 1 of a reported 2 participants must time out, and only
    # after at least `timeout` seconds have elapsed.
    start_time = time.time()
    with self.assertRaisesRegex(errors.DeadlineExceededError,
                                'Collective has timed out during execution'):
      run(1, 2)
    elapsed = time.time() - start_time
    self.assertAllGreaterEqual(elapsed, timeout)

  def testParamResolutionAfterTimeout(self, collective_op, device,
                                      communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    timeout = 1.5
    group_key = 20
    instance_key = 30
    input_data = constant_op.constant([1., 2., 3., 4.])

    # This timeout comes from param solution.
    with self.assertRaisesRegex(
        errors.DeadlineExceededError,
        'Collective has timed out waiting for other workers'):
      with ops.device(dev0):
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication,
            timeout=timeout)

    # We launch the second device after the first device times out. This is to
    # simulate the situation when other workers are slow and the timeout is
    # short. It should error immediately.
    with self.assertRaisesRegex(
        errors.DeadlineExceededError,
        'Collective has timed out waiting for other workers'):
      with ops.device(dev1):
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication)

  def testExecutionAfterTimeout(self, collective_op, device, communication):
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    timeout = 1.5
    group_key = 20
    instance_key = 30
    input_data = constant_op.constant([1., 2., 3., 4.])

    @def_function.function
    def run():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              input_data,
              group_size=2,
              group_key=group_key,
              instance_key=instance_key,
              communication_hint=communication,
              timeout=timeout)

    # Run a normal all-reduce to complete param resolution.
    run()

    with self.assertRaisesRegex(errors.DeadlineExceededError,
                                'Collective has timed out during execution'):
      with ops.device(dev0):
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication,
            timeout=timeout)

    # We launch the second device after the first device times out. This is to
    # simulate the situation when other workers are slow and the timeout is
    # short. It should error immediately.
    with self.assertRaisesRegex(errors.DeadlineExceededError,
                                'Collective has timed out during execution'):
      with ops.device(dev1):
        # No timeout.
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication)
def _setup_context():
  """Reset the eager context and ensure at least 4 logical CPU devices."""
  context._reset_context()
  test_util.set_logical_devices_to_at_least('CPU', 4)
  context.ensure_initialized()


if __name__ == '__main__':
  v2_compat.enable_v2_behavior()
  test.main()
|
main-signlal-handling.py | import multiprocessing
import signal
import sys
import time
def main():
    """Spawn five worker processes and wait for all of them to finish."""
    # Install the parent's SIGTERM handler; children install their own.
    signal.signal(signal.SIGTERM,
                  lambda signum, frame: signal_handler('main', signum, frame))
    workers = [multiprocessing.Process(target=run, args=(worker_id,))
               for worker_id in range(5)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print('Exiting main')
def run(id):
    """Worker loop: sleep until terminated by SIGTERM.

    BUGFIX: the handler's sys.exit() raises SystemExit out of time.sleep(),
    so the trailing "Exiting process" print was unreachable dead code. The
    try/finally guarantees it now runs on the way out.
    """
    print('Starting process {}'.format(id))
    signal.signal(signal.SIGTERM,
                  lambda signum, frame: signal_handler(id, signum, frame))
    try:
        while True:
            time.sleep(1)
    finally:
        print('Exiting process {}'.format(id))
def signal_handler(process, signum, frame):
    """Report which process caught which signal, then raise SystemExit."""
    message = 'sig_handler called in {} for signal number {}'.format(
        process, signum)
    print(message)
    sys.exit()


if __name__ == '__main__':
    main()
|
babigen.py |
import pickle
import numpy as np
from shutil import rmtree
import os
from os import listdir, mkdir
from os.path import join, isfile, isdir, dirname, basename, normpath, abspath, exists
import subprocess
import death.DNC.archi.param as param
from threading import Thread
import time
def create_dictionary(files_list):
    """Build a mapping from each unique lexicon in the dataset to an integer id.

    '.' and '?' are detached from adjacent words before tokenising, commas
    are discarded, and only purely alphabetic tokens (lower-cased) are kept.

    Parameters:
    ----------
    files_list: list
        the list of files to scan through

    Returns: dict
        the constructed dictionary of lexicons
    """
    lexicons_dict = {}
    for filename in files_list:
        with open(filename, 'r') as fobj:
            for line in fobj:
                # detach sentence punctuation so it tokenises separately
                cleaned = line.replace('.', ' .').replace('?', ' ?').replace(',', ' ')
                for token in cleaned.split():
                    key = token.lower()
                    if key not in lexicons_dict and token.isalpha():
                        # next free id == current dictionary size
                        lexicons_dict[key] = len(lexicons_dict)
    return lexicons_dict
def encode_data(files_list, lexicons_dictionary, padding_to_length=None):
    """
    encodes the dataset into its numeric form given a constructed dictionary,
    padding each story's input vector to padding_to_length by appending a
    dummy "+" symbol at the end

    Parameters:
    ----------
    files_list: list
        the list of files to scan through
    lexicons_dictionary: dict
        the mappings of unique lexicons; mutated in place — a padding
        symbol "+" is appended to it
    padding_to_length: int or None
        target length for every story's input vector; stories longer than
        this are dropped.  When None, no padding is applied and no story
        is dropped.

    Returns: tuple (dict, list)
        the data in its numeric form keyed by filename, and the list of
        story lengths

    NOTE(review): a story is only flushed when the *next* story's leading
    "1" marker is seen, so the final story of every file is silently
    discarded.  This mirrors the original behaviour and is kept as-is.
    """
    files = {}
    story_inputs = None
    story_outputs = None
    stories_lengths = []
    answers_flag = False  # when True, tokens are answers and go to the outputs list
    limit = padding_to_length if padding_to_length is not None else float("inf")

    # add a padding symbol
    plus_index = len(lexicons_dictionary)
    lexicons_dictionary["+"] = plus_index

    for filename in files_list:
        files[filename] = []
        with open(filename, 'r') as fobj:
            for line in fobj:
                # first separate . and ? away from words into separate lexicons
                line = line.replace('.', ' .')
                line = line.replace('?', ' ?')
                line = line.replace(',', ' ')
                answers_flag = False  # reset as answers end by end of line
                for i, word in enumerate(line.split()):
                    if word == '1' and i == 0:
                        # beginning of a new story: flush the previous one
                        if story_inputs is not None:
                            story_len = len(story_inputs)
                            stories_lengths.append(story_len)
                            if story_len <= limit:
                                # BUGFIX: only pad when a finite limit was given.
                                # The original did `[plus_index] * (limit - story_len)`
                                # unconditionally, which raises TypeError when
                                # padding_to_length is None (limit == inf).
                                if padding_to_length is not None:
                                    story_inputs += [plus_index] * (limit - story_len)
                                files[filename].append({
                                    'inputs': story_inputs,
                                    'outputs': story_outputs
                                })
                        story_inputs = []
                        story_outputs = []
                    if word.isalpha() or word == '?' or word == '.':
                        if not answers_flag:
                            story_inputs.append(lexicons_dictionary[word.lower()])
                        else:
                            # answers are masked in the input with '-' and
                            # recorded in the output sequence
                            story_inputs.append(lexicons_dictionary['-'])
                            story_outputs.append(lexicons_dictionary[word.lower()])
                    # set the answers_flag if a question mark is encountered
                    if not answers_flag:
                        answers_flag = (word == '?')
    return files, stories_lengths
def load(path):
    """Unpickle and return the object stored at *path*.

    BUGFIX: uses a context manager so the file handle is always closed;
    the original passed a bare open() handle to pickle.load and leaked it.
    """
    with open(path, 'rb') as fobj:
        return pickle.load(fobj)
def onehot(index, size):
    """Return a one-hot float32 vector of length *size* with 1.0 at *index*."""
    encoded = np.zeros(size, dtype=np.float32)
    hot_position = int(index)
    encoded[hot_position] = 1.0
    return encoded
def prepare_sample(sample, target_code, word_space_size, batch_size):
    """Vectorise a batch of encoded stories for training.

    Parameters
    ----------
    sample: sequence of dicts
        each with 'inputs' (story token ids, answer positions already
        masked with *target_code*) and 'outputs' (the answer token ids,
        one per masked position)
    target_code: int
        token id of the answer-mask symbol ('-')
    word_space_size: int
        vocabulary size, i.e. the one-hot dimension
    batch_size: int
        number of stories taken from *sample*

    Returns
    -------
    tuple (np.ndarray, np.ndarray, np.ndarray)
        one-hot inputs of shape (batch_size, story_len, word_space_size),
        integer targets of shape (batch_size, story_len), and the indices
        of the masked (answer) positions of shape (batch_size, n_answers).
        NOTE: np.stack requires every story to have the same length and
        the same number of masked positions — assumed from the padded
        dataset; confirm against the caller.
    """
    list_of_input_vec = []
    list_of_output_vec = []
    list_of_ignore_index = []
    for i in range(batch_size):
        # the input is the story with answers replaced by the mask symbol
        input_vec = np.array(sample[i]['inputs'], dtype=np.float32)
        # the target starts as a copy of the input ...
        output_vec = np.array(sample[i]['inputs'], dtype=np.float32)
        # positions where an answer is required
        target_mask = (input_vec == target_code)
        # idiom fix: np.where(mask), not np.where(mask == True)
        critical_index = np.where(target_mask)
        # ... and the masked positions are overwritten with the true answers
        output_vec[target_mask] = sample[i]['outputs']
        # vectorised one-hot encoding: row k of eye() is the one-hot of k
        # (replaces the per-element Python loop of the original)
        input_vec = np.eye(word_space_size, dtype=np.float32)[input_vec.astype(np.int64)]
        # targets must be integer class ids for the loss function.
        # Changed from astype("long") (platform-dependent C long, 32-bit on
        # Windows) to the explicit 64-bit type expected by torch losses.
        output_vec = output_vec.astype(np.int64)
        list_of_input_vec.append(input_vec)
        list_of_output_vec.append(output_vec)
        list_of_ignore_index.append(critical_index)
    input_vec = np.stack(list_of_input_vec)
    output_vec = np.stack(list_of_output_vec)
    # each np.where result is a 1-tuple of index arrays, so stacking gives
    # shape (batch, 1, n_answers); squeeze out the middle axis
    critical_index = np.stack(list_of_ignore_index)
    critical_index = critical_index.squeeze(1)
    return (
        np.reshape(input_vec, (batch_size, -1, word_space_size)),
        output_vec,
        critical_index
    )
def write_babi_to_disk(story_limit=150):
    '''
    Build the processed bAbI dataset on disk.

    Scans the data/ directory next to this file, builds the lexicon
    dictionary, encodes every file (padded/limited to *story_limit*), and
    pickles the results under data/processed/{train,test} together with
    the lexicon itself.  Any previous data/processed tree is removed first.

    :param story_limit: pad/limit every story's input vector to this length
    :return: None
    '''
    task_dir = os.path.dirname(abspath(__file__))
    data_dir = join(task_dir, 'data/')
    # when True, all *train.txt files are merged into a single train.pkl
    joint_train = True
    files_list = []
    if not exists(join(task_dir, 'data')):
        mkdir(join(task_dir, 'data'))
    for entryname in listdir(data_dir):
        entry_path = join(data_dir, entryname)
        if isfile(entry_path):
            files_list.append(entry_path)
    lexicon_dictionary = create_dictionary(files_list)
    lexicon_count = len(lexicon_dictionary)
    # append used punctuation and the answer-mask symbol to the dictionary
    lexicon_dictionary['?'] = lexicon_count
    lexicon_dictionary['.'] = lexicon_count + 1
    lexicon_dictionary['-'] = lexicon_count + 2
    encoded_files, stories_lengths = encode_data(files_list, lexicon_dictionary, story_limit)
    processed_data_dir = join(task_dir, 'data', "processed")
    train_data_dir = join(processed_data_dir, 'train')
    test_data_dir = join(processed_data_dir, 'test')
    # start from a clean processed/ tree
    if exists(processed_data_dir) and isdir(processed_data_dir):
        rmtree(processed_data_dir)
    mkdir(processed_data_dir)
    mkdir(train_data_dir)
    mkdir(test_data_dir)
    # BUGFIX: use context managers so every pickle file handle is closed;
    # the original passed bare open(...) handles to pickle.dump and leaked them.
    with open(join(processed_data_dir, 'lexicon-dict.pkl'), 'wb') as fobj:
        pickle.dump(lexicon_dictionary, fobj)
    joint_train_data = []
    for filename in encoded_files:
        if filename.endswith("test.txt"):
            with open(join(test_data_dir, 'test.pkl'), 'wb') as fobj:
                pickle.dump(encoded_files[filename], fobj)
        elif filename.endswith("train.txt"):
            if not joint_train:
                with open(join(train_data_dir, basename(filename) + '.pkl'), 'wb') as fobj:
                    pickle.dump(encoded_files[filename], fobj)
            else:
                joint_train_data.extend(encoded_files[filename])
    if joint_train:
        with open(join(train_data_dir, 'train.pkl'), 'wb') as fobj:
            pickle.dump(joint_train_data, fobj)
def gendata(batch_size, validate=False):
    '''
    Load the processed pickles and produce one random batch.

    :param batch_size: number of stories to sample
    :param validate: when True, sample from the test set instead of train
    :return: tuple (input_data, target_output, ignore_index);
        input_data has shape (batch_size, story_word_count, one_hot_dictionary_size)
    '''
    # renamed from `dirname`, which shadowed os.path.dirname imported above
    task_dir = os.path.dirname(__file__)
    data_dir = os.path.join(task_dir, 'data', 'processed')
    lexicon_dict = load(join(data_dir, 'lexicon-dict.pkl'))
    # idiom fix: `not validate` instead of `validate == False`
    if not validate:
        file_path = join(data_dir, 'train', 'train.pkl')
    else:
        file_path = join(data_dir, 'test', 'test.pkl')
    data = load(file_path)
    word_space_size = len(lexicon_dict)
    # sample with replacement from the list of encoded stories
    sample = np.random.choice(data, batch_size)
    input_data, target_output, ignore_index = prepare_sample(sample, lexicon_dict['-'], word_space_size, batch_size)
    return input_data, target_output, ignore_index
class PreGenData():
    """Pre-generates training/validation batches on background threads.

    The purpose of this class is to generate data before it's required to
    use; per cProfile this saves ~11% of run time.

    NOTE(review): the ready-flags are never reset, so after the first
    generation the busy-wait loops pass immediately and a getter may race
    with the background thread overwriting the batch.  Preserved as-is; a
    queue.Queue would be the robust replacement — confirm before changing.
    """

    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.val_ready = False
        self.train_ready = False
        self.next_train = None
        self.next_validate = None
        # generate the first batches synchronously so shapes are known below
        self.__gendata_train()
        self.__gendata_val()
        # publish the input width to the global hyper-parameter module
        param.x = self.next_train[0].shape[2]
        param.v_t = param.x

    def get_train(self):
        """Return the current training batch and start producing the next one."""
        Thread(target=self.__gendata_train).start()
        while not self.train_ready:
            print('train data is not ready?')
            time.sleep(1)
        return self.next_train

    def get_validate(self):
        """Return the current validation batch and start producing the next one."""
        Thread(target=self.__gendata_val).start()
        while not self.val_ready:
            print('val data is not ready?')
            time.sleep(1)
        # BUGFIX: the original returned self.next_train here (copy-paste error)
        return self.next_validate

    def __gendata_train(self):
        # runs on a worker thread; publishes the batch then flips the flag
        self.next_train = gendata(self.batch_size, False)
        self.train_ready = True

    def __gendata_val(self):
        self.next_validate = gendata(self.batch_size, True)
        self.val_ready = True
def main():
    """Regenerate the bAbI pickles, then pull one training batch as a smoke test."""
    write_babi_to_disk(story_limit=150)
    pregen = PreGenData(param.bs)
    input_data, target_output, ignore_index = pregen.get_train()
    print("done")
# Script entry point.
if __name__ == '__main__':
    main() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.