repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
Jayich-Lab/jax | util/devices.py | __all__ = ["Devices"]
class Devices:
    """Sinara device manager.

    It gets lists of device names from device_db.py.
    It saves the devices requested by an experiment in lists.
    These lists can be used to reset used devices at the end of the experiment.

    Device types that are saved include urukuls, ad9910s, and ttl_outs. To access the devices
    used by an experiment in the kernel code, there must be at least one of such device in the
    sinara system, so the device can be used as a placeholder in the list.
    If the sinara system does not contain those devices or other device types need to be saved,
    this class can be inherited. Check jax.experiments.SinaraEnvironment about how to use a
    inherited device manager class.

    Attributes in this class are not guaranteed to have non-zero lengths, so they cannot be used
    reliably in ARTIQ python directly. Check whether they are empty in host code before using
    them in the kernel.

    Attributes:
        urukuls: a list of urukul CPLD names.
        ad9910s: a list of AD9910 DDS names.
        ttl_ins: a list of TTL input channel names.
        ttl_outs: a list of TTL output channel names.
        ttl_in_outs: a list of TTL input/output channel names. These channels are usually
            inputs. They cannot be used for both inputs and outputs without toggling a
            switch on the TTL board.
        urukuls_used: a list of tuple (name, object) of urukuls used in the experiment.
        ad9910s_used: a list of tuple (name, object) AD9910s used in the experiment.
        ttl_ins_used: a list of tuple (name, object) TTL inputs used in the experiment.
        ttl_outs_used: a list of tuple (name, object) TTL outputs used in the experiment.
        ttl_in_outs_used: a list of tuple (name, object) of TTL in/outs used in the experiment.

    Args:
        device_db: device_db dict from device_db.py.
    """
    kernel_invariants = {
        "urukuls", "ad9910s", "ttl_ins", "ttl_outs", "ttl_in_outs",
        "urukuls_used", "ad9910s_used", "ttl_ins_used", "ttl_outs_used", "ttl_in_outs_used"
    }

    def __init__(self, device_db):
        self._parse_device_db(device_db)

    def use_device(self, key, device):
        """Saves a device to the used device lists.

        Calling this more than once with the same key is idempotent.

        Args:
            key: str, device name.
            device: device object.
        """
        if key in self.urukuls:
            self._append_if_new(self.urukuls_used, key, device)
        if key in self.ad9910s:
            self._append_if_new(self.ad9910s_used, key, device)
        if key in self.ttl_ins:
            self._append_if_new(self.ttl_ins_used, key, device)
        if key in self.ttl_outs:
            self._append_if_new(self.ttl_outs_used, key, device)
        if key in self.ttl_in_outs:
            self._append_if_new(self.ttl_in_outs_used, key, device)

    @staticmethod
    def _append_if_new(used_list, key, device):
        """Appends (key, device) to used_list if the key is not recorded yet.

        Bug fix: the previous code tested `device not in used_list`, which never
        matched because used_list stores (name, object) tuples, so repeated
        requests for the same device were appended multiple times.
        """
        if all(name != key for name, _ in used_list):
            used_list.append((key, device))

    def _parse_device_db(self, device_db):
        """Builds the device name lists from the device database dict."""
        self.urukuls = []
        self._urukul_io_updates = []  # We don't want to manually control these TTL outputs.
        self.ad9910s = []
        self._ad9910_sws = []  # We don't want to manually control these TTL outputs.
        self.ttl_ins = []
        self.ttl_outs = []
        self.ttl_in_outs = []
        self.urukuls_used = []
        self.ad9910s_used = []
        self.ttl_ins_used = []
        self.ttl_outs_used = []
        self.ttl_in_outs_used = []
        # First pass: find urukul CPLDs and AD9910s, and record the TTL channels
        # they own (io_update lines and rf switch lines) so that the second pass
        # can exclude them from the manually-controllable TTL outputs.
        for kk in device_db:
            if "class" not in device_db[kk]:
                continue
            urukul_module = "artiq.coredevice.urukul"
            if device_db[kk]["class"] == "CPLD" and device_db[kk]["module"] == urukul_module:
                self.urukuls.append(kk)
                self._urukul_io_updates.append(device_db[kk]["arguments"]["io_update_device"])
            if device_db[kk]["class"] == "AD9910":
                self.ad9910s.append(kk)
                self._ad9910_sws.append(device_db[kk]["arguments"]["sw_device"])
        # Second pass: classify TTL channels, excluding DDS-owned outputs.
        for kk in device_db:
            if "class" not in device_db[kk]:
                continue
            if device_db[kk]["class"] == "TTLIn":
                self.ttl_ins.append(kk)
            if device_db[kk]["class"] == "TTLOut":
                if kk not in self._ad9910_sws and kk not in self._urukul_io_updates:
                    self.ttl_outs.append(kk)
            if device_db[kk]["class"] == "TTLInOut":
                self.ttl_in_outs.append(kk)
|
Jayich-Lab/jax | base/environments/sinara_environment.py | <filename>base/environments/sinara_environment.py<gh_stars>1-10
import numpy as _np
from sipyco import pyon
from artiq.experiment import *
from jax import JaxEnvironment
from jax.util.devices import Devices
class SinaraEnvironment(JaxEnvironment):
    """Environment for Jayich lab experiments that use sinara hardware.

    See JaxEnvironment for additional attributes that are not specific to sinara hardware.
    self.devices provides lists of devices and tracks devices used in an experiment.
    A new device manager class can inherit jax.utilities.devices.Devices to support more device
    types, and DEVICE_CLASS must be set to the new device manager class.

    Attributes:
        core: ARTIQ core device.
        devices: Devices, device manager.
        rtio_cycle_mu: np.int64, real-time input/output clock time in machine unit.
            In order for RTIO to distinguish different events, e.g., TTL pulses,
            the time separation between them must be at least this value.
        dds_set_delay_mu: np.int64, slack time needed for an ad9910.set() call.
            This is due to slow float operations in the kasli.
    """
    DEVICE_CLASS = Devices  # override in subclasses to support more device types.

    def build(self):
        self._get_devices()
        super().build()
        self.setattr_device("core")

    def _get_devices(self):
        """Get a device manager that lists and handles available devices."""
        self.devices = self.DEVICE_CLASS(self.get_device_db())

    def prepare(self):
        super().prepare()
        # Minimum RTIO event separation (one coarse RTIO cycle) in machine units.
        self.rtio_cycle_mu = _np.int64(self.core.ref_multiplier)
        # Slack for one ad9910.set() call (float math on the core device is slow).
        self.dds_set_delay_mu = self.core.seconds_to_mu(200*us)
        # Snapshot pre-experiment DDS/TTL parameters (pyon-encoded, fetched from
        # the artiq LabRAD server) so reset_sinara_hardware() can restore them.
        self._preexp_dds_params = pyon.decode(self.cxn.artiq.get_dds_parameters())
        self._preexp_ttl_params = pyon.decode(self.cxn.artiq.get_ttl_parameters())

    @host_only
    def get_device(self, key):
        """Gets a device and saves it to the device manager.

        This overloads artiq.experiment.HasEnvironment.get_device.
        """
        device = super().get_device(key)
        self.devices.use_device(key, device)
        return device

    @host_only
    def setattr_device(self, key):
        """Sets a device as an attribute of the experiment, and saves it in the device manager.

        self.`key` can be used to access the device.
        This overloads artiq.experiment.HasEnvironment.setattr_device.
        """
        super().setattr_device(key)
        self.devices.use_device(key, getattr(self, key))

    @host_only
    def turn_off_all_ddses(self):
        """Turns off all DDSes used in an experiment."""
        # Only enter the kernel when there is at least one AD9910 to act on.
        if len(self.devices.ad9910s_used) > 0:
            self._turn_off_ad9910s()

    @host_only
    def reset_sinara_hardware(self):
        """Sets all hardware back to pre-experiment values."""
        if len(self.devices.urukuls_used) > 0:
            self._reset_urukuls()
        if len(self.devices.ad9910s_used) > 0:
            # Parameter lists are ordered to match the *_used device lists.
            params_used_ad9910s = []
            for kk in self.devices.ad9910s_used:
                params_used_ad9910s.append(self._preexp_dds_params[kk[0]])
            self._reset_ad9910s(params_used_ad9910s)
        if len(self.devices.ttl_outs_used) > 0:
            params_used_ttl_outs = []
            for kk in self.devices.ttl_outs_used:
                params_used_ttl_outs.append(self._preexp_ttl_params[kk[0]])
            self._reset_ttl_outs(params_used_ttl_outs)

    @kernel
    def _turn_off_ad9910s(self):
        """Turns off the rf switches and sets amplitudes to 0."""
        for name, ad9910 in self.devices.ad9910s_used:
            self.core.break_realtime()
            delay_mu(self.rtio_cycle_mu)
            ad9910.sw.off()
            # Keep the current frequency and phase; only zero the amplitude.
            freq, phase, amp = ad9910.get()
            self.core.break_realtime()
            delay_mu(self.dds_set_delay_mu)
            ad9910.set(freq, phase, 0.)

    @kernel
    def _reset_urukuls(self):
        """Sets all urukuls back to profile 7 (the default profile)."""
        for name, urukul in self.devices.urukuls_used:
            self.core.break_realtime()
            delay_mu(self.rtio_cycle_mu)
            urukul.set_profile(7)

    @kernel
    def _reset_ad9910s(self, params):
        """Sets all AD9910s back to pre-experiment parameters.

        Args:
            params: list of parameter lists, ordered as self.devices.ad9910s_used.
        """
        for kk in range(len(self.devices.ad9910s_used)):
            name, ad9910 = self.devices.ad9910s_used[kk]
            values = params[kk]
            self._set_dds(ad9910, values)

    @kernel
    def _reset_ttl_outs(self, params):
        """Sets all TTL outputs back to pre-experiment parameters.

        Args:
            params: list of floats, ordered as self.devices.ttl_outs_used.
        """
        for kk in range(len(self.devices.ttl_outs_used)):
            name, ttl = self.devices.ttl_outs_used[kk]
            value = params[kk]
            self._set_ttl(ttl, value)

    @kernel(flags={"fast-math"})
    def _set_dds(self, device, values):
        """Sets frequency, phase, amplitude, attenuation, and state of a DDS.

        Args:
            device: AD9910, DDS device.
            values: list of floats, [frequency, phase, amplitude, attenuation, state].
        """
        self.core.break_realtime()
        delay_mu(self.rtio_cycle_mu)
        delay_mu(self.dds_set_delay_mu)
        device.set(values[0], values[1], values[2])
        self.core.break_realtime()
        # NOTE(review): presumably reads the attenuation register first so the
        # set_att() below does not clobber other channels -- confirm.
        device.get_att_mu()
        self.core.break_realtime()
        delay_mu(self.dds_set_delay_mu)
        device.set_att(values[3])
        self.core.break_realtime()
        # values[4] > 0 means the rf switch should be on.
        if values[4] > 0.:
            device.sw.on()
        else:
            device.sw.off()

    @kernel(flags={"fast-math"})
    def _set_ttl(self, device, value):
        """Sets state of a TTL.

        Args:
            device: TTLOut or TTLInOut, TTL device.
            value: float, state, -1. (off), 1. (on).
        """
        self.core.break_realtime()
        delay_mu(self.rtio_cycle_mu)
        if value > 0.:
            device.on()
        else:
            device.off()
|
Jayich-Lab/jax | util/tree_dict.py | __all__ = ["TreeDict"]
class TreeDict:
    """Exposes a nested dict as attribute access on an object.

    Every key of the dict becomes an attribute; nested dicts become nested
    objects. ARTIQ python does not support the dict type, so inherit this class
    to carry dict-shaped data into kernel-compatible code. Override
    self.value_parser() to transform leaf (non-dict) values.

    Args:
        dict_value: dict, dictionary to convert to an object.
        nested_dict_class: class used for nested dicts. Default None, meaning a
            fresh subclass of self.__class__ (may be any TreeDict subclass).
    """

    def __init__(self, dict_value, nested_dict_class=None):
        self._set_attributes(dict_value, nested_dict_class)

    def value_parser(self, value):
        """Hook for subclasses to transform leaf (non-dict) values."""
        return value

    def _set_attributes(self, dict_value, nested_dict_class):
        if nested_dict_class is None:
            # ARTIQ python cannot nest a class as an attribute of the same
            # class, so nested dicts are wrapped in a fresh derived class.
            class SubClass(self.__class__):
                pass

            nested_dict_class = SubClass
        for name, value in dict_value.items():
            if isinstance(value, dict):
                setattr(self, name, nested_dict_class(value))
            else:
                setattr(self, name, self.value_parser(value))
|
Jayich-Lab/jax | base/experiments/scan.py | <gh_stars>1-10
from artiq.experiment import *
from jax import JaxExperiment
__all__ = ["Scan"]
class Scan(JaxExperiment):
    """Base class for all scan experiments.

    self.scanned_values is the list of values that are scanned.
    It must be defined in self.prepare() or in self.host_startup().

    If CHECK_STOP is set to False, the experiment cannot be terminated or paused gracefully,
    but the experiment runs faster (~50 ms faster each loop).

    The sequence during self.run() is shown below:
        self.host_startup()  # host code to set up the experiment.
        self.kernel_run()  # run the following indented kernel functions.
            self.kernel_before_loops()  # kernel code to set up the scan.
            # loops through self.scanned_values
                # checks if the experiment should be terminated or paused.
                self.kernel_loop()  # runs a loop in kernel code.
            self.kernel_after_loops()  # kernel code to clean up the scan.
        self.host_cleanup()  # host code to clean up the experiment.

    Use RPCs to call host functions during the kernel execution if needed.
    """
    CHECK_STOP = True  # poll the scheduler each loop so the scan can pause/terminate.

    def run(self):
        try:
            self.host_startup()
            # kernel_run() returns early when a pause is requested; this outer
            # loop re-enters it until every scanned point has been taken.
            while self._scans_finished < len(self.scanned_values):
                should_stop = self.check_stop_or_do_pause()
                if should_stop:
                    break
                else:
                    self.turn_off_all_ddses()
                    self.kernel_run()
        except Exception as e:
            raise e  # NOTE(review): plain `raise` would preserve the traceback; kept as-is.
        finally:
            self.host_cleanup()

    def host_startup(self):
        """Called at the start of self.run(). Can be overriden."""
        self.open_file()
        self._scans_finished = 0  # index of the next unscanned point; survives pauses.

    def host_cleanup(self):
        """Called at the end of self.run(). Can be overriden."""
        self.reset_sinara_hardware()
        self.close_file()
        self.disconnect_labrad()

    @kernel
    def kernel_run(self):
        self.kernel_before_loops()
        for kk in range(len(self.scanned_values)):
            if kk < self._scans_finished:
                continue  # skips scanned points after pausing the experiment.
            if self.CHECK_STOP:
                # check_pause() is an RPC; break_realtime() regains slack after it.
                if self.scheduler.check_pause():
                    break
                self.core.break_realtime()
            self.kernel_loop(kk)
            self._scans_finished += 1
        self.kernel_after_loops()

    @kernel
    def kernel_before_loops(self):
        """Called at the start of self.kernel_run(). Can be overriden."""
        self.core.reset()

    @kernel
    def kernel_loop(self, loop_index):
        """Called during each loop of self.kernel_run(). Can be overriden."""
        pass

    @kernel
    def kernel_after_loops(self):
        """Called at the end of self.kernel_run(). Can be overriden."""
        pass
|
Jayich-Lab/jax | base/applets/real_time_plot_applet.py | import asyncio
import numpy as _np
import pyqtgraph as _pg
from PyQt5 import QtCore, QtGui, QtWidgets
from jax import JaxApplet
from jax.util.ui.fast_plot_trace import FastPlotTrace
__all__ = ["RealTimePlotApplet"]
class RealTimePlotApplet(QtWidgets.QWidget, JaxApplet):
    """Base applet for making a real-time updated plot.

    Plots shared datasets in the vault server.
    A plot can contain multiple traces (FastPlotTrace), stored in self.traces.
    When the control key is pressed down, automatic scrolling is disabled.

    Derived classes should implement self.initialize_datasets, self._set, and self._append.

    Args:
        num_of_traces: int, total number of traces to plot in the figure.
        dataset_names: list of strs, names of shared dataset to get updates for.
        xlabel: str, x-axis label. Default "".
        ylabel: str, y-axis label. Default "".
        scrolling: bool, whether the viewport scrolls with new data. Default True.
        ip: str, vault server IP address to connect to.
    """
    # Qt signals so dataset updates received on the LabRAD thread are handled
    # by _set/_append in the GUI thread.
    set_data = QtCore.pyqtSignal(str, _np.ndarray)
    append_data = QtCore.pyqtSignal(str, _np.ndarray)

    def __init__(self, num_of_traces, dataset_names, xlabel="", ylabel="", scrolling=True,
                 ip="127.0.0.1", **kwds):
        super().__init__(**kwds)
        self.num_of_traces = num_of_traces
        self.dataset_names = dataset_names
        self.scrolling = scrolling
        self._control_pressed = False
        self.setDisabled(True)  # disabled until the vault server is connected.
        self._color_index = 0
        self._initialize_gui(xlabel, ylabel)
        self.set_data.connect(self._set)
        self.append_data.connect(self._append)
        self.connect_to_labrad(ip)

    def _initialize_gui(self, xlabel, ylabel):
        """Builds the plot widget and the mouse-coordinate readout."""
        layout = QtWidgets.QGridLayout(self)
        self.plot_widget = _pg.PlotWidget()
        self._set_axes_style(xlabel, ylabel)
        layout.addWidget(self.plot_widget, 0, 0)
        self.coords = QtWidgets.QLabel("")
        self.coords.setFont(QtGui.QFont("Arial", 15))
        layout.addWidget(self.coords, 1, 0)
        self.setLayout(layout)
        self._set_all_traces()
        self.plot_widget.scene().sigMouseMoved.connect(self._mouse_moved)
        self.plot_widget.sigRangeChanged.connect(self._range_changed)

    def _set_axes_style(self, xlabel, ylabel):
        """Applies fonts, label text, and widths to both axes."""
        axis_fontsize = 15
        tick_text_offset = 10
        y_width = 90
        label_style = {"color": "#AAA", "font-size": f"{axis_fontsize}pt"}
        font = QtGui.QFont("Arial", axis_fontsize)
        x = self.plot_widget.plotItem.getAxis("bottom")
        x.setLabel(xlabel, **label_style)
        x.setStyle(tickFont=font, tickTextOffset=tick_text_offset)
        y = self.plot_widget.plotItem.getAxis("left")
        y.setLabel(ylabel, **label_style)
        y.setStyle(tickFont=font, tickTextOffset=tick_text_offset)
        y.setWidth(y_width)

    def _set_all_traces(self):
        """Adds all traces with default colors."""
        self.traces = []
        for kk in range(self.num_of_traces):
            self.traces.append(self.new_trace())

    def new_trace(self, color=None):
        """Creates a new trace.

        Always use trace width of 1 if possible. Non-unity trace width reduces plot speed
        significantly. This may be fixed soon (see pyqtgraph PR #2011).

        Args:
            color: color of the trace. See pyqtgraph color documentation for details.
                If None, a color from a color wheel will be used.
        """
        color_wheel = ["w", "c", "y", "g", "r", "m"]
        if color is None:
            color = color_wheel[self._color_index % len(color_wheel)]
            self._color_index += 1
        trace = FastPlotTrace(self.plot_widget, pen_kwargs={"color": color, "width": 1})
        trace.trace_updated.connect(self._trace_updated)
        trace.trace_removed.connect(self._trace_removed)
        return trace

    def _mouse_moved(self, position):
        """Shows the data coordinates under the mouse cursor."""
        if self.plot_widget.sceneBoundingRect().contains(position):
            point = self.plot_widget.plotItem.vb.mapSceneToView(position)
            coordinates = f"({point.x():.8}, {point.y():.8})"
            self.coords.setText(coordinates)

    def _range_changed(self):
        # Tracks the visible x-range for the auto-scrolling logic.
        lims = self.plot_widget.viewRange()
        self.current_xlimits = [lims[0][0], lims[0][1]]

    def keyPressEvent(self, key_event):
        if key_event.key() == QtCore.Qt.Key_Control:
            self._control_pressed = True
        super().keyPressEvent(key_event)

    def keyReleaseEvent(self, key_event):
        if key_event.key() == QtCore.Qt.Key_Control:
            self._control_pressed = False
        # Bug fix: previously forwarded to super().keyPressEvent(), delivering
        # release events to the press handler chain.
        super().keyReleaseEvent(key_event)

    def _trace_updated(self, data_xmax):
        # it is probably easier to disable scrolling by pressing the mouse.
        # however, PyQtGraph does not emit the MouseReleaseEvent.
        # so the control key is used instead.
        if self.scrolling and not self._control_pressed:
            try:
                plot_xmin, plot_xmax = self.current_xlimits
                plot_width = plot_xmax - plot_xmin
                # scroll if we have reached 80% of the window
                if data_xmax > (plot_xmin + 0.8 * plot_width) and data_xmax < plot_xmax:
                    shift = plot_width / 4
                    xmin = plot_xmin + shift
                    xmax = plot_xmax + shift
                    self.plot_widget.setXRange(xmin, xmax)
                    self.current_xlimits = [xmin, xmax]
            except Exception:
                # best-effort: self.current_xlimits does not exist until the
                # first sigRangeChanged signal; scrolling before then is a no-op.
                pass

    def _trace_removed(self):
        pass  # does not need to do anything when a trace is removed.

    async def labrad_connected(self):
        """Called when the LabRAD connection is established."""
        await self.vault_connected()
        await self.setup_cxn_listeners()

    async def vault_connected(self):
        """Subscribes to shared-dataset updates from the vault server."""
        self.dv = self.cxn.get_server("vault")
        for kk in self.dataset_names:
            await self.dv.subscribe_to_shared_dataset(kk)
        try:
            await self.initialize_datasets()  # implemented by the derived class.
        except Exception as e:
            print(e)
        SHARED_DATA_CHANGE = 128936
        await self.dv.on_shared_data_change(SHARED_DATA_CHANGE)
        self.dv.addListener(listener=self._data_changed, source=None, ID=SHARED_DATA_CHANGE)
        self.setDisabled(False)

    async def setup_cxn_listeners(self):
        self.cxn.add_on_connect("vault", self.run_in_labrad_loop(self.vault_connected))
        self.cxn.add_on_disconnect("vault", self.vault_disconnected)

    def vault_disconnected(self):
        # Gray out the applet until the vault server reconnects.
        self.setDisabled(True)

    def _data_changed(self, signal, data):
        # data is (operation, dataset_name, value) from the vault server.
        operation = data[0]
        dataset_name = data[1]
        value = data[2]
        if operation == "set":
            self.set_data.emit(dataset_name, value)
        elif operation == "append":
            self.append_data.emit(dataset_name, value)

    async def initialize_datasets(self):
        """Implement this function to get initial dataset values."""
        raise NotImplementedError()

    def _set(self, dataset_name, value):
        """Called when a dataset listened to is set to a new value."""
        raise NotImplementedError()

    def _append(self, dataset_name, value):
        """Called when a dataset listened to is appended by a new value."""
        raise NotImplementedError()
|
Jayich-Lab/jax | util/parameter_group.py | <reponame>Jayich-Lab/jax<gh_stars>1-10
from jax.util.tree_dict import TreeDict
__all__ = ["ParameterGroup"]
class ParameterGroup(TreeDict):
    """ARTIQ-python-compatible container of experiment parameters.

    Args:
        parameters_dict: nested dict of parameter collections, e.g.::

            {
                "collection_1": {
                    "parameter_1": value_1,
                    "parameter_2": value_2
                },
                "collection_2": {
                    "parameter_3": value_3
                }
            }
    """

    def __init__(self, parameters_dict):
        # All attribute construction is delegated to TreeDict.
        super().__init__(parameters_dict)
|
Jayich-Lab/jax | tools/applets/dds.py | <reponame>Jayich-Lab/jax
import time
from sipyco import pyon
from PyQt5 import QtGui, QtWidgets, QtCore
from artiq.applets.simple import SimpleApplet
from jax import JaxApplet
from jax.tools.applets.dds_channel import DDSChannel, DDSParameters
from jax.util.ui.custom_list_widget import CustomListWidget
class DDS(QtWidgets.QWidget, JaxApplet):
    """Applet listing and monitoring all DDS channels from the artiq LabRAD server."""
    # signal emitted after getting DDS parameters.
    # a signal is needed to run self.initialize_channels on the default thread.
    # widgets can only be created in the default thread.
    do_initialize = QtCore.pyqtSignal()

    def __init__(self, args, **kwds):
        super().__init__(**kwds)
        self.setDisabled(True)  # start with the applet disabled, until artiq server is connected.
        self.do_initialize.connect(self.initialize_channels)
        self.initialize_gui()
        self.load_config_file("dds", args)
        # connects to LabRAD in a different thread, and calls self.labrad_connected when finished.
        self.connect_to_labrad(args.ip)

    def initialize_gui(self):
        """Builds the static widgets (the reorderable channel list)."""
        font = QtGui.QFont("Arial", 15)  # NOTE(review): unused local; confirm before removing.
        layout = QtWidgets.QGridLayout()
        self.list_widget = CustomListWidget()
        layout.addWidget(self.list_widget)
        self.setLayout(layout)

    async def labrad_connected(self):
        """Called when LabRAD is connected."""
        await self.artiq_connected()
        await self.setup_cxn_listeners()

    async def artiq_connected(self):
        """Subscribes to DDS change/initialize signals from the artiq server."""
        self.artiq = self.cxn.get_server("artiq")
        initialize_now = await self.artiq.is_dds_initialized()
        if initialize_now:
            await self.get_dds_parameters()
        SIGNALID = 124890  # arbitrary listener ID; SIGNALID+1 is used for re-initialization.
        await self.artiq.on_dds_change(SIGNALID)
        self.artiq.addListener(listener=self._dds_changed, source=None, ID=SIGNALID)
        await self.artiq.on_dds_initialize(SIGNALID + 1)
        self.artiq.addListener(listener=self._dds_initialized, source=None, ID=SIGNALID+1)

    async def get_dds_parameters(self):
        """Fetches DDS parameters, then schedules channel creation in the GUI thread."""
        self.params = await self.artiq.get_dds_parameters()
        self.params = pyon.decode(self.params)
        # tells the main thread that it can populate the DDS channels.
        self.do_initialize.emit()
        self.setDisabled(False)

    @QtCore.pyqtSlot()
    def initialize_channels(self):
        """Creates one DDSChannel widget per channel. Runs in the GUI thread."""
        self.channels = {}
        self.list_widget.clear()
        for channel in self.params:
            cpld = "Not implemented"  # current code does not query the cpld name.
            # Parameter order per channel: frequency, phase, amplitude, attenuation, state.
            frequency = self.params[channel][0]
            phase = self.params[channel][1]
            amp = self.params[channel][2]
            att = self.params[channel][3]
            state = (self.params[channel][4] > 0)
            channel_param = DDSParameters(self, channel, cpld, amp, att, frequency, phase, state)
            channel_widget = DDSChannel(channel_param, self)
            self.channels[channel] = channel_widget
            # NOTE(review): set on every iteration; looks like leftover loop state -- confirm.
            self._still_looping = False
            self.list_widget.add_item_and_widget(channel, channel_widget)
        # Restores the saved visibility/order, then tracks further user changes.
        if "list_widget" not in self.config:
            self.config["list_widget"] = {}
        self.list_widget_reordered(self.list_widget.set_visibility_and_order(
            self.config["list_widget"]))
        self.list_widget.visibility_and_order_changed.connect(self.list_widget_reordered)

    def list_widget_reordered(self, widget_config):
        """Persists the channel-list layout to the config file."""
        self.config["list_widget"] = widget_config
        self.save_config_file()

    async def setup_cxn_listeners(self):
        self.cxn.add_on_connect("artiq", self.run_in_labrad_loop(self.artiq_connected))
        self.cxn.add_on_disconnect("artiq", self.artiq_disconnected)

    def artiq_disconnected(self):
        # Gray out the applet until the artiq server reconnects.
        self.setDisabled(True)

    def _dds_changed(self, signal, value):
        # value is (channel, attribute, new value) from the artiq server.
        channel, attribute, val = value
        if attribute == "frequency":
            self.channels[channel].on_monitor_freq_changed(val)
        elif attribute == "amplitude":
            self.channels[channel].on_monitor_amp_changed(val)
        elif attribute == "attenuation":
            self.channels[channel].on_monitor_att_changed(val)
        elif attribute == "state":
            self.channels[channel].on_monitor_switch_changed(val > 0.)

    def _dds_initialized(self, signal, value):
        # Re-fetch all parameters when the server re-initializes the DDSes.
        self.run_in_labrad_loop(self.get_dds_parameters)()
def main():
    """Runs the DDS applet with the jax-specific command-line arguments."""
    simple_applet = SimpleApplet(DDS)
    # Register extra CLI options before the applet parses arguments and runs.
    DDS.add_labrad_ip_argument(simple_applet)  # adds IP address as an argument.
    DDS.add_id_argument(simple_applet)
    simple_applet.run()


if __name__ == "__main__":
    main()
|
spasche/aiohue | aiohue/sensors.py | from datetime import datetime
from .api import APIItems
# Sensor "type" strings reported by the Hue v1 API.
TYPE_DAYLIGHT = "Daylight"
TYPE_CLIP_GENERICFLAG = "CLIPGenericFlag"
TYPE_CLIP_GENERICSTATUS = "CLIPGenericStatus"
TYPE_CLIP_HUMIDITY = "CLIPHumidity"
TYPE_CLIP_LIGHTLEVEL = "CLIPLightLevel"
TYPE_CLIP_OPENCLOSE = "CLIPOpenClose"
TYPE_CLIP_PRESENCE = "CLIPPresence"
TYPE_CLIP_SWITCH = "CLIPSwitch"
TYPE_CLIP_TEMPERATURE = "CLIPTemperature"
TYPE_GEOFENCE = "Geofence"
TYPE_ZGP_SWITCH = "ZGPSwitch"
TYPE_ZLL_LIGHTLEVEL = "ZLLLightLevel"
TYPE_ZLL_PRESENCE = "ZLLPresence"
TYPE_ZLL_ROTARY = "ZLLRelativeRotary"
TYPE_ZLL_SWITCH = "ZLLSwitch"
TYPE_ZLL_TEMPERATURE = "ZLLTemperature"
# buttonevent codes reported by the ZGP (Hue Tap) switch.
ZGP_SWITCH_BUTTON_1 = 34
ZGP_SWITCH_BUTTON_2 = 16
ZGP_SWITCH_BUTTON_3 = 17
ZGP_SWITCH_BUTTON_4 = 18
# buttonevent codes for ZLL switches: 1000 * button number + action digit
# (0 initial press, 1 hold, 2 short release, 3 long release).
ZLL_SWITCH_BUTTON_1_INITIAL_PRESS = 1000
ZLL_SWITCH_BUTTON_2_INITIAL_PRESS = 2000
ZLL_SWITCH_BUTTON_3_INITIAL_PRESS = 3000
ZLL_SWITCH_BUTTON_4_INITIAL_PRESS = 4000
ZLL_SWITCH_BUTTON_1_HOLD = 1001
ZLL_SWITCH_BUTTON_2_HOLD = 2001
ZLL_SWITCH_BUTTON_3_HOLD = 3001
ZLL_SWITCH_BUTTON_4_HOLD = 4001
ZLL_SWITCH_BUTTON_1_SHORT_RELEASED = 1002
ZLL_SWITCH_BUTTON_2_SHORT_RELEASED = 2002
ZLL_SWITCH_BUTTON_3_SHORT_RELEASED = 3002
ZLL_SWITCH_BUTTON_4_SHORT_RELEASED = 4002
ZLL_SWITCH_BUTTON_1_LONG_RELEASED = 1003
ZLL_SWITCH_BUTTON_2_LONG_RELEASED = 2003
ZLL_SWITCH_BUTTON_3_LONG_RELEASED = 3003
ZLL_SWITCH_BUTTON_4_LONG_RELEASED = 4003
class Sensors(APIItems):
    """Container for all Hue sensors exposed by the bridge.

    https://developers.meethue.com/documentation/sensors-api
    """

    def __init__(self, logger, raw, v2_resources, request):
        super().__init__(
            logger, raw, v2_resources, request, "sensors", create_sensor
        )
class GenericSensor:
    """Represents the base Hue sensor (v1 API)."""

    ITEM_TYPE = "sensors"

    def __init__(self, id, raw, v2_resources, request):
        self.id = id
        self.raw = raw
        # The first v2 resource of type "device" (if any) backs this sensor.
        self.device = next(
            (resource for resource in v2_resources if resource.get("type") == "device"),
            None,
        )
        self._request = request

    @property
    def name(self):
        return self.raw["name"]

    @property
    def type(self):
        return self.raw["type"]

    @property
    def modelid(self):
        return self.raw["modelid"]

    @property
    def manufacturername(self):
        return self.raw["manufacturername"]

    @property
    def productname(self):
        # Optional field; None when the bridge does not report it.
        return self.raw.get("productname")

    @property
    def uniqueid(self):
        return self.raw.get("uniqueid")

    @property
    def swversion(self):
        return self.raw.get("swversion")

    @property
    def state(self):
        return self.raw["state"]

    @property
    def config(self):
        return self.raw["config"]
class GenericCLIPSensor(GenericSensor):
    """Base class for CLIP (software-created) sensors."""

    @property
    def battery(self):
        # CLIP sensors keep the battery level in state, when present.
        return self.state.get("battery")

    @property
    def lastupdated(self):
        return self.state["lastupdated"]

    @property
    def on(self):
        return self.config["on"]

    @property
    def reachable(self):
        return self.config["reachable"]

    @property
    def url(self):
        return self.config.get("url")

    async def set_config(self, config):
        """Change config of a CLIP sensor."""
        await self._request("put", "sensors/{}/config".format(self.id), json=config)

    async def set_state(self, state):
        """Change state of a CLIP sensor."""
        await self._request("put", "sensors/{}/state".format(self.id), json=state)
class GenericZLLSensor(GenericSensor):
    """Base class for ZLL (ZigBee hardware) sensors."""

    @property
    def battery(self):
        # ZLL sensors report the battery level in config, when present.
        return self.config.get("battery")

    @property
    def lastupdated(self):
        return self.state.get("lastupdated")

    @property
    def on(self):
        return self.config["on"]

    @property
    def reachable(self):
        return self.config["reachable"]
class DaylightSensor(GenericSensor):
    """Virtual daylight sensor derived by the bridge from the configured location."""

    @property
    def configured(self):
        return self.config["configured"]

    @property
    def daylight(self):
        return self.state["daylight"]

    @property
    def on(self):
        return self.config["on"]

    @property
    def sunriseoffset(self):
        return self.config["sunriseoffset"]

    @property
    def sunsetoffset(self):
        return self.config["sunsetoffset"]

    async def set_config(
        self, on=None, long=None, lat=None, sunriseoffset=None, sunsetoffset=None
    ):
        """Change config of a Daylight sensor.

        Only keyword arguments that are not None are sent to the bridge.
        """
        candidates = {
            "on": on,
            "long": long,
            "lat": lat,
            "sunriseoffset": sunriseoffset,
            "sunsetoffset": sunsetoffset,
        }
        data = {}
        for key, value in candidates.items():
            if value is not None:
                data[key] = value
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class GeofenceSensor(GenericSensor):
    """Presence sensor driven by the phone app's geofence."""

    @property
    def on(self):
        return self.config["on"]

    @property
    def presence(self):
        return self.state["presence"]

    @property
    def reachable(self):
        return self.config["reachable"]

    async def set_config(self, on=None):
        """Change config of the Geofence sensor."""
        if on is None:
            data = {}
        else:
            data = {"on": on}
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class CLIPPresenceSensor(GenericCLIPSensor):
    """Software presence sensor."""

    @property
    def presence(self):
        return self.state["presence"]

    async def set_config(self, on=None):
        """Change config of a CLIP Presence sensor."""
        if on is None:
            data = {}
        else:
            data = {"on": on}
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class ZLLPresenceSensor(GenericZLLSensor):
    """Hardware motion sensor."""

    @property
    def presence(self):
        return self.state["presence"]

    def process_update_event(self, update):
        """Fold a v2 motion event into the cached v1 state dict."""
        new_state = dict(self.state)
        if "motion" in update:
            new_state["presence"] = update["motion"]["motion"]
        # v1 timestamps are UTC with second resolution.
        new_state["lastupdated"] = datetime.utcnow().replace(microsecond=0).isoformat()
        self.raw = {**self.raw, "state": new_state}

    async def set_config(self, on=None, sensitivity=None, sensitivitymax=None):
        """Change config of a ZLL Presence sensor; None values are skipped."""
        candidates = {
            "on": on,
            "sensitivity": sensitivity,
            "sensitivitymax": sensitivitymax,
        }
        data = {}
        for key, value in candidates.items():
            if value is not None:
                data[key] = value
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class ZLLRotarySensor(GenericZLLSensor):
    """Hardware rotary (dial) controller."""

    @property
    def rotaryevent(self):
        return self.state["rotaryevent"]

    @property
    def expectedrotation(self):
        return self.state["expectedrotation"]

    @property
    def expectedeventduration(self):
        return self.state["expectedeventduration"]

    async def set_config(self, on=None):
        """Change config of a ZLL Rotary sensor."""
        if on is None:
            data = {}
        else:
            data = {"on": on}
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class CLIPSwitchSensor(GenericCLIPSensor):
    """Software switch sensor."""

    @property
    def buttonevent(self):
        return self.state["buttonevent"]

    async def set_config(self, on=None):
        """Change config of a CLIP Switch sensor."""
        if on is None:
            data = {}
        else:
            data = {"on": on}
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class ZGPSwitchSensor(GenericSensor):
    """Hue Tap (ZigBee Green Power) switch."""

    @property
    def buttonevent(self):
        return self.state["buttonevent"]

    @property
    def lastupdated(self):
        return self.state.get("lastupdated")

    @property
    def on(self):
        return self.config["on"]

    @property
    def inputs(self):
        return self.raw.get("capabilities", {}).get("inputs")

    def process_update_event(self, update):
        """Translate a v2 button event into the v1 buttonevent code."""
        new_state = dict(self.state)
        if "button" in update and self.device:
            for index, service in enumerate(self.device["services"]):
                # Match the v2 button service this update refers to.
                if service["rid"] != update["id"]:
                    continue
                last_event = update["button"]["last_event"]
                for event in self.inputs[index]["events"]:
                    if event["eventtype"] == last_event:
                        new_state["buttonevent"] = event["buttonevent"]
                        break
                break
        new_state["lastupdated"] = datetime.utcnow().replace(microsecond=0).isoformat()
        self.raw = {**self.raw, "state": new_state}

    async def set_config(self, on=None):
        """Change config of a ZGP Switch sensor."""
        if on is None:
            data = {}
        else:
            data = {"on": on}
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class ZLLSwitchSensor(GenericZLLSensor):
    """Hardware switch (e.g. Hue dimmer)."""

    @property
    def buttonevent(self):
        return self.state["buttonevent"]

    @property
    def inputs(self):
        return self.raw.get("capabilities", {}).get("inputs")

    def process_update_event(self, update):
        """Translate a v2 button event into the v1 buttonevent code."""
        new_state = dict(self.state)
        if "button" in update and self.device:
            for index, service in enumerate(self.device["services"]):
                # Match the v2 button service this update refers to.
                if service["rid"] != update["id"]:
                    continue
                last_event = update["button"]["last_event"]
                for event in self.inputs[index]["events"]:
                    if event["eventtype"] == last_event:
                        new_state["buttonevent"] = event["buttonevent"]
                        break
                break
        new_state["lastupdated"] = datetime.utcnow().replace(microsecond=0).isoformat()
        self.raw = {**self.raw, "state": new_state}

    async def set_config(self, on=None):
        """Change config of a ZLL Switch sensor."""
        if on is None:
            data = {}
        else:
            data = {"on": on}
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class CLIPLightLevelSensor(GenericCLIPSensor):
    """Software light-level sensor."""

    @property
    def dark(self):
        return self.state["dark"]

    @property
    def daylight(self):
        return self.state["daylight"]

    @property
    def lightlevel(self):
        return self.state["lightlevel"]

    @property
    def tholddark(self):
        return self.config["tholddark"]

    @property
    def tholdoffset(self):
        return self.config["tholdoffset"]

    async def set_config(self, on=None, tholddark=None, tholdoffset=None):
        """Change config of a CLIP LightLevel sensor; None values are skipped."""
        candidates = {
            "on": on,
            "tholddark": tholddark,
            "tholdoffset": tholdoffset,
        }
        data = {}
        for key, value in candidates.items():
            if value is not None:
                data[key] = value
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class ZLLLightLevelSensor(GenericZLLSensor):
    """Hardware light-level sensor."""

    @property
    def dark(self):
        return self.state["dark"]

    @property
    def daylight(self):
        return self.state["daylight"]

    @property
    def lightlevel(self):
        return self.state["lightlevel"]

    @property
    def tholddark(self):
        return self.config["tholddark"]

    @property
    def tholdoffset(self):
        return self.config["tholdoffset"]

    async def set_config(self, on=None, tholddark=None, tholdoffset=None):
        """Change config of a ZLL LightLevel sensor; None values are skipped."""
        candidates = {
            "on": on,
            "tholddark": tholddark,
            "tholdoffset": tholdoffset,
        }
        data = {}
        for key, value in candidates.items():
            if value is not None:
                data[key] = value
        await self._request("put", "sensors/{}/config".format(self.id), json=data)
class CLIPTemperatureSensor(GenericCLIPSensor):
    """A CLIP (virtual) temperature sensor."""

    @property
    def temperature(self):
        return self.raw["state"]["temperature"]

    async def set_config(self, on=None):
        """Change config of a CLIP Temperature sensor."""
        payload = {}
        if on is not None:
            payload["on"] = on
        await self._request("put", "sensors/{}/config".format(self.id), json=payload)
class ZLLTemperatureSensor(GenericZLLSensor):
    """A ZLL (Zigbee) temperature sensor."""

    @property
    def temperature(self):
        return self.raw["state"]["temperature"]

    async def set_config(self, on=None):
        """Change config of a ZLL Temperature sensor."""
        payload = {}
        if on is not None:
            payload["on"] = on
        await self._request("put", "sensors/{}/config".format(self.id), json=payload)
class CLIPGenericFlagSensor(GenericCLIPSensor):
    """A CLIP (virtual) generic boolean flag sensor."""

    @property
    def flag(self):
        return self.raw["state"]["flag"]

    async def set_config(self, on=None):
        """Change config of a CLIP Generic Flag sensor."""
        payload = {}
        if on is not None:
            payload["on"] = on
        await self._request("put", "sensors/{}/config".format(self.id), json=payload)
class CLIPGenericStatusSensor(GenericCLIPSensor):
    """A CLIP (virtual) generic status (integer) sensor."""

    @property
    def status(self):
        return self.raw["state"]["status"]

    async def set_config(self, on=None):
        """Change config of a CLIP Generic Status sensor."""
        payload = {}
        if on is not None:
            payload["on"] = on
        await self._request("put", "sensors/{}/config".format(self.id), json=payload)
class CLIPHumiditySensor(GenericCLIPSensor):
    """A CLIP (virtual) humidity sensor."""

    @property
    def humidity(self):
        return self.raw["state"]["humidity"]

    async def set_config(self, on=None):
        """Change config of a CLIP Humidity sensor."""
        payload = {}
        if on is not None:
            payload["on"] = on
        await self._request("put", "sensors/{}/config".format(self.id), json=payload)
class CLIPOpenCloseSensor(GenericCLIPSensor):
    """A CLIP (virtual) open/close contact sensor."""

    @property
    def open(self):
        return self.raw["state"]["open"]

    async def set_config(self, on=None):
        """Change config of a CLIP Open Close sensor."""
        payload = {}
        if on is not None:
            payload["on"] = on
        await self._request("put", "sensors/{}/config".format(self.id), json=payload)
def create_sensor(id, raw, v2_resources, request):
    """Instantiate the sensor class matching ``raw["type"]``.

    Unknown types fall back to GenericSensor.
    """
    sensor_type = raw["type"]
    # Daylight sensors deliberately receive no v2 resources.
    if sensor_type == TYPE_DAYLIGHT:
        return DaylightSensor(id, raw, [], request)
    type_to_cls = {
        TYPE_CLIP_GENERICFLAG: CLIPGenericFlagSensor,
        TYPE_CLIP_GENERICSTATUS: CLIPGenericStatusSensor,
        TYPE_CLIP_HUMIDITY: CLIPHumiditySensor,
        TYPE_CLIP_LIGHTLEVEL: CLIPLightLevelSensor,
        TYPE_CLIP_OPENCLOSE: CLIPOpenCloseSensor,
        TYPE_CLIP_PRESENCE: CLIPPresenceSensor,
        TYPE_CLIP_SWITCH: CLIPSwitchSensor,
        TYPE_CLIP_TEMPERATURE: CLIPTemperatureSensor,
        TYPE_GEOFENCE: GeofenceSensor,
        TYPE_ZGP_SWITCH: ZGPSwitchSensor,
        TYPE_ZLL_LIGHTLEVEL: ZLLLightLevelSensor,
        TYPE_ZLL_PRESENCE: ZLLPresenceSensor,
        TYPE_ZLL_ROTARY: ZLLRotarySensor,
        TYPE_ZLL_SWITCH: ZLLSwitchSensor,
        TYPE_ZLL_TEMPERATURE: ZLLTemperatureSensor,
    }
    sensor_cls = type_to_cls.get(sensor_type, GenericSensor)
    return sensor_cls(id, raw, v2_resources, request)
|
spasche/aiohue | aiohue/bridge.py | <reponame>spasche/aiohue<filename>aiohue/bridge.py
from __future__ import annotations
import asyncio
from contextlib import asynccontextmanager
import logging
import aiohttp
from aiohttp import client_exceptions
from .clip import Clip
from .config import Config
from .errors import raise_error
from .groups import Groups
from .lights import Lights
from .scenes import Scenes
from .sensors import Sensors
_DEFAULT = object()
class Bridge:
    """Control a Hue bridge.

    Speaks both the v1 REST API (``request``) and the v2 "CLIP" API
    (``request_2`` / ``clip``).
    """

    def __init__(
        self,
        host: str,
        websession: aiohttp.ClientSession,
        *,
        username: str | None = None,
        bridge_id: str | None = None,
    ):
        self.host = host
        self.username = username
        self.websession = websession
        self._bridge_id = bridge_id
        # "https" or "http"; discovered on the first successful request.
        self.proto = None
        self.config = None
        self.groups = None
        self.lights = None
        self.scenes = None
        self.sensors = None
        self.clip = Clip(self.request_2)
        self.logger = logging.getLogger(f"{__name__}.{host}")
        # self.capabilities = None
        # self.rules = None
        # self.schedules = None

    @property
    def id(self):
        """Return the ID of the bridge."""
        if self.config is not None:
            return self.config.bridgeid
        return self._bridge_id

    async def create_user(self, device_type):
        """Create a user.

        https://developers.meethue.com/documentation/configuration-api#71_create_user
        """
        result = await self.request("post", "", {"devicetype": device_type}, auth=False)
        self.username = result[0]["success"]["username"]
        return self.username

    async def initialize(self):
        """Fetch the full v1 state and build the resource managers."""
        result = await self.request("get", "")
        self.config = Config(result.pop("config"), self.request)

        try:
            v2_resources = (await self.clip.resources())["data"]
        except client_exceptions.ClientResponseError as err:
            if err.status != 404:
                raise
            # Older hubs don't expose the v2 CLIP API.
            v2_resources = []

        self.groups = Groups(
            self.logger, result.pop("groups"), v2_resources, self.request
        )
        self.lights = Lights(
            self.logger, result.pop("lights"), v2_resources, self.request
        )
        if "scenes" in result:
            self.scenes = Scenes(
                self.logger, result.pop("scenes"), v2_resources, self.request
            )
        if "sensors" in result:
            self.sensors = Sensors(
                self.logger, result.pop("sensors"), v2_resources, self.request
            )
        self.logger.debug("Unused result: %s", result)

    async def request(self, method, path, json=None, auth=True):
        """Make a request to the v1 API.

        By default we assume we need to connect over `https`.
        Old bridges and incompatible emulators still use `http`, so on the
        first connection error we fall back once; the protocol that worked
        is remembered in `self.proto`.
        """
        if self.proto is None:
            proto = "https"
        else:
            proto = self.proto
        url = "{}://{}/api/".format(proto, self.host)
        if auth:
            url += "{}/".format(self.username)
        url += path

        try:
            async with self.websession.request(
                method, url, json=json, ssl=False
            ) as res:
                res.raise_for_status()
                # Store the protocol that worked
                if self.proto is None:
                    self.proto = proto
                data = await res.json()
                _raise_on_error(data)
                return data
        except client_exceptions.ClientConnectionError:
            if self.proto is not None:
                raise
            self.proto = "http"
            return await self.request(method, path, json, auth)

    @asynccontextmanager
    async def request_2(self, method, path, **kwargs):
        """Make a request to any path with Hue's new request method.

        This method has the auth in a header.
        """
        url = f"{self.proto or 'https'}://{self.host}/{path}"
        kwargs["ssl"] = False
        if "headers" not in kwargs:
            kwargs["headers"] = {}
        kwargs["headers"]["hue-application-key"] = self.username
        async with self.websession.request(method, url, **kwargs) as res:
            res.raise_for_status()
            yield res

    async def listen_events(self):
        """Listen to events and apply changes to objects.

        Async generator yielding each updated object; runs until cancelled.
        """
        pending_events = asyncio.Queue()

        async def receive_events():
            # Track the last seen event id ACROSS reconnects so the bridge
            # can replay events missed while disconnected (Last-Event-ID).
            # The original reset this inside the loop, defeating the resume.
            last_event_id = None
            while True:
                self.logger.debug("Subscribing to events")
                try:
                    async for events in self.clip.stream_events(last_event_id):
                        last_event_id = events["id"]
                        for event in events["data"]:
                            self.logger.debug("Received event: %s", event)
                            pending_events.put_nowait(event)
                except client_exceptions.ServerDisconnectedError:
                    self.logger.debug("Event endpoint disconnected")
                except client_exceptions.ClientError as err:
                    if isinstance(err, client_exceptions.ClientResponseError):
                        # We get 503 when it's too busy, but any other error
                        # is probably also because too busy.
                        self.logger.debug(
                            "Got status %s from endpoint. Sleeping while waiting to resolve",
                            err.status,
                        )
                    else:
                        self.logger.debug("Unable to reach event endpoint: %s", err)
                    await asyncio.sleep(5)
                except asyncio.TimeoutError:
                    pass
                except Exception:
                    self.logger.exception("Unexpected error")
                    # put_nowait, not put: Queue.put is a coroutine and the
                    # original call was never awaited, so the None sentinel
                    # was never actually enqueued.
                    pending_events.put_nowait(None)
                    break

        event_task = asyncio.create_task(receive_events())

        while True:
            try:
                event = await pending_events.get()
            except asyncio.CancelledError:
                event_task.cancel()
                try:
                    # Wait for the worker to acknowledge cancellation.
                    await event_task
                except asyncio.CancelledError:
                    pass
                raise

            # If unexpected error occurred
            if event is None:
                return

            if event["type"] not in ("update", "motion"):
                self.logger.debug("Unknown event type: %s", event)
                continue

            for event_data in event["data"]:
                # We don't track object that groups all items (bridge_home)
                if event_data["id_v1"] == "/groups/0":
                    continue

                item_type = event_data["id_v1"].split("/", 2)[1]
                if item_type not in (
                    # These all inherit from APIItems and so can handle events
                    "lights",
                    "sensors",
                    "scenes",
                    "groups",
                ):
                    self.logger.debug(
                        "Received %s event for unknown item type %s: %s",
                        event["type"],
                        item_type,
                        event_data,
                    )
                    continue

                obj = getattr(self, item_type).process_event(event["type"], event_data)
                # if obj is None, we didn't know the object
                # We could consider triggering a full refresh
                if obj is not None:
                    yield obj
def _raise_on_error(data):
"""Check response for error message."""
if isinstance(data, list):
data = data[0]
if isinstance(data, dict) and "error" in data:
raise_error(data["error"])
|
spasche/aiohue | aiohue/__init__.py | from .bridge import Bridge # noqa
from .errors import * # noqa
|
spasche/aiohue | aiohue/clip.py | <reponame>spasche/aiohue
import json
import logging
class Clip:
    """Represent the Hue v2 "CLIP" API of the bridge.

    ``request_2`` is an async-context-manager factory that performs an
    authenticated request and yields the aiohttp response.
    """

    def __init__(self, request_2):
        self._request_2 = request_2

    async def stream_events(self, last_event_id=None):
        """Async iterate over the incoming events.

        https://nchan.io/#eventsource

        Yields one dict per server-sent event, e.g. {"id": ..., "data": ...}.
        """
        kwargs = {"headers": {"Accept": "text/event-stream"}, "timeout": 0}
        if last_event_id is not None:
            kwargs["headers"]["Last-Event-ID"] = last_event_id
        async with self._request_2(
            "get",
            "eventstream/clip/v2",
            **kwargs,
        ) as resp:
            event = {}
            # First event is `{"": "hi"}`, which we will skip.
            skip_event = True
            async for line in resp.content:
                line = line.decode().strip()
                # A blank line terminates the current event object.
                if not line:
                    if skip_event:
                        skip_event = False
                    else:
                        yield event
                        # Start a fresh dict: the original reused one dict
                        # for every event, so the consumer's reference was
                        # mutated later and keys leaked into later events.
                        event = {}
                    continue
                elif skip_event:
                    continue
                try:
                    key, value = line.split(": ", 1)
                    if key == "data":
                        value = json.loads(value)
                    event[key] = value
                except ValueError:
                    logging.getLogger(__name__).error("Unexpected event data: %s", line)
                    skip_event = True
                    event = {}

    async def resources(self):
        """Fetch resources from Hue.

        Available types:
            homekit
            device
            bridge
            zigbee_connectivity
            entertainment
            light
            bridge_home
            grouped_light
            room
            scene
        """
        async with self._request_2("get", "clip/v2/resource") as resp:
            return await resp.json()
|
spasche/aiohue | aiohue/lights.py | from collections import namedtuple
from .api import APIItems
# Represents a CIE 1931 XY coordinate pair.
XYPoint = namedtuple("XYPoint", ["x", "y"])

# Represents the Gamut of a light; each field is an XYPoint primary.
GamutType = namedtuple("GamutType", ["red", "green", "blue"])
class Lights(APIItems):
    """Represents Hue Lights.

    https://developers.meethue.com/documentation/lights-api
    """

    def __init__(self, logger, raw, v2_resources, request):
        # "lights" is the v1 API resource path; Light wraps each raw entry.
        super().__init__(logger, raw, v2_resources, request, "lights", Light)
class Light:
    """Represents a Hue light."""

    ITEM_TYPE = "lights"

    def __init__(self, id, raw, v2_resources, request):
        self.id = id
        self.raw = raw
        self._request = request

    @property
    def uniqueid(self):
        return self.raw["uniqueid"]

    @property
    def manufacturername(self):
        return self.raw["manufacturername"]

    @property
    def modelid(self):
        return self.raw["modelid"]

    @property
    def productname(self):
        # productname added in Bridge API 1.24 (published 03/05/2018)
        return self.raw.get("productname")

    @property
    def name(self):
        return self.raw["name"]

    @property
    def state(self):
        return self.raw["state"]

    @property
    def type(self):
        return self.raw["type"]

    @property
    def swversion(self):
        """Software version of the light."""
        return self.raw["swversion"]

    @property
    def swupdatestate(self):
        """Software update state of the light."""
        return self.raw.get("swupdate", {}).get("state")

    @property
    def controlcapabilities(self):
        """Capabilities that the light has to control it."""
        return self.raw.get("capabilities", {}).get("control", {})

    @property
    def colorgamuttype(self):
        """The color gamut type of the light."""
        return self.controlcapabilities.get("colorgamuttype", "None")

    @property
    def colorgamut(self):
        """The color gamut information of the light, or None if unknown."""
        try:
            corners = self.controlcapabilities["colorgamut"]
            gamut = GamutType(*(XYPoint(*corner) for corner in corners))
        except KeyError:
            gamut = None
        return gamut

    def process_update_event(self, update):
        """Fold a v2 update event into the cached v1 state dict."""
        new_state = dict(self.state)
        color = update.get("color")
        if color:
            new_state["xy"] = [color["xy"]["x"], color["xy"]["y"]]
        color_temp = update.get("color_temperature")
        if color_temp:
            new_state["ct"] = color_temp["mirek"]
        if "on" in update:
            new_state["on"] = update["on"]["on"]
        dimming = update.get("dimming")
        if dimming:
            # v2 brightness is 0-100; v1 bri is 0-254.
            new_state["bri"] = int(dimming["brightness"] / 100 * 254)
        # Receiving an event implies the light is reachable.
        new_state["reachable"] = True
        self.raw = {**self.raw, "state": new_state}

    async def set_state(
        self,
        on=None,
        bri=None,
        hue=None,
        sat=None,
        xy=None,
        ct=None,
        alert=None,
        effect=None,
        transitiontime=None,
        bri_inc=None,
        sat_inc=None,
        hue_inc=None,
        ct_inc=None,
        xy_inc=None,
    ):
        """Change state of a light; only non-None fields are sent."""
        candidates = (
            ("on", on), ("bri", bri), ("hue", hue), ("sat", sat),
            ("xy", xy), ("ct", ct), ("alert", alert), ("effect", effect),
            ("transitiontime", transitiontime), ("bri_inc", bri_inc),
            ("sat_inc", sat_inc), ("hue_inc", hue_inc), ("ct_inc", ct_inc),
            ("xy_inc", xy_inc),
        )
        payload = {}
        for key, value in candidates:
            if value is not None:
                payload[key] = value
        await self._request("put", "lights/{}/state".format(self.id), json=payload)
|
rishabhiitbhu/deep-learning-from-scratch | utils/lr_utils.py | import _pickle as cPickle
import gzip
import numpy as np
from matplotlib import pyplot as plt
def get_relevant_data(data):
    """Filter an (images, labels) pair down to the 0/1 classes.

    Args:
        data: tuple (X, Y) where X is an (m, 784)-like array of images and
            Y holds the digit labels.

    Returns:
        Tuple (X, Y) with X of shape (784, m') (samples as columns) and Y of
        shape (1, m'), keeping only samples labeled 0 or 1.
    """
    images = np.asarray(data[0])
    labels = np.asarray(data[1])
    # Vectorized boolean-mask selection instead of a Python-level loop.
    mask = (labels == 0) | (labels == 1)
    X = images[mask].T
    Y = labels[mask].reshape(1, -1)
    # X.shape = (784, m), Y.shape = (1, m); m is the number of kept images
    return (X, Y)
def load_data():
    '''
    MNIST dataset contains 28*28 pixel images of 0-9 digits
    Loads the .pkl.gz file, returns train and validation set for 0 and 1 images only.
    '''
    # Download dataset from https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/data/mnist.pkl.gz
    # Context manager guarantees the file handle is closed even if
    # unpickling raises (the original closed it manually).
    with gzip.open('data/mnist.pkl.gz', 'rb') as f:
        training_data, validation_data, test_data = cPickle.load(f, encoding="latin1")
    train = get_relevant_data(training_data)
    val = get_relevant_data(validation_data)
    return (train, val)
def plot_training(costs_train, costs_val):
    """Plot the training and validation cost curves against epoch index."""
    xs = range(len(costs_train))
    for curve in (costs_train, costs_val):
        plt.plot(xs, curve)
    plt.legend(['train', 'val'], loc='upper left')
    plt.title('Cost')
    plt.show()
if __name__ == "main":
pass |
rishabhiitbhu/deep-learning-from-scratch | utils/sc_utils.py | import _pickle as cPickle
import gzip
import numpy as np
from matplotlib import pyplot as plt
def load_data():
    '''
    MNIST dataset contains 28*28 pixel images of 0-9 digits
    Loads the .pkl.gz file, returns train and validation sets with samples as
    columns and labels one-hot encoded (all 10 digit classes).
    '''
    # Download dataset from https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/data/mnist.pkl.gz
    # Context manager guarantees the file handle is closed even if
    # unpickling raises (the original closed it manually).
    with gzip.open('data/mnist.pkl.gz', 'rb') as f:
        training_data, validation_data, test_data = cPickle.load(f, encoding="latin1")
    X_train, Y_train = training_data
    X_val, Y_val = validation_data
    # Transpose so each sample is a column: (784, m).
    X_train = X_train.T
    X_val = X_val.T
    # One-hot encode labels, then transpose to (10, m).
    Y_train = np.eye(10)[Y_train].T
    Y_val = np.eye(10)[Y_val].T
    return ((X_train, Y_train), (X_val, Y_val))
def plot_training(costs_train, costs_val):
    """Plot the training and validation cost curves against epoch index."""
    xs = range(len(costs_train))
    for curve in (costs_train, costs_val):
        plt.plot(xs, curve)
    plt.legend(['train', 'val'], loc='upper left')
    plt.title('Cost')
    plt.show()
if __name__ == "main":
pass |
BestSonny/open-vot-1 | lib/trackers/kcf.py | from __future__ import absolute_import, division
import numpy as np
import cv2
from . import Tracker
from ..utils import dict2tuple
from ..utils.complex import real, fft2, ifft2, complex_add, complex_mul, complex_div, fftshift
from ..descriptors.fhog import fast_hog
class TrackerKCF(Tracker):
    """Kernelized Correlation Filter tracker.

    Learns a correlation filter over HOG features in the Fourier domain and
    localizes the target as the (sub-pixel refined) peak of the response.
    """

    def __init__(self, **kargs):
        super(TrackerKCF, self).__init__('KCF')
        self.parse_args(**kargs)
        self._correlation = self.setup_kernel(self.cfg.kernel_type)

    def parse_args(self, **kargs):
        """Merge user-supplied options into the default configuration."""
        self.cfg = {
            'lambda_': 1e-4,
            'padding': 1.5,
            'output_sigma_factor': 0.125,
            'interp_factor': 0.012,
            'sigma': 0.6,
            'poly_a': 1,
            'poly_b': 7,
            'cell_size': 4,
            'kernel_type': 'gaussian'}
        for key, val in kargs.items():
            self.cfg.update({key: val})
        self.cfg = dict2tuple(self.cfg)

    def setup_kernel(self, kernel_type):
        """Return the kernel-correlation function for the configured kernel."""
        assert kernel_type in ['linear', 'polynomial', 'gaussian']
        if kernel_type == 'linear':
            return lambda x1, x2: self._linear_correlation(x1, x2)
        elif kernel_type == 'polynomial':
            return lambda x1, x2: self._polynomial_correlation(
                x1, x2, self.cfg.poly_a, self.cfg.poly_b)
        elif kernel_type == 'gaussian':
            return lambda x1, x2: self._gaussian_correlation(
                x1, x2, self.cfg.sigma)

    def init(self, image, init_rect):
        """Initialize the tracker on the first frame.

        Args:
            image: grayscale or BGR frame as a numpy array.
            init_rect: [x, y, w, h] bounding box (numpy array).
        """
        # initialize parameters
        self.resize_image = False
        if np.sqrt(init_rect[2:].prod()) > 100:
            # Large targets are tracked at half resolution for speed.
            self.resize_image = True
            init_rect = init_rect / 2
        self.t_center = init_rect[:2] + init_rect[2:] / 2
        self.t_sz = init_rect[2:]
        mod = self.cfg.cell_size * 2
        self.padded_sz = self.t_sz * (1 + self.cfg.padding)
        self.padded_sz = self.padded_sz.astype(int) // mod * mod + mod

        # get feature size and initialize hanning window
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        if self.resize_image:
            size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
            image = cv2.resize(image, size)
        self.z = self._crop(image, self.t_center, self.padded_sz)
        self.z = fast_hog(np.float32(self.z), self.cfg.cell_size)
        self.feat_sz = self.z.shape
        self.hann_window = np.outer(
            np.hanning(self.feat_sz[0]),
            np.hanning(self.feat_sz[1])).astype(np.float32)
        self.hann_window = self.hann_window[:, :, np.newaxis]
        self.z *= self.hann_window

        # create gaussian labels centered on the target
        output_sigma = self.cfg.output_sigma_factor * \
            np.sqrt(np.prod(self.feat_sz[:2])) / (1 + self.cfg.padding)
        rs, cs = np.ogrid[:self.feat_sz[0], :self.feat_sz[1]]
        rs, cs = rs - self.feat_sz[0] // 2, cs - self.feat_sz[1] // 2
        y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
        self.yf = fft2(y)

        # train classifier: alphaf = yf / (fft2(k) + lambda)
        k = self._correlation(self.z, self.z)
        self.alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))

    def update(self, image):
        """Locate the target in a new frame and update the model.

        Returns:
            [x, y, w, h] bounding box in the original image scale.
        """
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        if self.resize_image:
            size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
            image = cv2.resize(image, size)

        # locate target as the peak of the filter response
        x = self._crop(image, self.t_center, self.padded_sz)
        x = self.hann_window * fast_hog(np.float32(x), self.cfg.cell_size)
        k = self._correlation(x, self.z)
        score = real(ifft2(complex_mul(self.alphaf, fft2(k))))
        offset = self._locate_target(score)
        self.t_center += offset * self.cfg.cell_size
        # limit the estimated bounding box to be overlapped with the image
        self.t_center = np.clip(
            self.t_center, -self.t_sz / 2 + 2,
            image.shape[1::-1] + self.t_sz / 2 - 1)

        # update model with a running average (interp_factor)
        new_z = self._crop(image, self.t_center, self.padded_sz)
        new_z = self.hann_window * fast_hog(np.float32(new_z), self.cfg.cell_size)
        k = self._correlation(new_z, new_z)
        new_alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))
        self.alphaf = (1 - self.cfg.interp_factor) * self.alphaf + \
            self.cfg.interp_factor * new_alphaf
        self.z = (1 - self.cfg.interp_factor) * self.z + \
            self.cfg.interp_factor * new_z

        bndbox = np.concatenate([
            self.t_center - self.t_sz / 2, self.t_sz])
        if self.resize_image:
            bndbox = bndbox * 2

        return bndbox

    def _crop(self, image, center, size):
        """Crop a patch around `center`, replicating borders when needed."""
        corners = np.zeros(4, dtype=int)
        corners[:2] = np.floor(center - size / 2).astype(int)
        corners[2:] = corners[:2] + size
        pads = np.concatenate(
            (-corners[:2], corners[2:] - image.shape[1::-1]))
        pads = np.maximum(0, pads)

        if np.any(pads > 0):
            corners = np.concatenate((
                corners[:2] + pads[:2],
                corners[2:] - pads[2:])).astype(int)

        patch = image[corners[1]:corners[3], corners[0]:corners[2]]

        if np.any(pads > 0):
            patch = cv2.copyMakeBorder(
                patch, pads[1], pads[3], pads[0], pads[2],
                borderType=cv2.BORDER_REPLICATE)

        return patch

    def _channel_xcorr(self, x1, x2):
        """Sum of per-channel circular cross-correlations, center-shifted.

        Shared by all three kernels (the original duplicated this loop).
        """
        xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
        for i in range(self.feat_sz[2]):
            xcorr_ = cv2.mulSpectrums(
                fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
            xcorr += real(ifft2(xcorr_))
        return fftshift(xcorr)

    def _linear_correlation(self, x1, x2):
        """Linear kernel: normalized cross-correlation."""
        return self._channel_xcorr(x1, x2) / x1.size

    def _polynomial_correlation(self, x1, x2, a, b):
        """Polynomial kernel: (x1.x2 / N + a) ** b."""
        return (self._channel_xcorr(x1, x2) / x1.size + a) ** b

    def _gaussian_correlation(self, x1, x2, sigma):
        """Gaussian kernel exp(-||x1 - x2||^2 / sigma^2), evaluated via FFT."""
        xcorr = self._channel_xcorr(x1, x2)
        out = (np.sum(x1 * x1) + np.sum(x2 * x2) - 2.0 * xcorr) / x1.size
        out[out < 0] = 0
        # Use the `sigma` argument: the original read self.cfg.sigma here,
        # silently ignoring the parameter (same value via setup_kernel, but
        # the parameter was dead and overrides were impossible).
        out = np.exp(-out / sigma ** 2)
        return out

    def _locate_target(self, score):
        """Return the sub-pixel offset of the response peak from the center."""
        def subpixel_peak(left, center, right):
            divisor = 2 * center - left - right
            if abs(divisor) < 1e-3:
                return 0
            return 0.5 * (right - left) / divisor

        _, _, _, max_loc = cv2.minMaxLoc(score)
        loc = np.float32(max_loc)

        if max_loc[0] in range(1, score.shape[1] - 1):
            loc[0] += subpixel_peak(
                score[max_loc[1], max_loc[0] - 1],
                score[max_loc[1], max_loc[0]],
                score[max_loc[1], max_loc[0] + 1])
        if max_loc[1] in range(1, score.shape[0] - 1):
            loc[1] += subpixel_peak(
                score[max_loc[1] - 1, max_loc[0]],
                score[max_loc[1], max_loc[0]],
                score[max_loc[1] + 1, max_loc[0]])
        offset = loc - np.float32(score.shape[1::-1]) / 2

        return offset
class TrackerDCF(TrackerKCF):
    """KCF restricted to the linear kernel, i.e. the DCF tracker."""

    def __init__(self, **kargs):
        # Force the linear kernel regardless of user options.
        kargs['kernel_type'] = 'linear'
        super(TrackerDCF, self).__init__(**kargs)
|
BestSonny/open-vot-1 | lib/utils/complex.py | from __future__ import absolute_import
import cv2
import numpy as np
import torch
import numbers
import torch
def real(img):
    """Return the real channel of a packed-complex array (last dim = 2)."""
    return img[..., 0]
def imag(img):
    """Return the imaginary channel of a packed-complex array (last dim = 2)."""
    return img[..., 1]
def conj(img):
    """Return the complex conjugate (negated imaginary channel) as a copy."""
    out = img.copy()
    out[..., 1] *= -1
    return out
def fft2(img):
    """Forward 2-D DFT per channel.

    Input is a real (H, W) or (H, W, C) array; the output packs complex
    values in a trailing dim of size 2, i.e. (H, W, 2) or (H, W, C, 2).
    """
    img = np.float32(img)
    if img.ndim == 2:
        out = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
    elif img.ndim == 3:
        out = []
        for c in range(img.shape[2]):
            out.append(cv2.dft(
                img[..., c], flags=cv2.DFT_COMPLEX_OUTPUT))
        # Re-pack per-channel spectra as (H, W, C, 2).
        out = np.stack(out, axis=2)
    else:
        raise Exception('only supports 2 or 3 dimensional array')
    return out
def ifft2(img):
    """Inverse 2-D DFT of packed-complex input.

    Accepts (H, W, 2) or, per channel, (H, W, C, 2) arrays and returns an
    array of the same shape (complex output; use real() to extract).
    """
    img = np.float32(img)
    if img.ndim == 3:
        out = cv2.dft(img, flags=cv2.DFT_INVERSE | cv2.DFT_SCALE)
    elif img.ndim == 4:
        out = []
        for c in range(img.shape[2]):
            out.append(cv2.dft(
                img[:, :, c, :], flags=cv2.DFT_INVERSE | cv2.DFT_SCALE))
        # Mirror fft2 and re-pack as (H, W, C, 2): the original returned a
        # bare Python list of per-channel arrays here.
        out = np.stack(out, axis=2)
    else:
        raise Exception('only supports 3 or 4 dimensional array')
    return out
def fft1(img):
    """Row-wise forward 1-D DFT; returns packed complex values (..., 2)."""
    img = np.float32(img)
    if img.ndim == 1:
        # Promote to a single row so cv2 can apply DFT_ROWS, then squeeze.
        img = img[np.newaxis, :]
        out = cv2.dft(img, flags=cv2.DFT_ROWS | cv2.DFT_COMPLEX_OUTPUT)
        out = out.squeeze(0)
    elif img.ndim == 2:
        out = cv2.dft(img, flags=cv2.DFT_ROWS | cv2.DFT_COMPLEX_OUTPUT)
    else:
        raise Exception('only supports 1 or 2 dimensional array')
    return out
def ifft1(img):
    """Row-wise inverse 1-D DFT of packed-complex input."""
    img = np.float32(img)
    if img.ndim == 2:
        img = img[np.newaxis, :, :]
        # DFT_INVERSE added: the original passed only DFT_ROWS | DFT_SCALE,
        # which computes a scaled *forward* transform, not the inverse.
        out = cv2.dft(img, flags=cv2.DFT_ROWS | cv2.DFT_INVERSE | cv2.DFT_SCALE)
        out = out.squeeze(0)
    elif img.ndim == 3:
        out = cv2.dft(img, flags=cv2.DFT_ROWS | cv2.DFT_INVERSE | cv2.DFT_SCALE)
    else:
        raise Exception('only supports 2 or 3 dimensional array')
    return out
def complex_add(a, b):
    """Add b to packed-complex a.

    A scalar b is added to the real channel only; an array b is added
    elementwise (complex addition).
    """
    out = a.copy()
    if isinstance(b, numbers.Number):
        out[..., 0] = out[..., 0] + b
    else:
        out = out + b
    return out
def complex_mul(a, b):
    """Elementwise product of two packed-complex arrays (with broadcasting)."""
    a, b = np.broadcast_arrays(a, b)
    ar, ai = a[..., 0], a[..., 1]
    br, bi = b[..., 0], b[..., 1]
    out = a.copy()
    out[..., 0] = ar * br - ai * bi
    out[..., 1] = ar * bi + ai * br
    return out
def complex_div(a, b):
    """Elementwise a / b for packed-complex arrays (with broadcasting).

    Uses the identity a / b = a * conj(b) / |b|^2.
    """
    a, b = np.broadcast_arrays(a, b)
    out = a.copy()
    divisor = b[..., 0] ** 2 + b[..., 1] ** 2
    out[..., 0] = (a[..., 0] * b[..., 0] +
                   a[..., 1] * b[..., 1]) / divisor
    # Imaginary part of a*conj(b) is a_i*b_r - a_r*b_i; the original used
    # '+' here, computing the wrong quotient (compare
    # tensor_complex_division below, which has the correct sign).
    out[..., 1] = (a[..., 1] * b[..., 0] -
                   a[..., 0] * b[..., 1]) / divisor
    return out
def tensor_complex_mul(x, z):
    """Complex multiply two tensors whose last dim packs (real, imag)."""
    xr, xi = x[..., 0], x[..., 1]
    zr, zi = z[..., 0], z[..., 1]
    return torch.stack((xr * zr - xi * zi, xr * zi + xi * zr), -1)
def tensor_complex_mulconj(x, z):
    """Compute x * conj(z) for (..., 2)-packed complex tensors."""
    xr, xi = x[..., 0], x[..., 1]
    zr, zi = z[..., 0], z[..., 1]
    return torch.stack((xr * zr + xi * zi, xi * zr - xr * zi), -1)
def tensor_complex_conj(x):
    """Complex conjugate of a (..., 2)-packed tensor (negated imag channel)."""
    return torch.stack((x[..., 0], -x[..., 1]), -1)
def tensor_complex_division(x, z):
    """Compute x / z via x * conj(z) / |z|^2 for (..., 2)-packed tensors."""
    xr, xi = x[..., 0], x[..., 1]
    zr, zi = z[..., 0], z[..., 1]
    magnitude_sq = zr ** 2 + zi ** 2
    return torch.stack(
        ((xr * zr + xi * zi) / magnitude_sq,
         (xi * zr - xr * zi) / magnitude_sq), -1)
def fftshift(img):
    """Shift the zero-frequency bin to the array centre (1-D or 2-D).

    Matches numpy.fft.fftshift: each axis is rolled by n // 2.
    """
    if img.ndim == 1:
        return np.roll(img, img.shape[0] // 2)
    if img.ndim == 2:
        h, w = img.shape
        return np.roll(img, (h // 2, w // 2), axis=(0, 1))
    raise Exception('only supports 1 or 2 dimensional array')
def ifftshift(img):
    """Undo fftshift: move the centre bin back to index 0 (1-D or 2-D).

    Matches numpy.fft.ifftshift: each axis is rolled by -(n // 2).
    """
    if img.ndim == 1:
        return np.roll(img, -(img.shape[0] // 2))
    if img.ndim == 2:
        h, w = img.shape
        return np.roll(img, (-(h // 2), -(w // 2)), axis=(0, 1))
    raise Exception('only supports 1 or 2 dimensional array')
|
BestSonny/open-vot-1 | tests/models/test_alexnet.py | from __future__ import absolute_import, print_function
import unittest
import torch
import random
import time
from lib.models import AlexNetV1, AlexNetV2
class TestAlexNet(unittest.TestCase):
    """Smoke tests for the AlexNet backbones in train and eval modes."""

    def setUp(self):
        self.x = torch.randn((2, 3, 224, 224))

    def tearDown(self):
        pass

    def _check_net(self, net):
        """Run one forward pass in train and eval mode and verify flags.

        The two test methods were byte-for-byte duplicates except for the
        network class; the shared body lives here. The final check asserts
        that train/eval outputs differ (modes change the forward pass).
        """
        with torch.set_grad_enabled(True):
            net.train()
            start = time.time()
            out_train = net(self.x)
            print('inference time of training: %.3f' % (time.time() - start))
            self.assertTrue(out_train.requires_grad)
            self.assertTrue(net.training)

        with torch.set_grad_enabled(False):
            net.eval()
            start = time.time()
            out_eval = net(self.x)
            print('inference time of test: %.3f' % (time.time() - start))
            self.assertFalse(out_eval.requires_grad)
            self.assertFalse(net.training)

        self.assertNotAlmostEqual(
            out_train.mean().item(), out_eval.mean().item())

    def test_alexnet_v1(self):
        self._check_net(AlexNetV1())

    def test_alexnet_v2(self):
        self._check_net(AlexNetV2())


if __name__ == '__main__':
    unittest.main()
|
BestSonny/open-vot-1 | lib/datasets/pairwise.py | from __future__ import absolute_import
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
class Pairwise(Dataset):
    """Samples (exemplar, search) frame pairs from a sequence dataset.

    Each item is (img_z, img_x, bndbox_z, bndbox_x), optionally followed by
    the two frame indices, optionally passed through `transform`.
    """

    def __init__(self, base_dataset, transform=None, pairs_per_video=25,
                 frame_range=100, causal=False, return_index=False,
                 rand_choice=True, subset='train', train_ratio=0.95):
        super(Pairwise, self).__init__()
        assert subset in ['train', 'val']
        self.base_dataset = base_dataset
        self.transform = transform
        self.pairs_per_video = pairs_per_video
        self.frame_range = frame_range
        self.causal = causal
        self.return_index = return_index
        self.rand_choice = rand_choice

        total = len(self.base_dataset)
        # Clamp the split point so both subsets keep at least 10 videos.
        boundary = np.clip(int(total * train_ratio), 10, total - 10)
        if subset == 'train':
            self.indices = np.tile(
                np.arange(0, boundary, dtype=int), pairs_per_video)
        elif subset == 'val':
            self.indices = np.arange(boundary, total, dtype=int)

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError('list index out of range')
        if self.rand_choice:
            video_id = np.random.choice(self.indices)
        else:
            video_id = self.indices[index]
        img_files, anno = self.base_dataset[video_id]

        frame_z, frame_x = self._sample_pair(len(img_files))
        img_z = Image.open(img_files[frame_z])
        img_x = Image.open(img_files[frame_x])
        # NOTE(review): both images are converted only when the exemplar is
        # grayscale; a grayscale search image with an RGB exemplar stays
        # unconverted — behavior preserved as-is, confirm if intentional.
        if img_z.mode == 'L':
            img_z = img_z.convert('RGB')
            img_x = img_x.convert('RGB')
        bndbox_z = anno[frame_z, :]
        bndbox_x = anno[frame_x, :]

        if self.return_index:
            item = (img_z, img_x, bndbox_z, bndbox_x, frame_z, frame_x)
        else:
            item = (img_z, img_x, bndbox_z, bndbox_x)
        if self.transform is None:
            return item
        return self.transform(*item)

    def __len__(self):
        return len(self.indices)

    def _sample_pair(self, n):
        """Draw an exemplar index z and a search index x within frame_range."""
        rand_z = np.random.choice(n - 1 if self.causal else n)
        if self.frame_range == 0:
            return rand_z, rand_z
        candidates = np.intersect1d(
            np.arange(rand_z - self.frame_range,
                      rand_z + self.frame_range + 1),
            np.arange(n))
        if self.causal:
            candidates = candidates[candidates > rand_z]
        else:
            candidates = candidates[candidates != rand_z]
        return rand_z, np.random.choice(candidates)
|
BestSonny/open-vot-1 | tests/trackers/test_mosse.py | from __future__ import absolute_import, print_function
import unittest
import random
from lib.trackers import TrackerMOSSE
from lib.datasets import OTB
class TestTrackerMOSSE(unittest.TestCase):
    """End-to-end smoke test for the MOSSE tracker on an OTB sequence."""

    def setUp(self):
        self.otb_dir = 'data/OTB'

    def tearDown(self):
        pass

    def test_mosse(self):
        sequences = OTB(self.otb_dir, download=True)
        tracker = TrackerMOSSE()
        frames, groundtruth = random.choice(sequences)
        boxes, fps = tracker.track(
            frames, groundtruth[0, :], visualize=True)
        self.assertEqual(boxes.shape, groundtruth.shape)


if __name__ == '__main__':
    unittest.main()
|
BestSonny/open-vot-1 | lib/trackers/dcf.py | from __future__ import absolute_import, division
import numpy as np
import cv2
from . import Tracker
from ..utils import dict2tuple
from ..utils.complex import real, conj, fft2, ifft2, complex_add, complex_mul, complex_div
from ..descriptors.fhog import fast_hog
class TrackerDCF(Tracker):
    def __init__(self, **kargs):
        # Register the tracker under the name 'DCF' and apply user options.
        super(TrackerDCF, self).__init__('DCF')
        self.parse_args(**kargs)
def parse_args(self, **kargs):
self.cfg = {
'lambda_': 1e-4,
'padding': 1.5,
'output_sigma_factor': 0.125,
'interp_factor': 0.012,
'cell_size': 4}
for key, val in kargs.items():
self.cfg.update({key: val})
self.cfg = dict2tuple(self.cfg)
def init(self, image, init_rect):
# initialize parameters
self.resize_image = False
if np.sqrt(init_rect[2:].prod()) > 100:
self.resize_image = True
init_rect = init_rect / 2
self.t_center = init_rect[:2] + init_rect[2:] / 2
self.t_sz = init_rect[2:]
mod = self.cfg.cell_size * 2
self.padded_sz = self.t_sz * (1 + self.cfg.padding)
self.padded_sz = self.padded_sz.astype(int) // mod * mod + mod
# get feature size and initialize hanning window
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if self.resize_image:
size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
image = cv2.resize(image, size)
z = self._crop(image, self.t_center, self.padded_sz)
z = fast_hog(np.float32(z), self.cfg.cell_size)
self.feat_sz = z.shape
self.hann_window = np.outer(
np.hanning(self.feat_sz[0]),
np.hanning(self.feat_sz[1])).astype(np.float32)
self.hann_window = self.hann_window[:, :, np.newaxis]
self.zf = fft2(z * self.hann_window)
# create gaussian labels
output_sigma = self.cfg.output_sigma_factor * \
np.sqrt(np.prod(self.feat_sz[:2])) / (1 + self.cfg.padding)
rs, cs = np.ogrid[:self.feat_sz[0], :self.feat_sz[1]]
rs, cs = rs - self.feat_sz[0] // 2, cs - self.feat_sz[1] // 2
y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.yf = fft2(y)
# train classifier
kf = self._linear_correlation(self.zf, self.zf)
self.alphaf = complex_div(self.yf, complex_add(kf, self.cfg.lambda_))
def update(self, image):
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if self.resize_image:
size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
image = cv2.resize(image, size)
# locate target
x = self._crop(image, self.t_center, self.padded_sz)
x = self.hann_window * fast_hog(np.float32(x), self.cfg.cell_size)
kf = self._linear_correlation(fft2(x), self.zf)
score = real(ifft2(complex_mul(self.alphaf, kf)))
offset = self._locate_target(score)
self.t_center += offset * self.cfg.cell_size
# limit the estimated bounding box to be overlapped with the image
self.t_center = np.clip(
self.t_center, -self.t_sz / 2 + 2,
image.shape[1::-1] + self.t_sz / 2 - 1)
# update model
new_z = self._crop(image, self.t_center, self.padded_sz)
new_z = fast_hog(np.float32(new_z), self.cfg.cell_size)
new_zf = fft2(new_z * self.hann_window)
kf = self._linear_correlation(new_zf, new_zf)
new_alphaf = complex_div(self.yf, complex_add(kf, self.cfg.lambda_))
self.alphaf = (1 - self.cfg.interp_factor) * self.alphaf + \
self.cfg.interp_factor * new_alphaf
self.zf = (1 - self.cfg.interp_factor) * self.zf + \
self.cfg.interp_factor * new_zf
bndbox = np.concatenate([
self.t_center - self.t_sz / 2, self.t_sz])
if self.resize_image:
bndbox = bndbox * 2
return bndbox
def _crop(self, image, center, size):
corners = np.zeros(4, dtype=int)
corners[:2] = np.floor(center - size / 2).astype(int)
corners[2:] = corners[:2] + size
pads = np.concatenate(
(-corners[:2], corners[2:] - image.shape[1::-1]))
pads = np.maximum(0, pads)
if np.any(pads > 0):
corners = np.concatenate((
corners[:2] + pads[:2],
corners[2:] - pads[2:])).astype(int)
patch = image[corners[1]:corners[3], corners[0]:corners[2]]
if np.any(pads > 0):
patch = cv2.copyMakeBorder(
patch, pads[1], pads[3], pads[0], pads[2],
borderType=cv2.BORDER_REPLICATE)
return patch
def _linear_correlation(self, x1f, x2f):
xcorr = complex_mul(x1f, conj(x2f))
xcorr = np.sum(xcorr, axis=2) / x1f.size
return xcorr
def _locate_target(self, score):
def subpixel_peak(left, center, right):
divisor = 2 * center - left - right
if abs(divisor) < 1e-3:
return 0
return 0.5 * (right - left) / divisor
_, _, _, max_loc = cv2.minMaxLoc(score)
loc = np.float32(max_loc)
if max_loc[0] in range(1, score.shape[1] - 1):
loc[0] += subpixel_peak(
score[max_loc[1], max_loc[0] - 1],
score[max_loc[1], max_loc[0]],
score[max_loc[1], max_loc[0] + 1])
if max_loc[1] in range(1, score.shape[0] - 1):
loc[1] += subpixel_peak(
score[max_loc[1] - 1, max_loc[0]],
score[max_loc[1], max_loc[0]],
score[max_loc[1] + 1, max_loc[0]])
offset = loc - np.float32(score.shape[1::-1]) / 2
return offset
|
BestSonny/open-vot-1 | tests/trackers/test_goturn.py | from __future__ import absolute_import, print_function
import unittest
import random
from torch.utils.data import DataLoader
from lib.trackers import TrackerGOTURN
from lib.datasets import VOT, Pairwise
from lib.transforms import TransformGOTURN
class TestTrackerGOTURN(unittest.TestCase):
    """Smoke tests for GOTURN: tracking one VOT sequence and a short
    training/validation loop over sampled frame pairs."""

    def setUp(self):
        self.vot_dir = 'data/vot2017'
        self.net_path = 'pretrained/goturn/tracker.pth'

    def tearDown(self):
        pass

    def test_goturn_track(self):
        dataset = VOT(self.vot_dir, return_rect=True, download=True)
        tracker = TrackerGOTURN(self.net_path)
        seq_files, seq_anno = random.choice(dataset)
        bndboxes, speed = tracker.track(
            seq_files, seq_anno[0, :], visualize=True)
        self.assertEqual(bndboxes.shape, seq_anno.shape)

    def test_goturn_train(self):
        tracker = TrackerGOTURN(net_path=self.net_path)
        transform = TransformGOTURN()
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        pair_dataset = Pairwise(
            base_dataset, transform, pairs_per_video=1,
            frame_range=1, causal=True)
        loader = DataLoader(pair_dataset, batch_size=2, shuffle=True)
        # training loop (learning rate is (re)set on the first iteration)
        for it, batch in enumerate(loader):
            loss = tracker.step(batch, backward=True, update_lr=(it == 0))
            print('Iter: {} Loss: {:.6f}'.format(it + 1, loss))
        # validation loop
        for it, batch in enumerate(loader):
            loss = tracker.step(batch, backward=False)
            print('Val. Iter: {} Loss: {:.6f}'.format(it + 1, loss))
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
BestSonny/open-vot-1 | tests/utils/test_warp.py | <filename>tests/utils/test_warp.py
from __future__ import absolute_import, print_function, division
import unittest
import random
import cv2
import torch
import numpy as np
from PIL import Image
from lib.utils.warp import pad_pil, crop_pil, pad_array, crop_array, crop_tensor, resize_tensor
from lib.utils.viz import show_frame
from lib.datasets import OTB
class TestWarp(unittest.TestCase):
    """Visual smoke tests for the PIL / OpenCV / PyTorch warp utilities.

    Each test picks a random OTB sequence and random warp parameters, then
    displays the warped frames so a human can eyeball the result.
    """

    def setUp(self):
        self.otb_dir = 'data/OTB'

    def tearDown(self):
        pass

    def _read_rgb(self, img_file):
        # read with OpenCV and normalize to a 3-channel RGB array
        image = cv2.imread(img_file)
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        elif image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def test_pad_pil(self):
        dataset = OTB(self.otb_dir, download=True)
        npad = random.choice([0, 10, 50])
        padding = random.choice([None, 0, 'avg'])
        print('[PIL-pad] padding:', padding, 'npad:', npad)
        img_files, anno = random.choice(dataset)
        for img_file in img_files:
            padded = pad_pil(Image.open(img_file), npad, padding=padding)
            show_frame(padded, fig_n=1)

    def test_crop_pil(self):
        dataset = OTB(self.otb_dir, download=True)
        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([None, 255])
        print('[PIL-crop] padding:', padding, 'out_size:', out_size)
        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            bndbox = anno[f, :]
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_pil(Image.open(img_file), center, bndbox[2:],
                             padding=padding, out_size=out_size)
            show_frame(patch, fig_n=2, pause=0.1)

    def test_pad_array(self):
        dataset = OTB(self.otb_dir, download=True)
        npad = random.choice([0, 10, 50])
        padding = random.choice([None, 0, 'avg'])
        print('[cv2-pad] padding:', padding, 'npad:', npad)
        img_files, anno = random.choice(dataset)
        for img_file in img_files:
            padded = pad_array(self._read_rgb(img_file), npad, padding=padding)
            # show_frame expects BGR for array input here; reverse channels
            show_frame(padded[:, :, ::-1], fig_n=1)

    def test_crop_array(self):
        dataset = OTB(self.otb_dir, download=True)
        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([None, 255])
        print('[cv2-crop] padding:', padding, 'out_size:', out_size)
        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            bndbox = anno[f, :]
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_array(self._read_rgb(img_file), center, bndbox[2:],
                               padding=padding, out_size=out_size)
            show_frame(patch, fig_n=2, pause=0.1)

    def test_crop_tensor(self):
        dataset = OTB(self.otb_dir, download=True)
        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([255])
        print('[PyTorch-crop] padding:', padding, 'out_size:', out_size)
        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            tensor = torch.from_numpy(self._read_rgb(img_file)).permute(
                2, 0, 1).unsqueeze(0).float()
            bndbox = torch.from_numpy(anno[f, :]).float()
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_tensor(tensor, center, bndbox[2:],
                                padding=padding, out_size=out_size)
            patch = patch.squeeze().permute(1, 2, 0).cpu().numpy().astype(np.uint8)
            show_frame(patch, fig_n=1, pause=0.1)

    def test_resize_tensor(self):
        dataset = OTB(self.otb_dir, download=True)
        out_size = random.choice([30, 100, 255])
        print('[PyTorch-resize]:', out_size)
        img_files, anno = random.choice(dataset)
        for img_file in img_files:
            tensor = torch.from_numpy(self._read_rgb(img_file)).permute(
                2, 0, 1).unsqueeze(0).float()
            resized = resize_tensor(tensor, out_size)
            resized = resized.squeeze().permute(1, 2, 0).numpy().astype(np.uint8)
            show_frame(resized, fig_n=2, pause=0.1)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
BestSonny/open-vot-1 | lib/datasets/__init__.py | <reponame>BestSonny/open-vot-1<filename>lib/datasets/__init__.py
from __future__ import absolute_import
from .vot import VOT
from .otb import OTB
from .imagenet import ImageNetVID, ImageNetObject
from .pairwise import Pairwise
|
BestSonny/open-vot-1 | lib/datasets/vot.py | <filename>lib/datasets/vot.py
from __future__ import absolute_import, print_function, division
import os
import glob
import numpy as np
import six
from ..utils.ioutil import download, extract
class VOT(object):
    """VOT benchmark dataset (challenge years 2013-2017).

    Indexing by integer or sequence name yields ``(img_files, anno)`` where
    ``img_files`` is the sorted list of frame paths and ``anno`` is a 2-D
    array with one row per frame (8 polygon coordinates, or 4 rectangle
    values when ``return_rect`` is set and the file stores polygons).

    Args:
        root_dir: directory holding the sequence folders.
        return_rect: if True, convert 8-number polygon annotations to
            axis-aligned ``[x, y, w, h]`` rectangles.
        download: if True, download and extract the dataset when missing.
        version: VOT challenge year, in [2013, 2017].
    """

    def __init__(self, root_dir, return_rect=False,
                 download=False, version=2017):
        super(VOT, self).__init__()
        self.root_dir = root_dir
        self.return_rect = return_rect
        if download:
            self._download(self.root_dir, version)
        if not self._check_integrity():
            raise Exception('Dataset not found or corrupted. ' +
                            'You can use download=True to download it.')
        self.anno_files = sorted(glob.glob(
            os.path.join(root_dir, '*/groundtruth.txt')))
        self.seq_dirs = [os.path.dirname(f) for f in self.anno_files]
        self.seq_names = [os.path.basename(s) for s in self.seq_dirs]

    def __getitem__(self, index):
        """Return (img_files, anno) for a sequence, by index or by name."""
        if isinstance(index, six.string_types):
            # idiom fix: 'not in' instead of 'not ... in'
            if index not in self.seq_names:
                raise Exception('Sequence {} not found.'.format(index))
            index = self.seq_names.index(index)
        img_files = sorted(glob.glob(
            os.path.join(self.seq_dirs[index], '*.jpg')))
        anno = np.loadtxt(self.anno_files[index], delimiter=',')
        # robustness fix: single-line groundtruth files load as a 1-D array,
        # which previously crashed on anno.shape[1]; keep the 2-D invariant
        if anno.ndim == 1:
            anno = anno[np.newaxis, :]
        if self.return_rect and anno.shape[1] == 8:
            anno = self._corner2rect(anno)
        return img_files, anno

    def __len__(self):
        """Number of sequences in the benchmark."""
        return len(self.seq_names)

    def _check_integrity(self, root_dir=None):
        """Cheap sanity check: the root directory exists and is non-empty."""
        if not root_dir:
            root_dir = self.root_dir
        return os.path.isdir(root_dir) and \
            len(os.listdir(root_dir)) > 0

    def _download(self, root_dir, version):
        """Download and extract the requested VOT release into root_dir."""
        if self._check_integrity(root_dir):
            print('Files already downloaded.')
            return
        assert version in range(2013, 2017 + 1), 'Incorrect VOT version.'
        if not os.path.isdir(root_dir):
            os.makedirs(root_dir)
        version = 'vot%d' % version
        url = 'http://data.votchallenge.net/%s/%s.zip' % (version, version)
        zip_file = os.path.join(root_dir, version + '.zip')
        download(url, zip_file)
        extract(zip_file, root_dir)
        return root_dir

    def _corner2rect(self, corners, center=False):
        """Convert (N, 8) polygon corners into axis-aligned rectangles.

        The rectangle area is rescaled to match the polygon's area. Returns
        (N, 4) arrays of [cx, cy, w, h] when ``center`` is True, otherwise
        [x, y, w, h].
        """
        cx = np.mean(corners[:, 0::2], axis=1)
        cy = np.mean(corners[:, 1::2], axis=1)
        x1 = np.min(corners[:, 0::2], axis=1)
        x2 = np.max(corners[:, 0::2], axis=1)
        y1 = np.min(corners[:, 1::2], axis=1)
        y2 = np.max(corners[:, 1::2], axis=1)
        # polygon area approximated from two adjacent edge lengths
        area1 = np.linalg.norm(corners[:, 0:2] - corners[:, 2:4], axis=1) * \
            np.linalg.norm(corners[:, 2:4] - corners[:, 4:6], axis=1)
        area2 = (x2 - x1) * (y2 - y1)
        scale = np.sqrt(area1 / area2)
        w = scale * (x2 - x1) + 1
        h = scale * (y2 - y1) + 1
        if center:
            return np.array([cx, cy, w, h]).T
        else:
            return np.array([cx-w/2, cy-h/2, w, h]).T
|
BestSonny/open-vot-1 | lib/trackers/csk.py | <reponame>BestSonny/open-vot-1
from __future__ import absolute_import, division
import numpy as np
import cv2
from . import Tracker
from ..utils import dict2tuple
from ..utils.complex import real, fft2, ifft2, complex_add, complex_mul, complex_div, fftshift
class TrackerCSK(Tracker):
    """CSK tracker (Henriques et al., ECCV 2012): a kernelized correlation
    filter trained on raw grayscale intensities in the Fourier domain.

    Fix over the previous revision: ``_gaussian_correlation`` now honors its
    ``sigma`` argument instead of silently reading ``self.cfg.sigma``.
    Behavior is unchanged, since the only call site passes ``self.cfg.sigma``.
    """

    def __init__(self, **kargs):
        super(TrackerCSK, self).__init__('CSK')
        self.parse_args(**kargs)
        self._correlation = self.setup_kernel(self.cfg.kernel_type)

    def parse_args(self, **kargs):
        """Merge keyword overrides into the default hyper-parameters."""
        self.cfg = {
            'padding': 1,  # search-area padding relative to target size
            'output_sigma_factor': 0.0625,  # width of the gaussian label
            'sigma': 0.2,  # gaussian-kernel bandwidth
            'poly_a': 1,  # polynomial-kernel offset
            'poly_b': 7,  # polynomial-kernel degree
            'lambda_': 1e-2,  # ridge regularization
            'interp_factor': 0.075,  # online model learning rate
            'kernel_type': 'gaussian'}
        for key, val in kargs.items():
            self.cfg.update({key: val})
        self.cfg = dict2tuple(self.cfg)

    def setup_kernel(self, kernel_type):
        """Return the kernel-correlation callable for the chosen kernel."""
        assert kernel_type in ['linear', 'polynomial', 'gaussian']
        if kernel_type == 'linear':
            return lambda x1, x2: self._linear_correlation(x1, x2)
        elif kernel_type == 'polynomial':
            return lambda x1, x2: self._polynomial_correlation(
                x1, x2, self.cfg.poly_a, self.cfg.poly_b)
        elif kernel_type == 'gaussian':
            return lambda x1, x2: self._gaussian_correlation(
                x1, x2, self.cfg.sigma)

    def init(self, image, init_rect):
        """Initialize on the first frame from an [x, y, w, h] box."""
        # initialize parameters; halve large targets to keep FFTs small
        self.resize_image = False
        if np.sqrt(init_rect[2:].prod()) > 100:
            self.resize_image = True
            init_rect = init_rect / 2
        self.t_center = init_rect[:2] + init_rect[2:] / 2
        self.t_sz = init_rect[2:]
        self.padded_sz = self.t_sz * (1 + self.cfg.padding)
        self.padded_sz = np.floor(self.padded_sz).astype(int)
        # create gaussian labels
        output_sigma = np.sqrt(self.t_sz.prod()) * \
            self.cfg.output_sigma_factor
        rs, cs = np.ogrid[:self.padded_sz[1], :self.padded_sz[0]]
        rs, cs = rs - self.padded_sz[1] / 2, cs - self.padded_sz[0] / 2
        y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
        self.yf = fft2(y)
        # initialize hanning window (suppresses boundary effects)
        self.hann_window = np.outer(
            np.hanning(self.padded_sz[1]),
            np.hanning(self.padded_sz[0])).astype(np.float32)
        # crop padded target and train classifier on grayscale in [-0.5, 0.5]
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if self.resize_image:
            size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
            image = cv2.resize(image, size)
        self.z = self._crop(image, self.t_center, self.padded_sz)
        self.z = self.hann_window * (np.float32(self.z) / 255 - 0.5)
        k = self._correlation(self.z, self.z)
        self.alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))

    def update(self, image):
        """Track the target into a new frame; return the box [x, y, w, h]."""
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if self.resize_image:
            size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
            image = cv2.resize(image, size)
        # locate target at the response maximum
        x = self._crop(image, self.t_center, self.padded_sz)
        x = self.hann_window * (np.float32(x) / 255 - 0.5)
        k = self._correlation(x, self.z)
        score = real(ifft2(complex_mul(self.alphaf, fft2(k))))
        _, _, _, max_loc = cv2.minMaxLoc(score)
        self.t_center = self.t_center - np.floor(self.padded_sz / 2) + max_loc
        # limit the estimated bounding box to be overlapped with the image
        self.t_center = np.clip(
            self.t_center, -self.t_sz / 2 + 1,
            image.shape[1::-1] + self.t_sz / 2 - 2)
        # update model by exponential moving average
        new_z = self._crop(image, self.t_center, self.padded_sz)
        new_z = self.hann_window * (np.float32(new_z) / 255 - 0.5)
        k = self._correlation(new_z, new_z)
        new_alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))
        self.alphaf = (1 - self.cfg.interp_factor) * self.alphaf + \
            self.cfg.interp_factor * new_alphaf
        self.z = (1 - self.cfg.interp_factor) * self.z + \
            self.cfg.interp_factor * new_z
        bndbox = np.concatenate([
            self.t_center - self.t_sz / 2, self.t_sz])
        if self.resize_image:
            # report the box in original-image coordinates
            bndbox = bndbox * 2
        return bndbox

    def _crop(self, image, center, size):
        """Crop a `size` patch centered at `center`, replicating border
        pixels when the window extends beyond the image."""
        corners = np.zeros(4, dtype=int)
        corners[:2] = np.floor(center - size / 2).astype(int)
        corners[2:] = corners[:2] + size
        pads = np.concatenate(
            (-corners[:2], corners[2:] - image.shape[1::-1]))
        pads = np.maximum(0, pads)
        if np.any(pads > 0):
            corners = np.concatenate((
                corners[:2] + pads[:2],
                corners[2:] - pads[2:])).astype(int)
        patch = image[corners[1]:corners[3], corners[0]:corners[2]]
        if np.any(pads > 0):
            patch = cv2.copyMakeBorder(
                patch, pads[1], pads[3], pads[0], pads[2],
                borderType=cv2.BORDER_REPLICATE)
        return patch

    def _linear_correlation(self, x1, x2):
        """Linear-kernel correlation via FFT, normalized by element count."""
        xcorr = cv2.mulSpectrums(fft2(x1), fft2(x2), 0, conjB=True)
        xcorr = fftshift(real(ifft2(xcorr)))
        return xcorr / x1.size

    def _polynomial_correlation(self, x1, x2, a, b):
        """Polynomial-kernel correlation (x1 . x2 / n + a) ** b."""
        xcorr = cv2.mulSpectrums(fft2(x1), fft2(x2), 0, conjB=True)
        xcorr = fftshift(real(ifft2(xcorr)))
        out = (xcorr / x1.size + a) ** b
        return out

    def _gaussian_correlation(self, x1, x2, sigma):
        """Gaussian (RBF) kernel correlation between x1 and x2."""
        xcorr = cv2.mulSpectrums(fft2(x1), fft2(x2), 0, conjB=True)
        xcorr = fftshift(real(ifft2(xcorr)))
        out = (np.sum(x1 * x1) + np.sum(x2 * x2) - 2 * xcorr) / x1.size
        # clamp tiny negative values caused by FFT round-off
        out = out * (out >= 0)
        # bug fix: use the sigma argument (previously self.cfg.sigma was
        # read here, ignoring the parameter)
        out = np.exp(-out / (sigma ** 2))
        return out
|
BestSonny/open-vot-1 | lib/transforms/transform_siamfc.py | from __future__ import absolute_import, division
import numpy as np
import torchvision.transforms.functional as F
import torch
from ..utils import dict2tuple
from ..utils.ioutil import load_siamfc_stats
from ..utils.warp import crop_pil
class TransformSiamFC(object):
    """Data transform for SiamFC training pairs.

    Crops exemplar/search patches around their boxes, applies a joint random
    horizontal flip and optional color augmentation, and builds the logistic
    label map with balanced per-class weights.
    """

    def __init__(self, stats_path=None, **kargs):
        self.parse_args(**kargs)
        # color-augmentation statistics are optional
        self.stats = load_siamfc_stats(stats_path) if stats_path else None

    def parse_args(self, **kargs):
        """Set hyper-parameters, preferring values supplied in kargs."""
        # default branch is AlexNetV1
        default_args = {
            'exemplar_sz': 127,
            'search_sz': 255,
            'score_sz': 17,
            'context': 0.5,
            'r_pos': 16,
            'r_neg': 0,
            'total_stride': 8,
            'ignore_label': -100}
        for key, val in default_args.items():
            setattr(self, key, kargs.get(key, val))

    def __call__(self, img_z, img_x, bndbox_z, bndbox_x):
        patch_z = self._crop(img_z, bndbox_z, self.exemplar_sz)
        patch_x = self._crop(img_x, bndbox_x, self.search_sz)
        labels, weights = self._create_labels()
        # data augmentation: joint random horizontal flip
        if np.random.rand() > 0.5:
            patch_z = F.hflip(patch_z)
            patch_x = F.hflip(patch_x)
        patch_z = 255.0 * F.to_tensor(patch_z)
        patch_x = 255.0 * F.to_tensor(patch_x)
        labels = torch.from_numpy(labels).float()
        weights = torch.from_numpy(weights).float()
        # color augmentation (random offsets along the RGB variance axes)
        if self.stats:
            offset_z = np.dot(
                self.stats.rgb_variance_z,
                np.random.randn(3, 1)).reshape(3, 1, 1)
            offset_x = np.dot(
                self.stats.rgb_variance_x,
                np.random.randn(3, 1)).reshape(3, 1, 1)
            patch_z += torch.from_numpy(offset_z).float()
            patch_x += torch.from_numpy(offset_x).float()
            patch_z = torch.clamp(patch_z, 0.0, 255.0)
            patch_x = torch.clamp(patch_x, 0.0, 255.0)
        return patch_z, patch_x, labels, weights

    def _crop(self, image, bndbox, out_size):
        """Crop a context-padded square patch around bndbox, resized to
        out_size pixels."""
        target_sz = bndbox[2:]
        center = bndbox[:2] + target_sz / 2
        margin = self.context * target_sz.sum()
        patch_sz = out_size / self.exemplar_sz * \
            np.sqrt((target_sz + margin).prod())
        return crop_pil(image, center, patch_sz, out_size=out_size)

    def _create_labels(self):
        """Return (labels, weights), each of shape (1, score_sz, score_sz).

        Weights balance positives and negatives: each class contributes half
        of the total weight, scaled so the weights sum to pos_num + neg_num.
        """
        labels = self._create_logisticloss_labels()
        pos_mask = labels == 1
        neg_mask = labels == 0
        pos_num = np.sum(pos_mask)
        neg_num = np.sum(neg_mask)
        weights = np.zeros_like(labels)
        weights[pos_mask] = 0.5 / pos_num
        weights[neg_mask] = 0.5 / neg_num
        weights *= pos_num + neg_num
        return labels[np.newaxis, :], weights[np.newaxis, :]

    def _create_logisticloss_labels(self):
        """Label grid: 1 within r_pos cells of the center, ignore_label
        between r_pos and r_neg, 0 elsewhere (radii in stride units)."""
        label_sz = self.score_sz
        r_pos = self.r_pos / self.total_stride
        r_neg = self.r_neg / self.total_stride
        rows, cols = np.ogrid[:label_sz, :label_sz]
        dist = np.sqrt((rows - label_sz // 2) ** 2 +
                       (cols - label_sz // 2) ** 2)
        labels = np.zeros((label_sz, label_sz))
        labels[dist <= r_pos] = 1
        labels[(dist > r_pos) & (dist <= r_neg)] = self.ignore_label
        return labels
|
BestSonny/open-vot-1 | lib/trackers/dsst.py | from __future__ import absolute_import, division
import numpy as np
import cv2
from . import Tracker
from ..utils import dict2tuple
from ..utils.complex import real, conj, fft2, ifft2, fft1, ifft1, complex_add, complex_mul, complex_div
from ..descriptors.fhog import fast_hog
class TrackerDSST(Tracker):
    """DSST tracker: discriminative correlation filters for translation
    (2-D filter over HOG + grayscale) plus a separate 1-D filter over a
    pyramid of scales, both trained and updated in the Fourier domain.
    """

    def __init__(self, **kargs):
        super(TrackerDSST, self).__init__('DSST')
        self.parse_args(**kargs)

    def parse_args(self, **kargs):
        """Merge keyword overrides into the default hyper-parameters."""
        self.cfg = {
            'padding': 1,  # search-area padding relative to target size
            'output_sigma_factor': 0.0625,  # width of the translation label
            'scale_sigma_factor': 0.25,  # width of the scale label
            'lambda_': 1e-2,  # ridge regularization
            'learning_rate': 0.025,  # online filter learning rate
            'scale_num': 33,  # number of scales in the pyramid
            'scale_step': 1.02,  # ratio between adjacent scales
            'scale_model_max_area': 512}  # cap on the scale-feature patch area
        for key, val in kargs.items():
            self.cfg.update({key: val})
        self.cfg = dict2tuple(self.cfg)

    def init(self, image, init_rect):
        """Initialize both filters on the first frame.

        Args:
            image: first frame (grayscale or BGR ndarray).
            init_rect: initial target box as [x, y, w, h].
        """
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        # initialize parameters
        self.t_center = init_rect[:2] + init_rect[2:] / 2
        self.t_sz = init_rect[2:].astype(int)
        self.t_scale = 1.0
        self.padded_sz = self.t_sz * (1 + self.cfg.padding)
        # shrink the scale-model patch if the target is too large
        scale_factor = 1.0
        if self.t_sz.prod() > self.cfg.scale_model_max_area:
            scale_factor = np.sqrt(
                self.cfg.scale_model_max_area / self.t_sz.prod())
        self.scale_model_sz = (self.t_sz * scale_factor).astype(int)
        self.scale_model_sz = self.scale_model_sz // 8 * 8
        # scale bounds keep the window at least ~5 px and within the image
        self.min_scale_factor = self.cfg.scale_step ** np.ceil(
            np.log((5 / self.padded_sz).max()) / np.log(self.cfg.scale_step))
        self.max_scale_factor = self.cfg.scale_step ** np.floor(
            np.log((image.shape[1::-1] / self.t_sz).min()) / np.log(self.cfg.scale_step))
        # create translation gaussian labels
        output_sigma = self.cfg.output_sigma_factor * \
            np.sqrt(np.prod(self.t_sz[:2]))
        rs, cs = np.ogrid[:self.padded_sz[1], :self.padded_sz[0]]
        rs, cs = rs - self.padded_sz[1] // 2, cs - self.padded_sz[0] // 2
        y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
        self.yf = fft2(y)[:, :, np.newaxis, :]
        # create scale gaussian labels
        scale_sigma = self.cfg.scale_sigma_factor * \
            np.sqrt(self.cfg.scale_num)
        ss = np.ogrid[:self.cfg.scale_num] - np.ceil(self.cfg.scale_num / 2)
        ys = np.exp(-0.5 / scale_sigma ** 2 * (ss ** 2))
        self.ysf = fft1(ys)[:, np.newaxis, :]
        # larger factor = smaller scale index offset (note the minus sign)
        self.scale_factors = self.cfg.scale_step ** (-ss)
        # initialize hanning windows (translation and scale dimensions)
        self.hann_window = np.outer(
            np.hanning(self.padded_sz[1]),
            np.hanning(self.padded_sz[0])).astype(np.float32)
        self.hann_window = self.hann_window[:, :, np.newaxis]
        if self.cfg.scale_num % 2 == 0:
            # even count: drop the first sample of an odd-length window
            self.scale_window = np.hanning(
                self.cfg.scale_num + 1).astype(np.float32)
            self.scale_window = self.scale_window[1:]
        else:
            self.scale_window = np.hanning(
                self.cfg.scale_num).astype(np.float32)
        self.scale_window = self.scale_window[:, np.newaxis]
        # train translation filter
        z = self._get_translation_sample(
            image, self.t_center, self.padded_sz, self.t_scale)
        self.hf_num, self.hf_den = self._train_translation_filter(
            fft2(z), self.yf)
        # train scale filter
        zs = self._get_scale_sample(
            image, self.t_center, self.t_sz,
            self.t_scale * self.scale_factors, self.scale_model_sz)
        self.sf_num, self.sf_den = self._train_scale_filter(fft1(zs), self.ysf)

    def update(self, image):
        """Track the target into a new frame; return the box [x, y, w, h]."""
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        # locate target center
        x = self._get_translation_sample(
            image, self.t_center, self.padded_sz, self.t_scale)
        score = self._calc_translation_score(fft2(x), self.hf_num, self.hf_den)
        _, _, _, max_loc = cv2.minMaxLoc(score)
        self.t_center = self.t_center - self.t_scale * \
            (np.floor(self.padded_sz / 2) - max_loc)
        # limit the estimated bounding box to be overlapped with the image
        self.t_center = np.clip(
            self.t_center, -self.t_sz / 2 + 1,
            image.shape[1::-1] + self.t_sz / 2 - 2)
        # locate target scale
        xs = self._get_scale_sample(
            image, self.t_center, self.t_sz,
            self.t_scale * self.scale_factors, self.scale_model_sz)
        score = self._calc_scale_score(fft1(xs), self.sf_num, self.sf_den)
        scale_id = score.argmax()
        self.t_scale *= self.scale_factors[scale_id]
        self.t_scale = np.clip(
            self.t_scale, self.min_scale_factor, self.max_scale_factor)
        # update translation filter (running average, rate = learning_rate)
        z = self._get_translation_sample(
            image, self.t_center, self.padded_sz, self.t_scale)
        hf_num, hf_den = self._train_translation_filter(
            fft2(z), self.yf)
        self.hf_num = (1 - self.cfg.learning_rate) * self.hf_num + \
            self.cfg.learning_rate * hf_num
        self.hf_den = (1 - self.cfg.learning_rate) * self.hf_den + \
            self.cfg.learning_rate * hf_den
        # update scale filter
        zs = self._get_scale_sample(
            image, self.t_center, self.t_sz,
            self.t_scale * self.scale_factors, self.scale_model_sz)
        sf_num, sf_den = self._train_scale_filter(fft1(zs), self.ysf)
        self.sf_num = (1 - self.cfg.learning_rate) * self.sf_num + \
            self.cfg.learning_rate * sf_num
        self.sf_den = (1 - self.cfg.learning_rate) * self.sf_den + \
            self.cfg.learning_rate * sf_den
        t_sz = np.floor(self.t_sz * self.t_scale)
        bndbox = np.concatenate([self.t_center - t_sz / 2, t_sz])
        return bndbox

    def _get_translation_sample(self, image, center, size, scale):
        """Extract the windowed translation feature map: 27 HOG channels
        (padded back to full resolution) plus normalized grayscale."""
        patch_sz = (size * scale).astype(int)
        patch = self._crop(image, center, patch_sz)
        if np.any(patch.shape[1::-1] != size):
            patch = cv2.resize(patch, tuple(size))
        feature = fast_hog(np.float32(patch), 1)[:, :, :27]
        # fast_hog trims a 1-px border at cell_size=1; pad it back
        feature = np.pad(feature, ((1, 1), (1, 1), (0, 0)), 'edge')
        gray = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
        feature = np.concatenate((gray / 255.0 - 0.5, feature), axis=2)
        return self.hann_window * feature

    def _get_scale_sample(self, image, center, size, scale_factors, scale_model_sz):
        """Build the (scale_num, feat_len) matrix of flattened HOG features,
        one row per scale in the pyramid, windowed over the scale axis."""
        features = []
        for scale in scale_factors:
            patch_sz = size * scale
            patch = self._crop(image, center, patch_sz)
            patch = cv2.resize(patch, tuple(scale_model_sz))
            feature = fast_hog(np.float32(patch) / 255.0, 4, False)
            features.append(feature.reshape(-1))
        features = np.stack(features)
        return self.scale_window * features

    def _crop(self, image, center, size):
        """Crop a `size` patch centered at `center`, replicating border
        pixels when the window extends beyond the image."""
        corners = np.zeros(4, dtype=int)
        corners[:2] = np.floor(center - size / 2).astype(int)
        corners[2:] = corners[:2] + size
        pads = np.concatenate(
            (-corners[:2], corners[2:] - image.shape[1::-1]))
        pads = np.maximum(0, pads)
        if np.any(pads > 0):
            corners = np.concatenate((
                corners[:2] + pads[:2],
                corners[2:] - pads[2:])).astype(int)
        patch = image[corners[1]:corners[3], corners[0]:corners[2]]
        if np.any(pads > 0):
            patch = cv2.copyMakeBorder(
                patch, pads[1], pads[3], pads[0], pads[2],
                borderType=cv2.BORDER_REPLICATE)
        return patch

    def _train_translation_filter(self, zf, yf):
        """Return filter numerator/denominator for the 2-D MOSSE-style
        translation filter; denominator is summed over feature channels."""
        hf_num = complex_mul(yf, conj(zf))
        hf_den = complex_mul(zf, conj(zf))
        hf_den = np.sum(hf_den, axis=2)
        return hf_num, hf_den

    def _train_scale_filter(self, zsf, ysf):
        """Return numerator/denominator for the 1-D scale filter."""
        sf_num = complex_mul(ysf, conj(zsf))
        sf_den = complex_mul(zsf, conj(zsf))
        sf_den = np.sum(sf_den, axis=1, keepdims=True)
        return sf_num, sf_den

    def _calc_translation_score(self, xf, hf_num, hf_den):
        """Evaluate the translation filter; returns a real response map."""
        num = np.sum(complex_mul(hf_num, xf), axis=2)
        den = complex_add(hf_den, self.cfg.lambda_)
        score = real(ifft2(complex_div(num, den)))
        return score

    def _calc_scale_score(self, xsf, sf_num, sf_den):
        """Evaluate the scale filter; returns one response per scale."""
        num = np.sum(complex_mul(sf_num, xsf), axis=1, keepdims=True)
        den = complex_add(sf_den, self.cfg.lambda_)
        score = real(ifft2(complex_div(num, den))).squeeze(1)
        return score
|
BestSonny/open-vot-1 | examples/run_tracking.py | <reponame>BestSonny/open-vot-1
from __future__ import absolute_import
import argparse
from lib.trackers import *
from lib.experiments import *
# Map CLI names to tracker classes. Both SiamFC variants share a class and
# differ only in the backbone branch selected below.
tracker_factory = {
    'mosse': TrackerMOSSE,
    'csk': TrackerCSK,
    'kcf': TrackerKCF,
    'dcf': TrackerDCF,
    'dsst': TrackerDSST,
    'goturn': TrackerGOTURN,
    'siamfc': TrackerSiamFC,
    'siamfcv2': TrackerSiamFC,
    'dcfnet': TrackerDCFNet}

experiment_factory = {
    'otb': ExperimentOTB}

# parse arguments
parser = argparse.ArgumentParser(description='tracking experiment')
parser.add_argument('-t', '--tracker', type=str, default='csk')
parser.add_argument('-e', '--experiment', type=str, default='otb')
parser.add_argument('-d', '--dataset-folder', type=str, default='data/OTB')
# for deep trackers
parser.add_argument('-n', '--network-path', type=str,
                    default='pretrained/siamfc/2016-08-17.net.mat')
args = parser.parse_args()

# setup tracker (idiom fix: 'not in'; flattened the nested dispatch)
deep_trackers = ('goturn', 'siamfc', 'siamfcv2', 'dcfnet')
if args.tracker not in deep_trackers:
    # traditional tracker: no pretrained network required
    tracker = tracker_factory[args.tracker]()
elif args.tracker == 'siamfc':
    tracker = tracker_factory[args.tracker](
        branch='alexv1', net_path=args.network_path)
elif args.tracker == 'siamfcv2':
    tracker = tracker_factory[args.tracker](
        branch='alexv2', net_path=args.network_path)
elif args.tracker == 'dcfnet':
    tracker = tracker_factory[args.tracker](
        net_path=args.network_path, online=True)
else:
    # remaining deep tracker (goturn): only needs the network path
    tracker = tracker_factory[args.tracker](
        net_path=args.network_path)

# setup experiment
experiment = experiment_factory[args.experiment](args.dataset_folder)

# run experiment and record results in 'results' folder
experiment.run(tracker, visualize=True)

# report performance in 'reports' folder
experiment.report([tracker.name])
|
BestSonny/open-vot-1 | tests/models/test_goturn.py | from __future__ import absolute_import
import unittest
import torch
import time
from lib.models import GOTURN
class TestGOTURN(unittest.TestCase):
    """Check the GOTURN forward pass in both train and eval modes."""

    def setUp(self):
        self.z = torch.randn((2, 3, 227, 227))
        self.x = torch.randn((2, 3, 227, 227))
        self.net = GOTURN()

    def tearDown(self):
        pass

    def test_goturn(self):
        # training mode: gradients must flow through the output
        with torch.set_grad_enabled(True):
            self.net.train()
            tic = time.time()
            out_train = self.net(self.z, self.x)
            print('inference time of training: %.3f' % (time.time() - tic))
            self.assertTrue(self.net.training)
            self.assertTrue(out_train.requires_grad)
        # eval mode: no gradients on the output
        with torch.set_grad_enabled(False):
            self.net.eval()
            tic = time.time()
            out_eval = self.net(self.z, self.x)
            print('inference time of test: %.3f' % (time.time() - tic))
            self.assertFalse(self.net.training)
            self.assertFalse(out_eval.requires_grad)
        # train/eval outputs should differ (dropout/batchnorm behavior)
        self.assertNotAlmostEqual(
            out_train.mean().item(), out_eval.mean().item())
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
BestSonny/open-vot-1 | lib/transforms/transform_goturn.py | from __future__ import absolute_import
import torchvision.transforms.functional as F
import torch
import numpy as np
import math
from ..utils import dict2tuple
from ..utils.warp import crop_pil
class TransformGOTURN(object):
def __init__(self, **kargs):
self.parse_args(**kargs)
def parse_args(self, **kargs):
default_args = {
'lambda_shift': 5,
'lambda_scale': 15,
'min_scale': -0.4,
'max_scale': 0.4,
'context': 2,
'out_size': 227,
'label_scale_factor': 10,
'mean_color': [104, 117, 123]}
for key, val in default_args.items():
if key in kargs:
setattr(self, key, kargs[key])
else:
setattr(self, key, val)
def __call__(self, *args):
assert len(args) in [2, 4]
if len(args) == 2:
img_z, img_x, bndbox_z, bndbox_x = \
args[0], args[1], args[0], args[1]
elif len(args) == 4:
img_z, img_x, bndbox_z, bndbox_x = args
# shift search area
rand_bndbox_x = self._rand_shift(bndbox_x, img_x.size)
# crop image regions
crop_z = self._crop(img_z, bndbox_z)
crop_x = self._crop(img_x, rand_bndbox_x)
labels = self._create_labels(rand_bndbox_x, bndbox_x)
# convert data to tensors
crop_z = 255.0 * F.to_tensor(crop_z)
crop_x = 255.0 * F.to_tensor(crop_x)
labels = torch.from_numpy(labels).float()
# color augmentation
mean_color = torch.tensor(self.mean_color).float().view(3, 1, 1)
crop_z -= mean_color
crop_x -= mean_color
return crop_z, crop_x, labels
def _rand_shift(self, bndbox, img_sz):
def rand_fn(lambda_, min_val=None, max_val=None):
sign = +1 if np.random.rand() > 0.5 else -1
rand = math.log(np.random.rand()) / (lambda_ * sign)
if min_val is not None or max_val is not None:
rand = np.clip(rand, min_val, max_val)
return rand
center = bndbox[:2] + bndbox[2:] / 2
size = bndbox[2:]
# randomly rescale the size
scale_factors = [
rand_fn(self.lambda_scale, self.min_scale, self.max_scale),
rand_fn(self.lambda_scale, self.min_scale, self.max_scale)]
rand_sz = size * (1 + np.array(scale_factors))
rand_sz = np.clip(rand_sz, 1.0, img_sz)
# randomly shift the center
shift_factors = [
rand_fn(self.lambda_shift),
rand_fn(self.lambda_shift)]
rand_center = center + size * shift_factors
rand_center = np.clip(
rand_center, rand_sz / 2, img_sz - rand_sz / 2)
rand_bndbox = np.concatenate([
rand_center - rand_sz / 2, rand_sz])
return rand_bndbox
    def _crop(self, image, bndbox):
        """Crop a context-padded region around ``bndbox`` from a PIL image
        and resize it to ``self.out_size``.

        NOTE(review): ``padding=0`` presumably fills out-of-image area with
        black — confirm against ``crop_pil``.
        """
        center = bndbox[:2] + bndbox[2:] / 2
        # enlarge the box by the context factor so background is included
        size = bndbox[2:] * self.context
        patch = crop_pil(image, center, size, padding=0,
                         out_size=self.out_size)
        return patch
def _create_labels(self, rand_bndbox, bndbox):
rand_corners = np.concatenate([
rand_bndbox[:2], rand_bndbox[:2] + rand_bndbox[2:]])
anchor_corners = np.concatenate([
bndbox[:2], bndbox[:2] + bndbox[2:]])
offsets = anchor_corners - rand_corners
# normalize offsets
offsets[0::2] /= rand_bndbox[2] * self.context
offsets[1::2] /= rand_bndbox[3] * self.context
# normalize corners
margin = self.out_size * (1 - 1 / self.context) / 2
corners = np.array([
margin, margin,
self.out_size - margin, self.out_size - margin])
corners = corners / self.out_size + offsets
corners *= self.label_scale_factor
return corners
|
BestSonny/open-vot-1 | lib/experiments/__init__.py | from __future__ import absolute_import
from .otb import ExperimentOTB
|
BestSonny/open-vot-1 | lib/utils/__init__.py | <gh_stars>10-100
from __future__ import absolute_import
import torch.nn as nn
import math
from collections import namedtuple
def initialize_weights(model):
    """Initialize the weights of ``model`` in place.

    Conv2d layers get He/Kaiming-normal weights (std = sqrt(2 / fan_out));
    BatchNorm2d layers get unit scale and zero shift; Linear layers get
    N(0, 0.01) weights.  All existing biases are zeroed.
    """
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            # note: the original computed the fan-in here but never used it
            m.weight.data.normal_(0, 0.01)
            if m.bias is not None:  # guard: Linear(bias=False) is legal
                m.bias.data.zero_()
def dict2tuple(dictionary):
    """Convert a plain dict into an immutable named tuple whose fields are
    the dict's keys (attribute-style access to config values)."""
    GenericDict = namedtuple('GenericDict', dictionary.keys())
    return GenericDict(**dictionary)
|
BestSonny/open-vot-1 | lib/trainers/__init__.py | from __future__ import absolute_import
from .trainer_siamfc import TrainerSiamFC
from .trainer_goturn import TrainerGOTURN
from .trainer_dcfnet import TrainerDCFNet
|
BestSonny/open-vot-1 | lib/trackers/siamfc.py | from __future__ import absolute_import, division
import torch
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import cv2
from torch.optim.lr_scheduler import StepLR
from . import Tracker
from ..utils import dict2tuple
from ..models import SiameseNet, AlexNetV1, AlexNetV2
from ..utils.ioutil import load_siamfc_from_matconvnet
from ..utils.warp import warp_cv2
class BCEWeightedLoss(nn.Module):
    """Binary cross-entropy with logits, averaged over all elements, with
    optional per-element weights (used to balance positive/negative
    entries of SiamFC label maps)."""

    def __init__(self):
        super(BCEWeightedLoss, self).__init__()

    def forward(self, input, target, weight=None):
        """Args:
            input: raw (unnormalized) response-map logits.
            target: binary labels, same shape as ``input``.
            weight: optional per-element weight tensor.
        """
        # ``size_average=True`` is deprecated; ``reduction='mean'`` is the
        # equivalent modern spelling and keeps behavior identical.
        return F.binary_cross_entropy_with_logits(
            input, target, weight, reduction='mean')
class TrackerSiamFC(Tracker):
    """SiamFC tracker: a fully-convolutional Siamese network that locates
    the target by cross-correlating an exemplar embedding with embeddings
    of multi-scale search regions.

    Provides both inference (``init``/``update``) and training (``step``).
    """
    def __init__(self, branch='alexv1', net_path=None, **kargs):
        """branch: backbone variant ('alexv1' or 'alexv2'); net_path:
        optional .mat/.pth weight file; extra kwargs override config."""
        super(TrackerSiamFC, self).__init__('SiamFC')
        self.parse_args(**kargs)
        self.cuda = torch.cuda.is_available()
        self.device = torch.device('cuda:0' if self.cuda else 'cpu')
        self.setup_model(branch, net_path)
        self.setup_optimizer()
    def parse_args(self, **kargs):
        """Build ``self.cfg`` (a named tuple) from defaults + overrides."""
        # default branch is AlexNetV1
        self.cfg = {
            'exemplar_sz': 127,
            'search_sz': 255,
            'response_up': 16,
            'context': 0.5,
            'window_influence': 0.176,
            'z_lr': 0,  # 0 disables template updating in update()
            'scale_num': 3,
            'scale_step': 1.0375,
            'scale_penalty': 0.97,
            'scale_lr': 0.59,
            'r_pos': 16,
            'r_neg': 0,
            'initial_lr': 1e-2,
            'final_lr': 1e-5,
            'step_size': 2,
            'epoch_num': 50,
            'lr_mult_conv_weight': 1,
            'lr_mult_conv_bias': 2,
            'lr_mult_bn_weight': 2,
            'lr_mult_bn_bias': 1,
            'lr_mult_linear_weight': 0,
            'lr_mult_linear_bias': 1,
            'weight_decay': 5e-4,
            'batch_size': 8}
        for key, val in kargs.items():
            self.cfg.update({key: val})
        self.cfg = dict2tuple(self.cfg)
    def setup_model(self, branch='alexv1', net_path=None):
        """Construct the Siamese network, optionally load weights from a
        MatConvNet .mat or PyTorch .pth file, and wrap in DataParallel."""
        assert branch in ['alexv1', 'alexv2']
        if branch == 'alexv1':
            self.model = SiameseNet(AlexNetV1(), norm='linear')
        elif branch == 'alexv2':
            self.model = SiameseNet(AlexNetV2(), norm='bn')
        if net_path is not None:
            ext = os.path.splitext(net_path)[1]
            if ext == '.mat':
                load_siamfc_from_matconvnet(net_path, self.model)
            elif ext == '.pth':
                state_dict = torch.load(
                    net_path, map_location=lambda storage, loc: storage)
                self.model.load_state_dict(state_dict)
            else:
                raise Exception('unsupport file extension')
        # keep separate handles to the embedding branch and the response
        # normalization head so inference can run them independently
        self.branch = nn.DataParallel(self.model.branch).to(self.device)
        self.norm = nn.DataParallel(self.model.norm).to(self.device)
        self.model = nn.DataParallel(self.model).to(self.device)
    def setup_optimizer(self):
        """SGD with per-parameter LR multipliers and weight decay, plus an
        exponentially decaying StepLR schedule and the weighted BCE loss."""
        params = []
        for name, param in self.model.named_parameters():
            lr = self.cfg.initial_lr
            weight_decay = self.cfg.weight_decay
            # NOTE(review): layer kind is inferred from substrings of the
            # parameter name ('.0' = conv, '.1'/'bn' = batchnorm) — this
            # depends on the Sequential layout of the backbone; confirm.
            if '.0' in name:  # conv
                if 'weight' in name:
                    lr *= self.cfg.lr_mult_conv_weight
                    weight_decay *= 1
                elif 'bias' in name:
                    lr *= self.cfg.lr_mult_conv_bias
                    weight_decay *= 0
            elif '.1' in name or 'bn' in name:  # bn
                if 'weight' in name:
                    lr *= self.cfg.lr_mult_bn_weight
                    weight_decay *= 0
                elif 'bias' in name:
                    lr *= self.cfg.lr_mult_bn_bias
                    weight_decay *= 0
            elif 'linear' in name:
                if 'weight' in name:
                    lr *= self.cfg.lr_mult_linear_weight
                    weight_decay *= 1
                elif 'bias' in name:
                    lr *= self.cfg.lr_mult_linear_bias
                    weight_decay *= 0
            params.append({
                'params': param,
                'initial_lr': lr,
                'weight_decay': weight_decay})
        self.optimizer = optim.SGD(
            params, lr=self.cfg.initial_lr,
            weight_decay=self.cfg.weight_decay)
        # gamma chosen so the LR decays from initial_lr to final_lr over
        # epoch_num epochs with a step every step_size epochs
        gamma = (self.cfg.final_lr / self.cfg.initial_lr) ** \
            (1 / (self.cfg.epoch_num // self.cfg.step_size))
        self.scheduler = StepLR(
            self.optimizer, self.cfg.step_size, gamma=gamma)
        self.criterion = BCEWeightedLoss().to(self.device)
    def init(self, image, init_rect):
        """Initialize tracking: set sizes from ``init_rect`` (``[x, y, w,
        h]``), build the cosine penalty window and extract the exemplar
        (template) embedding ``self.z``."""
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        # initialize parameters
        self.center = init_rect[:2] + init_rect[2:] / 2
        self.target_sz = init_rect[2:]
        # context-padded exemplar size and the matching search-region size
        context = self.cfg.context * self.target_sz.sum()
        self.z_sz = np.sqrt((self.target_sz + context).prod())
        self.x_sz = self.z_sz * self.cfg.search_sz / self.cfg.exemplar_sz
        self.min_x_sz = 0.2 * self.x_sz
        self.max_x_sz = 5.0 * self.x_sz
        # NOTE(review): with scale_num=3 this yields exponents
        # linspace(-2, 1, 3) because -3 // 2 == -2 — an asymmetric scale
        # pyramid; confirm whether linspace(-1, 1, 3) was intended.
        self.scale_factors = self.cfg.scale_step ** np.linspace(
            -self.cfg.scale_num // 2,
            self.cfg.scale_num // 2, self.cfg.scale_num)
        self.score_sz, self.total_stride = self._deduce_network_params(
            self.cfg.exemplar_sz, self.cfg.search_sz)
        self.final_score_sz = self.cfg.response_up * (self.score_sz - 1) + 1
        # hanning (cosine) window penalizing large displacements
        self.penalty = np.outer(
            np.hanning(self.final_score_sz),
            np.hanning(self.final_score_sz))
        self.penalty /= self.penalty.sum()
        self.avg_color = np.mean(image, axis=(0, 1))
        # extract template features
        crop_z = warp_cv2(image, self.center, self.z_sz,
                          self.cfg.exemplar_sz, self.avg_color)
        crop_z = torch.from_numpy(crop_z).to(
            self.device).permute(2, 0, 1).unsqueeze(0).float()
        with torch.set_grad_enabled(False):
            self.branch.eval()
            self.z = self.branch(crop_z)
    def update(self, image):
        """Track one frame: score multi-scale search crops against the
        template, update position/size, and return ``[x, y, w, h]``."""
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        # update scaled sizes
        scaled_exemplar = self.scale_factors * self.z_sz
        scaled_search_area = self.scale_factors * self.x_sz
        scaled_target = self.scale_factors[:, np.newaxis] * self.target_sz
        # locate target
        crops_x = [warp_cv2(
            image, self.center, size, self.cfg.search_sz, self.avg_color)
            for size in scaled_search_area]
        crops_x = torch.stack([torch.from_numpy(c).to(
            self.device).permute(2, 0, 1).float()
            for c in crops_x], dim=0)
        with torch.set_grad_enabled(False):
            self.branch.eval()
            x = self.branch(crops_x)
        score, scale_id = self._calc_score(self.z, x)
        # smooth the search-region size toward the winning scale
        self.x_sz = (1 - self.cfg.scale_lr) * self.x_sz + \
            self.cfg.scale_lr * scaled_search_area[scale_id]
        self.x_sz = np.clip(self.x_sz, self.min_x_sz, self.max_x_sz)
        self.center = self._locate_target(self.center, score, self.final_score_sz,
                                          self.total_stride, self.cfg.search_sz,
                                          self.cfg.response_up, self.x_sz)
        self.target_sz = (1 - self.cfg.scale_lr) * self.target_sz + \
            self.cfg.scale_lr * scaled_target[scale_id]
        # update the template
        # self.z_sz = (1 - self.cfg.scale_lr) * self.z_sz + \
        #     self.cfg.scale_lr * scaled_exemplar[scale_id]
        if self.cfg.z_lr > 0:  # template running average (off by default)
            crop_z = warp_cv2(image, self.center, self.z_sz,
                              self.cfg.exemplar_sz, self.avg_color)
            crop_z = torch.from_numpy(crop_z).to(
                self.device).permute(2, 0, 1).unsqueeze(0).float()
            with torch.set_grad_enabled(False):
                self.branch.eval()
                new_z = self.branch(crop_z)
            self.z = (1 - self.cfg.z_lr) * self.z + \
                self.cfg.z_lr * new_z
        self.z_sz = (1 - self.cfg.scale_lr) * self.z_sz + \
            self.cfg.scale_lr * scaled_exemplar[scale_id]
        bndbox = np.concatenate([
            self.center - self.target_sz / 2, self.target_sz])
        return bndbox
    def step(self, batch, backward=True, update_lr=False):
        """One training (or validation) step on a (z, x, labels, weights)
        batch; returns the scalar loss value."""
        if backward:
            if update_lr:
                self.scheduler.step()
            self.model.train()
        else:
            self.model.eval()
        z, x, labels, weights = \
            batch[0].to(self.device), batch[1].to(self.device), \
            batch[2].to(self.device), batch[3].to(self.device)
        self.optimizer.zero_grad()
        with torch.set_grad_enabled(backward):
            pred = self.model(z, x)
            loss = self.criterion(pred, labels, weights)
            if backward:
                loss.backward()
                self.optimizer.step()
        return loss.item()
    def _deduce_network_params(self, exemplar_sz, search_sz):
        """Run a dummy forward pass to measure the response-map size, and
        multiply the strides of all conv/pool layers to get the network's
        total stride."""
        z = torch.zeros(1, 3, exemplar_sz, exemplar_sz).to(self.device)
        x = torch.zeros(1, 3, search_sz, search_sz).to(self.device)
        with torch.set_grad_enabled(False):
            self.model.eval()
            y = self.model(z, x)
        score_sz = y.size(-1)
        total_stride = 1
        for m in self.model.modules():
            if isinstance(m, (nn.Conv2d, nn.MaxPool2d)):
                stride = m.stride[0] if isinstance(
                    m.stride, tuple) else m.stride
                total_stride *= stride
        return score_sz, total_stride
    def _calc_score(self, z, x):
        """Cross-correlate template ``z`` with the stack of per-scale
        search embeddings ``x``; return the upsampled, window-penalized
        score map of the best scale plus that scale's index."""
        scores = F.conv2d(x, z)
        with torch.set_grad_enabled(False):
            self.norm.eval()
            scores = self.norm(scores)
        # penalize all non-central scales to discourage scale jitter
        scores[:self.cfg.scale_num // 2] *= self.cfg.scale_penalty
        scores[self.cfg.scale_num // 2 + 1:] *= self.cfg.scale_penalty
        scale_id = scores.view(self.cfg.scale_num, -1).max(dim=1)[0].argmax()
        score = scores[scale_id].squeeze(0).cpu().numpy()
        # upsample to final_score_sz for sub-stride localization
        score = cv2.resize(
            score, (self.final_score_sz, self.final_score_sz),
            interpolation=cv2.INTER_CUBIC)
        # normalize to a distribution and blend with the cosine window
        score -= score.min()
        score /= max(1e-12, score.sum())
        score = (1 - self.cfg.window_influence) * score + \
            self.cfg.window_influence * self.penalty
        return score, scale_id
    def _locate_target(self, center, score, final_score_sz,
                       total_stride, search_sz, response_up, x_sz):
        """Map the peak of the upsampled score map back to a displacement
        in image coordinates and return the new target center."""
        pos = np.unravel_index(score.argmax(), score.shape)[::-1]
        half = (final_score_sz - 1) / 2
        disp_in_area = np.asarray(pos) - half
        # undo the response upsampling, then the network stride, then the
        # search-crop resize to get the displacement in the frame
        disp_in_xcrop = disp_in_area * total_stride / response_up
        disp_in_frame = disp_in_xcrop * x_sz / search_sz
        center = center + disp_in_frame
        return center
|
BestSonny/open-vot-1 | lib/models/submodules.py | <gh_stars>10-100
from __future__ import absolute_import, division
import torch.nn as nn
import torch
import torch.nn.functional as F
class XCorr(nn.Module):
    """Per-sample cross-correlation: correlates each exemplar embedding
    ``z[i]`` with its matching search embedding ``x[i]`` and stacks the
    single-channel response maps back into a batch."""

    def __init__(self):
        super(XCorr, self).__init__()

    def forward(self, z, x):
        responses = [
            F.conv2d(x[i, :].unsqueeze(0), z[i, :].unsqueeze(0))
            for i in range(z.size(0))]
        return torch.cat(responses, dim=0)
class Adjust2d(nn.Module):
    """Normalization head applied to raw cross-correlation response maps.

    Supported modes: ``'bn'`` (batch norm), ``'linear'`` (1x1 conv scale +
    shift), ``'cosine'`` / ``'euclidean'`` (analytic normalization using
    the raw ``z``/``x`` embeddings), or ``None`` (identity).
    """

    def __init__(self, norm='bn'):
        super(Adjust2d, self).__init__()
        assert norm in [None, 'bn', 'cosine', 'euclidean', 'linear']
        self.norm = norm
        if norm == 'bn':
            self.bn = nn.BatchNorm2d(1)
        elif norm == 'linear':
            self.linear = nn.Conv2d(1, 1, 1, bias=True)
        self._initialize_weights()

    def forward(self, out, z=None, x=None):
        """``out`` is the raw correlation map; ``z``/``x`` are only needed
        for the 'cosine' and 'euclidean' modes."""
        if self.norm == 'bn':
            out = self.bn(out)
        elif self.norm == 'linear':
            out = self.linear(out)
        elif self.norm == 'cosine':
            n, k = out.size(0), z.size(-1)
            norm_z = torch.sqrt(
                torch.pow(z, 2).view(n, -1).sum(1)).view(n, 1, 1, 1)
            # per-position L2 norm of each k x k search window
            norm_x = torch.sqrt(
                k * k * F.avg_pool2d(torch.pow(x, 2), k, 1).sum(1, keepdim=True))
            out = out / (norm_z * norm_x + 1e-32)
            out = (out + 1) / 2  # map cosine from [-1, 1] to [0, 1]
        elif self.norm == 'euclidean':
            n, k = out.size(0), z.size(-1)
            sqr_z = torch.pow(z, 2).view(n, -1).sum(1).view(n, 1, 1, 1)
            sqr_x = k * k * \
                F.avg_pool2d(torch.pow(x, 2), k, 1).sum(1, keepdim=True)
            # NOTE(review): ||z - x||^2 would be sqr_z + sqr_x - 2*out for a
            # dot-product correlation; this adds `out` instead — confirm the
            # sign convention of the correlation fed in.
            out = out + sqr_z + sqr_x
            out = out.clamp(min=1e-32).sqrt()
        elif self.norm is None:  # fixed: was `== None`; identity pass-through
            pass
        return out

    def _initialize_weights(self):
        # near-identity initialization so training starts close to the raw
        # correlation response
        if self.norm == 'bn':
            self.bn.weight.data.fill_(1)
            self.bn.bias.data.zero_()
        elif self.norm == 'linear':
            self.linear.weight.data.fill_(1e-3)
            self.linear.bias.data.zero_()
|
BestSonny/open-vot-1 | tests/utils/test_logger.py | <reponame>BestSonny/open-vot-1
from __future__ import absolute_import
import unittest
import numpy as np
import torch.nn as nn
from lib.utils.logger import Logger
class TestLogger(unittest.TestCase):
    """Smoke test: exercise every add_* method of the Logger utility."""

    def setUp(self):
        self.logger = Logger(log_dir='logs/unittest')
        self.model = nn.Conv2d(3, 32, 3, 1)

    def tearDown(self):
        pass

    def test_logger(self):
        for step in range(100):
            self.logger.add_scalar('data/unittest', 3, step)
            self.logger.add_text('unittest', 'iter %d' % (step + 1), step)
            self.logger.add_array('unittest', np.random.rand(5, 5), step)
        # checkpoint only once, after the logging loop (step == 99)
        self.logger.add_checkpoint('unittest', self.model.state_dict(), step)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | tests/models/test_siamese.py | <reponame>BestSonny/open-vot-1<filename>tests/models/test_siamese.py
from __future__ import absolute_import, print_function
import unittest
import torch
import random
import time
from lib.models import SiameseNet, AlexNetV2
class TestSiameseNet(unittest.TestCase):
    """Forward-pass checks for SiameseNet under every normalization mode:
    train/eval semantics, gradient flags and output value ranges."""

    def setUp(self):
        self.z = torch.randn((2, 3, 127, 127))
        self.x = torch.randn((2, 3, 255, 255))

    def tearDown(self):
        pass

    def test_siamese_net(self):
        for norm in [None, 'bn', 'cosine', 'euclidean', 'linear']:
            net = SiameseNet(AlexNetV2(), norm=norm)
            # training-mode forward pass
            with torch.set_grad_enabled(True):
                net.train()
                tic = time.time()
                out_train = net(self.z, self.x)
                print('inference time of training: %.3f' %
                      (time.time() - tic))
            self.assertTrue(out_train.requires_grad)
            self.assertTrue(net.training)
            # eval-mode forward pass
            with torch.set_grad_enabled(False):
                net.eval()
                tic = time.time()
                out_eval = net(self.z, self.x)
                print('inference time of test: %.3f' % (time.time() - tic))
            self.assertFalse(out_eval.requires_grad)
            self.assertFalse(net.training)
            self.assertNotAlmostEqual(
                out_train.mean().item(), out_eval.mean().item())
            if norm == 'cosine':
                # cosine responses must stay within [-1, 1]
                self.assertGreaterEqual(out_train.min().item(), -1)
                self.assertLessEqual(out_train.max().item(), 1)
                self.assertGreaterEqual(out_eval.min().item(), -1)
                self.assertLessEqual(out_eval.max().item(), 1)
            elif norm == 'euclidean':
                # distances are non-negative
                self.assertGreaterEqual(out_train.min().item(), 0)
                self.assertGreaterEqual(out_eval.min().item(), 0)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | lib/transforms/__init__.py | from __future__ import absolute_import
from .transform_siamfc import TransformSiamFC
from .transform_goturn import TransformGOTURN
from .transform_dcfnet import TransformDCFNet
|
BestSonny/open-vot-1 | lib/models/caffenet.py | from __future__ import absolute_import
import torch.nn as nn
from ..utils import initialize_weights
class CaffeNet(nn.Module):
    """CaffeNet/AlexNet convolutional backbone (conv1-conv5) as used by
    GOTURN; the fully-connected layers are intentionally omitted."""

    def __init__(self):
        super(CaffeNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 96, 11, 4),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75))
        self.conv2 = nn.Sequential(
            nn.Conv2d(96, 256, 5, 1, padding=2, groups=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75))
        self.conv3 = nn.Sequential(
            nn.Conv2d(256, 384, 3, 1, padding=1),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(384, 384, 3, 1, padding=1, groups=2),
            nn.ReLU(inplace=True))
        self.conv5 = nn.Sequential(
            nn.Conv2d(384, 256, 3, 1, padding=1, groups=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2))
        initialize_weights(self)

    def forward(self, x):
        # run the five stages in order
        for stage in (self.conv1, self.conv2, self.conv3,
                      self.conv4, self.conv5):
            x = stage(x)
        return x
|
BestSonny/open-vot-1 | tests/trackers/test_kcf.py | from __future__ import absolute_import, print_function
import unittest
import random
from lib.trackers import TrackerKCF, TrackerDCF
from lib.datasets import OTB
class TestTrackerKCF(unittest.TestCase):
    """Run the KCF and DCF trackers on one randomly chosen OTB sequence
    and check the output shape matches the annotations."""

    def setUp(self):
        self.otb_dir = 'data/OTB'

    def tearDown(self):
        pass

    def _run_tracker(self, make_tracker):
        # shared driver: build dataset + tracker, track a random sequence
        dataset = OTB(self.otb_dir, download=True)
        tracker = make_tracker()
        img_files, anno = random.choice(dataset)
        rects, speed = tracker.track(
            img_files, anno[0, :], visualize=True)
        self.assertEqual(rects.shape, anno.shape)

    def test_kcf(self):
        self._run_tracker(TrackerKCF)

    def test_dcf(self):
        self._run_tracker(TrackerDCF)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | lib/datasets/imagenet.py | <filename>lib/datasets/imagenet.py<gh_stars>10-100
from __future__ import absolute_import, division
import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
import six
import random
from torch.utils.data import Dataset
from PIL import Image
class ImageNetVID(object):
    """ImageNet VID video dataset: indexing yields the frame file list and
    per-frame boxes of one randomly chosen object track in a sequence.

    Args:
        root_dir: ILSVRC root containing ``Data/VID`` and ``Annotations/VID``.
        return_rect: if True, return boxes as ``[x, y, w, h]``; otherwise
            corner format ``[xmin, ymin, xmax, ymax]``.
        subset: ``'train'`` or ``'val'``.
        rand_choice: if True, integer indexing is ignored and a random
            sequence is drawn instead.
        download: attempt to download the data (not implemented).
    """
    def __init__(self, root_dir, return_rect=False,
                 subset='train', rand_choice=True, download=False):
        r'''TODO: make the track_id sampling deterministic
        '''
        super(ImageNetVID, self).__init__()
        self.root_dir = root_dir
        self.return_rect = return_rect
        self.rand_choice = rand_choice
        if download:
            self._download(self.root_dir)
        if not self._check_integrity():
            raise Exception('Dataset not found or corrupted. ' +
                            'You can use download=True to download it.')
        if subset == 'val':
            self.seq_dirs = sorted(glob.glob(os.path.join(
                self.root_dir, 'Data/VID/val/ILSVRC2015_val_*')))
            self.seq_names = [os.path.basename(s) for s in self.seq_dirs]
            self.anno_dirs = [os.path.join(
                self.root_dir, 'Annotations/VID/val', s) for s in self.seq_names]
        elif subset == 'train':
            self.seq_dirs = sorted(glob.glob(os.path.join(
                self.root_dir, 'Data/VID/train/ILSVRC*/ILSVRC*')))
            self.seq_names = [os.path.basename(s) for s in self.seq_dirs]
            # NOTE(review): splitting on '/' assumes POSIX-style paths from
            # glob — confirm behavior on Windows.
            self.anno_dirs = [os.path.join(
                self.root_dir, 'Annotations/VID/train',
                *s.split('/')[-2:]) for s in self.seq_dirs]
        else:
            raise Exception('Unknown subset.')
    def __getitem__(self, index):
        """Return ``(img_files, anno)`` for one track of sequence
        ``index`` (a name or an integer; random if ``rand_choice``)."""
        if isinstance(index, six.string_types):
            if not index in self.seq_names:
                raise Exception('Sequence {} not found.'.format(index))
            index = self.seq_names.index(index)
        elif self.rand_choice:
            index = np.random.randint(len(self.seq_names))
        anno_files = sorted(glob.glob(
            os.path.join(self.anno_dirs[index], '*.xml')))
        objects = [ET.ElementTree(file=f).findall('object')
                   for f in anno_files]
        # choose the track id randomly
        # (only tracks visible in at least two frames are eligible)
        track_ids, counts = np.unique([obj.find(
            'trackid').text for group in objects for obj in group], return_counts=True)
        track_id = random.choice(track_ids[counts >= 2])
        frames = []
        anno = []
        for f, group in enumerate(objects):
            for obj in group:
                if not obj.find('trackid').text == track_id:
                    continue
                frames.append(f)
                anno.append([
                    int(obj.find('bndbox/xmin').text),
                    int(obj.find('bndbox/ymin').text),
                    int(obj.find('bndbox/xmax').text),
                    int(obj.find('bndbox/ymax').text)])
        img_files = [os.path.join(
            self.seq_dirs[index], '%06d.JPEG' % f) for f in frames]
        anno = np.array(anno)
        if self.return_rect:
            # convert corners [x1, y1, x2, y2] to [x, y, w, h]
            anno[:, 2:] = anno[:, 2:] - anno[:, :2] + 1
        return img_files, anno
    def __len__(self):
        return len(self.seq_names)
    def _check_integrity(self):
        # cheap existence check only; does not validate file contents
        return os.path.isdir(self.root_dir) and \
            len(os.listdir(self.root_dir)) > 0
    def _download(self, root_dir):
        raise NotImplementedError()
class ImageNetObject(Dataset):
    """ImageNet DET/classification still-image dataset: each item is one
    random annotated object of one image, returned as ``(img_file, bndbox)``
    or transformed via ``self.transform(img, bndbox)``.

    Args:
        root_dir: ImageNet root with ``ILSVRC2012_img_*``/``ILSVRC2012_bbox_*``.
        return_rect: if True, boxes are ``[x, y, w, h]`` instead of corners.
        subset: ``'train'`` or ``'val'``.
        download: attempt to download the data (not implemented).
        transform: optional callable applied to ``(PIL image, bndbox)``.
    """

    def __init__(self, root_dir, return_rect=False,
                 subset='train', download=False, transform=None):
        super(ImageNetObject, self).__init__()
        self.root_dir = root_dir
        self.return_rect = return_rect
        if download:
            self._download(self.root_dir)
        self.transform = transform
        if not self._check_integrity():
            raise Exception('Dataset not found or corrupted. ' +
                            'You can use download=True to download it.')
        if subset == 'val':
            self.img_dirs = [os.path.join(self.root_dir, 'ILSVRC2012_img_val')]
            self.anno_dirs = [os.path.join(
                self.root_dir, 'ILSVRC2012_bbox_val/val')]
        elif subset == 'train':
            self.img_dirs = sorted(glob.glob(os.path.join(
                self.root_dir, 'ILSVRC2012_img_train/n*')))
            self.anno_dirs = [os.path.join(
                self.root_dir, 'ILSVRC2012_bbox_train',
                os.path.basename(s)) for s in self.img_dirs]
        else:
            raise Exception('Unknown subset.')
        # per-class annotation counts and their cumulative sums, used to
        # map a flat index to (class dir, annotation file) in __getitem__
        self.img_nums = [len(glob.glob(os.path.join(d, '*.xml')))
                         for d in self.anno_dirs]
        self.acc_nums = [sum(self.img_nums[:i + 1])
                         for i in range(len(self.img_nums))]
        self.size = sum(self.img_nums)

    def __getitem__(self, index):
        # locate the annotation file
        dir_id = np.argmax(np.array(self.acc_nums) > index)
        anno_files = sorted(
            glob.glob(os.path.join(self.anno_dirs[dir_id], '*.xml')))
        if dir_id == 0:
            anno_id = index
        else:
            anno_id = index - self.acc_nums[dir_id - 1]
        anno_file = anno_files[anno_id]
        img_file = os.path.join(
            self.img_dirs[dir_id],
            os.path.splitext(os.path.basename(anno_file))[0] + '.JPEG')
        # read annotations and pick one object at random
        objects = ET.ElementTree(file=anno_file).findall('object')
        rand_object = random.choice(objects)
        bndbox = np.array([
            int(rand_object.find('bndbox/xmin').text),
            int(rand_object.find('bndbox/ymin').text),
            int(rand_object.find('bndbox/xmax').text),
            int(rand_object.find('bndbox/ymax').text)])
        if self.return_rect:
            # convert corners [x1, y1, x2, y2] to [x, y, w, h]
            bndbox[2:] = bndbox[2:] - bndbox[:2] + 1
        img = Image.open(img_file)
        if img.mode == 'L':
            img = img.convert('RGB')
        if self.transform:
            return self.transform(img, bndbox)
        else:
            return img_file, bndbox

    def __len__(self):
        return self.size

    def _check_integrity(self):
        # cheap existence check only; does not validate file contents
        return os.path.isdir(self.root_dir) and \
            len(os.listdir(self.root_dir)) > 0

    def _download(self, root_dir):
        # BUGFIX: __init__ calls self._download(self.root_dir), but this
        # method took no argument, so download=True raised TypeError
        # instead of NotImplementedError (cf. ImageNetVID._download).
        raise NotImplementedError()
|
BestSonny/open-vot-1 | examples/quick_example.py | <reponame>BestSonny/open-vot-1
from __future__ import absolute_import
from lib.trackers import TrackerCSK
from lib.experiments import ExperimentOTB
# Root directory holding the OTB benchmark sequences.
otb_dir = 'data/OTB'
# Evaluate on the OTB-2013 subset of the benchmark.
experiment = ExperimentOTB(otb_dir, version=2013)
tracker = TrackerCSK()
# Run the tracker over every sequence, then report its scores by name.
experiment.run(tracker, visualize=True)
experiment.report([tracker.name])
|
BestSonny/open-vot-1 | lib/metrics/metrics.py | from __future__ import absolute_import, division
import numpy as np
def center_error(rects1, rects2):
    r"""Euclidean distance between the centers of paired rectangles.

    Both inputs are (N, 4) arrays in ``[x, y, w, h]`` format; returns an
    (N,) array of distances.
    """
    deltas = (rects1[:, :2] + (rects1[:, 2:] - 1) / 2) \
        - (rects2[:, :2] + (rects2[:, 2:] - 1) / 2)
    return np.sqrt(np.power(deltas, 2).sum(axis=1))
def iou(rects1, rects2):
r"""Intersection over union.
"""
rects_inter = _intersection(rects1, rects2)
if rects1.ndim == 1:
areas1 = np.prod(rects1[2:])
areas2 = np.prod(rects2[2:])
area_inter = np.prod(rects_inter[2:])
elif rects1.ndim == 2:
areas1 = np.prod(rects1[:, 2:], axis=1)
areas2 = np.prod(rects2[:, 2:], axis=1)
area_inter = np.prod(rects_inter[:, 2:], axis=1)
else:
raise Exception('Wrong dimension of rects!')
area_union = areas1 + areas2 - area_inter
ious = area_inter / (area_union + 1e-12)
return ious
def _intersection(rects1, rects2):
r"""Rectangle intersection.
"""
assert rects1.shape == rects2.shape
if rects1.ndim == 1:
x1 = max(rects1[0], rects2[0])
y1 = max(rects1[1], rects2[1])
x2 = min(rects1[0] + rects1[2], rects2[0] + rects2[2])
y2 = min(rects1[1] + rects1[3], rects2[1] + rects2[3])
w = max(0, x2 - x1)
h = max(0, y2 - y1)
return np.array([x1, y1, w, h])
elif rects1.ndim == 2:
x1 = np.maximum(rects1[:, 0], rects2[:, 0])
y1 = np.maximum(rects1[:, 1], rects2[:, 1])
x2 = np.minimum(rects1[:, 0] + rects1[:, 2],
rects2[:, 0] + rects2[:, 2])
y2 = np.minimum(rects1[:, 1] + rects1[:, 3],
rects2[:, 1] + rects2[:, 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack((x1, y1, w, h), axis=1)
|
BestSonny/open-vot-1 | tests/datasets/test_pairwise.py | <reponame>BestSonny/open-vot-1<gh_stars>10-100
from __future__ import absolute_import, print_function
import unittest
import random
from lib.datasets import Pairwise, OTB
from lib.utils.viz import show_frame
class TestPairwise(unittest.TestCase):
    """Exercise the Pairwise dataset wrapper under randomized settings and
    check every returned pair is well-formed."""

    def setUp(self):
        self.otb_dir = 'data/OTB'
        self.visualize = True

    def tearDown(self):
        pass

    def test_pairwise(self):
        base_dataset = OTB(self.otb_dir, download=True)
        # randomize the wrapper's configuration each run
        frame_range = random.choice([0, 1, 100])
        causal = random.choice([True, False])
        subset = random.choice(['train', 'val'])
        return_index = random.choice([True, False])
        rand_choice = random.choice([True, False])
        dataset = Pairwise(
            base_dataset, pairs_per_video=1, frame_range=frame_range,
            causal=causal, subset=subset, return_index=return_index,
            rand_choice=rand_choice)
        self.assertGreater(len(dataset), 0)
        for pair in dataset:
            img_z, img_x, bndbox_z, bndbox_x = pair[:4]
            if return_index:
                print('rand_z:', pair[4], '\trand_x:', pair[5])
            self.assertEqual(img_z.mode, 'RGB')
            self.assertEqual(img_x.mode, 'RGB')
            self.assertEqual(bndbox_z.shape, (4,))
            self.assertEqual(bndbox_x.shape, (4,))
        if self.visualize:
            sample = random.choice(dataset)
            img_z, img_x, bndbox_z, bndbox_x = sample[:4]
            if return_index:
                print('rand_z:', sample[4], '\trand_x:', sample[5])
            show_frame(img_z, bndbox_z, fig_n=1, pause=1)
            show_frame(img_x, bndbox_x, fig_n=2, pause=1)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | lib/trackers/goturn.py | from __future__ import absolute_import, division
import torch
import os
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from . import Tracker
from ..utils import dict2tuple
from ..models import GOTURN
from ..utils.ioutil import load_goturn_from_caffe
from ..utils.warp import crop_tensor
class TrackerGOTURN(Tracker):
    """GOTURN tracker (Held et al.): a regression network that predicts the
    target's corner coordinates in the current frame's search region given
    a crop of the previous frame around the previous target location.

    Provides both inference (``init``/``update``) and training (``step``).
    """
    def __init__(self, net_path=None, **kargs):
        """net_path: optional .pth/.caffemodel weights; extra kwargs
        override the default config."""
        super(TrackerGOTURN, self).__init__('GOTURN')
        self.parse_args(**kargs)
        self.cuda = torch.cuda.is_available()
        self.device = torch.device('cuda:0' if self.cuda else 'cpu')
        self.setup_model(net_path)
        self.setup_optimizer()
    def parse_args(self, **kargs):
        """Build ``self.cfg`` (a named tuple) from defaults + overrides."""
        self.cfg = {
            'context': 2,
            'scale_factor': 10,
            'input_dim': 227,
            'mean_color': [104, 117, 123],
            'base_lr': 1e-6,
            'weight_decay': 5e-4,
            'momentum': 0.9,
            'lr_step_size': 2000,
            'gamma': 0.1,
            'epoch_num': 10000,
            'lr_mult_fc_weight': 10,
            'lr_mult_fc_bias': 20,
            'lr_mult_conv': 0,
            'batch_size': 50 // 2}
        for key, val in kargs.items():
            self.cfg.update({key: val})
        self.cfg = dict2tuple(self.cfg)
    def setup_model(self, net_path=None):
        """Build the GOTURN network, optionally load PyTorch or Caffe
        weights, and wrap in DataParallel."""
        self.model = GOTURN()
        if net_path is not None:
            ext = os.path.splitext(net_path)[1]
            if ext == '.pth':
                state_dict = torch.load(
                    net_path, map_location=lambda storage, loc: storage)
                self.model.load_state_dict(state_dict)
            elif ext == '.caffemodel':
                # the Caffe prototxt is expected next to the .caffemodel
                proto_path = os.path.join(
                    os.path.dirname(net_path), 'tracker.prototxt')
                load_goturn_from_caffe(net_path, proto_path, self.model)
            else:
                raise Exception('unsupport file extention')
        self.model = nn.DataParallel(self.model).to(self.device)
    def setup_optimizer(self):
        """SGD with per-parameter LR multipliers (conv layers frozen via a
        0 multiplier by default), StepLR schedule and L1 loss."""
        params = []
        for name, param in self.model.named_parameters():
            lr = self.cfg.base_lr
            weight_decay = self.cfg.weight_decay
            if 'conv' in name:
                if 'weight' in name:
                    lr *= self.cfg.lr_mult_conv
                    weight_decay *= 1
                elif 'bias' in name:
                    lr *= self.cfg.lr_mult_conv
                    weight_decay *= 0
            elif 'fc' in name:
                if 'weight' in name:
                    lr *= self.cfg.lr_mult_fc_weight
                    weight_decay *= 1
                elif 'bias' in name:
                    lr *= self.cfg.lr_mult_fc_bias
                    weight_decay *= 0
            params.append({
                'params': param,
                'initial_lr': lr,
                'weight_decay': weight_decay})
        self.optimizer = optim.SGD(
            params, lr=self.cfg.base_lr,
            momentum=self.cfg.momentum,
            weight_decay=self.cfg.weight_decay)
        self.scheduler = StepLR(
            self.optimizer, self.cfg.lr_step_size, gamma=self.cfg.gamma)
        self.criterion = nn.L1Loss().to(self.device)
    def init(self, image, init_rect):
        """Store the first frame and its box (``[x, y, w, h]``) as the
        'previous' state used by update()."""
        image = torch.from_numpy(image).to(
            self.device).permute(2, 0, 1).unsqueeze(0).float()
        init_rect = torch.from_numpy(init_rect).to(self.device).float()
        # initialize parameters
        self.mean_color = torch.FloatTensor(self.cfg.mean_color)
        self.mean_color = self.mean_color.to(self.device).view(1, 3, 1, 1)
        self.image_prev = image
        self.bndbox_prev = init_rect
    def update(self, image):
        """Track one frame: crop previous/current regions around the
        previous box, regress corners, map them back to image coordinates
        and return the new ``[x, y, w, h]`` box (numpy)."""
        image = torch.from_numpy(image).to(
            self.device).permute(2, 0, 1).unsqueeze(0).float()
        z, _ = self._crop(self.image_prev, self.bndbox_prev)
        x, roi = self._crop(image, self.bndbox_prev)
        corners = self._locate_target(z, x)
        # undo the training-time label scaling, then clamp to the ROI
        corners = corners.squeeze() / self.cfg.scale_factor
        corners = corners.clamp_(0, 1)
        # map normalized corners back into image coordinates via the ROI
        corners[0::2] *= roi[2]
        corners[1::2] *= roi[3]
        corners[0::2] += roi[0]
        corners[1::2] += roi[1]
        bndbox_curr = torch.cat((corners[:2], corners[2:] - corners[:2]))
        bndbox_curr[2].clamp_(1.0, image.size(-1))
        bndbox_curr[3].clamp_(1.0, image.size(-2))
        # update
        self.image_prev = image
        self.bndbox_prev = bndbox_curr
        return bndbox_curr.cpu().numpy()
    def step(self, batch, backward=True, update_lr=False):
        """One training (or validation) step on a (z, x, labels) batch;
        returns the scalar loss value."""
        if backward:
            if update_lr:
                self.scheduler.step()
            self.model.train()
        else:
            self.model.eval()
        z, x, labels = \
            batch[0].to(self.device), batch[1].to(self.device), \
            batch[2].to(self.device)
        self.optimizer.zero_grad()
        with torch.set_grad_enabled(backward):
            pred = self.model(z, x)
            loss = self.criterion(pred, labels)
            if backward:
                loss.backward()
                self.optimizer.step()
        return loss.item()
    def _crop(self, image, bndbox, return_roi=False):
        """Crop a context-padded patch around ``bndbox`` and return it
        together with the ROI ``[x, y, w, h]`` it was taken from.

        NOTE(review): ``return_roi`` is never consulted — the ROI is
        always returned; callers rely on the 2-tuple.
        """
        center = bndbox[:2] + bndbox[2:] / 2
        center[0].clamp_(0.0, image.size(-1))
        center[1].clamp_(0.0, image.size(-2))
        size = bndbox[2:] * self.cfg.context
        size[0].clamp_(1.0, image.size(-1))
        size[1].clamp_(1.0, image.size(-2))
        patch = crop_tensor(image, center, size, padding=0,
                            out_size=self.cfg.input_dim)
        roi = torch.cat([center - size / 2, size])
        return patch, roi
    def _locate_target(self, z, x):
        """Mean-subtract the crops (in place) and regress the target's
        corner coordinates with the network in eval mode."""
        z -= self.mean_color
        x -= self.mean_color
        with torch.set_grad_enabled(False):
            self.model.eval()
            corners = self.model(z, x)
        return corners
|
BestSonny/open-vot-1 | tests/transforms/test_transform_siamfc.py | from __future__ import absolute_import, division
import unittest
import random
import torchvision.transforms.functional as F
import numpy as np
from lib.transforms import TransformSiamFC
from lib.datasets import VOT, Pairwise
from lib.utils.viz import show_frame
class TestTransformSiamFC(unittest.TestCase):
def setUp(self):
self.vot_dir = 'data/vot2017'
self.stats_path = 'pretrained/siamfc/cfnet_ILSVRC2015.stats.mat'
self.visualize = True
def tearDown(self):
pass
def test_transform_siamfc(self):
base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
transform = TransformSiamFC(stats_path=self.stats_path)
dataset = Pairwise(
base_dataset, transform=transform, pairs_per_video=1, subset='train')
self.assertGreater(len(dataset), 0)
for crop_z, crop_x, labels, weights in dataset:
self.assertAlmostEqual(
weights[labels == 1].sum().item(),
weights[labels == 0].sum().item())
self.assertAlmostEqual(
weights.sum().item(), labels[labels >= 0].numel())
self.assertEqual(
weights[labels == transform.ignore_label].sum().item(), 0)
if self.visualize:
crop_z, crop_x, labels, weights = random.choice(dataset)
crop_z = F.to_pil_image(crop_z / 255.0)
crop_x = F.to_pil_image(crop_x / 255.0)
labels = self._rescale(labels.squeeze().cpu().numpy())
weights = self._rescale(weights.squeeze().cpu().numpy())
bndbox_z = np.array([31, 31, 64, 64])
bndbox_x = np.array([95, 95, 64, 64])
show_frame(crop_z, bndbox_z, fig_n=1, pause=1)
show_frame(crop_x, bndbox_x, fig_n=2, pause=1)
show_frame(labels, fig_n=3, pause=1, cmap='hot')
show_frame(weights, fig_n=4, pause=5, cmap='hot')
def _rescale(self, array):
array -= array.min()
array /= array.max()
return array
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | tests/trainers/test_trainer_goturn.py | from __future__ import absolute_import
import unittest
import random
from lib.trainers import TrainerGOTURN
class TestTrainerGOTURN(unittest.TestCase):
    """Integration test: run the GOTURN training loop end-to-end on the
    configured ImageNet VID + DET and VOT datasets."""
    def setUp(self):
        # training configuration and dataset/weight locations
        self.cfg_file = 'config/goturn.json'
        self.vot_dir = 'data/vot2017'
        self.vid_dir = 'data/ILSVRC'
        self.det_dir = 'data/imagenet'
        self.net_path = 'pretrained/goturn/tracker.pth'
        self.trainer = TrainerGOTURN(self.cfg_file)
    def tearDown(self):
        pass
    def test_train(self):
        # full training run; success == no exception raised
        self.trainer.train(self.vid_dir, self.det_dir, self.vot_dir)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | tests/datasets/test_imagenet.py | from __future__ import absolute_import
import unittest
import random
import os
from PIL import Image
from lib.datasets import ImageNetVID, ImageNetObject
from lib.utils.viz import show_frame
class TestImageNet(unittest.TestCase):
    """Sanity checks for the ImageNet VID (video) and object (still-image)
    dataset wrappers."""

    def setUp(self):
        self.vid_dir = 'data/ILSVRC'
        self.obj_dir = 'data/imagenet'
        self.visualize = True

    def tearDown(self):
        pass

    def test_imagenet_vid(self):
        dataset = ImageNetVID(self.vid_dir, return_rect=True)
        self.assertGreater(len(dataset), 0)
        for _ in range(10):
            img_files, anno = random.choice(dataset)
            self.assertGreater(len(img_files), 0)
            self.assertEqual(len(img_files), len(anno))
        if self.visualize:
            img_files, anno = random.choice(dataset)
            for frame, img_file in enumerate(img_files):
                image = Image.open(img_file)
                show_frame(image, anno[frame, :])

    def test_imagenet_obj(self):
        subset = random.choice(['train', 'val'])
        dataset = ImageNetObject(
            self.obj_dir, subset=subset, return_rect=True)
        self.assertGreater(len(dataset), 0)
        for idx in range(10):
            img_file, bndbox = dataset[idx]
            self.assertTrue(os.path.isfile(img_file))
            self.assertTrue(len(bndbox) == 4)
        if self.visualize:
            img_file, bndbox = random.choice(dataset)
            image = Image.open(img_file)
            show_frame(image, bndbox, fig_n=1, pause=0.1)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | lib/transforms/transform_dcfnet.py | from __future__ import absolute_import, division
import numpy as np
import torchvision.transforms.functional as F
import torch
from ..utils import dict2tuple
from ..utils.ioutil import load_siamfc_stats
from ..utils.warp import crop_pil
from lib.utils.viz import show_frame
class TransformDCFNet(object):
    """Pair transform for DCFNet training.

    Crops the template (z) and search (x) regions around their boxes,
    applies a joint random horizontal flip and, when color statistics
    are available, a per-channel color jitter.
    """

    def __init__(self, stats_path=None, **kargs):
        self.parse_args(**kargs)
        # color statistics are optional; without them no color jitter is applied
        self.stats = None
        if stats_path:
            self.stats = load_siamfc_stats(stats_path)

    def parse_args(self, **kargs):
        """Populate attributes from keyword args, falling back to defaults."""
        default_args = {
            'exemplar_sz': 125,
            'padding': 2
        }
        for key, val in default_args.items():
            if key in kargs:
                setattr(self, key, kargs[key])
            else:
                setattr(self, key, val)

    def __call__(self, img_z, img_x, bndbox_z, bndbox_x):
        """Return (crop_z, crop_x) as float tensors scaled to [0, 255]."""
        crop_z = self._crop(img_z, bndbox_z)
        crop_x = self._crop(img_x, bndbox_x)
        # data augmentation
        if np.random.rand() > 0.5:
            # flip template and search jointly so the pair stays consistent
            crop_z = F.hflip(crop_z)
            crop_x = F.hflip(crop_x)
        crop_z = 255.0 * F.to_tensor(crop_z)
        crop_x = 255.0 * F.to_tensor(crop_x)
        # color augmentation: per-channel offsets drawn from the dataset's
        # RGB covariance eigendecomposition (SiamFC-style)
        if self.stats:
            offset_z = np.reshape(np.dot(
                self.stats.rgb_variance_z,
                np.random.randn(3, 1)), (3, 1, 1))
            offset_x = np.reshape(np.dot(
                self.stats.rgb_variance_x,
                np.random.randn(3, 1)), (3, 1, 1))
            crop_z += torch.from_numpy(offset_z).float()
            crop_x += torch.from_numpy(offset_x).float()
            crop_z = torch.clamp(crop_z, 0.0, 255.0)
            crop_x = torch.clamp(crop_x, 0.0, 255.0)
        return crop_z, crop_x

    def _crop(self, image, bndbox):
        """Crop a context-padded square patch centered on *bndbox*."""
        center = bndbox[:2] + bndbox[2:] / 2
        size = bndbox[2:]
        patch_sz = size * (1 + self.padding)
        return crop_pil(image, center, patch_sz, out_size=self.exemplar_sz)
|
BestSonny/open-vot-1 | lib/utils/logger.py | from __future__ import absolute_import, print_function
import os
import platform
import json
import numpy as np
import torch
from tensorboardX import SummaryWriter
from datetime import datetime
class Logger(SummaryWriter):
    """TensorBoard-backed logger that also writes run metadata, arrays and
    model checkpoints under ``log_dir``."""

    def __init__(self, log_dir=None, comment='', verbose=True):
        super(Logger, self).__init__(log_dir, comment)
        # NOTE(review): add_meta joins paths on log_dir, so a None log_dir
        # will fail there — confirm callers always pass a directory.
        self.log_dir = log_dir
        self.verbose = verbose
        self.add_meta()

    def log(self, text_string):
        """Log free-form text under a common tag."""
        self.add_text('common_text', text_string)

    def add_meta(self, meta=None):
        """Write run metadata (datetime, platform, ...) to meta/meta.json.

        Args:
            meta: optional dict of extra entries to record; it is copied,
                never modified.
        """
        # Fix: the original used the mutable default `meta={}` and mutated it
        # with meta.update(...), leaking entries across calls and mutating any
        # dict passed by the caller. Copy instead.
        meta = {} if meta is None else dict(meta)
        filename = os.path.join(self.log_dir, 'meta/meta.json')
        dirname = os.path.dirname(filename)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        meta.update({
            'datetime': str(datetime.now()),
            'language': 'Python %s' % platform.python_version(),
            'platform': platform.platform(),
            'computer': platform.node()})
        with open(filename, 'w') as f:
            json.dump(meta, f, indent=4)

    def add_text(self, tag, text_string, global_step=None):
        """Forward to SummaryWriter and optionally echo to stdout."""
        super(Logger, self).add_text(tag, text_string, global_step)
        if self.verbose:
            print('{}: {}'.format(tag, text_string))

    def add_array(self, tag, array, global_step=None):
        """Save a numpy array as CSV text under arrays/, keyed by tag/step."""
        if global_step is None:
            filename = os.path.join(
                self.log_dir, 'arrays/%s.txt' % tag)
        else:
            filename = os.path.join(
                self.log_dir, 'arrays/%s_step%d.txt' %
                (tag, global_step + 1))
        dirname = os.path.dirname(filename)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        np.savetxt(filename, array, fmt='%.3f', delimiter=',')

    def add_checkpoint(self, tag, state_dict, global_step=None):
        """Save a model/optimizer state dict under checkpoints/, keyed by
        tag/step."""
        if global_step is None:
            filename = os.path.join(
                self.log_dir, 'checkpoints/%s.pth' % tag)
        else:
            filename = os.path.join(
                self.log_dir, 'checkpoints/%s_step%d.pth' %
                (tag, global_step + 1))
        dirname = os.path.dirname(filename)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        torch.save(state_dict, filename)
|
BestSonny/open-vot-1 | lib/models/dcfnet.py | <filename>lib/models/dcfnet.py
from __future__ import absolute_import
import torch
import torch.nn as nn
from ..utils.complex import *
class DCFNetFeature(nn.Module):
    """Two-layer conv feature extractor used by DCFNet (no padding, so the
    spatial size shrinks by 4 in each dimension)."""

    def __init__(self):
        super(DCFNetFeature, self).__init__()
        self.feature = nn.Sequential(
            nn.Conv2d(3, 32, 3),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, 3),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
        )

    def forward(self, x):
        """Map a (N, 3, H, W) image batch to (N, 32, H-4, W-4) features."""
        return self.feature(x)
class DCFNetFeaturePadding(nn.Module):
    """Same architecture as DCFNetFeature but with padding=1 convolutions,
    preserving the input spatial size (used by the online tracker)."""

    def __init__(self):
        super(DCFNetFeaturePadding, self).__init__()
        self.feature = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, 3, padding=1),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
        )

    def forward(self, x):
        """Map a (N, 3, H, W) image batch to (N, 32, H, W) features."""
        return self.feature(x)
class DCFNet(nn.Module):
    """Offline-trainable DCFNet: solves the correlation filter in the Fourier
    domain from the template and applies it to the search features.

    NOTE(review): relies on the pre-1.8 ``torch.rfft``/``torch.irfft`` API —
    confirm the pinned torch version before upgrading.
    """

    def __init__(self, config=None):
        super(DCFNet, self).__init__()
        self.feature = DCFNetFeature()
        # per-run Gaussian label spectrum precomputed by the caller's config
        self.yf = config.yf.clone()
        self.config = config

    def forward(self, z, x):
        """Return the correlation response of search *x* against template *z*."""
        z = self.feature(z)
        x = self.feature(x)
        zf = torch.rfft(z, signal_ndim=2)
        xf = torch.rfft(x, signal_ndim=2)
        # ridge-regression filter in the frequency domain:
        # alpha = (zf* . yf) / (sum_c zf* . zf + lambda)
        kzzf = torch.sum(tensor_complex_mulconj(zf,zf), dim=1, keepdim=True)
        kzyf = tensor_complex_mulconj(zf, self.yf.to(device=z.device))
        solution = tensor_complex_division(kzyf, kzzf + self.config.lambda0)
        response = torch.irfft(torch.sum(tensor_complex_mulconj(xf, solution), dim=1, keepdim=True), signal_ndim=2)
        return response
class DCFNetOnline(nn.Module):
    """DCFNet variant that keeps a correlation filter as running state and
    updates it online at tracking time."""

    def __init__(self, config=None):
        super(DCFNetOnline, self).__init__()
        self.feature = DCFNetFeaturePadding()
        self.config = config

    def forward(self, x):
        """Correlate search features against the stored filter and return the
        spatial response map.

        Requires ``update`` to have been called at least once (it creates
        ``model_alphaf``/``model_betaf``).
        """
        x = self.feature(x)
        x = x * self.config.cos_window
        xf = torch.rfft(x, signal_ndim=2)
        solution = tensor_complex_division(self.model_alphaf, self.model_betaf + self.config.lambda0)
        response = torch.irfft(torch.sum(tensor_complex_mulconj(xf, solution), dim=1, keepdim=True), signal_ndim=2)
        # removed a dead `r_max = torch.max(response)` whose result was unused
        return response

    def update(self, z, lr=1.):
        """Update the correlation filter from template *z*.

        lr > 0.99 (the first frame) replaces the filter outright; otherwise
        the numerator/denominator are blended with rate *lr*.
        """
        z = self.feature(z)
        z = z * self.config.cos_window
        zf = torch.rfft(z, signal_ndim=2)
        kzzf = torch.sum(tensor_complex_mulconj(zf, zf), dim=1, keepdim=True)
        kzyf = tensor_complex_mulconj(zf, self.config.yf_online.to(device=z.device))
        if lr > 0.99:
            self.model_alphaf = kzyf
            self.model_betaf = kzzf
        else:
            self.model_alphaf = (1 - lr) * self.model_alphaf.data + lr * kzyf.data
            self.model_betaf = (1 - lr) * self.model_betaf.data + lr * kzzf.data
|
BestSonny/open-vot-1 | lib/trackers/dcfnet.py | from __future__ import absolute_import, division
import torch
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import cv2
from torch.optim.lr_scheduler import LambdaLR
from . import Tracker
from ..utils import dict2tuple
from ..models import DCFNet, DCFNetOnline
from ..utils.warp import warp_cv2
def gaussian_shaped_labels(sigma, sz):
x, y = np.meshgrid(np.arange(1, sz[0]+1) - np.floor(float(sz[0]) / 2), np.arange(1, sz[1]+1) - np.floor(float(sz[1]) / 2))
d = x ** 2 + y ** 2
g = np.exp(-0.5 / (sigma ** 2) * d)
g = np.roll(g, int(-np.floor(float(sz[0]) / 2.) + 1), axis=0)
g = np.roll(g, int(-np.floor(float(sz[1]) / 2.) + 1), axis=1)
return g
class TrackerDCFNet(Tracker):
    """DCFNet tracker: correlation-filter tracking on learned conv features.

    Supports an offline-trained siamese mode (``online=False``) and an
    online-updating mode (``online=True``). Requires CUDA.
    """

    def __init__(self, net_path=None, online=False, **kargs):
        super(TrackerDCFNet, self).__init__('DCFNet')
        self.online = online
        self.parse_args(**kargs)
        self.setup_model(net_path)
        self.setup_optimizer()

    def parse_args(self, **kargs):
        """Merge user kwargs over defaults and precompute labels/windows."""
        # default branch is AlexNetV1
        self.cfg = {
            'crop_sz': 125,
            'output_sz': 121,
            'lambda0': 1e-4,
            'padding': 2.0,
            'output_sigma_factor': 0.1,
            'initial_lr': 1e-2,
            'final_lr': 1e-5,
            'epoch_num': 50,
            'weight_decay': 5e-4,
            'batch_size': 32,
            'interp_factor': 0.01,
            'num_scale': 3,
            'scale_step': 1.0275,
            'min_scale_factor': 0.2,
            'max_scale_factor': 5,
            'scale_penalty': 0.9925,
        }
        for key, val in kargs.items():
            self.cfg.update({key: val})
        self.cfg['output_sigma'] = self.cfg['crop_sz'] / (1 + self.cfg['padding']) * self.cfg['output_sigma_factor']
        self.cfg['y'] = gaussian_shaped_labels(self.cfg['output_sigma'], [self.cfg['output_sz'], self.cfg['output_sz']])
        self.cfg['yf'] = torch.rfft(torch.Tensor(self.cfg['y']).view(1, 1, self.cfg['output_sz'], self.cfg['output_sz']).cuda(), signal_ndim=2)
        self.cfg['net_average_image'] = np.array([104, 117, 123]).reshape(1, 1, -1).astype(np.float32)
        self.cfg['scale_factor'] = self.cfg['scale_step'] ** (np.arange(self.cfg['num_scale']) - self.cfg['num_scale'] / 2)
        self.cfg['scale_penalties'] = self.cfg['scale_penalty'] ** (np.abs((np.arange(self.cfg['num_scale']) - self.cfg['num_scale'] / 2)))
        self.cfg['net_input_size'] = [self.cfg['crop_sz'], self.cfg['crop_sz']]
        self.cfg['cos_window'] = torch.Tensor(np.outer(np.hanning(self.cfg['crop_sz']), np.hanning(self.cfg['crop_sz']))).cuda()
        self.cfg['y_online'] = gaussian_shaped_labels(self.cfg['output_sigma'], self.cfg['net_input_size'])
        self.cfg['yf_online'] = torch.rfft(torch.Tensor(self.cfg['y_online']).view(1, 1, self.cfg['crop_sz'], self.cfg['crop_sz']).cuda(), signal_ndim=2)
        self.cfg = dict2tuple(self.cfg)

    def setup_model(self, net_path=None):
        """Build the network (online or offline variant) and move it to GPU."""
        if self.online:
            self.model = DCFNetOnline(config = self.cfg)
        else:
            self.model = DCFNet(config = self.cfg)
        if net_path:
            self.load_param(net_path)
        self.gpu_num = torch.cuda.device_count()
        print('GPU NUM: {:2d}'.format(self.gpu_num))
        # DataParallel only for offline training; online tracking keeps one GPU
        if self.gpu_num > 1 and self.online == False:
            self.model = nn.DataParallel(self.model, list(range(self.gpu_num))).cuda()
        else:
            self.model = self.model.cuda()
        self.target = torch.Tensor(self.cfg.y).cuda().unsqueeze(0).unsqueeze(0).repeat(self.cfg.batch_size * self.gpu_num, 1, 1, 1)

    def load_param(self, path='param.pth'):
        """Load weights from a training checkpoint or a bare state dict."""
        checkpoint = torch.load(path)
        if 'state_dict' in checkpoint.keys():  # from training result
            state_dict = checkpoint['state_dict']
            # Fix: the original did `state_dict.keys()[0]`, which raises
            # TypeError in Python 3 (dict_keys is not subscriptable).
            if 'module' in next(iter(state_dict)):  # train with nn.DataParallel
                from collections import OrderedDict
                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    name = k[7:]  # remove `module.`
                    new_state_dict[name] = v
                self.model.load_state_dict(new_state_dict)
            else:
                self.model.load_state_dict(state_dict)
        else:
            self.model.feature.load_state_dict(checkpoint)

    def setup_optimizer(self):
        """SGD with a log-spaced learning-rate decay over epoch_num epochs."""
        self.optimizer = optim.SGD(
            self.model.parameters(), lr=self.cfg.initial_lr,
            weight_decay=self.cfg.weight_decay)
        lambda1 = lambda epoch: np.logspace(0, -2, num=self.cfg.epoch_num)[epoch]
        self.scheduler = LambdaLR(self.optimizer, lr_lambda=[lambda1])
        # NOTE(review): size_average is deprecated in newer torch; equivalent
        # to reduction='sum' — confirm against the pinned torch version
        self.criterion = nn.SmoothL1Loss(size_average=False)

    def init(self, image, init_rect):
        """Initialize the online filter from the first frame and its box."""
        self.model.eval()
        self.target_pos = init_rect[:2] + init_rect[2:] / 2 - 1
        self.target_sz = init_rect[2:]
        self.min_sz = np.maximum(self.cfg.min_scale_factor * self.target_sz, 4)
        self.max_sz = np.minimum(image.shape[:2], self.cfg.max_scale_factor * self.target_sz)
        self.padded_sz = self.target_sz * (1 + self.cfg.padding)
        # the network expects BGR input (caffe-style mean subtraction)
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        elif image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        target = warp_cv2(image, self.target_pos, self.padded_sz,
                          self.cfg.net_input_size, (0, 0, 0))
        target = target - self.cfg.net_average_image
        target = torch.from_numpy(target).cuda().permute(2, 0, 1).unsqueeze(0).float()
        self.model.update(target)
        # reusable buffer for the multi-scale search crops
        self.patch_crop = torch.zeros(self.cfg.num_scale, target.shape[1], target.shape[2], target.shape[3]).cuda()  # buff

    def update(self, image):
        """Track one frame; returns the new [x, y, w, h] bounding box."""
        self.model.eval()
        if image.ndim == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        elif image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        for i in range(self.cfg.num_scale):  # crop multi-scale search region
            window_sz = self.target_sz * (self.cfg.scale_factor[i] * (1 + self.cfg.padding))
            target = warp_cv2(image, self.target_pos, window_sz,
                              self.cfg.net_input_size, (0,0,0))
            target = target - self.cfg.net_average_image
            target = torch.from_numpy(target).cuda().permute(2, 0, 1).unsqueeze(0).float()
            self.patch_crop[i, :] = target
        response = self.model(self.patch_crop)
        # pick the scale with the highest (penalized) response peak
        peak, idx = torch.max(response.view(self.cfg.num_scale, -1), 1)
        peak = peak.data.cpu().numpy() * self.cfg.scale_penalties
        best_scale = np.argmax(peak)
        r_max, c_max = np.unravel_index(idx[best_scale], self.cfg.net_input_size)
        # wrap displacements past the half-size boundary to negative offsets
        if r_max > self.cfg.net_input_size[0] / 2:
            r_max = r_max - self.cfg.net_input_size[0]
        if c_max > self.cfg.net_input_size[1] / 2:
            c_max = c_max - self.cfg.net_input_size[1]
        window_sz = self.target_sz * (self.cfg.scale_factor[best_scale] * (1 + self.cfg.padding))
        self.target_pos = self.target_pos + np.array([c_max, r_max]) * window_sz / self.cfg.net_input_size
        self.target_sz = np.minimum(np.maximum(window_sz / (1 + self.cfg.padding), self.min_sz), self.max_sz)
        # model update
        window_sz = self.target_sz * (1 + self.cfg.padding)
        target = warp_cv2(image, self.target_pos, window_sz,
                          self.cfg.net_input_size, (0, 0, 0))
        target = target - self.cfg.net_average_image
        target = torch.from_numpy(target).cuda().permute(2, 0, 1).unsqueeze(0).float()
        self.model.update(target, lr=self.cfg.interp_factor)
        bndbox = np.concatenate([
            self.target_pos - self.target_sz / 2 + 1, self.target_sz])
        return bndbox

    def step(self, batch, backward=True, update_lr=False):
        """One offline training/validation step; returns the scalar loss."""
        if backward:
            if update_lr:
                self.scheduler.step()
            self.model.train()
        else:
            self.model.eval()
        template, search = batch[0].cuda(non_blocking=True), batch[1].cuda(non_blocking=True)
        self.optimizer.zero_grad()
        with torch.set_grad_enabled(backward):
            output = self.model(template, search)
            loss = self.criterion(output, self.target)/template.size(0)
            if backward:
                loss.backward()
                self.optimizer.step()
        return loss.item()
|
BestSonny/open-vot-1 | lib/models/goturn.py | from __future__ import absolute_import
import torch.nn as nn
import torch
from .caffenet import CaffeNet
from ..utils import initialize_weights
class GOTURN(nn.Module):
    """GOTURN regression tracker: two CaffeNet branches whose features are
    concatenated and regressed to 4 box coordinates through fc layers."""

    def __init__(self):
        super(GOTURN, self).__init__()
        self.branch_z = CaffeNet()
        self.branch_x = CaffeNet()
        self.fc6 = nn.Sequential(
            nn.Linear(6 * 6 * 256 * 2, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5))
        self.fc7 = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5))
        self.fc7b = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5))
        self.fc8 = nn.Sequential(
            nn.Linear(4096, 4))
        initialize_weights(self)

    def forward(self, z, x):
        """Regress the box of *x* relative to template *z* (both 227x227)."""
        assert z.size(-1) == x.size(-1) == 227
        feat_z = self.branch_z(z)
        feat_x = self.branch_x(x)
        joint = torch.cat((feat_z, feat_x), dim=1)
        joint = joint.view(joint.size(0), -1)
        return self.fc8(self.fc7b(self.fc7(self.fc6(joint))))
|
BestSonny/open-vot-1 | lib/models/siamese.py | from __future__ import absolute_import
import torch.nn as nn
from .submodules import Adjust2d, XCorr
class SiameseNet(nn.Module):
    """Generic siamese network: a shared feature branch, cross-correlation
    of the two embeddings, then a normalization/adjust layer."""

    def __init__(self, branch, norm='bn'):
        super(SiameseNet, self).__init__()
        self.branch = branch
        self.norm = Adjust2d(norm=norm)
        self.xcorr = XCorr()

    def forward(self, z, x):
        # template and search must share batch and channel dimensions
        assert z.size()[:2] == x.size()[:2]
        feat_z = self.branch(z)
        feat_x = self.branch(x)
        score = self.xcorr(feat_z, feat_x)
        score = self.norm(score, feat_z, feat_x)
        return score
|
BestSonny/open-vot-1 | lib/utils/ioutil.py | from __future__ import absolute_import, print_function, division
import time
import sys
import os
import zipfile
import torch
import scipy.io
import h5py
import numpy as np
from urllib.request import urlretrieve
from collections import namedtuple
from ..models import AlexNetV1, AlexNetV2
def download(url, filename):
    """Download *url* to *filename*, reporting progress via _reporthook."""
    return urlretrieve(url, filename, _reporthook)
def _reporthook(count, block_size, total_size):
    """urlretrieve progress hook: prints percent, size, speed and elapsed time.

    Uses a module-level ``start_time`` set on the first call (count == 0) to
    measure elapsed time across subsequent calls.
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = time.time() - start_time
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    # \r rewrites the same console line on each update
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
def extract(filename, extract_dir):
    """Extract a compressed archive into *extract_dir*.

    Only ``.zip`` archives are supported; any other extension raises an
    Exception naming the offending extension and file.
    """
    if os.path.splitext(filename)[1] == '.zip':
        print('Extracting zip file...')
        if not os.path.isdir(extract_dir):
            os.makedirs(extract_dir)
        with zipfile.ZipFile(filename) as z:
            z.extractall(extract_dir)
    else:
        # Fix: the original passed only one argument to str.format while the
        # message has two placeholders (and passed `filename` as a second
        # Exception argument), so raising itself crashed with IndexError.
        raise Exception(
            'Unsupport extension {} of the compressed file {}.'.format(
                os.path.splitext(filename)[1], filename))
def load_siamfc_from_matconvnet(filename, model):
    """Copy SiamFC weights from a MatConvNet .mat file into *model* in place.

    Supports the AlexNetV1 and AlexNetV2 branch variants, whose MatConvNet
    parameter names use different prefixes. Returns the same *model*.
    """
    assert isinstance(model.branch, (AlexNetV1, AlexNetV2))
    # parameter-name prefixes differ between the two exported models
    if isinstance(model.branch, AlexNetV1):
        p_conv = 'conv'
        p_bn = 'bn'
        p_adjust = 'adjust_'
    elif isinstance(model.branch, AlexNetV2):
        p_conv = 'br_conv'
        p_bn = 'br_bn'
        p_adjust = 'fin_adjust_bn'
    params_names_list, params_values_list = load_matconvnet(filename)
    params_values_list = [torch.from_numpy(p) for p in params_values_list]
    # conv filters are stored HWCN in MatConvNet; permute to NCHW
    for l, p in enumerate(params_values_list):
        param_name = params_names_list[l]
        if 'conv' in param_name and param_name[-1] == 'f':
            p = p.permute(3, 2, 0, 1)
        p = torch.squeeze(p)
        params_values_list[l] = p
    net = (
        model.branch.conv1,
        model.branch.conv2,
        model.branch.conv3,
        model.branch.conv4,
        model.branch.conv5)
    for l, layer in enumerate(net):
        layer[0].weight.data[:] = params_values_list[
            params_names_list.index('%s%df' % (p_conv, l + 1))]
        layer[0].bias.data[:] = params_values_list[
            params_names_list.index('%s%db' % (p_conv, l + 1))]
        # all but the last conv block carry a BatchNorm at index 1; on the
        # last iteration the elif branches load the final adjust layer instead
        if l < len(net) - 1:
            layer[1].weight.data[:] = params_values_list[
                params_names_list.index('%s%dm' % (p_bn, l + 1))]
            layer[1].bias.data[:] = params_values_list[
                params_names_list.index('%s%db' % (p_bn, l + 1))]
            # moments are stored as [mean, std]; variance = std ** 2
            bn_moments = params_values_list[
                params_names_list.index('%s%dx' % (p_bn, l + 1))]
            layer[1].running_mean[:] = bn_moments[:, 0]
            layer[1].running_var[:] = bn_moments[:, 1] ** 2
        elif model.norm.norm == 'bn':
            model.norm.bn.weight.data[:] = params_values_list[
                params_names_list.index('%sm' % p_adjust)]
            model.norm.bn.bias.data[:] = params_values_list[
                params_names_list.index('%sb' % p_adjust)]
            bn_moments = params_values_list[
                params_names_list.index('%sx' % p_adjust)]
            model.norm.bn.running_mean[:] = bn_moments[0]
            model.norm.bn.running_var[:] = bn_moments[1] ** 2
        elif model.norm.norm == 'linear':
            model.norm.linear.weight.data[:] = params_values_list[
                params_names_list.index('%sf' % p_adjust)]
            model.norm.linear.bias.data[:] = params_values_list[
                params_names_list.index('%sb' % p_adjust)]
    return model
def load_matconvnet(filename):
    """Read parameter names and values from a MatConvNet ``net`` .mat file.

    Returns (names, values) as two parallel Python lists.
    """
    mat = scipy.io.loadmat(filename)
    net_struct = mat.get('net')
    params = net_struct['params'][0][0]
    names = params['name'][0]
    values = params['value'][0]
    names_list = [names[i][0] for i in range(names.size)]
    values_list = [values[i] for i in range(values.size)]
    return names_list, values_list
def load_siamfc_stats(stats_path):
    """Load SiamFC color statistics from an HDF5 .mat file.

    Returns a namedtuple with, per branch (template ``z`` and search ``x``),
    the RGB mean and a 0.1-scaled color-jitter basis derived from the
    eigendecomposition of the branch's RGB covariance.
    """
    Stats = namedtuple('Stats', [
        'rgb_mean_z',
        'rgb_variance_z',
        'rgb_mean_x',
        'rgb_variance_x'])
    mat = h5py.File(stats_path, mode='r')
    rgb_mean_z = mat['z']['rgbMean'][:]
    d, v = np.linalg.eig(mat['z']['rgbCovariance'][:])
    rgb_variance_z = 0.1 * np.dot(np.sqrt(np.diag(d)), v.T)
    rgb_mean_x = mat['x']['rgbMean'][:]
    # Fix: the original reused mat['z']['rgbCovariance'] here, so the x-branch
    # jitter was computed from the template branch's statistics.
    d, v = np.linalg.eig(mat['x']['rgbCovariance'][:])
    rgb_variance_x = 0.1 * np.dot(np.sqrt(np.diag(d)), v.T)
    stats = Stats(
        rgb_mean_z,
        rgb_variance_z,
        rgb_mean_x,
        rgb_variance_x)
    return stats
def load_goturn_from_caffe(net_path, proto_path, model):
    """Copy GOTURN weights from a Caffe model into the PyTorch *model*.

    Fills both CaffeNet conv branches (the x-branch uses the ``_p`` suffixed
    caffe layer names) and the four fc stages. Returns the same *model*.

    NOTE(review): assumes the CaffeNet branch exposes conv1..conv5 as
    Sequential modules whose first element is the Conv2d — confirm against
    the CaffeNet definition.
    """
    import caffe
    caffe.set_mode_cpu()
    net = caffe.Net(proto_path, net_path, caffe.TEST)
    params = net.params
    conv_branches = [model.branch_z, model.branch_x]
    for i, branch in enumerate(conv_branches):
        if i == 0:
            param_names = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']
        else:
            param_names = ['conv1_p', 'conv2_p',
                           'conv3_p', 'conv4_p', 'conv5_p']
        # Fix: the original read conv layers from `net.convN` — the caffe Net
        # object, which has no such attributes — instead of the PyTorch
        # branch currently being filled.
        conv_layers = [
            branch.conv1[0],
            branch.conv2[0],
            branch.conv3[0],
            branch.conv4[0],
            branch.conv5[0]]
        for l, conv in enumerate(conv_layers):
            name = param_names[l]
            conv.weight.data[:] = torch.from_numpy(params[name][0].data)
            conv.bias.data[:] = torch.from_numpy(params[name][1].data)
    # Fix: GOTURN defines `fc6`, not `fc` (the original `model.fc[0]` would
    # raise AttributeError).
    fc_layers = [
        model.fc6[0],
        model.fc7[0],
        model.fc7b[0],
        model.fc8[0]]
    # Fix: this list was bound to `params_names` while the loop below read
    # `param_names`, which would have been a NameError/stale value.
    param_names = ['fc6-new', 'fc7-new', 'fc7-newb', 'fc8-shapes']
    for l, fc in enumerate(fc_layers):
        name = param_names[l]
        fc.weight.data[:] = torch.from_numpy(params[name][0].data)
        fc.bias.data[:] = torch.from_numpy(params[name][1].data)
    return model
|
BestSonny/open-vot-1 | lib/utils/warp.py | from __future__ import absolute_import, division
import numbers
import numpy as np
import cv2
import torch
import torch.nn.functional as F
from PIL import Image, ImageStat, ImageOps
def pad_pil(image, npad, padding='avg'):
    """Pad a PIL image by *npad* pixels on every side.

    With ``padding='avg'`` the border is filled with the per-channel mean
    (rounded to int, since PIL has no float RGB); any other value is passed
    straight to ImageOps.expand as the fill color.
    """
    if npad == 0:
        return image
    if padding == 'avg':
        fill = tuple(int(round(c)) for c in ImageStat.Stat(image).mean)
    else:
        fill = padding
    return ImageOps.expand(image, border=npad, fill=fill)
def crop_pil(image, center, size, padding='avg', out_size=None):
    """Crop a *size*-sized patch centered at *center* from a PIL image.

    Out-of-bounds regions are handled by padding the image once by the
    maximum overshoot. The patch is optionally resized to *out_size*
    (a number means a square).
    """
    # convert bndbox to corners
    size = np.array(size)
    corners = np.concatenate((center - size / 2, center + size / 2))
    corners = np.round(corners).astype(int)
    # pad once by the largest overshoot so the crop is fully inside
    pads = np.concatenate((-corners[:2], corners[2:] - image.size))
    npad = max(0, int(pads.max()))
    if npad > 0:
        image = pad_pil(image, npad, padding=padding)
    # shift corners by the padding offset before cropping
    corners = tuple((corners + npad).tolist())
    patch = image.crop(corners)
    if out_size is not None:
        if isinstance(out_size, numbers.Number):
            out_size = (out_size, out_size)
        if not out_size == patch.size:
            patch = patch.resize(out_size, Image.BILINEAR)
    return patch
def pad_array(image, npad, padding='avg'):
    """Pad an ndarray image by *npad* pixels on every side.

    ``padding='avg'`` fills with the per-channel mean; any other value
    falls back to zero fill.
    """
    if npad == 0:
        return image
    if padding == 'avg':
        fill = image.mean(axis=(0, 1))
    else:
        fill = 0
    return cv2.copyMakeBorder(image, npad, npad, npad, npad,
                              cv2.BORDER_CONSTANT, value=fill)
def crop_array(image, center, size, padding='avg', out_size=None):
    """Crop a *size*-sized patch centered at *center* from an ndarray image.

    Mirrors crop_pil for numpy/cv2 images: pads once by the maximum
    overshoot, crops, then optionally resizes to *out_size*.
    """
    # convert bndbox to corners
    size = np.array(size)
    corners = np.concatenate((center - size / 2, center + size / 2))
    corners = np.round(corners).astype(int)
    # image.shape[1::-1] is (width, height), matching the corner layout
    pads = np.concatenate((-corners[:2], corners[2:] - image.shape[1::-1]))
    npad = max(0, int(pads.max()))
    if npad > 0:
        image = pad_array(image, npad, padding=padding)
    # shift corners by the padding offset before slicing
    corners = tuple((corners + npad).tolist())
    patch = image[corners[1]:corners[3], corners[0]:corners[2]]
    if out_size is not None:
        if isinstance(out_size, numbers.Number):
            out_size = (out_size, out_size)
        if not out_size == patch.shape[1::-1]:
            patch = cv2.resize(patch, out_size, interpolation=cv2.INTER_LINEAR)
    return patch
def encode_theta(center, size, angle, img_sz):
    """Build a 1x2x3 affine matrix mapping a crop window to normalized
    [-1, 1] grid coordinates for F.affine_grid.

    *angle* is accepted for interface symmetry with decode_theta but no
    rotation is encoded.
    """
    dev = center.device
    scale_x, scale_y = size / (img_sz - 1)
    trans_x, trans_y = (2 * center - img_sz + 1) / (img_sz - 1)
    flat = [scale_x, 0, trans_x, 0, scale_y, trans_y]
    return torch.FloatTensor(flat).view(-1, 2, 3).to(dev)
def decode_theta(theta, img_sz):
    """Invert encode_theta: recover (center, size, angle) from a 2x3 affine
    matrix. The angle is always zero since rotation is never encoded."""
    dev = theta.device
    scale_x, scale_y = theta[0, 0], theta[1, 1]
    trans_x, trans_y = theta[0, 2], theta[1, 2]
    center = torch.FloatTensor([trans_x, trans_y]).to(dev) * (img_sz - 1)
    center = (center + img_sz - 1) / 2
    size = torch.FloatTensor([scale_x, scale_y]).to(dev) * (img_sz - 1)
    angle = torch.zeros(1).to(dev)
    return center, size, angle
def crop_tensor(image, center, size, padding='avg', out_size=None):
    """Crop an out_size x out_size patch centered at *center* from a NCHW
    image tensor via an affine grid sample.

    When the window overshoots the image and ``padding='avg'``, the mean
    color is subtracted before sampling (so grid_sample's zero padding
    becomes mean padding) and added back to the patch afterwards.
    """
    assert out_size is not None
    # image.size()[:-3:-1] is (W, H)
    img_sz = torch.tensor(image.size()[:-3:-1]).to(image.device).float()
    # calculate padding
    corners = torch.cat((center - size / 2, center + size / 2))
    pads = torch.cat((-corners[:2], corners[2:] - img_sz))
    npad = max(0, pads.max().item())
    if npad > 0 and padding == 'avg':
        avg_chan = image.view(3, -1).mean(dim=1).view(1, 3, 1, 1)
        # Fix: the original did `image -= avg_chan`, silently mutating the
        # caller's tensor and never restoring it; shift a copy instead.
        image = image - avg_chan
    out_size = torch.Size((1, 1, out_size, out_size))
    theta = encode_theta(center, size, 0, img_sz)
    grid = F.affine_grid(theta, out_size)
    patch = F.grid_sample(image, grid)
    if npad > 0 and padding == 'avg':
        patch += avg_chan
    return patch
def resize_tensor(image, size):
    """Resample *image* to *size* through an identity affine grid (bilinear).

    A numeric *size* is interpreted as a (1, 1, size, size) target shape.
    """
    if isinstance(size, numbers.Number):
        size = torch.Size((1, 1, size, size))
    identity = torch.FloatTensor([1, 0, 0, 0, 1, 0]).to(
        image.device).view(-1, 2, 3).float()
    sample_grid = F.affine_grid(identity, size)
    return F.grid_sample(image, sample_grid)
def warp_cv2(image, center, size, out_size, padding):
    """Extract and rescale the window centered at *center* with extent *size*
    to *out_size* via a single cv2 affine warp; *padding* is the constant
    border color used for out-of-image pixels."""
    if isinstance(out_size, numbers.Number):
        out_size = np.array([out_size, out_size], dtype=int)
    scale_x, scale_y = out_size / size
    shift_x, shift_y = -(center - size / 2) * [scale_x, scale_y]
    affine = np.array([[scale_x, 0, shift_x],
                       [0, scale_y, shift_y]]).astype(np.float32)
    return cv2.warpAffine(
        image, affine, tuple(out_size), flags=cv2.INTER_CUBIC,
        borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
|
BestSonny/open-vot-1 | tests/models/test_caffenet.py | from __future__ import absolute_import, print_function
import unittest
import torch
import random
import time
from lib.models import CaffeNet
class TestCaffeNet(unittest.TestCase):
    """Train/eval mode consistency checks for the CaffeNet backbone."""

    def setUp(self):
        self.x = torch.randn((2, 3, 256, 256))
        self.net = CaffeNet()

    def tearDown(self):
        pass

    def test_caffenet(self):
        net = CaffeNet()
        # training mode: output should carry gradients
        with torch.set_grad_enabled(True):
            net.train()
            tic = time.time()
            out_train = net(self.x)
            print('inference time of training: %.3f' % (time.time() - tic))
            self.assertTrue(out_train.requires_grad)
            self.assertTrue(net.training)
        # eval mode: no gradients should be tracked
        with torch.set_grad_enabled(False):
            net.eval()
            tic = time.time()
            out_eval = net(self.x)
            print('inference time of test: %.3f' % (time.time() - tic))
            self.assertFalse(out_eval.requires_grad)
            self.assertFalse(net.training)
        # both modes should produce the same mean activation
        self.assertAlmostEqual(
            out_train.mean().item(), out_eval.mean().item())
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | tests/transforms/test_transform_goturn.py | <reponame>BestSonny/open-vot-1<gh_stars>10-100
from __future__ import absolute_import, division
import unittest
import random
import torch
import torchvision.transforms.functional as F
import numpy as np
from lib.transforms import TransformGOTURN
from lib.datasets import VOT, Pairwise
from lib.utils.viz import show_frame
class TestTransformGOTURN(unittest.TestCase):
    """Checks that the GOTURN pair transform yields matching crop sizes and
    decodable labels."""

    def setUp(self):
        self.vot_dir = 'data/vot2017'
        self.visualize = True

    def tearDown(self):
        pass

    def test_transform_goturn(self):
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        transform = TransformGOTURN()
        dataset = Pairwise(
            base_dataset, transform, pairs_per_video=1,
            frame_range=1, causal=True)
        self.assertGreater(len(dataset), 0)
        # template and search crops must share the same tensor shape
        for crop_z, crop_x, labels in dataset:
            self.assertEqual(crop_z.size(), crop_x.size())
        if self.visualize:
            for t in range(10):
                crop_z, crop_x, labels = random.choice(dataset)
                # undo mean-subtraction and scaling for display
                mean_color = torch.tensor(
                    transform.mean_color).float().view(3, 1, 1)
                crop_z = F.to_pil_image((crop_z + mean_color) / 255.0)
                crop_x = F.to_pil_image((crop_x + mean_color) / 255.0)
                labels = labels.cpu().numpy()
                # labels are scaled corner coordinates; map back to pixels
                labels *= transform.out_size / transform.label_scale_factor
                bndbox = np.concatenate([
                    labels[:2], labels[2:] - labels[:2]])
                show_frame(crop_x, bndbox, fig_n=1, pause=1)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | tests/datasets/test_otb.py | from __future__ import absolute_import
import unittest
import random
from PIL import Image
from lib.datasets import OTB
from lib.utils.viz import show_frame
class TestOTB(unittest.TestCase):
    """Loading and download checks for the OTB benchmark wrapper."""

    def setUp(self):
        self.otb_dir = 'data/OTB'
        self.visualize = True

    def tearDown(self):
        pass

    def test_load(self):
        dataset = OTB(self.otb_dir)
        self.assertGreater(len(dataset), 0)
        # every sequence must pair one annotation row per frame
        for seq_files, seq_anno in dataset:
            self.assertGreater(len(seq_files), 0)
            self.assertEqual(len(seq_files), len(seq_anno))
        if self.visualize:
            seq_files, seq_anno = random.choice(dataset)
            for idx, path in enumerate(seq_files):
                show_frame(Image.open(path), seq_anno[idx, :])

    def test_download(self):
        dataset = OTB(self.otb_dir, download=True, version=2015)
        self.assertGreater(len(dataset), 0)
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | lib/metrics/__init__.py | from __future__ import absolute_import
from .metrics import iou, center_error
|
BestSonny/open-vot-1 | lib/models/__init__.py | from __future__ import absolute_import
from .alexnet import AlexNetV1, AlexNetV2
from .siamese import SiameseNet
from .caffenet import CaffeNet
from .goturn import GOTURN
from .dcfnet import DCFNet, DCFNetOnline
|
BestSonny/open-vot-1 | tests/trackers/test_siamfc.py | from __future__ import absolute_import, print_function
import unittest
import random
from torch.utils.data import DataLoader
from lib.trackers import TrackerSiamFC
from lib.datasets import VOT, Pairwise
from lib.transforms import TransformSiamFC
class TestTrackerSiamFC(unittest.TestCase):
    """Tracking and training smoke tests for both SiamFC branch variants
    (AlexNetV1 with the 2016 weights, AlexNetV2 with the CFNet weights)."""

    def setUp(self):
        # local dataset root and pretrained MatConvNet weight files
        self.vot_dir = 'data/vot2017'
        self.net_v1 = 'pretrained/siamfc/2016-08-17.net.mat'
        self.net_v2 = 'pretrained/siamfc/baseline-conv5_e55.mat'
        self.stats_path = 'pretrained/siamfc/cfnet_ILSVRC2015.stats.mat'

    def tearDown(self):
        pass

    def test_siamfc_track_v1(self):
        """Track one random sequence with the v1 network; output shape must
        match the annotation shape."""
        dataset = VOT(self.vot_dir, return_rect=True, download=True)
        tracker = TrackerSiamFC(
            branch='alexv1', net_path=self.net_v1, z_lr=0,
            response_up=16, scale_step=1.0375, window_influence=0.176)
        img_files, anno = random.choice(dataset)
        rects, speed = tracker.track(img_files, anno[0, :],
                                     visualize=True)
        self.assertEqual(rects.shape, anno.shape)

    def test_siamfc_train_v1(self):
        """One pass of training and validation steps with the v1 config."""
        tracker = TrackerSiamFC(branch='alexv1')
        transform = TransformSiamFC(
            stats_path=self.stats_path, score_sz=17,
            r_pos=16, total_stride=8)
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        dataset = Pairwise(base_dataset, transform, pairs_per_video=1)
        dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
        # training loop
        for it, batch in enumerate(dataloader):
            # step the lr scheduler only once, on the first iteration
            update_lr = it == 0
            loss = tracker.step(batch, backward=True, update_lr=update_lr)
            print('Iter: {} Loss: {:.6f}'.format(it + 1, loss))
        # val loop
        for it, batch in enumerate(dataloader):
            loss = tracker.step(batch, backward=False)
            print('Val. Iter: {} Loss: {:.6f}'.format(it + 1, loss))

    def test_siamfc_track_v2(self):
        """Same as test_siamfc_track_v1 but with the v2 branch/weights."""
        dataset = VOT(self.vot_dir, return_rect=True, download=True)
        tracker = TrackerSiamFC(
            branch='alexv2', net_path=self.net_v2, z_lr=0.01,
            response_up=8, scale_step=1.0816, window_influence=0.25)
        img_files, anno = random.choice(dataset)
        rects, speed = tracker.track(img_files, anno[0, :],
                                     visualize=True)
        self.assertEqual(rects.shape, anno.shape)

    def test_siamfc_train_v2(self):
        """Same as test_siamfc_train_v1 but with the v2 branch/score map."""
        tracker = TrackerSiamFC(branch='alexv2')
        transform = TransformSiamFC(
            stats_path=self.stats_path, score_sz=33,
            r_pos=8, total_stride=4)
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        dataset = Pairwise(base_dataset, transform, pairs_per_video=1)
        dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
        # training loop
        for it, batch in enumerate(dataloader):
            update_lr = it == 0
            loss = tracker.step(batch, backward=True, update_lr=update_lr)
            print('Iter: {} Loss: {:.6f}'.format(it + 1, loss))
        # val loop
        for it, batch in enumerate(dataloader):
            loss = tracker.step(batch, backward=False)
            print('Val. Iter: {} Loss: {:.6f}'.format(it + 1, loss))
if __name__ == '__main__':
unittest.main()
|
BestSonny/open-vot-1 | lib/trackers/__init__.py | from __future__ import absolute_import, division
import cv2
import numpy as np
import time
from ..utils.viz import show_frame
class Tracker(object):
    """Abstract tracker interface plus a shared sequence-tracking loop.

    Subclasses implement ``init`` (first frame) and ``update`` (per frame).
    """

    def __init__(self, name):
        self.name = name

    def init(self, image, init_rect):
        """Initialize the tracker on the first frame with an [x, y, w, h] box."""
        raise NotImplementedError()

    def update(self, image):
        """Track one frame; return the new [x, y, w, h] box."""
        raise NotImplementedError()

    def track(self, img_files, init_rect, visualize=False):
        """Run the tracker over a sequence of image files.

        Returns (bndboxes, speed_fps): per-frame boxes (row 0 is the given
        init_rect) and per-frame FPS. Note frame 0's FPS measures the init
        call, not an update.
        """
        frame_num = len(img_files)
        bndboxes = np.zeros((frame_num, 4))
        bndboxes[0, :] = init_rect
        speed_fps = np.zeros(frame_num)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            # trackers in this package expect RGB input
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            start_time = time.time()
            if f == 0:
                self.init(image, init_rect)
            else:
                bndboxes[f, :] = self.update(image)
            elapsed_time = time.time() - start_time
            speed_fps[f] = 1. / elapsed_time
            if visualize:
                show_frame(image, bndboxes[f, :], fig_n=1)
        return bndboxes, speed_fps
from .siamfc import TrackerSiamFC
from .goturn import TrackerGOTURN
from .csk import TrackerCSK
from .kcf import TrackerKCF
from .dcf import TrackerDCF
from .dcfnet import TrackerDCFNet
from .mosse import TrackerMOSSE
from .dsst import TrackerDSST
|
matthewfeickert/pyhf-funcX-CHEP-2021-proposal | build_table.py | <gh_stars>1-10
from pathlib import Path
import numpy as np
import pandas as pd
def convert_to_seconds(time_str):
    """Convert a ``bash time``-style duration like ``"2m15.342s"`` to whole seconds.

    The fractional part of the seconds is truncated, not rounded.
    """
    minutes, _, remainder = time_str.partition("m")
    whole_seconds = remainder.split(".")[0]
    return int(minutes) * 60 + int(whole_seconds)
def main():
    """Build ``src/tables/performance_table.tex`` from the timing files on disk.

    Reads, for each analysis, the per-trial wall times and the single-node
    time from ``data/river/``, then writes a LaTeX table comparing them.
    """
    mean_wall_time = []
    file_list = ["1Lbb", "InclSS3L", "staus"]
    n_trials = 0  # trials per analysis (same for every file; last read wins)
    for filename in file_list:
        # BUG FIX: the f-strings had no placeholder (the loop variable
        # `filename` was unused), so every iteration opened the same path.
        file_path = Path("data").joinpath("river").joinpath(f"{filename}_times.txt")
        with open(file_path, "r") as readfile:
            lines = readfile.readlines()
        times = np.array([convert_to_seconds(line) for line in lines])
        n_trials = len(times)
        # Raw f-string: "\p" is an invalid escape in a normal string literal.
        # Format the mean like the stddev for a consistent table.
        mean_wall_time.append(rf"${np.mean(times):.1f}\pm{np.std(times):.1f}$")

    single_node_time = []
    for filename in file_list:
        file_path = (
            Path("data").joinpath("river").joinpath(f"{filename}_single_node_time.txt")
        )
        with open(file_path, "r") as readfile:
            time = readfile.readlines()[0]
        single_node_time.append(convert_to_seconds(time))

    river_config = {"max_blocks": 4, "nodes_per_block": 1}
    table_data = pd.DataFrame(
        dict(
            # analysis=["ATLAS SUSY 1Lbb", "ATLAS SUSY SS3L", "ATLAS SUSY staus"],
            analysis=[
                "Eur. Phys. J. C 80 (2020) 691",
                "JHEP 06 (2020) 46",
                "Phys. Rev. D 101 (2020) 032009",
            ],
            patches=[125, 76, 57],
            mean_wall_time=mean_wall_time,
            single_node_time=single_node_time,
        )
    )
    caption = (
        "Fit times for analyses using \pyhf{}'s NumPy backend and SciPy optimizer orchestrated with \\funcX{} on RIVER"
        + " with an endpoint configuration of and \\texttt{max\_blocks} = "
        + f"{river_config['max_blocks']}"
        + " and \\texttt{nodes\_per\_block} = "
        + f"{river_config['nodes_per_block']}"
        # Use the captured trial count instead of the `times` variable that
        # previously leaked out of the first loop.
        + f" over {n_trials} trials compared to a single RIVER node."
        + " The reported wall fit time is the mean wall fit time of the trials."
        + " The uncertainty on the mean wall time corresponds to the standard deviation of the wall fit times."
    )
    performance_table_latex = table_data.to_latex(
        header=[
            "Analysis",
            "Patches",
            "Wall time (sec)",
            "Single node (sec)",
        ],
        caption=caption,
        label="table:performance",
        index=False,
        escape=False,
        float_format="{:0.1f}".format,
        column_format="@{}lrrrr@{}",
        position="htpb",
    )
    with open("src/tables/performance_table.tex", "w") as table_file:
        table_file.write(performance_table_latex)
if __name__ == "__main__":
    main()  # script entry point
|
matthewfeickert/pyhf-funcX-CHEP-2021-proposal | figures/timing_barplot.py | <reponame>matthewfeickert/pyhf-funcX-CHEP-2021-proposal
import numpy as np
from matplotlib.figure import Figure
import matplotlib
from matplotlib import rcParams
from pathlib import Path
rcParams.update({"font.size": 14})
def plot_times(
    analyses,
    mean_times,
    mean_uncertainties,
    single_node_times,
    machine_name,
    max_time,
    scale="linear",
):
    """Render a grouped bar plot of mean wall times vs single-node times.

    Saves the figure as ``figures/timing_barplot_<machine>[_log].pdf``.

    Parameters
    ----------
    analyses : list of str
        X-axis labels, one per probability model.
    mean_times, mean_uncertainties, single_node_times : list of float
        Parallel lists of seconds (uncertainty is drawn as a hatched band).
    machine_name : str
        Used in the title and the output file name (lower-cased for the file).
    max_time : float
        Upper y-limit (only applied on the linear scale).
    scale : str
        Matplotlib y-scale, e.g. "linear" or "log".
    """
    fig = Figure()
    fig.set_size_inches(7, 5)
    ax = fig.subplots()
    x = np.arange(len(analyses))
    width = 0.35
    ax.bar(x, mean_times, width=width, label="Wall time")
    # Hatched, unfilled band spanning mean +/- uncertainty.
    bin_bottom = np.array(mean_times) - np.array(mean_uncertainties)
    ax.bar(
        x,
        height=2 * np.array(mean_uncertainties),
        width=width,
        bottom=bin_bottom,
        fill=False,
        linewidth=0,
        edgecolor="gray",
        hatch=3 * "/",
        label="Uncertainty",
    )
    ax.bar(x + width, single_node_times, width=width, label="Single node")
    # Annotate the (fixed) endpoint configuration in axes coordinates.
    text_left_edge = 0.66
    ax.text(
        text_left_edge, 0.68, "Nodes per block = 1", transform=ax.transAxes, size=10
    )
    ax.text(text_left_edge, 0.63, "Max blocks = 4", transform=ax.transAxes, size=10)
    ax.set_xticks(x + width / 2)
    ax.set_xticklabels(analyses, rotation=10, size=10)
    ax.set_yscale(scale)
    if scale != "log":
        ax.set_ylim(top=max_time)
    ax.set_title(f"{machine_name}")
    ax.set_xlabel("Published analysis probability model")
    ax.set_ylabel("Evaluation time (seconds)")
    ax.legend(loc="best", frameon=False)
    fig.tight_layout()
    file_path = Path().cwd().joinpath("figures")
    image_name = f"timing_barplot_{machine_name.lower()}"
    if scale == "log":
        image_name += "_log"
    file_path = file_path.joinpath(image_name + ".pdf")
    fig.savefig(file_path)
if __name__ == "__main__":
    # Hard-coded measurements (seconds); these mirror the values produced by
    # build_table.py for the same three analyses.
    analyses = [
        "<NAME> 80 (2020) 691",
        "JHEP 06 (2020) 46",
        "Phys. Rev. D 101 (2020) 032009",
    ]
    mean_times = [156.2, 31.2, 57.4]
    mean_uncertainties = [9.5, 2.7, 5.2]
    single_node_times = [3842, 114, 612]
    machine_name = "RIVER"
    max_time = 4000
    # Emit both a linear- and a log-scale version of the plot.
    plot_times(
        analyses,
        mean_times,
        mean_uncertainties,
        single_node_times,
        machine_name,
        max_time,
    )
    plot_times(
        analyses,
        mean_times,
        mean_uncertainties,
        single_node_times,
        machine_name,
        max_time,
        scale="log",
    )
|
ursinus-cs371-s2022/Week5_EditBacktracing | stack.py | class Node:
def __init__(self, value):
self.value = value
self.next = None # Python's version of "null" is "None"
class LinkedList:
def __init__(self):
self.head = None
self.N = 0
def add_first(self, value):
"""
Parameters
----------
value: any
Add a new node to the beginning with this value
"""
new_node = Node(value)
head_before = self.head
self.head = new_node
new_node.next = head_before
self.N += 1
def remove_first(self):
"""
Remove and return the first value from the linked list
or do nothing and return None if it's already empty
"""
ret = None
if self.head: # If the head is not None
ret = self.head.value
self.head = self.head.next
self.N -= 1
return ret
def peek_first(self):
ret = None
if self.head:
ret = self.head.value
return ret
def __str__(self):
# This is like the to-string method
s = "LinkedList: "
node = self.head
while node: #As long as the node is not None
s += "{} ==> ".format(node.value)
node = node.next
return s
def __len__(self):
# This allows us to use len() on our object to get its length!
return self.N
class Stack:
def __init__(self):
self.L = LinkedList()
def push(self, val):
self.L.add_first(val)
def pop(self):
return self.L.remove_first()
def peek(self):
return self.L.peek_first()
def get_entire_stack(self):
node = self.L.head
ret = []
while node: #As long as the node is not None
ret = [node.value] + ret
node = node.next
return ret |
ursinus-cs371-s2022/Week5_EditBacktracing | edit.py | <reponame>ursinus-cs371-s2022/Week5_EditBacktracing
import numpy as np
LEFT = 0  # came from the left: delete the last character of s2 (insertion into s1)
UP = 1    # came from above: delete the last character of s1
DIAG = 2  # came from the diagonal: match or substitute the final characters


def edit(s1, s2):
    """
    An iterative, dynamic programming version of the string
    edit distance, with backtracing of one optimal edit sequence.

    Parameters
    ----------
    s1: string of length M
        The first string to match
    s2: string of length N
        The second string to match

    Returns
    -------
    cost: int
        The cost of an optimal match
    path: list of str
        Human-readable steps of one optimal edit sequence, ordered from
        the start of the strings to the end
    """
    M = len(s1)
    N = len(s2)
    # Create a 2D array with M+1 rows and N+1 columns to store the costs
    table = np.zeros((M+1, N+1))
    # Base cases: matching against an empty prefix costs one op per character
    table[0, :] = np.arange(N+1)
    table[:, 0] = np.arange(M+1)
    # moves[i][j] lists every optimal move (LEFT/UP/DIAG) into cell (i, j)
    moves = []
    for i in range(M+1):
        moves.append([])
        for j in range(N+1):
            moves[i].append([])
    # Base cases for the moves
    for j in range(N+1):
        moves[0][j] = [LEFT]  # Move left if we're at the top row
    for i in range(M+1):
        moves[i][0] = [UP]    # Move up if we're at the left column
    # Do the dynamic programming to fill in the table and moves
    for i in range(1, M+1):
        for j in range(1, N+1):
            cost1 = table[i, j-1] + 1  # Delete the last character from s2
            cost2 = table[i-1, j] + 1  # Delete the last character from s1
            cost3 = table[i-1, j-1]    # Match or swap both characters at the end
            if s1[i-1] != s2[j-1]:
                cost3 += 1
            table[i][j] = min(cost1, cost2, cost3)
            if table[i][j] == cost1:
                moves[i][j].append(LEFT)
            if table[i][j] == cost2:
                moves[i][j].append(UP)
            if table[i][j] == cost3:
                moves[i][j].append(DIAG)
    # Backtrace from (M, N) to (0, 0).  The original left this step as a
    # commented-out TODO whose comparison `moves[i][j] == LEFT` could never
    # be true (moves[i][j] is a *list* of equally optimal moves); following
    # any listed move is optimal, so take the first.
    i = M
    j = N
    path = []
    while not (i == 0 and j == 0):
        move = moves[i][j][0]
        if move == LEFT:
            path.append("Adding {} to s1".format(s2[j-1]))
            j -= 1
        elif move == UP:
            path.append("Deleting {} from s1".format(s1[i-1]))
            i -= 1
        else:
            if s1[i-1] != s2[j-1]:
                path.append("Swapping in {} for {} in s1".format(s2[j-1], s1[i-1]))
            else:
                path.append("Matching {}".format(s2[j-1]))
            i -= 1
            j -= 1
    path.reverse()
    # table holds floats (np.zeros default); report the cost as an int
    return int(table[M, N]), path
edit("school", "fools")  # demo: edit distance between "school" and "fools"
|
Yuehan-Wang/carplet-pygame | event.py | <gh_stars>0
from typing import List
from card import Card
class Event:
    """One story event: a title, a description, and the choice cards it offers.

    All fields are read-only after construction (exposed via properties).
    """

    def __init__(self, title: str, desc: str, cards: List[Card]) -> None:
        self._title = title  # headline shown in the event box
        self._desc = desc    # body text shown under the title
        self._cards = cards  # choice cards (the engine expects exactly 3)

    @property
    def title(self) -> str:
        """Headline text of the event."""
        return self._title

    @property
    def desc(self) -> str:
        """Descriptive body text of the event."""
        return self._desc

    @property
    def cards(self) -> List[Card]:
        """The cards the player may choose from for this event."""
        return self._cards
|
Yuehan-Wang/carplet-pygame | engine.py | <gh_stars>0
import pygame
import time
import sys
from context import Context
class Engine:
    """Class-level (static) game engine.

    Owns the pygame window, fonts, sounds and images, and drives the
    intro -> body -> end screen loop for the registered Context.
    All state lives on the class; no instances are created.
    """

    # pygame init
    WIN_WIDTH, WIN_HEIGHT = 900, 600
    context = None  # the Context (plots/events/indexes) driving the game
    w = None        # pygame display surface
    clock = None
    logo = None
    # fonts
    screen_font = None
    i_name_font = None
    i_number_font = None
    e_title_font = None
    e_desc_font = None
    c_title_font = None
    c_title_hover_font = None
    c_desc_font = None
    c_cons_font = None
    # sounds
    start_soundtrack = None
    finish_soundtrack = None
    select_soundtrack = None
    # images of indexes
    I1_IMG = None
    I2_IMG = None
    I3_IMG = None
    I4_IMG = None
    # misc
    has_clicked = False  # debounce: True while a mouse press is being handled
    FPS = 15
    E = 2.25             # seconds the consequence popup stays on screen
    counter = 0          # frame counter for the popup timer
    cons = ""            # consequence text of the last chosen card
    ds = []              # index deltas of the last chosen card
    popup = False        # whether the consequence popup is currently showing

    @classmethod
    def register_context(cls, context: Context):
        """Attach the Context the engine will render and advance."""
        cls.context = context

    @classmethod
    def init(cls):
        """Initialize pygame and load every font, sound and image asset."""
        pygame.init()
        # Get window
        cls.w = pygame.display.set_mode((cls.WIN_WIDTH, cls.WIN_HEIGHT))
        # Set game name
        pygame.display.set_caption(cls.context.name)
        # Get clock
        cls.clock = pygame.time.Clock()
        # Set fonts
        cls.screen_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 50)
        cls.i_number_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 40)
        cls.e_title_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 25)
        cls.c_title_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 25)
        cls.c_title_hover_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 20)
        cls.e_desc_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 20)
        cls.i_name_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 20)
        cls.c_desc_font = pygame.font.Font('assets/font/Abel-Regular.ttf', 16)
        # Set soundtracks
        cls.start_soundtrack = pygame.mixer.Sound('assets/sound/start.wav')
        cls.finish_soundtrack = pygame.mixer.Sound('assets/sound/finish.wav')
        cls.select_soundtrack = pygame.mixer.Sound('assets/sound/card_select.wav')
        # Set index images (icons come from the four Context indexes)
        cls.I1_IMG = pygame.transform.scale(pygame.image.load(cls.context.indexes[0].icon), (48, 48))
        cls.I2_IMG = pygame.transform.scale(pygame.image.load(cls.context.indexes[1].icon), (48, 48))
        cls.I3_IMG = pygame.transform.scale(pygame.image.load(cls.context.indexes[2].icon), (48, 48))
        cls.I4_IMG = pygame.transform.scale(pygame.image.load(cls.context.indexes[3].icon), (48, 48))
        # Set Logo
        cls.logo = pygame.image.load('assets/icon/logo.png')
        cls.logo = pygame.transform.scale(cls.logo, (150,150))

    @classmethod
    def play(cls):
        """Start the game at the intro screen."""
        cls.__intro()

    @classmethod
    def __intro(cls):
        """Intro screen loop: wait for the Start button, then enter the body."""
        while True:
            cls.__draw_intro()
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if e.type == pygame.MOUSEBUTTONUP:
                    cls.has_clicked = False
            # NOTE(review): hit-test x=400 but the button is drawn at x=380
            # in __draw_intro — confirm the offset is intentional.
            start_pressed = cls.__press_button(400, 375, 125, 70, cls.start_soundtrack)
            if start_pressed:
                cls.__body()

    @classmethod
    def __body(cls):
        """Main game loop: advance plots/events, draw, and handle card picks."""
        time.sleep(0.5)
        pygame.mixer.music.load('assets/sound/background.mp3')
        pygame.mixer.music.set_volume(0.05)
        pygame.mixer.music.play(-1)
        while True:
            if cls.context.plot_finished():
                cls.context.next_plot()
            if cls.context.context_finished() or cls.context.is_game_over():
                cls.__end()
            # Popup timer: hide the consequence box after FPS * E frames.
            if cls.popup:
                cls.counter += 1
                if cls.counter >= cls.FPS * cls.E:
                    cls.popup = False
                    cls.counter = 0
            cls.__draw_body()
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if e.type == pygame.MOUSEBUTTONUP:
                    cls.has_clicked = False
            # Card selection is disabled while the popup is visible.
            if not cls.popup:
                l_btn_pressed = cls.__press_button(150, 450, 150, 200, cls.select_soundtrack)
                m_btn_pressed = cls.__press_button(400, 450, 150, 200, cls.select_soundtrack)
                r_btn_pressed = cls.__press_button(650, 450, 150, 200, cls.select_soundtrack)
                l_card, m_card, r_card = cls.context.curr_event().cards
                # Applying a card: Index.value's setter *adds* the delta,
                # so these assignments apply the card's effects.
                if l_btn_pressed:
                    cls.context.indexes[0].value = l_card.effects[0]
                    cls.context.indexes[1].value = l_card.effects[1]
                    cls.context.indexes[2].value = l_card.effects[2]
                    cls.context.indexes[3].value = l_card.effects[3]
                    cls.context.next_event()
                    cls.ds = l_card.effects
                    cls.cons = l_card.cons
                    cls.popup = True
                    continue
                if m_btn_pressed:
                    cls.context.indexes[0].value = m_card.effects[0]
                    cls.context.indexes[1].value = m_card.effects[1]
                    cls.context.indexes[2].value = m_card.effects[2]
                    cls.context.indexes[3].value = m_card.effects[3]
                    cls.context.next_event()
                    cls.ds = m_card.effects
                    cls.cons = m_card.cons
                    cls.popup = True
                    continue
                if r_btn_pressed:
                    cls.context.indexes[0].value = r_card.effects[0]
                    cls.context.indexes[1].value = r_card.effects[1]
                    cls.context.indexes[2].value = r_card.effects[2]
                    cls.context.indexes[3].value = r_card.effects[3]
                    cls.context.next_event()
                    cls.ds = r_card.effects
                    cls.cons = r_card.cons
                    cls.popup = True
                    continue

    @classmethod
    def __end(cls):
        """End screen loop: show the outcome and offer a replay."""
        pygame.mixer.music.stop()
        while True:
            cls.__draw_end()
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if e.type == pygame.MOUSEBUTTONUP:
                    cls.has_clicked = False
            # NOTE(review): hit-test x=350 but the button is drawn at x=370
            # in __draw_end — confirm the offset is intentional.
            replay_pressed = cls.__press_button(350, 375, 170, 70, cls.start_soundtrack)
            if replay_pressed:
                # Reset transient UI state and the context, then restart.
                cls.popup = False
                cls.ds = []
                cls.counter = 0
                cls.context.reset()
                cls.__body()

    @classmethod
    def __draw_intro(cls):
        """Render the intro screen (logo, welcome text, Start button)."""
        welcome_msg = "Welcome to " + cls.context.name
        creator_msg = "Created by " + cls.context.creator
        cls.w.fill("White")
        # Render Logo
        cls.w.blit(cls.logo, (370, 50))
        # Render text
        cls.__render_center_text(welcome_msg, cls.screen_font, "Black", 450, 225, 500)
        cls.__render_center_text(creator_msg, cls.i_number_font, "Black", 450, 300, 500)
        cls.__render_center_text("Powered by Carplet", cls.i_number_font, "Black", 450, 525, 300)
        # Render button
        cls.__render_button(380, 375, 125, 70, "White", "Black", "Start")
        # Update
        cls.__update()

    @classmethod
    def __draw_body(cls):
        """Render the main screen: indexes, event box (or popup), and cards."""
        # Get context
        e = cls.context.curr_event()
        indexes = cls.context.indexes
        # Fill messages
        names = [cls.i_name_font.render(i.name, True, "Black") for i in indexes]
        numbers = [cls.i_number_font.render(str(i.value), True, "Black") for i in indexes]
        cards = e.cards
        cls.w.fill((255, 255, 255))
        # Render first index
        cls.w.blit(cls.I1_IMG, (100, 25))
        cls.w.blit(names[0], (90, 78))
        # Render second index
        cls.w.blit(cls.I2_IMG, (300, 25))
        cls.w.blit(names[1], (300, 78))
        # Render third index
        cls.w.blit(cls.I3_IMG, (500, 25))
        cls.w.blit(names[2], (480, 78))
        # Render fourth index
        cls.w.blit(cls.I4_IMG, (700, 25))
        cls.w.blit(names[3], (690, 78))
        # Render increment/decrement numbers (while the popup shows the
        # last card's deltas) or the current index values otherwise
        if cls.popup:
            d_nums = [cls.i_number_font.render("+" + str(d) if d >= 0 else str(d), True, "Green" if d >= 0 else "Red") for d in cls.ds]
            cls.w.blit(d_nums[0], (170, 30))
            cls.w.blit(d_nums[1], (370, 30))
            cls.w.blit(d_nums[2], (570, 30))
            cls.w.blit(d_nums[3], (770, 30))
        else:
            cls.w.blit(numbers[0], (170, 30))
            cls.w.blit(numbers[1], (370, 30))
            cls.w.blit(numbers[2], (570, 30))
            cls.w.blit(numbers[3], (770, 30))
        # Description Box (black border, white interior)
        pygame.draw.rect(cls.w, "Black", (120, 125, 710, 260))
        pygame.draw.rect(cls.w, "White", (125, 130, 700, 250))
        cls.__render_center_text(e.title, cls.e_title_font, "Black", 475, 140, 350)
        cls.__render_center_text(e.desc, cls.e_desc_font, "Black", 475, 250, 450)
        # Render cards
        cls.__render_card(150, 450, 150, 200, "White", "Black", cards[0].title, cards[0].desc)
        cls.__render_card(400, 450, 150, 200, "White", "Black", cards[1].title, cards[1].desc)
        cls.__render_card(650, 450, 150, 200, "White", "Black", cards[2].title, cards[2].desc)
        # Render consequence if popup (drawn over the description box)
        if cls.popup:
            pygame.draw.rect(cls.w, "Red", (120, 125, 710, 260))
            pygame.draw.rect(cls.w, 'White', (125, 130, 700, 250))
            cls.__render_center_text(cls.cons, cls.e_desc_font, "Black", 475, 250, 450)
        # Update
        cls.__update()

    @classmethod
    def __draw_end(cls):
        """Render the end screen with the success or game-over message."""
        # cause_index() is -1 on success, else the index that hit zero.
        i_index = cls.context.cause_index()
        end = cls.context.success if i_index == -1 else cls.context.indexes[i_index].end_str
        cls.w.fill("White")
        # Render Logo
        cls.w.blit(cls.logo, (370, 50))
        # Render texts
        cls.__render_center_text(end, cls.screen_font, "Black", 450, 225, 700)
        cls.__render_center_text("Powered by Carplet", cls.i_number_font, "Black", 450, 525, 300)
        # Render buttons
        cls.__render_button(370, 375, 170, 70, "White", "Black", "Re-Play")
        # Update
        cls.__update()

    @classmethod
    def __update(cls):
        """Flip the display and cap the frame rate."""
        pygame.display.update()
        cls.clock.tick(cls.FPS)

    @classmethod
    def __is_hover(cls, x, y, width, height) -> bool:
        """Return True if the mouse cursor is inside the given rectangle."""
        mouse_x = pygame.mouse.get_pos()[0]
        mouse_y = pygame.mouse.get_pos()[1]
        if (x + width) > mouse_x > x and (y + height) > mouse_y > y:
            return True
        return False

    @classmethod
    def __render_button(cls, x, y, width, height, hover_color, default_color, msg):
        """Draw a button whose colors invert while the mouse hovers it."""
        s_bt = cls.screen_font.render(msg, True, hover_color)
        hover_s_bt = cls.screen_font.render(msg, True, default_color)
        rect_bt = s_bt.get_rect(midtop=(x + width / 2, y))
        if cls.__is_hover(x, y, width, height):
            pygame.draw.rect(cls.w, hover_color, (x, y, width, height))
            cls.w.blit(hover_s_bt, rect_bt)
        else:
            pygame.draw.rect(cls.w, default_color, (x, y, width, height))
            cls.w.blit(s_bt, rect_bt)

    @classmethod
    def __render_card(cls, x, y, width, height, hover_color, default_color, title, desc):
        """Draw a card; on hover it floats up and reveals its description."""
        float_height = 30
        if cls.__is_hover(x, y, width, height):
            pygame.draw.rect(cls.w, hover_color, (x, y - float_height, width, height))
            cls.__render_center_text(title, cls.c_title_hover_font, default_color, x + width / 2, y, width - 10)
            cls.__render_center_text(desc, cls.c_desc_font, default_color, x + width / 2, y + 70, width - 10)
        else:
            pygame.draw.rect(cls.w, default_color, (x, y, width, height))
            cls.__render_center_text(title, cls.c_title_font, hover_color, x + width / 2, y + 50, width - 10)

    @classmethod
    def __render_center_text(cls, msg, font, color, x, y, allowed_width):
        """Word-wrap msg to allowed_width and draw it centered on x from y down."""
        words = msg.split()
        lines = []
        # Greedily fill each line: stop adding words once the *next* word
        # would push the rendered width past allowed_width.
        while len(words) > 0:
            line_words = []
            while len(words) > 0:
                line_words.append(words.pop(0))
                w, h = font.size(' '.join(line_words + words[:1]))
                if w > allowed_width:
                    break
            line = ' '.join(line_words)
            lines.append(line)
        y_offset = 0
        for line in lines:
            w, h = font.size(line)
            fx = x - w / 2
            fy = y + y_offset
            s = font.render(line, True, color)
            cls.w.blit(s, (fx, fy))
            y_offset += h

    @classmethod
    def __press_button(cls, x, y, width, height, sound):
        """Return True exactly once per click on the given rectangle.

        ``has_clicked`` debounces: it is set here and only cleared by the
        MOUSEBUTTONUP handling in the screen loops.
        """
        click = pygame.mouse.get_pressed(3)
        if cls.__is_hover(x, y, width, height) and click[0] == 1 and not cls.has_clicked:
            cls.has_clicked = True
            pygame.mixer.Sound.play(sound)
            return True
        return False
|
Yuehan-Wang/carplet-pygame | card.py | from typing import List
class Card:
    """A choice card: title, description, per-index effects, and consequence text.

    Raises ValueError from the constructor when any text field is empty.
    All fields are read-only after construction (exposed via properties).
    """

    def __init__(self, title: str, desc: str, effects: List[int], cons: str) -> None:
        if title == "":
            raise ValueError("Card title cannot be empty string")
        if desc == "":
            raise ValueError("Card description cannot be empty string")
        if cons == "":
            raise ValueError("Card consequence cannot be empty string")
        self._title = title
        self._desc = desc
        # Deltas applied to the game indexes when this card is chosen
        # (Context validates there is one per index).
        self._effects = effects
        self._cons = cons

    @property
    def title(self) -> str:
        """Short label shown on the card."""
        return self._title

    @property
    def desc(self) -> str:
        """Longer text revealed when the card is hovered."""
        return self._desc

    @property
    def effects(self) -> List[int]:
        """Per-index deltas applied when the card is played."""
        return self._effects

    @property
    def cons(self) -> str:
        """Consequence text shown in the popup after the card is played."""
        return self._cons
|
Yuehan-Wang/carplet-pygame | context.py | <filename>context.py
import json
import os
from typing import List
from index import Index
from event import Event
from card import Card
class Context:
    """Game definition loaded from a JSON file.

    Holds the game's name/creator/success text, the four Index stats, and
    the plots (lists of Events, each with three Cards), plus cursors that
    track the current plot and event as the game advances.
    """

    def __init__(self, file: str) -> None:
        if not os.path.isfile(file):
            raise FileNotFoundError("Expected a file but found none")
        if os.path.splitext(file)[1] != ".json":
            raise FileNotFoundError("Expected a json file but found none")
        with open(file, 'r') as f:
            self._data = json.load(f)
        self._name: str = ""
        self._success: str = ""
        self._creator: str = ""
        self._indexes: List[Index] = []
        self._plots: List[List[Event]] = []
        # Cursors into _plots / the current plot's events.
        self._plot_count = 0
        self._event_count = 0
        self.__extract()
        self.__validated()

    @property
    def name(self) -> str:
        """Display name of the game."""
        return self._name

    @property
    def creator(self) -> str:
        """Author credit shown on the intro screen."""
        return self._creator

    @property
    def success(self) -> str:
        """Message shown when the player finishes without losing."""
        return self._success

    @property
    def indexes(self) -> List[Index]:
        """The four game stats."""
        return self._indexes

    @property
    def plots(self) -> List[List[Event]]:
        """All plots; each plot is an ordered list of Events."""
        return self._plots

    def curr_event(self) -> "Event | None":
        """Return the current event, or None once the current plot is done.

        (Annotation fixed: the previous ``Event or None`` evaluated to just
        ``Event`` at definition time.)
        """
        if not self.plot_finished():
            return self._plots[self._plot_count][self._event_count]
        return None

    def next_event(self) -> None:
        """Advance to the next event within the current plot."""
        if not self.plot_finished():
            self._event_count += 1

    def curr_plot(self) -> List[Event]:
        """Return the current plot's events, or [] when the game is over."""
        if not self.context_finished():
            return self._plots[self._plot_count]
        return []

    def next_plot(self) -> None:
        """Advance to the next plot, restarting the event cursor."""
        if not self.context_finished():
            self._plot_count += 1
            self._event_count = 0  # events start from beginning

    def is_game_over(self) -> bool:
        """True when any index has been depleted."""
        for i in self._indexes:
            if i.destroy():
                return True
        return False

    def cause_index(self) -> int:
        """Return the position of the first depleted index, or -1 (success)."""
        for key, i in enumerate(self._indexes):
            if i.destroy():
                return key
        return -1

    def plot_finished(self) -> bool:
        """True when every event of the current plot has been consumed.

        NOTE(review): indexes _plots[_plot_count] without a bounds check —
        callers must test context_finished() first once the last plot ends
        (the Engine's loop order happens to guarantee this); confirm.
        """
        return self._event_count >= len(self._plots[self._plot_count])

    def context_finished(self) -> bool:
        """True when every plot has been consumed."""
        return self._plot_count >= len(self._plots)

    def reset(self) -> None:
        """Rewind cursors and restore every index to its starting value."""
        self._plot_count = 0
        self._event_count = 0
        for i in self._indexes:
            i.reset()
        self.__validated()

    def __extract(self) -> None:
        """Populate fields from the parsed JSON (no validation here)."""
        self._name = self._data['name']
        self._creator = self._data['creator']
        self._success = self._data['success']
        for raw_index in self._data['indexes']:
            self._indexes.append(Index(raw_index['name'], raw_index['start'], raw_index['asset'], raw_index['end']))
        for raw_events in self._data['plots']:
            events = []
            for raw_event in raw_events:
                cards = []
                for raw_card in raw_event['cards']:
                    cards.append(Card(raw_card['title'], raw_card['desc'], raw_card['effects'], raw_card['cons']))
                events.append(Event(raw_event['title'], raw_event['desc'], cards))
            self._plots.append(events)

    def __validated(self) -> None:
        """Raise ValueError when the loaded game definition is malformed."""
        if self._name == "":
            raise ValueError("Name of the game cannot be empty string")
        if self._creator == "":
            raise ValueError("Name of the creator cannot be be empty string")
        if self._success == "":
            raise ValueError("Success string cannot be be empty")
        if len(self._indexes) != 4:
            raise ValueError("Number of indexes must be equal to 4")
        for index in self._indexes:
            if index.end_str == "":
                raise ValueError("End string of index cannot be be empty")
        if len(self._plots) == 0:
            raise ValueError("Number of plots must be greater than 0")
        for plot in self._plots:
            if len(plot) == 0:
                raise ValueError("Number of events in one plot must be greater than 0")
            for event in plot:
                if event.title == "":
                    raise ValueError("Event title cannot be empty string")
                if event.desc == "":
                    raise ValueError("Event description cannot be empty string")
                if len(event.cards) != 3:
                    raise ValueError("Number of cards must be equal to 3")
                for card in event.cards:
                    if card.title == "":
                        raise ValueError("Card title cannot be empty string")
                    if card.desc == "":
                        raise ValueError("Card description cannot be empty string")
                    if card.cons == "":
                        raise ValueError("Card consequence cannot be empty string")
                    if len(card.effects) != len(self._indexes):
                        raise ValueError("Number of effects should be equal to number of indexes")
|
Yuehan-Wang/carplet-pygame | main.py | from engine import Engine
from context import Context
if __name__ == "__main__":
    # Load the game definition, wire it into the engine, then enter the loop.
    file = "plot.json"
    Engine.register_context(Context(file))
    Engine.init()
    Engine.play()
|
Yuehan-Wang/carplet-pygame | index.py | import os.path
class Index:
    """A named game stat (e.g. health, money) with a starting value, an icon
    path, and a game-over message shown when the stat is depleted.
    """

    def __init__(self, name: str, start: int, icon: str, end_str: str) -> None:
        if name == "":
            raise ValueError("Index name cannot be empty string")
        self._name = name
        if start <= 0:
            raise ValueError("Index start value cannot be negative or zero")
        self._start = start
        self._value = start
        if not os.path.exists(icon):
            # BUG FIX: the previous message claimed the path was an "empty
            # string"; the actual failure is that the path does not exist.
            raise ValueError("Index icon path does not exist: " + icon)
        self._icon = icon
        if end_str == "":
            raise ValueError("Index finish string cannot be empty string")
        self._end_str = end_str

    @property
    def name(self) -> str:
        """Display name of the stat."""
        return self._name

    @property
    def value(self) -> int:
        """Current value of the stat."""
        return self._value

    @value.setter
    def value(self, d: int) -> None:
        """Apply a *delta*: ``index.value = d`` ADDS ``d`` to the current value.

        This deliberately breaks normal setter semantics — the game engine
        writes card effects through this setter to mutate the stat, so the
        behavior must be preserved for existing callers.
        """
        self._value += d

    @property
    def icon(self) -> str:
        """Filesystem path of the stat's icon image."""
        return self._icon

    @property
    def end_str(self) -> str:
        """Game-over message shown when this stat is depleted."""
        return self._end_str

    def reset(self) -> None:
        """Restore the stat to its starting value."""
        self._value = self._start

    def destroy(self) -> bool:
        """True when the stat has dropped to zero or below (game over)."""
        return self._value <= 0
|
pkauppin/finer-utilities | lemmatize-matches.py | <reponame>pkauppin/finer-utilities
#! /usr/bin/env python3
# List all matched entities in a given category in their base forms and output them in order of appearance.
# The lemmatization requires that lemma forms and morphological tags be found in the input file.
from sys import stdin, stdout, stderr, argv
import re
import argparse
# FiNER entity tag prefixes
numex_pfx = 'Numex'
enamex_pfx = 'Enamex'
timex_pfx = 'Timex'
# Maximum number of entity levels considered (1 = no nested entities)
max_depth = 4
# Lemma forms that are usually in plural by default
pluralia = [
    'olympialaiset',
    'markkinat',
    'yhdysvallat',
    'uutiset',
    'voimat',
    'jokerit',
    'maat',
    'saaret',
    'vuoret',
    'laiset',
    'läiset',
]
# Plural forms of common pluralized elements (singular lemma -> plural)
pl_forms = {
    'vuori': 'vuoret',
    'saari': 'saaret',
    'maa': 'maat',
    'voima': 'voimat',
    'sanoma': 'sanomat',
    'putous': 'putoukset',
    'kilpailu': 'kilpailut',
    'kisa': 'kisat',
    'aatti': 'aatit',
    'uutinen': 'uutiset',
    'markkina': 'markkinat',
    'festivaali': 'festivaalit',
    'festari': 'festarit',
    'juhla': 'juhlat',
    'alppi': 'alpit',
    'viikko': 'viikot',
    'amerikka': 'amerikat',
    'vihreä': 'vihreät',
    'filippiini': 'filippiinit',
    'yhdistynyt': 'yhdistyneet',
    'inen': 'iset',
    'kunta': 'kunnat',
}
# Matches any lemma *ending* in one of the pluralia entries (all entries are
# plain letters, so no regex escaping is needed in the alternation).
pl_regex = '.*(%s)' % '|'.join(pluralia)
# Naively generate partitive form from lemma
# Suffix guesses keyed by (ends-in-consonant, has-back-vowel).
par_suffixes = {
    (True, True): '[ia]',
    (True, False): '[iä]',
    (False, True): '[a]',
    (False, False): '[ä]',
}
# Lexicalised (irregular) nominative -> partitive pairs.
par_forms = {
    'vuosi': 'vuotta',
    'aste': 'astetta',
    'kcal': 'kcal',
}


def get_partitive(wform, lemma, morph):
    """Naively produce a partitive form for a (word form, lemma, morph) triple.

    Codes/abbreviations are kept as-is (minus any ':'-case ending); forms
    already in the partitive are returned unchanged; otherwise an irregular
    table lookup or a vowel-harmony-based suffix guess is used.
    """
    # Abbreviations, numbers and codes: strip a clitic case ending after ':'.
    if re.search('[A-Z0-9/:.]', wform):
        return re.sub(':.+', '', wform)
    # Already in the partitive case: keep the surface form.
    if '[CASE=PAR]' in morph:
        return wform
    # Irregular, lexicalised partitives matched on the lemma's ending.
    for nominative, partitive in par_forms.items():
        if lemma.endswith(nominative):
            return (lemma + '#').replace(nominative + '#', partitive)
    if len(lemma) < 4:
        return lemma
    # Guess the suffix from the final letter and vowel harmony of the tail.
    ends_in_consonant = lemma[-1] not in 'aeiouyäö'
    has_back_vowel = bool(re.findall('[aou]', lemma[-5:]))
    return lemma + par_suffixes[(ends_in_consonant, has_back_vowel)]
def congr(morph1, morph2):
    """
    Check morphological analyses for agreement in NUM and CASE.

    Returns True iff the two analyses carry the same sequence of
    CASE/NUM feature-value tags.
    """
    # BUG FIX: the group must be non-capturing.  With a capturing group,
    # re.findall returns only the group text ('CASE'/'NUM'), discarding the
    # values, so '[CASE=GEN]' and '[CASE=PAR]' wrongly compared as equal.
    tags1 = re.findall(r'\[(?:CASE|NUM)=[A-Z]+]', morph1)
    tags2 = re.findall(r'\[(?:CASE|NUM)=[A-Z]+]', morph2)
    return tags1 == tags2
def get_lemma(wform, lemma, morph, tag=''):
    """
    Return lemma form (or nominative plural form) for nouns and nounlike words,
    otherwise return word form as is.
    """
    # BUG FIX: morphological labels are bracketed ('[POS=NOUN][NUM=SG]...'),
    # so the anchored re.match('POS=...') could never fire and every token
    # fell through to wform.lower().  Use re.search, as parse_enamex already
    # does for the same alternation.
    if re.search('POS=NOUN|POS=NUMERAL|POS=ADJECTIVE|SUBCAT=QUANTOR', morph):
        # Pluralize Loc/Evt/Org name parts that are plural in the text but
        # whose lemma is not inherently plural.
        if '[NUM=PL]' in morph and not re.fullmatch(pl_regex, lemma) and re.match('Enamex(Loc|Evt|Org)', tag):
            for sg, pl in pl_forms.items():
                if lemma.endswith(sg):
                    return (lemma + '#').replace(sg + '#', pl)
            return lemma + '[t]'  # no table entry: mark a guessed plural
        return lemma
    return wform.lower()
def parse_numex(entity, tag=numex_pfx):
    """
    Parse numerical expression (Numex).

    `entity` is a list of (wform, lemma, morph) triples; the last element is
    treated as the unit.  The list is consumed destructively (pop) from the
    end, then reversed back into reading order.
    """
    normalized = []
    unit = entity.pop()  # final token is the measured unit
    while entity:
        wform, lemma, morph = entity.pop()
        if entity and 'NUMERAL' in morph:
            # Non-final numerals go into the partitive ("kaksi" -> "kahta"-style)
            normalized.append(get_partitive(wform, lemma, morph))
        elif entity and re.search('PROPER.*CASE=GEN', morph) and tag == 'NumexMsrCur':
            # Genitive proper names inside currency measures keep their form
            normalized.append(wform.lower())
        else:
            normalized.append(lemma)
    normalized.reverse()
    wform, lemma, morph = unit
    # After "one"/"1" the unit stays in the lemma form; otherwise partitive.
    # NOTE(review): normalized[0] raises IndexError for a 1-token entity —
    # confirm Numex entities always have at least a number and a unit.
    if normalized[0] in ['yksi', '1']:
        normalized.append(lemma)
    else:
        normalized.append(get_partitive(wform, lemma, morph))
    return ' '.join(normalized)
def parse_timex(entity, tag=timex_pfx):
    """
    Parse expression of time such as dates are parsed differently (Timex).

    Consumes the (wform, lemma, morph) triples from the end of `entity`,
    normalizing date parts (day ordinals, month names, years) and keeping
    certain frame words ("vuonna", "aikana", ...) as-is.
    """
    normalized = []
    while entity:
        wform, lemma, morph = entity.pop()
        wform = wform.lower()
        # Frame words: keep the rest of the expression verbatim and return.
        if wform in ['aikana', 'välillä', 'aikaa']:
            normalized += [wform] + [e[0] for e in entity][::-1]
            normalized = normalized[::-1]
            return ' '.join(normalized)
        # "...kuuta" (partitive month): how it is kept depends on whether the
        # preceding token is an ordinal day number.
        if wform.endswith('kuuta') and entity:
            wform2, lemma2, morph2 = entity.pop()
            if 'SUBCAT=ORD' in morph2:
                normalized.append(wform)
            else:
                normalized.append(lemma)
            wform, lemma, morph = wform2, lemma2, morph2
        if 'SUBCAT=ORD' in morph:
            # Ordinal day number: lemma form, then a possible "...kuun" month.
            normalized.append(lemma)
            if entity:
                wform, lemma, morph = entity.pop()
                if wform.endswith('kuun'):
                    normalized.append(wform)
                else:
                    normalized.append(lemma)
        elif re.fullmatch('[0-9]+[.]?', wform):
            normalized.append(wform)  # plain (possibly dotted) number
        elif wform in ['vuonna', 'vuosina']:
            normalized.append(wform)  # "in the year(s)" stays inflected
        elif re.fullmatch('vuosi|.+kuu|päivä', lemma):
            normalized.append(lemma)  # year/month/day words -> lemma
        else:
            normalized.append(wform)
    # Tokens were collected back-to-front; restore reading order.
    normalized = normalized[::-1]
    return ' '.join(normalized)
def parse_enamex(entity, tag=enamex_pfx):
    """
    Parse proper name or similar expression (Enamex).

    Works from the last token backwards: the head word is lemmatized, and
    preceding modifiers are lemmatized only while they agree with the head
    in case/number; everything earlier is kept lower-cased as-is.
    """
    normalized = []
    wform, lemma, morph = entity.pop()
    # A trailing year in an event name ("Soudertalo 2019") stays literal.
    if re.fullmatch('(18|19|20)[0-9][0-9]', wform) and tag.startswith('EnamexEvt') and entity:
        normalized.append(wform.lower())
        wform, lemma, morph = entity.pop()
    normalized.append(get_lemma(wform, lemma, morph, tag))
    while entity and not wform.startswith('-'):
        wform2, lemma2, morph2 = entity.pop()
        # NOTE(review): the POS test reads `morph` (the token closer to the
        # head), not `morph2` — confirm this asymmetry is intended.
        if re.search('POS=ADJECTIVE|SUBCAT=ORD|SUBCAT=QUANTOR', morph) and congr(morph, morph2):
            normalized.append(get_lemma(wform2, lemma2, morph2, tag))
        else:
            normalized.append(wform2.lower())
            break
        wform, lemma, morph = wform2, lemma2, morph2
    # Any tokens still unconsumed precede the break point; keep them as-is.
    normalized = [wform.lower() for wform, lemma, morph in entity] + normalized[::-1]
    return ' '.join(normalized)
def is_endtag(nertag, tag=''):
    """Return True if `nertag` closes an entity whose type starts with `tag`.

    Both explicit closing tags ('</Tag>') and self-closing single-token
    tags ('<Tag/>') count as entity ends.
    """
    closes = nertag.startswith('</' + tag)
    self_closing = nertag.startswith('<' + tag) and nertag.endswith('/>')
    return closes or self_closing
def main():
    """Read FiNER-tagged TSV from stdin and print normalized entities.

    Input lines are: wform, lemma, morph, semtag, then up to `max_depth`
    NER-tag columns.  Sentences are separated by blank lines; at each blank
    line the buffered analyses are scanned per tag column for entities whose
    tag starts with `args.tag`, and each match is printed as
    "start,end<TAB>normalized<TAB>tag" in order of appearance.
    """
    tag_columns = [list() for i in range(max_depth)]  # one tag buffer per nesting level
    analyses = []  # (wform, lemma, morph, semtag, line_no) for the sentence
    for n, line in enumerate(stdin, 1):
        line = line.lstrip(' \t').rstrip(' \n')
        if line:
            if '\t[POS=' not in line:
                stderr.write('WARNING: Line %i: Irregular morphological labels detected!\n' % n)
            fields = line.split('\t')
            try:
                wform, lemma, morph, semtag = fields[0:4]
            # NOTE(review): bare except — narrows poorly; a ValueError from
            # the unpack is what is expected here.
            except:
                stderr.write('WARNING: Line %i: unexpected number of fields!\n' % n)
                exit(1)
            analyses.append((wform, lemma, morph, semtag, n))
            # Pad/trim the NER columns to exactly max_depth entries.
            nertags = fields[4:] + [''] * max_depth
            nertags = nertags[0:max_depth]
            for nertag, tag_column in zip(nertags, tag_columns):
                tag_column.append(nertag)
        else:
            # Sentence boundary: extract entities from each nesting level.
            tag_columns = tag_columns[0:max_depth]
            entities = []
            for tag_column in tag_columns:
                tag = '#'    # sentinel: no entity currently open
                tuples = []  # (wform, lemma, morph) of the open entity
                for analysis, nertag in zip(analyses, tag_column):
                    wform, lemma, morph, semtag, i2 = analysis
                    if nertag.startswith('<' + args.tag) and not nertag.startswith('</'):
                        i1 = i2  # entity start line
                        tag = nertag.strip('<>/')
                        tuples.append((wform, lemma.lower(), morph,))
                    elif tuples:
                        tuples.append((wform, lemma.lower(), morph,))
                    if is_endtag(nertag, tag):
                        # Dispatch normalization on the entity tag family.
                        if tag.startswith(enamex_pfx):
                            ent_str = parse_enamex(tuples, tag)
                        elif tag.startswith(timex_pfx):
                            ent_str = parse_timex(tuples, tag)
                        elif tag.startswith(numex_pfx):
                            ent_str = parse_numex(tuples, tag)
                        else:
                            ent_str = ' '.join([t[0] for t in tuples])
                        entities.append((i1, i2, ent_str, tag,))
                        tag = '#'
                        tuples = []
            entities.sort()  # order of appearance (then end line)
            for i1, i2, ent_str, tag in entities:
                print('%i,%i\t%s\t%s' % (i1, i2, ent_str, tag))
            tag_columns = [list() for i in range(max_depth)]
            analyses = []
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Extract entities found by FiNER, \
normalize/lemmatize them and list in order of appearance.')
    # BUG FIX: add_argument() has no `description` keyword (that belongs to
    # ArgumentParser itself) and raised TypeError; per-option text is `help`.
    parser.add_argument('--tag', type=str, default='',
                        help='find entities whose tag begins with TAG')
    args = parser.parse_args()
    main()
|
weeb-poly/syncplay-proxy | syncplay/ep_proxy.py | <reponame>weeb-poly/syncplay-proxy<filename>syncplay/ep_proxy.py
import os
import logging
from twisted.internet.endpoints import TCP4ServerEndpoint, SSL4ServerEndpoint
from syncplay.server import SyncplayProxyWSFactory
from syncplay.server import SyncplayProxyTCPFactory
from twisted.internet import reactor
# from autobahn.twisted.choosereactor import install_reactor
# reactor = install_reactor()
def setupTCPFactory(factory, port: int) -> None:
    """Listen for plain-TCP syncplay clients on `port` (IPv4 only)."""
    connType = "TCP"
    endpoint4 = TCP4ServerEndpoint(reactor, port)
    setupEndpoint(endpoint4, factory, "IPv4", connType)
def setupWSFactory(factory, port: int) -> None:
    """Start a WebSocket IPv4 listener for *factory* on *port*.

    Uses an SSL endpoint when the factory carries TLS context options,
    otherwise a plain TCP endpoint.
    """
    if factory.options is None:
        endpoint = TCP4ServerEndpoint(reactor, port)
    else:
        endpoint = SSL4ServerEndpoint(reactor, port, factory.options)
    setupEndpoint(endpoint, factory, "IPv4", "WS")
def setupEndpoint(endpoint, factory, addrType: str, connType: str) -> None:
    """Begin listening on *endpoint*; log instead of raising when it fails."""
    def _onListenFailure(err):
        logging.exception(err)
        logging.exception(f"{addrType} listening failed ({connType}).")

    endpoint.listen(factory).addErrback(_onListenFailure)
def main():
    """Start the proxy listeners configured via environment variables.

    Environment:
        SYNCPLAY_TCP_PORT: port for a plain-TCP listener (skipped when unset).
        SYNCPLAY_WS_PORT: port for a WebSocket listener (skipped when unset).
        SYNCPLAY_HOST: upstream "host:port" to proxy to (default "syncplay.pl:8997").
        SYNCPLAY_TLS_PATH: directory with TLS certificate files, or unset.
    """
    tcport = os.environ.get('SYNCPLAY_TCP_PORT', None)
    wsport = os.environ.get('SYNCPLAY_WS_PORT', None)
    host = os.environ.get('SYNCPLAY_HOST', 'syncplay.pl:8997')
    tls = os.environ.get('SYNCPLAY_TLS_PATH')
    if tcport is not None:
        # Port is passed to the factory as a string; the endpoint needs an int.
        tcp_factory = SyncplayProxyTCPFactory(
            tcport,
            host,
            tls
        )
        setupTCPFactory(tcp_factory, int(tcport))
    if wsport is not None:
        ws_factory = SyncplayProxyWSFactory(
            wsport,
            host,
            tls
        )
        setupWSFactory(ws_factory, int(wsport))
    # Blocks until the reactor is stopped.
    reactor.run()
if __name__ == "__main__":
    main()
|
weeb-poly/syncplay-proxy | syncplay/__main__.py | #!/usr/bin/env python3
import logging
from . import ep_proxy
def main():
    """Configure INFO-level logging, then hand off to the proxy entry point."""
    logging.basicConfig(level=logging.INFO)
    ep_proxy.main()
if __name__ == '__main__':
    main()
|
weeb-poly/syncplay-proxy | syncplay/__init__.py | <reponame>weeb-poly/syncplay-proxy<gh_stars>1-10
# URL of this project's public repository.
projectURL = 'https://github.com/weeb-poly/syncplay-proxy'
|
weeb-poly/syncplay-proxy | syncplay/protocols.py | import json
import logging
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import ClientFactory
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerProtocol
class JSONCommandProtocol(LineReceiver):
    """Line-oriented protocol whose messages are one-line JSON objects.

    Subclasses supply messageRecieved() and handleError(); malformed input
    (non-UTF-8 or non-JSON) is reported and the connection is dropped.
    """

    def lineReceived(self, line: bytes) -> None:
        """Decode one line as UTF-8 JSON and dispatch it."""
        try:
            text = line.decode('utf-8').strip()
        except UnicodeDecodeError:
            self.handleError("Not a utf-8 string")
            self.drop()
            return
        if not text:
            # Blank keep-alive lines are ignored.
            return
        try:
            parsed = json.loads(text)
        except json.decoder.JSONDecodeError:
            self.handleError("Not a json encoded string {}".format(text))
            self.drop()
            return
        self.messageRecieved(parsed)

    def sendMsg(self, dict_: dict) -> None:
        """Serialize *dict_* as JSON and write it as one line."""
        self.sendLine(json.dumps(dict_).encode('utf-8'))

    def drop(self) -> None:
        """Close the underlying transport."""
        self.transport.loseConnection()

    def handleError(self, _error):
        """Subclasses decide how protocol errors are reported."""
        raise NotImplementedError()
class SyncplayProxyProtocol(JSONCommandProtocol):
    """JSON protocol that hashes per connection and logs errors on drop."""

    def __hash__(self) -> int:
        # Combine peer host with object identity so every connection,
        # even from the same host, hashes differently.
        peer_host = self.transport.getPeer().host
        return hash('|'.join((peer_host, str(id(self)))))

    def drop(self) -> None:
        """Close the transport and run connectionLost cleanup immediately."""
        self.transport.loseConnection()
        self.connectionLost(None)

    def handleError(self, error) -> None:
        """Log the error, report it to the peer, then drop the connection."""
        logging.error("Drop: {} -- {}".format(self.transport.getPeer().host, error))
        self.sendMsg({"Error": {"message": error}})
        self.drop()
class SyncplayProxyClientProtocol(SyncplayProxyProtocol):
    """Upstream side of the proxy: connects to the real Syncplay server and
    relays traffic between it and the server-side (client-facing) protocol."""

    def connectionMade(self):
        """Register with the server side and flush messages buffered while
        the upstream connection was being established.

        Bug fixes:
        - The buffer holds client->server messages, so they must be sent
          upstream with self.sendMsg(); the old code passed them to
          messageRecieved(), which echoed them back to the client.
        - pop() drained the buffer newest-first; pop(0) preserves the
          client's original message order (FIFO).
        """
        self.factory.server.client = self
        while self.factory.server.buffer:
            self.sendMsg(self.factory.server.buffer.pop(0))

    def connectionLost(self, _):
        # Upstream went away: detach and tear down the client-facing side.
        self.factory.server.client = None
        self.factory.server.drop()

    def messageRecieved(self, messages):
        # Traffic from the upstream server is forwarded to the proxied client.
        self.factory.server.sendMsg(messages)
class WSJSONCommandProtocol(WebSocketServerProtocol):
    """WebSocket protocol whose messages are UTF-8 JSON text frames.

    Subclasses supply messageRecieved() and handleError(); binary frames
    and malformed text are reported and the connection is dropped.
    """

    def onMessage(self, line: bytes, isBinary: bool) -> None:
        """Decode one text frame as UTF-8 JSON and dispatch it."""
        if isBinary:
            self.handleError("Not a utf-8 string")
            self.drop()
            return
        try:
            text = line.decode('utf-8').strip()
        except UnicodeDecodeError:
            self.handleError("Not a utf-8 string")
            self.drop()
            return
        if not text:
            # Empty frames are ignored.
            return
        try:
            parsed = json.loads(text)
        except json.decoder.JSONDecodeError:
            self.handleError("Not a json encoded string {}".format(text))
            self.drop()
            return
        self.messageRecieved(parsed)

    def sendMsg(self, dict_: dict) -> None:
        """Serialize *dict_* as JSON and send it as a text frame."""
        self.sendMessage(json.dumps(dict_).encode('utf-8'), False)

    def drop(self) -> None:
        """Close the underlying transport."""
        self.transport.loseConnection()

    def handleError(self, _error):
        """Subclasses decide how protocol errors are reported."""
        raise NotImplementedError()
class SyncplayWSServerProtocol(WSJSONCommandProtocol):
    """WebSocket-facing side of the proxy.

    On connect it dials the upstream Syncplay server; messages arriving
    before that upstream connection exists are queued in self.buffer and
    replayed by the client protocol.
    """

    def __init__(self, factory):
        self._factory = factory
        self._features = None
        self._logged = False

    def connectionMade(self):
        # Dial the upstream server; until connected, proxied messages queue
        # in self.buffer.
        self.buffer = []
        self.client = None
        cli_factory = ClientFactory()
        cli_factory.protocol = SyncplayProxyClientProtocol
        cli_factory.server = self
        host_name, host_port = self._factory.host_name, self._factory.host_port
        reactor.connectTCP(host_name, host_port, cli_factory)

    def connectionLost(self, _):
        # Detach first so the client's connectionLost doesn't re-enter drop().
        tmpClient = self.client
        if tmpClient is not None:
            self.client = None
            tmpClient.drop()

    def messageRecieved(self, messages: dict) -> None:
        """Handle TLS negotiation locally; proxy everything else upstream,
        injecting the client's real IP into the Hello message."""
        tlsMsg = messages.pop("TLS", None)
        if tlsMsg is not None:
            self.handleTLS(tlsMsg)
        if "Hello" in messages.keys():
            messages["Hello"]["user_ip"] = self.transport.getPeer().host
        if len(messages) != 0:
            self.proxyMessages(messages)

    def proxyMessages(self, messages) -> None:
        """Send *messages* upstream, or queue them until upstream connects."""
        if self.client is not None:
            self.client.sendMsg(messages)
        else:
            self.buffer.append(messages)

    def sendTLS(self, message) -> None:
        self.sendMsg({"TLS": message})

    def handleTLS(self, message) -> None:
        """Answer startTLS inquiries; WebSocket clients never upgrade in-band.

        Bug fix: default to "" so a TLS message without a "startTLS" key
        cannot make the `in` test raise TypeError on None.
        """
        inquiry = message.get("startTLS", "")
        if "send" in inquiry:
            self.sendTLS({"startTLS": "false"})
class SyncplayTCPServerProtocol(JSONCommandProtocol):
    """Plain-TCP-facing side of the proxy, with in-band startTLS support.

    On connect it dials the upstream Syncplay server; messages arriving
    before that upstream connection exists are queued in self.buffer and
    replayed by the client protocol.
    """

    def __init__(self, factory):
        self._factory = factory
        self._features = None
        self._logged = False

    def connectionMade(self):
        # Dial the upstream server; until connected, proxied messages queue
        # in self.buffer.
        self.buffer = []
        self.client = None
        cli_factory = ClientFactory()
        cli_factory.protocol = SyncplayProxyClientProtocol
        cli_factory.server = self
        host_name, host_port = self._factory.host_name, self._factory.host_port
        reactor.connectTCP(host_name, host_port, cli_factory)

    def connectionLost(self, _):
        # Detach first so the client's connectionLost doesn't re-enter drop().
        tmpClient = self.client
        if tmpClient is not None:
            self.client = None
            tmpClient.drop()

    def messageRecieved(self, messages: dict) -> None:
        """Handle TLS negotiation locally; proxy everything else upstream,
        injecting the client's real IP into the Hello message."""
        tlsMsg = messages.pop("TLS", None)
        if tlsMsg is not None:
            self.handleTLS(tlsMsg)
        if "Hello" in messages.keys():
            messages["Hello"]["user_ip"] = self.transport.getPeer().host
        if len(messages) != 0:
            self.proxyMessages(messages)

    def proxyMessages(self, messages) -> None:
        """Send *messages* upstream, or queue them until upstream connects."""
        if self.client is not None:
            self.client.sendMsg(messages)
        else:
            self.buffer.append(messages)

    def sendTLS(self, message) -> None:
        self.sendMsg({"TLS": message})

    def handleTLS(self, message) -> None:
        """Answer a startTLS inquiry, upgrading the transport when possible.

        Reloads the TLS context if the certificate file changed on disk
        since it was last read (certificate rotation).

        Bug fix: default to "" so a TLS message without a "startTLS" key
        cannot make the `in` test raise TypeError on None.
        """
        inquiry = message.get("startTLS", "")
        if "send" in inquiry:
            if self._factory.serverAcceptsTLS:
                lastEditCertTime = self._factory.checkLastEditCertTime()
                if lastEditCertTime is not None and lastEditCertTime != self._factory.lastEditCertTime:
                    self._factory.updateTLSContextFactory()
                if self._factory.options is not None:
                    self.sendTLS({"startTLS": "true"})
                    self.transport.startTLS(self._factory.options)
                else:
                    self.sendTLS({"startTLS": "false"})
            else:
                self.sendTLS({"startTLS": "false"})
|
weeb-poly/syncplay-proxy | syncplay/server.py | <gh_stars>1-10
import os
import logging
import pem
from twisted.internet.protocol import ServerFactory
from OpenSSL import SSL
from twisted.internet import ssl
from syncplay import constants
from syncplay.protocols import SyncplayTCPServerProtocol, SyncplayWSServerProtocol
from autobahn.twisted.websocket import WebSocketServerFactory
class SyncplayProxyWSFactory(WebSocketServerFactory):
    """Factory for WebSocket proxy connections, optionally served over SSL.

    Args:
        port: listening port (kept as the string the caller supplied).
        host: upstream "host:port" to proxy to.
        tlsCertPath: directory holding privkey.pem/fullchain.pem, or None.
    """
    port: str
    host: str

    def __init__(self, port: str = '', host: str = '', tlsCertPath = None):
        self.port = port
        host_name, host_port = host.split(":", 1)
        self.host_name = host_name
        self.host_port = int(host_port)
        self.options = None
        if tlsCertPath is not None:
            self._allowSSLconnections(tlsCertPath)

    def buildProtocol(self, _addr):
        return SyncplayWSServerProtocol(self)

    def _allowSSLconnections(self, path: str) -> None:
        """Load key/chain from *path* into self.options; disable SSL on failure.

        Bug fix: the module is imported as SSL, but the old code referenced
        "SSl.SSLv23_METHOD" — a NameError that the broad except swallowed,
        so SSL could never actually be enabled.
        """
        try:
            privKeyPath = path+'/privkey.pem'
            chainPath = path+'/fullchain.pem'
            contextFactory = pem.twisted.certificateOptionsFromFiles(
                privKeyPath,
                chainPath,
                method=SSL.SSLv23_METHOD
            )
            self.options = contextFactory
            logging.info("SSL support is enabled.")
        except Exception:
            self.options = None
            logging.exception("Error while loading the SSL certificates.")
            logging.info("SSL support is not enabled.")
class SyncplayProxyTCPFactory(ServerFactory):
    """Factory for plain-TCP proxy connections with in-band startTLS.

    Tracks the certificate file's mtime so a rotated certificate is
    reloaded (see SyncplayTCPServerProtocol.handleTLS), giving up after
    TLS_CERT_ROTATION_MAX_RETRIES failed reload attempts.
    """
    port: int
    host: str
    tlscertPath: str
    serverAcceptsTLS: bool
    _TLSattempts: int

    def __init__(self, port: str = '', host: str = '', tlsCertPath = None):
        self.port = int(port)
        host_name, host_port = host.split(":", 1)
        self.host_name = host_name
        self.host_port = int(host_port)
        self.certPath = tlsCertPath
        self.serverAcceptsTLS = False
        self._TLSattempts = 0
        self.options = None
        if self.certPath is not None:
            self._allowTLSconnections(self.certPath)

    def buildProtocol(self, _addr):
        return SyncplayTCPServerProtocol(self)

    def _allowTLSconnections(self, path: str) -> None:
        """Load key/chain from *path* into self.options; disable TLS on failure."""
        try:
            privKeyPath = path+'/privkey.pem'
            chainPath = path+'/fullchain.pem'
            # Remember the chain's mtime so certificate rotation is detectable.
            self.lastEditCertTime = os.path.getmtime(chainPath)
            cipherListString = "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:"\
                               "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:"\
                               "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384"
            accCiphers = ssl.AcceptableCiphers.fromOpenSSLCipherString(cipherListString)
            contextFactory = pem.twisted.certificateOptionsFromFiles(
                privKeyPath,
                chainPath,
                acceptableCiphers=accCiphers,
                raiseMinimumTo=ssl.TLSVersion.TLSv1_2
            )
            self.options = contextFactory
            self.serverAcceptsTLS = True
            self._TLSattempts = 0
            logging.info("TLS support is enabled.")
        except Exception:
            self.options = None
            self.serverAcceptsTLS = False
            self.lastEditCertTime = None
            logging.exception("Error while loading the TLS certificates.")
            logging.info("TLS support is not enabled.")

    def checkLastEditCertTime(self):
        """Return the mtime of fullchain.pem, or None if it cannot be read."""
        try:
            outTime = os.path.getmtime(self.certPath+'/fullchain.pem')
        except (OSError, TypeError):
            # Bug fix: narrowed from a bare except. OSError covers a missing/
            # unreadable file; TypeError covers certPath being None.
            outTime = None
        return outTime

    def updateTLSContextFactory(self) -> None:
        """Reload the TLS context after a certificate rotation."""
        self._allowTLSconnections(self.certPath)
        self._TLSattempts += 1
        if self._TLSattempts < constants.TLS_CERT_ROTATION_MAX_RETRIES:
            # Keep advertising TLS while we are still willing to retry.
            self.serverAcceptsTLS = True
|
yongzx/Truth-table | logic.py | from collections import OrderedDict
def split_into_list(s):
    """Tokenize an English logic sentence.

    Strips the punctuation "," "." "!" (parentheses are kept), splits on
    single spaces, and drops every "if" token — "then" alone is enough to
    encode the conditional and sits conveniently mid-sentence.

    >>> split_into_list("if A, then B")
    ['A', 'then', 'B']
    """
    cleaned = s.translate(str.maketrans("", "", ",.!"))
    return [token for token in cleaned.split(" ") if token != "if"]
def replace_parentheses_with_list(tokens):
    """Turn a flat token list with '(' / ')' into nested lists.

    ['(', 'A', 'or', 'B', ')', 'and', 'C'] becomes
    [['A', 'or', 'B'], 'and', 'C'].  Each matched pair of parentheses
    becomes one sub-list; nesting is handled by recursive descent over a
    shared cursor.
    """
    pos = 0

    def parse():
        nonlocal pos
        collected = []
        while pos < len(tokens):
            tok = tokens[pos]
            pos += 1
            if tok == '(':
                # Opening paren: everything up to its match is one sub-list.
                collected.append(parse())
            elif tok == ')':
                return collected
            else:
                collected.append(tok)
        return collected

    return parse()
def identify_atomic_sentences(logical_sentences):
    """Collect every distinct atomic-sentence symbol, in first-seen order.

    Walks arbitrarily nested token lists, skipping the connective keywords
    "then", "and", "or" and "not".  An OrderedDict whose values are all
    None doubles as an ordered set, giving O(1) de-duplication while
    preserving encounter order.
    """
    found = OrderedDict()

    def walk(item):
        if isinstance(item, list):
            for sub in item:
                walk(sub)
        elif item not in ("then", "and", "or", "not"):
            found[item] = None

    for sentence in logical_sentences:
        walk(sentence)
    return found
def generate_truth_values_for_atomic_sentences(dict_atomic_sentences):
    """Map each atomic sentence to its column of truth values.

    Builds every True/False combination (one per truth-table row), then
    transposes the rows into per-sentence columns so printing a cell is a
    simple column[row] lookup.

    Rows are produced by repeatedly popping partial rows off the *back*
    of the work list and extending each with True then False, once per
    atomic sentence beyond the first — this reproduces the exact row
    order the original recursive construction generated.
    """
    rows = [[True], [False]]
    for _ in range(1, len(dict_atomic_sentences)):
        extended = []
        while rows:
            partial = rows.pop()
            extended.append(partial + [True])
            extended.append(partial + [False])
        rows = extended

    columns = OrderedDict()
    for col, symbol in enumerate(dict_atomic_sentences):
        columns[symbol] = [row[col] for row in rows]
    return columns
class Node:
    """
    A node in a Logic Tree.

    Attributes:
        data : String - symbol of an atomic sentence (e.g., "A") for leaves, or the
                        connective in ordinary language ("then", "and", "or") for internal nodes.
        negation : Bool - whether this node's statement is negated (odd number of "not"s).
        negation_count : int - total number of "not"s applied; tracked separately from
                        `negation` so double/triple negations can still be displayed (e.g., "~ ~ A").
        truth_values : list - this node's column of truth values, negation applied.
        truth_values_wo_negation : list - the same column before negation, kept so the truth
                        table can show both forms; None when the node is not negated.
        desc : String - printable form of the statement INCLUDING "~" prefixes
                        (e.g., "~ ( A -> B )").
        desc_wo_negation : String - printable form EXCLUDING "~" prefixes.
        p : Node - parent node.
        left : Node - left child (None for leaves).
        right : Node - right child (None for leaves).
    """
    def __init__(self, data):
        self.data = data
        self.negation = False
        self.negation_count = 0
        self.truth_values = None
        self.truth_values_wo_negation = None
        self.desc = None
        self.desc_wo_negation = None
        self.left = None
        self.right = None
        self.p = None
class Tree:
    """Logic Tree for one statement.

    Internal nodes hold connectives ("then", "and", "or"), leaves hold
    atomic sentences, and negation is recorded on the node it applies to:

        not ( A and B )            if ( A and B ), then C
              (~)and                        then
              /    \\                       /    \\
             A      B                    and      C
                                        /   \\
                                       A     B

    Attributes:
        root: Node - root of the tree.
        truth_tables_output: OrderedDict mapping each compound (or
            explicitly negated) statement's description to its column of
            truth values, sub-statements before the statements built from
            them.
    """

    # Connective keyword -> (printed symbol, truth function).
    # Centralized here to remove the fourfold duplication the previous
    # version carried through describe() and assign_truth_values_to_node().
    _CONNECTIVES = {
        "then": (" -> ", lambda a, b: (not a) or b),
        "and": (" ^ ", lambda a, b: a and b),
        "or": (" V ", lambda a, b: a or b),
    }

    def __init__(self):
        self.root = None
        self.truth_tables_output = OrderedDict()

    def create_logic_tree(self, list_x):
        """Build the tree from a nested token list and return the root.

        A one-element list is an atomic sentence; a two-element list is
        ["not", <statement>]; otherwise the list is
        [<left>, <connective>, <right>] with optional "not" tokens
        immediately before either operand.
        """
        self.root = Node(list_x)

        def build(node):
            if len(node.data) == 1:
                node.data = node.data[0]
                return
            if len(node.data) == 2:
                # ["not", X]: record the negation and unwrap X.
                node.data = node.data[1]
                node.negation = not node.negation
                node.negation_count += 1
                build(node)
                return
            raw = node.data[:]
            operands = [elem for elem in node.data if elem != "not"]
            node.data = operands[1]
            node.left = Node(operands[0])
            node.right = Node(operands[2])
            node.left.p, node.right.p = node, node
            # A "not" token negates whichever operand follows it.
            # Bug fix: negation_count is now incremented alongside
            # `negation`; previously an inline "not X" operand kept
            # negation_count == 0, so describe() dropped its "~" prefix
            # and the negated column was omitted from the truth table
            # even though its truth values were (correctly) negated.
            for i in range(len(raw)):
                if raw[i] == "not":
                    if raw[i + 1] == node.left.data:
                        node.left.negation = not node.left.negation
                        node.left.negation_count += 1
                    elif raw[i + 1] == node.right.data:
                        node.right.negation = not node.right.negation
                        node.right.negation_count += 1
            if isinstance(node.left.data, list):
                build(node.left)
            if isinstance(node.right.data, list):
                build(node.right)

        build(self.root)
        return self.root

    def assign_truth_values_to_node(self, atomic_truth_values_dict):
        """Fill truth_values for every node and return the root.

        Leaves take the column for their atomic sentence; internal nodes
        combine their children's columns with the node's connective. A
        negated node keeps the un-negated column in
        truth_values_wo_negation so the table can show both forms.
        """
        def fill(node):
            if node.truth_values:
                return
            if node.left is None and node.right is None:
                node.truth_values = atomic_truth_values_dict[node.data]
            else:
                fill(node.left)
                fill(node.right)
                combine = self._CONNECTIVES[node.data][1]
                node.truth_values = [
                    combine(lv, rv)
                    for lv, rv in zip(node.left.truth_values, node.right.truth_values)
                ]
            if node.negation:
                node.truth_values_wo_negation = node.truth_values[:]
                node.truth_values = [not v for v in node.truth_values]

        fill(self.root)
        return self.root

    def describe(self, node):
        """Fill desc (and desc_wo_negation) for *node* and its subtree.

        desc is the statement in symbols, e.g. "~ ( A -> B )". A node with
        n recorded negations gets n "~ " prefixes, so double negations
        stay visible ("~ ~ A"); desc_wo_negation keeps the prefix-free
        form for negated nodes.
        """
        if node is None:
            return
        self.describe(node.left)
        self.describe(node.right)
        if node.data in self._CONNECTIVES:
            symbol = self._CONNECTIVES[node.data][0]
            node.desc = "( " + node.left.desc + symbol + node.right.desc + " )"
        else:
            node.desc = node.data
        if node.negation:
            node.desc_wo_negation = node.desc
        if node.negation_count:
            node.desc = "~ " * node.negation_count + node.desc

    def ordered_dict_is_empty(self):
        """True when no compound statements were recorded — i.e. the root
        is a bare atomic sentence."""
        return self.truth_tables_output == OrderedDict()

    def truth_tables_complex_sent(self, node):
        """Record every compound (or explicitly negated) statement's column
        in truth_tables_output, sub-statements first."""
        if node and node.left and node.right:
            self.truth_tables_complex_sent(node.left)
            self.truth_tables_complex_sent(node.right)
            if node.negation:
                # Show the un-negated form alongside the negated one.
                self.truth_tables_output[node.desc_wo_negation] = node.truth_values_wo_negation
            self.truth_tables_output[node.desc] = node.truth_values
        elif node.negation_count:
            # Negated atomic sentence such as "~ A" or "~ ~ A"; plain
            # atomic sentences already appear as table columns.
            self.truth_tables_output[node.desc] = node.truth_values
def print_truth_table(truth_values_atomic_sentences, logic_trees, logic_trees_complex):
    """Print the truth table.

    The header holds the atomic-sentence symbols, a "|" separator, then
    one column per compound statement gathered from each tree; every cell
    is left-justified in a 15-character field.
    """
    header = list(truth_values_atomic_sentences.keys()) + ['|']
    for tree in logic_trees:
        for desc in tree.truth_tables_output:
            if desc not in header:
                header.append(desc)
        if tree.ordered_dict_is_empty():
            # The statement is a bare atomic sentence: show its symbol anyway.
            header.append(tree.root.data)

    print("".join('%-15s' % cell for cell in header))

    for row in range(2 ** len(truth_values_atomic_sentences)):
        cells = []
        for name in header:
            if name in truth_values_atomic_sentences:
                cells.append('%-15s' % truth_values_atomic_sentences[name][row])
            elif name == "|":
                cells.append('%-15s' % "")
            else:
                # Compound statement: find whichever tree produced it.
                for table in logic_trees_complex:
                    if name in table:
                        cells.append('%-15s' % table[name][row])
                        break
        print("".join(cells))
def check_validity(list_of_trees):
    """Classify the argument whose premises and conclusion are the roots
    of the given trees (conclusion last).

    Returns "Not Valid" if some row makes every premise True and the
    conclusion False; otherwise "Is Valid" if some row makes all premises
    and the conclusion True; otherwise "Not Logically Connected".
    """
    columns = [tree.root.truth_values for tree in list_of_trees]
    *premises, conclusion = columns
    verdict = "Not Logically Connected"
    for row in range(len(columns[0])):
        # Only rows where every premise holds can decide validity.
        if not all(premise[row] for premise in premises):
            continue
        if conclusion[row]:
            verdict = "Is Valid"
        else:
            # One counterexample settles it: the argument is invalid.
            verdict = "Not Valid"
            break
    return verdict
###########################################################################################################################################
# Input Format : List. The last entry is the conclusion. The other entries are the premises
# there is no outermost parentheses
# When applies parentheses, ensure that there are spaces after the left parenthesis and before right parenthesis
# When apply negation, use parentheses.
sample1 = ["if A, then B", "A", "B"]  # modus ponens
sample2 = ["if A, then B", "not ( B )", "not ( A )"]  # modus tollens
sample3 = ["if A, then B", "if B, then C", "if A, then C"]  # hypothetical syllogism
sample3_A = ["if A, then B", "if B, then C", "if C, then A"]  # invalid variant of sample3
sample4 = ["A or B", "not ( A )", "B"]  # disjunctive syllogism
sample5 = ["not ( not ( not ( not ( A ) ) ) )", "A"]  # quadruple negation
sample6 = ["A", "A or B"]  # disjunction introduction
sample7 = ["A and B", "A"]  # conjunction elimination
sample8 = ["( if p, then q ) and ( if r, then s )", "p or r", "q or s"] #is Valid
def answer_truth_table_validity(list_of_statements):
    """Print the truth table for an argument and report its validity.

    Args:
        list_of_statements: the premises followed by the conclusion (last entry).

    Steps:
    1. Tokenise each statement and replace parenthesised groups with sublists.
    2. Identify the atomic sentences and generate every combination of truth
       values for them (one column per atomic sentence).
    3. Build a logic tree per statement, propagate truth values up each tree,
       and collect each tree's complex-sentence truth columns.
    4. Print the combined truth table, then print the premises, the conclusion
       and the verdict from check_validity.

    Returns:
        None; all output goes to stdout.
    """
    # Helper functions (split_into_list, identify_atomic_sentences, Tree, ...)
    # are defined elsewhere in this module.
    logical_sentences = [replace_parentheses_with_list(split_into_list(s)) for s in list_of_statements]
    atomic_sentences = identify_atomic_sentences(logical_sentences)
    truth_values_atomic_sentences = generate_truth_values_for_atomic_sentences(atomic_sentences)
    logic_trees = []
    logic_trees_complex = []
    for s in logical_sentences:
        T = Tree()
        T.create_logic_tree(s)
        T.assign_truth_values_to_node(truth_values_atomic_sentences)
        T.describe(T.root)
        T.truth_tables_complex_sent(T.root)
        logic_trees.append(T)
        logic_trees_complex.append(T.truth_tables_output)
    print_truth_table(truth_values_atomic_sentences, logic_trees, logic_trees_complex)
    print("Premises: ", list_of_statements[:-1], "\nConclusion: ", list_of_statements[-1], "\n", check_validity(logic_trees))
# Demo: evaluate two closely related arguments (sample3 and its variant).
answer_truth_table_validity(sample3)
print()
answer_truth_table_validity(sample3_A)
CallumAlexander/Tic-Tac-Toe-Artificial-Intelligence | main.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon May 13 16:56:50 2019
@author: Callum
"""
import numpy as np
from random import randint as rnd
from random import uniform
# Valuation grid the AI uses to score each cell (higher = better move).
aiValue = np.ones(shape=(3,3), dtype=float)
# Numeric mirror of the board used by Scorer (-2 for 'X', 2 for 'O', 0 empty).
scoreBoard = np.zeros(shape=(3,3), dtype=int)
# The 3x3 playing grid; ' ' marks an empty cell.
board = np.full((3,3), ' ')
player = True  # NOTE(review): appears unused; the turn flag is passed explicitly below
# player input module
def PlayerInput():
    """Prompt the player for a board position.

    Re-prompts until each coordinate is an integer in the range 0-2.
    Previously a non-numeric entry raised an unhandled ValueError and
    crashed the game; it is now caught and the prompt repeats.

    Returns:
        (playerX, playerY): the validated horizontal and vertical indices.
    """
    def _ask(axis_description):
        # Repeat the prompt until we get an in-range integer.
        prompt = 'Enter the {} coordinate of your chosen position (0-2) : '.format(axis_description)
        while True:
            try:
                value = int(input(prompt))
            except ValueError:
                continue
            if 0 <= value <= 2:
                return value

    playerY = _ask('vertical (y)')
    playerX = _ask('horizontal (x)')
    return playerX, playerY
def UpdateBoard(Y, X, board, player):
    """Place the current player's mark at (Y, X) and redraw the board.

    `player` True means the human ('X'); False means the AI ('O').
    """
    mark = 'X' if player else 'O'
    board[Y][X] = mark
    Display(board)
def UpdateValues(aiValue, board):
    '''
    Valuate each board position for the AI, in place, in two passes.

    Pass 1: mark taken cells unplayable (large negative value) and boost
    cells adjacent to occupied cells ('X' neighbours add 7 + jitter,
    'O' neighbours add 5 + jitter).  BUG FIX: the right-hand 'O' neighbour
    previously added 7 instead of 5, inconsistent with the other three
    directions; the duplicated per-direction code is now a single loop.

    Pass 2: multiply cells that complete or block a line - the empty end of
    a row/column holding two 'X's (x2), and the centre of a row/column when
    both ends match (x3 for 'X', x4 for 'O').  The random jitter breaks ties
    between otherwise equal cells.
    '''
    # Pass 1: base valuation plus neighbour boosts.
    for i in range(3):
        for j in range(3):
            if board[i, j] == 'X':
                aiValue[i, j] = -1000   # unplayable: taken by player
            elif board[i, j] == 'O':
                aiValue[i, j] = -1200   # unplayable: taken by AI
            # Boost for each occupied orthogonal neighbour.
            for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                if 0 <= ni <= 2 and 0 <= nj <= 2:
                    if board[ni, nj] == 'X':
                        aiValue[i, j] += 7 + uniform(4.5, 5.5)
                    elif board[ni, nj] == 'O':
                        aiValue[i, j] += 5 + uniform(4.5, 5.5)
    # Pass 2: line-completion multipliers.
    for i in range(3):          # vertical index
        for j in range(3):      # horizontal index
            if j == 2 and board[i, 0] == 'X' and board[i, 1] == 'X' and board[i, j] == ' ':
                aiValue[i, j] = aiValue[i, j] * 2   # empty end of a row of X's
            if j == 0 and board[i, 1] == 'X' and board[i, 2] == 'X' and board[i, j] == ' ':
                aiValue[i, j] = aiValue[i, j] * 2   # empty start of a row of X's
            if i == 2 and board[0, j] == 'X' and board[1, j] == 'X' and board[i, j] == ' ':
                aiValue[i, j] = aiValue[i, j] * 2   # empty end of a column of X's
            if i == 0 and board[1, j] == 'X' and board[2, j] == 'X' and board[i, j] == ' ':
                aiValue[i, j] = aiValue[i, j] * 2   # empty start of a column of X's
            if i == 1:
                # centre of a column whose two ends match
                if board[i - 1, j] == 'X' and board[i + 1, j] == 'X':
                    aiValue[i, j] = aiValue[i, j] * 3
                if board[i - 1, j] == 'O' and board[i + 1, j] == 'O':
                    aiValue[i, j] = aiValue[i, j] * 4
            if j == 1:
                # centre of a row whose two ends match
                if board[i, j - 1] == 'X' and board[i, j + 1] == 'X':
                    aiValue[i, j] = aiValue[i, j] * 3
                if board[i, j - 1] == 'O' and board[i, j + 1] == 'O':
                    aiValue[i, j] = aiValue[i, j] * 4
def ArtificialIntelligence(aiValue, board):
    '''
    Choose the AI's move: the (row, col) of the highest-valued cell.

    The `board` parameter is unused but kept for interface compatibility.
    The previous version re-checked that the argmax cell held the maximum
    value -- a tautology -- and implicitly returned None on the (dead)
    failure branch; that branch has been removed.
    '''
    return np.unravel_index(np.argmax(aiValue, axis=None), aiValue.shape)
def Display(board):
    """Render the 3x3 board with column headers and row labels."""
    print(' 0 1 2 ')
    for row_index in range(3):
        print(str(row_index), board[row_index, :])
def Scorer(board, scoreBoard):
    """Return the game status.

    0 = no winner, continue playing; 1 = player win; 2 = AI win; 3 = stalemate.

    Fixes over the previous version:
    * Wins are checked BEFORE stalemate, so a winning move that fills the
      last cell is reported as a win instead of a stalemate.
    * The two diagonals are now scored; previously only rows and columns
      were summed, so diagonal wins were never detected.
    * Empty cells reset scoreBoard to 0, so a reused scoreBoard cannot hold
      stale values.
    """
    # Mirror the board numerically: X -> -2, O -> 2, empty -> 0.
    for i in range(3):
        for j in range(3):
            if board[i, j] == 'X':
                scoreBoard[i, j] = -2
            elif board[i, j] == 'O':
                scoreBoard[i, j] = 2
            else:
                scoreBoard[i, j] = 0
    # A filled line sums to -6 (player) or 6 (AI).
    line_sums = []
    for i in range(3):
        line_sums.append(int(np.sum(scoreBoard[i, :])))   # row i
        line_sums.append(int(np.sum(scoreBoard[:, i])))   # column i
    line_sums.append(int(np.trace(scoreBoard)))                       # main diagonal
    line_sums.append(int(np.sum(np.diag(np.fliplr(scoreBoard)))))     # anti-diagonal
    if -6 in line_sums:
        return 1
    if 6 in line_sums:
        return 2
    if 0 not in scoreBoard:
        return 3
    return 0
# Main game loop: alternate human ('X') and AI ('O') turns until the game ends.
Display(board)
while True:
    # --- Player's turn ---
    playerX, playerY = PlayerInput()
    UpdateBoard(playerY, playerX, board, True)
    # Re-valuate the whole board from scratch each turn.
    aiValue = np.ones(shape=(3,3), dtype=float)
    UpdateValues(aiValue, board)
    if Scorer(board, scoreBoard) == 3:
        Display(board)
        print('Stalemate, good game')
        break
    elif Scorer(board, scoreBoard) == 1:
        Display(board)
        print('You win, congrats')
        break
    elif Scorer(board, scoreBoard) == 2:
        Display(board)
        print('The AI has won')
        break
    # --- AI's turn ---
    pos = (0,0)
    pos = ArtificialIntelligence(aiValue, board)
    UpdateBoard(pos[0], pos[1], board, False)
    aiValue = np.ones(shape=(3,3), dtype=float)
    UpdateValues(aiValue, board)
    if Scorer(board, scoreBoard) == 3:
        Display(board)
        print('Stalemate, good game')
        break
    if Scorer(board, scoreBoard) == 1:
        Display(board)
        print('You win, congrats')
        break
    elif Scorer(board, scoreBoard) == 2:
        Display(board)
        print('The AI has won')
        break
wanderleyjrs/Python-Baseball | stats/data.py | import os
import glob
import pandas as pd
#3 - Python File Management
#game_files = glob.glob(os.path.join(os.getcwd(), 'games','*.EVE')) os.getcwd() -> return the current working directory of a process.
game_files = glob.glob('/home/wanderley/Python-Baseball/games/*.EVE')
#4 - Sorting File Names
game_files.sort()
#5 - Read CSV Files
game_frames = []
#6 - Append Game Frames
for game_file in game_files:
game_frame = pd.read_csv(game_file, names = ['type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event'])
game_frames.append(game_frame)
#7 - Concatenate DataFrames
games = pd.concat(game_frames)
#8 - Clean Values
games.loc[(games['multi5'] == '??'),'multi5'] = " "
#9 - Extract Identifiers
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
#10 - Forward Fill Identifiers
identifiers = identifiers.fillna(method='ffill')
#11 - Rename Columns
identifiers.columns = ['game_id', 'year']
#12 - Concatenate Identifier Columns
games = pd.concat([games, identifiers], axis=1, sort=False)
#13 - Fill NaN Values
games = games.fillna(' ')
#14 - Categorical Event Type
games.loc[:,'type'] = pd.Categorical(games.loc[:,'type'])
#15 - Print DF
print(games.head())
|
Goooyi/svgd-tf2 | svgd/kernels.py | import tensorflow as tf
import tensorflow_probability as tfp
@tf.function
def euclideanPairwiseDistance(x):
    """Pairwise squared Euclidean distances between the rows of x.

    Returns an (n, n) matrix D with D[i, j] = ||x[i] - x[j]||^2.  The second
    operand is wrapped in stop_gradient, so gradients flow only through the
    first occurrence of x.
    """
    # distance[i, j, :] = x[i] - x[j] via broadcasting
    distance = tf.expand_dims(x, 1) - tf.expand_dims(tf.stop_gradient(x), 0)
    # transpose gives (k, n, n); the einsum contracts the feature axis k:
    # out[i, j] = sum_k distance[i, j, k] * distance[i, j, k]
    return tf.einsum('ijk,kji->ij', distance, tf.transpose(distance))
class RbfKernel:
    """RBF (Gaussian) kernel with a median-heuristic bandwidth.

    k(x_i, x_j) = exp(-0.5 * ||x_i - x_j||^2 / h^2), with h computed from the
    median pairwise squared distance.
    """

    @tf.function
    def __call__(self, x):
        normedDist = euclideanPairwiseDistance(x)
        # Bandwidth is treated as a constant w.r.t. differentiation.
        bandwidth = tf.stop_gradient(self.computeBandWidth(normedDist))
        return tf.exp(-0.5 * normedDist / bandwidth**2)

    @tf.function
    def computeBandWidth(self, euclideanPwDistances):
        """Median heuristic: median(squared distances) / log(n + 1).

        NOTE(review): the SVGD paper's heuristic is h = sqrt(median / (2 log(n+1)))
        used directly, while here the result is squared again in __call__ --
        confirm the intended scaling.
        """
        pwDistanceMedian = tfp.stats.percentile(
            euclideanPwDistances, 50.0, interpolation='midpoint')
        n = tf.constant(euclideanPwDistances.shape[0], dtype = tf.float64)
        return pwDistanceMedian / tf.math.log(n + 1)
Goooyi/svgd-tf2 | svgd/__init__.py | from svgd.main import SVGD |
Goooyi/svgd-tf2 | svgd/main.py | import tensorflow as tf
class SVGD:
    """Stein variational gradient descent over a set of particles."""

    def __init__(self, kernel, targetDistribution, learningRate):
        # kernel: callable mapping particles (n, d) to an (n, n) kernel matrix.
        # targetDistribution: callable returning (unnormalised) density values.
        self.kernel = kernel
        self.targetDistribution = targetDistribution
        self.optimizer = tf.keras.optimizers.Adam(learningRate)

    def update(self, x, nIterations):
        """Run nIterations SVGD steps, updating and returning the particles x."""
        for _ in range(nIterations):
            kernelMatrix, kernelGrad = self.computeKernel(x)
            logprobGradient = self.logprobGradient(x)
            # Negative of the averaged update direction
            # (K @ grad log p + grad K) / n, so that the optimizer's descent
            # step moves the particles toward higher target density.
            completeGrad = -(kernelMatrix @ logprobGradient + kernelGrad) / x.shape[0]
            self.optimizer.apply_gradients([(completeGrad, x)])
        return x

    def computeKernel(self, x):
        """Return the kernel matrix and the negated gradient of its sum w.r.t. x."""
        with tf.GradientTape() as tape:
            kernelMatrix = self.kernel(x)
        return kernelMatrix, -tape.gradient(kernelMatrix, [x])[0]

    def logprobGradient(self, x):
        """Gradient of log target density evaluated at the particles."""
        with tf.GradientTape() as tape:
            logprob = tf.math.log(self.targetDistribution(x))
        return tape.gradient(logprob, [x])[0]
Crupette/modit-micro | util/retrievesymbols.py | <gh_stars>1-10
"""
Script originally created by klange (https://github.com/klange/toaruos/blob/master/util/generate_symbols.py")
I am using it because GCC is fighting with me
This may be replaced later
or improved
or modified
I honestly don't know
"""
import sys

# Symbols emitted by this script itself; exclude them to avoid self-reference.
ignored = ["kernel_symbols_start", "kernel_symbols_end"]

# Each stdin line is expected to look like "<address> <type> <name>".
# BUG FIX: the previous filter tested the full raw line against the ignore
# list, so it never matched; the symbol name is now extracted first and the
# filter applied to the name.
lines = [name
         for name in (x.strip().split(" ")[2] for x in sys.stdin.readlines())
         if name not in ignored]

print(".section .symbols")
for name in lines:
    print(".extern %s" % (name))
    print(".type %s, @function" % (name))
print(".global kernel_symbols_start")
print("kernel_symbols_start:")
for name in lines:
    print(".long %s" % (name))
    print(".asciz \"%s\"" % (name))
print(".global kernel_symbols_end")
print("kernel_symbols_end:")
bindungszustandsamplitude/dysonutils | libraries/libInterpolate/scripts/util/update-sources.py | #! /usr/bin/env python
from pyparsing import *
import argparse
import os
import glob
import re
import shutil
import pprint
import fnmatch
# Command-line interface.
# NOTE(review): three positional nargs="*" arguments cannot all be filled from
# the command line -- argparse assigns everything to the first; the later two
# always take their defaults.  Confirm whether options were intended.
arg_parser = argparse.ArgumentParser(description="Replaces source files in a CMakeLitst.txt file.")
arg_parser.add_argument("cmake_files", metavar="CMAKELISTS-FILE", nargs="*", default=['CMakeLists.txt'],help="Cmake files to be modified. Stdin is used if no files are given.")
arg_parser.add_argument("source_dirs", metavar="SOURCE-DIRECTORY", nargs="*", default=['./src/'],help="Directory to search for source files.")
arg_parser.add_argument("source_exts", metavar="EXT", nargs="*", default=['.hpp','.cpp','.h'],help="Source file extensions.")
args = arg_parser.parse_args()

class parser:
    """Namespace holding the pyparsing grammars for target_sources(...) commands."""
    cmd_name = Literal('target_sources')
    arguments = QuotedString( quoteChar='(', endQuoteChar=')',multiline=True )
    source_cmd_parser = WordStart() + cmd_name('cmd_name') + arguments('arguments')
    arguments_parser = OneOrMore(Word(printables))
for cmake_file in (args.cmake_files if len(args.cmake_files) > 0 else ('-',)):
cmake_dir = os.path.dirname(cmake_file)
if len(cmake_dir) == 0:
cmake_dir = "."
# find source files
source_files = []
for dir in args.source_dirs:
for root, directories, filenames in os.walk(dir):
for filename in filenames:
for ext in args.source_exts:
if fnmatch.fnmatch(filename, '*'+ext):
source_files.append( os.path.join(root,filename) )
print "Detected Sources:"
pprint.pprint(source_files)
print "Directories Searched:"
pprint.pprint(args.source_dirs)
# parse cmake file
with open(cmake_file,'r') as f:
cmake_text = f.read()
target_source_commands = parser.source_cmd_parser.searchString(cmake_text)
if len(target_source_commands) > 1:
print "ERROR: more than one target_source command found. I don't know what to do."
target_source_arguments = parser.arguments_parser.parseString(target_source_commands[0]['arguments'])
lines = []
lines.append(target_source_commands[0]['cmd_name']+"(")
i = 0
while i < len(target_source_arguments) and re.search( "CMAKE_CURRENT_SOURCE_DIR", target_source_arguments[i]) == None:
lines.append(" "+target_source_arguments[i])
i += 1
for source in source_files:
lines.append( " $<BUILD_INTERFACE:%s>"%re.sub("^./","${CMAKE_CURRENT_SOURCE_DIR}/",source,count=1) )
lines.append(")")
new = "\n".join(lines)
old = originalTextFor(parser.source_cmd_parser).searchString(cmake_text)[0][0]
cmake_text = cmake_text.replace(old,new)
# make a copy of file
shutil.copyfile( cmake_file, cmake_file+".bak" )
with open(cmake_file,'w') as f:
pass
f.write(cmake_text)
|
Dark-Bob/mro | paging_test.py | <reponame>Dark-Bob/mro<filename>paging_test.py
# reminder to add tests for this and implement |
Dark-Bob/mro | update_test.py | <gh_stars>1-10
import os
import pytest
import mro
import mro.foreign_keys
import connection as con
@pytest.fixture(scope="module")
def connection(request):
    """Create the test schema, seed it, and initialise mro for this module.

    The raw connection used for DDL/seeding is closed before mro.load_database
    opens its own; the returned (closed) connection object only satisfies the
    tests' fixture signatures.
    """
    connection = con.connect()
    request.addfinalizer(mro.disconnect)
    cursor = connection.cursor()
    con.drop_tables()
    # table1: composite primary key (id, name)
    cursor.execute("""create table table1 (
        id serial unique,
        name varchar(20) not null,
        value varchar(20),
        primary key (id, name)
        );""")
    # table2: references table1 via table1_id
    cursor.execute("""create table table2 (
        id serial,
        name varchar(20) not null,
        table1_id integer,
        primary key (id),
        foreign key (table1_id) references table1(id)
        );""")
    # table3: deliberately has NO primary key (used by the no-PK update test)
    cursor.execute("""create table table3 (
        value varchar(20) not null
        );""")
    # table4: boolean columns with defaults (used by test_boolean)
    cursor.execute("""create table table4 (
        id serial,
        my_bool bool default False,
        my_boolean boolean default True,
        primary key (id)
        );""")
    connection.commit()
    create_test_data(connection)
    connection.close()
    mro.load_database(lambda: con.connect())
    return connection
def create_test_data(connection):
    """Seed table1/table2 with linked rows plus null/orphan edge cases."""
    cursor = connection.cursor()
    num_table1 = 2
    for i in range(1,num_table1+1):
        cursor.execute("insert into table1 (name) values (%s)", ('table1_{}'.format(i),))
        # three table2 rows referencing each table1 row
        for j in range(1,4):
            cursor.execute("insert into table2 (name, table1_id) values (%s,%s)", ('table2_{}_{}'.format(i, j), i))
    # edge cases: a table2 row with no parent, and a table1 row with no children
    cursor.execute("insert into table2 (name, table1_id) values (%s,%s)", ('table2_None', None))
    cursor.execute("insert into table1 (name) values (%s)", ('table1_None',))
    connection.commit()
class TestUpdates(object):
    """Tests for mro's update behaviour (attribute writes and bulk update)."""

    def test_multiple_column_primary_key_update(self, connection):
        # An attribute write should persist via the composite (id, name) key.
        table = mro.table1(name='Test34', value='first')
        selectedTable = mro.table1.select_one("name='Test34'")
        assert selectedTable.value == 'first'
        table.value = 'second'
        selectedTable = mro.table1.select_one("name='Test34'")
        assert selectedTable.value == 'second'

    def test_update_fails_with_no_primary_key(self, connection):
        # table3 has no primary key, so an attribute write cannot be mapped
        # back to a row and must raise.
        table = mro.table3(value='first')
        with pytest.raises(ValueError) as excinfo:
            table.value = 'second'
        assert excinfo.value.args[0] == "Update needs columns to match to update, is your table missing a primary key?"

    def test_update_multiple_values(self, connection):
        # A bulk update may change primary-key and non-key columns together.
        table = mro.table1(name='Test35', value='first')
        assert mro.table1.select_count("name='Test35'") == 1
        assert mro.table1.select_count("name='Test36'") == 0
        table = table.update(name='Test36', value='second')
        assert table.name == 'Test36'
        assert table.value == 'second'
        assert mro.table1.select_count("name='Test35'") == 0
        assert mro.table1.select_count("name='Test36'") == 1
        selectedTable = mro.table1.select_one("name='Test36'")
        assert selectedTable.value == 'second'

    def test_boolean(self, connection):
        table = mro.table4.insert(my_bool=True, my_boolean=False)
        # BUG FIX: this comparison was a bare expression with no effect;
        # it is now asserted.
        assert table.my_bool != table.my_boolean
        assert table.my_bool is True
        assert table.my_boolean is False
if __name__ == '__main__':
    # Run the whole module; swap the lines to run a single test.
    pytest.main([__file__])
    #pytest.main([__file__ + '::TestUpdates::test_update_multiple_values'])
Dark-Bob/mro | mro/sqlite.py | <reponame>Dark-Bob/mro
import mro.data_types as data_types
def _load_sqllite_db(connection):
    """Build a {table_name: {column_name: data_type}} dict from a sqlite db.

    NOTE(review): this parses the raw CREATE TABLE text stored in
    sqlite_master by splitting on ', ' and ' '; multi-word types, table
    constraints, or unusual whitespace would break it.  sqlite's
    "PRAGMA table_info" would be a sturdier source -- confirm before relying
    on this for arbitrary schemas.
    """
    cursor = connection.cursor()
    tables = {}
    # Get tables
    cursor.execute("select * from sqlite_master;")
    connection.commit()
    for table in cursor:
        # Get columns: sqlite_master column 4 holds the original CREATE sql;
        # strip everything outside the outermost parentheses.
        definition = table[4]
        index = definition.find('(')
        definition = definition[index+1:len(definition)-1]
        columns = definition.split(', ')
        col_dict = {}
        for column in columns:
            # "<name> <type> ..." -> map the type through mro's type table
            col_split = column.split(' ')
            col_dict[col_split[0]] = data_types.type_map[col_split[1]]
        # sqlite_master column 1 is the table name
        tables[table[1]] = col_dict
    return tables
Dark-Bob/mro | view_test.py | <gh_stars>1-10
import os
import pytest
import mro
import mro.foreign_keys
import connection as con
xfail = pytest.mark.xfail
@pytest.fixture(scope="module")
def connection(request):
    """Create the view-test schema, initialise mro, then seed the tables.

    Unlike update_test's fixture, mro.load_database is called BEFORE seeding
    and the DDL connection is returned still open.
    """
    connection = con.connect()
    request.addfinalizer(mro.disconnect)
    cursor = connection.cursor()
    con.drop_tables()
    cursor.execute("""create table table1 (
        id serial,
        name varchar(20) not null,
        primary key (id)
        );""")
    cursor.execute("""create table table2 (
        id serial,
        name varchar(20) not null,
        table1_id integer,
        primary key (id),
        foreign key (table1_id) references table1(id)
        );""")
    # table3.table4s shares its name with the reference list mro would
    # otherwise generate for table4 rows.
    cursor.execute("""create table table3 (
        id serial,
        name varchar(20) not null,
        table4s varchar(20),
        primary key (id)
        );""")
    cursor.execute("""create table table4 (
        id serial,
        name varchar(20) not null,
        table3_id integer,
        primary key (id),
        foreign key (table3_id) references table3(id)
        );""")
    connection.commit()
    mro.load_database(lambda: con.connect())
    create_test_data(connection)
    return connection
def create_test_data(connection):
    """Seed table1/table2 with linked rows plus null/orphan edge cases."""
    cursor = connection.cursor()
    num_table1 = 2
    for i in range(1,num_table1+1):
        cursor.execute("insert into table1 (name) values (%s)", ('table1_{}'.format(i),))
        # three table2 rows referencing each table1 row
        for j in range(1,4):
            cursor.execute("insert into table2 (name, table1_id) values (%s,%s)", ('table2_{}_{}'.format(i, j), i))
    # edge cases: a table2 row with no parent, and a table1 row with no children
    cursor.execute("insert into table2 (name, table1_id) values (%s,%s)", ('table2_None', None))
    cursor.execute("insert into table1 (name) values (%s)", ('table1_None',))
    connection.commit()
class TestViews(object):
    """Placeholder tests for database-view support (not yet implemented).

    All tests are marked xfail; they raise NotImplementedError (the precise
    exception type for unimplemented behaviour, replacing the previous bare
    `raise Exception(...)` with stray semicolons) until view support lands.
    """

    @xfail
    def test_read_view(self, connection):
        raise NotImplementedError("Not implemented")

    @xfail
    def test_insert_view_fails(self, connection):
        raise NotImplementedError("Not implemented")

    @xfail
    def test_update_view_fails(self, connection):
        raise NotImplementedError("Not implemented")
if __name__ == '__main__':
    # Currently runs only the (xfail) read test; swap lines for the full module.
    #pytest.main([__file__])
    pytest.main([__file__ + '::TestViews::test_read_view'])
Dark-Bob/mro | mro/custom_types.py | import mro
import datetime
import csv
import psycopg2.extensions, psycopg2.extras
from operator import attrgetter
def _get_custom_type_names(connection):
    """Return the names of user-defined Postgres types as a list of 1-tuples.

    The query keeps composite/scalar user types while excluding array element
    types, everything in pg_catalog/information_schema, and the 'lo' type.
    """
    cursor = connection.cursor()
    # Get custom types
    # 'lo' is a type that comes from the lo extension
    cursor.execute("""SELECT t.typname as type
        FROM pg_type t
        LEFT JOIN pg_catalog.pg_namespace n
        ON n.oid = t.typnamespace
        WHERE
        (
            t.typrelid = 0
            OR
            (
                SELECT c.relkind = 'c'
                FROM pg_catalog.pg_class c
                WHERE c.oid = t.typrelid
            )
        )
        AND NOT EXISTS
        (
            SELECT 1
            FROM pg_catalog.pg_type el
            WHERE el.oid = t.typelem
            AND el.typarray = t.oid
        )
        AND n.nspname NOT IN
        (
            'pg_catalog',
            'information_schema'
        )
        AND t.typname != 'lo'
        """)
    connection.commit()
    types = [t for t in cursor]
    return types
def customColumnToDataType(column, code_start, code_end):
    """Render the generated-code snippet for a custom-typed column.

    `column[27]` holds the custom type's name; the result is
    "<code_start>mro.custom_types.<name>, <code_end>".
    """
    return f'{code_start}mro.custom_types.{column[27]}, {code_end}'
def strtobool(value: str):
    """Map a Postgres boolean literal to a bool, case-insensitively.

    'true'/'t' -> True, 'false'/'f' -> False, anything else -> None.
    """
    lowered = value.lower()
    if lowered in ('true', 't'):
        return True
    if lowered in ('false', 'f'):
        return False
    return None
def strtodate(value: str):
    """Parse a 'YYYY-MM-DD' string into a datetime.date."""
    parsed = datetime.datetime.strptime(value, '%Y-%m-%d')
    return parsed.date()
def strtotime(value: str):
    """Parse an 'HH:MM:SS.ffffff' string into a datetime.time."""
    parsed = datetime.datetime.strptime(value, '%H:%M:%S.%f')
    return parsed.time()
def strtotimestamp(value: str):
    """Parse a 'YYYY-MM-DD HH:MM:SS.ffffff' string into a datetime.datetime."""
    return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f')
# Converters from Postgres column-type names to callables that parse a field's
# textual value when casting a composite type (see cast_custom below).
postgres_type_to_python_map = {
    'character varying': str,
    'text': str,
    'integer': int,
    'boolean': strtobool,
    'bool': strtobool,
    'float': float,
    'double precision': float,
    'real': float,
    'date': strtodate,
    'timestamp without time zone': strtotimestamp,
    'time without time zone': strtotime,
    'json': str,
    'jsonb': str,
    'uuid': str,
    'bytea': str,
    'oid': int
}
class CustomFieldType:
    """Read-only descriptor exposing a (possibly dotted) attribute of its owner."""

    def __init__(self, attribute):
        self.attribute = attribute
        # Pre-build the accessor once instead of on every read.
        self._getter = attrgetter(attribute)

    def __get__(self, instance, type):
        # Class-level access returns the descriptor itself.
        return self if instance is None else self._getter(instance)

    def __set__(self, instance, value):
        raise NotImplementedError("You cannot set custom type internal attributes")
def create_custom_types(connection):
    """Discover user-defined Postgres composite types and wire them into mro.

    For each composite type this function:
      * registers it in mro.data_types.type_map,
      * builds a Python class (one read-only attribute per field) and
        registers psycopg2 cast/adapt functions for it,
      * builds a matching mro.data_types column-descriptor class.

    Fix over the previous version: `len(...) is 0` identity comparison in
    cast_custom replaced with `== 0` (the old form relies on CPython small-int
    caching and raises SyntaxWarning on modern Pythons).
    """
    custom_types = _get_custom_type_names(connection)
    for custom_type in custom_types:
        type_name = custom_type[0]
        # hstore variants are handled by psycopg2 itself; skip them here.
        if type_name == 'hstore' or type_name == 'ghstore':
            continue
        mro.data_types.type_map[type_name] = [type_name, customColumnToDataType, mro.data_types.default_transform]
        psycopg2.extras.register_composite(type_name, connection)
        cursor2 = connection.cursor()
        # Fetch the composite type's field names and Postgres type names.
        cursor2.execute(f"""
            SELECT a.attname AS "Field",
            pg_catalog.format_type(a.atttypid, a.atttypmod) AS "Datatype"
            FROM pg_attribute AS a
            WHERE attrelid IN
            (
                SELECT typrelid
                FROM pg_type
                WHERE typname = '{type_name}'
            );
            """)
        connection.commit()
        fields = [field for field in cursor2]

        # Create the python custom class
        def create_custom_type(name, fields):
            def constructor(self, **kwargs):
                # Values live under a leading underscore; the public names
                # are the CustomFieldType descriptors attached below.
                for k, v in kwargs.items():
                    self.__dict__[f"_{k}"] = v

            custom_type_dict = {'__init__': constructor}
            for field in fields:
                column_name = field[0]
                custom_type_dict[column_name] = None
            new_custom_class = type(name, (), custom_type_dict)
            for field in fields:
                setattr(new_custom_class, field[0], CustomFieldType(f"_{field[0]}"))

            def cast_custom(value, cur):
                # Parse the textual composite form "(v1,v2,...)" into an instance.
                if value is None:
                    return None
                value = value.strip('(').rstrip(')')
                values = list(csv.reader([value]))[0]
                dict = {}
                for i in range(len(fields)):
                    col_name = fields[i][0]
                    col_type = fields[i][1]
                    col_value = values[i]
                    # Empty text means SQL NULL for that field.
                    if len(values[i]) == 0:
                        dict[col_name] = None
                    else:
                        dict[col_name] = postgres_type_to_python_map[col_type](col_value)
                return new_custom_class(**dict)

            cursor3 = connection.cursor()
            # Get the postgres type object id for this custom type
            cursor3.execute(f"""
                SELECT pg_type.oid
                FROM pg_type
                JOIN pg_namespace
                ON typnamespace = pg_namespace.oid
                WHERE typname = '{type_name}'
                AND nspname = 'public'
                """)
            connection.commit()
            custom_object_oid = cursor3.fetchone()[0]
            new_custom_type = psycopg2.extensions.new_type((custom_object_oid,), new_custom_class.__name__,
                                                           cast_custom)
            psycopg2.extensions.register_type(new_custom_type, connection)

            def adapt_custom_type(custom_type):
                # Render instances back into the "(v1, v2, ...)" literal form.
                fields = []
                for k, v in custom_type.__dict__.items():
                    fields.append(str(v))
                return psycopg2.extensions.AsIs(str(tuple(fields)))

            psycopg2.extensions.register_adapter(new_custom_class, adapt_custom_type)
            return new_custom_class

        custom_python_class = create_custom_type(type_name, fields)
        setattr(mro.custom_types, custom_python_class.__name__, custom_python_class)

        # Create the database custom class
        def custom_type_constructor(self, name, column_index, python_type, **kwargs):
            super(self.__class__, self).__init__(name, python_type, column_index, **kwargs)

        def setter(self, instance, value):
            if not self.is_updateable:
                raise PermissionError('The value of [{}] is not updateable.'.format(self.name))
            if value is None:
                if self.not_null:
                    raise ValueError('The value of [{}] cannot be null.'.format(self.name))
            elif type(value) is not self.python_type:
                # Allow dict/tuple payloads to be coerced into the custom type.
                if type(value) is dict or type(value) is tuple:
                    value = self.python_type(**value)
                else:
                    raise TypeError(
                        'Value should be of type [{}] or dictionary not [{}]'.format(self.python_type.__name__,
                                                                                     value.__class__.__name__))
            self.validate_set(value)
            instance.__dict__[self.name] = value
            instance.update(**{self.name: value})

        attrib_dict = {'__init__': custom_type_constructor,
                       '__set__': setter}
        # NOTE(review): keys here are the raw (name, type) field tuples, unlike
        # the Python-class path above which uses field[0] -- confirm intended.
        for field in fields:
            attrib_dict[field] = None
        custom_db_class = type(type_name,
                               (mro.data_types.database_type,),
                               attrib_dict)
        setattr(mro.data_types, custom_db_class.__name__, custom_db_class)
|
Dark-Bob/mro | mro/__init__.py | <reponame>Dark-Bob/mro<filename>mro/__init__.py
import mro.connection
import mro.data_types
import mro.table
import mro.sqlite
import mro.custom_types
import mro.routine
def disconnect():
    """Close the module-level database connection managed by mro.connection."""
    mro.connection.disconnect()
def load_database(connection_function, hooks=None):
    """Connect to the database and generate the ORM classes for all tables.

    Args:
        connection_function: zero-argument callable returning a new connection.
        hooks: optional iterable of zero-argument callables; registered for
            reconnects and also invoked once after the initial build.
    """
    print("***********INITIALISING DATABASE************")
    mro.connection.set_connection_function(connection_function)
    # Rebuild the generated classes whenever the connection is re-established.
    mro.connection.set_on_reconnect(init_db)
    mro.connection.set_hooks(hooks)
    connection = mro.connection.connection
    init_db(connection)
    if hooks is not None:
        for hook in hooks:
            hook()
def init_db(connection):
    """(Re)build the ORM classes and routines from the connected database."""
    # sqlite connections are introspected differently from Postgres ones.
    if connection.__class__.__module__ == 'sqlite3':
        tables = sqlite._load_sqllite_db(connection)
    else:
        tables = _load_standard_db(connection)
    _create_classes(tables)
    mro.routine._create_routines(connection)
def execute_sql(sql, values=None):
    """Delegate raw (optionally parameterised) SQL to mro.table.table._execute_sql."""
    return mro.table.table._execute_sql(sql, values)
def _load_standard_db(connection):
    """Introspect a Postgres database and build a table-metadata dict.

    Returns:
        {table_name: {'columns': [col_data, ...],
                      'foreign_key_targets': [(table, col, ref_col), ...]}}
        consumed by _create_classes to generate the ORM classes.

    NOTE(review): table names are interpolated into SQL via f-strings; they
    come from information_schema rather than user input, but parameterised
    queries would still be safer.
    """
    print('Loading standard db')
    cursor = connection.cursor()
    tables = {}
    # Create any custom types
    print('Creating custom types')
    mro.custom_types.create_custom_types(connection)
    # Get tables
    print('Getting tables')
    cursor.execute("select * from information_schema.tables where table_schema='public';")
    connection.commit()
    for table in cursor:
        table_name = table[2]
        print(f'Getting info about table [{table_name}]')
        cursor2 = connection.cursor()
        # Get foreign keys (part 1): columns of THIS table referencing others
        # https://dba.stackexchange.com/a/218969
        cursor2.execute(f"""
            select
            col.attname as fk_column_name
            ,ftbl.relname as referenced_table_name
            ,fcol.attname as referenced_column_name
            from pg_catalog.pg_constraint con
            join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true
            join pg_class tbl on tbl.oid = con.conrelid
            join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum)
            join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true
            join pg_class ftbl on ftbl.oid = con.confrelid
            join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum)
            where
            con.conrelid = '{table_name}'::regclass
            and con.contype = 'f';
            """)
        connection.commit()
        foreign_keys = {}
        for foreign_key in cursor2:
            # fk column name -> (referenced table, referenced column)
            foreign_keys[foreign_key[0]] = (foreign_key[1], foreign_key[2])
        # Get foreign keys (part 2): columns ELSEWHERE referencing this table
        # https://dba.stackexchange.com/a/218969
        cursor2.execute(f"""
            select
            tbl.relname
            ,col.attname
            ,fcol.attname
            from pg_catalog.pg_constraint con
            join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true
            join pg_class tbl on tbl.oid = con.conrelid
            join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum)
            join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true
            join pg_class ftbl on ftbl.oid = con.confrelid
            join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum)
            where
            con.confrelid = '{table_name}'::regclass
            and con.contype = 'f';
            """)
        connection.commit()
        foreign_key_targets = []
        for foreign_key in cursor2:
            # (referencing table, referencing column, referenced column here)
            foreign_key_targets.append((foreign_key[0], foreign_key[1], foreign_key[2]))
        # Get primary keys
        # https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
        cursor2.execute(f"""
            select
            a.attname
            from pg_index i
            join pg_attribute a on a.attrelid = i.indrelid and a.attnum = any(i.indkey)
            where
            i.indrelid = '{table_name}'::regclass
            and i.indisprimary;
            """)
        connection.commit()
        primary_key_columns = [row[0] for row in cursor2]
        # Get columns
        cursor2.execute(f"""
            select
            column_name
            ,data_type
            ,udt_name
            ,ordinal_position
            ,column_default
            ,is_nullable
            ,is_updatable
            ,character_maximum_length
            from information_schema.columns
            where
            table_name='{table_name}';
            """)
        connection.commit()
        columns = []
        for column in cursor2:
            col_data = {}
            column_name = column[0]
            postgres_type = column[1]
            # USER-DEFINED types resolve through udt_name to a custom type.
            if postgres_type == 'USER-DEFINED':
                postgres_type = column[2]
                data_type = mro.data_types.type_map[postgres_type]
                col_data['custom_type'] = eval(f'mro.custom_types.{postgres_type}')
            else:
                data_type = mro.data_types.type_map[postgres_type]
            column_index = column[3]-1
            column_default = column[4]
            is_nullable = column[5] == 'YES'
            is_updateable = column[6] == 'YES'
            get_value_on_insert = False
            is_primary_key = column_name in primary_key_columns
            if column_default:
                # Transform the raw default expression; the transform also
                # decides whether the value must be read back after insert
                # (e.g. serial sequences).
                column_default, get_value_on_insert = data_type[2](column_default, postgres_type)
            col_data['data_type'] = data_type[0]
            col_data['column_name'] = column_name
            col_data['column_index'] = column_index
            col_data['column_default'] = column_default
            col_data['not_null'] = not is_nullable
            col_data['is_updateable'] = is_updateable
            col_data['get_value_on_insert'] = get_value_on_insert
            col_data['is_primary_key'] = is_primary_key
            col_data['length'] = column[7]
            if column_name in foreign_keys:
                foreign_key = foreign_keys[column_name]
                col_data['foreign_key'] = foreign_key
            columns.append(col_data)
        tables[table_name] = {}
        tables[table_name]['columns'] = columns
        tables[table_name]['foreign_key_targets'] = foreign_key_targets
    return tables
def _create_classes(tables):
    """Generate one mro.table subclass per introspected table.

    Each generated class gets: an __init__ that applies defaults and inserts
    the row, an update() keyed on the primary-key columns, one data-type
    descriptor per column, foreign-key descriptors for referencing columns,
    and reference-list descriptors for tables that reference it.  Classes are
    attached to the mro module namespace by table name.
    """
    for table_name, table_data in tables.items():
        table_columns = table_data['columns']
        foreign_key_targets = table_data['foreign_key_targets']

        def create_table_class(name, columns):
            def init_function(self, **kwargs):
                # Seed attributes with column defaults, bypassing descriptors.
                for column in columns:
                    self.__dict__[column['column_name']] = column['column_default']
                    custom_type = column.get('custom_type')
                    kwarg_for_column = kwargs.get(column['column_name'])
                    if kwarg_for_column is not None:
                        # Coerce plain dict payloads into the custom type.
                        if custom_type is not None and type(kwarg_for_column) is not custom_type:
                            kwargs[column['column_name']] = custom_type(**kwarg_for_column)
                for k, v in kwargs.items():
                    if not hasattr(self, k):
                        raise ValueError(f"{self.__class__.__name__} does not have an attribute {k}")
                    self.__dict__[k] = v
                # Insert unless construction-inserts are disabled (see
                # mro.table.disable_insert); copy back server-generated values.
                if not super(self.__class__, self)._insert.disabled:
                    obj = super(self.__class__, self).insert(**kwargs)
                    for c in self.__class__._get_value_on_insert_columns:
                        self.__dict__[c] = obj.__dict__[c]

            def update_function(self, **kwargs):
                # Update keyed on the current primary-key values; mirror the
                # new values locally without triggering per-attribute updates.
                primary_key_columns = self.__class__._primary_key_columns
                primary_key_column_values = [self.__dict__[c] for c in primary_key_columns]
                super(self.__class__, self).update(primary_key_columns, primary_key_column_values, **kwargs)
                with mro.table.disable_insert():
                    for k, v in kwargs.items():
                        self.__dict__[k] = v
                return self

            attrib_dict = {'__init__': init_function,
                           'update': update_function}
            table_class = type(name, (mro.table.table,), attrib_dict)
            return table_class

        dynamic_table_class = create_table_class(table_name, table_columns)
        for column in table_columns:
            kwargs = {"name": column['column_name'],
                      "column_index": column['column_index'],
                      "not_null": column['not_null'],
                      "is_updateable": column['is_updateable'],
                      "get_value_on_insert": column['get_value_on_insert'],
                      "is_primary_key": column['is_primary_key']}
            if column['data_type'] == 'varchar':
                kwargs['length'] = column['length']
            if column.get('custom_type') is not None:
                kwargs['python_type'] = column['custom_type']
            col_value = mro.data_types.__dict__[column['data_type']](**kwargs)
            # Add attributes to class
            setattr(dynamic_table_class, column['column_name'], col_value)
            # Wrap foreign-key columns so the referenced row is reachable.
            if column.get('foreign_key') is not None:
                setattr(dynamic_table_class,
                        column['column_name'],
                        mro.foreign_keys.foreign_key_data_type(column['column_name'],
                                                               col_value,
                                                               f'mro.{column["foreign_key"][0]}',
                                                               column["foreign_key"][1]))
        for foreign_key_target in foreign_key_targets:
            foreign_key_name = f"{foreign_key_target[0]}s"
            # if they happen to have a column the same name as the reference list don't add it
            if foreign_key_name not in [column['column_name'] for column in table_columns]:
                setattr(dynamic_table_class,
                        foreign_key_name,
                        mro.foreign_keys.foreign_key_reference(foreign_key_target[2],
                                                               f"mro.{foreign_key_target[0]}",
                                                               foreign_key_target[1]))
        setattr(mro, dynamic_table_class.__name__, dynamic_table_class)
        dynamic_table_class._register()
|
Dark-Bob/mro | table_test.py | <gh_stars>1-10
import pytest
import mro
import connection as con
from datetime import datetime, date
from threading import Thread, Event
class table1(mro.table.table):
    """Hand-written mirror of the class mro generates for the ``table1`` table.

    Bug fix: ``__init__`` previously guarded the insert with
    ``if not mro.table.disable_insert():``. ``disable_insert`` is a
    ``@contextmanager`` factory, so calling it returns a (truthy) context
    manager object and the insert branch could never execute. The guard now
    reads the thread-local flag directly, matching the check used inside
    ``mro.table.table.insert`` itself.
    """
    column1 = mro.data_types.integer('column1', 0, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    column2 = mro.data_types.varchar('column2', 1, 20, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    column3 = mro.data_types.integer('column3', 1, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    def __init__(self, **kwargs):
        # Defaults mirroring the table's column defaults.
        self.__dict__['column1'] = 1
        self.__dict__['column2'] = None
        self.__dict__['column3'] = None
        for k, v in kwargs.items():
            if not hasattr(self, k):
                raise ValueError("{} does not have an attribute {}".format(self.__class__.__name__, k))
            self.__dict__[k] = v
        # Only hit the database when insert suppression is off (i.e. we are
        # not hydrating a row that was just selected).
        if not mro.table.table._insert.disabled:
            obj = super().insert(**kwargs)
            # Copy back database-generated values (serial ids, defaults).
            for c in table1._get_value_on_insert_columns:
                self.__dict__[c] = obj.__dict__[c]
    def update(self, **kwargs):
        """Update this row in the database, matched on the primary key columns."""
        primary_key_columns = table1._primary_key_columns
        primary_key_column_values = [self.__dict__[c] for c in primary_key_columns]
        super().update(primary_key_columns, primary_key_column_values, **kwargs)
# Cache primary-key and get-value-on-insert column metadata on the class.
table1._register()
@pytest.fixture
def connection_function(request):
    """Rebuild the three test tables, seed a few rows, and return a factory
    that opens fresh database connections. mro is disconnected on teardown."""
    setup_conn = con.connect()
    request.addfinalizer(mro.disconnect)
    cur = setup_conn.cursor()
    con.drop_tables()
    cur.execute("create table table1 (id serial primary key, created_date date not null default current_date, column1 integer default 1, column2 varchar(20), column3 integer, column4 float default 1.2, column5 bool default False, column6 oid default 999)")
    cur.execute("create table table2 (column1 varchar(20), column2 integer, column3 varchar(20))")
    cur.execute("create table table3 (created_datetime timestamp not null default current_timestamp, created_time time not null default current_time, column1 varchar(20) default 'ABC DEF', column2 integer, column3 varchar(20), column4 jsonb, column5 bool, column6 oid)")
    cur.execute("insert into table1 (column1, column2, column3, column6) values (%s,%s,%s,%s)", (1, 'Hello World!', 2, 777))
    cur.execute("insert into table1 (column1, column2, column3) values (%s,%s,%s)", (2, 'Hello World2!', 3))
    cur.execute("insert into table2 values (%s,%s,%s)", ('Hello World3!', 4, 'Hello World4!'))
    setup_conn.commit()
    setup_conn.close()
    return lambda: con.connect()
@pytest.fixture
def connection_function_for_threadsafe_test(request):
    """Seed two identical 3000-row tables for the concurrent-select test and
    return a factory that opens fresh database connections."""
    setup_conn = con.connect()
    request.addfinalizer(mro.disconnect)
    cur = setup_conn.cursor()
    con.drop_tables()
    cur.execute("create table table1 (id serial primary key, created_date date not null default current_date, column1 integer default 1, column2 varchar(20), column3 integer, column4 float default 1.2, column5 bool default False, column6 oid default 999)")
    cur.execute("create table table2 (id serial primary key, created_date date not null default current_date, column1 integer default 1, column2 varchar(20), column3 integer, column4 float default 1.2, column5 bool default False, column6 oid default 999)")
    for row_index in range(3000):
        cur.execute("insert into table1 (column1, column2, column3, column6) values (%s,%s,%s,%s)", (row_index, 'Hello World!', 2, 777))
        cur.execute("insert into table2 (column1, column2, column3, column6) values (%s,%s,%s,%s)", (row_index, 'Hello World!', 2, 777))
    setup_conn.commit()
    setup_conn.close()
    return lambda: con.connect()
class TestTable(object):
    """Integration tests for select/insert/update/delete on reflected tables.

    Fix: integer comparisons previously used ``is``/``is not``, which test
    object identity and only passed by accident of CPython's small-int
    caching (and emit SyntaxWarning on Python 3.8+). All such assertions now
    use ``==``/``!=``. Dead assignments of ``insert_many``'s None return and
    unused locals were also removed.
    """
    def test_table_reflection(self, connection_function):
        mro.load_database(connection_function)
        tables = mro.table1.select()
        assert len(tables) == 2
        assert isinstance(tables[0].created_date, date)
        assert tables[0].column1 == 1
        assert tables[0].column2 == 'Hello World!'
        assert tables[0].column3 == 2
        assert tables[0].column6 == 777
        assert tables[1].column1 == 2
        assert tables[1].column2 == 'Hello World2!'
        assert tables[1].column3 == 3
        tables = mro.table2.select()
        assert len(tables) == 1
        assert tables[0].column1 == 'Hello World3!'
        assert tables[0].column2 == 4
        assert tables[0].column3 == 'Hello World4!'
    def test_table_select_filter(self, connection_function):
        mro.load_database(connection_function)
        tables = mro.table1.select('column1 = %d' % 2)
        assert len(tables) == 1
        assert tables[0].column1 == 2
        assert tables[0].column2 == 'Hello World2!'
        assert tables[0].column3 == 3
        tables = mro.table2.select("column1 = '%d'" % 1)
        assert len(tables) == 0
    def test_table_select(self, connection_function):
        mro.load_database(connection_function)
        assert len(mro.table1.select()) == 2
        assert len(mro.table1.select("column1=1")) == 1
    def test_table_select_pyformat_syntax(self, connection_function):
        mro.load_database(connection_function)
        initial_tables = mro.table1.select()
        injection_string = "1; insert into table1(column1, column2, column3) values(3,'Hello World3!',4); select * from table1"
        # Check we throw an exception if the input variable contains an injection string
        with pytest.raises(Exception):
            mro.table1.select("column1 = %s;", injection_string)
        # Check that since the attempted injection we haven't been able to insert another row using the select with user input
        current_tables = mro.table1.select()
        assert len(current_tables) == len(initial_tables)
        # Check the positive case, that we can select using pyformat syntax
        assert len(mro.table1.select("column1 = %s", 1)) == 1
    def test_table_select_count(self, connection_function):
        mro.load_database(connection_function)
        assert mro.table1.select_count() == 2
        assert mro.table1.select_count("column1=1") == 1
    def test_table_select_count_pyformat_syntax(self, connection_function):
        mro.load_database(connection_function)
        injection_string = "1; insert into table1(column1, column2, column3) values(3,'Hello World3!',4); select count(*) from table1"
        initial_table_count = mro.table1.select_count()
        with pytest.raises(Exception):
            mro.table1.select_count("column1 = %s;", injection_string)
        # Check that since the attempted injection we haven't been able to insert another row using the select count with user input
        current_table_count = mro.table1.select_count()
        assert current_table_count == initial_table_count
        # Check the positive case, that we can select count with pyformat syntax
        assert mro.table1.select_count("column1 = %s", 1) == 1
    def test_table_select_one(self, connection_function):
        mro.load_database(connection_function)
        assert mro.table1.select_one("column1 = 1").column1 == 1
        assert mro.table2.select_one().column2 == 4
    def test_table_select_one_pyformat_syntax(self, connection_function):
        mro.load_database(connection_function)
        injection_string = "1; insert into table1(column1, column2, column3) values(3,'Hello World3!',4); select * from table1"
        initial_table_count = mro.table1.select_count()
        with pytest.raises(Exception):
            mro.table1.select_one("column1 = %s;", injection_string)
        # Check that since the attempted injection we haven't been able to insert another row using the select count with user input
        current_table_count = mro.table1.select_count()
        assert current_table_count == initial_table_count
        # Check the positive case we can select one using this pyformat syntax
        assert mro.table1.select_one("column1 = %s", 1).column1 == 1
    def test_table_delete_filter(self, connection_function):
        mro.load_database(connection_function)
        table_count = mro.table1.select_count()
        tables = mro.table1.select('column1 = %d' % 2)
        assert len(tables) == 1
        assert tables[0].column1 == 2
        assert tables[0].column2 == 'Hello World2!'
        assert tables[0].column3 == 3
        mro.table1.delete('column1 = %d' % 2)
        assert table_count - 1 == mro.table1.select_count()
    def test_table_delete(self, connection_function):
        mro.load_database(connection_function)
        mro.table1.delete('column1 = 1')
        assert mro.table1.select_count('column1 = 1') == 0
        assert mro.table1.select_count() != 0
        mro.table1.delete()
        assert mro.table1.select_count() == 0
    def test_table_delete_pyformat_syntax(self, connection_function):
        mro.load_database(connection_function)
        assert mro.table1.select_count("column1=1") != 0
        mro.table1.delete('column2 = %s',
                          "1; insert into table1(column1,column2,column3) values(4, 'row in on delete', 6);")
        # Check we didn't delete as the column didn't match the whole string, also check we didn't insert a new row into the table
        assert mro.table1.select_count("column1 = 1") != 0
        assert mro.table1.select_count("column2 = 'row in on delete'") == 0
        # Check the positive case, we can delete using the pyformat syntax
        mro.table1.delete("column1=%s", 1)
        assert mro.table1.select_count("column1=1") == 0
    def test_create_object(self, connection_function):
        mro.load_database(connection_function)
        table_count = mro.table1.select_count()
        table = mro.table1(column1=3, column2='Hi!', column3=11, column6=10)
        assert table.column1 == 3
        assert table.column2 == 'Hi!'
        assert table.column3 == 11
        assert table.column6 == 10
        table = mro.table1(column2='Hi2!')
        assert table.column1 == 1
        assert table.column2 == 'Hi2!'
        assert table.column3 is None
        kwargs = {'column1': 5, 'column2': 'Hi3!', 'column3': 78, 'column6': 22}
        table = mro.table1(**kwargs)
        assert table.column1 == 5
        assert table.column2 == 'Hi3!'
        assert table.column3 == 78
        assert table.column6 == 22
        tables = mro.table1.select()
        assert table_count + 3 == len(tables)
        assert tables[4].column1 == 5
        assert tables[4].column2 == 'Hi3!'
        assert tables[4].column3 == 78
        assert tables[4].column6 == 22
    def test_insert_check_default_values(self, connection_function):
        mro.load_database(connection_function)
        table = mro.table1(column1=3, column2='Hi!')
        assert table.column4 == 1.2
        assert table.column5 is False
        assert table.column6 == 999
        table = mro.table1(column1=3, column2='Hi!', column3=11, column4=5.7, column5=True, created_date=datetime.now().date(), column6=88)
        assert table.column4 == 5.7
        assert table.column5 is True
        assert table.column6 == 88
        tables = mro.table1.select()
        for table in tables:
            assert isinstance(table.id, int)
            assert table.id is not None
            assert isinstance(table.created_date, date)
            assert table.created_date is not None
            assert isinstance(table.column1, int)
            assert table.column1 is not None
            assert isinstance(table.column2, str)
            assert table.column2 is not None
            assert table.column3 is None or isinstance(table.column3, int)
            assert isinstance(table.column5, bool)
            assert isinstance(table.column6, int)
        table = mro.table3(column3='Hi56!', column4='{"data": 1}')
        table = mro.table3.select_one("column3 = 'Hi56!'")
        assert isinstance(table.column1, str)
        assert table.column1 == 'ABC DEF'
        assert isinstance(table.column3, str)
        assert table.column3 is not None
        assert isinstance(table.column4, str)
        assert table.column4 is not None
        assert table.column5 is None
        assert table.column6 is None
    def test_insert_many(self, connection_function):
        mro.load_database(connection_function)
        mro.table1.delete()
        # insert_many returns None; its effect is checked via select below.
        mro.table1.insert_many(
            ['column1', 'column2', 'column3'],
            [
                [1, 'Hi!', 7],
                [2, 'Hi2!', 13],
                [3, 'Hi3!', 21]
            ])
        tables = mro.table1.select()
        assert 3 == len(tables)
        assert tables[0].column1 == 1
        assert tables[0].column2 == 'Hi!'
        assert tables[0].column3 == 7
        assert tables[1].column1 == 2
        assert tables[1].column2 == 'Hi2!'
        assert tables[1].column3 == 13
        assert tables[2].column1 == 3
        assert tables[2].column2 == 'Hi3!'
        assert tables[2].column3 == 21
    def test_insert_with_only_primary_key_no_kwargs(self, connection_function):
        mro.load_database(connection_function)
        # Constructing with no kwargs must insert a row of column defaults.
        mro.table1()
    def test_disable_insert_thread_safe(self, connection_function_for_threadsafe_test):
        mro.load_database(connection_function_for_threadsafe_test)
        closedown_event = Event()
        thread1 = Thread(target=simple_select, args=(mro.table1.select, "thread1", closedown_event))
        thread1.start()
        thread2 = Thread(target=simple_select, args=(mro.table2.select, "thread2", closedown_event))
        thread2.start()
        thread3 = Thread(target=simple_select, args=(mro.table1.select, "thread3", closedown_event))
        thread3.start()
        thread1.join()
        thread2.join()
        thread3.join()
        # Any worker that hit an exception sets the event; the test fails then.
        successful = True
        if closedown_event.wait(0):
            successful = False
        assert successful
def simple_select(select_function, name, closedown_event):
    """Worker loop: call *select_function* ten times, logging every third pass.

    Returns early if *closedown_event* is already set; on any exception, logs
    it, sets the event so sibling workers stop, and returns.
    """
    iterations = 10
    log_every = 3
    for count in range(iterations):
        if closedown_event.wait(0):
            return
        if count % log_every == 0:
            print(f"{name} Iterated {count} times")
        try:
            select_function()
        except Exception as ex:
            print(f"Exception in {name}: {str(ex)}")
            closedown_event.set()
            return
if __name__ == '__main__':
    # Runs a single targeted test by default; uncomment to run the whole module.
    #pytest.main([__file__])
    pytest.main([__file__ + '::TestTable::test_insert_with_only_primary_key_no_kwargs'])
Dark-Bob/mro | mro_test.py | <filename>mro_test.py
import os
import pytest
import psycopg2
import mro
import connection as con
@pytest.fixture(scope="module")
def connection(request):
    """Module-scoped setup: rebuild tables 1-3, seed rows, then load mro.

    Note: the returned connection object has already been closed; mro opens
    its own connections via the factory passed to load_database.
    """
    connection = con.connect()
    request.addfinalizer(mro.disconnect)
    cursor = connection.cursor()
    con.drop_tables()
    cursor.execute("create table table1 (id serial primary key, column1 integer, column2 varchar(20), column3 integer)")
    cursor.execute("create table table2 (id serial primary key, column1 varchar(20), column2 integer, column3 varchar(20))")
    cursor.execute("create table table3 (id serial primary key)")
    cursor.execute("insert into table1 (column1, column2, column3) values (%s,%s,%s)", (1,'Hellow World!', 2))
    cursor.execute("insert into table1 (column1, column2, column3) values (%s,%s,%s)", (2,'Hellow World2!', 3))
    cursor.execute("insert into table2 (column1, column2, column3) values (%s,%s,%s)", ('Hellow World3!', 4, 'Hellow World4!'))
    connection.commit()
    connection.close()
    mro.load_database(lambda: con.connect())
    return connection
class TestMro(object):
    """End-to-end tests for database reflection and per-column type enforcement."""
    def test_table_reflection(self, connection):
        """Reflected classes carry their table names and reject wrongly typed assignments."""
        table1 = mro.table1.select_one()
        table2 = mro.table2.select_one()
        assert table1.__class__.__name__ == 'table1'
        assert table2.__class__.__name__ == 'table2'
        text = 'Hello World!'
        number = 1
        # integer column must reject a str ...
        with pytest.raises(TypeError) as excinfo:
            table1.column1 = text
        assert excinfo.value.args[0] == 'Value should be of type [int] not [{}]'.format(text.__class__.__name__ )
        table1.column1 = number
        table1.column2 = text
        assert table1.column1 == number
        assert table1.column2 == text
        # ... and varchar column must reject an int.
        with pytest.raises(TypeError) as excinfo:
            table2.column1 = number
        assert excinfo.value.args[0] == 'Value should be of type [str] not [{}]'.format(number.__class__.__name__ )
        table2.column1 = text
        table2.column2 = number
        assert table2.column1 == text
        assert table2.column2 == number
    def test_recovery_from_failed_insert(self, connection):
        """A failed insert (duplicate key) must not poison the connection for later inserts."""
        mro.table3.insert(id=1)
        with pytest.raises(psycopg2.IntegrityError) as e:
            mro.table3.insert(id=1)
        assert e.value.args[0].startswith('duplicate key value violates unique constraint')
        mro.table3.insert(id=2)
    def test_reconnect(self, connection):
        # NOTE(review): the broad except swallows any failure, so this test can
        # never fail; presumably it manually exercises the auto-reconnect path
        # triggered by assigning after disconnect — confirm intent.
        try:
            table = mro.table1.select_one()
            mro.disconnect()
            table.column1 = 1
        except Exception as e:
            print(e)
if __name__ == '__main__':
pytest.main([__file__]) |
Dark-Bob/mro | mro/table.py | from contextlib import contextmanager
import logging
import threading
import time
import psycopg2
from tenacity import before_sleep_log, retry, stop_after_attempt, wait_random_exponential
import mro.connection as con
import mro.data_types
import mro.foreign_keys
logger = logging.getLogger(__name__)
# Serializes all SQL execution: the single module-global connection is shared
# across threads and psycopg2 cursors on it must not be used concurrently.
# TODO replace with green thread friendly, thread local storage
psycopg2_lock = threading.Lock()
# Maximum reconnect attempts before a psycopg2.InterfaceError is re-raised.
MAX_ATTEMPTS = 3
@contextmanager
def disable_insert():
    """Suppress SQL inserts/updates for the current thread while active.

    Used when hydrating objects from selected rows: constructing a mapped
    object inside this context only populates attributes, without writing
    back to the database. The flag is thread-local (see insert_local).
    """
    table._insert.disabled = True
    try:
        yield
    finally:
        # Always re-enable inserts, even if hydration raised.
        table._insert.disabled = False
class insert_local(threading.local):
    # Per-thread flag read by table.insert/table.update; toggled by disable_insert().
    disabled = False
class table(object):
    """Base class for mapped database tables.

    Subclasses declare ``mro.data_types`` descriptors as class attributes and
    call :meth:`_register` once so column metadata is cached on the class.
    All SQL goes through :meth:`_execute_sql`, which serializes access with
    the module-level ``psycopg2_lock`` and reconnects on
    ``psycopg2.InterfaceError`` (up to ``MAX_ATTEMPTS`` times).
    """
    # Thread-local insert-suppression flag (see disable_insert()).
    _insert = insert_local()
    @classmethod
    def _register(cls):
        """Scan class attributes and cache column metadata on the class.

        Populates ``_get_value_on_insert_columns`` (columns whose values the
        database generates, e.g. serials/defaults), its comma-joined string
        form, and ``_primary_key_columns``.
        """
        data_types = []
        foreign_keys = []  # NOTE(review): collected but unused below — confirm whether intentional
        for key, value in cls.__dict__.items():
            if isinstance(value, mro.data_types.database_type):
                data_types.append(value)
            if isinstance(value, mro.foreign_keys.foreign_key_data_type):
                foreign_keys.append(value)
        cls._get_value_on_insert_columns = [d.name for d in data_types if d.get_value_on_insert]
        cls._get_value_on_insert_columns_str = ', '.join(cls._get_value_on_insert_columns)
        cls._primary_key_columns = [d.name for d in data_types if d.is_primary_key]
    @classmethod
    def _get_cursor(cls):
        """Return a cursor on the shared connection, reconnecting on InterfaceError."""
        retry_count = 0
        while True:
            try:
                return con.connection.cursor()
            except psycopg2.InterfaceError:
                # Connection dropped: back off linearly, reconnect, retry.
                if retry_count == MAX_ATTEMPTS:
                    raise
                logger.exception("Connection failure while getting cursor, will attempt to reconnect.")
                time.sleep(retry_count * 1)
                con.reconnect()
            except Exception:
                logger.exception("Exception while getting sql cursor.")
                raise
            retry_count += 1
    @classmethod
    def _execute_sql(cls, sql, values=None, cursor=None):
        """Execute *sql* under the module lock and commit; return the cursor.

        InterfaceError triggers reconnect-and-retry (up to MAX_ATTEMPTS).
        Any other exception is rolled back and re-raised so the shared
        connection stays usable for subsequent statements.
        """
        with psycopg2_lock:
            if cursor is None:
                cursor = cls._get_cursor()
            retry_count = 0
            retry = True
            while retry:
                try:
                    cursor.execute(sql, values)
                    con.connection.commit()
                    retry = False
                except psycopg2.InterfaceError:
                    if retry_count == MAX_ATTEMPTS:
                        raise
                    logger.exception("Connection failure will attempt to reconnect [{}] {}".format(sql, values))
                    time.sleep(retry_count * 1)
                    con.reconnect()
                    # The old cursor belongs to the dead connection; replace it.
                    cursor = con.connection.cursor()
                except Exception:
                    logger.exception("Exception while executing sql [{}] {}".format(sql, values))
                    try:
                        con.connection.rollback()
                    except psycopg2.InterfaceError:
                        logger.exception("Connection failure on attempt to rollback [{}] {}".format(sql, values))
                    raise
                retry_count += 1
            return cursor
    @staticmethod
    def _convert_numpy_types_to_python(values):
        """Replace numpy scalar values in *values* (in place) with native Python types."""
        for k, v in values.items():
            values[k] = mro.data_types.convert_numpy_to_python(v)
        return values
    @classmethod
    @retry(wait=wait_random_exponential(), stop=stop_after_attempt(MAX_ATTEMPTS),
           reraise=True, before_sleep=before_sleep_log(logger, logging.WARNING))
    def select(cls, clause=None, *format_args):
        """Return all rows (optionally filtered by SQL *clause*) as mapped objects.

        *format_args* are passed as pyformat parameters for any %s in *clause*.
        """
        format_args = list(format_args)
        if clause is None:
            sql = "select * from \"{}\";".format(cls.__name__)
        else:
            sql = "select * from \"{}\" where {};".format(cls.__name__, clause)
        cursor = cls._execute_sql(sql, values=format_args)
        column_names = [column.name for column in cursor.description]
        # Hydrate objects without triggering fresh inserts.
        with disable_insert():
            objs = []
            for row in cursor:
                kwargs = {}
                for index in range(len(column_names)):
                    kwargs[column_names[index]] = row[index]
                objs.append(cls(**kwargs))
        return objs
    @classmethod
    @retry(wait=wait_random_exponential(), stop=stop_after_attempt(MAX_ATTEMPTS),
           reraise=True, before_sleep=before_sleep_log(logger, logging.WARNING))
    def select_count(cls, clause=None, *format_args):
        """Return the row count (optionally filtered by SQL *clause*)."""
        format_args = list(format_args)
        if clause is None:
            sql = "select count(*) from \"{}\";".format(cls.__name__)
        else:
            sql = "select count(*) from \"{}\" where {};".format(cls.__name__, clause)
        cursor = cls._execute_sql(sql, values=format_args)
        for row in cursor:
            return row[0]
    @classmethod
    @retry(wait=wait_random_exponential(), stop=stop_after_attempt(MAX_ATTEMPTS),
           reraise=True, before_sleep=before_sleep_log(logger, logging.WARNING))
    def select_one(cls, clause=None, *format_args):
        """Return the first matching row as a mapped object, or None if no rows match."""
        format_args = list(format_args)
        if clause is None:
            sql = "select * from \"{}\" limit 1;".format(cls.__name__)
        else:
            sql = "select * from \"{}\" where {} limit 1;".format(cls.__name__, clause)
        cursor = cls._execute_sql(sql, values=format_args)
        column_names = [column.name for column in cursor.description]
        obj = None
        with disable_insert():
            for row in cursor:
                kwargs = {}
                for index in range(len(column_names)):
                    kwargs[column_names[index]] = row[index]
                obj = cls(**kwargs)
        return obj
    @classmethod
    def delete(cls, clause=None, *format_args):
        """Delete rows matching *clause*; with no clause, delete every row."""
        format_args = list(format_args)
        if clause is None:
            sql = "delete from \"{}\";".format(cls.__name__)
        else:
            sql = "delete from \"{}\" where {};".format(cls.__name__, clause)
        cls._execute_sql(sql, values=format_args)
    @classmethod
    def insert(cls, **kwargs):
        """Insert a row and return the mapped object.

        Returns None (and does nothing) while inserts are disabled for this
        thread — i.e. during row hydration in select()/select_one().
        """
        if table._insert.disabled:
            return
        keys = kwargs.keys()
        if len(keys) == 0:
            # No values supplied: insert a row of column defaults.
            cols = 'default'
            vals_str = ''
            vals = ()
        else:
            kwargs = table._convert_numpy_types_to_python(kwargs)
            cols = '({})'.format(', '.join(list(keys)))
            vals = list(kwargs.values())
            vals_str_list = ["%s"] * len(vals)
            vals_str = ' ({})'.format(', '.join(vals_str_list))
        if cls._get_value_on_insert_columns_str:
            # Fetch database-generated values (serial ids, defaults) back into kwargs.
            sql = "insert into \"{t}\" {c} values{v} returning {c2}".format(
                t=cls.__name__, c=cols, v=vals_str, c2=cls._get_value_on_insert_columns_str)
            cursor = cls._execute_sql(sql, vals)
            with disable_insert():
                for row in cursor:
                    for index in range(len(cls._get_value_on_insert_columns)):
                        kwargs[cls._get_value_on_insert_columns[index]] = row[index]
                obj = cls(**kwargs)
        else:
            sql = "insert into \"{t}\" {c} values{v}".format(
                t=cls.__name__, c=cols, v=vals_str)
            cls._execute_sql(sql, vals)
            with disable_insert():
                obj = cls(**kwargs)
        return obj
    @classmethod
    def insert_many(cls, cols, values):
        """Bulk insert: *cols* is a list of column names, *values* a list of rows.

        All rows are mogrified into a single multi-row INSERT statement.
        """
        cursor = cls._get_cursor()
        cols = ', '.join(cols)
        vals_str_list = ["%s"] * len(values[0])
        vals_str = "({})".format(", ".join(vals_str_list))
        # speed up of
        # aggregate_values = ','.join(cursor.mogrify(vals_str, x).decode("utf-8") for x in values)
        aggregate_values = cursor.mogrify(
            ','.join([vals_str for i in range(len(values))]),
            [item for sublist in values for item in sublist]).decode("utf-8")
        sql = "insert into \"{}\" ({}) values {}".format(
            cls.__name__, cols, aggregate_values)
        cls._execute_sql(sql, cursor=cursor)
    @classmethod
    def update(cls, match_columns, match_column_values, **kwargs):
        """Update rows where *match_columns* equal *match_column_values* with **kwargs.

        Does nothing while inserts are disabled (row hydration). Raises
        ValueError when no match columns are supplied.
        """
        if table._insert.disabled:
            return
        if not match_columns:
            raise ValueError("Update needs columns to match to update, is your table missing a primary key?")
        vals = list(kwargs.values()) + match_column_values
        update_column_str = ", ".join([c + '=%s' for c in kwargs.keys()])
        match_column_str = " and ".join([c + '=%s' for c in match_columns])
        sql = "update \"{t}\" set {c} where {c2}".format(
            t=cls.__name__, c=update_column_str, c2=match_column_str)
        cls._execute_sql(sql, vals)
    @classmethod
    def update_many(cls, match_columns, match_column_values, update_columns, update_column_values):
        """Not implemented; the comment below sketches the intended UPDATE ... FROM (VALUES ...) form."""
        # update test as t set
        # column_a = c.column_a
        # from (values
        # ('123', 1),
        # ('345', 2)
        # ) as c(column_b, column_a)
        # where c.column_b = t.column_b;
        raise Exception('Not implemented')
Dark-Bob/mro | connection.py | <gh_stars>1-10
import os
import psycopg2
import mro.helpers
# Use a local my_connection.py override when present; otherwise fall back to
# the CI database. (``global`` at module scope is a no-op; kept as-is.)
global get_connection
if os.path.isfile('my_connection.py'):
    import my_connection
    get_connection = my_connection.get_connection
else:
    get_connection = lambda: psycopg2.connect(database='circle_test', user='ubuntu', password="<PASSWORD>")
def connect():
    """Open a new database connection, publish it as the module-global
    ``connection``, and return it."""
    global connection
    connection = get_connection()
    return connection
def disconnect():
    # Closes the most recently opened module-global connection (see connect()).
    connection.close()
def drop_tables():
    """Drop every table, stored procedure and function in the public schema.

    NOTE(review): identifier names are concatenated straight into the SQL
    text; acceptable only because they come from the database's own
    information_schema catalog, not from user input.
    """
    # clear tables
    cursor = connection.cursor()
    cursor.execute("select * from information_schema.tables where table_schema='public';")
    connection.commit()
    for table in cursor:
        # table[2] is information_schema.tables.table_name.
        cursor2 = connection.cursor()
        cursor2.execute("drop table " + table[2] + " cascade;")
        connection.commit()
    # clear stored procs and functions
    cursor.execute("select * from information_schema.routines where routine_schema = 'public'")
    connection.commit()
    column_name_index_map = mro.helpers.create_column_name_index_map(cursor)
    for routine in cursor:
        cursor2 = connection.cursor()
        cursor2.execute(f"drop {routine[column_name_index_map['routine_type']]} {routine[column_name_index_map['routine_name']]}")
        connection.commit()
Dark-Bob/mro | data_types_test.py | import pytest
import mro
import connection as con
import psycopg2
from datetime import datetime, date, time
import uuid
# Shorthand marker for known-failing tests.
xfail = pytest.mark.xfail
class test_type(object):
    # Hand-built set of column descriptors mirroring the ``test_type`` table.
    # NOTE(review): the tests below use the reflected ``mro.test_type`` class,
    # not this one — presumably kept as documentation of the descriptor wiring.
    varchar = mro.data_types.varchar('varchar', 0, 15, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    varchar2 = mro.data_types.varchar('varchar2', 1, 20, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    varchar_not_null = mro.data_types.varchar('varchar_not_null', 2, 15, not_null=True, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    varchar_not_updateable = mro.data_types.varchar('varchar_not_updateable', 3, 15, not_null=False, is_updateable=False, get_value_on_insert=False, is_primary_key=False)
    integer = mro.data_types.integer('integer', 4, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    boolean = mro.data_types.boolean('boolean', 5, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
@pytest.fixture(scope="module")
def connection(request):
    """Module-scoped setup: create the ``test_type`` table covering one column
    per supported data type, then load mro. The returned connection is already
    closed; mro opens its own via the factory passed to load_database."""
    connection = con.connect()
    request.addfinalizer(mro.disconnect)
    cursor = connection.cursor()
    con.drop_tables()
    # TODO re-add once custom enum types are supported, currently only custom composite types are
    # cursor.execute("""DROP TYPE IF EXISTS call_outcome""")
    # cursor.execute("""CREATE TYPE call_outcome AS ENUM ('No Answer', 'Answer Machine', 'Hung Up', 'Busy', 'Sale')""")
    cursor.execute("""create table test_type (
    id serial primary key,
    "varchar" varchar(15),
    "varchar2" varchar(20),
    "varchar_not_null" varchar(20) not null default 'abc',
    "integer" integer,
    "boolean" boolean,
    "time" time,
    "date" date,
    "timestamp" timestamp,
    "json" json,
    "jsonb" jsonb,
    "text" text default E'two\nlines',
    "double" double precision,
    "real" real,
    "uuid" uuid,
    "bytea" bytea,
    "oid" oid);""")
    # "custom_enum" call_outcome);""")
    connection.commit()
    connection.close()
    mro.load_database(lambda: con.connect())
    return connection
class TestDataTypes(object):
    """Type-enforcement tests for each supported column data type.

    Fix in ``test_varchar``: ``message`` was reassigned to ``'Hey'`` *before*
    the length assertion, so the expected length (3) could never match the
    length of the 31-character value that actually raised. The dead
    reassignment is removed so the assertion checks the raising value.
    """
    def test_varchar(self, connection):
        obj = mro.test_type(varchar = 'init')
        message = 'sldkhfaskjf ashdkfjahs dfkjashd'
        with pytest.raises(ValueError) as excinfo:
            obj.varchar = message
        # The error must report the length of the value that was assigned.
        assert excinfo.value.args[0] == 'Value length [{}] should not exceed [{}]'.format(len(message), 15)
        message = mro.test_type(varchar = 'init')
        with pytest.raises(TypeError) as excinfo:
            obj.varchar = message
        assert excinfo.value.args[0] == 'Value should be of type [str] not [{}]'.format(message.__class__.__name__)
        message = 'Hello World!'
        obj.varchar = message
        assert obj.varchar == message
    def test_multi_object(self, connection):
        # Column descriptors must keep per-instance, per-column state separate.
        obj = mro.test_type(varchar = 'init')
        obj2 = mro.test_type(varchar = 'init')
        obj.varchar = '1'
        obj.varchar2 = '2'
        assert obj.varchar != obj.varchar2
        obj.varchar = '1'
        obj2.varchar = '2'
        assert obj.varchar != obj2.varchar
    def test_not_null(self, connection):
        obj = mro.test_type(varchar = 'init')
        assert obj.varchar_not_null == 'abc'
        with pytest.raises(ValueError) as excinfo:
            obj.varchar_not_null = None
        assert excinfo.value.args[0] == 'The value of [{}] cannot be null.'.format('varchar_not_null')
    @xfail
    def test_not_updateable(self, connection):
        # Known-failing placeholder: updateability enforcement is not implemented.
        raise Exception("Not implemented")
        obj = mro.test_type(varchar = 'init')
        obj.varchar = '1'
        assert obj.varchar == '1'
        with pytest.raises(PermissionError) as excinfo:
            obj.varchar_not_updateable = '2'
        assert excinfo.value.args[0] == 'The value of [{}] is not updateable.'.format('varchar_not_updateable')
    def test_integer(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.integer = 1
        assert obj.integer == 1
        with pytest.raises(TypeError) as excinfo:
            obj.integer = '1'
        assert excinfo.value.args[0] == 'Value should be of type [int] not [{}]'.format(str.__name__)
    def test_boolean(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.boolean = True
        assert obj.boolean == True
        with pytest.raises(TypeError) as excinfo:
            obj.boolean = 1
        assert excinfo.value.args[0] == 'Value should be of type [bool] not [{}]'.format(int.__name__)
    def test_time(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.time = time(17, 20)
        assert obj.time == time(17, 20)
        with pytest.raises(TypeError) as excinfo:
            obj.time = datetime(2015, 12, 21, 17, 20)
        assert excinfo.value.args[0] == 'Value should be of type [time] not [{}]'.format(datetime.__name__)
    def test_date(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.date = date(2015, 12, 21)
        assert obj.date == date(2015, 12, 21)
        with pytest.raises(TypeError) as excinfo:
            obj.date = datetime(2015, 12, 21, 17, 20)
        assert excinfo.value.args[0] == 'Value should be of type [date] not [{}]'.format(datetime.__name__)
    def test_datetime(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.timestamp = datetime(2015, 12, 21, 17, 20)
        assert obj.timestamp == datetime(2015, 12, 21, 17, 20)
        with pytest.raises(TypeError) as excinfo:
            obj.timestamp = date(2015, 12, 21)
        assert excinfo.value.args[0] == 'Value should be of type [datetime] not [{}]'.format(date.__name__)
    def test_json(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.json = '{"key": "value"}'
        assert obj.json == '{"key": "value"}'
        # Invalid JSON is rejected by the database, not by the descriptor.
        with pytest.raises(psycopg2.DataError) as excinfo:
            obj.json = 'this is just text'
        assert excinfo.value.args[0].startswith('invalid input syntax for type json')
    def test_jsonb(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.jsonb = '{"key": "value"}'
        assert obj.jsonb == '{"key": "value"}'
        with pytest.raises(psycopg2.DataError) as excinfo:
            obj.jsonb = 'this is just text'
        assert excinfo.value.args[0].startswith('invalid input syntax for type json')
    def test_text(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.text = '1'
        assert obj.text == '1'
        with pytest.raises(TypeError) as excinfo:
            obj.text = 1
        assert excinfo.value.args[0] == 'Value should be of type [str] not [{}]'.format(int.__name__)
    def test_double(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.double = 2.0
        assert obj.double == 2.0
        with pytest.raises(TypeError) as excinfo:
            obj.double = '1'
        assert excinfo.value.args[0] == 'Value should be of type [float] not [{}]'.format(str.__name__)
    def test_real(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.real = 2.0
        assert obj.real == 2.0
        with pytest.raises(TypeError) as excinfo:
            obj.real = '1'
        assert excinfo.value.args[0] == 'Value should be of type [float] not [{}]'.format(str.__name__)
    @xfail
    def test_uuid(self, connection):
        # Known-failing: uuid support incomplete (also compares two freshly
        # generated uuids, which can never be equal).
        obj = mro.test_type(varchar = 'init')
        obj.uuid = uuid.uuid4()
        assert obj.uuid == uuid.uuid4()
        with pytest.raises(TypeError) as excinfo:
            obj.uuid = 'fail'
        assert excinfo.value.args[0] == 'Value should be of type [uuid] not [{}]'.format(str.__name__)
    @xfail
    def test_custom_enum(self, connection):
        # Known-failing: custom enum types are not supported yet (see fixture TODO).
        obj = mro.test_type(varchar='init')
        obj.custom_enum = 'Busy'
        assert obj.custom_enum == 'Busy'
        with pytest.raises(TypeError) as excinfo:
            obj.custom_enum = 'Not Valid'
        assert excinfo.value.args[0] == 'Value should be of type [custom_enum] not [{}]'.format(str.__name__)
    def test_bytea(self, connection):
        bytea = 'my byte array'.encode('utf-8')
        obj = mro.test_type(bytea=bytea)
        obj.bytea = bytea
        assert obj.bytea == bytea
        with pytest.raises(TypeError) as excinfo:
            obj.bytea = 'Not Valid'
        assert excinfo.value.args[0] == 'Value should be of type [bytes] not [{}]'.format(str.__name__)
    def test_oid(self, connection):
        obj = mro.test_type(varchar='init')
        obj.oid = 1000
        assert obj.oid == 1000
        with pytest.raises(TypeError) as excinfo:
            obj.oid = 'randomstring'
        assert excinfo.value.args[0] == 'Value should be of type [int] not [{}]'.format(str.__name__)
if __name__ == '__main__':
    # Runs a single targeted test by default; uncomment to run the whole module.
    #pytest.main([__file__, '-rw'])
    pytest.main([__file__ + '::TestDataTypes::test_bytea'])
Dark-Bob/mro | mro/helpers.py | <reponame>Dark-Bob/mro<filename>mro/helpers.py
def create_column_name_index_map(cursor):
    """Map each column name in ``cursor.description`` to its positional index.

    Args:
        cursor: any object exposing a DB-API style ``description`` sequence
            whose entries have a ``name`` attribute.

    Returns:
        dict mapping column name to its zero-based index in the result row.
    """
    # enumerate replaces the original zip(description, range(len(description))).
    return {column.name: index for index, column in enumerate(cursor.description)}
Dark-Bob/mro | mro/foreign_keys.py |
import mro
import json
class foreign_key_data_type(object):
    """Descriptor wrapping a column data type that references another table.

    Reads and writes are routed through a lazily created per-instance
    ``foreign_key`` object, stored in the instance dict under
    ``<name>_foreign_key`` so it never collides with the column name itself.
    """
    def __init__(self, name, data_type, reference_class, reference_column_name):
        self.name = name + '_foreign_key'
        self.data_type = data_type
        # May be a string (e.g. 'mro.other_table') resolved lazily in _lazy_init.
        self.reference_class = reference_class
        self.reference_column_name = reference_column_name
    def __get__(self, instance, instance_type):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        elif not hasattr(instance, self.name):
            self._lazy_init(instance)
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        if not hasattr(instance, self.name):
            self._lazy_init(instance)
        # Assigning an instance of the referenced class stores its key column
        # value; any other value is stored directly as the key.
        if isinstance(value, self.reference_class):
            instance.__dict__[self.name].value = value.__dict__[self.reference_column_name]
        else:
            instance.__dict__[self.name].value = value
    def _lazy_init(self, instance):
        # lazy eval of reference class checked once per instance to get around class creation order issues
        if isinstance(self.reference_class, str):
            self.reference_class = eval(self.reference_class)
        instance.__dict__[self.name] = foreign_key(instance, self.data_type, self.reference_class, self.reference_column_name)
# Ensure foreign keys just get saved as the key value not the json object
class foreign_key_json_encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, foreign_key):
return obj.value
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
# HACK: replace the json module's private default encoder so that plain
# json.dumps(...) calls emit a foreign_key as its key value without every
# caller passing cls=/default=. NOTE(review): json._default_encoder is a
# private CPython detail and is bypassed whenever dumps() is called with
# non-default options — confirm this covers all serialisation paths used.
json._default_encoder = foreign_key_json_encoder()
class foreign_key(object):
    """Per-instance wrapper pairing a foreign-key value with its referenced row.

    ``fk.value`` proxies the underlying column through the column's data_type
    descriptor; ``fk.object`` lazily selects and caches the referenced row.
    All writes go through ``self.__dict__`` directly because ``__setattr__``
    is overridden to restrict which attributes may be set.
    """
    def __init__(self, owner, data_type, reference_class, reference_column_name):
        # Write via __dict__ to bypass the restrictive __setattr__ below.
        self.__dict__['data_type'] = data_type
        self.__dict__['reference_class'] = reference_class
        self.__dict__['reference_column_name'] = reference_column_name
        self.__dict__['owner'] = owner
    def __getattr__(self, attribute):
        # Only called when normal lookup fails, so a cached 'object' entry in
        # __dict__ short-circuits this method on subsequent reads.
        value = self.data_type.__get__(self.owner, int)
        if attribute == 'value':
            return value
        elif attribute == 'object':
            # NOTE(review): '== None' comparison; 'is None' is the usual idiom
            # (behavior unchanged for key values).
            if value == None:
                self.__dict__['object'] = None
            else:
                # Fetch and cache the referenced row on first access.
                self.__dict__['object'] = self.reference_class.select_one("{} = {}".format(self.reference_column_name, value))
            return self.__dict__['object']
        else:
            raise AttributeError("Attribute [{}] does not exist.".format(attribute))
    def __setattr__(self, attribute, value):
        if attribute == 'value':
            # Delegate the write to the column descriptor, then drop the
            # cached row so 'object' is re-fetched for the new key.
            self.data_type.__set__(self.owner, value)
            if 'object' in self.__dict__:
                del self.__dict__['object']
        elif attribute == 'object':
            # we could allow this to be set and if the object is different update the reference accordingly
            raise PermissionError("Cannot set the object attribute directly.")
        else:
            raise AttributeError("Illegal attribute [{}] on this object.".format(attribute))
    def __repr__(self):
        # Render as the bare key value.
        value = self.data_type.__get__(self.owner, int)
        return str(value)
    def __index__(self):
        # Allow use in slicing/indexing contexts.
        value = self.data_type.__get__(self.owner, int)
        return value
    def __int__(self):
        value = self.data_type.__get__(self.owner, int)
        return value
    def __eq__(self, attribute_or_other):
        value = self.data_type.__get__(self.owner, int)
        if type(attribute_or_other) is int:
            return value == attribute_or_other
        elif type(attribute_or_other) is foreign_key:
            # Compare underlying key values, not wrapper identity.
            return value == attribute_or_other.data_type.__get__(attribute_or_other.owner, int)
        else:
            return False
class foreign_key_reference(object):
    """Descriptor exposing the rows of another table that refer to this row.

    Returns a cached ``foreign_key_reference_list`` per instance, built on
    first access so a string referring_class can be resolved late.
    """

    def __init__(self, target_column, referring_class, referring_column):
        # referring_class arrives as a string here; it is resolved lazily.
        self.name = referring_class + '_foreign_refs'
        self.target_column = target_column
        self.referring_class = referring_class
        self.referring_column = referring_column

    def __get__(self, instance, instance_type):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        if self.name not in instance.__dict__:
            # Resolve a string reference once; eval runs in this module's
            # namespace, where ``mro`` is imported.
            if isinstance(self.referring_class, str):
                self.referring_class = eval(self.referring_class)
            instance.__dict__[self.name] = foreign_key_reference_list(
                instance, self.target_column, self.referring_class, self.referring_column)
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        raise Exception('Cannot set foreign key reference list, perhaps you meant to append to or extend the list?')
class foreign_key_reference_list(list):
    """List of rows in ``referring_class`` whose ``referring_column`` equals
    this instance's ``target_column`` value.

    The list is populated eagerly on construction; calling the list object
    itself re-queries the database. Item assignment is forbidden — use
    ``append``, which also points the added row's referring column at us.
    """

    def __init__(self, target_instance, target_column, referring_class, referring_column):
        self.target_instance = target_instance
        self.target_column = target_column
        self.referring_class = referring_class
        self.referring_column = referring_column
        # BUG FIX: was super().__init__(self), which passed the (still empty)
        # list itself to list.__init__ as the init iterable; start empty.
        super().__init__()
        super().extend(self._select_referring())

    def _select_referring(self):
        # Query all rows whose referring column equals our key column value.
        clause = self.referring_column + '=' + str(getattr(self.target_instance, self.target_column))
        return self.referring_class.select(clause)

    def __setitem__(self, key, item):
        raise PermissionError("Cannot set specific value on foreign key reference list.")

    def __call__(self):
        # Refresh the cached rows from the database.
        super().clear()
        super().extend(self._select_referring())

    def append(self, object):
        # Point the appended row at our key before tracking it locally.
        setattr(object, self.referring_column, getattr(self.target_instance, self.target_column))
        return super().append(object)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.