content stringlengths 5 1.05M |
|---|
from django.test import TransactionTestCase
class APITests(TransactionTestCase):
    """Smoke tests: each top-level listing page must respond with HTTP 200."""

    def test_tutorials_page_status_code(self):
        response = self.client.get('/tutorials/')
        # assertEquals is a deprecated alias; assertEqual is the canonical name.
        self.assertEqual(response.status_code, 200)

    def test_tags_page_status_code(self):
        response = self.client.get('/tags/')
        self.assertEqual(response.status_code, 200)

    def test_latest_page_status_code(self):
        response = self.client.get('/latest/')
        self.assertEqual(response.status_code, 200)
|
import logging
import warnings
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import PowerNorm
from matplotlib.figure import Figure
from matplotlib.patheffects import withStroke
from pyqtgraph import Qt as qt
# Module logger nested under the 'ds' package logger.
logger = logging.getLogger('ds.'+__name__)
# The logging level for signals
# (custom level below DEBUG=10, so signal emission logs are normally hidden)
SIGNALS = 5
#_Constants_____________________________________________________________________
# Pickle file holding cached colormaps and the per-user config directory.
CACHED_CMAPS_FILENAME = 'cmaps.p'
CONFIG_DIR = '.data_slicer/'
#_Utilities_____________________________________________________________________
class TracedVariable(qt.QtCore.QObject) :
    """ A pyqt implementaion of tkinter's/Tcl's traced variables using Qt's
    signaling mechanism.
    Basically this is just a wrapper around any python object which ensures
    that pyQt signals are emitted whenever the object is accessed or changed.
    In order to use pyqt's signals, this has to be a subclass of
    :class:`QObject <pyqtgraph.Qt.QtCore.QObject>`.
    **Attributes**
    ========================== ================================================
    _value                     the python object represented by this
                               TracedVariable instance. Should never be
                               accessed directly but only through the getter
                               and setter methods.
    sig_value_changed          :class:`Signal <pyqtgraph.Qt.QtCore.Signal>`;
                               the signal that is emitted whenever
                               ``self._value`` is changed.
    sig_value_read             :class:`Signal <pyqtgraph.Qt.QtCore.Signal>`;
                               the signal that is emitted whenever
                               ``self._value`` is read.
    sig_allowed_values_changed :class:`Signal <pyqtgraph.Qt.QtCore.Signal>`;
                               the signal that is emitted whenever
                               ``self.allowed_values`` are set or unset.
    allowed_values             :class:`array <numpy.ndarray>`; a sorted
                               list of all values that self._value can
                               assume. If set, all tries to set the value
                               will automatically set it to the closest
                               allowed one.
    ========================== ================================================
    """
    # Class-level Qt signal declarations; PyQt binds these per instance
    # once the QObject machinery has been initialized.
    sig_value_changed = qt.QtCore.Signal()
    sig_value_read = qt.QtCore.Signal()
    sig_allowed_values_changed = qt.QtCore.Signal()

    def __init__(self, value=None, name=None) :
        # Initialize instance variables
        self.allowed_values = None
        # Have to call superclass init for signals to work
        super().__init__()
        self._value = value
        # *name* is only used to make log messages identifiable.
        if name is not None :
            self.name = name
        else :
            self.name = 'Unnamed'

    def __repr__(self) :
        return '<TracedVariable({}, {})>'.format(self.name, self._value)

    def set_value(self, value=None) :
        """ Emit sig_value_changed and set the internal self._value. """
        # Choose the closest allowed value
        if self.allowed_values is not None :
            value = self.find_closest_allowed(value)
        self._value = value
        logger.log(SIGNALS, '{} {}: Emitting sig_value_changed.'.format(
            self.__class__.__name__, self.name))
        self.sig_value_changed.emit()

    def get_value(self) :
        """ Emit sig_value_changed and return the internal self._value.
        .. warning::
            the signal is emitted here before the caller actually receives
            the return value. This could lead to unexpected behaviour.
        """
        logger.log(SIGNALS, '{} {}: Emitting sig_value_read.'.format(
            self.__class__.__name__, self.name))
        self.sig_value_read.emit()
        return self._value

    def on_change(self, callback) :
        """ Convenience wrapper for :class:`Signal
        <pyqtgraph.Qt.QtCore.Signal>`'s 'connect'.
        """
        self.sig_value_changed.connect(callback)

    def on_read(self, callback) :
        """ Convenience wrapper for :class:`Signal
        <pyqtgraph.Qt.QtCore.Signal>`'s 'connect'.
        """
        self.sig_value_read.connect(callback)

    def set_allowed_values(self, values=None) :
        """ Define a set/range/list of values that are allowed for this
        Variable. Once set, all future calls to set_value will automatically
        try to pick the most reasonable of the allowed values to assign.
        Emits :signal:`sig_allowed_values_changed`
        **Parameters**
        ====== =================================================================
        values iterable; The complete list of allowed (numerical) values. This
               is converted to a sorted np.array internally. If values is
               `None`, all restrictions on allowed values will be lifted and
               all values are allowed.
        ====== =================================================================
        """
        if values is None :
            # Reset the allowed values, i.e. all values are allowed
            self.allowed_values = None
            self.min_allowed = None
            self.max_allowed = None
        else :
            # Convert to sorted numpy array
            try :
                values = np.array(values)
            except TypeError :
                message = 'Could not convert allowed values to np.array.'
                raise TypeError(message)
            # Sort the array for easier indexing later on
            values.sort()
            self.allowed_values = values
            # Store the max and min allowed values (necessary?)
            self.min_allowed = values[0]
            self.max_allowed = values[-1]
        logger.log(SIGNALS,
                   '{} {}: Emitting sig_allowed_values_changed.'.format(
                       self.__class__.__name__, self.name))
        # Update the current value to within the allowed range
        # (this also re-emits sig_value_changed).
        self.set_value(self._value)
        self.sig_allowed_values_changed.emit()

    def find_closest_allowed(self, value) :
        """ Return the value of the element in self.allowed_values (if set)
        that is closest to `value`.
        """
        if self.allowed_values is None :
            return value
        else :
            ind = np.abs( self.allowed_values-value ).argmin()
            return self.allowed_values[ind]
#_Functions_____________________________________________________________________
def indexof(value, array) :
    """ Find the position of the element of *array* lying closest to *value*.
    Ties resolve to the lowest index.

    Example::
        >>> a = np.array([1, 0, 0, 2, 1])
        >>> indexof(0, a)
        1
        >>> indexof(0.9, a)
        0
    """
    deviations = np.abs(array - value)
    return deviations.argmin()
def pop_kwarg(name, kwargs, default=1) :
    """ Remove keyword *name* from the dictionary *kwargs* and return its
    value. If *name* is not present, return *default* instead.

    The original re-implemented ``dict.pop`` with a membership test; the
    stdlib two-argument form does the same in a single lookup.
    """
    return kwargs.pop(name, default)
def make_slice_3d(data, d, i, integrate=0, silent=False) :
    """
    :deprecated:
    .. warning::
        Use :func:`make_slice <data_slicer.utilities.make_slice>`
        instead. (though this ~might~ be slightly faster for 3d datasets)
    Create a slice out of the 3d data (l x m x n) along dimension d
    (0,1,2) at index i. Optionally integrate around i.
    **Parameters**
    ========= =================================================================
    data      array-like; data of the shape (x, y, z)
    d         int, d in (0, 1, 2); dimension along which to slice
    i         int, 0 <= i < data.size[d]; The index at which to create the slice
    integrate int, ``0 <= integrate < |i - n|``; the number of slices above
              and below slice i over which to integrate
    silent    bool; toggle warning messages
    ========= =================================================================
    **Returns**
    === =======================================================================
    res np.array; Slice at index with dimensions ``shape[:d] + shape[d+1:]``
        where shape = (x, y, z).
    === =======================================================================
    """
    # Get the relevant dimensions
    shape = data.shape
    try :
        n_slices = shape[d]
    except IndexError :
        print('d ({}) can only be 0, 1 or 2 and data must be 3D.'.format(d))
        return
    # Set the integration indices and adjust them if they go out of scope
    start = i - integrate
    stop = i + integrate + 1
    if start < 0 :
        if not silent :
            warnings.warn(
                'i - integrate ({}) < 0, setting start=0'.format(start))
        start = 0
    if stop > n_slices :
        if not silent :
            warning = ('i + integrate ({}) > n_slices ({}), setting '
                       'stop=n_slices').format(stop, n_slices)
            warnings.warn(warning)
        stop = n_slices
    # Restrict only dimension *d* with a tuple of slice objects. The former
    # if/elif ladder over d in (0, 1, 2) fell through for any other d and
    # crashed with UnboundLocalError; this form works for every valid axis.
    selector = [slice(None)] * len(shape)
    selector[d] = slice(start, stop)
    return data[tuple(selector)].sum(d)
def make_slice(data, dim, index, integrate=0, silent=False) :
    """
    Take a slice out of an N dimensional dataset *data* at *index* along
    dimension *dim*. Optionally integrate by +- *integrate* channels around
    *index*.
    If *data* has shape::
        (n0, n1, ..., n(dim-1), n(dim), n(dim+1), ..., n(N-1))
    the result will be of dimension N-1 and have shape::
        (n0, n1, ..., n(dim-1), n(dim+1), ..., n(N-1))
    or in other words::
        shape(result) = shape(data)[:dim] + shape(data)[dim+1:]
    .
    **Parameters**
    ========= =================================================================
    data      array-like; N dimensional dataset.
    dim       int, 0 <= d < N; dimension along which to slice.
    index     int, 0 <= index < data.size[d]; The index at which to create
              the slice.
    integrate int, ``0 <= integrate < |index|``; the number of slices above
              and below slice *index* over which to integrate. A warning is
              issued if the integration range would exceed the data (can be
              turned off with *silent*).
    silent    bool; toggle warning messages.
    ========= =================================================================
    **Returns**
    === =======================================================================
    res np.array; slice at *index* alond *dim* with dimensions shape[:d] +
        shape[d+1:].
    === =======================================================================
    """
    # Find the dimensionality and the number of slices along the specified
    # dimension.
    shape = data.shape
    ndim = len(shape)
    try :
        n_slices = shape[dim]
    except IndexError :
        message = ('*dim* ({}) needs to be smaller than the dimension of '
                   '*data* ({})').format(dim, ndim)
        raise IndexError(message)
    # Set the integration indices and adjust them if they go out of scope
    start = index - integrate
    stop = index + integrate + 1
    if start < 0 :
        if not silent :
            warnings.warn(
                'i - integrate ({}) < 0, setting start=0'.format(start))
        start = 0
    if stop > n_slices :
        if not silent :
            warning = ('i + integrate ({}) > n_slices ({}), setting '
                       'stop=n_slices').format(stop, n_slices)
            warnings.warn(warning)
        stop = n_slices
    # Bring the slicing dimension to the front; np.moveaxis keeps the
    # remaining axes in their original relative order, so after summing
    # axis 0 the result already has shape[:dim] + shape[dim+1:].
    # This replaces the previous roll-forth-and-back index gymnastics,
    # which was equivalent for 0 <= dim < N but produced wrong axis maps
    # for negative *dim*; negative axes now work too.
    return np.moveaxis(data, dim, 0)[start:stop].sum(0)
def roll_array(a, i) :
""" Cycle the arrangement of the dimensions in an *N* dimensional array.
For example, change an X-Y-Z arrangement to Y-Z-X.
**Parameters**
= =========================================================================
a array of *N* dimensions, i.e. `len(a.shape) = N`.
i int; number of dimensions to roll
= =========================================================================
**Returns**
=== =======================================================================
res array of *N* dimensions where the axes have been rearranged as
follows::
before: `shape(a) = (d[0], d[1], ..., d[N])`
after: `shape(res) = (d[(0+i)%N], d[(1+i)%N], ..., d[(N+i)%N])`
=== =======================================================================
"""
# Create indices and rolled indices
N = len(a.shape)
indices = np.arange(N)
rolled_indices = np.roll(indices, i)
# Move the axes in the array accordingly
res = np.moveaxis(a, indices, rolled_indices)
return res
def get_lines(data, n, dim=0, i0=0, i1=-1, offset=0.2, integrate='max',
              **kwargs) :
    """
    Extract *n* evenly spaced rows/columns from data along dimension *dim*
    between indices *i0* and *i1*. The extracted lines are normalized and offset
    such that they can be nicely plotted close by each other - as is done, for
    example in :func:`lineplot <data_slicer.pit.PITDataHandler.lineplot>`.
    The input array is left unmodified.
    **Parameters**
    ========= =================================================================
    data      2d np.array; the data from which to extract lines.
    n         int; the number of lines to extract.
    dim       int; either 0 or 1, specifying the dimension along which to
              extract lines.
    i0        int; starting index in *data* along *dim*.
    i1        int; ending index in *data* along *dim*.
    offset    float; how much to vertically translate each successive line.
    integrate int or other; specifies how many channels around each line
              index should be integrated over. If anything but a small
              enough integer is given, defaults to the maximally available
              integration range.
    kwargs    any other passed keyword arguments are discarded.
    ========= =================================================================
    **Returns**
    ======= ===================================================================
    lines   list of 1d np.arrays; the extracted lines.
    indices list of int; the indices at which the lines were extracted.
    ======= ===================================================================
    """
    # Sanity check
    shape = data.shape
    try :
        assert len(shape) == 2
    except AssertionError :
        message = '*data* should be a 2d np.array. Found: {} dimensions.'
        message = message.format(len(shape))
        raise TypeError(message)
    # Normalize data and transpose if necessary
    if dim == 1 :
        data = data.T
    norm = np.max(data[i0:i1])
    # Out-of-place division: the previous in-place `data /= norm` silently
    # mutated the caller's array and raised a TypeError on integer dtypes.
    data = data / norm
    # Calculate the indices at which to extract lines.
    # First the raw step size *delta*
    if i1 == -1 : i1 = shape[dim]-1
    delta = (i1 - i0)/n
    # The maximum number of channels we can integrate around each index is
    # delta/2
    max_integrate = int(delta/2)
    # Adjust the user supplied *integrate* value, if necessary
    if type(integrate) != int or integrate > max_integrate :
        integrate = max_integrate
    # Construct equidistantly spaced center indices, leaving space above and
    # below for the integration.
    indices = [int(round(i)) for i in
               np.linspace(i0+integrate+1, i1-integrate, n)]
    # Extract the lines
    lines = []
    sumnorm = 2*integrate + 1
    for i in range(n) :
        start = indices[i] - integrate
        stop = indices[i] + integrate + 1
        line = np.sum(data[start:stop], 0)/sumnorm + i*offset
        lines.append(line)
    return lines, indices
def plot_cuts(data, dim=0, integrate=0, zs=None, labels=None, max_ppf=16,
              max_nfigs=4, **kwargs) :
    """ Plot all (or only the ones specified by `zs`) cuts along dimension `dim`
    on separate subplots onto matplotlib figures.
    **Parameters**
    ========= =================================================================
    data      3D np.array with shape (z,y,x); the data cube.
    dim       int; one of (0,1,2). Dimension along which to take the cuts.
    integrate int or 'full'; number of slices to integrate around each
              extracted cut. If 'full', take the maximum number possible,
              depending whether the number of cuts is reduced due to
              otherwise exceeding *max_nfigs*. 'full' does not work if *zs*
              are given.
    zs        1D np.array; selection of indices along dimension `dim`. Only
              the given indices will be plotted.
    labels    1D array/list of length z. Optional labels to assign to the
              different cuts
    max_ppf   int; maximum number of plots per figure.
    max_nfigs int; maximum number of figures that are created. If more would
              be necessary to display all plots, a warning is issued and
              only every N'th plot is created, where N is chosen such that
              the whole 'range' of plots is represented on the figures.
    kwargs    dict; keyword arguments passed on to :func:`pcolormesh
              <matplotlib.axes._subplots.AxesSubplot.pcolormesh>`.
              Additionally, the kwarg `gamma` for power-law color mapping
              is accepted.
    ========= =================================================================
    **Returns**
    ======= ===================================================================
    figures list of matplotlib Figure objects, one per created figure.
    ======= ===================================================================
    """
    # Create a list of all indices in case no list (`zs`) is given
    if zs is None :
        zs = np.arange(data.shape[dim])
    elif integrate == 'full' :
        warnings.warn('*full* option does not work when *zs* are specified.')
        integrate = 0
    # The total number of plots and figures to be created
    n_plots = len(zs)
    n_figs = int( np.ceil(n_plots/max_ppf) )
    nth = 1
    if n_figs > max_nfigs :
        # Only plot every nth plot
        nth = round(n_plots/(max_ppf*max_nfigs))
        # Get the right English suffix depending on the value of nth
        if nth <= 3 :
            suffix = ['st', 'nd', 'rd'][nth-1]
        else :
            suffix = 'th'
        warnings.warn((
            'Number of necessary figures n_figs ({0}) > max_nfigs ({1}).' +
            'Setting n_figs to {1} and only plotting every {2}`{3} cut.').format(
                n_figs, max_nfigs, nth, suffix))
        n_figs = max_nfigs
        n_plots = max_ppf*n_figs
    # Figure out how much we should integrate
    # (never more than half the stride between plotted cuts)
    if integrate == 'full' or integrate > nth/2 :
        integrate = int(nth/2)
    # If we have just one figure, make the subplots as big as possible by
    # setting the number of subplots per row (ppr) to a reasonable value
    if n_figs == 1 :
        ppr = int( np.ceil(np.sqrt(n_plots)) )
    else :
        ppr = int( np.ceil(np.sqrt(max_ppf)) )
    # Depending on the dimension we need to extract the cuts differently.
    # Account for this by moving the axes
    x = np.arange(len(data.shape))
    data = np.moveaxis(data, x, np.roll(x, dim))
    # Extract kwargs used for the PowerNorm
    gamma = pop_kwarg('gamma', kwargs, 1)
    vmin = pop_kwarg('vmin', kwargs, None)
    vmax = pop_kwarg('vmax', kwargs, None)
    # Define the beginnings of the plot in figure units
    margins = dict(left=0, right=1, bottom=0, top=1)
    figures = []
    for i in range(n_figs) :
        # Create the figure with pyplot
        fig = plt.figure()
        # Index range of the cuts that land on this figure
        start = i*ppr*ppr
        stop = (i+1)*ppr*ppr
        # Iterate over the cuts that go on this figure
        for j,z in enumerate(zs[start:stop]) :
            # Try to extract the cut and create the axes
            cut_index = z*nth
            if cut_index < data.shape[0] :
                cut = make_slice(data, 0, cut_index, integrate)
            else :
                continue
            # Transpose to counter matplotlib's transposition
            cut = cut.T
            ax = fig.add_subplot(ppr, ppr, j+1)
            ax.pcolormesh(cut,
                          norm=PowerNorm(gamma=gamma, vmin=vmin, vmax=vmax),
                          **kwargs)
            ax.set_xticks([])
            ax.set_yticks([])
            # Annotate each subplot with its (optional) label or the index
            if labels is not None :
                labeltext = str(labels[cut_index])
            else :
                labeltext = str(cut_index)
            label = ax.text(0, 0, labeltext, size=10)
            label.set_path_effects([withStroke(linewidth=2, foreground='w',
                                               alpha=0.5)])
        fig.subplots_adjust(hspace=0.01, wspace=0.01, **margins)
        figures.append(fig)
    return figures
def get_contours(data, x=None, y=None, levels=0) :
    """ Use matplotlib`s contour function to get contour lines where the 2
    dimensional dataset *data* intersects *levels*.
    **Parameters**
    ====== ====================================================================
    data   2d-array; shape (nx, ny)
    x      array-like; can be a linear array of shape (nx) or a meshgrid of
           shape (nx, ny)
    y      array-like; can be a linear array of shape (ny) or a meshgrid of
           shape (nx, ny)
    levels float or list of float; the levels at which to extract the
           contour lines. Due to a matplotlib limitation, these numbers have
           to be in ascending order.
    ====== ====================================================================
    **Returns**
    ======== ==================================================================
    contours list of 2d-arrays; each array of shape (2, N) contains the x and
             y coordinates of a contour line.
    ======== ==================================================================
    """
    # Handle input
    data = np.asarray(data)
    shape = data.shape
    if len(shape) != 2 :
        raise ValueError('*data* should be a 2-d array. '
                         'shape(data)={}'.format(shape))
    # Default to index arrays
    if x is None or y is None :
        x = np.arange(shape[0])
        y = np.arange(shape[1])
    else :
        x = np.asarray(x)
        y = np.asarray(y)
    # Make meshgrid and sanity check for shapes
    if len(x.shape) == 1 and len(y.shape) == 1 :
        X, Y = np.meshgrid(x, y)
    elif len(x.shape) == 2 and len(y.shape) == 2 :
        X, Y = x, y
    else :
        raise ValueError('*x* and *y* should have the same shape. '
                         'x.shape={}, y.shape={}'.format(x.shape, y.shape))
    # Wrap a bare number in a list. The previous isinstance(levels, int)
    # check left a scalar *float* unwrapped, which crashed the contour call.
    if not np.iterable(levels) : levels = [levels]
    # Create invisible figure and axes to get access to the contour function
    ghost_fig = Figure()
    ghost_ax = ghost_fig.add_subplot(111)
    # Use contour to do the work.
    # NOTE(review): ContourSet.collections is deprecated in matplotlib >= 3.8
    # (removed in 3.10); switch to ContourSet.allsegs when upgrading.
    collections = ghost_ax.contour(X, Y, data, levels=levels).collections
    contours = []
    for collection in collections :
        # Only the first path of each level is extracted (pre-existing
        # behaviour; disconnected contour segments beyond it are dropped).
        verts = collection.get_paths()[0].vertices
        contours.append(np.array([verts[:,0], verts[:,1]]))
    # Clean up
    ghost_fig.clear()
    del ghost_fig
    return contours
if __name__ == '__main__' :
    # Demo: extract and draw circular contours of a paraboloid.
    n_points = 100
    axis_values = np.arange(n_points)
    XX, YY = np.meshgrid(axis_values, axis_values)
    field = (XX-n_points/2)**2 + (YY-n_points/2)**2
    contour_lines = get_contours(field, levels=[100, 200, 300, 400])
    plt.pcolormesh(XX, YY, field)
    for line in contour_lines :
        plt.plot(line[0], line[1])
    plt.show()
|
# Copyright(c) 2017-2021 CloudNetEngine. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines nodes and topology structure."""
import os
import sys
from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network
from yaml import safe_load
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.constants import Constants
from resources.libraries.python.vif import VhostUserInterface, InterfaceAddress
from resources.libraries.python.ssh import exec_cmd, kill_process
from resources.libraries.python.vm import VirtualMachine
from resources.libraries.python.vswitch import OvsDpdk, OvsNative
__all__ = [
    u"init_topology",
]
# Subdirectory (under the test root) used for per-run scratch files.
_TMP_DIR = "temp"
# Make sure the TEP IP networks do not conflict with the SUTs'
# management IP networks.
_TEP_NETV4 = IPv4Network("10.111.0.0/16")
_TEP_NETV6 = IPv6Network('2001:1000:1000:1000:0:0:0a6f:0000/112')
def _atoi(s):
try:
return int(s)
except ValueError:
return 0
class Node():
    """Base class for a managed node reachable over SSH."""
    def __init__(self, name, node_spec):
        self.name = name
        # Collect the SSH connection parameters from the node spec.
        self.ssh_info = {key: node_spec[key]
                         for key in ('host', 'port', 'username', 'password')}
class Numa():
    """Bookkeeping for a single NUMA node's allocatable resources."""
    def __init__(self, numa_id):
        self.numa_id = numa_id   # integer id of this NUMA node
        self.avail_cpus = []     # CPU ids still free for allocation
        self.avail_mem = 0       # free hugepage memory (KB)
        self.vms = []            # VirtualMachine objects placed on this node
class SUT(Node):
    """Define attributes and methods for a SUT (System Under Test) node.
    A SUT node is normally a host which runs virtual switch and guests.
    """
    # Defaults; each can be overridden through the node spec.
    HUGE_MNT = '/dev/hugepages'
    HUGEPAGE_SIZE = 1024 # MB
    OVSDPDK_MEM_PER_SOCKET = (1024 * 512) # KB
    OVSDPDK_PNIC_NUMA_CPU_NUM = 1
    OVSDPDK_NORM_NUMA_CPU_NUM = 1
    MAX_VM_PER_NUMA = 2
    def __init__(self, name, node_spec):
        """Probe the host over SSH, then build its vswitch and VM layout.

        :param name: node name from the topology file.
        :param node_spec: dict of node attributes from the topology file.
        """
        super().__init__(name, node_spec)
        self.huge_mnt = node_spec.get('huge_mnt', SUT.HUGE_MNT)
        self.userspace_tso = node_spec.get('userspace_tso', True)
        self.numas = list()
        self.hugepage_size = int(node_spec.get('hugepage_size', SUT.HUGEPAGE_SIZE))
        self.hugepage_size *= 1024 # To KB
        cmd = "lscpu -p"
        _, stdout, _ = exec_cmd(self.ssh_info, cmd)
        # Need to destroy stale processes before collecting the available
        # resources.
        kill_process(self.ssh_info, "qemu-system-x86_64")
        kill_process(self.ssh_info, "ovs-vswitchd")
        kill_process(self.ssh_info, "ovsdb-server")
        # Remove stale rtemap entries
        exec_cmd(self.ssh_info, f"rm -rf {self.huge_mnt}/rtemap_*")
        # 'lscpu -p' output looks like:
        ## CPU,Core,Socket,Node,,L1d,L1i,L2,L3,L4
        #0,0,0,0,,0,0,0,0,0
        #1,1,0,0,,1,1,1,1,1
        self.cpuinfo = list()
        for line in stdout.split("\n"):
            if len(line) > 0 and line[0] != "#":
                self.cpuinfo.append([_atoi(x) for x in line.split(",")])
        # Last line contains the largest numa node id
        self.n_numa = self.cpuinfo[-1][3] + 1
        # Construct NUMA core list mapping
        for numa_id in range(self.n_numa):
            numa = Numa(numa_id)
            cmd = f"cat /sys/devices/system/node/node{numa_id}/" \
                f"hugepages/hugepages-{self.hugepage_size}kB/free_hugepages"
            ret_code, stdout, stderr = exec_cmd(self.ssh_info, cmd)
            stdout = stdout.strip()
            if ret_code:
                # Current numa node doesn't have any hugepage requested.
                continue
            try:
                free_hugepages = int(stdout)
                if free_hugepages < 0:
                    # In some system without numa enabled, normalized to 0
                    free_hugepages = 0
            except ValueError:
                logger.error(f"Reading numa hugepage failed : {cmd} {stdout}")
                sys.exit()
            numa.avail_mem = self.hugepage_size * free_hugepages
            self.numas.append(numa)
        for cpu in self.cpuinfo:
            self.numas[cpu[3]].avail_cpus.append(cpu[0])
        # Don't use core 0
        self.numas[0].avail_cpus.remove(0)
        self.pnic_numa_id = None
        uplinks_spec = node_spec.get("interfaces", dict())
        for iface in uplinks_spec.keys():
            iface_spec = uplinks_spec[iface]
            cmd = f"cat /sys/bus/pci/devices/{iface_spec['pci_address']}/numa_node"
            _, stdout, _ = exec_cmd(self.ssh_info, cmd)
            try:
                numa_id = int(stdout)
                if numa_id < 0:
                    # In some system without numa enabled, normalized to 0
                    numa_id = 0
            except ValueError:
                logger.error(f"Reading numa location failed for: {iface_spec['pci_address']}")
                sys.exit()
            # Compare against None explicitly: the former truthiness test
            # treated a pnic on numa node 0 as "not yet set" and thereby
            # skipped the cross-numa consistency check below.
            if self.pnic_numa_id is None:
                self.pnic_numa_id = numa_id
            else:
                if numa_id != self.pnic_numa_id:
                    logger.warn(f"uplink interfaces CANNOT be on different numa nodes")
                    sys.exit()
        self.test_root_dir = node_spec.get("test_root_dir")
        if not self.test_root_dir:
            # Default to ~/TEST_ROOT on the remote host.
            (_, stdout, _) = exec_cmd(self.ssh_info, 'echo ~')
            self.test_root_dir = os.path.join(str(stdout).strip(), "TEST_ROOT/")
        self.test_tmp_dir = os.path.join(self.test_root_dir, _TMP_DIR)
        exec_cmd(self.ssh_info, f"rm -rf {self.test_tmp_dir}")
        exec_cmd(self.ssh_info, f"mkdir -p {self.test_tmp_dir}")
        self.vhost_sock_dir = "/var/run/openvswitch"
        self.vms = list()
        node_idx = int(node_spec['id'])
        dp_type = node_spec.get("dp_type", "ovs-dpdk")
        dpdk_devbind_dir = os.path.join(self.test_root_dir, "bin/")
        ovs_bin_dir = os.path.join(self.test_root_dir, f"bin/{dp_type}/")
        # Tunnel endpoint addresses are derived from the node index.
        tep_ipv4 = list(_TEP_NETV4.hosts())[node_idx + 1]
        tep_ipv6 = list(_TEP_NETV6.hosts())[node_idx + 1]
        tep_addr = InterfaceAddress(tep_ipv4, _TEP_NETV4,
                                    tep_ipv6, _TEP_NETV6)
        if dp_type == "ovs-dpdk":
            ovs_native = False
            aux_params = dict()
            socket_mem_str = ''
            # 'numas' is sorted by id
            for numa in self.numas:
                socket_mem = max(SUT.OVSDPDK_MEM_PER_SOCKET, self.hugepage_size)
                if numa.avail_mem <= socket_mem:
                    logger.warn(f"numa node:{numa.numa_id} mem:{numa.avail_mem} "
                                f"is not enough for ovsdpdk socket_mem:{socket_mem}. "
                                f"skip this numa node.")
                    # Prevent allocating a vm on this numa node
                    numa.avail_mem = 0
                    socket_mem = 0
                else:
                    # If the left mem is not enough for a VM, no side effect
                    numa.avail_mem -= socket_mem
                socket_mem_str += f"{int(socket_mem/1024)},"
            socket_mem_str = socket_mem_str.rstrip(',')
            cpu_mask = 0
            pnic_numa_cpu_num = int(node_spec.get('pnic_numa_cpu_num',
                                                  SUT.OVSDPDK_PNIC_NUMA_CPU_NUM))
            norm_numa_cpu_num = int(node_spec.get('norm_numa_cpu_num',
                                                  SUT.OVSDPDK_NORM_NUMA_CPU_NUM))
            for numa in self.numas:
                if not numa.avail_mem:
                    # Bypass numa nodes which have no socket_mem allocated.
                    continue
                if numa.numa_id == self.pnic_numa_id:
                    if len(numa.avail_cpus) < pnic_numa_cpu_num:
                        logger.warn(f"pnic numa node:{numa.numa_id} "
                                    f"does not have {pnic_numa_cpu_num} cpus")
                        sys.exit()
                    for _ in range(pnic_numa_cpu_num):
                        cpu = numa.avail_cpus.pop(0)
                        cpu_mask |= 1 << cpu
                else:
                    if len(numa.avail_cpus) < norm_numa_cpu_num:
                        # Former message duplicated "have" and reported the
                        # pnic cpu count instead of the norm cpu count.
                        logger.warn(f"norm numa node:{numa.numa_id} "
                                    f"does not have {norm_numa_cpu_num} cpus")
                        sys.exit()
                    for _ in range(norm_numa_cpu_num):
                        cpu = numa.avail_cpus.pop(0)
                        cpu_mask |= 1 << cpu
            aux_params['socket_mem'] = socket_mem_str
            aux_params['huge_mnt'] = self.huge_mnt
            aux_params['cpu_mask'] = hex(cpu_mask)
            aux_params['userspace_tso'] = self.userspace_tso
            aux_params['driver'] = node_spec.get('driver', 'vfio-pci')
            self.vswitch = OvsDpdk(self.ssh_info, node_spec.get("interfaces", dict()),
                                   tep_addr,
                                   ovs_bin_dir, dpdk_devbind_dir,
                                   aux_params)
        elif dp_type == "ovs-native":
            ovs_native = True
            self.vswitch = OvsNative(self.ssh_info, node_spec.get("interfaces", dict()),
                                     tep_addr,
                                     ovs_bin_dir, dpdk_devbind_dir)
        else:
            raise RuntimeError(f"Do not support {dp_type} type datapath")
        self.vswitch.stop_vswitch()
        self.vswitch.start_vswitch()
        host_ssh_info = {
            'host': node_spec['host'],
            'port': node_spec['port'],
            'username': node_spec['username'],
            'password': node_spec['password'],
        }
        vm_mem_size = int(node_spec.get('vm_mem_size', VirtualMachine.VM_MEM_SIZE))
        vm_mem_size *= 1024 # To KB
        vm_cpu_num = int(node_spec.get('vm_cpu_num', VirtualMachine.VM_CPU_NUM))
        # A VM needs at least one hugepage.
        vm_mem_size = max(vm_mem_size, self.hugepage_size)
        if_idx_of_host = 1
        guest_idx = 1
        # Place up to MAX_VM_PER_NUMA guests on every numa node that still
        # has memory and cpus left.
        for numa in self.numas:
            if not numa.avail_mem:
                # Bypass numa nodes which have no socket_mem allocated.
                continue
            for _ in range(SUT.MAX_VM_PER_NUMA):
                if numa.avail_mem < vm_mem_size:
                    break
                if len(numa.avail_cpus) < vm_cpu_num:
                    break
                numa.avail_mem -= vm_mem_size
                vm_host_cpus = []
                for _ in range(vm_cpu_num):
                    vm_host_cpus.append(numa.avail_cpus.pop(0))
                vm_name = 'vm_{0:02d}_{1:02d}'.format(node_idx, guest_idx)
                vm = VirtualMachine(vm_name, guest_idx, vm_mem_size, vm_host_cpus, self.huge_mnt,
                                    host_ssh_info, self.test_root_dir,
                                    ovs_native=ovs_native)
                for if_idx_on_vm in range(VirtualMachine.VM_VIFS_NUM):
                    vif_name = 'vhost_{0:02d}{1:03d}'.format(node_idx, if_idx_of_host)
                    # Addresses/MACs are derived from node, guest and vif
                    # indices so they are unique across the topology.
                    ipv4 = IPv4Address('172.{0}.{1}.{2}'.format(
                        168 + if_idx_on_vm, node_idx, guest_idx))
                    ipv4_network = IPv4Network(f"{ipv4}/16", strict=False)
                    ipv6 = IPv6Address('2001:1000:1000:1000:0:0:' \
                        'ac{0:02x}:{1:02x}{2:02x}'.format(
                            168 + if_idx_on_vm, node_idx, guest_idx))
                    ipv6_network = IPv6Network(f"{ipv6}/112", strict=False)
                    if_addr = InterfaceAddress(ipv4, ipv4_network, ipv6, ipv6_network)
                    mac = '00:00:00:{0:02x}:{1:02x}:{2:02x}'.format(node_idx,
                                                                    guest_idx, if_idx_on_vm + 1)
                    vif = VhostUserInterface(name=vif_name,
                                             idx=if_idx_on_vm,
                                             mac=mac,
                                             ofp=f"{Constants.OFP_VHOST_BASE + if_idx_of_host}")
                    cm = str(node_spec.get('vhost_client_mode', True)).lower()
                    if cm == 'false':
                        vif.backend_client_mode = False
                    vif.if_addr = if_addr
                    vif.sock = os.path.join(self.vhost_sock_dir, vif_name)
                    if dp_type == "ovs-native":
                        path = os.path.join(self.test_tmp_dir, f"{vm_name}_{vif_name}")
                        vif.qemu_script_ifup = f"{path}_ifup"
                        vif.qemu_script_ifdown = f"{path}_ifdown"
                    vm.add_vhost_user_if(vif)
                    if_idx_of_host += 1
                numa.vms.append(vm)
                guest_idx += 1
    def get_vms(self):
        """Get all the virtual machines on the SUT.
        :returns: virtual machines.
        :rtype: list(VirtualMachine obj)
        """
        vms = list()
        for numa in self.numas:
            vms += numa.vms
        return vms
def load_topo_from_yaml():
    """Load topology from file defined in "${TOPOLOGY_PATH}" variable.

    Then constructs all the components defined in the config file.
    Appends each constructed SUT to the module-global ``suts`` list
    (created by :func:`init_topology`).
    """
    try:
        topo_path = BuiltIn().get_variable_value(u"${TOPOLOGY_PATH}")
    except Exception as err:
        # Raising a plain string is a TypeError in Python 3; raise a real
        # exception and keep the original cause chained.
        raise RuntimeError("Cannot load topology file.") from err
    with open(topo_path) as work_file:
        nodes_spec = safe_load(work_file.read())[u"nodes"]
    for name, node_spec in nodes_spec.items():
        if node_spec['type'] == 'SUT':
            sut = SUT(name, node_spec)
            suts.append(sut)
# pylint:disable=global-variable-undefined
def init_topology():
    """Build the global topology; must run exactly once per whole test."""
    global suts
    suts = []
    load_topo_from_yaml()
|
from pylab import *

# Visualize posterior samples of circle (drawn as squares) configurations.
x = linspace(-1., 1., 1001)
y = x.copy()
[x, y] = meshgrid(x, y)
y = y[::-1, :]
sample = atleast_2d(loadtxt("sample.txt"))
figure(figsize=(8, 8))
ion()
for i in range(sample.shape[0]):  # xrange is Python 2 only
    # hold(False) was removed from matplotlib; clear the axes instead.
    clf()
    # Circle radii and positions
    width = sample[i, 5:1005]
    xc = sample[i, 1005:2005]
    yc = sample[i, 2005:3005]
    # Truncate to the number of active circles; the count is stored as a
    # float and must be cast — float slice indices raise in modern numpy.
    n_circles = int(sample[i, 4])
    xc = xc[0:n_circles]
    yc = yc[0:n_circles]
    width = exp(width[0:n_circles])
    for j in range(xc.shape[0]):
        gca().add_artist(Rectangle((xc[j] - width[j], yc[j] - width[j]),
                                   2*width[j], 2*width[j], alpha=0.1))
    axis([-1, 1, -1, 1])
    title(i+1)
    draw()
ioff()
show()
|
import logging
from io import BytesIO
import uvicorn
from fastapi import Body, Depends, FastAPI, File, Request, UploadFile
from sqlalchemy.orm import Session
from .database import SessionLocal, UserIn, add_user, delete_user, get_user, get_users
from .yolo_minimal.detect import detect_init, detect, parse_name
# Served behind a reverse proxy; OpenAPI docs are mounted under /api.
app = FastAPI(openapi_prefix="/api")
logger = logging.getLogger()
# Load the YOLO network once at import time so every request reuses it.
model = detect_init(cfg="yolov3-spp-buoy.cfg", weights="best_buoy.pt", img_size=512)
@app.get("/")
async def read_root():
    """Health-check endpoint; always reports success."""
    payload = {"return": 200}
    return payload
@app.get("/ip")
async def read_ip(request: Request):
    """Echo the caller's IP address back to it."""
    return {"return": 200, "client_host": request.client.host}
# Minimal PyTorch yolo detect application
@app.get("/yolo")
async def get_model_info():
    """Expose the detector's name and its class labels."""
    name, labels = parse_name(files="buoy.names")
    return {"return": 200, "name": name, "labels": labels}
@app.post("/yolo")
async def detect_image(image: UploadFile = File(...)):
    """Run YOLO detection on an uploaded image and return the result.

    Fix: the original wrapped the bytes in a BytesIO only to immediately
    ``read()`` them back out — a pure no-op round-trip. Hand the raw bytes
    straight to ``detect``.
    """
    raw = image.file.read()
    return {
        "return": 200,
        "result": detect(raw, model, img_size=512),
    }
# Dependency
def get_db():
    """FastAPI dependency: yield a DB session, always closing it.

    Fix: the original created the session *inside* the try block, so if
    ``SessionLocal()`` raised, the ``finally`` clause hit an unbound ``db``
    and masked the real error with a NameError.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
# SQL Process
@app.get("/chatboard/all")
async def read_users(db: Session = Depends(get_db)):
    """List every chatboard user."""
    users = get_users(db)
    return {"return": 200, "user": users}
@app.get("/chatboard/{uid}")
async def read_user(uid: str, db: Session = Depends(get_db)):
    """Fetch a single chatboard user by id."""
    found = get_user(db, uid)
    return {"return": 200, "user": found}
@app.delete("/chatboard/{uid}")
async def remove_user(
    uid: str, request: Request, db: Session = Depends(get_db),
):
    """Delete a user, but only when the request comes from the same IP
    that created the record.

    NOTE(review): when no user matches ``uid`` the function falls through
    and returns None (serialized as JSON null) — confirm this is intended.
    """
    ip = request.client.host
    if user := get_user(db, uid):
        if ip == user.ip:
            delete_user(db, uid)
            return {"return": 200, "user": get_users(db)}
        else:
            # NOTE(review): get_users is called with a second argument (400)
            # only on this path — presumably an error code; verify against
            # the get_users signature in .database.
            return {"return": 200, "user": get_users(db, 400)}
@app.put("/chatboard", status_code=201)
async def create_user(
    request: Request, user: UserIn = Body(...), db: Session = Depends(get_db)
):
    """Register a new user, recording the caller's IP alongside it."""
    logger.info(f"Setting database: {user.name} {user.email}")
    caller_ip = request.client.host
    add_user(db, user, caller_ip)
    return {"return": 201, "user": get_users(db)}
if __name__ == "__main__":
    # Local development entry point; production serves behind a proxy at /api.
    uvicorn.run(app, host="127.0.0.1", port=8000)
|
from django.urls import path
from .import views
# Registration, login and logout routes; each role has its own class-based
# registration view.
urlpatterns = [
    path('register/', views.register, name='register'),
    path('manufacturer_register/', views.manufacturer_register.as_view(), name='manufacturer_register'),
    path('distributor_register/', views.distributor_register.as_view(), name='distributor_register'),
    path('retailer_register/', views.retailer_register.as_view(), name='retailer_register'),
    path('governmentbody_register/', views.governmentbody_register.as_view(), name='governmentbody_register'),
    path('normaluser_register/', views.normaluser_register.as_view(), name='normaluser_register'),
    path('login/', views.login_request, name='login'),
    path('logout/', views.logout_view, name='logout'),
]
import pytest
from lupa._lupa import LuaRuntime
from dredis.keyspace import Keyspace
from dredis.exceptions import RedisScriptError
from dredis.lua import LuaRunner, RedisLua
def test_lua_return_redis_types_run():
    """Lua scalars and tables convert to their Redis reply equivalents."""
    runner = LuaRunner(Keyspace())
    script = """return {'test', true, false, 10, 20.3, {4}}"""
    assert runner.run(script, [], []) == ['test', 1, None, 10, 20, [4]]
def test_lua_table_with_error_run():
    """A returned table carrying an `err` field raises RedisScriptError."""
    runner = LuaRunner(Keyspace())
    failing_script = """return {err='This is an error'}"""
    with pytest.raises(RedisScriptError) as e:
        runner.run(failing_script, [], [])
    assert str(e.value) == 'This is an error'
def test_lua_table_with_ok_run():
    """A returned table carrying an `ok` field unwraps to its status string."""
    runner = LuaRunner(Keyspace())
    ok_script = """return {ok='Everything is OK'}"""
    assert runner.run(ok_script, [], []) == 'Everything is OK'
def test_redislua_return_lua_types_call():
    """call() preserves Lua value types, including nested redis.call results."""
    runtime = LuaRuntime(unpack_returned_tuples=True)
    redis_lua = RedisLua(Keyspace(), runtime)
    script = """return {'test', true, false, 10, 20.3, {'another string'}, redis.call('ping')}"""
    result = redis_lua.call('EVAL', script, 0, [])
    # Lua tables are 1-indexed.
    assert result[1] == 'test'
    assert result[2] == 1
    assert result[3] is False
    assert result[4] == 10
    assert result[5] == 20
    assert result[6][1] == 'another string'
    assert result[7] == 'PONG'
def test_redislua_return_lua_types_pcall():
    """pcall() mirrors call() for scripts that succeed."""
    runtime = LuaRuntime(unpack_returned_tuples=True)
    redis_lua = RedisLua(Keyspace(), runtime)
    script = """return {'test', true, false, 10, 20.3, {'another string'}, redis.call('ping')}"""
    result = redis_lua.pcall('EVAL', script, 0, [])
    # Lua tables are 1-indexed.
    assert result[1] == 'test'
    assert result[2] == 1
    assert result[3] is False
    assert result[4] == 10
    assert result[5] == 20
    assert result[6][1] == 'another string'
    assert result[7] == 'PONG'
def test_redislua_with_error_call():
    """call() raises when the underlying command reports an arity error."""
    redis_lua = RedisLua(Keyspace(), LuaRuntime(unpack_returned_tuples=True))
    with pytest.raises(RedisScriptError) as exc:
        redis_lua.call('GET')
    assert str(exc.value) == "wrong number of arguments for 'get' command"
def test_redislua_with_error_pcall():
    """pcall() converts command errors into an {err=...} table."""
    redis_lua = RedisLua(Keyspace(), LuaRuntime(unpack_returned_tuples=True))
    result = redis_lua.pcall('GET')
    assert result['err'] == "wrong number of arguments for 'get' command"
def test_redislua_with_command_error_call():
    """call() raises for commands that do not exist."""
    redis_lua = RedisLua(Keyspace(), LuaRuntime(unpack_returned_tuples=True))
    with pytest.raises(RedisScriptError) as exc:
        redis_lua.call('cmd_not_found')
    assert str(exc.value) == '@user_script: Unknown Redis command called from Lua script'
def test_redislua_with_command_error_pcall():
    """pcall() reports unknown commands via an {err=...} table."""
    redis_lua = RedisLua(Keyspace(), LuaRuntime(unpack_returned_tuples=True))
    result = redis_lua.pcall('cmd_not_found')
    assert result['err'] == '@user_script: Unknown Redis command called from Lua script'
|
"""
Marquee plugin: replicate the html <marquee/> tag with message corrections.
Usage of this plugin is not recommended.
Commands
--------
.. glossary::
/marquee <text>
Send the following text with <marquee/> behavior
Configuration
-------------
.. glossary::
:sorted:
refresh
**Default:** ``1``
Interval between each correction (the closest to 0 is the fastest)
total_duration
**Default:** ``30``
Total duration of the animation.
padding
**Default:** ``20``
Padding to use to move the text.
"""
from poezio.plugin import BasePlugin
from poezio import tabs
from poezio import xhtml
from poezio.decorators import command_args_parser
def move(text, step, spacing):
    """Rotate *text* right by *step* positions after appending *spacing*
    trailing spaces, producing one frame of the marquee animation.
    """
    padded = text + (" " * spacing)
    if not padded:
        # Fix: the original crashed with ZeroDivisionError on empty input
        # (modulo by len("")); an empty marquee frame is just empty.
        return padded
    # Hoist the modulo: the original computed step % len twice per call.
    offset = step % len(padded)
    return padded[len(padded) - offset:] + padded[:len(padded) - offset]
class Plugin(BasePlugin):
    # Per-plugin defaults; see the module docstring for each key's meaning.
    default_config = {"marquee": {"refresh": 1.0, "total_duration": 30, "padding": 20}}

    def init(self):
        # Register /marquee on every conversation-like tab type.
        for tab_t in [tabs.MucTab, tabs.ConversationTab, tabs.PrivateTab]:
            self.add_tab_command(tab_t, 'marquee', self.command_marquee,
                'Replicate the <marquee/> behavior in a message')

    @command_args_parser.raw
    def command_marquee(self, args):
        """Send *args* as a normal message, then start the correction loop."""
        tab = self.api.current_tab()
        # Strip colors/XHTML so corrections operate on plain text.
        args = xhtml.clean_text(xhtml.convert_simple_to_full_colors(args))
        tab.command_say(args)
        is_muctab = isinstance(tab, tabs.MucTab)
        msg_id = tab.last_sent_message["id"]
        jid = tab.name
        # Schedule the first frame one refresh interval from now
        # (step=1, elapsed duration=0).
        event = self.api.create_delayed_event(self.config.get("refresh"),
                                              self.delayed_event,
                                              jid, args, msg_id, 1, 0,
                                              is_muctab)
        self.api.add_timed_event(event)

    def delayed_event(self, jid, body, msg_id, step, duration, is_muctab):
        """Emit one marquee frame by correcting the previously sent message,
        then re-schedule itself until *duration* reaches total_duration.
        """
        if duration >= self.config.get("total_duration"):
            return
        message = self.core.xmpp.make_message(jid)
        message["type"] = "groupchat" if is_muctab else "chat"
        message["body"] = move(body, step, self.config.get("padding"))
        # Message correction (XEP-0308) of the previous frame.
        message["replace"]["id"] = msg_id
        message.send()
        # Chain the next frame, now correcting the message just sent.
        event = self.api.create_delayed_event(self.config.get("refresh"),
                                              self.delayed_event, jid, body,
                                              message["id"], step + 1,
                                              duration + self.config.get("refresh"),
                                              is_muctab)
        self.api.add_timed_event(event)
|
"""Initializes the header dash html
"""
from typing import List
from dash.development.base_component import ComponentMeta
from dash_html_components import Div, A
from chime_dash.app.components.base import Component
class Header(Component):
    """Dash layout component rendering the Penn Medicine page header."""

    localization_file = "header.yml"

    def get_html(self) -> List[ComponentMeta]:
        """Build the header: one wrapper Div holding the logo and title links."""
        content = self.content
        logo_link = A(
            href="https://www.pennmedicine.org",
            className="penn-medicine-header__logo",
            title=content["logo-title"],
            children=content["logo-text"],
        )
        title_link = A(
            className="penn-medicine-header__title",
            id="title",
            children=content["title"],
        )
        wrapper = Div(
            className="penn-medicine-header__content",
            children=[logo_link, title_link],
        )
        return [wrapper]
|
from __future__ import unicode_literals
from django.template import Library
register = Library()
@register.simple_tag
def get_api_image_url(obj, **kwargs):
    """Template tag: delegate to the object's own ``get_api_image_url``.

    Extra keyword arguments are passed through unchanged; the object
    decides what they mean (e.g. size or format options).
    """
    return obj.get_api_image_url(**kwargs)
|
import argparse
import os
from google.cloud import bigquery
from google.cloud.bigquery.job import ExtractJobConfig
DATASET = "taxifare"
TRAIN_TABLE = "feateng_training_data"
VALID_TABLE = "feateng_valid_data"

# Hash-sampled training split (~1/1000 of trips) from the public NYC taxi
# table, filtered to plausible fares, NYC-area coordinates and >0 passengers.
TRAIN_SQL = """ CREATE OR REPLACE TABLE taxifare.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 1000)) = 1
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
"""

# Disjoint validation split (~1/10000, different hash bucket than training).
VALID_SQL = """
CREATE OR REPLACE TABLE taxifare.feateng_valid_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
"""
def export_table_to_gcs(dataset_ref, source_table, destination_uri, client=None):
    """Export a BigQuery table to GCS as headerless CSV shards.

    :param dataset_ref: bigquery.Dataset reference containing the table.
    :param source_table: name of the table to export.
    :param destination_uri: gs:// URI pattern (may contain ``*``).
    :param client: optional bigquery.Client. Defaults to the module-level
        ``bq`` so existing call sites keep working; the explicit parameter
        removes the hidden dependency on a global defined only under
        ``__main__`` and makes the function testable.
    """
    client = client if client is not None else bq
    table_ref = dataset_ref.table(source_table)
    config = ExtractJobConfig()
    config.print_header = False  # shards are concatenated downstream
    extract_job = client.extract_table(
        table_ref,
        destination_uri,
        location="US",
        job_config=config,
    )
    extract_job.result()  # block until the export job finishes
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bucket",
        help="GCS bucket where datasets will be exported.",
        required=True
    )
    args = parser.parse_args()
    gs = "gs://"
    # Fix: `gs in args.bucket` matched the scheme *anywhere* in the string;
    # only a real prefix means the argument is already a full URI.
    bucket = args.bucket if args.bucket.startswith(gs) else os.path.join(gs, args.bucket)
    datadir = os.path.join(bucket, DATASET, 'data')
    train_export_path = os.path.join(datadir, "taxi-train-*.csv")
    valid_export_path = os.path.join(datadir, "taxi-valid-*.csv")
    bq = bigquery.Client()
    dataset_ref = bigquery.Dataset(bq.dataset("taxifare"))
    try:
        bq.create_dataset(dataset_ref)
        print("Dataset created")
    except Exception:
        # Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        print("Dataset already exists")
    print("Creating the training dataset...")
    bq.query(TRAIN_SQL).result()
    print("Creating the validation dataset...")
    bq.query(VALID_SQL).result()
    print("Exporting training dataset to GCS", train_export_path)
    export_table_to_gcs(dataset_ref, TRAIN_TABLE, train_export_path)
    print("Exporting validation dataset to GCS", valid_export_path)
    export_table_to_gcs(dataset_ref, VALID_TABLE, valid_export_path)
|
# This file is part of JST.
#
# JST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# JST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
# Canonical name for each operator token; compound assignments (+=, -=, ...)
# share the name of their base operator, exactly as the original chain did.
_OPERATOR_NAMES = {
    '+': 'PLUS', '+=': 'PLUS',
    '-': 'MINUS', '-=': 'MINUS',
    '*': 'MULT', '*=': 'MULT',
    '/': 'DIVIDE', '/=': 'DIVIDE',
    '%': 'MOD', '%=': 'MOD',
    '>>': 'RSHIFT', '>>=': 'RSHIFT',
    '<<': 'LSHIFT', '<<=': 'LSHIFT',
    '^': 'XOR', '^=': 'XOR',
    '~': 'BITNOT', '~=': 'BITNOT',
    '&': 'BITAND', '&=': 'BITAND',
    '|': 'BITOR', '|=': 'BITOR',
    '!': 'NOT',
    '!=': 'NOT_EQUAL',
    '==': 'EQUAL',
    '&&': 'AND',
    '||': 'OR',
    '<': 'LESS',
    '<=': 'LESS_EQUAL',
    '>': 'GREATER',
    '>=': 'GREATER_EQUAL',
    '->': 'ARROW',
    '.': 'DOT',
    '++': 'PLUS_PLUS',
    '--': 'MINUS_MINUS',
    '=': 'ASSIGN',
}


def operator_to_name(operator):
    """Return the symbolic name for a C-style operator token.

    Replaces a 26-branch if/elif chain with an O(1) table lookup.

    :param operator: operator lexeme such as '+', '>>=' or '&&'.
    :raises Exception: for tokens not in the table (same contract and
        message as the original implementation).
    """
    try:
        return _OPERATOR_NAMES[operator]
    except KeyError:
        raise Exception("Unknown operator can't be identified: {}".format(operator))
from .config import Config
from .oss import Oss
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
plt.rcParams["figure.figsize"] = (10, 6)


def _grouped_means(path, group=4):
    """Average every consecutive *group* of numbers on each line of *path*.

    Leftover values (fewer than *group* at a line's end) are dropped,
    matching the original accumulate-and-reset logic.
    """
    means = []
    with open(path) as handle:  # `with` closes the file; no manual close()
        for line in handle:
            values = [float(word) for word in line.split()]
            for start in range(0, len(values) - group + 1, group):
                means.append(sum(values[start:start + group]) / group)
    return means


def _read_all_floats(path):
    """Flatten every whitespace-separated number in *path* into one list."""
    with open(path) as handle:
        return [float(word) for line in handle for word in line.split()]


# Fixes over the original: the parsing loop was duplicated three times,
# shadowed the builtins `sum` and `file`, and called close() on handles a
# `with` block had already closed.
fox_data = _grouped_means('fox_data.txt')
cannon_data = _grouped_means('cannon_data.txt')
simple_matrix_data = _read_all_floats('simple_matrix_data.txt')
strassen_data = _read_all_floats('strassen_data.txt')

# Matrix orders 4, 8, 12, ... — one per measurement.
order = [4 * (k + 1) for k in range(len(fox_data))]
strassen_order = [4 * (k + 1) for k in range(len(strassen_data))]

plt.plot(order, fox_data, 'r')
plt.plot(order, cannon_data, 'b')
plt.plot(order, simple_matrix_data, 'g')
plt.plot(strassen_order, strassen_data, 'y')
plt.title("Execution Time for Fox and Cannon")
plt.ylabel("Time (in sec)")
plt.xlabel("Order of Matrix (n,n)")
red_patch = mpatches.Patch(color='red', label='Fox Algorithm')
blue_patch = mpatches.Patch(color='blue', label='Cannon Algorithm')
green_patch = mpatches.Patch(color='green', label='Normal Multiplication')
# Fix: legend typo 'Stression' -> 'Strassen'.
yellow_patch = mpatches.Patch(color='yellow', label='Strassen Algorithm')
plt.legend(handles=[green_patch, yellow_patch, red_patch, blue_patch])
plt.show()
|
# (c) Copyright 2014,2015,2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from freezerclient import exceptions
from freezerclient.v1.managers import actions
class TestActionManager(unittest.TestCase):
    """Unit tests for ActionManager with a fully mocked HTTP layer."""

    def setUp(self):
        # Fake freezer client carrying the attributes ActionManager reads.
        self.mock_client = mock.Mock()
        self.mock_response = mock.Mock()
        self.mock_client.endpoint = 'http://testendpoint:9999'
        self.mock_client.auth_token = 'testtoken'
        self.mock_client.client_id = 'test_client_id_78900987'
        self.action_manager = actions.ActionManager(self.mock_client)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_create(self, mock_requests):
        # Despite the name, this verifies endpoint/header construction only.
        self.assertEqual('http://testendpoint:9999/v1/actions/',
                         self.action_manager.endpoint)
        self.assertEqual({'X-Auth-Token': 'testtoken',
                          'Content-Type': 'application/json',
                          'Accept': 'application/json'},
                         self.action_manager.headers)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_create_ok(self, mock_requests):
        # 201 responses yield the new action_id.
        self.mock_response.status_code = 201
        self.mock_response.json.return_value = {'action_id': 'qwerqwer'}
        mock_requests.post.return_value = self.mock_response
        retval = self.action_manager.create({'action': 'metadata'})
        self.assertEqual('qwerqwer', retval)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_create_fail_when_api_return_error_code(self, mock_requests):
        self.mock_response.status_code = 500
        mock_requests.post.return_value = self.mock_response
        self.assertRaises(exceptions.ApiClientException,
                          self.action_manager.create, {'action': 'metadata'})

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_delete_ok(self, mock_requests):
        # Successful delete (204) returns nothing.
        self.mock_response.status_code = 204
        mock_requests.delete.return_value = self.mock_response
        retval = self.action_manager.delete('test_action_id')
        self.assertIsNone(retval)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_delete_fail(self, mock_requests):
        self.mock_response.status_code = 500
        mock_requests.delete.return_value = self.mock_response
        self.assertRaises(exceptions.ApiClientException,
                          self.action_manager.delete, 'test_action_id')

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_get_ok(self, mock_requests):
        self.mock_response.status_code = 200
        self.mock_response.json.return_value = {'action_id': 'qwerqwer'}
        mock_requests.get.return_value = self.mock_response
        retval = self.action_manager.get('test_action_id')
        self.assertEqual({'action_id': 'qwerqwer'}, retval)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_get_fails_on_error_different_from_404(self, mock_requests):
        self.mock_response.status_code = 500
        mock_requests.get.return_value = self.mock_response
        self.assertRaises(exceptions.ApiClientException,
                          self.action_manager.get, 'test_action_id')

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_get_none(self, mock_requests):
        # 404 is treated as "no such action", not as an error.
        self.mock_response.status_code = 404
        mock_requests.get.return_value = self.mock_response
        retval = self.action_manager.get('test_action_id')
        self.assertIsNone(retval)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_list_ok(self, mock_requests):
        self.mock_response.status_code = 200
        action_list = [{'action_id_0': 'bomboloid'},
                       {'action_id_1': 'asdfasdf'}]
        self.mock_response.json.return_value = {'actions': action_list}
        mock_requests.get.return_value = self.mock_response
        retval = self.action_manager.list()
        self.assertEqual(action_list, retval)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_list_error(self, mock_requests):
        # Unlike get(), list() treats 404 as an API error.
        self.mock_response.status_code = 404
        action_list = [{'action_id_0': 'bomboloid'},
                       {'action_id_1': 'asdfasdf'}]
        self.mock_response.json.return_value = {'clients': action_list}
        mock_requests.get.return_value = self.mock_response
        self.assertRaises(exceptions.ApiClientException,
                          self.action_manager.list)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_update_ok(self, mock_requests):
        # Successful PATCH returns the new document version number.
        self.mock_response.status_code = 200
        self.mock_response.json.return_value = {
            "patch": {"status": "bamboozled"},
            "version": 12,
            "action_id": "d454beec-1f3c-4d11-aa1a-404116a40502"
        }
        mock_requests.patch.return_value = self.mock_response
        retval = self.action_manager.update(
            'd454beec-1f3c-4d11-aa1a-404116a40502', {'status': 'bamboozled'})
        self.assertEqual(12, retval)

    @mock.patch('freezerclient.v1.managers.actions.requests')
    def test_update_raise_MetadataUpdateFailure_when_api_return_error_code(
            self, mock_requests):
        self.mock_response.json.return_value = {
            "patch": {"status": "bamboozled"},
            "version": 12,
            "action_id": "d454beec-1f3c-4d11-aa1a-404116a40502"
        }
        self.mock_response.status_code = 404
        self.mock_response.text = (
            '{"title": "Not Found","description":"No document found with ID '
            'd454beec-1f3c-4d11-aa1a-404116a40502x"}'
        )
        mock_requests.patch.return_value = self.mock_response
        self.assertRaises(exceptions.ApiClientException,
                          self.action_manager.update,
                          'd454beec-1f3c-4d11-aa1a-404116a40502',
                          {'status': 'bamboozled'})
|
#%%
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Input, Model, layers, Sequential
from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D,\
Flatten, BatchNormalization, Dense, GlobalAveragePooling2D, Dropout
#Allow the GPU memory growth for deep learning
physical_devices = tf.config.list_physical_devices('GPU')
# Fix: on CPU-only hosts list_physical_devices returns [], so indexing [0]
# unconditionally crashed with IndexError; only configure when a GPU exists.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# %%
#Build the residual block
class BasicBlock(Layer):
    """Two-conv residual block with an optional 1x1 downsampling shortcut."""

    def __init__(self, n_filters, stride=1):
        super(BasicBlock, self).__init__()
        # Main path: two 3x3 convolutions, each followed by dropout and BN.
        self.conv1 = Conv2D(n_filters, (3, 3), strides=stride, padding="same",
                            kernel_regularizer=keras.regularizers.l2(4e-3))
        self.bn1 = BatchNormalization()
        self.relu = layers.Activation("relu")
        self.conv2 = Conv2D(n_filters, (3, 3), strides=1, padding="same",
                            kernel_regularizer=keras.regularizers.l2(4e-3))
        self.bn2 = BatchNormalization()
        # Fix: create the Dropout layers once here. The original instantiated
        # a fresh Dropout inside call(), producing new, untracked layer
        # objects on every invocation.
        self.drop1 = Dropout(0.3)
        self.drop2 = Dropout(0.3)
        # Shortcut path: 1x1 conv when spatial size changes, identity otherwise.
        # NOTE(review): the shortcut conv hard-codes strides=2 rather than
        # using `stride` — fine while stride is only ever 1 or 2; confirm.
        if stride != 1:
            self.shortcut = Sequential()
            self.shortcut.add(Conv2D(n_filters, (1, 1), strides=2))
        else:
            self.shortcut = lambda x: x

    def call(self, inputs, training=None):
        # inputs: [b, h, w, c]
        out = self.conv1(inputs)
        # Explicitly forward `training` so Dropout/BatchNorm switch between
        # train and inference behaviour deterministically.
        out = self.drop1(out, training=training)
        out = self.bn1(out, training=training)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.drop2(out, training=training)
        out = self.bn2(out, training=training)
        out = self.relu(out)
        identity = self.shortcut(inputs)
        output = layers.add([out, identity])
        output = self.relu(output)
        return output
#%%
# Notebook-style smoke test: push a dummy CIFAR-shaped tensor through one
# downsampling block; the bare expression displays the result in a #%% cell.
inp = keras.Input(shape=(32,32,3))
layer1 = BasicBlock(64, stride=2)(inp)
layer1
# %%
# Build ResNet
class ResNet(Model):
    """ResNet assembled from BasicBlocks.

    ``block_distribution`` gives the number of BasicBlocks per stage,
    e.g. [2, 2, 2, 2] for ResNet-18.
    """

    def __init__(self, block_distribution, n_classes):  # e.g. [2,2,2,2]
        super(ResNet, self).__init__()
        # The first layer: convolutional stem before the residual stages.
        self.stem = Sequential([Conv2D(64, (3,3), strides=1),
                                BatchNormalization(),
                                layers.Activation("relu"),
                                MaxPool2D(pool_size=(2,2), strides=(1,1), padding="same")])
        # Four residual stages; stages 2-4 halve the spatial size (stride=2).
        self.layer1 = self.build_resblock(64, block_distribution[0])
        self.layer2 = self.build_resblock(128, block_distribution[1], stride=2)
        self.layer3 = self.build_resblock(256, block_distribution[2], stride=2)
        self.layer4 = self.build_resblock(512, block_distribution[3], stride=2)
        # Output: [b, h, w, 512]; h and w are unknown at build time, so a
        # global average pool collapses them.
        self.avgpool = GlobalAveragePooling2D()
        self.fc = Dense(n_classes)

    def call(self, inputs, training=None):
        x = self.stem(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # NOTE(review): GlobalAveragePooling2D already returns [b, 512], so
        # this Flatten appears to be a no-op — confirm before removing.
        x = Flatten()(self.avgpool(x))
        output = self.fc(x)
        return output

    def build_resblock(self, n_filters, n_blocks, stride=1):
        """Stack *n_blocks* BasicBlocks; only the first applies *stride*."""
        res_blocks = Sequential()
        res_blocks.add(BasicBlock(n_filters, stride))
        for _ in range(1, n_blocks):
            res_blocks.add(BasicBlock(n_filters, stride=1))
        return res_blocks
# %%
# Pre-define some ResNet models
def ResNet18(n_classes):
    """18-layer configuration: two BasicBlocks in each of the four stages."""
    return ResNet([2, 2, 2, 2], n_classes)


def ResNet34(n_classes):
    """34-layer configuration: [3, 4, 6, 3] BasicBlocks per stage."""
    return ResNet([3, 4, 6, 3], n_classes)
#%%
if __name__=="__main__":
    # Smoke test: build ResNet-18 for 100 classes and print its summary.
    resnet18 = ResNet18(100)
    resnet18.build(input_shape=(None, 32, 32, 3))
    resnet18.summary()
# %%
|
from __future__ import absolute_import, division, print_function
from .fcn8 import *
from .fcn16 import *
from .fcn32 import *
from .unet import *
from .pspnet import *
from .fast_scnn import *
# Registry mapping model-name strings to their constructor functions,
# built as a single literal instead of one assignment per entry.
model_from_name = {
    "fcn8_vgg11": fcn8_vgg11,
    "fcn8_vgg13": fcn8_vgg13,
    "fcn8_vgg16": fcn8_vgg16,
    "fcn8_vgg19": fcn8_vgg19,
    "fcn16_vgg11": fcn16_vgg11,
    "fcn16_vgg13": fcn16_vgg13,
    "fcn16_vgg16": fcn16_vgg16,
    "fcn16_vgg19": fcn16_vgg19,
    "fcn32_vgg11": fcn32_vgg11,
    "fcn32_vgg13": fcn32_vgg13,
    "fcn32_vgg16": fcn32_vgg16,
    "fcn32_vgg19": fcn32_vgg19,
    "fcn8_resnet18": fcn8_resnet18,
    "fcn8_resnet34": fcn8_resnet34,
    "fcn8_resnet50": fcn8_resnet50,
    "fcn8_resnet101": fcn8_resnet101,
    "fcn8_resnet152": fcn8_resnet152,
    "fcn16_resnet18": fcn16_resnet18,
    "fcn16_resnet34": fcn16_resnet34,
    "fcn16_resnet50": fcn16_resnet50,
    "fcn16_resnet101": fcn16_resnet101,
    "fcn16_resnet152": fcn16_resnet152,
    "fcn32_resnet18": fcn32_resnet18,
    "fcn32_resnet34": fcn32_resnet34,
    "fcn32_resnet50": fcn32_resnet50,
    "fcn32_resnet101": fcn32_resnet101,
    "fcn32_resnet152": fcn32_resnet152,
    "fcn8_mobilenet_v2": fcn8_mobilenet_v2,
    "fcn16_mobilenet_v2": fcn16_mobilenet_v2,
    "fcn32_mobilenet_v2": fcn32_mobilenet_v2,
    "unet": unet,
    "unet_vgg11": unet_vgg11,
    "unet_vgg13": unet_vgg13,
    "unet_vgg16": unet_vgg16,
    "unet_vgg19": unet_vgg19,
    "unet_resnet18": unet_resnet18,
    "unet_resnet34": unet_resnet34,
    "unet_resnet50": unet_resnet50,
    "unet_resnet101": unet_resnet101,
    "unet_resnet152": unet_resnet152,
    "unet_mobilenet_v2": unet_mobilenet_v2,
    "pspnet_vgg11": pspnet_vgg11,
    "pspnet_vgg13": pspnet_vgg13,
    "pspnet_vgg16": pspnet_vgg16,
    "pspnet_vgg19": pspnet_vgg19,
    "pspnet_resnet18": pspnet_resnet18,
    "pspnet_resnet34": pspnet_resnet34,
    "pspnet_resnet50": pspnet_resnet50,
    "pspnet_resnet101": pspnet_resnet101,
    "pspnet_resnet152": pspnet_resnet152,
    "pspnet_mobilenet_v2": pspnet_mobilenet_v2,
    "fast_scnn": fast_scnn,
}
|
"""
工具类
其实里面都只是数值计算函数
"""
import numpy as np
def cut_img_3D(img):
    """
    Crop a 3D volume to remove the all-zero "black border" slices.

    img: numpy array (3D)
    return a cut img with same axis number but not a fixed one: the crop
    tightens to the non-zero bounding box (plus a 1-voxel margin) and is
    then zero-padded back to a cube whose side is the longest cropped axis.
    """
    # buf collects, per axis, the first and last slice index containing any
    # non-zero voxel: [z0, z1, y0, y1, x0, x1].
    buf = []
    # First/last non-empty slice along axis 0.
    for i in range(img.shape[0]):
        temp = img[i, :, :]
        if(temp.sum() != 0):
            buf.append(i)
            break
    for i in range(img.shape[0]-1, -1, -1):
        temp = img[i, :, :]
        if(temp.sum() != 0):
            buf.append(i)
            break
    # First/last non-empty slice along axis 1.
    for i in range(img.shape[1]):
        temp = img[:, i, :]
        if(temp.sum() != 0):
            buf.append(i)
            break
    for i in range(img.shape[1]-1, -1, -1):
        temp = img[:, i, :]
        if(temp.sum() != 0):
            buf.append(i)
            break
    # First/last non-empty slice along axis 2.
    for i in range(img.shape[2]):
        temp = img[:, :, i]
        if(temp.sum() != 0):
            buf.append(i)
            break
    for i in range(img.shape[2]-1, -1, -1):
        temp = img[:, :, i]
        if(temp.sum() != 0):
            buf.append(i)
            break
    pw = 1  # extra margin voxels kept on each side so the object isn't clipped
    for i in range(3):
        if buf[2*i]-pw >= 0:
            buf[2*i] -= pw
    for i in range(3):
        if buf[2*i+1]+pw <= (img.shape[i]-1):
            buf[2*i+1] += pw
    # buf holds inclusive slice coordinates, hence the +1 on the stop index.
    cut_img = img[buf[0]:buf[1]+1, buf[2]:buf[3]+1, buf[4]:buf[5]+1]
    # Pad each axis symmetrically with zero slices up to the longest side so
    # the result is a cube. NOTE(review): padding dtype is forced to int16
    # even when img has another dtype — confirm all inputs are int16.
    max_length = max(cut_img.shape)
    zeros = np.zeros(shape=[1, cut_img.shape[1], cut_img.shape[2]], dtype=np.int16)
    letf_layers = max_length - cut_img.shape[0]
    for i in range(letf_layers//2):
        cut_img = np.concatenate((zeros, cut_img), axis=0)
    for i in range(letf_layers-letf_layers//2):
        cut_img = np.concatenate((cut_img, zeros), axis=0)
    zeros = np.zeros(shape=[cut_img.shape[0], 1, cut_img.shape[2]], dtype=np.int16)
    letf_layers = max_length - cut_img.shape[1]
    for i in range(letf_layers//2):
        cut_img = np.concatenate((zeros, cut_img), axis=1)
    for i in range(letf_layers-letf_layers//2):
        cut_img = np.concatenate((cut_img, zeros), axis=1)
    zeros = np.zeros(shape=[cut_img.shape[0], cut_img.shape[1], 1], dtype=np.int16)
    letf_layers = max_length - cut_img.shape[2]
    for i in range(letf_layers//2):
        cut_img = np.concatenate((zeros, cut_img), axis=2)
    for i in range(letf_layers-letf_layers//2):
        cut_img = np.concatenate((cut_img, zeros), axis=2)
    return cut_img
def gen_hole_mask(img, pix_val=1):
    """Build a binary mask selecting voxels equal to *pix_val*.

    :param img: numpy array of any shape; NOT modified.
    :param pix_val: label value to select (original code assumed non-zero).
    :return: int16 array of the same shape, 1 where img == pix_val else 0.
    """
    # Fix: the original took img[:,:,:] — a *view* — and wrote into it,
    # silently destroying the caller's array (callers had to reload the
    # volume before every call). It also never enforced the int16 dtype its
    # own comment asked for. An equality comparison on a fresh array fixes
    # both, and works for pix_val == 0 too (the in-place two-step broke there).
    mask = (np.asarray(img) == pix_val).astype(np.int16)
    return mask
def gen_mask(img):
    """Mask out the leading/trailing non-positive runs along the last axis.

    For every (i, j) line of *img*, voxels before the first positive value
    and after the last positive value become 0; everything else — including
    interior non-positive voxels — stays 1 (same contract as the original).

    :param img: 3D numpy array; not modified.
    :return: int16 array of the same shape.
    """
    positive = img > 0
    # True at or after the first positive voxel along axis 2 ...
    seen_left = np.maximum.accumulate(positive, axis=2)
    # ... and at or before the last positive voxel along axis 2.
    seen_right = np.maximum.accumulate(positive[:, :, ::-1], axis=2)[:, :, ::-1]
    # Replaces the original O(n^3) Python triple loop with vectorised ops.
    return (seen_left & seen_right).astype(np.int16)
def center_crop_3D(img, target_size):
    """Center-crop a 3D numpy array down to *target_size*.

    When the surplus on an axis is odd, one extra voxel is removed from the
    far end (cropping is the inverse of symmetric padding).

    :raises ValueError: if a target dimension exceeds the source size, or
        if target_size has more than three entries.
    """
    for axis, wanted in enumerate(target_size):
        available = img.shape[axis] if axis < 3 else None
        if axis > 2:
            raise ValueError("Dim crash")
        if available < wanted:
            raise ValueError("Unsupported target size")
        if available == wanted:
            continue
        surplus = available - wanted
        front = surplus // 2
        back = surplus - front  # back >= 1 here, so [front:-back] is safe
        if axis == 0:
            img = img[front:-back, :, :]
        elif axis == 1:
            img = img[:, front:-back, :]
        else:
            img = img[:, :, front:-back]
    return img
def option1_gen_whole_mask():
    """Batch job: write one binary mask NIfTI per tumour label (1, 2, 4)
    for every *seg.nii segmentation under the BraTS training directory.
    """
    import matplotlib
    matplotlib.use('TkAgg')
    from matplotlib import pylab as plt
    import nibabel as nib
    from nibabel import nifti1
    from nibabel.viewers import OrthoSlicer3D
    import os
    buf_seg = []
    # Collect every segmentation volume under the dataset root.
    for (dirName, subdirList, fileList) in os.walk("G:\\Datasets\\BraTS\\ToCrop\\MICCAI_BraTS2020_TrainingData"):
        for filename in fileList:
            if "seg.nii" in filename.lower():
                buf_seg.append(os.path.join(dirName, filename))
    for i, item in enumerate(buf_seg):
        # The volume is reloaded before each gen_hole_mask call so every
        # mask is derived from the pristine segmentation data.
        img0 = nib.load(item)
        img = np.array(img0.dataobj[:, :, :], dtype=np.int16)
        save_path = item[:-7]+"mask_v1.nii"
        mask = gen_hole_mask(img, pix_val=1)
        print(i+1, mask.shape, mask.dtype)
        data = mask
        affine = img0.affine
        new_image = nib.Nifti1Image(data, affine)
        nib.save(new_image, save_path)
        img = np.array(img0.dataobj[:, :, :], dtype=np.int16)
        save_path = item[:-7]+"mask_v2.nii"
        mask = gen_hole_mask(img, pix_val=2)
        print(i+1, mask.shape, mask.dtype)
        data = mask
        affine = img0.affine
        new_image = nib.Nifti1Image(data, affine)
        nib.save(new_image, save_path)
        img = np.array(img0.dataobj[:, :, :], dtype=np.int16)
        save_path = item[:-7]+"mask_v4.nii"
        mask = gen_hole_mask(img, pix_val=4)
        print(i+1, mask.shape, mask.dtype)
        data = mask
        affine = img0.affine
        new_image = nib.Nifti1Image(data, affine)
        nib.save(new_image, save_path)
def option2_gen_mask():
    """Batch job: write a head/background mask NIfTI for every t1/t2 volume
    under the BraTS directory (saved as *mask_t1_v0.nii / *mask_t2_v0.nii).
    """
    import matplotlib
    matplotlib.use('TkAgg')
    from matplotlib import pylab as plt
    import nibabel as nib
    from nibabel import nifti1
    from nibabel.viewers import OrthoSlicer3D
    import os
    buf_A = []
    buf_B = []
    # Collect the T1 and T2 volumes separately.
    for (dirName, subdirList, fileList) in os.walk("G:\\Datasets\\BraTS\\ToCrop"):
        for filename in fileList:
            if "t1.nii" in filename.lower():
                buf_A.append(os.path.join(dirName, filename))
            if "t2.nii" in filename.lower():
                buf_B.append(os.path.join(dirName, filename))
    for i, item in enumerate(buf_A):
        save_path = item[:-6]+"mask_t1_v0.nii"
        img0 = nib.load(item)
        # Force int16: some samples ship with other dtypes.
        img = np.array(img0.dataobj[:, :, :], dtype=np.int16)
        mask = gen_mask(img)
        print(i+1, mask.shape, mask.dtype)
        data = mask
        affine = img0.affine
        new_image = nib.Nifti1Image(data, affine)
        nib.save(new_image, save_path)
    for i, item in enumerate(buf_B):
        save_path = item[:-6]+"mask_t2_v0.nii"
        img0 = nib.load(item)
        img = np.array(img0.dataobj[:, :, :], dtype=np.int16)
        mask = gen_mask(img)
        print(i+1, mask.shape, mask.dtype)
        data = mask
        affine = img0.affine
        new_image = nib.Nifti1Image(data, affine)
        nib.save(new_image, save_path)
if __name__ == "__main__":
    # The commented block below is a kept-for-reference visual check:
    # it crops one volume and renders its marching-cubes surface in 3D.
    # import matplotlib
    # matplotlib.use('TkAgg')
    # from matplotlib import pylab as plt
    # import nibabel as nib
    # from nibabel import nifti1
    # from nibabel.viewers import OrthoSlicer3D
    # example_filename = 'G:\\Datasets\\BraTS\\Collections\\HGG\\Brats18_2013_17_1\\Brats18_2013_17_1_t2.nii'
    # img = nib.load(example_filename)
    # img = np.array(img.dataobj[:,:,:])
    # cut_img = cut_img_3D(img)
    # print(cut_img.shape)
    # from skimage import measure
    # from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    # def plot_3d(image, threshold=0):
    #     # Position the scan upright,
    #     # so the head of the patient would be at the top facing the camera
    #     p = image#.transpose(2,1,0)
    #     verts, faces, norm, val = measure.marching_cubes_lewiner(p,threshold,step_size=1, allow_degenerate=True)
    #     #verts, faces = measure.marching_cubes_classic(p,threshold)
    #     fig = plt.figure(figsize=(10, 10))
    #     ax = fig.add_subplot(111, projection='3d')
    #     # Fancy indexing: `verts[faces]` to generate a collection of triangles
    #     mesh = Poly3DCollection(verts[faces], alpha=0.7)
    #     face_color = [0.45, 0.45, 0.75]
    #     mesh.set_facecolor(face_color)
    #     ax.add_collection3d(mesh)
    #     ax.set_xlim(0, p.shape[0])
    #     ax.set_ylim(0, p.shape[1])
    #     ax.set_zlim(0, p.shape[2])
    #     plt.show()
    # plot_3d(cut_img)
    # option1_gen_whole_mask()
    option2_gen_mask()
|
import asyncio
from .ModbusProtocolRtu import ModbusProtocolRtu
import logging
_logger = logging.getLogger(__name__)
class Transport:
    """Abstract async transport for Modbus communication.

    Concrete subclasses override the coroutine hooks below; this base
    class provides no-op implementations so partial overrides work.
    """

    def __init__(self, **kwargs):
        # Allow callers to inject their own logger; fall back to this
        # module's logger. (Removed a redundant trailing `pass`.)
        self.logger = kwargs.get('logger', _logger)

    async def connect(self, serial):
        """Open the underlying connection. No-op in the base class."""
        pass

    async def send(self, data):
        """Send raw bytes over the transport. No-op in the base class."""
        pass

    async def receive(self, size):
        """Receive up to ``size`` bytes. No-op in the base class."""
        pass

    async def close(self):
        """Close the transport. No-op in the base class."""
        pass
|
#!/usr/bin/env python
import sys
import math
points = []
#files = ['Control182localeq','Control189localeq','Control239localeq']
class Plot3d():
    def getNodesAndEdges(infile):
        """Read x,y,z points from ``infile`` and write node/edge CSV files.

        One node row is written per point; an edge row is written for every
        pair of points whose Euclidean distance is below ``radius``.

        NOTE(review): the first parameter is the open file, not ``self``, so
        this must be called through the class, not an instance. Output file
        names come from ``sys.argv[1]`` rather than ``infile`` — presumably
        intentional, but verify against callers.
        """
        inpath = 'csv/'
        outpath = 'graphml/'
        # BUG FIX: original had a mismatched bracket (`[:-4}`), a syntax error.
        filename = infile.name[:-4] if infile.name.endswith('.csv') else infile.name
        infilename = inpath + sys.argv[1] + '.csv'
        nodename = outpath + sys.argv[1] + '.nodes.csv'
        edgename = outpath + sys.argv[1] + '.edges.csv'
        for line in infile:
            line = line.strip().split(',')
            points.append(str(line[0]) + "," + str(line[1]) + "," + str(line[2]))
        with open(nodename, 'w') as nodefile:
            with open(edgename, 'w') as edgefile:
                radius = 18  # link points closer than this distance (hoisted out of the loop)
                for ind in range(len(points)):
                    temp = points[ind].strip().split(',')
                    x = temp[0]
                    y = temp[1]
                    z = temp[2]
                    nodefile.write("s" + str(ind + 1) + "," + str(x) + "," + str(y) + "," + str(z) + "\n")
                    for index in range(ind + 1, len(points)):
                        tmp = points[index].strip().split(',')
                        distance = math.sqrt(math.pow(int(x) - int(tmp[0]), 2) + math.pow(int(y) - int(tmp[1]), 2) + math.pow(int(z) - int(tmp[2]), 2))
                        if distance < radius:
                            edgefile.write("s" + str(ind + 1) + "," + "s" + str(index + 1) + "\n")
        # NOTE(review): both handles are closed when the `with` blocks exit,
        # so the returned objects cannot be written to by the caller.
        return [nodefile, edgefile]
#Legacy implementation (superseded by Plot3d.getNodesAndEdges) =================
#for f in files:
# filename = inpath + f + '.csv'
# nodename = outpath + f + '.nodes.csv'
# edgename = outpath + f + '.edges.csv'
# #with open(sys.argv[1], 'r') as infile:
# with open(filename, 'r') as infile:
# for line in infile:
# line = line.strip().split(',')
# points.append(str(line[0]) + "," + str(line[1]) + "," + str(line[2]))
#
# #with open(sys.argv[2], 'w') as outfile:
# with open(nodename, 'w') as outfile:
# #with open(sys.argv[3], 'w') as edgefile:
# with open(edgename, 'w') as edgefile:
# for ind in range(len(points)):
# temp = points[ind].strip().split(',')
# x = temp[0]
# y = temp[1]
# z = temp[2]
# radius = 18
# outfile.write("s" + str(ind + 1) + "," + str(x) + "," + str(y) + "," + str(z) + "\n")
# for index in range(ind + 1, len(points)):
# tmp = points[index].strip().split(',')
# distance = math.sqrt(math.pow(int(x) - int(tmp[0]), 2) + math.pow(int(y) - int(tmp[1]), 2) + math.pow(int(z) - int(tmp[2]), 2))
# if distance < radius:
# edgefile.write("s" + str(ind + 1) + "," + "s" + str(index + 1) + "\n")
|
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import random
##### Init class #####
db = SQLAlchemy()
##### Models #####
class User(db.Model):
    """A registered user who can author jokes (see ``jokes`` relationship)."""

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    username = db.Column(db.String(100), unique=True, nullable=False)
    # NOTE(review): passwords are stored verbatim; they should be hashed
    # (e.g. werkzeug.security.generate_password_hash) before storage.
    password = db.Column(db.String(100), unique=False, nullable=False)
    name = db.Column(db.String(80), nullable=False)
    date_created = db.Column(db.DateTime, nullable=False,
                             default=datetime.utcnow)
    jokes = db.relationship('Joke', backref='author', lazy=True)

    def __init__(self, username, name, password):
        self.username = username
        self.password = password
        self.name = name

    def __repr__(self):
        # BUG FIX: delimiters were unbalanced ('<User(..., ...> ').
        return f"<User({self.id}, {self.username}, {self.name})>"

    @classmethod
    def insert(cls, name, username, password):
        '''
        inserts user to the database

        Parameters:
        -----------
        name: string
        username: string
        password: string
        '''
        user = cls(name=name, username=username, password=password)
        # add to db and commit
        db.session.add(user)
        db.session.commit()

    @classmethod
    def update(cls, id, name, username, password):
        '''
        updates user record with id=id with the new record

        Parameters
        ----------
        id: int
        name: string
        username: string
        password: string
        '''
        query = cls.query.filter_by(id=id).first()
        # update values
        query.name = name
        query.username = username
        query.password = password
        # commit changes to db
        db.session.commit()

    @classmethod
    def getUser(cls, id):
        '''
        gets the user by id

        Parameters:
        -----------
        id: int

        Returns:
        -------
        query: user object, or None when no such id exists
        '''
        return cls.query.filter_by(id=id).first()

    @classmethod
    def getByUsername(cls, username):
        '''
        gets user by username

        Parameters:
        -----------
        username: string

        Returns:
        --------
        query: user object, or None when no such username exists
        '''
        return cls.query.filter_by(username=username).first()

    @classmethod
    def getUserJokes(cls, username):
        '''
        gets jokes associated with username

        Parameters:
        ----------
        username: string

        Returns:
        --------
        jokes: list of joke objects
        '''
        query = cls.query.filter_by(username=username).first()
        return query.jokes

    @classmethod
    def getUserId(cls, username):
        '''
        get id associated with the given username
        '''
        # renamed local (was `id`, shadowing the builtin)
        user_id = cls.query.filter_by(username=username).first().id
        return user_id
class Joke(db.Model):
    """A joke written by a user (reachable as ``Joke.author`` via backref)."""

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    joke = db.Column(db.Text, nullable=False)
    date_created = db.Column(db.DateTime, nullable=False,
                             default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __init__(self, joke, user_id):
        self.joke = joke
        self.user_id = user_id

    def __repr__(self):
        # BUG FIX: this model has no `user` attribute (the backref is
        # `author`), and the class name/delimiters were wrong.
        return f"<Joke({self.author}, {self.joke[:20]}...)>"

    @classmethod
    def insert(cls, joke, user_id):
        '''
        insert joke into db

        Parameters:
        -----------
        joke: string
        user_id: int
        '''
        record = cls(joke=joke, user_id=user_id)
        # add to db and commit
        db.session.add(record)
        db.session.commit()

    @classmethod
    def update(cls, id, joke):
        '''
        updates existing joke record

        Parameters:
        -----------
        id: int
        joke: string
        '''
        query = cls.query.get(id)
        # update values in query
        query.joke = joke
        query.date_created = datetime.utcnow()
        # commit the updates
        db.session.commit()

    @classmethod
    def getRandomJoke(cls):
        '''
        gets a random joke from the database

        Returns
        -------
        joke: Joke object, or None when the table is empty
        '''
        jokes = cls.query.all()
        if not jokes:
            # guard: random.randint(0, -1) raised ValueError on an empty table
            return None
        return random.choice(jokes)

    @classmethod
    def delete(cls, id):
        '''
        deletes joke with id=id from the database

        Parameters:
        ----------
        id: integer
        '''
        query = cls.query.get(id)
        db.session.delete(query)
        db.session.commit()

    @classmethod
    def get(cls, id):
        '''
        gets joke with id=id

        Parameters:
        -----------
        id: Integer
        '''
        return cls.query.get(id)
|
#Bit server start and requesr handiling file
# there will be a peice of updated code that get parameteters from the path
from urlparse import urlparse
#Import the map file
from map import mapRequest
#import the scripts needed to run the http server request
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
# this function gets the parameters from a get request
# it returns a dictionary with the parameters names and their data
def getParams(path):
    """Extract query-string parameters from a request path.

    Returns a dict mapping parameter names to values, or an empty string
    when the path has no query string (kept for backward compatibility
    with existing callers that test the result's truthiness).
    """
    if path.find('?') != -1:
        query = urlparse(path).query
        # parenthesized print works identically in Python 2 and 3
        print(query)
        return dict(qc.split("=") for qc in query.split("&"))
    return ""
def withoutParams(path):
    """Return ``path`` with any query string (everything from '?') stripped."""
    base, _sep, _query = path.partition('?')
    return base
class MyHandler(BaseHTTPRequestHandler):
    # Python 2 request handler: routes GET requests through the project's
    # mapRequest() and echoes PUT bodies to the console.
    def do_GET(self):
        """Serve a GET request by delegating to mapRequest()."""
        # this if statement sees if the index of the project has been
        # requested by asking for the root
        if self.path == "/":
            self.send_response(200)
            self.send_header('Content-type','text/html')
            self.end_headers()
            #get the right rought from the map file and render the page
            self.wfile.write(mapRequest(withoutParams(self.path),getParams(self.path)))
        #this control statement chatches if the browser requests for the favicon
        # and since the project does not have on yet we will ignore this request
        # other wise this is the normal request
        # NOTE(review): favicon requests get no response at all (no
        # send_response), which leaves the client waiting; also this branch
        # duplicates the root branch — consider merging them.
        elif self.path != "/favicon.ico":
            self.send_response(200)
            self.send_header('Content-type','text/html')
            self.end_headers()
            #get the right template and send the right parameters to the par requester
            self.wfile.write(mapRequest(withoutParams(self.path),getParams(self.path)))
    def do_PUT(self):
        """Log a PUT request's headers and body, then acknowledge with 200."""
        print "----- SOMETHING WAS PUT!! ------"
        print self.headers
        length = int(self.headers['Content-Length'])
        content = self.rfile.read(length)
        self.send_response(200)
        print content
# Start the server. NOTE(review): the bind address and privileged port 80 are
# hard-coded — consider making them configurable.
httpd = SocketServer.TCPServer(("10.0.1.5", 80), MyHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
|
from ex112.utilidadescev import moeda
from ex112.utilidadescev import dado
# Read a monetary amount from the user, then print a summary that shows the
# price increased by 35% and discounted by 22% (see moeda.resumo).
p= dado.leiaDinheiro('Digite o Preço R$: ')
moeda.resumo(p, 35, 22)
|
from django.conf.urls import patterns, include, url
from django.contrib.auth.views import login, logout
from main_menu import admin_custom as admin
from django.views.generic.base import RedirectView
#from django.contrib import admin
# Register ModelAdmins from installed apps with the custom admin site.
admin.autodiscover()
# NOTE(review): this uses the pre-Django-1.10 `patterns()` helper and string
# view references, both removed in modern Django; keep in mind for upgrades.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'mysite.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    (r'^accounts/login/$', 'django.contrib.auth.views.login'),
    (r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page' : '/accounts/login'}),
    (r'^$', 'main_menu.views.index'),
    #(r'^index/$', RedirectView.as_view(url='localhost:8000'),'main_menu.views.index'),
    #(r'^admin_page/$', 'main_menu.views.admin_page'),
    (r'^user/$', 'main_menu.views.user_page'),
    (r'^auth_token/$', 'main_menu.views.auth_token'),
    (r'^token_data/$', 'main_menu.views.token_data'),
    #(r'^admin/$', 'main_menu.views.admin_page'),
    (r'^index/$','main_menu.views.index'),
    (r'^index/demo/$','main_menu.views.demo'),
    (r'^demo/$','main_menu.views.demo'),
    # NOTE(review): this pattern (and /user_price/, /user_periodic/ below)
    # lacks the ^ anchor, so it matches any URL ending in the suffix —
    # confirm whether that is intentional.
    (r'/admin/$', 'main_menu.views.admin_page'),
    (r'^demo/admin/$', 'main_menu.views.admin_page'),
    (r'^auth_token_user/$', 'main_menu.views.auth_token_user'),
    (r'^index/demo/user/$', 'main_menu.views.user_page'),
    (r'^demo/user/$', 'main_menu.views.user_page'),
    (r'/user_price/$', 'main_menu.views.user_page_calculate_price'),
    (r'/user_periodic/$', 'main_menu.views.user_page_periodic_price'),
    #(r'/calculate_price/$', 'main_menu.views.calculate_price'),
    #(r'^admin/main_menu/stackuser/calculate_price$','main_menu.admin.calculate_price'),
    #(r'^js/jquery-1.11.0.min.js/$', include("static/js/jquery-1.11.0.min.js")),
    #(r'^js/jquery-ui-1.10.4.min.js/$', include("static/js/jquery-ui-1.10.4.min.js")),
    #(r'^js/ui.js/$', include("static/js/ui.js")),
    #(r'^/define_pricing/?ct=(?P<ct>\d+)/$', 'main_menu.views.define_pricing'),
)
|
# Author: Luka Maletin
from data_structures.stack import Stack
OPERATORS = '&|!'
def calculate(d1, d2, operator):
    """Apply a set-algebra operator ('&', '|' or '!') to two dicts.

    Returns None for an unknown operator, like the original if/elif chain.
    """
    dispatch = {
        '&': intersection,
        '|': union,
        '!': difference,
    }
    operation = dispatch.get(operator)
    if operation is not None:
        return operation(d1, d2)
def evaluate(expression):
    """Evaluate a postfix expression whose tokens are strings.

    Operator tokens ('&', '|', '!') pop two operands and push the result;
    other string tokens are pushed as operands. Non-string tokens are
    skipped entirely (matching the original behavior).
    """
    stack = Stack()
    for token in expression:
        if not isinstance(token, str):
            continue  # non-string tokens are ignored, as before
        if token in OPERATORS:
            rhs = stack.pop()  # second operand
            lhs = stack.pop()  # first operand
            stack.push(calculate(lhs, rhs, token))
        else:
            stack.push(token)
    return stack.pop()
def intersection(d1, d2):
    """Return d1's entries whose keys also appear in d2 (d1's values kept)."""
    return {key: value for key, value in d1.items() if key in d2}
def union(d1, d2):
    """Return all entries of d1 and d2; on key clashes d1's value wins.

    Key order matches the original: d1's keys first, then d2-only keys.
    """
    merged = dict(d1)
    for key, value in d2.items():
        merged.setdefault(key, value)
    return merged
def difference(d1, d2):
    """Return d1's entries whose keys are absent from d2."""
    return {key: value for key, value in d1.items() if key not in d2}
|
import sys
from requests import get
from core.colors import bad, info, red, green, end
def honeypot(inp):
    """Query Shodan's honeyscore API for ``inp`` and print the result.

    Prints the honeypot probability colored green (< 0.5) or red, or an
    error line when the lookup fails or returns a non-numeric body.
    """
    # NOTE(review): the API key is hard-coded; move it to configuration.
    honey = 'https://api.shodan.io/labs/honeyscore/%s?key=C23OXE0bVMrul2YeqcL7zxb6jZ4pj2by' % inp
    try:
        result = get(honey).text
        # the API returns a bare float; anything else (error page, empty
        # body) previously crashed or was silently dropped
        score = float(result)
    except Exception:
        sys.stdout.write('%s No information available' % bad + '\n')
        return
    color = green if score < 0.5 else red
    probability = str(score * 10)
    # typo fixed: "Probabilty" -> "Probability"
    sys.stdout.write('%s Honeypot Probability: %s%s%%%s' %
                     (info, color, probability, end) + '\n')
|
from django.contrib.auth import get_user_model
from django_analyses.models.run import Run
from django_analyses.serializers.analysis_version import (
AnalysisVersionSerializer,
)
from rest_auth.serializers import UserDetailsSerializer
from rest_framework import serializers
User = get_user_model()
class MiniUserSerializer(UserDetailsSerializer):
    """
    Minified serializer class for the :class:`User` model.
    """

    # Computed field resolved by get_full_name() below.
    full_name = serializers.SerializerMethodField()

    class Meta(UserDetailsSerializer.Meta):
        fields = (
            "id",
            "username",
            "first_name",
            "last_name",
            "full_name",
            "email",
        )

    def get_full_name(self, instance: User) -> str:
        # NOTE(review): assumes every user has a related `profile` exposing
        # get_full_name() — users without a profile would raise here; confirm.
        return instance.profile.get_full_name(include_title=False)
class RunSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for :class:`Run` instances with nested user and version."""

    user = MiniUserSerializer()
    analysis_version = AnalysisVersionSerializer()

    class Meta:
        model = Run
        fields = (
            "id",
            "user",
            "analysis_version",
            "created",
            "modified",
            "start_time",
            "end_time",
            "duration",
            "status",
        )

    def duration(self, instance: Run):
        # BUG FIX: use the `instance` argument rather than `self.instance`,
        # which is unset when serializing querysets (many=True).
        # NOTE(review): "duration" is not declared as a SerializerMethodField;
        # confirm how this method is wired to the field of the same name.
        return instance.duration
|
# Hand of Straights
# Alice has a hand of cards, given as an array of integers.
# Now she wants to rearrange the cards into groups so that each group is size W,
# and consists of W consecutive cards. Return true if and only if she can.
# hand = [1,2,3,6,2,3,4,7,8], W = 3 => True
# hand = [5,1], W = 2 => False
# Note: consecutive means no nearby cards can be the same number
class Solution(object):
    def isNStraightHand(self, hand, W):
        """
        Greedily open straights of length W at the smallest remaining card.

        :type hand: List[int]
        :type W: int
        :rtype: bool
        """
        # BUG FIX: `collections` was never imported in this file, causing a
        # NameError at runtime; imported locally since the module header is
        # not available here.
        import collections
        counter = collections.Counter(hand)
        for n in sorted(counter.keys()):
            if counter[n] > 0:
                # should have counter[n] number of straights starting from n;
                # every card in n..n+W-1 must be available that many times
                use_cnt = counter[n]
                for m in range(n, n + W):
                    if counter[m] < use_cnt:
                        return False
                    counter[m] -= use_cnt
        return True
# O(mlogm) time, O(m) space, m being number of distinct cards
# Followup:
# what if W is very large, can we do better than reducing the count card by card?
class SolutionF1(object):
    def isNStraightHand(self, hand, W):
        """O(m log m) follow-up: sweep distinct cards once, tracking how many
        straights are currently open, instead of decrementing card by card.

        :type hand: List[int]
        :type W: int
        :rtype: bool
        """
        # BUG FIX: `collections` was never imported in this file; imported
        # locally since the module header is not available here.
        import collections
        c = collections.Counter(hand)
        start = collections.deque()
        last_checked, opened = -1, 0
        for i in sorted(c):
            # a drop in multiplicity or a gap means open straights can't finish
            if opened > c[i] or opened > 0 and i > last_checked + 1:
                return False
            start.append(c[i] - opened)
            last_checked, opened = i, c[i]
            if len(start) == W:
                opened -= start.popleft()
        return opened == 0
|
from anonapi.client import WebAPIClient
def anonymize_files_sop_class_filter():
    """Create an IDIS job that pulls files from the hospital information system.

    Submits a WADO job to the sandbox API that anonymizes one study and keeps
    only files whose SOPClassUID is in the given filter list.
    """
    # Create a client that will talk to the web API
    client = WebAPIClient(
        hostname="https://umcradanonp11.umcn.nl/sandbox",
        username="z123sandbox",
        token="token",
    )

    # Create a job that takes data from the IDC (formally IMPAX) directly
    # and allow only files that match the given SOPClassUIDs. For a full list of
    # possible SOPClassUIDs see https://www.dicomlibrary.com/dicom/sop/
    anon_name = "TEST_NAME_03"
    anon_id = "03"
    sid = "123.12335.3353.36464.343435677"  # study UID
    destination_path = r"\\umcsanfsclp01\radng_imaging\temptest_output"

    idc_job_info = client.post(
        "create_job",
        source_type="WADO",
        source_name="IDC_WADO",
        source_instance_id=sid,
        source_sop_class_filter_list="1.2.840.10008.5.1.4.1.1.88.67, 1.2.840.10008.5.1.4.1.1.7",
        anonymizedpatientname=anon_name,
        anonymizedpatientid=anon_id,
        destination_type="PATH",
        project_name="Wetenschap-Algemeen",
        destination_path=destination_path,
        description="A test idc job",  # dropped needless f-string prefix
    )
    # typo fixed: "Succesfully" -> "Successfully"
    print(f"Successfully created a job in {client}, job_id={idc_job_info['job_id']}")
if __name__ == "__main__":
anonymize_files_sop_class_filter()
|
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper functions for running DDSP colab notebooks."""
import base64
import io
import tempfile
import ddsp
from IPython import display
import librosa
import matplotlib.pyplot as plt
import numpy as np
from pydub import AudioSegment
from scipy.io import wavfile
import tensorflow.compat.v2 as tf
from google.colab import files
from google.colab import output
download = files.download
DEFAULT_SAMPLE_RATE = 16000
_play_count = 0 # Used for ephemeral play().
def play(array_of_floats,
         sample_rate=DEFAULT_SAMPLE_RATE,
         ephemeral=True,
         autoplay=False):
  """Creates an HTML5 audio widget to play a sound in Colab.

  This function should only be called from a Colab notebook.

  Args:
    array_of_floats: A 1D or 2D array-like container of float sound samples.
      Values outside of the range [-1, 1] will be clipped.
    sample_rate: Sample rate in samples per second.
    ephemeral: If set to True, the widget will be ephemeral, and disappear on
      reload (and it won't be counted against realtime document size).
    autoplay: If True, automatically start playing the sound when the widget is
      rendered.
  """
  # If batched, take first element.
  if len(array_of_floats.shape) == 2:
    array_of_floats = array_of_floats[0]

  normalizer = float(np.iinfo(np.int16).max)
  # BUG FIX: clip to [-1, 1] as the docstring promises; without this,
  # out-of-range samples wrap around when converted to int16.
  array_of_ints = np.array(
      np.clip(np.asarray(array_of_floats), -1.0, 1.0) * normalizer,
      dtype=np.int16)
  memfile = io.BytesIO()
  wavfile.write(memfile, sample_rate, array_of_ints)
  html = """<audio controls {autoplay}>
              <source controls src="data:audio/wav;base64,{base64_wavfile}"
              type="audio/wav" />
              Your browser does not support the audio element.
            </audio>"""
  html = html.format(
      autoplay='autoplay' if autoplay else '',
      base64_wavfile=base64.b64encode(memfile.getvalue()).decode('ascii'))
  memfile.close()
  global _play_count
  _play_count += 1
  if ephemeral:
    element = 'id_%s' % _play_count
    display.display(display.HTML('<div id="%s"> </div>' % element))
    js = output._js_builder  # pylint:disable=protected-access
    js.Js('document', mode=js.EVAL).getElementById(element).innerHTML = html
  else:
    display.display(display.HTML(html))
def record(seconds=3,
           sample_rate=DEFAULT_SAMPLE_RATE,
           normalize_db=0.1):
  """Record audio from the browser in colab using javascript.

  Based on: https://gist.github.com/korakot/c21c3476c024ad6d56d5f48b0bca92be
  Args:
    seconds: Number of seconds to record.
    sample_rate: Resample recorded audio to this sample rate.
    normalize_db: Normalize the audio to this many decibels. Set to None to skip
      normalization step.

  Returns:
    An array of the recorded audio at sample_rate.
  """
  # Use Javascript to record audio: capture the microphone via
  # MediaRecorder, then hand the result back as a base64 data URL.
  record_js_code = """
  const sleep = time => new Promise(resolve => setTimeout(resolve, time))
  const b2text = blob => new Promise(resolve => {
    const reader = new FileReader()
    reader.onloadend = e => resolve(e.srcElement.result)
    reader.readAsDataURL(blob)
  })

  var record = time => new Promise(async resolve => {
    stream = await navigator.mediaDevices.getUserMedia({ audio: true })
    recorder = new MediaRecorder(stream)
    chunks = []
    recorder.ondataavailable = e => chunks.push(e.data)
    recorder.start()
    await sleep(time)
    recorder.onstop = async ()=>{
      blob = new Blob(chunks)
      text = await b2text(blob)
      resolve(text)
    }
    recorder.stop()
  })
  """
  print('Starting recording for {} seconds...'.format(seconds))
  display.display(display.Javascript(record_js_code))
  # Blocks until the JS promise resolves with the base64-encoded recording.
  audio_string = output.eval_js('record(%d)' % (seconds*1000.0))
  print('Finished recording!')
  # Strip the "data:...;base64," prefix before decoding.
  audio_bytes = base64.b64decode(audio_string.split(',')[1])
  return audio_bytes_to_np(audio_bytes,
                           sample_rate=sample_rate,
                           normalize_db=normalize_db)
def audio_bytes_to_np(wav_data,
                      sample_rate=DEFAULT_SAMPLE_RATE,
                      normalize_db=0.1):
  """Convert audio file data (in bytes) into a numpy array.

  Saves to a tempfile and loads with librosa.
  Args:
    wav_data: A byte stream of audio data.
    sample_rate: Resample recorded audio to this sample rate.
    normalize_db: Normalize the audio to this many decibels. Set to None to skip
      normalization step.

  Returns:
    An array of the recorded audio at sample_rate.
  """
  # Parse and normalize the audio.
  audio = AudioSegment.from_file(io.BytesIO(wav_data))
  # BUG FIX: pydub AudioSegments are immutable — remove_dc_offset() and
  # normalize() return new segments; the originals discarded their results,
  # so neither operation had any effect.
  audio = audio.remove_dc_offset()
  if normalize_db is not None:
    audio = audio.normalize(headroom=normalize_db)
  # Save to tempfile and load with librosa.
  with tempfile.NamedTemporaryFile(suffix='.wav') as temp_wav_file:
    fname = temp_wav_file.name
    audio.export(fname, format='wav')
    audio_np, unused_sr = librosa.load(fname, sr=sample_rate)
  return audio_np.astype(np.float32)
def upload(sample_rate=DEFAULT_SAMPLE_RATE, normalize_db=None):
  """Load a collection of audio files (.wav, .mp3) from disk into colab.

  Args:
    sample_rate: Resample recorded audio to this sample rate.
    normalize_db: Normalize the audio to this many decibels. Set to None to skip
      normalization step.

  Returns:
    An tuple of lists, (filenames, numpy_arrays).
  """
  # Opens the Colab file picker; blocks until the user uploads file(s).
  audio_files = files.upload()
  fnames = list(audio_files.keys())
  audio = []
  for fname in fnames:
    # Decode each uploaded byte blob into a float32 array at sample_rate.
    file_audio = audio_bytes_to_np(audio_files[fname],
                                   sample_rate=sample_rate,
                                   normalize_db=normalize_db)
    audio.append(file_audio)
  return fnames, audio
def specplot(audio,
             vmin=-5,
             vmax=1,
             rotate=True,
             size=512 + 256,
             **matshow_kwargs):
  """Plot the log magnitude spectrogram of audio.

  Args:
    audio: 1D array of samples, or 2D batch (first element is used).
    vmin: Lower bound of the color scale (log magnitude).
    vmax: Upper bound of the color scale (log magnitude).
    rotate: If True, rotate so time runs along the x-axis.
    size: FFT size passed to ddsp's spectral op.
    **matshow_kwargs: Forwarded to plt.matshow.
  """
  # If batched, take first element.
  if len(audio.shape) == 2:
    audio = audio[0]

  logmag = ddsp.spectral_ops.compute_logmag(ddsp.core.tf_float32(audio),
                                            size=size)
  if rotate:
    logmag = np.rot90(logmag)
  # Plotting.
  plt.matshow(logmag,
              vmin=vmin,
              vmax=vmax,
              cmap=plt.cm.magma,
              aspect='auto',
              **matshow_kwargs)
  plt.xticks([])
  plt.yticks([])
  plt.xlabel('Time')
  plt.ylabel('Frequency')
def transfer_function(ir, sample_rate=DEFAULT_SAMPLE_RATE):
  """Get true transfer function from an impulse_response.

  Args:
    ir: Impulse response tensor; the FFT size is derived from its last axis.
    sample_rate: Sample rate used to compute the frequency bins.

  Returns:
    Tuple of (frequencies, magnitudes) for the one-sided spectrum.
  """
  n_fft = ddsp.core.get_fft_size(0, ir.shape.as_list()[-1])
  # One-sided bin frequencies (rfft layout), in Hz.
  frequencies = np.abs(np.fft.fftfreq(n_fft, 1/sample_rate)[:int(n_fft/2) + 1])
  magnitudes = tf.abs(tf.signal.rfft(ir, [n_fft]))
  return frequencies, magnitudes
def plot_impulse_responses(impulse_response,
                           desired_magnitudes,
                           sample_rate=DEFAULT_SAMPLE_RATE):
  """Plot a target frequency response, and that of an impulse response.

  Args:
    impulse_response: IR tensor; assumes shape indexed as [0, 0, :] for
      plotting — TODO confirm expected batch/channel layout.
    desired_magnitudes: Target magnitude response; its length fixes n_fft/2.
    sample_rate: Sample rate for the frequency axis.
  """
  n_fft = desired_magnitudes.shape[-1] * 2
  frequencies = np.fft.fftfreq(n_fft, 1/sample_rate)[:n_fft//2]
  true_frequencies, true_magnitudes = transfer_function(impulse_response)

  # Plot it.
  plt.figure(figsize=(12, 6))
  plt.subplot(121)
  # Desired transfer function.
  plt.semilogy(frequencies, desired_magnitudes, label='Desired')
  # True transfer function.
  plt.semilogy(true_frequencies, true_magnitudes[0, 0, :], label='True')
  plt.title('Transfer Function')
  plt.legend()

  plt.subplot(122)
  plt.plot(impulse_response[0, 0, :])
  plt.title('Impulse Response')
|
#!/usr/bin/env python
""" Converts between chromosome names. """
import sys
import argparse
from argparse import RawDescriptionHelpFormatter as Raw
import pkg_resources
import gzip
import pandas as pd
import pysam
import pybedtools
from Bio import SeqIO
def arguments():
    """ Function to pull command line arguments

    Returns:
        argparse.Namespace with attributes orig, new, type, input, output,
        debug.
    """
    DESCRIPTION = """\
    Converts between chromosome names.

    In D. melanogaster there are four common varieties of chromosomes names
    that are used.

    * FlyBase style: 2L, 2R, 3L, etc.
    * UCSC style: chr2L, chr2R, chr3L, etc.
    * RefSeq style: NT_033779.5, NT_033778.4, NT_037436.4, etc.
    * GenBank style: AE014134.6, AE013599.5, AE014296.5, etc.

    Each style has its benefits: FlyBase is the standard, UCSC is compatible
    with the genome browser, and RefSeq and GenBank are the most explicit. It
    is common to come across files that use these different styles of
    chromosome name, this tool aims to easily convert chromosome names in a
    variety of file format.
    """
    # RawDescriptionHelpFormatter keeps the hand-wrapped text above intact.
    parser = argparse.ArgumentParser(description=DESCRIPTION,
                                     formatter_class=Raw)

    parser.add_argument("--from", dest="orig", action='store', required=True,
                        choices=['FlyBase', 'UCSC', 'RefSeq', 'GenBank'],
                        help="Current type of chromosome name.")

    parser.add_argument("--to", dest="new", action='store', required=True,
                        choices=['FlyBase', 'UCSC', 'RefSeq', 'GenBank'],
                        help="The type of chromosome name wanted.")

    parser.add_argument("--fileType", dest="type", action='store',
                        required=True,
                        choices=['SAM', 'BAM', 'BED', 'GFF', 'GTF', 'FASTA'],
                        help="What is the input format.")

    parser.add_argument("-i", "--input", dest="input", action='store',
                        required=True,
                        help="""Input file to convert. If `-i -`, STDIN is
                        used. Note if using SAM/BAM with STDIN, you must
                        include headrs.""")

    parser.add_argument("-o", "--output", dest="output", action='store',
                        required=False, default='-',
                        help="Output file, if none given or `-o -` "
                        "then STDOUT.")

    parser.add_argument("--debug", dest="debug", action='store_true',
                        required=False, help="Enable debug output.")

    return parser.parse_args()
def import_conversion(f, t):
    """
    Import NCBI conversion table.

    Parameters
    ----------
    f: str {FlyBase, UCSC, RefSeq, GenBank}
        The current chromosome format.
    t: str {FlyBase, UCSC, RefSeq, GenBank}
        The desired chromosome format.

    Returns
    -------
    dict: Mapping {f: t}
    """
    # Get location of the file (assembly report bundled with lcdblib)
    gz = pkg_resources.resource_filename(
        'lcdblib',
        'data/GCF_000001215.4.assembly.txt.gz'
    )

    # Import file
    df = pd.read_csv(gz, compression='gzip', comment='#', sep='\t',
                     header=None)

    # Make mapping from keyword to column number
    # col#  Header
    # 0     Sequence-Name
    # 1     Sequence-Role
    # 2     Assigned-Molecule
    # 3     Assigned-Molecule-Location/Type
    # 4     GenBank-Accn
    # 5     Relationship
    # 6     RefSeq-Accn
    # 7     Assembly-Unit
    # 8     Sequence-Length
    # 9     UCSC-style-name
    mapping = {'FlyBase': 0, 'UCSC': 9, 'GenBank': 4, 'RefSeq': 6}

    # This table has a small error were FlyBase mitochondrion_genome is MT
    df.replace('MT', 'mitochondrion_genome', inplace=True)

    # Build the {from-name: to-name} lookup from the two selected columns.
    return {k: v for k, v in df[[mapping[f], mapping[t]]].values}
def pysam_convert(input, output, kind, mapper):
    """
    Use pysam to convert chromosomes in BAM and SAM files.

    pysam uses a header to define chromosomes, then each read is just mapped
    back to this header. Only the header needs to be modified, and reads
    need to be written to the new output file which uses this header.

    Raises
    ------
    ValueError
        If ``kind`` is neither 'BAM' nor 'SAM'.
    """
    # Determine SAM or BAM flags
    if kind == 'BAM':
        flag_in = 'rb'
        flag_out = 'wb'
    elif kind == 'SAM':
        flag_in = 'r'
        flag_out = 'wh'
    else:
        # Fail clearly instead of hitting a NameError on flag_in below.
        raise ValueError("kind must be 'BAM' or 'SAM', got {!r}".format(kind))

    curr = pysam.AlignmentFile(input, flag_in)
    try:
        # Change chromosome in the header
        header = curr.header
        for chrom in header['SQ']:
            chrom['SN'] = mapper[chrom['SN']]

        with pysam.AlignmentFile(output, flag_out, header=header) as OUT:
            for read in curr:
                OUT.write(read)
    finally:
        # Close the input file (it was previously leaked).
        curr.close()
def convertFeature(f, mapper):
    """Rewrite a feature's chromosome name in place via ``mapper``; return the feature."""
    renamed = mapper[f.chrom]
    f.chrom = renamed
    return f
def pybedtools_convert(input, output, mapper):
    """ Use pybedtools to convert chromosomes in BED, GTF, or GFF.

    input -- path to the input file, or '-' for STDIN.
    output -- path to the output file, or '-' for STDOUT.
    mapper -- dict mapping current chromosome names to desired names.
    """
    if input == '-':
        # Use STDIN
        bt = pybedtools.BedTool(sys.stdin)
    else:
        bt = pybedtools.BedTool(input)

    if output == '-':
        # Use STDOUT
        # NOTE(review): this relies on BedTool.__str__ emitting the full
        # converted record set — confirm against pybedtools' behavior.
        print(bt.each(convertFeature, mapper))
    else:
        bt.each(convertFeature, mapper).saveas(output)
def fasta_convert(input, output, mapper):
    """ Uses Biopython.SeqIO to convert FASTA headers.

    input -- path to the input FASTA ('-' for STDIN, '.gz' supported).
    output -- path to the output FASTA ('-' for STDOUT, '.gz' supported).
    mapper -- dict mapping current chromosome names to desired names.
    """
    # --- choose the input handle ---
    if input == '-':
        # Use STDIN
        fh = sys.stdin
    elif input.endswith('.gz'):
        fh = gzip.open(input, 'rt')
    else:
        fh = open(input, 'r')

    # --- choose the output handle ---
    if output == '-':
        # Use STDOUT
        oh = sys.stdout
    elif output.endswith('.gz'):
        # BUG FIX: original opened `input` (not `output`) for writing —
        # clobbering the source file — and used binary mode, which breaks
        # SeqIO's text output. Open the output path in text mode instead.
        oh = gzip.open(output, 'wt')
    else:
        oh = open(output, 'w')

    for seq in SeqIO.parse(fh, 'fasta'):
        # Rename id/name and keep the description consistent with the new id.
        seq.description = seq.description.replace(seq.id, mapper[seq.id])
        seq.name = mapper[seq.name]
        seq.id = mapper[seq.id]
        SeqIO.write(seq, oh, 'fasta')

    # close file handlers
    # NOTE(review): this also closes sys.stdin/sys.stdout when '-' is used,
    # mirroring the original behavior — confirm that is acceptable.
    fh.close()
    oh.close()
def main():
    """Parse arguments, build the name mapping, and dispatch by file type."""
    # Import commandline arguments.
    args = arguments()
    # Get mapping dict
    mapper = import_conversion(args.orig, args.new)

    kind = args.type
    if kind in ('BAM', 'SAM'):
        pysam_convert(args.input, args.output, kind, mapper)
    elif kind in ('BED', 'GFF', 'GTF'):
        pybedtools_convert(args.input, args.output, mapper)
    elif kind == 'FASTA':
        fasta_convert(args.input, args.output, mapper)
|
# Solving problems in statistics
import math
from my_utils import probability, union_probability
from my_utils.testing import test_equal, test_close
import my_utils.counting as C
print("\nSOME PROBABILITY PROBLEMS\n")
# Question 1
print("Q: Find the probability of getting a head when you toss a fair coin?")
p_head = probability(1, 2)
print(p_head)
print("A: The probability is {}".format(p_head) )
expected_p_head = 0.5
test_equal(p_head, expected_p_head)
print("")
# Question 2
print("Q: Find the probability of getting 3 heads when you toss 10 fair coins.")
p_3_heads = C.combinations(10, 3) / 2**10
print("A: The probability is {}".format(p_3_heads))
expected_p_3_heads = 0.1171875
test_close(p_3_heads, expected_p_3_heads)
print("")
# Question 3
print("Q: Find the probability of getting 4 heads when you toss 10 fair coins.")
p_3_heads = C.combinations(10, 4) / 2**10
print("A: The probability is {}".format(p_3_heads))
expected_p_3_heads = 0.205078125
test_close(p_3_heads, expected_p_3_heads)
print("")
# Question 4
print("Q: Find the probability of getting 5 heads when you toss 10 fair coins.")
p_3_heads = C.combinations(10, 5) / 2**10
print("A: The probability is {}".format(p_3_heads))
expected_p_3_heads = 0.205078125
test_close(p_3_heads, expected_p_3_heads)
print("") |
import pyctopod
def handle_message(from_id, topic, msg):
    """Publish the fixed payload "blah" to the :test topic for every message
    received; the incoming message content is ignored."""
    reply = "blah"
    pyctopod.publish("test", reply.encode('utf8'), from_id="pyctotest_keepalive_receiver")
def main():
    """Entry point: subscribe handle_message to the 'test_topic' topic."""
    pyctopod.subscribe(topics=["test_topic"], handlers=[handle_message])
def register_handler(pid):
    """Register this module's main loop and the given pid with pyctopod.

    pid -- process identifier handed to pyctopod's registration hook.
    """
    pyctopod.register_main(main)
    pyctopod.register_handler(pid)
|
"""
Copyright 2021, BYU-Idaho.
Author(s): Matt Manley, Jacob Oliphant
Version: 1.0
Date: 27-01-2021
"""
from abc import ABC
class Trait(ABC):
    """A distinguishing quality.

    The responsibility of Trait is to define the common methods for an actor's
    specific qualities or features. Even though this version of Trait doesn't
    define any, it is still useful as a marker interface. That is, the intent
    of a subclass is made clearer by virtue of the inheritance relationship
    with this one.

    The concept of a "trait" is a fundamental part of the Genie object model.
    Making sure it's represented in code, even as just a marker interface,
    helps make the whole project more understandable. It also provides a place
    to make changes in the future if we ever need it.
    """
    pass
#!/usr/bin/env python3
"""
Módulo Main: Programa principal (controlador).
Playlist - Ejecute "ayuda" para más información
"""
from lista import Lista
from estante import Estante
from repl import REPL
from repl import strip
class Main:
    """Main controller class for the playlist REPL."""

    def __init__(self):
        """Constructor: initializes instance state and starts the REPL loop.

        NOTE(review): the constructor blocks inside REPL(...).ciclo() until
        the user exits — instantiating Main runs the whole application.
        """
        # Maps REPL command names (Spanish, user-facing) to bound methods.
        self.comandos = {
            "agregar": self.agregar,
            "borrar": self.borrar,
            "mostrar": self.mostrar,
            "listar": self.listar,
            "buscar": self.buscar,
            "ayuda": self.ayuda,
            "salir": self.salir
        }
        archivo = "datos.db"
        introduccion = strip(__doc__)
        self.lista = Estante(archivo)
        if not self.lista.esarchivo():
            introduccion += '\nError: No se pudo abrir "{}"'.format(archivo)
        REPL(self.comandos, introduccion).ciclo()

    def agregar(self, cancion, artista, album, anho, genero):
        """
        Adds a record to the playlist.

        cancion -- song name; used as the key.
        artista -- artist name.
        album -- album that contains the song.
        anho -- year the album was released.
        genero -- musical genre.
        """
        self.lista[cancion] = Lista(cancion, artista, album, anho, genero)

    def borrar(self, cancion):
        """
        Deletes a record from the playlist.

        cancion -- name of the song to remove.
        """
        del self.lista[cancion]

    def mostrar(self, cancion):
        """
        Returns a single record from the playlist.

        cancion -- name of the song to show.
        """
        return self.lista[cancion]

    def listar(self):
        """
        Returns a generator over all records, sorted by song name.

        This command takes no parameters.
        """
        return (self.lista[cancion]
                for cancion in sorted(self.lista))

    def buscar(self, cadena):
        """
        Returns a generator over records whose key contains a substring.

        cadena -- name or part of a name to search for.
        """
        return (self.lista[cancion]
                for cancion in sorted(self.lista)
                if cadena in cancion)

    def ayuda(self, comando=None):
        """
        Returns the list of available commands, or help for one command.

        comando -- command to get help for (optional).
        """
        if comando in self.comandos:
            salida = strip(self.comandos[comando].__doc__)
        else:
            salida = "Sintaxis: comando [parámetro1] [parámetro2] [..]\n" + \
                     "Comandos: " + \
                     ", ".join(sorted(self.comandos.keys()))
        return salida

    def salir(self):
        """
        Exits the application.

        This command takes no parameters.
        """
        quit()
# Entry point: constructing Main immediately starts the REPL loop.
if __name__ == "__main__":
    Main()
|
import wikipedia
from .baseprovider import BaseProvider
class WikipediaProvider(BaseProvider):
    """Provider that answers a query with the first matching Wikipedia page."""

    @staticmethod
    def get(query, config, params=None, lang='en'):
        """
        Look up *query* on Wikipedia and return a result dict.

        query -- search string.
        config -- provider configuration (unused here; kept for interface parity).
        params -- optional extra parameters (unused). Default changed from a
                  mutable ``{}`` shared across calls to ``None``.
        lang -- Wikipedia language code.

        Returns a dict with at least a 'content' key; successful lookups also
        carry 'type' and 'url'.
        """
        wikipedia.set_lang(lang)
        search = wikipedia.search(query)
        if not search:
            # No search hit at all.
            return {
                'content': 'nan'
            }
        try:
            result = wikipedia.page(search[0])
        except wikipedia.DisambiguationError:
            # Ambiguous title: hand back the disambiguation URL instead.
            return {
                'type': 'text',
                'content': 'https://{}.wikipedia.org/wiki/{}'.format(lang, query)
            }
        content = {
            'type': 'text',
            'content': result.content.split('\n')[0],
            'url': result.url
        }
        return content
|
import traceback
from contextlib import contextmanager
import pytz
import signal
from datetime import datetime
from typing import Union, Any
@contextmanager
def signal_context(signals=(signal.SIGINT, signal.SIGTERM), handler: Union[int, Any] = signal.SIG_IGN):
    """Temporarily install *handler* for *signals*; restore the previous handlers on exit."""
    previous = []
    for sig in signals:
        previous.append(signal.signal(sig, handler))
    try:
        yield previous
    finally:
        # Reinstate whatever was registered before entering the context.
        for sig, old in zip(signals, previous):
            signal.signal(sig, old)
# ISO-8601 UTC timestamp format, e.g. '2020-01-31T23:59:59.000123Z'.
ISO8601 = '%Y-%m-%dT%H:%M:%S.%fZ'
# Same instant with the separator symbols removed (safe for filenames).
ISO8601_SYMSAFE = '%Y%m%dT%H%M%S%fZ'
def time_now() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.now(pytz.utc)
def time_parse(v, format) -> datetime:
    """Parse string *v* with *format* and tag the naive result as UTC."""
    parsed = datetime.strptime(v, format)
    return parsed.replace(tzinfo=pytz.utc)
def _build_callstack(ignore=1):
assert ignore > 0
INDENT = ' '
callstack = '\n'.join([INDENT + line.strip() for line in traceback.format_stack()][:-ignore])
return callstack
def _log_called_from(logger, pat='', *args):
    """Log *pat* (lazily %-formatted with *args*) followed by the caller's stack."""
    prefix = pat + '\n' if pat else pat
    logger.exception(prefix + 'Called from\n%s\n', *args, _build_callstack())
def _log_traceback(logger, pat=''):
    """Log *pat* followed by the current call stack at DEBUG level.

    The original raised and caught a dummy ``KeyError`` around the logging
    call with a bare ``except:``; the exception served no purpose (the stack
    text comes from ``_build_callstack``, not from exception state), so the
    contrived try/except is removed.
    """
    if pat:
        pat += '\n'
    # ignore=2 drops the _build_callstack and _log_traceback frames.
    logger.debug(pat + _build_callstack(ignore=2))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test.test_datamodel_v1
~~~~~~~~~~~~~~~~~~~~~~
Test data model key calculations etc.
"""
import logging
import unittest
from networking_calico.datamodel_v1 import WloadEndpointId
# Logger
log = logging.getLogger(__name__)
class TestWorkloadEndpointId(unittest.TestCase):
    def test_equality(self):
        """Each 'bad' id must differ from ep_id in exactly one field.

        Fix: the original built the orchestrator/workload/endpoint variants
        with host "hostname" while the reference id uses "localhost", so
        those comparisons failed on the host field and never exercised the
        field they were named after.
        """
        ep_id = WloadEndpointId("localhost", "orchestrator", "workload",
                                "endpoint")
        # Reflexivity and comparison against foreign types.
        self.assertTrue(ep_id == ep_id)
        self.assertFalse(ep_id != ep_id)
        self.assertFalse(ep_id == "not an endpoint id")
        self.assertFalse(ep_id == 42)

        bad_host_ep_id = WloadEndpointId("notlocalhost", "orchestrator",
                                         "workload", "endpoint")
        self.assertFalse(ep_id == bad_host_ep_id)

        bad_orchestrator_ep_id = WloadEndpointId("localhost",
                                                 "notanorchestrator",
                                                 "workload",
                                                 "endpoint")
        self.assertFalse(ep_id == bad_orchestrator_ep_id)

        bad_workload_ep_id = WloadEndpointId("localhost", "orchestrator",
                                             "notworkload", "endpoint")
        self.assertFalse(ep_id == bad_workload_ep_id)

        bad_endpoint_ep_id = WloadEndpointId("localhost", "orchestrator",
                                             "workload", "notanendpoint")
        self.assertFalse(ep_id == bad_endpoint_ep_id)
        self.assertTrue(ep_id != bad_endpoint_ep_id)
|
import matplotlib.pyplot as plt
import pylab
# Column index, line style, legend label, and colour for each curve.
SERIES = [
    (1, 'solid', 'L0', 'blue'),
    (2, 'dashed', 'L1', 'blue'),
    (3, 'dotted', 'L2', 'blue'),
    (4, 'solid', 'L0-C', 'red'),
    (5, 'dashed', 'L1-C', 'red'),
    (6, 'dotted', 'L2-C', 'red'),
]
data = pylab.loadtxt('../out/plotdata.txt', delimiter=',', usecols=(0, 1, 2, 3, 4, 5, 6))
# Column 0 is the iteration counter; plot every other column against it.
for col, style, label, colour in SERIES:
    pylab.plot(data[:, 0], data[:, col], linestyle=style, linewidth=0.5,
               marker='.', markersize=2, label=label, color=colour)
plt.xlabel("iteration", size=15)
plt.ylabel("performance", size=15)
plt.grid(True, linewidth=0.2)
plt.legend(loc='best')
plt.show()
|
import torch
class MovingAverage:
    """Fixed-size circular buffer reporting the mean of the last values added.

    Only the most recent ``moving_average_size`` values contribute; older
    entries are overwritten in place.
    """

    def __init__(self, moving_average_size=256):
        # count: how many slots currently hold valid data (saturates at size).
        self.count = 0
        # i: index of the next slot to overwrite.
        self.i = 0
        self.moving_average_size = moving_average_size
        self.values = self.moving_average_size * [0]

    def reset(self):
        """Forget all recorded values.

        Bug fix: the original reset only the counters and left stale values
        in the buffer, so ``moving_average`` computed after a reset (which
        sums the whole ``values`` list) still included pre-reset data.
        """
        self.count = 0
        self.i = 0
        self.values = self.moving_average_size * [0]

    def moving_average(self):
        """Return the mean of the stored values, or 0 when empty."""
        if self.count == 0:
            return 0
        return sum(self.values) / self.count

    def addn(self, value):
        """Record *value*, evicting the oldest entry once the buffer is full."""
        self.values[self.i] = value
        self.count = min(self.count + 1, self.moving_average_size)
        self.i = (self.i + 1) % self.moving_average_size
class CPUParallel(torch.nn.Module):
    """CPU stand-in for DataParallel: forwards every call to the wrapped module."""

    def __init__(self, module):
        super().__init__()
        # Keep the wrapped module as a registered submodule.
        self.module = module

    def forward(self, *args):
        # No device scatter/gather — just delegate.
        return self.module(*args)
|
# -*- coding: utf-8 -*-
import sys
import os
import shutil
import warnings
import re
from setuptools import setup, Extension
from setuptools import find_packages
#########
# settings
#########
# Project identity and release metadata.
project_var_name = "csharpyml"
project_owner = "sdpython"
sversion = "0.1"
# Current interpreter version, e.g. "3.7".
versionPython = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
# Install location of the package inside a Python distribution.
path = "Lib/site-packages/" + project_var_name
readme = 'README.rst'
history = 'HISTORY.rst'
KEYWORDS = project_var_name + ', first name, last name'
DESCRIPTION = "Tools to use C# + Python mostly from Python."
CLASSIFIERS = [
    'Programming Language :: Python :: %d' % sys.version_info[0],
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering',
    'Topic :: Education',
    'License :: OSI Approved :: MIT License',
    'Development Status :: 5 - Production/Stable'
]
#######
# data
#######
# Packages live under src/; compiled .NET assemblies ship as package data.
packages = find_packages('src', exclude='src')
package_dir = {k: "src/" + k.replace(".", "/") for k in packages}
package_data = {project_var_name +
                ".binaries.Release": ["*.dll", "*.so", "*.json", "*.xml"]}
############
# functions
############
def is_local():
    """Tell whether setup runs from a local checkout rather than a pip temp build."""
    norm_file = os.path.abspath(__file__).replace("\\", "/").lower()
    # pip builds from a temporary "pip-..." folder: not a local checkout.
    if "/temp/" in norm_file and "pip-" in norm_file:
        return False
    from pyquickhelper.pycode.setup_helper import available_commands_list
    return available_commands_list(sys.argv)
def ask_help():
    """Return True when the command line asks for setup help."""
    return any(flag in sys.argv for flag in ("--help", "--help-commands"))
def verbose():
    """Dump the computed packaging variables for debugging."""
    bar = "---------------------------------"
    print(bar)
    print("package_dir =", package_dir)
    print("packages =", packages)
    print("package_data=", package_data)
    print("current =", os.path.abspath(os.getcwd()))
    print(bar)
##########
# version
##########
# Compute the git-derived sub-version. Locally, pyquickhelper writes
# version.txt with the commit number; installed copies carry no commit.
if is_local() and not ask_help():
    def write_version():
        # Delegated to pyquickhelper, which inspects the git repository.
        from pyquickhelper.pycode import write_version_for_setup
        return write_version_for_setup(__file__)

    write_version()
    versiontxt = os.path.join(os.path.dirname(__file__), "version.txt")
    if os.path.exists(versiontxt):
        with open(versiontxt, "r") as f:
            lines = f.readlines()
        subversion = "." + lines[0].strip("\r\n ")
        if subversion == ".0":
            raise Exception("Git version is wrong: '{0}'.".format(subversion))
    else:
        raise FileNotFoundError(versiontxt)
else:
    # when the module is installed, no commit number is displayed
    subversion = ""

# Refuse to upload a build whose commit number could not be determined.
if "upload" in sys.argv and not subversion and not ask_help():
    # avoid uploading with a wrong subversion number
    raise Exception(
        "Git version is empty, cannot upload, is_local()={0}".format(is_local()))
##############
# common part
##############
# Long description = README followed by HISTORY, when present.
# utf-8-sig tolerates a BOM left by Windows editors.
if os.path.exists(readme):
    with open(readme, "r", encoding='utf-8-sig') as f:
        long_description = f.read()
else:
    long_description = ""
if os.path.exists(history):
    with open(history, "r", encoding='utf-8-sig') as f:
        long_description += f.read()

if "--verbose" in sys.argv:
    verbose()
# On a local checkout, hand the command line to pyquickhelper's standard
# option processing (doc build, unit tests, coverage, ...). r tells whether
# the command was recognized and handled there.
if is_local():
    from pyquickhelper import get_fLOG, get_insetup_functions
    logging_function = get_fLOG()
    logging_function(OutputPrint=True)
    must_build, run_build_ext = get_insetup_functions()
    if must_build():
        out = run_build_ext(__file__)
        print(out)
    from pyquickhelper.pycode import process_standard_options_for_setup
    r = process_standard_options_for_setup(
        sys.argv, __file__, project_var_name,
        extra_ext=["cs"],
        add_htmlhelp=sys.platform.startswith("win"),
        coverage_options=dict(omit=["*exclude*.py"]),
        github_owner=project_owner,
        fLOG=logging_function, covtoken=(
            "d911f0bb-f250-415d-860b-19b342a4f168", "'_UT_39_std' in outfile"),
        requirements=["pyquickhelper", "jyquickhelper", "csharpy"],
        additional_notebook_path=["pyquickhelper", "jyquickhelper", "csharpy"],
        additional_local_path=["pyquickhelper", "jyquickhelper", "csharpy"],
        copy_add_ext=["dll", 'so'], layout=["html"])
    # Unknown command that is not one of the standard distutils targets.
    if not r and not ({"bdist_msi", "sdist",
                       "bdist_wheel", "publish", "publish_doc", "register",
                       "upload_docs", "bdist_wininst", "build_ext"} & set(sys.argv)):
        raise Exception("unable to interpret command line: " + str(sys.argv))
else:
    r = False
def build_machinelearning(version="Release"):
    "Builds machinelearning (ml.net)."
    from pyquickhelper.loghelper import run_cmd
    print('[csharpyml.machinelearning]')
    this = os.path.abspath(os.path.dirname(__file__))
    folder = os.path.join(this, 'cscode', 'machinelearning')
    # The submodule ships its own build script: build.cmd on Windows,
    # build.sh elsewhere.
    cmd = "build{0}"
    if sys.platform.startswith("win"):
        cmd = cmd.format('.cmd')
    else:
        cmd = cmd.format('.sh')
    full = os.path.join(folder, cmd)
    if not os.path.exists(full):
        existing = os.listdir(folder)
        raise FileNotFoundError("Unable to find '{0}', build failed. Found:\n{1}".format(
            full, "\n".join(existing)))
    if not sys.platform.startswith("win"):
        cmd = "bash --verbose " + cmd
    # e.g. "build.cmd -Release" / "bash --verbose build.sh -Release".
    cmd += ' -' + version
    out, err = run_cmd(cmd, wait=True, change_path=folder)
    if len(err) > 0:
        # Filter out small errors.
        errs = []
        lines = err.split('\n')
        for line in lines:
            if 'ILAsmVersion.txt: No such file or directory' in line:
                continue
            errs.append(line)
        err = "\n".join(errs)
    if len(err) > 0:
        raise RuntimeError(
            "Unable to build machinelearning code.\nCMD: {0}\n--ERR--\n{1}".format(cmd, err))
    elif len(out) > 0:
        print('[csharpyml.machinelearning] OUT')
        print(out)
    # A successful build must leave a bin/ folder behind.
    bin = os.path.join(folder, "bin")
    if not os.path.exists(bin):
        existing = os.listdir(folder)
        raise FileNotFoundError("Unable to find '{0}', build failed. Found:\n{1}".format(
            bin, "\n".join(existing)))
def build_machinelearningext(version="Release"):
    """Build the machinelearningext C# solution with the dotnet CLI.

    version -- build configuration, "Release" or "Debug".
    Raises RuntimeError when a dotnet command writes to stderr.
    """
    from pyquickhelper.loghelper import run_cmd
    # Opt out of .NET CLI telemetry unless the user already chose.
    env = os.environ.get('DOTNET_CLI_TELEMETRY_OPTOUT', None)
    if env is None:
        os.environ['DOTNET_CLI_TELEMETRY_OPTOUT'] = '1'
    print('[csharpyml.env] DOTNET_CLI_TELEMETRY_OPTOUT={0}'.format(
        os.environ['DOTNET_CLI_TELEMETRY_OPTOUT']))
    # builds the other libraries
    cmds = ['dotnet restore machinelearningext.sln',
            'dotnet build -c %s machinelearningext.sln' % version]
    # Bug fix: the original computed os.path.abspath("cscode") and then
    # immediately overwrote `folder` with a bare relative path, so dotnet ran
    # in "machinelearningext/machinelearningext" relative to the current
    # directory instead of inside cscode/ (where the solution lives, see the
    # paths used by copy_assemblies).
    folder = os.path.join(os.path.abspath("cscode"),
                          "machinelearningext", "machinelearningext")
    outs = []
    for cmd in cmds:
        out, err = run_cmd(cmd, fLOG=print, wait=True, change_path=folder)
        if len(err) > 0:
            raise RuntimeError(
                "Unable to compile C# code.\nCMD: {0}\n--ERR--\n{1}".format(cmd, err))
        elif len(out) > 0:
            outs.append(out)
            print('[csharpyml.dotnet] OUT')
            print(out)
    # Copy specific files.
    copy_assemblies(version=version)
def build_module(version="Release"):
    "build the module"
    # git submodule add https://github.com/dotnet/machinelearning.git cscode/machinelearning
    # We build a dotnet application.
    from pyquickhelper.loghelper import run_cmd
    # Opt out of .NET CLI telemetry unless the user already chose.
    env = os.environ.get('DOTNET_CLI_TELEMETRY_OPTOUT', None)
    if env is None:
        os.environ['DOTNET_CLI_TELEMETRY_OPTOUT'] = '1'
    print('[csharpyml.env] DOTNET_CLI_TELEMETRY_OPTOUT={0}'.format(
        os.environ['DOTNET_CLI_TELEMETRY_OPTOUT']))
    # builds the other libraries
    cmds = ['dotnet restore CSharPyMLExtension_netcore.sln',
            'dotnet build -c %s CSharPyMLExtension_netcore.sln' % version]
    folder = os.path.abspath("cscode")
    outs = []
    for cmd in cmds:
        # Fail hard on any stderr output from dotnet.
        out, err = run_cmd(cmd, fLOG=print, wait=True, change_path=folder)
        if len(err) > 0:
            raise RuntimeError(
                "Unable to compile C# code.\nCMD: {0}\n--ERR--\n{1}".format(cmd, err))
        elif len(out) > 0:
            outs.append(out)
            print('[csharpyml.dotnet] OUT')
            print(out)
    # Copy specific files.
    copy_assemblies(version=version)
def extract_version_target(path):
    """Extract ``(version tuple, target framework)`` from a package path.

    Example: ``"3.5.1/lib/netstandard1.0" -> ((3, 5, 1), 'netstandard1.0')``.
    Falls back to framework-only matches (netstandard, netcoreapp, Native)
    and finally to ``(None, None)``.

    Improvement: the original's four-level else-pyramid is flattened into
    guard clauses and a fallback loop; behavior is unchanged.
    """
    reg = re.compile(
        '([0-9]+[.][0-9]+[.][0-9]+).*[/\\\\](netstandard[0-9][.][0-9])')
    res = reg.search(path)
    if res:
        g1, g2 = res.groups()
        g1 = tuple(int(_) for _ in g1.split('.')) if g1 else None
        return g1, (g2 or None)
    # Framework-only fallbacks, tried in decreasing specificity.
    for pattern in ('(netstandard[0-9][.][0-9])',
                    '(netcoreapp[0-9][.][0-9])',
                    '(Native)'):
        res = re.search(pattern, path)
        if res:
            return None, res.groups()[0]
    return None, None
def find_folder_package(folder):
    """Return the best ``(version, framework, path)`` triple found under *folder*."""
    from pyquickhelper.filehelper import explore_folder
    dirs, _ = explore_folder(folder)
    candidates = []
    for d in dirs:
        version, net = extract_version_target(d)
        # Paths without a version still qualify, ranked lowest.
        if version is None:
            version = (0, 0, 0)
        if net is not None:
            candidates.append((version, net, d))
    if not candidates:
        raise FileNotFoundError("Not suitable path for '{0}'".format(folder))
    try:
        # Highest version wins; tuples compare lexicographically.
        return max(candidates)
    except TypeError as e:
        raise TypeError(
            "Unable to find a version in '{0}'\n{1}".format(folder, candidates)) from e
def copy_assemblies(libdef=None, version="Release"):
    """
    Copies all assemblies in the right location.
    *libdef* can be ``None``, ``ml`` or ``mlext``.

    - 'ml': copy the built ml.net binaries into cscode/bin.
    - 'mlext': copy the machinelearningext binaries into cscode/bin.
    - None: gather everything (plus nuget dependencies) into the Python
      package folder src/csharpyml/binaries/<version>.
    When a source folder is missing, a sibling 'machinelearningext' checkout
    next to this project is tried as a fallback.
    """
    from pyquickhelper.filehelper import synchronize_folder
    if libdef == 'ml':
        folders = []
        copy2 = True
        # Subset of ml.net assemblies shipped with the package; commented
        # entries are deliberately excluded.
        for lib in ["Microsoft.ML.Api",
                    "Microsoft.ML.Console",
                    # "Microsoft.ML.DnnAnalyzer",
                    # "Microsoft.ML.Ensemble",
                    "Microsoft.ML.FastTree",
                    # "Microsoft.ML.HalLearners",
                    "Microsoft.ML.ImageAnalytics",
                    "Microsoft.ML.KMeansClustering",
                    # "Microsoft.ML.Legacy",
                    "Microsoft.ML.LightGBM",
                    "Microsoft.ML.Maml",
                    # "Microsoft.ML.Onnx",
                    # "Microsoft.ML.OnnxTransform",
                    # "Microsoft.ML.PCA",
                    # "Microsoft.ML.PipelineInference",
                    "Microsoft.ML.Sweeper",
                    "Microsoft.ML.TensorFlow",
                    # "Microsoft.ML.Transforms",
                    # "Microsoft.ML.StandardLearners",
                    "Microsoft.ML.TimeSeries",
                    ]:
            fold = 'cscode/machinelearning/bin/AnyCPU.%s/%s' % (version, lib)
            if not os.path.exists(fold):
                # To avoid copy, we check that machinelearningext is
                # present at the same level as csharyml.
                this = os.path.normpath(os.path.abspath(
                    os.path.join(os.path.dirname(__file__), '..')))
                mlext = os.path.join(this, 'machinelearningext')
                if not os.path.exists(mlext):
                    raise FileNotFoundError(
                        "Unable to find folder '{0}' or '{1}'.".format(fold, mlext))
                fold = os.path.join(mlext, "machinelearning",
                                    'bin/AnyCPU.%s/%s' % (version, lib))
                if not os.path.exists(mlext):
                    raise FileNotFoundError(
                        "Unable to find folder '{0}' or '{1}'.".format(fold, fold))
                copy2 = False
            folders.append(fold)
        dests = ['cscode/bin/machinelearning/%s' % version]
        # Only mirror into the submodule's dist folder when copying from the
        # in-tree build.
        if copy2:
            dests.append(
                'cscode/machinelearningext/machinelearning/dist/%s' % version)
    elif libdef == 'mlext':
        folders = []
        for sub in ['DataManipulation',
                    'DocHelperMlExt',
                    'ScikitAPI',
                    ]:
            fold = 'cscode/machinelearningext/machinelearningext/bin/AnyCPU.%s/%s/netstandard2.0' % (
                version, sub)
            if not os.path.exists(fold):
                # To avoid copy, we check that machinelearningext is
                # present at the same level as csharyml.
                this = os.path.normpath(os.path.abspath(
                    os.path.join(os.path.dirname(__file__), '..')))
                mlext = os.path.join(this, 'machinelearningext')
                if not os.path.exists(mlext):
                    raise FileNotFoundError(
                        "Unable to find folder '{0}' or '{1}'.".format(fold, mlext))
                fold = os.path.join(mlext, "machinelearningext",
                                    'bin/AnyCPU.%s/%s' % (version, sub))
                if not os.path.exists(mlext):
                    raise FileNotFoundError(
                        "Unable to find folder '{0}' or '{1}'.".format(fold, fold))
                copy2 = False
            folders.append(fold)
        dests = ['cscode/bin/machinelearningext/%s' % version,
                 ]
    else:
        # Full gather: local bin folders plus pinned nuget dependencies.
        folders = ['cscode/bin/machinelearning/%s' % version,
                   'cscode/bin/machinelearningext/%s' % version,
                   'cscode/bin/AnyCPU.%s/CSharPyMLExtension/netstandard2.0' % version]
        rootpkg = "cscode/machinelearning/packages"
        if not os.path.exists(rootpkg):
            # To avoid copy, we check that machinelearningext is
            # present at the same level as csharyml.
            this = os.path.normpath(os.path.abspath(
                os.path.join(os.path.dirname(__file__), '..')))
            mlext = os.path.join(this, 'machinelearningext')
            if not os.path.exists(mlext):
                # NOTE(review): `fold` is never assigned in this branch, so
                # this raise would itself fail with a NameError — confirm
                # the intended message.
                raise FileNotFoundError(
                    "Unable to find folder '{0}' or '{1}'.".format(fold, mlext))
            rootpkg = os.path.join(mlext, "machinelearning", "packages")
        folders.extend([
            os.path.join(rootpkg, "newtonsoft.json",
                         "10.0.3", "lib", "netstandard1.3"),
            os.path.join(rootpkg, "system.memory",
                         "4.5.1", "lib", "netstandard2.0"),
            os.path.join(rootpkg, "system.runtime.compilerservices.unsafe",
                         "4.5.0", "lib", "netstandard2.0"),
            os.path.join(rootpkg, "system.collections.immutable",
                         "1.5.0", "lib", "netstandard2.0"),
            os.path.join(rootpkg, "system.numerics.vectors",
                         "4.4.0", "lib", "netstandard2.0"),
            os.path.join(rootpkg, "lightgbm",
                         "2.2.1.1", "runtimes", "win-x64", "native"),
            os.path.join(rootpkg, "google.protobuf",
                         "3.5.1", "lib", "netstandard1.0"),
        ])
        if sys.platform.startswith("win"):
            sub = "win-x64"
        else:
            sub = "linux-x64"
        # Additional dependencies.
        folders.extend([
            os.path.join(rootpkg, "system.threading.tasks.dataflow",
                         "4.8.0", "lib", "netstandard2.0"),
            os.path.join(rootpkg, "system.drawing.common",
                         "4.5.0", "lib", "netstandard2.0"),
            os.path.join(rootpkg, "mlnetmkldeps", "0.0.0.7",
                         "runtimes", sub, "native"),
        ])
        dests = ['src/csharpyml/binaries/%s' % version]
    for dest in dests:
        if not os.path.exists(dest):
            os.makedirs(dest)
        # The binaries folder must be importable as a Python package.
        if libdef is None:
            init = os.path.join(dest, '__init__.py')
            if not os.path.exists(init):
                with open(init, 'w') as f:
                    pass
        for fold in folders:
            try:
                v, n, found = find_folder_package(fold)
                do_check = True
            except FileNotFoundError:
                found = fold
                do_check = False
            if do_check:
                if "packages" in fold:
                    if v is None:
                        raise FileNotFoundError(
                            "Unable to find a suitable version for package '{0}'".format(fold))
                elif 'Native' not in found and 'netcoreapp' not in found and 'netstandard' not in found:
                    raise FileNotFoundError(
                        "Unable to find a suitable folder binaries '{0}'".format(fold))
            print("[csharpyml.copy] '{0}' -> '{1}'".format(found, dest))
            synchronize_folder(found, dest, fLOG=print, no_deletion=True)
    # Sanity check: one well-known dependency must have landed.
    if libdef not in ('ml', 'mlext'):
        if sys.platform.startswith("win"):
            check_existence = "src/csharpyml/binaries/%s/System.Numerics.Vectors.dll" % version
        else:
            check_existence = "src/csharpyml/binaries/%s/System.Numerics.Vectors.dll" % version
        if not os.path.exists(check_existence):
            found = "\n".join(os.listdir(os.path.dirname(check_existence)))
            warnings.warn("Unable to find '{0}', found:\n{1}".format(
                check_existence, found))
# Command-line driven build/copy logic; runs only when pyquickhelper's
# standard option processing did not already handle the command.
if not r:
    if len(sys.argv) in (1, 2) and sys.argv[-1] in ("--help-commands",):
        from pyquickhelper.pycode import process_standard_options_for_setup_help
        process_standard_options_for_setup_help(sys.argv)
    from pyquickhelper.pycode import clean_readme
    long_description = clean_readme(long_description)
    root = os.path.abspath(os.path.dirname(__file__))
    end = False

    # version
    # A 'debug'/'release' token on the command line selects the C# build
    # configuration and is stripped so setuptools does not see it.
    version = None
    if "debug" in sys.argv:
        version = "Debug"
    elif "Debug" in sys.argv:
        version = "Debug"
    elif "release" in sys.argv:
        version = "Release"
    elif "Release" in sys.argv:
        version = "Release"
    sys.argv = [_ for _ in sys.argv if _ not in (
        "debug", "Debug", "release", "Release")]
    version2 = version if version else "Release"

    # Custom commands: copy prebuilt assemblies or build the C# solutions.
    if "copybinml" in sys.argv:
        copy_assemblies(libdef='ml', version=version2)
        end = True
    elif "copybinmlext" in sys.argv:
        copy_assemblies(libdef='mlext', version=version2)
        end = True
    elif "copybin" in sys.argv:
        copy_assemblies(libdef=None, version=version2)
        end = True
    elif "build_ext" in sys.argv:
        if '--inplace' not in sys.argv:
            raise Exception("Option --inplace must be set up.")
        # builds machinelearning
        if '--submodules' in sys.argv:
            sys.argv = [_ for _ in sys.argv if _ != '--submodules']
            build_machinelearning(version=version2)
            copy_assemblies(libdef="ml", version=version2)
            build_machinelearningext(version=version2)
            copy_assemblies(libdef="mlext", version=version2)
        build_module(version=version2)
        copy_assemblies(version=version2)

    # MSVC rejects -std=c++11; only pass it to gcc/clang.
    if sys.platform.startswith("win"):
        extra_compile_args = None
    else:
        extra_compile_args = ['-std=c++11']

    # A pure copy/build command (end=True) skips the setuptools invocation.
    if not end:
        # C parts
        ext_cparts = Extension('src.csharpyml.cparts.cmodule',
                               [os.path.join(root, 'src/csharpyml/cparts/version.cpp'),
                                os.path.join(root, 'src/csharpyml/cparts/cmodule.cpp')],
                               extra_compile_args=extra_compile_args,
                               include_dirs=[os.path.join(root, 'src/csharpyml/cparts')])

        # Regular setup.
        setup(
            name=project_var_name,
            ext_modules=[ext_cparts],
            version=sversion,
            author='Xavier Dupré',
            author_email='xavier.dupre@gmail.com',
            license="MIT",
            url="http://www.xavierdupre.fr/app/csharpyml/",
            download_url="https://github.com/sdpython/csharpyml/",
            description=DESCRIPTION,
            long_description=long_description,
            keywords=KEYWORDS,
            classifiers=CLASSIFIERS,
            packages=packages,
            package_dir=package_dir,
            package_data=package_data,
            setup_requires=["pyquickhelper"],
            install_requires=['pythonnet', 'pyquickhelper'],
            extras_require={
                'sphinxext': ['pyquickhelper'],
            },
        )
|
#!/usr/bin/python
DOCUMENTATION = '''
---
module: targetcli_backstore
short_description: TargetCLI backstore module
description:
- module for handling backstore objects in targetcli ('/backstores').
version_added: "2.0"
options:
backstore_type:
description:
- Type of storage in TargetCLI (block, fileio, pscsi, ramdisk)
required: true
default: null
backstore_name:
description:
- Name of backtore object in TargetCLI
required: true
default: null
options:
description:
- options for create operation when creating backstore object
required: false
default: null
attributes:
description:
- Attributes for the defined LUN
required: false
default: null
state:
description:
- Should the object be present or absent from TargetCLI configuration
required: false
choices: [present, absent]
notes:
- Tested on CentOS 7.2
requirements: [ ]
author: "Ondrej Famera <ondrej-xa2iel8u@famera.cz>"
'''
EXAMPLES = '''
define new block backstore from disk/LV /dev/c7vg/LV1
- targetcli_backstore: backstore_type=block backstore_name=test1 options=/dev/c7vg/LV1
define new block backstore from disk/LV /dev/c7vg/LV2 with attributes
- targetcli_backstore: backstore_type=block backstore_name=test2 options=/dev/c7vg/LV2 attributes={{ "emulate_tpu=1" }}
remove block backstore from disk/LV /dev/c7vg/LV2
- targetcli_backstore: backstore_type=block backstore_name=test2 state=absent
'''
from distutils.spawn import find_executable
def main():
    """Create or delete a targetcli backstore object per the module arguments."""
    module = AnsibleModule(
        argument_spec=dict(
            backstore_type=dict(required=True),
            backstore_name=dict(required=True),
            options=dict(required=False),
            attributes=dict(required=False),
            state=dict(default="present", choices=['present', 'absent']),
        ),
        supports_check_mode=True
    )

    attributes = module.params['attributes']
    state = module.params['state']
    # 'options' carries the backing device/file; only needed on creation.
    if state == 'present' and not module.params['options']:
        module.fail_json(msg="Missing options parameter needed for creating backstore object")
    if find_executable('targetcli') is None:
        module.fail_json(msg="'targetcli' executable not found. Install 'targetcli'.")
    result = {}
    try:
        # Probe for the object; rc == 0 means it already exists.
        # NOTE(review): module parameters are %-interpolated into the command
        # string unescaped — names containing quotes or spaces would break the
        # command; confirm inputs are constrained upstream.
        rc, out, err = module.run_command("targetcli '/backstores/%(backstore_type)s/%(backstore_name)s status'" % module.params)
        if rc == 0 and state == 'present':
            # Already present: nothing to do.
            result['changed'] = False
        elif rc == 0 and state == 'absent':
            # Present but should be absent: delete it.
            result['changed'] = True
            if module.check_mode:
                module.exit_json(**result)
            else:
                cmd = "targetcli '/backstores/%(backstore_type)s delete %(backstore_name)s'" % module.params
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(**result)
                else:
                    module.fail_json(msg="Failed to delete backstores object using command " + cmd, output=out, error=err)
        elif state == 'absent':
            # Absent and should be absent: nothing to do.
            result['changed'] = False
        else:
            # Missing but should be present: create it (and set attributes).
            result['changed'] = True
            if module.check_mode:
                module.exit_json(**result)
            else:
                cmd = "targetcli '/backstores/%(backstore_type)s create %(backstore_name)s %(options)s'" % module.params
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    if attributes:
                        cmd = "targetcli '/backstores/%(backstore_type)s/%(backstore_name)s set attribute %(attributes)s'" % module.params
                        rc, out, err = module.run_command(cmd)
                        if rc == 0:
                            module.exit_json(**result)
                        else:
                            module.fail_json(msg="Failed to set LUN's attributes using cmd "+cmd, output=out, error=err)
                    else:
                        module.exit_json(**result)
                else:
                    module.fail_json(msg="Failed to define backstores object using command " + cmd, output=out, error=err)
    except OSError as e:
        module.fail_json(msg="Failed to check backstore object - %s" % (e))
    module.exit_json(**result)
# import module snippets
# Old-style Ansible module boilerplate: the wildcard import provides
# AnsibleModule, and main() runs unconditionally when Ansible executes the file.
from ansible.module_utils.basic import *
main()
|
import logging
from abc import ABC, abstractmethod
from flask import Blueprint
LOG = logging.getLogger(__name__)
class AbstractController(ABC):
    """Base class for Flask controllers.

    Owns one Blueprint (named after the concrete subclass) and forces every
    subclass to register its routes on it.
    """

    def __init__(self):
        # One blueprint per concrete controller class.
        self.api = Blueprint(self.__class__.__name__, __name__)
        self._routes()

    @abstractmethod
    def _routes(self):
        """Register this controller's routes on ``self.api``."""
|
"""The 21 BC functionality."""
import requests.sessions
from two1.lib.bitrequests import BitTransferRequests
from two1.commands.config import Config
from two1.lib.wallet import Wallet
# Module-level singletons: one wallet and one BitTransferRequests client,
# shared by every BitTransferSession instance.
wallet = Wallet()
username = Config().username
bt_requests = BitTransferRequests(wallet, username)
class BitTransferSession(requests.sessions.Session):
    """A requests Session with BitTransfer functionality.
    This substitutes for requests.sessions.Session in httpie.
    We want to use BitTransferRequests.request, but that in turn uses
    requests.request, which uses the default Session, with no way to specify
    a custom one.
    The solution for now is to monkey patch requests.request to
    this Session's bound request function.
    Ideally, BitRequests should subclass requests.sessions.Session.
    """

    def request(self, method, url, **kwargs):
        """Wraps the superclass method."""
        # Force BitTransferRequests to use this Session.
        # NOTE(review): this rebinding of requests.request is a process-wide
        # side effect that persists after the call.
        requests.request = super(BitTransferSession, self).request
        return bt_requests.request(method, url, **kwargs)
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.transforms import Resize,ToTensor,Compose,CenterCrop
import cv2
import numpy as np
class DeepLabLargeFOV(nn.Module):
    """DeepLab-LargeFOV segmentation network with a parallel multi-dilation
    CAM branch.

    forward() returns the segmentation logits plus per-branch classification
    scores, four CAM tensors, a fused localization map and the predicted mask.
    """

    def __init__(self, in_dim, out_dim, *args, **kwargs):
        super(DeepLabLargeFOV, self).__init__(*args, **kwargs)
        # NOTE(review): this VGG-16 instance is built and then never used;
        # init_weights() constructs its own pretrained copy later.
        vgg16 = torchvision.models.vgg16()
        # Segmentation trunk + DeepLab-LargeFOV classifier head.
        self.features = self.get_VGG16(in_dim)
        self.classifier = self.get_classifer(out_dim)
        # Separate trunk feeding four dilated-conv branches for CAMs.
        self.MDC_features = self.get_VGG16(in_dim)
        self.MDC_DC_1 = self.get_DC(0)
        self.MDC_DC_2 = self.get_DC(1)
        self.MDC_DC_3 = self.get_DC(2)
        self.MDC_DC_4 = self.get_DC(3)
        self.gap = self.get_GAP()
        # One 1024->21 classification head per branch (21 output classes).
        self.linear1 = nn.Linear(1024,21,bias=False)
        self.linear2 = nn.Linear(1024,21,bias=False)
        self.linear3 = nn.Linear(1024,21,bias=False)
        self.linear4 = nn.Linear(1024,21,bias=False)
        self.init_weights()

    def get_VGG16(self,in_dim):
        """Build a VGG-16 feature trunk (last two pools stride 1, conv5 dilated)."""
        layers = []
        layers.append(nn.Conv2d(in_dim, 64, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(64, 64, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.MaxPool2d(3, stride = 2, padding = 1))
        layers.append(nn.Conv2d(64, 128, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(128, 128, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.MaxPool2d(3, stride = 2, padding = 1))
        layers.append(nn.Conv2d(128, 256, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.MaxPool2d(3, stride = 2, padding = 1))
        layers.append(nn.Conv2d(256, 512, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(512, 512, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(512, 512, kernel_size = 3, stride = 1, padding = 1))
        layers.append(nn.ReLU(inplace = True))
        # Stride-1 pooling keeps the spatial resolution from here on.
        layers.append(nn.MaxPool2d(3, stride = 1, padding = 1))
        layers.append(nn.Conv2d(512,
                512,
                kernel_size = 3,
                stride = 1,
                padding = 2,
                dilation = 2))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(512,
                512,
                kernel_size = 3,
                stride = 1,
                padding = 2,
                dilation = 2))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.Conv2d(512,
                512,
                kernel_size = 3,
                stride = 1,
                padding = 2,
                dilation = 2))
        layers.append(nn.ReLU(inplace = True))
        layers.append(nn.MaxPool2d(3, stride = 1, padding = 1))
        return nn.Sequential(*layers)

    def get_classifer(self,out_dim):
        """Build the LargeFOV head: dilated 3x3 conv (rate 12) + two 1x1 convs."""
        classifier = []
        classifier.append(nn.AvgPool2d(3, stride = 1, padding = 1))
        classifier.append(nn.Conv2d(512,
            1024,
            kernel_size = 3,
            stride = 1,
            padding = 12,
            dilation = 12))
        classifier.append(nn.ReLU(inplace=True))
        classifier.append(nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding=0))
        classifier.append(nn.ReLU(inplace=True))
        classifier.append(nn.Dropout(p=0.5))
        classifier.append(nn.Conv2d(1024, out_dim, kernel_size=1))
        return nn.Sequential(*classifier)

    def get_DC(self,times):
        """Build one 512->1024 conv branch; dilation grows as 3*times (plain conv for times=0)."""
        layers = []
        if times == 0:
            layers.append(nn.Conv2d(512,1024,kernel_size=3,stride=1,padding=1))
        else:
            layers.append(nn.Conv2d(512,1024,kernel_size=3,stride=1,padding=3*times,dilation=times*3))
        return nn.Sequential(*layers)

    def get_GAP(self):
        """ReLU + 41x41 average pool + dropout.

        NOTE(review): kernel_size=41 assumes a 41x41 feature map (i.e. a
        321x321 input); other input sizes will fail here — confirm.
        """
        layers = []
        layers.append(nn.ReLU(inplace=True))
        layers.append(nn.AvgPool2d(kernel_size=41,stride=1))
        layers.append(nn.Dropout(p=0.5))
        return nn.Sequential(*layers)

    def forward(self, img):
        N, C, H, W = img.size()
        # Segmentation branch.
        x = self.features(img)
        #print(x.size())
        x = self.classifier(x)#{16,21,41,41}
        #print('shape of fov classifier output is {}'.format(x.size()))
        fov_out = F.interpolate(x, (H, W), mode='bilinear', align_corners=True)#16,21,321,321
        #print(fov_out.size())
        # CAM branch: shared trunk, four dilated heads.
        x = self.MDC_features(img)
        x1 = x.clone()
        x2 = x.clone()
        x3 = x.clone()
        x4 = x.clone()
        x1 = self.MDC_DC_1(x1)
        feature_1 = x1.clone()
        x1 = self.gap(x1)
        x1 = self.linear1(x1.view(N,-1))
        # NOTE(review): parameters() yields linear1..linear4 last, so [-1] is
        # linear4.weight — pairing it with branch 1 looks inverted relative
        # to self.linear1.weight; confirm the intended weight per branch.
        linear_weight_1 = list(self.parameters())[-1]
        x1 = torch.sigmoid(x1)
        x2 = self.MDC_DC_2(x2)
        feature_2 = x2.clone()
        x2 = self.gap(x2)
        x2 = self.linear2(x2.view(N,-1))
        linear_weight_2 = list(self.parameters())[-2]
        x2 = torch.sigmoid(x2)
        x3 = self.MDC_DC_3(x3)
        feature_3 = x3.clone()
        x3 = self.gap(x3)
        x3 = self.linear3(x3.view(N,-1))
        linear_weight_3 = list(self.parameters())[-3]
        x3 = torch.sigmoid(x3)
        x4 = self.MDC_DC_4(x4)
        feature_4 = x4.clone()
        x4 = self.gap(x4)
        # NOTE(review): branch 4 reuses self.linear3 — likely meant
        # self.linear4, which is otherwise never used in forward().
        x4 = self.linear3(x4.view(N,-1))
        linear_weight_4 = list(self.parameters())[-4]
        x4 = torch.sigmoid(x4)
        # Class activation maps per branch, fused into one localization map.
        CAMs_1 = self.getCams(x1,feature_1,linear_weight_1)
        CAMs_2 = self.getCams(x2,feature_2,linear_weight_2)
        CAMs_3 = self.getCams(x3,feature_3,linear_weight_3)
        CAMs_4 = self.getCams(x4,feature_4,linear_weight_4)
        location_map = torch.argmax((CAMs_1+(CAMs_2+CAMs_3+CAMs_4)/3),dim=1)
        pred_mask = torch.argmax(fov_out,dim=1)
        return fov_out,x1,x2,x3,x4,CAMs_1,CAMs_2,CAMs_3,CAMs_4,location_map,pred_mask

    def init_weights(self):
        """Load pretrained VGG-16 weights into the segmentation trunk and
        Kaiming-init the classifier convs."""
        vgg = torchvision.models.vgg16(pretrained=True)
        state_vgg = vgg.features.state_dict()
        self.features.load_state_dict(state_vgg)
        for ly in self.classifier.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                nn.init.constant_(ly.bias, 0)

    def returnCAM(self,feature_conv, weight_softmax):
        # generate the class activation maps upsample to 256x256
        # NOTE(review): the reshape below hardcodes batch=64 (and 16 is
        # mentioned in comments elsewhere); any other batch size will fail.
        # Also cam is shifted by its min but divided by the pre-shift max,
        # so values are not strictly normalized to [0, 1] — confirm intent.
        size_upsample = (321, 321)
        bz, nc, h, w = feature_conv.size()
        #print(feature_conv.shape)
        #print("class_idx :{}".format(len(class_idx)))
        #print('idx is {} weight_softmax{} shape is {}'.format(idx,idx,len(weight_softmax)))
        #print(nc,h,w)
        feature_conv = feature_conv.permute(1,0,2,3)
        feature_conv = feature_conv.reshape(1024,-1)
        cam = torch.mm(weight_softmax,feature_conv)#weightsoftmax (21,1024) feature_conv (16,1024,41,41)
        #cam (16,21,321,321)
        cam = cam.reshape(21,64,41,41)
        cam = cam.permute(1,0,2,3)
        min = torch.min(cam).clone()
        max = torch.max(cam).clone()
        cam = cam - min
        cam_img = cam / max
        cam_img = cam_img.cpu().detach().numpy()
        temp = np.zeros((0,21,321,321))
        cam_img = np.uint8(255*cam_img)
        # Resize each sample's CAM stack to the output resolution.
        for item in cam_img:
            item = item.swapaxes(0,2)
            item = cv2.resize(item,size_upsample)
            item = item.swapaxes(0,2)
            item = np.expand_dims(item,axis=0)
            temp = np.r_[temp,item]
        #for i in range(16):
            #ci = ci.unsqueeze(0)
            #temp = torch.cat((temp,ci),0)
        #cam_img = cam_img.swapaxes(0,2)
        return torch.from_numpy(temp)

    def getCams(self,preds,feature,weight_softmax):
        """Thin wrapper around returnCAM (preds is currently unused)."""
        # print('shape of preds:{}'.format(preds))
        bz, nc, h, w = feature.size()
        #print('bz is {}'.format(bz))
        #print(i)
        #print("shape of CAMs is {}".format(CAMs.shape))
        #print('shape of nextCams is{}'.format(returnCAM(feature[i], weight_softmax, [idxs[i,-1]]).shape))
        CAMs = self.returnCAM(feature, weight_softmax)
        #print(nextCAM)
        return CAMs

    def showCAM(self,CAMs,img):
        """Overlay the first CAM on a fixed test image and write CAM.jpg.

        NOTE(review): the img parameter is immediately overwritten by
        cv2.imread('test.jpg') — confirm whether the argument should be used.
        """
        img = cv2.imread('test.jpg')
        height=497
        width=497
        heatmap = cv2.applyColorMap(cv2.resize(CAMs[0],(width, height)), cv2.COLORMAP_JET)
        result = heatmap * 0.3 + img * 0.5
        cv2.imwrite('CAM.jpg', result)
if __name__ == "__main__":
    # Smoke test: push a small tensor through the model and print shapes.
    net = DeepLabLargeFOV(3, 10)
    in_ten = torch.randn(1, 3, 224, 224)
    out = net(in_ten)
    # NOTE(review): if DeepLabLargeFOV's forward returns a tuple (like the
    # class above), out.size() will fail — confirm which model this targets.
    print(out.size())
    in_ten = torch.randn(1, 3, 64, 64)
    # A dilated 3x3 conv with padding == dilation preserves spatial size.
    mod = nn.Conv2d(3,
            512,
            kernel_size = 3,
            stride = 1,
            padding = 2,
            dilation = 2)
    out = mod(in_ten)
    print(out.shape)
|
# The pipeline API requires imports.
from zipline.api import attach_pipeline, pipeline_output, update_universe
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage
def initialize(context):
    """Create and register the 'example' pipeline with a 10-day SMA column and a screen."""
    pipe = Pipeline()
    attach_pipeline(pipe, 'example')
    # 10-day simple moving average of close prices.
    sma_short = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                    window_length=10)
    pipe.add(sma_short, 'sma_short')
    # Filter out securities whose short SMA is not above $1.
    pipe.set_screen(sma_short > 1.0)
def before_trading_start(context, data):
    """Pull the pipeline output and keep the top 200 securities by SMA.

    Fix: ``DataFrame.sort`` was deprecated in pandas 0.17 and removed in 0.20;
    ``sort_values`` is the drop-in replacement with identical semantics here.
    """
    output = pipeline_output('example')
    # Select and update the trading universe.
    context.my_universe = output.sort_values('sma_short', ascending=False).iloc[:200]
    update_universe(context.my_universe.index)
def handle_data(context, data):
    # Log the top five rows of the current universe each bar.
    # NOTE(review): `log` is not defined in this module — presumably the
    # Quantopian-provided global; verify it exists in the runtime environment.
    log.info("\n" + str(context.my_universe.head(5)))
|
#!/usr/bin/env python3
import collections
import enum
# TODO we may need to change this to Enum since only a few valid values are allowed.
class CrimeLevel(object):
    """Crime level, e.g. "Felony Class A".

    Attributes:
        type_: A string describing the type of crime.
        class_: An optional string of length 1 specifying the class.
    """
    def __init__(self, type_, class_=None):
        self.type_ = type_
        self.class_ = class_
    def __str__(self):
        # Bug fix: the original tested the bare name `class_`, which raised
        # NameError; the value lives on the instance as `self.class_`.
        if self.class_:
            return '{} Class {}'.format(self.type_, self.class_)
        return self.type_
class DispositionType(enum.Enum):
    """Possible dispositions for a charge.

    Values are auto-numbered from 1, matching the original functional-API
    declaration (`enum.Enum('DispositionType', '...')`).
    """
    CONVICTED = 1
    PROBATION_REVOKED = 2
    DISMISSED = 3
    ACQUITTED = 4
    NO_COMPLAINT = 5
class Disposition(object):
    """Outcome recorded for a charge.

    Attributes:
        type_: A DispositionType enum member.
        date: A datetime.date giving when the disposition was entered.
    """
    def __init__(self, type_, date):
        self.type_ = type_
        self.date = date
class Statute(object):
    """Statute corresponding to a law.

    Statutes are represented by numbers in a hierarchical manner:
    chapter.subchapter(section)(subsection), e.g. 653.412(5)(c).

    Attributes:
        chapter: An integer that specifies the statute chapter.
        subchapter: An integer that specifies the statute sub-chapter.
        section: An integer that specifies the section within the sub-chapter.
        subsection: A string of length 1 specifying the sub-section within
            the section.
    """
    def __init__(self, chapter, subchapter, section=None, subsection=None):
        self.chapter = chapter
        self.subchapter = subchapter
        self.section = section
        self.subsection = subsection
        # TODO we may need to add components beyond subsection
    def __eq__(self, other):
        # Fix: guard against non-Statute operands; the original raised
        # AttributeError when compared with an unrelated type.
        if not isinstance(other, Statute):
            return NotImplemented
        # Missing (falsy) section/subsection on both sides counts as equal.
        return (self.chapter == other.chapter and
                self.subchapter == other.subchapter and
                ((not self.section and not other.section) or
                 self.section == other.section) and
                ((not self.subsection and not other.subsection) or
                 self.subsection == other.subsection))
    def __hash__(self):
        # Fix: defining __eq__ without __hash__ made instances unhashable.
        # Falsy section/subsection normalize to None to stay consistent
        # with __eq__.
        return hash((self.chapter, self.subchapter,
                     self.section or None, self.subsection or None))
    def __str__(self):
        # TODO do these need to have leading zeros?
        statute = '{}'.format(self.chapter)
        if self.subchapter:
            statute = '{}.{:03d}'.format(statute, self.subchapter)
        if self.section:
            statute = '{}({})'.format(statute, self.section)
        if self.subsection:
            statute = '{}({})'.format(statute, self.subsection)
        return statute
class Charge(object):
    """A charge filed against a client.

    Attributes:
        name: A string describing the charge.
        statute: The Statute that applies to the charge.
        level: A CrimeLevel for the charge.
        date: A datetime.date on which the charge was filed.
        disposition: A Disposition for the charge.
    """
    def __init__(self, name, statute, level, date, disposition):
        self.name = name
        self.statute = statute
        self.level = level
        self.date = date
        self.disposition = disposition
        self._result = None  # cached type-eligibility Result
    @property
    def type_elig_result(self):
        """Type-eligibility analysis Result (set by RecordAnalyzer)."""
        return self._result
    @type_elig_result.setter
    def type_elig_result(self, result):
        self._result = result
# Whether a case is still pending before the court.
CaseState = enum.Enum('CaseState', 'OPEN CLOSED')
class Case(object):
    """A court case belonging to a client.

    Attributes:
        charges: A list of Charge objects.
        state: A CaseState member.
        balance_due: Money still owed to the court.
    """
    def __init__(self, charges, state, balance_due=0.0):
        self.charges = charges
        self.state = state
        self.balance_due = balance_due
    def num_charges(self):
        """Number of charges filed in this case."""
        return len(self.charges)
class Client(object):
    """An individual who wants to expunge charges from their record.

    Attributes:
        name: The client's name.
        dob: A datetime.date of birth.
        cases: A list of Case objects.
    """
    def __init__(self, name, dob, cases):
        self.name = name
        self.dob = dob
        self.cases = cases
    def num_charges(self):
        """Total number of charges across all of the client's cases."""
        return sum(case.num_charges() for case in self.cases)
class ResultCode(enum.Enum):
    """Outcome codes produced by record analysis."""
    INELIGIBLE = 'Ineligible'
    ELIGIBLE = 'Eligible'
    FURTHER_ANALYSIS = 'Further analysis needed'
    EXAMINE = 'Examine'
    NO_ACTION = 'Take no action'
    OPEN_CASE = 'Open Case'
# A single analysis step.
#   result: bool outcome of the check.
#   check: a string that briefly describes the check.
#   check_desc: a string that elaborates the check (optional; defaults to None).
# Fix: the original described this in a bare string literal placed above the
# definition — a no-op statement, not documentation — now a real comment.
CheckResult = collections.namedtuple('CheckResult', 'result check check_desc',
                                     defaults=[None])
class Result(object):
    """Outcome of an eligibility analysis.

    Attributes:
        code: A ResultCode member.
        analysis: A list of CheckResult steps that were performed.
        statute: The Statute supporting the result, if any.
        date: A datetime.date eligibility date, if any.
    """
    def __init__(self, code=None, analysis=None,
                 statute=None, date=None):
        self.code = code
        self.analysis = analysis
        self.statute = statute
        self.date = date
    def __str__(self):
        parts = (self.code, self.analysis, self.statute, self.date)
        return ' '.join(str(p) for p in parts)
class ResultInElig_137_225_5(Result):
    """An INELIGIBLE result grounded in statute 137.225(5)."""
    def __init__(self, **kwargs):
        super().__init__(code=ResultCode.INELIGIBLE,
                         statute=Statute(137, 225, 5), **kwargs)
class RecordAnalyzer(object):
    """Analyzes a client's records for expungement eligibility.

    Attributes:
        client: A Client instance.
    """
    def __init__(self, client):
        self.client = client
    # Fix: the helpers below take no `self`; they are now marked @staticmethod
    # so both RecordAnalyzer._helper(...) and self._helper(...) call forms work.
    @staticmethod
    def _is_charge_level(charge, type_, class_):
        """Check whether a charge is of the given level (and optional class)."""
        check = 'Is the charge a {}'.format(type_)
        if class_:
            check += ' class {}'.format(class_)
        result = (charge.level.type_ == type_ and
                  (not class_ or charge.level.class_ == class_))
        return CheckResult(check=check, result=result)
    @staticmethod
    def _is_charge_statute(charge, statute):
        """Check whether a charge falls under a single statute."""
        check = 'Does the charge fall under statute: ' + str(statute)
        return CheckResult(check=check, result=charge.statute == statute)
    @staticmethod
    def _is_charge_in_statute_list(charge, statutes, desc):
        """Check whether a charge falls under any statute in `statutes`."""
        check = ('Does the charge fall under any of these statutes: ' +
                 ','.join(str(statute) for statute in statutes))
        # TODO implement this
        return CheckResult(check=check, result=False, check_desc=desc)
    @staticmethod
    def _is_charge_sex_crime(charge):
        """Check whether a charge is a sex crime."""
        # TODO update
        _statutes_sex_crimes = []
        return RecordAnalyzer._is_charge_in_statute_list(
            charge, _statutes_sex_crimes, 'Is the charge a sex crime')
    @staticmethod
    def _is_charge_traffic_crime(charge):
        """Check whether a charge is a traffic crime."""
        # TODO update
        _statutes_traffic_crimes = []
        return RecordAnalyzer._is_charge_in_statute_list(
            charge, _statutes_traffic_crimes, 'Is the charge a traffic crime')
    def _have_open_case(self):
        """Check whether the client currently has any open case."""
        check = 'Is there a open case for the client'
        result = any(case.state == CaseState.OPEN for case in self.client.cases)
        return CheckResult(check=check, result=result)
    def time_eligibility(self):
        """Run Time Eligibility analysis on the client and their records.

        Fix: this description lived in a bare string literal before the method
        (a no-op statement); it is now a real docstring.

        TODO: make it return analysis for each charge as well (which is
        supposed to be an update on eligibility date and relevant statutes).

        Returns:
            A Result instance.
        """
        analysis = [self._have_open_case()]
        if analysis[-1].result:
            return Result(ResultCode.OPEN_CASE, analysis)
        # TODO implement the rest
        return Result(ResultCode.NO_ACTION)
    def type_eligibility(self, charge):
        """Run Type Eligibility analysis on a charge.

        Args:
            charge: A Charge instance.

        Returns:
            A Result instance.
        """
        analysis = []
        analysis.append(RecordAnalyzer._is_charge_level(charge, 'Felony', 'A'))
        if analysis[-1].result:
            return ResultInElig_137_225_5(analysis=analysis)
        analysis.append(RecordAnalyzer._is_charge_sex_crime(charge))
        if analysis[-1].result:
            return ResultInElig_137_225_5(analysis=analysis)
        analysis.append(RecordAnalyzer._is_charge_traffic_crime(charge))
        if analysis[-1].result:
            return ResultInElig_137_225_5(analysis=analysis)
        # TODO add remaining analysis
        return Result(ResultCode.FURTHER_ANALYSIS, analysis)
    def analyze(self):
        """Analyze which records are expungeable.

        Sets each Charge's type_elig_result attribute with the type
        eligibility analysis result.

        Returns:
            A Result instance describing the Time Eligibility analysis.
        """
        for case in self.client.cases:
            for charge in case.charges:
                charge.type_elig_result = self.type_eligibility(charge)
        return self.time_eligibility()
|
'''
for
else
'''
# Print, for each entry, whether the name starts with the letter D.
variavel = ['Daniel', "Wolter", "Martins"]
for valor in variavel:
    if valor.startswith('D'):# startswith checks whether the string begins with the character 'D'
        print('começa com D', valor)
    else:
        print('nao começa com D', valor)
from . import safe_str as _safe_str
from .iterutils import listify as _listify
from .path import InstallRoot as _InstallRoot, install_path as _install_path
def file_install_path(file, cross=None):
    """Return the installed path for `file`, based on its install root."""
    is_dir = isinstance(file, Directory)
    return _install_path(file.path, file.install_root, directory=is_dir,
                         cross=cross)
def installify(file, cross=None):
    # Recursively clone `file` with every path rewritten to its install location.
    return file.clone(lambda f: file_install_path(f, cross), True)
def _clone_traits(exclude=set(), subfiles={}):
def inner(cls):
cls._clone_exclude = cls._clone_exclude | exclude
if subfiles:
cls._clone_subfiles = cls._clone_subfiles.copy()
cls._clone_subfiles.update(subfiles)
return cls
return inner
class Node(_safe_str.safe_string_ops):
private = False
def __init__(self, path):
self.creator = None
self.path = path
def _safe_str(self):
return _safe_str.safe_str(self.path)
@property
def all(self):
return [self]
def __repr__(self):
return '<{type} {name}>'.format(
type=type(self).__name__, name=repr(self.path)
)
def __hash__(self):
return hash(self.path)
def __eq__(self, rhs):
return type(self) == type(rhs) and self.path == rhs.path
def __ne__(self, rhs):
return not (self == rhs)
class Phony(Node):
    # A node with no on-disk artifact (like a make "phony" target).
    pass
class File(Node):
    """A node backed by an actual file, with generic cloning support."""
    # Attributes skipped entirely when cloning; `_clone_subfiles` maps a
    # sub-file attribute name to the constructor argument that recreates it.
    _clone_exclude = {'path', 'creator', 'post_install'}
    _clone_subfiles = {}
    # How (and where) this file is installed; None means "not installed".
    install_kind = None
    install_root = None
    def __init__(self, path):
        super().__init__(path)
        self.post_install = None
    @property
    def install_deps(self):
        # Extra nodes that must be installed along with this file.
        return []
    def _clone_args(self, pathfn, recursive):
        """Build the kwargs needed to reconstruct this file with new paths.

        `pathfn` maps a file to its new path; `recursive` controls whether
        sub-files are fully cloned or merely have their current paths reused.
        """
        args = {'path': pathfn(self)}
        for k, v in self.__dict__.items():
            if k in self._clone_exclude:
                continue
            try:
                # Sub-file attributes are translated to their constructor
                # argument; KeyError means `k` is an ordinary attribute.
                dest = self._clone_subfiles[k]
                orig = getattr(self, k)
                if orig is None:
                    args[dest] = None
                elif recursive:
                    args[dest] = pathfn(orig)
                else:
                    args[dest] = orig.path
            except KeyError:
                args[k] = v
        return args
    def clone(self, pathfn, recursive=False, inner=None):
        """Return a copy of this file with paths rewritten by `pathfn`.

        If `inner` is a sub-file of this object, return the clone's
        counterpart of that sub-file instead of the clone itself.
        """
        clone = type(self)(**self._clone_args(pathfn, recursive))
        if inner and inner is not self:
            for i in self._clone_subfiles:
                if getattr(self, i) is inner:
                    return getattr(clone, i)
            raise RuntimeError('unable to find inner clone object')
        return clone
@_clone_traits(exclude={'files'})
class Directory(File):
    """A directory node, optionally tracking the files inside it."""
    def __init__(self, path, files=None):
        super().__init__(path)
        self.files = files
    def _clone_args(self, pathfn, recursive):
        args = super()._clone_args(pathfn, recursive)
        # `files` is excluded from generic cloning, so handle it explicitly.
        if self.files is None:
            files = None
        elif recursive:
            files = [child.clone(pathfn, recursive) for child in self.files]
        else:
            files = list(self.files)
        args['files'] = files
        return args
class CodeFile(File):
    # A file of source code written in language `lang`.
    def __init__(self, path, lang):
        super().__init__(path)
        self.lang = lang
class SourceFile(CodeFile):
    # A compilable source file; never installed.
    pass
class HeaderFile(CodeFile):
    # A header, installed as data under the include directory.
    install_kind = 'data'
    install_root = _InstallRoot.includedir
class PrecompiledHeader(HeaderFile):
    # A precompiled header: a build artifact only, so it is not installed.
    install_kind = None
@_clone_traits(subfiles={'object_file': 'object_path'})
class MsvcPrecompiledHeader(PrecompiledHeader):
    # MSVC-style PCH, which carries a companion object file to link in.
    def __init__(self, path, object_path, header_name, format, lang):
        super().__init__(path, lang)
        # The companion object is private: it gets linked, but isn't exposed.
        self.object_file = ObjectFile(object_path, format, self.lang)
        self.object_file.private = True
        self.header_name = header_name
    def _clone_args(self, pathfn, recursive):
        args = super()._clone_args(pathfn, recursive)
        # `format` isn't stored on self; recover it from the object file.
        args['format'] = self.object_file.format
        return args
class HeaderDirectory(Directory):
    """A directory of headers, installed under the include directory."""
    install_kind = 'data'
    install_root = _InstallRoot.includedir
    def __init__(self, path, files=None, system=False, langs=None):
        super().__init__(path, files)
        # System header dirs are treated like compiler system includes;
        # `langs` lists the languages the headers apply to.
        self.system = system
        self.langs = _listify(langs)
class ModuleDefFile(File):
    # A Windows module-definition (.def) file listing exported symbols.
    pass
class Binary(File):
    """Any compiled binary artifact; installed under libdir by default."""
    install_kind = 'data'
    install_root = _InstallRoot.libdir
    def __init__(self, path, format, lang=None):
        super().__init__(path)
        # `format` is the object format; `lang` the originating language.
        self.format = format
        self.lang = lang
class ObjectFile(Binary):
    # A single compiled object file.
    pass
# This is used by JVM languages to hold a list of all the object files
# generated by a particular source file's compilation.
class ObjectFileList(ObjectFile):
    install_kind = None
    def __init__(self, path, object_name, format, lang=None):
        super().__init__(path, format, lang)
        # Representative object file named in the list.
        self.object_file = ObjectFile(object_name, format, lang)
# Any binary data that has been "linked" (or had a similar process applied)
# so it can be used by a linker/loader, installed to the system, etc.
@_clone_traits(exclude={'runtime_deps', 'linktime_deps', 'package_deps'})
class LinkedBinary(Binary):
    """A linked binary carrying runtime/linktime/package dependency lists."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.runtime_deps = []
        self.linktime_deps = []
        self.package_deps = []
    @property
    def install_deps(self):
        """Nodes that must be installed along with this binary."""
        return self.runtime_deps + self.linktime_deps
class Executable(LinkedBinary):
    # A runnable program, installed to the binary directory.
    install_kind = 'program'
    install_root = _InstallRoot.bindir
@_clone_traits(exclude={'parent'})
class Library(LinkedBinary):
    # Base for libraries; `parent` (set by DualUseLibrary) is never cloned.
    @property
    def runtime_file(self):
        # The file needed at run time, if any (None, e.g. for static libs).
        return None
# This is used for JVM binaries, which can be both executables and libraries.
# Multiple inheritance is a sign that we should perhaps switch to a trait-based
# system though...
class ExecutableLibrary(Executable, Library):
    install_kind = 'program'
    install_root = _InstallRoot.libdir
class SharedLibrary(Library):
    install_kind = 'program'
    @property
    def runtime_file(self):
        # A shared library is itself what gets loaded at run time.
        return self
@_clone_traits(exclude={'format', 'lang'})
class LinkLibrary(SharedLibrary):
    # A link-time alias (e.g. libfoo.so -> libfoo.so.1); it borrows
    # format/lang from the real library it points at.
    def __init__(self, path, library):
        super().__init__(path, library.format, library.lang)
        self.library = library
        self.linktime_deps = [library]
    @property
    def runtime_file(self):
        # At run time the real library, not the link, is what's loaded.
        return self.library
    def clone(self, path, recursive=False, inner=None):
        # Recursive clones collapse onto the underlying library; passing
        # `inner` makes it return the cloned counterpart of this link.
        if recursive:
            return self.library.clone(path, True, inner or self)
        return super().clone(path, False, inner)
@_clone_traits(subfiles={'soname': 'soname', 'link': 'linkname'})
class VersionedSharedLibrary(SharedLibrary):
    # ELF-style versioned library: the real file plus soname and linker-name
    # aliases (the linker name points at the soname, which points here).
    def __init__(self, path, format, lang, soname, linkname):
        super().__init__(path, format, lang)
        self.soname = LinkLibrary(soname, self)
        self.link = LinkLibrary(linkname, self.soname)
class StaticLibrary(Library):
    """A static archive; `forward_opts` carries options forwarded to consumers."""
    def __init__(self, path, format, lang=None, forward_opts=None):
        super().__init__(path, format, lang)
        self.forward_opts = forward_opts
class WholeArchive(StaticLibrary):
    # Transparent proxy marking a static library for whole-archive linking.
    # NOTE(review): super().__init__ is deliberately not called — every
    # attribute except the handful below is forwarded to the wrapped library.
    def __init__(self, library):
        self.library = library
    def __getattribute__(self, name):
        if name in ['library', '_safe_str', '__repr__', '__hash__', '__eq__']:
            return object.__getattribute__(self, name)
        return getattr(object.__getattribute__(self, 'library'), name)
class ExportFile(File):
    # Linker export list; an internal build detail, never exposed.
    private = True
# This refers specifically to DLL files that have an import library, not just
# anything with a .dll extension (for instance, .NET DLLs are just regular
# shared libraries). While this is a "library" in some senses, since you can't
# link to it during building, we just consider it a LinkedBinary.
@_clone_traits(subfiles={'import_lib': 'import_name',
                         'export_file': 'export_name'})
class DllBinary(LinkedBinary):
    install_root = _InstallRoot.bindir
    private = True
    def __init__(self, path, format, lang, import_name, export_name=None):
        super().__init__(path, format, lang)
        # Linking goes through the import library; the DLL itself is runtime-only.
        self.import_lib = LinkLibrary(import_name, self)
        self.export_file = ExportFile(export_name) if export_name else None
class DualUseLibrary:
    """Pairs the shared and static builds of one library into a single object."""
    def __init__(self, shared, static):
        self.shared = shared
        self.static = static
        # Each half points back to this wrapper.
        shared.parent = self
        static.parent = self
    @property
    def all(self):
        """Both concrete library nodes."""
        return [self.shared, self.static]
    def __repr__(self):
        return '<DualUseLibrary {!r}>'.format(self.shared.path)
    def __hash__(self):
        return hash(self.shared.path)
    def __eq__(self, rhs):
        if type(self) != type(rhs):
            return False
        return self.shared == rhs.shared and self.static == rhs.static
    def __ne__(self, rhs):
        return not (self == rhs)
    @property
    def package_deps(self):
        # Shared and static halves share package dependencies.
        return self.shared.package_deps
    @property
    def install_deps(self):
        return self.shared.install_deps
    @property
    def forward_opts(self):
        # Forwarded options only make sense for the static half.
        return self.static.forward_opts
    def clone(self, *args, **kwargs):
        """Clone both halves and re-pair them."""
        return DualUseLibrary(self.shared.clone(*args, **kwargs),
                              self.static.clone(*args, **kwargs))
class PkgConfigPcFile(File):
    # A pkg-config .pc metadata file, installed alongside the libraries.
    install_root = _InstallRoot.libdir
|
#!/usr/bin/env python3
import subprocess
import sys
from pathlib import Path
_WELL_KNOWN_FILE = Path("tests", "pyright_test.py")
_PYRIGHT_COMMAND = ["npx", "-p", "pyright@1.1.114", "pyright"]
def main() -> None:
    """Run pyright via npx and exit with its return code."""
    assert_npm_is_installed()
    completed = subprocess.run(_PYRIGHT_COMMAND)
    sys.exit(completed.returncode)
def assert_npm_is_installed() -> None:
    """Exit with an error unless run from the typeshed root and npx is usable."""
    # The well-known file doubles as a check that we're in the right directory.
    if not _WELL_KNOWN_FILE.exists():
        print("pyright_test.py must be run from the typeshed root directory", file=sys.stderr)
        sys.exit(1)
    try:
        # Probe npx; OSError means Node.js (and thus npx) isn't installed.
        subprocess.run(["npx", "--version"])
    except OSError:
        print("error running npx; is Node.js installed?", file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
main()
|
# Copyright 2021 Peng Cheng Laboratory (http://www.szpclab.com/) and FedLab Authors (smilelab.group)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Coordinator(object):
    """Deal with the mapping relation between client id in the FL system and
    process rank in communication.

    Note:
        Server Manager creates a Coordinator following:
        1. init network connection.
        2. clients send local group info (the number of clients simulated
           locally) to the server.
        3. server receives all info and inits a server Coordinator.

    Args:
        setup_dict (dict): A dict like {rank: client_num, ...}, representing
            the map relation between process rank and number of clients.
        mode (str, optional): "GLOBAL" or "LOCAL". Controls whether map_id
            returns (rank, global id) or (rank, local id).
    """
    def __init__(self, setup_dict, mode='LOCAL') -> None:
        self.map = setup_dict
        self.mode = mode
    def map_id(self, id):
        """Map a client id to (rank, local-or-global id).

        Args:
            id (int): client id.

        Returns:
            tuple: (rank in the distributed group, mapped id).

        Raises:
            ValueError: if `id` is not in [0, total). (Fix: the original fell
            off the loop and implicitly returned None, which later crashed
            map_id_list with an unpacking TypeError.)
        """
        m_id = id
        for rank, num in self.map.items():
            if m_id >= num:
                m_id -= num
            else:
                ret_id = m_id if self.mode == 'LOCAL' else id
                return rank, ret_id
        raise ValueError(
            "Invalid client id {}; expected 0 <= id < {}".format(id, self.total))
    def map_id_list(self, id_list):
        """Map a list of client ids to {rank: [mapped ids]}.

        This can be very useful in Scale modules.

        Args:
            id_list (list[int]): a list of client ids.

        Returns:
            dict: process rank -> list of its mapped client ids.
        """
        map_dict = {}
        for client_id in id_list:
            rank, mapped = self.map_id(client_id)
            map_dict.setdefault(rank, []).append(mapped)
        return map_dict
    def switch(self):
        """Toggle between GLOBAL and LOCAL mapping modes."""
        if self.mode == 'GLOBAL':
            self.mode = 'LOCAL'
        elif self.mode == 'LOCAL':
            self.mode = 'GLOBAL'
        else:
            raise ValueError("Invalid Map Mode {}".format(self.mode))
    @property
    def total(self):
        """Total number of clients across all ranks."""
        return int(sum(self.map.values()))
    def __str__(self) -> str:
        return "Coordinator map information: {} \nMap mode: {} \nTotal: {}".format(
            self.map, self.mode, self.total)
    def __call__(self, info, *args, **kwds):
        # Dispatch on input type: single id or list of ids.
        if isinstance(info, int):
            return self.map_id(info)
        if isinstance(info, list):
            return self.map_id_list(info)
import torch
import torch.nn as nn
from torch.nn import functional as F
import numbers
import math
from .blocks import ConvBlock, DeconvBlock, MeanShift
class GaussianSmoothing(nn.Module):
    """Depthwise Gaussian blur for 1d/2d/3d inputs.

    The kernel is fixed at construction (registered as a buffer) and applied
    per channel via a grouped convolution. No padding is added, so spatial
    dimensions shrink by kernel_size - 1.
    """
    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Broadcast scalar arguments to one value per spatial dimension.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The nd Gaussian is separable: build it as the product of 1d
        # Gaussians, one per dimension.
        grids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        kernel = 1
        for size, std, grid in zip(kernel_size, sigma, grids):
            center = (size - 1) / 2
            kernel = kernel * (1 / (std * math.sqrt(2 * math.pi)) *
                               torch.exp(-((grid - center) / std) ** 2 / 2))
        # Normalize so the kernel sums to one, then shape it as a depthwise
        # conv weight: (channels, 1, *kernel_size).
        kernel = kernel / torch.sum(kernel)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        self.register_buffer('weight', kernel)
        self.groups = channels
        # Pick the convolution matching the dimensionality.
        conv_by_dim = {1: F.conv1d, 2: F.conv2d, 3: F.conv3d}
        try:
            self.conv = conv_by_dim[dim]
        except KeyError:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )
    def forward(self, input):
        """Apply the Gaussian blur (grouped conv, no padding)."""
        return self.conv(input, weight=self.weight, groups=self.groups)
class FeedbackBlock(nn.Module):
    """Recurrent feedback block of stacked up-/down-projection groups.

    Keeps a hidden state across calls; reset_state() (or the first call after
    construction) starts a new sequence.
    """
    def __init__(self, num_features, num_groups, upscale_factor, act_type, norm_type):
        super(FeedbackBlock, self).__init__()
        # Deconv geometry per upscale factor (kernel/stride/padding chosen so
        # the deconv inverts the matching strided conv).
        if upscale_factor == 2:
            stride = 2
            padding = 2
            kernel_size = 6
        elif upscale_factor == 3:
            stride = 3
            padding = 2
            kernel_size = 7
        elif upscale_factor == 4:
            stride = 4
            padding = 2
            kernel_size = 8
        elif upscale_factor == 8:
            stride = 8
            padding = 2
            kernel_size = 12
        self.num_groups = num_groups
        # Compresses [input, last hidden] back to num_features channels.
        self.compress_in = ConvBlock(2*num_features, num_features,
                                     kernel_size=1,
                                     act_type=act_type, norm_type=norm_type)
        self.upBlocks = nn.ModuleList()
        self.downBlocks = nn.ModuleList()
        self.uptranBlocks = nn.ModuleList()
        self.downtranBlocks = nn.ModuleList()
        for idx in range(self.num_groups):
            self.upBlocks.append(DeconvBlock(num_features, num_features,
                                             kernel_size=kernel_size, stride=stride, padding=padding,
                                             act_type=act_type, norm_type=norm_type))
            self.downBlocks.append(ConvBlock(num_features, num_features,
                                             kernel_size=kernel_size, stride=stride, padding=padding,
                                             act_type=act_type, norm_type=norm_type, valid_padding=False))
            # 1x1 transitions fuse the concatenated features of all earlier groups.
            if idx > 0:
                self.uptranBlocks.append(ConvBlock(num_features*(idx+1), num_features,
                                                   kernel_size=1, stride=1,
                                                   act_type=act_type, norm_type=norm_type))
                self.downtranBlocks.append(ConvBlock(num_features*(idx+1), num_features,
                                                     kernel_size=1, stride=1,
                                                     act_type=act_type, norm_type=norm_type))
        self.compress_out = ConvBlock(num_groups*num_features, num_features,
                                      kernel_size=1,
                                      act_type=act_type, norm_type=norm_type)
        self.should_reset = True
        self.last_hidden = None
    def forward(self, x):
        if self.should_reset:
            # Fix: the original allocated with torch.zeros(...).cuda(), which
            # crashes on CPU-only runs; zeros_like follows x's device/dtype.
            self.last_hidden = torch.zeros_like(x)
            self.last_hidden.copy_(x)
            self.should_reset = False
        # Feed the previous output back in alongside the new input.
        x = torch.cat((x, self.last_hidden), dim=1)
        x = self.compress_in(x)
        lr_features = []
        hr_features = []
        lr_features.append(x)
        for idx in range(self.num_groups):
            LD_L = torch.cat(tuple(lr_features), 1)  # when idx == 0, lr_features == [x]
            if idx > 0:
                LD_L = self.uptranBlocks[idx-1](LD_L)
            LD_H = self.upBlocks[idx](LD_L)
            hr_features.append(LD_H)
            LD_H = torch.cat(tuple(hr_features), 1)
            if idx > 0:
                LD_H = self.downtranBlocks[idx-1](LD_H)
            LD_L = self.downBlocks[idx](LD_H)
            lr_features.append(LD_L)
        del hr_features
        # Leave out the input x itself (lr_features[0]) from the output fusion.
        output = torch.cat(tuple(lr_features[1:]), 1)
        output = self.compress_out(output)
        self.last_hidden = output
        return output
    def reset_state(self):
        """Start a fresh sequence on the next forward call."""
        self.should_reset = True
class ResBlock(nn.Module):
    """Two conv-BN-act stages with nested residual connections.

    NOTE(review): the `bn` flag and `res_scale` parameter are accepted but
    ignored — BatchNorm2d is always appended and no scaling is applied.
    Honoring them would change trained-model behavior; confirm before fixing.
    """
    def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.ReLU(inplace=False), res_scale=0.1):
        super(ResBlock, self).__init__()
        self.body = nn.ModuleList()
        for i in range(2):
            m = []
            m.append(conv(n_feats, n_feats, kernel_size, bias=bias, mode='C'))
            m.append(nn.BatchNorm2d(n_feats))
            # NOTE(review): the same `act` module instance is shared by both
            # stages (harmless for stateless activations like ReLU).
            m.append(act)
            self.body.append(nn.Sequential(*m))
        # self.res_scale = res_scale
    def forward(self, x):
        # out2 consumes x + out1; the final result adds x back again.
        out1 = self.body[0](x)
        out2 = self.body[1](x+out1)
        res = x + out2
        return res
class Upsampler(nn.Sequential):
    """Sub-pixel (PixelShuffle) upsampler for power-of-two or 3x scales."""
    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:    # Is scale = 2^n?
            # Stack log2(scale) stages of (conv to 4x channels -> shuffle by 2).
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feats, 4 * n_feats, 3, bias))
                m.append(nn.PixelShuffle(2))
                if bn:
                    m.append(nn.BatchNorm2d(n_feats))
                if act == 'relu':
                    m.append(nn.ReLU(True))
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))
        elif scale == 3:
            # Single stage: conv to 9x channels, shuffle by 3.
            m.append(conv(n_feats, 9 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(3))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            # Other scales are unsupported.
            raise NotImplementedError
        super(Upsampler, self).__init__(*m)
class RANDOM(nn.Module):
    """Feedback super-resolution network with a blur-augmented training path.

    forward() takes two routes: when is_test is False, the input and a
    Gaussian-blurred copy are fused through the feedback blocks; when True,
    only the plain input path runs.
    """
    def __init__(self, in_channels, out_channels, num_features, num_blocks, num_groups, upscale_factor, act_type = 'prelu', norm_type = None):
        super(RANDOM, self).__init__()
        # Deconv geometry per upscale factor (passed through to FeedbackBlock).
        if upscale_factor == 2:
            stride = 2
            padding = 2
            kernel_size = 6
        elif upscale_factor == 3:
            stride = 3
            padding = 2
            kernel_size = 7
        elif upscale_factor == 4:
            stride = 4
            padding = 2
            kernel_size = 8
        elif upscale_factor == 8:
            stride = 8
            padding = 2
            kernel_size = 12
        self.num_blocks = num_blocks
        self.num_features = num_features
        self.upscale_factor = upscale_factor
        #self.blur_matrix = gaussian blur kernel
        # RGB mean for DIV2K
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = MeanShift(rgb_mean, rgb_std)
        #extract in
        self.conv_in = ConvBlock(in_channels, 4*num_features,
                                 kernel_size=3,
                                 act_type=act_type, norm_type=norm_type)
        self.feat_in = ConvBlock(4*num_features, num_features,
                                 kernel_size=1,
                                 act_type=act_type, norm_type=norm_type)
        # Fixed 7x7 Gaussian blur (sigma 1.6) used on the training branch.
        self.add_blur = GaussianSmoothing(3, 7, 1.6)
        # NOTE(review): conv_in_blur/feat_in_blur are defined but forward()
        # reuses conv_in/feat_in for the blurred branch — confirm whether the
        # dedicated blur layers were meant to be used.
        self.conv_in_blur = ConvBlock(in_channels, 4*num_features,
                                      kernel_size=3,
                                      act_type=act_type, norm_type=norm_type)
        self.feat_in_blur = ConvBlock(4*num_features, num_features,
                                      kernel_size=1,
                                      act_type=act_type, norm_type=norm_type)
        # BI feature extraction
        self.res_blocks = nn.ModuleList()
        self.blocks = nn.ModuleList()
        for _ in range(self.num_blocks-1):
            self.res_blocks.append(ResBlock(ConvBlock, num_features, kernel_size=3))
            self.blocks.append(FeedbackBlock(num_features, num_groups, upscale_factor, act_type, norm_type))
        # basic block
        self.blocks.append(FeedbackBlock(num_features, num_groups, upscale_factor, act_type, norm_type))
        # reconstruction block
        # uncomment for pytorch 0.4.0
        # self.upsample = nn.Upsample(scale_factor=upscale_factor, mode='bilinear')
        self.out = ConvBlock(num_features, num_features,
                             kernel_size=1, stride=1,
                             act_type='prelu', norm_type=norm_type)
        # NOTE(review): Upsampler is hard-coded to scale=4 and n_feats=64
        # regardless of `upscale_factor`/`num_features` — verify for other
        # configurations.
        self.upsampler = Upsampler(ConvBlock, scale=4, n_feats=64)
        self.conv_out = ConvBlock(num_features, out_channels,
                                  kernel_size=3,
                                  act_type=None, norm_type=norm_type)
        self.add_mean = MeanShift(rgb_mean, rgb_std, 1)
    def forward(self, x, is_test=False):
        # Reset feedback state so every call starts a fresh sequence.
        self._reset_state()
        x = self.sub_mean(x)
        # uncomment for pytorch 0.4.0
        # inter_res = self.upsample(x)
        # comment for pytorch 0.4.0
        inter_res = nn.functional.interpolate(x, scale_factor=self.upscale_factor, mode='bilinear', align_corners=False)
        if is_test == False:
            # Training path: fuse plain and blurred features through the
            # feedback blocks. Reflect-pad by 3 to offset the valid 7x7 blur.
            x_blur = F.pad(x, (3, 3, 3, 3), mode='reflect')
            x = self.conv_in(x)
            feat_x = self.feat_in(x)
            x_blur = self.add_blur(x_blur)
            # print(x_blur.shape)
            x_blur = self.conv_in(x_blur)
            feat_blur = self.feat_in(x_blur)
            feat_mid = torch.add(feat_x, feat_blur)
            feat_mid = self.blocks[0](feat_mid)
            for _ in range(self.num_blocks-1):
                feat_x = self.res_blocks[_](feat_x)
                # print('feat_x: {}').format(feat_x.shape)
                feat_blur = self.res_blocks[_](feat_blur)
                # print('feat_blur: {}').format(feat_blur.shape)
                feat_mid = self.blocks[_+1](torch.add(torch.add(feat_x,feat_blur),feat_mid))
            # h = torch.add(inter_res, self.conv_out(self.out(h)))
            # h = self.add_mean(h)
            # outs.append(h)
            feat_mid = self.out(feat_mid)
            feat_mid = self.upsampler(feat_mid)
            feat_mid = self.conv_out(feat_mid)
            # Global residual: add back the bilinear-upsampled input.
            feat_mid = torch.add(feat_mid, inter_res)
            h = self.add_mean(feat_mid)
            return h # return output of every timesteps
        else:
            # Test path: single (unblurred) branch only.
            x = self.conv_in(x)
            feat_x = self.feat_in(x)
            feat_mid = self.blocks[0](feat_x)
            for _ in range(self.num_blocks -1):
                feat_x = self.res_blocks[_](feat_x)
                feat_mid = self.blocks[_+1](torch.add(feat_x,feat_mid))
            # h = torch.add(inter_res, self.conv_out(self.out(h)))
            # h = self.add_mean(h)
            # outs.append(h)
            feat_mid = self.out(feat_mid)
            feat_mid = self.upsampler(feat_mid)
            feat_mid = self.conv_out(feat_mid)
            feat_mid = torch.add(feat_mid, inter_res)
            h = self.add_mean(feat_mid)
            return h # return output of every timesteps
    def _reset_state(self):
        # Clear the hidden state of every feedback block.
        [x.reset_state() for x in self.blocks]
"""
Takes a JSON file and visualizes the annotation boxes on images.
Outputs visualized at OUT_DIR/<json_file_basename>/../..*.jpg
srun --mem 10000 python tools/face/viz_json.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
sys.path.append('./tools')
import numpy as np
import os, cv2
import argparse
import os.path as osp
import time
import skvideo.io
import json
import csv
from six.moves import xrange
from PIL import Image
from tqdm import tqdm
# JSON_FILE = 'data/CS6_annot/cs6-subset-gt_face_train_annot_coco_style.json'
JSON_FILE = 'Outputs/modified_annots/cs6-train-hp_noisy-1.00.json'
OUT_DIR = 'Outputs/visualizations/'
NUM_IMG = 500  # number of images to visualize
DEBUG = False


def parse_args():
    """Parse command-line options for the CS6 visualization script."""
    parser = argparse.ArgumentParser(description='Creating CS6 ground truth data')
    parser.add_argument('--output_dir',
                        default=OUT_DIR, type=str,
                        help='directory for saving outputs')
    parser.add_argument('--json_file',
                        default=JSON_FILE,
                        help='Name of JSON file')
    parser.add_argument('--imdir',
                        default='data/CS6_annot', type=str,
                        help="root directory for loading dataset images")
    return parser.parse_args()
_GREEN = (18, 127, 15)
# BGR color tuples (OpenCV channel order).
color_dict = {'red': (0,0,225), 'green': (0,255,0), 'yellow': (0,255,255),
              'blue': (255,0,0), '_GREEN':(18, 127, 15), '_GRAY': (218, 227, 218)}


# -----------------------------------------------------------------------------------
def draw_detection_list(im, dets):
# -----------------------------------------------------------------------------------
    """ Draw bounding boxes on a copy of image and return it.

    Args:
        im: BGR image (numpy array).
        dets: detections as rows of [x0 y0 w h conf_score]; a single
            detection may be passed as a 1-D array.

    Returns:
        A copy of ``im`` with one yellow rectangle per detection.
    """
    im_det = im.copy()
    if dets.ndim == 1:
        dets = dets[np.newaxis, :]  # handle single detection case
    # BUG FIX: convert [x, y, w, h] -> [xmin, ymin, xmax, ymax] on a copy.
    # The original wrote into the caller's array, silently corrupting it
    # for any further use after this call.
    dets = dets.copy()
    dets[:, 2] = dets[:, 2] + dets[:, 0]
    dets[:, 3] = dets[:, 3] + dets[:, 1]
    for det in dets:
        x0, y0, x1, y1 = [int(v) for v in det[:4]]
        line_color = color_dict['yellow']
        cv2.rectangle(im_det, (x0, y0), (x1, y1), line_color, thickness=2)
    return im_det
if __name__ == '__main__':
    args = parse_args()
    # Load the COCO-style annotation file: expects 'images' and
    # 'annotations' top-level keys.
    with open(args.json_file) as f:
        ann_dict = json.load(f)
    print(ann_dict.keys())
    # One output folder per JSON file, named after its basename.
    out_dir = osp.join(args.output_dir,
                       osp.splitext(osp.basename(args.json_file))[0])
    if not osp.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)
    i = 0
    for img_annot in tqdm(ann_dict['images']):
        image_name = img_annot['file_name']
        print(osp.join(args.imdir, image_name))
        # Skip annotations whose image is not present on disk.
        if not osp.exists(osp.join(args.imdir, image_name)):
            continue
        image_id = img_annot['id']
        # NOTE: linear scan over all annotations per image (O(n*m));
        # fine for NUM_IMG-capped visualization runs.
        bboxes = [x['bbox'] for x in ann_dict['annotations'] \
                    if x['image_id'] == image_id]
        im = cv2.imread(osp.join(args.imdir, image_name))
        assert im.size > 0
        im_det = draw_detection_list(im, np.array(bboxes))
        # Flatten sub-directories into the file name so everything lands
        # in one output folder.
        out_path = osp.join(out_dir, image_name.replace('/', '_'))
        cv2.imwrite(out_path, im_det)
        i += 1
        if i == NUM_IMG:
            break
|
from django import forms
from django.contrib.auth.models import User
from rest.models import ApiToken
class UserForm(forms.ModelForm):
    """ModelForm exposing username and password of the auth User model."""
    # NOTE(review): password is rendered as a plain text CharField here;
    # presumably it should use forms.PasswordInput (or Django's
    # UserCreationForm, which also hashes) — confirm intended use.
    class Meta:
        model = User
        fields = ('username', 'password')
class TokenForm(forms.ModelForm):
    """ModelForm exposing only the client id of an ApiToken."""
    class Meta:
        model = ApiToken
        fields = ('token_clientId',)
|
from dataclasses import dataclass, field
from typing import Dict, List
import json
settings_path = "newsletters/config.json"
@dataclass
class Settings():
    """Newsletter service settings.

    NOTE(review): ``load`` currently parses a hard-coded JSON document and
    ``save`` is a stub; neither touches ``settings_path`` yet — presumably
    they should read/write that file. TODO confirm intended behavior.
    """

    # Mailgun API key used to send newsletters.
    mailgunApiKey: str

    # BUG FIX: load/save had no `self` parameter and no decorator, so
    # calling them on an instance (settings.load()) raised TypeError.
    # @staticmethod keeps the existing Settings.load() call style working
    # while also making instance access safe.
    @staticmethod
    def load():
        """Build a Settings instance from the (currently hard-coded) JSON."""
        j = '{ "mailgunApiKey": "123!" }'
        j_dict = json.loads(j)
        return Settings(**j_dict)

    @staticmethod
    def save(config):
        """Persist *config* (stub: only logs for now)."""
        print("Saving settings")
"""
A package for generating various graphs in networkx.
"""
from networkx.generators.atlas import *
from networkx.generators.bipartite import *
from networkx.generators.classic import *
from networkx.generators.degree_seq import *
from networkx.generators.directed import *
from networkx.generators.ego import *
from networkx.generators.geometric import *
from networkx.generators.hybrid import *
from networkx.generators.line import *
from networkx.generators.random_graphs import *
from networkx.generators.small import *
from networkx.generators.stochastic import *
from networkx.generators.social import *
from networkx.generators.threshold import *
from networkx.generators.intersection import *
|
from django.urls import path
from . import views
# URL routes for this app: 'register' is handled by views.register.
urlpatterns = [
    path('register', views.register, name='register')
]
from .base_schema import BaseResponseSchema
from marshmallow import Schema, fields
class ImageSchema(Schema):
    """
    The image records from database will be deserialized to this type
    and then will be sent to the client as array of images using
    ImageResponseSchema
    """
    # All fields serialize as strings; posted_on is presumably a formatted
    # timestamp — TODO confirm against the database model.
    image_url = fields.String()
    title = fields.String()
    description = fields.String()
    posted_on = fields.String()
class ImageResponseSchema(BaseResponseSchema):
    """
    This class is responsible for representing image response to our clients
    """
    # Envelope payload: a list of ImageSchema objects under "data".
    data = fields.Nested(ImageSchema, many=True)
|
from machines import Edison2015,Edison
from datetime import datetime
from generate import TimeController
from stats.trace import ResultTrace
from stats import Histogram, NumericStats
class ExperimentDefinition(object):
"""
This class contains the definition and state of a single scheduling
experiment. The definition is used to generate the workload and configure
the scheduler. The state keeps track of the experiment: if it has been run,
its output analyzed, etc. It allows to load and store data from/on a
database.
"""
def __init__(self,
name=None,
experiment_set=None,
seed="AAAAAA",
machine="edison",
trace_type="single",
manifest_list=None,
workflow_policy="no",
workflow_period_s=0,
workflow_share=0.0,
workflow_handling="manifest",
subtraces = None,
start_date = datetime(2015,1,1),
preload_time_s = 3600*24*2,
workload_duration_s = 3600*24*7,
work_state = "fresh",
analysis_state = "0",
overload_target=0.0,
table_name="experiment",
conf_file=""):
"""Constructor allows to fully configure an experiment. However, a
Definition can be created with default values and then loaded from
the database.
Args:
- name: String with a human readable description of the experiment. If
not set, name is set to value derived from the rest of the
arguments.
- experiment_set: String that identifies the group of experiments to
which this one belongs. If not set, it is constructed from the
experiment parameters.
- machine: string identifying the system that the scheduling simulation
must model in terms of hardware, and prioirity policies.
- trace_type: string with three values "single", "delta", "group". A
single experiment is a run of a workload that is analyzed. A delta
is the comparison of the workflows in two single traces (listed in
subtraces), and a group experiment aggregates the results of a
number of single experiments (listed in subtraces).
- manifest_list: List of dictionaries. Each dict has two keys:
"manifest", which value lists the name of the manifest file for a
workflow type; and "share" a 0-1 value indicating the chance for a
workflow to be of "manifest" type in the workload.
- workflow_policy: string that governs how workflows are calculated to
be added in the workload. Can take three values "no", no workflows;
"period", one workflow every workflow_period_s seconds; and "share",
workflow_share % of the jobs will be workflows.
- workflow_period: positive number indicating how many seconds will be
between two workflows in the workload.
- workflow_share: float between 0 and 100 representing the % of jobs
that are workflows.
- workflow_handling: string governing how workflows are run and
scheduled in the experiment. It can take three values: "single",
where workflows are submitted as a single job; "multi", where
each task in a workflow is run in an independent job; and "manifest"
where workflows are submitted as a single job, but workflow aware
backfilling is used.
- subtraces: list of trace_id (int) of traces that should be used in the
analysis of this experiment. Only valid for delta and group
experiments.
- start_date: datetime object pointing at the beginning of the
generated workload.
- pre_load_time_s: number of seconds of workload to be generated before
the start_date. This workload is used to "load" the scheduler, but
the analysis will be performed only from the "start_date".
- workload_duration_s: number of seconds of workload to be generated
after the start_date.
- work_state: string representing the state of the experiment.
possible values: "fresh", "pre_simulating", "simulating",
"simulation_done", "simulation_error", "pre_analyzing",
"analyzing", "analysis_done", "analysis_error".
- analysis_state: sub step of the analysis phase.
- overload_target: if set to > 1.0, the workload generated will produce
extra jobs during preload so in a period of time, overload_target
times the capacity of the system (produced in that period) will be
submitted.
- table_name: dabatase table to store and load the content of
an experiment from.
- conf_file: if set to sting, the experiment will be run using a
configuration file of such name. Other settins will be overriden.
"""
if subtraces is None:
subtraces = []
if manifest_list is None:
manifest_list = []
self._name=name
self._experiment_set=experiment_set
self._seed=seed
self._machine=machine
self._trace_type=trace_type
self._manifest_list=manifest_list
self._workflow_policy=workflow_policy
self._workflow_period_s=workflow_period_s
self._workflow_share=workflow_share
self._workflow_handling=workflow_handling
self._subtraces = subtraces
self._start_date = start_date
self._preload_time_s = preload_time_s
self._workload_duration_s = workload_duration_s
self._work_state = work_state
self._analysis_state = analysis_state
self._overload_target = overload_target
self._table_name = table_name
self._conf_file = conf_file
self._simulating_start=None
self._simulating_end=None
self._worker=""
for man in [x["manifest"] for x in manifest_list]:
if "_" in man or "_" in man:
raise ValueError("A manifest name cannot contain the characters"
" '_' or '-', found: {0}".format(man))
self._trace_id = None
self._owner = None
self._ownership_stamp = None
if self._experiment_set is None:
self._experiment_set = self._get_default_experiment_set()
if self._name is None:
self._name = "{0}-s[{1}]".format(self._experiment_set, self._seed)
def get_true_workflow_handling(self):
"""Returns "no" if there are no workflows in the trace, the configured
value on worfklow_poliy otherwise."""
if self._workflow_policy=="no":
return "no"
else:
return self._workflow_handling
    def get_machine(self):
        """
        Returns a Machine object corresponding to the machine configured.

        Raises ValueError for any machine name other than "edison" or
        "default".
        """
        if self._machine == "edison":
            return Edison2015()
        elif self._machine == "default":
            return Edison()
        raise ValueError("Unknown machine set: {}".format(self._machine))
def get_overload_factor(self):
return self._overload_target%1000;
def get_forced_initial_wait(self):
if self._overload_target>999:
runtime=self._overload_target/1000;
return runtime
return 0
def get_system_user_list(self):
return ["tester:1000",
"root:0"
"linpack:300",
"nobody:99",
"dbus:81",
"rpc:32",
"nscd:28",
"vcsa:69",
"abrt:499",
"saslauth:498",
"postfix:89",
"apache:48",
"rpcuser:29",
"nfsnobody:65534",
"ricci:140",
"haldaemon:68",
"nslcd:65",
"ntp:38",
"piranha:60",
"sshd:74",
"luci:141",
"tcpdump:72",
"oprofile:16",
"postgres:26",
"usbmuxd:113",
"avahi:70",
"avahi-autoipd:170",
"rtkit:497",
"pulse:496",
"gdm:42",
"named:25",
"snmptt:495",
"hacluster:494",
"munge:493",
"mysql:27",
"bsmuser:400",
"puppet:52",
"nagios:401",
"slurm:106"
]
    # --- Workload configuration getters ------------------------------------
    # Fixed single-entry lists; placeholders for richer configurations.
    def get_user_list(self):
        """
        Returns a list of strings with the usernames to be emulated.
        """
        return ["user1"]
    def get_qos_list(self):
        """
        Returns a list of the qos policies to be used in the workload.
        """
        return ["qos1"]
    def get_partition_list(self):
        """
        Returns a list of the partitions to be used in the workload.
        """
        return ["main"]
    def get_account_list(self):
        """
        Returns a list of accounts to be used in the workload.
        """
        return ["account1"]
def get_trace_file_name(self):
"""
Returns a file system safe name based on the experiment name for its
workload file.
"""
return self.clean_file_name(self._name+".trace")
    def get_qos_file_name(self):
        """
        Returns a file system safe name based on the experiment name for its
        qos file.
        """
        return self.clean_file_name(self._name+".qos")
    def get_users_file_name(self):
        """
        Returns a file system safe name based on the experiment name for its
        users file.
        """
        return self.clean_file_name(self._name+".users")
    def get_start_epoch(self):
        """
        Returns the start date of the experiment in epoch format (int).
        """
        return TimeController.get_epoch(self._start_date)
    def get_end_epoch(self):
        """
        Returns the ending date of the experiment in epoch format (int).
        """
        # End = start + analyzed workload span (preload time is before
        # the start date and excluded here).
        return (TimeController.get_epoch(self._start_date) +
                self._workload_duration_s)
def clean_file_name(self, file_name):
"""Returns a string with a file-system name verions of file_name."""
return "".join([c for c in file_name if c.isalpha()
or c.isdigit()
or c=='.'
or c=="-"]).rstrip()
def _manifest_list_to_text(self, manifest_list):
"""Serializes the manifest list into a string"""
list_of_text=[]
for one_man in manifest_list:
list_of_text.append("{0}|{1}".format(
one_man["share"],
one_man["manifest"]))
return ",".join(list_of_text)
def _text_to_manifest_list(self, manifest_text):
"""Deserializes a string into a manifest list"""
manifest_list = []
for man in manifest_text.split(","):
if man == "":
continue
man_parts = man.split("|")
man_share = float(man_parts[0])
man_file = man_parts[1]
manifest_list.append({"share":man_share, "manifest":man_file})
return manifest_list
    def _get_default_experiment_set(self):
        """Returns the default experiment set based on the experiment
        configuration.

        The string packs every configuration knob, '-'-delimited; this is
        why manifest names may not contain '-' or '_' (see __init__).
        """
        conf_file_str=""
        if self._conf_file:
            conf_file_str="-"+self._conf_file
        return ("{0}-{1}-m[{2}]-{3}-p{4}-%{5}-{6}-t[{7}]-{8}d-{9}d-O{10}{11}"
                "".format(
            self._machine,
            self._trace_type,
            self._manifest_list_to_text(self._manifest_list),
            self._workflow_policy,
            self._workflow_period_s,
            self._workflow_share,
            self._workflow_handling,
            ",".join([str(t) for t in self._subtraces]),
            int(self._preload_time_s/(3600*24)),
            int(self._workload_duration_s/(3600*24)),
            self._overload_target,
            conf_file_str))
    def store(self, db_obj):
        """Stores the object into a database at the table self._table_name.
        Args:
        - db_obj: configured DBManager object that will store the data.
        Returns trace_id

        Raises Exception if the insert fails. Complex fields are stored as
        text: manifest_list via _manifest_list_to_text, subtraces as a
        comma-joined id list, start_date in the DB's date format.
        """
        keys= ["name",
               "experiment_set",
               "seed",
               "machine",
               "trace_type",
               "manifest_list",
               "workflow_policy",
               "workflow_period_s",
               "workflow_share",
               "workflow_handling",
               "subtraces",
               "start_date",
               "preload_time_s",
               "workload_duration_s",
               "work_state",
               "analysis_state",
               "overload_target",
               "conf_file"]
        values = [self._name,
                  self._experiment_set,
                  self._seed,
                  self._machine,
                  self._trace_type,
                  self._manifest_list_to_text(self._manifest_list),
                  self._workflow_policy,
                  self._workflow_period_s,
                  self._workflow_share,
                  self._workflow_handling,
                  ",".join([str(t) for t in self._subtraces]),
                  db_obj.date_to_mysql(self._start_date),
                  self._preload_time_s,
                  self._workload_duration_s,
                  self._work_state,
                  self._analysis_state,
                  self._overload_target,
                  self._conf_file]
        ok, insert_id = db_obj.insertValues(self._table_name, keys, values,
                                            get_insert_id=True)
        if not ok:
            raise Exception("Error inserting experiment in database: {0}"
                            "".format(values))
        # The auto-increment id assigned by the DB becomes this
        # experiment's trace_id.
        self._trace_id = insert_id
        return self._trace_id
    # --- Work-state transition helpers -------------------------------------
    # Each helper advances work_state via upate_state (guarded by the
    # current state, see below) and returns that DB call's result.
    def mark_pre_simulating(self, db_obj):
        return self.upate_state(db_obj, "pre_simulating")
    def mark_simulating(self, db_obj, worker_host=None):
        # Optionally records which worker host claimed the simulation.
        if worker_host:
            self.update_worker(db_obj,worker_host)
        self.update_simulating_start(db_obj)
        return self.upate_state(db_obj, "simulating")
    def mark_simulation_done(self, db_obj):
        self.update_simulating_end(db_obj)
        return self.upate_state(db_obj, "simulation_done")
    def mark_simulation_failed(self, db_obj):
        self.update_simulating_end(db_obj)
        return self.upate_state(db_obj, "simulation_failed")
    def mark_pre_analyzing(self, db_obj):
        return self.upate_state(db_obj, "pre_analyzing")
    def mark_analysis_done(self, db_obj):
        return self.upate_state(db_obj, "analysis_done")
    def mark_second_pass(self, db_obj):
        return self.upate_state(db_obj, "second_pass_done")
    def mark_pre_second_pass(self, db_obj):
        return self.upate_state(db_obj, "pre_second_pass")
    def upate_state(self, db_obj, state):
        """
        Sets the state of the experiment.

        NOTE(review): the method name is a typo of "update_state"; kept
        as-is because callers throughout this module use it.

        The UPDATE is guarded by the previous work_state value, so two
        concurrent workers cannot both claim the same transition: only one
        UPDATE matches the old state.
        """
        old_state=self._work_state
        self._work_state = state
        return db_obj.setFieldOnTable(self._table_name, "work_state", state,
                                      "trace_id", str(self._trace_id),
                                      "and work_state='{0}'".format(old_state))
    # --- Bookkeeping columns (worker host, simulation timestamps) ----------
    # no_commas=True presumably makes the DB layer emit the value unquoted
    # so MySQL evaluates "now()" as a function — TODO confirm in DBManager.
    def update_worker(self, db_obj, worker_host):
        self._worker=worker_host
        return db_obj.setFieldOnTable(self._table_name, "worker", worker_host,
                                      "trace_id", str(self._trace_id))
    def update_simulating_start(self, db_obj):
        return db_obj.setFieldOnTable(self._table_name, "simulating_start",
                                      "now()",
                                      "trace_id", str(self._trace_id),
                                      no_commas=True)
    def update_simulating_end(self, db_obj):
        return db_obj.setFieldOnTable(self._table_name, "simulating_end",
                                      "now()",
                                      "trace_id", str(self._trace_id),
                                      no_commas=True)
    def reset_simulating_time(self, db_obj):
        # Zeroes both timestamps (table uses "timestamp DEFAULT 0").
        db_obj.setFieldOnTable(self._table_name, "simulating_end",
                               0,
                               "trace_id", str(self._trace_id),
                               no_commas=True)
        return db_obj.setFieldOnTable(self._table_name, "simulating_start",
                                      0,
                                      "trace_id", str(self._trace_id),
                                      no_commas=True)
def load(self, db_obj, trace_id):
"""Configures the object according to a row in self._table_name
identified by trace_id.
Args:
- db_obj: configured DBManager object that will load the data from
- trace_id: integer identifying the experiment data to load.
"""
self._trace_id = trace_id
keys= ["name",
"experiment_set",
"seed",
"machine",
"trace_type",
"manifest_list",
"workflow_policy",
"workflow_period_s",
"workflow_share",
"workflow_handling",
"subtraces",
"start_date",
"preload_time_s",
"workload_duration_s",
"work_state",
"analysis_state",
"overload_target",
"conf_file",
"simulating_start",
"simulating_end",
"worker"]
data_dic=db_obj.getValuesDicList(self._table_name, keys, condition=
"trace_id={0}".format(
self._trace_id))
if data_dic == False:
raise ValueError("Experiment not found!")
for key in keys:
setattr(self, "_"+key, data_dic[0][key])
self._manifest_list=self._text_to_manifest_list(self._manifest_list)
self._subtraces = [int(x) for x in self._subtraces.split(",") if x!=""]
    def load_fresh(self, db_obj):
        """Configures the object with the data of the first experiment with
        state="fresh", ordered by trace_id. Then set the state to
        "pre_simulating".
        Returns True if load was succesful, False if no experiments with state
        "fresh" are available.
        """
        return self.load_next_state(db_obj, "fresh", "pre_simulating")
    def load_pending(self, db_obj):
        """Configures the object with the data of the first experiment with
        state="simulation_done", ordered by trace_id. Then set the state to
        "pre_analyzing".
        Returns True if load was succesful, False if no experiments with state
        "simulation_done" are available.
        """
        return self.load_next_state(db_obj, "simulation_done", "pre_analyzing")
    def load_ready_second_pass(self, db_obj):
        """Configures the object with the data of the first experiment with
        state="simulation_done", ordered by trace_id. Then set the state to
        "pre_analyzing".

        NOTE(review): identical to load_pending; presumably it should
        target the second-pass states instead — confirm intent.
        """
        return self.load_next_state(db_obj, "simulation_done", "pre_analyzing")
    def load_next_state(self,db_obj, state, new_state, check_pending=False,
                        subtraces_state=None):
        """Configures the object with the data of the first experiment with
        state=state, ordered by trace_id. Then sets the state to new_state.
        The operation is concurrent safe, two codes running load_next_state
        for the same state will never receive the data from the same experiment.
        Args:
        - db_obj: DBManager object configured to access a database.
        - state: state of the experiment to be searched for.
        - new_state: state to be set on both the db and the object once
            the experiment is loaded.
        - check_pending: checks if the the sub_traces are in a particular
            state.
        - subtraces_state: Expected state of the subtraces to perform a load
            and state set.
        Returns: False if no more experiments with
            state "state" are available, True otherwise.
        """
        update_ok=False
        data_left=True
        # Retry budget: the guarded upate_state can lose the race to
        # another worker, in which case we look for the next candidate.
        count = 1000
        while data_left and not update_ok:
            db_obj.start_transaction()
            rows=db_obj.getValuesAsColumns(self._table_name, ["trace_id"],
                                           condition = "work_state='{0}' "
                                           "and trace_type='{1}' ".format(
                                               state,
                                               self._trace_type),
                                           orderBy="trace_id")
            data_left = len(rows["trace_id"])>0
            if data_left:
                found_good=False
                # Take the first candidate whose subtraces (if required)
                # are already in the expected state.
                for trace_id in rows["trace_id"]:
                    self.load(db_obj,int(trace_id))
                    found_good = (not check_pending
                                  or self.are_sub_traces_analyzed(
                                      db_obj,
                                      subtraces_state))
                    if found_good:
                        break
                if not found_good:
                    db_obj.end_transaction()
                    break
                # State-guarded update: fails (and we retry) if another
                # worker claimed this experiment first.
                update_ok = self.upate_state(db_obj, new_state)
            db_obj.end_transaction()
            if count == 0:
                raise Exception("Tried to load an experiment configuration many"
                                " times and failed!!")
            count -= 1
        return data_left
def get_exps_in_state(self, db_obj, state):
rows=db_obj.getValuesAsColumns(self._table_name, ["trace_id"],
condition = "work_state='{0}' "
"and trace_type='{1}' ".format(
state,
self._trace_type),
orderBy="trace_id")
return rows["trace_id"]
    def pass_other_second_pass_requirements(self, db_obj):
        # Hook for subclasses: extra preconditions before the second
        # analysis pass. The base class imposes none.
        return True
    # NOTE(review): mutable default argument (workflow_handling_list); it
    # is only read, never mutated, so behavior is safe, but a tuple or
    # None-sentinel would be cleaner.
    def load_next_ready_for_pass(self,db_obj, state="analysis_done",
                                 new_state="pre_second_pass",
                                 workflow_handling="manifest",
                                 workflow_handling_list=["single", "multi"]):
        update_ok=False
        data_left=True
        count = 100
        """ Changes:
        - it passes over the ones that not good yet
        - does not use subtraces
        """
        while data_left and not update_ok:
            db_obj.start_transaction()
            rows=db_obj.getValuesAsColumns(self._table_name, ["trace_id"],
                                           condition = "work_state='{0}' "
                                           "and trace_type='{1}' "
                                           "and workflow_handling='{2}'".format(
                                               state,
                                               self._trace_type,
                                               workflow_handling),
                                           orderBy="trace_id")
            data_left = len(rows["trace_id"])>0
            this_is_the_one=False
            if data_left:
                for trace_id in rows["trace_id"]:
                    self.load(db_obj,int(trace_id))
                    other_defs_ok=True
                    # NOTE(review): assumes the companion experiments (one
                    # per other workflow handling) were stored with
                    # consecutive trace_ids right after this one — TODO
                    # confirm this invariant holds for all generators.
                    for (other_handling, t_id) in zip(
                            workflow_handling_list,
                            [trace_id+x+1 for x in range(
                                len(workflow_handling_list))]):
                        new_def=self.get_exp_def_like_me()
                        new_def.load(db_obj, t_id)
                        other_defs_ok=(other_defs_ok and
                                       new_def._work_state=="analysis_done" and
                                       new_def._workflow_handling==other_handling and
                                       new_def.pass_other_second_pass_requirements(db_obj))
                    if (not other_defs_ok or
                        not self.pass_other_second_pass_requirements(db_obj)):
                        continue
                    else:
                        this_is_the_one=True
                        break
            if this_is_the_one:
                update_ok = self.upate_state(db_obj, new_state)
            db_obj.end_transaction()
            if count == 0:
                raise ValueError("Tried to load an experiment configuration many"
                                 " times and failed!!")
            count -= 1
        return data_left
def get_exp_def_like_me(self):
return ExperimentDefinition()
    def del_results(self, db_obj):
        """Deletes all analysis results associated with this experiment"""
        field="trace_id"
        value=self._trace_id
        # Results live in three tables: histograms, utilization results
        # and numeric stats; purge this trace's rows from each.
        db_obj.delete_rows(Histogram()._table_name, field, value)
        db_obj.delete_rows(ResultTrace()._get_utilization_result()._table_name,
                           field, value)
        db_obj.delete_rows(NumericStats()._table_name, field, value)
    def del_results_like(self, db_obj, like_field="type", like_value="lim_%"):
        """Deletes analysis results of this experiment whose like_field
        matches like_value (SQL LIKE pattern)."""
        field="trace_id"
        value=self._trace_id
        db_obj.delete_rows(Histogram()._table_name, field, value,
                           like_field, like_value)
        db_obj.delete_rows(ResultTrace()._get_utilization_result()._table_name,
                           field, value, like_field, like_value)
        db_obj.delete_rows(NumericStats()._table_name, field, value,
                           like_field, like_value)
    def del_trace(self, db_obj):
        """Deletes simulation trace associated with this experiment"""
        field="trace_id"
        value=self._trace_id
        db_obj.delete_rows(ResultTrace()._table_name,
                           field, value)
    def del_exp(self, db_obj):
        # Deletes this experiment's own row from the experiment table.
        field="trace_id"
        value=self._trace_id
        db_obj.delete_rows(self._table_name,
                           field, value)
def are_sub_traces_analyzed(self, db_obj, state):
if not type(state) is list:
state=[state]
for trace_id in self._subtraces:
rows=db_obj.getValuesAsColumns(self._table_name, ["work_state"],
condition = "trace_id={0} ".format(trace_id))
if len(rows["work_state"])==0:
raise ValueError("Subtrace not found!")
if not rows["work_state"][0] in state:
return False
return True
    def create_table(self, db_obj):
        """Creates a table valid to store Definition objects"""
        # The timestamp columns default to 0, which strict MySQL modes
        # reject; warn the operator up front.
        print ("Experiment table creation will fail if MYSQL Database does not"
               " support 'zero' values in timestamp fields. To zero values"
               " can be allowed by removing STRICT_TRANS_TABLES from 'sql_mode='"
               " in my.cnf."
               "")
        query = """
        create table `experiment` (
        `trace_id` int(10) NOT NULL AUTO_INCREMENT,
        `name` varchar(512),
        `experiment_set` varchar(512),
        `seed` varchar(256), # Alphanum seed for workload gen.
        `trace_type` varchar(64), # single, delta, group
        `machine` varchar(64), # Machine to simulate, e.g. 'edison'
        `manifest_list` varchar (1024), # Manifests to use in the trace. Format:
                                        # [{"manifest1.json":1.0}] or
                                        # [{"manifest1.json":0.5},{"manifest1.json":0.5}]
        `workflow_policy` varchar(1024), # workflow submission policy:
                                         # 'no', 'period', 'percentage'
        `workflow_period_s` INT DEFAULT 0, # used in "period" policy.
                                           # seconds between two worflows.
        `workflow_share` DOUBLE DEFAULT 0.0, # used in "percentage" policy
                                             # 0-100% share of workflows over
                                             # jobs
        `workflow_handling` varchar(64), # How workflows are submitted and
                                         # scheduled: 'single', 'multi',
                                         # 'backfill'
        `start_date` datetime, # epoch date where the trace should start
        `preload_time_s` int, # lenght (in s.) to create filling workload at the
                              # begining. It won't be analyzed.
        `workload_duration_s` int, # lenght (in s.) of the workload to be generated
        `subtraces` varchar(100), # For the group and delta traces, what traces
                                  # where used to build this one.
        `work_state` varchar(64), # State of the simulation, analysis steps:
                                  # 'fresh', 'simulating', 'simulation done',
                                  # 'analyzing', 'analysis done'
        `analysis_state` varchar(64) DEFAULT "", # States inside of the simulation. depending on
                                      # trace_type and workflow_policy
        `owner` varchar(64) DEFAULT "", # IP of the host that did the last update
        `conf_file` varchar(64) DEFAULT "", # Name of config file to be used in experiment
        `ownership_stamp` datetime, # Time since last ownership.
        `overload_target` DOUBLE DEFAULT 1.1, # Target cores-hours to be submitted
        `simulating_start` timestamp DEFAULT 0,
        `simulating_end` timestamp DEFAULT 0,
        `worker` varchar(256) DEFAULT "",
        PRIMARY KEY(`trace_id`)
        ) ENGINE = InnoDB;
        """
        db_obj.doUpdate(query)
def is_it_ready_to_process(self):
return self._work_state in ["analysis_done"]
def is_analysis_done(self, second_pass=False):
if second_pass:
return self._work_state =="second_pass_done"
return (self._work_state =="analysis_done" or
self._work_state =="second_pass_done")
class GroupExperimentDefinition(ExperimentDefinition):
    """Grouped experiment definition: Experiment composed by multiple single
    experiments with the same scheduler and workload characteristics, but
    different random seed. Stats on workflow and job variables are calculated
    putting all traces together. Median is calculated over the utilizations.
    """
    # Same parameters as ExperimentDefinition, but trace_type defaults to
    # "group" and the initial work_state is "pending" (a group can only be
    # analyzed once its subtraces are done). No conf_file parameter.
    def __init__(self,
                 name=None,
                 experiment_set=None,
                 seed="AAAAAA",
                 machine="edison",
                 trace_type="group",
                 manifest_list=None,
                 workflow_policy="no",
                 workflow_period_s=0,
                 workflow_share=0.0,
                 workflow_handling="manifest",
                 subtraces = None,
                 start_date = datetime(2015,1,1),
                 preload_time_s = 3600*24*2,
                 workload_duration_s = 3600*24*7,
                 work_state = "pending",
                 analysis_state = "0",
                 overload_target=0.0,
                 table_name="experiment"):
        super(GroupExperimentDefinition,self).__init__(
            name=name,
            experiment_set=experiment_set,
            seed=seed,
            machine=machine,
            trace_type=trace_type,
            manifest_list=manifest_list,
            workflow_policy=workflow_policy,
            workflow_period_s=workflow_period_s,
            workflow_share=workflow_share,
            workflow_handling=workflow_handling,
            subtraces = subtraces,
            start_date = start_date,
            preload_time_s = preload_time_s,
            workload_duration_s = workload_duration_s,
            work_state = work_state,
            analysis_state = analysis_state,
            overload_target=overload_target,
            table_name=table_name)
    def load_pending(self, db_obj):
        """Configures the object with the data of the first group experiment
        with state="pending" whose subtraces are all analyzed, ordered by
        trace_id. Then set the state to "pre_analyzing".
        Returns True if load was succesful, False if no such experiments
        are available.
        """
        return self.load_next_state(db_obj, "pending", "pre_analyzing",
                                    True, ["analysis_done", "second_pass_done"])
    def add_sub_trace(self, trace_id):
        # Registers one more single-experiment trace as part of this group.
        self._subtraces.append(trace_id)
    # NOTE(review): signature differs from the parent's zero-argument
    # is_it_ready_to_process(self); callers must know which flavor they
    # hold. Consider unifying — confirm before changing.
    def is_it_ready_to_process(self, db_obj):
        """Returns true is the sub traces have been generated and analyzed."""
        for trace_id in self._subtraces:
            rt = ExperimentDefinition()
            rt.load(db_obj, trace_id)
            if not (rt._work_state in ["analysis_done", "second_pass_done"]):
                return False
        return True
    def pass_other_second_pass_requirements(self, db_obj):
        # A group may enter the second pass only when every subtrace's
        # (first-pass) analysis is done.
        for sub_trace_id in self._subtraces:
            ex = ExperimentDefinition()
            ex.load(db_obj, sub_trace_id)
            if not ex.is_analysis_done():
                return False
        return True
    def get_exp_def_like_me(self):
        return GroupExperimentDefinition()
class DeltaExperimentDefinition(GroupExperimentDefinition):
    """Delta Experiments: Comparison between two single experiments with the
    same random seed, workload configuration, but different scheduler
    configuration. Workflow variables are compared workflow to workflow, and
    statistics calculated over the differences.
    """
    def __init__(self,
                 name=None,
                 experiment_set=None,
                 seed="AAAAAA",
                 machine="edison",
                 trace_type="delta",
                 manifest_list=None,
                 workflow_policy="no",
                 workflow_period_s=0,
                 workflow_share=0.0,
                 workflow_handling="manifest",
                 subtraces = None,
                 start_date = datetime(2015,1,1),
                 preload_time_s = 3600*24*2,
                 workload_duration_s = 3600*24*7,
                 work_state = "pending",
                 analysis_state = "0",
                 table_name="experiment",
                 overload_target=None):
        # BUG FIX: the original called super(GroupExperimentDefinition,
        # self).__init__, which skips GroupExperimentDefinition in the MRO.
        # Harmless today (its __init__ only forwards) but incorrect and
        # fragile if the parent ever gains logic of its own.
        super(DeltaExperimentDefinition, self).__init__(
            name=name,
            experiment_set=experiment_set,
            seed=seed,
            machine=machine,
            trace_type=trace_type,
            manifest_list=manifest_list,
            workflow_policy=workflow_policy,
            workflow_period_s=workflow_period_s,
            workflow_share=workflow_share,
            workflow_handling=workflow_handling,
            subtraces = subtraces,
            start_date = start_date,
            preload_time_s = preload_time_s,
            workload_duration_s = workload_duration_s,
            work_state = work_state,
            analysis_state = analysis_state,
            table_name=table_name,
            overload_target=overload_target)
    def add_compare_pair(self, first_id, second_id):
        """Register the two trace ids whose workflows will be compared.

        BUG FIX: the original forwarded both ids in one call to
        add_sub_trace, which accepts a single trace_id and therefore
        raised TypeError on every use.
        """
        self.add_sub_trace(first_id)
        self.add_sub_trace(second_id)
    def is_it_ready_to_process(self, db_obj):
        """Returns true is the sub traces have been at least generated."""
        for trace_id in self._subtraces:
            rt = ExperimentDefinition()
            rt.load(db_obj, trace_id)
            if not (rt._work_state in ["simulation_done", "analysis_done"]):
                return False
        return True
    def get_exp_def_like_me(self):
        return DeltaExperimentDefinition()
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .sparse_weights import SparseWeights, SparseWeights2d
class PrunableSparseWeightBase(object):
"""
Enable easy setting and getting of the off-mask that defines which
weights are zero.
"""
@property
def off_mask(self):
"""
Gets the value of `zero_mask` in bool format. Thus one may call
```
self.weight[~self.off_mask] # returns weights that are currently on
```
"""
return self.zero_mask.bool()
@off_mask.setter
def off_mask(self, mask):
"""
Sets the values of `zero_mask`, updating self.sparsity to reflect the
sparsity of the new mask.
"""
self.sparsity = mask.sum().item() / mask.numel()
self.zero_mask[:] = mask
class PrunableSparseWeights(SparseWeights, PrunableSparseWeightBase):
    """
    Sparsity-enforcing wrapper for linear modules whose off-weights may be
    changed dynamically through the `off_mask` property.
    """

    def __init__(self, module, weight_sparsity=None, sparsity=None):
        # allow_extremes permits fully-dense / fully-sparse masks, which
        # dynamic pruning may produce.
        super().__init__(module,
                         weight_sparsity=weight_sparsity,
                         sparsity=sparsity,
                         allow_extremes=True)
class PrunableSparseWeights2d(SparseWeights2d, PrunableSparseWeightBase):
    """
    Sparsity-enforcing wrapper for CNN modules whose off-weights may be
    changed dynamically through the `off_mask` property.
    """

    def __init__(self, module, weight_sparsity=None, sparsity=None):
        # allow_extremes permits fully-dense / fully-sparse masks, which
        # dynamic pruning may produce.
        super().__init__(module,
                         weight_sparsity=weight_sparsity,
                         sparsity=sparsity,
                         allow_extremes=True)
|
"""This module provides an interface to the program DSSP.
For more information on DSSP see [4]_.
References
----------
.. [4] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
"""
import subprocess
import tempfile
from .assembly import Assembly
def dssp_available():
    """True if mkdssp is available on the path.

    The binary is probed by running it with no arguments. A
    `CalledProcessError` (non-zero exit status) still proves the executable
    exists and could be run, so it also counts as "available".
    """
    available = False
    try:
        subprocess.check_output(['mkdssp'], stderr=subprocess.DEVNULL)
        # BUG FIX: a zero exit status also means DSSP is present; previously
        # only the CalledProcessError branch set `available`.
        available = True
    except subprocess.CalledProcessError:
        available = True
    except FileNotFoundError:
        print("DSSP has not been found on your path. If you have already "
              "installed DSSP but are unsure how to add it to your path, "
              "check out this: https://stackoverflow.com/a/14638025")
    return available
def run_dssp(pdb, path=True):
    """Uses DSSP to find helices and extracts helices from a pdb file or string.

    Parameters
    ----------
    pdb : str
        Path to pdb file or string.
    path : bool, optional
        Indicates if pdb is a path or a string.

    Returns
    -------
    dssp_out : str
        Std out from DSSP.
    """
    if path:
        dssp_out = subprocess.check_output(['mkdssp', pdb])
    else:
        # mkdssp wants a file on disk, so spill the in-memory structure into
        # a temporary file first.
        pdb_bytes = pdb.encode() if isinstance(pdb, str) else pdb
        with tempfile.NamedTemporaryFile() as temp_pdb:
            temp_pdb.write(pdb_bytes)
            temp_pdb.seek(0)
            dssp_out = subprocess.check_output(['mkdssp', temp_pdb.name])
    return dssp_out.decode()
def extract_all_ss_dssp(in_dssp, path=True):
    """Uses DSSP to extract secondary structure information on every residue.

    Parameters
    ----------
    in_dssp : str
        Path to DSSP file.
    path : bool, optional
        Indicates if `in_dssp` is a path or the DSSP output itself.

    Returns
    -------
    dssp_residues : [tuple]
        Each tuple contains:
        [0] int Residue number
        [1] str Secondary structure type
        [2] str Chain identifier
        [3] str Residue type
        [4] float Phi torsion angle
        [5] float Psi torsion angle
        [6] int dssp solvent accessibility
    """
    if path:
        with open(in_dssp, 'r') as inf:
            dssp_out = inf.read()
    else:
        dssp_out = in_dssp[:]
    dssp_residues = []
    in_table = False
    for line in dssp_out.splitlines():
        if not in_table:
            # Data rows start after the header line marked with '#'.
            if line[2] == '#':
                in_table = True
            continue
        try:
            dssp_residues.append((
                int(line[5:10].strip()),       # residue number
                line[16],                      # secondary structure type
                line[10:12].strip(),           # chain identifier
                line[13],                      # residue type
                float(line[103:109].strip()),  # phi
                float(line[109:116].strip()),  # psi
                int(line[35:38].strip()),      # solvent accessibility
            ))
        except ValueError:
            # Rows with unparsable fields (e.g. chain breaks) are skipped.
            pass
    return dssp_residues
def find_ss_regions(dssp_residues, loop_assignments=(' ', 'B', 'S', 'T')):
    """Separates parsed DSSP data into groups of secondary structure.

    Notes
    -----
    Example: all residues in a single helix/loop/strand will be gathered
    into a list, then the next secondary structure element will be
    gathered into a separate list, and so on. A new region starts whenever
    the chain changes, or the secondary structure label changes (all
    `loop_assignments` labels are treated as one "loop" category).

    Parameters
    ----------
    dssp_residues : [tuple]
        Each tuple contains:
        [0] int Residue number
        [1] str Secondary structure type
        [2] str Chain identifier
        [3] str Residue type
        [4] float Phi torsion angle
        [5] float Psi torsion angle
        [6] int dssp solvent accessibility
    loop_assignments : tuple or list
        DSSP labels that are all classed as loop regions.

    Returns
    -------
    fragments : [[list]]
        Lists grouped in continuous regions of secondary structure.
        Innermost list has the same format as above.
    """
    # BUG FIX: previously an empty input produced [[]]; return [] instead.
    if not dssp_residues:
        return []
    loops = loop_assignments
    fragments = []
    fragment = [dssp_residues[0]]
    for previous_ele, ele in zip(dssp_residues, dssp_residues[1:]):
        if ele[2] != previous_ele[2]:
            # Chain changed: always a new region.
            same_region = False
        elif previous_ele[1] in loops:
            # Inside a loop: stay in it only while the label is loop-like.
            same_region = ele[1] in loops
        else:
            # Inside helix/strand: stay only while the label is identical.
            same_region = ele[1] == previous_ele[1]
        if same_region:
            fragment.append(ele)
        else:
            fragments.append(fragment)
            fragment = [ele]
    fragments.append(fragment)
    return fragments
def tag_dssp_data(assembly, loop_assignments=(' ', 'B', 'S', 'T')):
    """Adds output data from DSSP to an Assembly.

    A dictionary will be added to the `tags` dictionary of each
    residue called `dssp_data`, which contains the secondary
    structure definition, solvent accessibility phi and psi values
    from DSSP. A list of regions of continuous secondary assignments
    will also be added to each `Polypeptide`.

    The tags are added in place, so nothing is returned from this
    function.

    Parameters
    ----------
    assembly : ampal.Assembly
        An Assembly containing some protein.
    loop_assignments : tuple or list
        A tuple containing the DSSP secondary structure identifiers to
        that are classed as loop regions.
    """
    dssp_out = run_dssp(assembly.pdb, path=False)
    dssp_data = extract_all_ss_dssp(dssp_out, path=False)
    # Per-residue tags.
    for rnum, sstype, chid, _, phi, psi, sacc in dssp_data:
        assembly[chid][str(rnum)].tags['dssp_data'] = {
            'ss_definition': sstype,
            'solvent_accessibility': sacc,
            'phi': phi,
            'psi': psi
        }
    # Per-chain lists of continuous secondary-structure regions.
    for region in find_ss_regions(dssp_data, loop_assignments):
        first = region[0]
        chain = first[2]
        # All loop-like labels are collapsed to the single ' ' label.
        ss_type = ' ' if first[1] in loop_assignments else first[1]
        first_residue = str(first[0])
        last_residue = str(region[-1][0])
        # Idiom fix: setdefault replaces the `if not 'ss_regions' in ...` dance.
        assembly[chain].tags.setdefault('ss_regions', []).append(
            (first_residue, last_residue, ss_type))
def get_ss_regions(assembly, ss_types):
    """Returns an Assembly containing Polymers for each region of structure.

    Parameters
    ----------
    assembly : ampal.Assembly
        `Assembly` object to be searched secondary structure regions.
    ss_types : list
        List of secondary structure tags to be separate i.e. ['H']
        would return helices, ['H', 'E'] would return helices
        and strands.

    Returns
    -------
    fragments : Assembly
        `Assembly` containing a `Polymer` for each region of specified
        secondary structure.
    """
    has_tags = any('ss_regions' in polymer.tags for polymer in assembly)
    if not has_tags:
        raise ValueError(
            'This assembly does not have any tagged secondary structure '
            'regions. Use `ampal.dssp.tag_dssp_data` to add the tags.'
        )
    fragments = Assembly()
    for polypeptide in assembly:
        if 'ss_regions' not in polypeptide.tags:
            continue
        for start, end, ss_type in polypeptide.tags['ss_regions']:
            if ss_type in ss_types:
                fragments.append(polypeptide.get_slice_from_res_id(start, end))
    if not fragments:
        raise ValueError('No regions matching that secondary structure type'
                         ' have been found. Use standard DSSP labels.')
    return fragments
__author__ = "Christopher W. Wood, Gail J. Bartlett"
|
import string
from unittest import TestCase
from xsdata.utils.text import alnum
from xsdata.utils.text import camel_case
from xsdata.utils.text import capitalize
from xsdata.utils.text import classify
from xsdata.utils.text import kebab_case
from xsdata.utils.text import mixed_case
from xsdata.utils.text import mixed_pascal_case
from xsdata.utils.text import mixed_snake_case
from xsdata.utils.text import original_case
from xsdata.utils.text import pascal_case
from xsdata.utils.text import screaming_snake_case
from xsdata.utils.text import snake_case
from xsdata.utils.text import split_words
from xsdata.utils.text import StringType
class TextTests(TestCase):
    """Exercise the xsdata.utils.text case-conversion helpers."""

    def _assert_converts(self, func, cases):
        """Assert that `func` maps every raw input to the expected output."""
        for expected, raw in cases:
            self.assertEqual(expected, func(raw))

    def test_original_case(self):
        # Extra keyword arguments must be tolerated and ignored.
        self.assertEqual("p00p", original_case("p00p", foobar=True))
        self._assert_converts(original_case, [
            ("p00p", "p00p"),
            ("USERName", "USERName"),
            ("UserNAME", "UserNAME"),
            ("USER_name", "USER_name"),
            ("USER-NAME", "USER-NAME"),
            ("User_Name", "User_Name"),
            ("user_name", "user_name"),
            ("SUserNAME", "SUserNAME"),
        ])

    def test_snake_case(self):
        self.assertEqual("p00p", snake_case("p00p", foobar=True))
        self._assert_converts(snake_case, [
            ("p00p", "p00p"),
            ("username", "USERName"),
            ("user_name", "UserNAME"),
            ("user_name", "USER_name"),
            ("user_name", "USER-NAME"),
            ("user_name", "User_Name"),
            ("user_name", "user_name"),
            ("suser_name", "SUserNAME"),
        ])

    def test_screaming_snake_case(self):
        self.assertEqual("P00P", screaming_snake_case("p00p", foobar=True))
        self._assert_converts(screaming_snake_case, [
            ("P00P", "p00p"),
            ("USERNAME", "USERName"),
            ("USER_NAME", "UserNAME"),
            ("USER_NAME", "USER_name"),
            ("USER_NAME", "USER-NAME"),
            ("USER_NAME", "User_Name"),
            ("USER_NAME", "user_name"),
            ("SUSER_NAME", "SUserNAME"),
        ])

    def test_pascal_case(self):
        self.assertEqual("P00P", pascal_case("p00p", foobar=True))
        self._assert_converts(pascal_case, [
            ("P00P", "p00p"),
            ("Username", "USERName"),
            ("UserName", "UserNAME"),
            ("UserName", "USER_name"),
            ("UserName", "USER-NAME"),
            ("UserName", "User_Name"),
            ("UserName", "user_name"),
            ("SuserName", "SUserNAME"),
        ])

    def test_camel_case(self):
        self.assertEqual("p00P", camel_case("p00p", foobar=True))
        self._assert_converts(camel_case, [
            ("p00P", "p00p"),
            ("username", "USERName"),
            ("userName", "UserNAME"),
            ("userName", "USER_name"),
            ("userName", "USER-NAME"),
            ("userName", "User_Name"),
            ("userName", "user_name"),
            ("suserName", "SUserNAME"),
        ])

    def test_mixed_case(self):
        self.assertEqual("p00p", mixed_case("p00p", foobar=True))
        self._assert_converts(mixed_case, [
            ("p00p", "p00p"),
            ("USERName", "USERName"),
            ("UserNAME", "UserNAME"),
            ("USERname", "USER_name"),
            ("USERNAME", "USER-NAME"),
            ("UserName", "User_Name"),
            ("username", "user_name"),
            ("SUserNAME", "SUserNAME"),
        ])

    def test_mixed_pascal_case(self):
        self.assertEqual("P00p", mixed_pascal_case("p00p", foobar=True))
        self._assert_converts(mixed_pascal_case, [
            ("P00p", "p00p"),
            ("USERName", "USERName"),
            ("UserNAME", "UserNAME"),
            ("USERname", "USER_name"),
            ("USERNAME", "USER-NAME"),
            ("UserName", "User_Name"),
            ("Username", "user_name"),
            ("SUserNAME", "SUserNAME"),
        ])

    def test_mixed_snake_case(self):
        self.assertEqual("p00p", mixed_snake_case("p00p", foobar=True))
        self._assert_converts(mixed_snake_case, [
            ("p00p", "p00p"),
            ("USERName", "USERName"),
            ("User_NAME", "UserNAME"),
            ("USER_name", "USER_name"),
            ("USER_NAME", "USER-NAME"),
            ("User_Name", "User_Name"),
            ("user_name", "user_name"),
            ("SUser_NAME", "SUserNAME"),
        ])

    def test_kebab_case(self):
        self.assertEqual("p00p", kebab_case("p00p", foobar=True))
        self._assert_converts(kebab_case, [
            ("p00p", "p00p"),
            ("USERName", "USERName"),
            ("User-NAME", "UserNAME"),
            ("USER-name", "USER_name"),
            ("USER-NAME", "USER-NAME"),
            ("User-Name", "User_Name"),
            ("user-name", "user_name"),
            ("SUser-NAME", "SUserNAME"),
        ])

    def test_capitalize(self):
        self._assert_converts(capitalize, [
            ("UserName", "userName"),
            (".userName", ".userName"),
        ])

    def test_split_words(self):
        self._assert_converts(split_words, [
            (["user", "Name"], "userName"),
            (["User", "Name"], "User.Name"),
            (["User", "Name"], "UserName"),
            (["USER", "NAME"], "USER_NAME"),
            (["user", "name"], "user_name"),
            (["user", "name"], "user__name"),
            (["common", "v48", "0"], "common_v48_0"),
            (["user"], "user"),
            (["user"], "_user"),
            (["user"], "__user"),
            (["TMessage", "DB"], "TMessageDB"),
            (["GLOBAL", "REF"], "GLOBAL-REF"),
            (["book"], "βιβλιο-book"),
        ])

    def test_alnum(self):
        self._assert_converts(alnum, [
            ("foo1", "foo 1"),
            ("foo1", " foo_1 "),
            ("foo1", "\tfoo*1"),
            ("foo1", " foo*1"),
            ("1", " βιβλίο*1"),
        ])

    def test_classify(self):
        expectations = [
            (string.ascii_uppercase, StringType.UPPER),
            (string.ascii_lowercase, StringType.LOWER),
            (string.digits, StringType.NUMERIC),
            ("~!@#$%^&*()_+β", StringType.OTHER),
        ]
        for chars, expected in expectations:
            for ltr in chars:
                self.assertEqual(expected, classify(ltr))
|
#!/usr/bin/env python3
import os
from jobmonitor.api import (
kubernetes_schedule_job,
kubernetes_schedule_job_queue,
register_job,
upload_code_package,
)
from jobmonitor.connections import mongo
# File names / glob patterns excluded from the uploaded code package.
excluded_files = [
    "core",
    "output.tmp",
    ".vscode",
    "node_modules",
    "scripts",
    ".git",
    "*.pyc",
    "._*",
    "__pycache__",
    "*.pdf",
    "*.js",
    "*.yaml",
    ".pylintrc",
    ".gitignore",
    ".AppleDouble",
    ".jobignore",
]
# Job-monitor bookkeeping: project name, experiment name (derived from this
# file's name) and the entry-point script executed by the workers.
project = "decentralized_powersgd"
experiment = os.path.splitext(os.path.basename(__file__))[0]
script = "train.py"
description = """
This is meant to be equivalent to the setup in the Choco DL paper
""".strip()
# Default hyper-parameters; individual jobs override entries via `schedule`.
base_config = {
    "n_workers": 8,
    "topology": "ring",
    "batch_size": 128,
    "task_architecture": "ResNet20",
    "lr_schedule_milestones": [(150, 0.1), (225, 0.1)],
    "optimizer_diffusion_rate": 1.308,
}
# Upload the working directory once; every scheduled job clones this package.
code_package, files_uploaded = upload_code_package(".", excludes=excluded_files + ["gossip_run.py"])
print("Uploaded {} files.".format(len(files_uploaded)))
def schedule(name, config, skip_existing=False):
    """Register one training job with the job monitor and print its sbatch command.

    :param name: job name within this project/experiment.
    :param config: overrides merged on top of the module-level `base_config`.
    :param skip_existing: when True, do nothing if a job with the same
        (project, experiment, name) is already registered in mongo.
    """
    if skip_existing:
        query = {"project": project, "job": name, "experiment": experiment}
        if mongo.job.count_documents(query) > 0:
            return
    merged_config = {**base_config, **config}
    n_workers = merged_config["n_workers"]
    job_id = register_job(
        user="vogels",
        project=project,
        experiment=experiment,
        job=name,
        n_workers=n_workers,
        priority=10,
        config_overrides=merged_config,
        runtime_environment={"clone": {"code_package": code_package}, "script": script},
        annotations={"description": description},
    )
    # Emit the command the operator pastes into the cluster shell.
    print(
        f'sbatch --ntasks {n_workers} --job-name="{name}" --gpus-per-task=1 --cpus-per-task=8 --wrap="srun jobrun {job_id} --mpi"'
    )
seed = 0
# Hyper-parameter sweep over warmup factor, Moniqua's theta and the gossip
# diffusion rate (each currently a single value, kept as loops for easy
# extension). Existing jobs are skipped.
for factor in [4]:
    for theta in [0.25]:
        for diffusion_rate in [5e-3]:
            schedule(
                f"moniqua-lr{factor}-theta{theta}-dr{diffusion_rate}",
                dict(
                    distributed_lr_warmup_factor=factor,
                    optimizer="moniqua",
                    optimizer_diffusion_rate=diffusion_rate,
                    optimizer_theta=theta,
                    seed=seed,
                ),
                skip_existing=True,
            )
|
from netmiko import ConnectHandler
from netmiko.ssh_exception import *
import json
from nuaal.utils import get_logger, check_path, write_output
from nuaal.utils import Filter
from nuaal.definitions import DATA_PATH, OUTPUT_PATH
import timeit
import os
class CliBaseConnection(object):
"""
This class represents the base object, from which other (vendor specific classes) inherit.
This class is basically a wrapper class around Kirk Byers' excellent library, netmiko.
Even though the netmiko library already provides pretty straightforward and easy way to access network devices,
the CliBaseConnection tries to handle multiple events which can arise, such as:
- Device is unreachable
- Fallback to Telnet if SSH is not supported by device (and vice-versa)
- Handles errors in outputs
Apart from the 'send command, receive output' this class also performs the parsing and storing outputs.
"""
def __init__(
self, ip=None, username=None, password=None,
parser=None, secret=None, enable=False, store_outputs=False,
DEBUG=False, verbosity=3, netmiko_params={}
):
"""
:param ip: (str) IP address or FQDN of the device you're trying to connect to
:param username: (str) Username used for login to device
:param password: (str) Password used for login to device
:param parser: (ParserModule) Instance of ParserModule class which will be used for parsing of text outputs.
By default, new instance of ParserModule is created.
:param secret: (str) Enable secret for accessing Privileged EXEC Mode
:param enable: (bool) Whether or not enable Privileged EXEC Mode on device
:param store_outputs: (bool) Whether or not store text outputs of sent commands
:param DEBUG: (bool) Enable debugging logging
"""
self.ip = ip
self.username = username
self.password = password
self.ssh_method = None
self.telnet_method = None
self.primary_method = None
self.secondary_method = None
self.secret = secret
self.enable = enable
self.netmiko_params = netmiko_params if isinstance(netmiko_params, dict) else {}
self.provider = None
self._get_provider()
self.store_outputs = store_outputs
self.enabled = False
self.is_alive = False
self.config = False
self.prompt_end = [">", "#"] # The first item is for 'not-enabled mode', the second is for 'Privileged EXEC Mode'
self.logger = get_logger(name="Connection-{}".format(self.ip), DEBUG=DEBUG, verbosity=verbosity)
self.parser = parser
self.connected = False
self.failures = []
self.outputs = {}
self.data = {"ipAddress": self.ip}
self.device = None
def __enter__(self):
"""
Enables usage of Python's Context Manager, using ``with`` statement.
:return: Instance of the ``self`` object.
"""
self._connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
This function is used when exiting Python's Context Manager.
:return: ``None``
"""
try:
self.save_output(filename=self.data["hostname"], data=self.data)
except KeyError:
self.logger.error(msg="Could not store data of device {}. Reason: Could not retrieve any data".format(self.ip))
except Exception as e:
self.logger.error(msg="Could not store data of device {}. Reason: Unhandled Exception: {}".format(self.ip, repr(e)))
finally:
self.disconnect()
def _get_provider(self):
"""
Creates provider dictionary for Netmiko connection
:return: ``None``
"""
self.provider = {
"ip": self.ip,
"username": self.username,
"password": self.password
}
if self.secret:
self.provider["secret"] = self.secret
def _connect_telnet(self):
"""
This function tries to establish connection with device via Telnet
:return: (``netmiko.ConnectHandler``) device
"""
device = None
self.provider["device_type"] = self.telnet_method
self.logger.debug(msg="Trying to connect to device {} via Telnet...".format(self.ip))
try:
device = ConnectHandler(**self.provider, **self.netmiko_params)
except TimeoutError:
self.logger.error(msg="Could not connect to '{}' using '{}'. Reason: TimeOut.".format(self.ip, self.telnet_method))
self.failures.append("telnet_connection_timeout")
except ConnectionRefusedError:
self.failures.append("telnet_connection_refused")
self.logger.error(msg="Could not connect to '{}' using '{}'. Reason: Connection Refused.".format(self.ip, self.telnet_method))
# TODO: Check fix in netmiko
except AttributeError("module 'serial' has no attribute 'EIGHTBITS'", ):
pass
except Exception as e:
print(repr(e))
self.failures.append("telnet_unknown")
self.logger.error(msg="Could not connect to '{}' using '{}'. Reason: Unknown.".format(self.ip, self.telnet_method))
finally:
if device:
self.logger.info(msg="Connected to '{}' using '{}'.".format(self.ip, self.telnet_method))
return device
def _connect_ssh(self):
"""
This function tries to establish connection with device via SSH
:return: (``netmiko.ConnectHandler``) device
"""
device = None
self.logger.debug(msg="Trying to connect to device {} via SSH...".format(self.ip))
self.provider["device_type"] = self.ssh_method
try:
device = ConnectHandler(**self.provider, **self.netmiko_params)
except NetMikoTimeoutException:
self.failures.append("ssh_connection_timeout")
self.logger.error(msg="Could not connect to '{}' using '{}'. Reason: Timeout.".format(self.ip, self.ssh_method))
except NetMikoAuthenticationException:
self.failures.append("ssh_auth_fail")
self.logger.error(msg="Could not connect to '{}' using '{}'. Reason: Authentication Failed.".format(self.ip, self.ssh_method))
# TODO: Check fix in netmiko
except AttributeError("module 'serial' has no attribute 'EIGHTBITS'",):
pass
except Exception as e:
print(repr(e))
self.failures.append("ssh_unknown")
self.logger.error(msg="Could not connect to '{}' using '{}'. Reason: Unknown.".format(self.ip, self.ssh_method))
finally:
if device:
self.logger.info(msg="Connected to '{}' using '{}'.".format(self.ip, self.ssh_method))
return device
def _connect(self):
"""
This function handles connection to device, if primary method fails, it will try to connect using secondary method.
:return: ``None``
"""
if self.device is not None:
if self.device.is_alive():
self.is_alive = True
self.logger.debug(msg="Connection is already established.")
else:
try:
self.logger.debug(msg="Trying to re-establish connection to device.")
self.device.establish_connection()
self.device.session_preparation()
self._check_enable_level(self.device)
except Exception as e:
self.logger.critical(msg="Failed to reconnect do device.")
else:
self.is_alive = False
device = None
if self.primary_method == self.ssh_method:
device = self._connect_ssh()
elif self.primary_method == self.telnet_method:
device = self._connect_telnet()
if not device:
if self.secondary_method == self.telnet_method:
device = self._connect_telnet()
elif self.secondary_method == self.ssh_method:
self._connect_ssh()
if device is not None:
self._check_enable_level(device)
else:
self.logger.error(msg="Could not connect to device '{}'".format(self.ip))
def _check_enable_level(self, device):
"""
This function is called at the end of ``self._connect()`` to ensure that the connection is actually alive
and that the proper privilege level is set.
:param device: (``Netmiko.ConnectHandler``) Instance of ``netmiko.ConnectHandler``. If the connection is working, this will be set as ``self.device``
:return: ``None``
"""
try:
if device.is_alive():
self.is_alive = True
else:
self.logger.critical(msg="Connection is not alive.")
prompt = device.find_prompt()
self.data["hostname"] = prompt[:-1]
if prompt[-1] == self.prompt_end[0]:
self.enabled = False
if prompt[-1] == self.prompt_end[1]:
self.enabled = True
if self.enable and not self.enabled:
device.enable()
if device.find_prompt()[-1] == self.prompt_end[1]:
self.logger.debug(msg="Successfully enabled Privileged EXEC Mode on device '{}'".format(self.ip))
self.enabled = True
else:
self.logger.error(msg="Failed to enable Privileged EXEC Mode on device '{}'".format(self.ip))
if not self.enable and self.enabled:
device.exit_enable_mode()
if device.find_prompt()[-1] ==self.prompt_end[0]:
self.logger.debug(msg="Successfully disabled Privileged EXEC Mode on device '{}'".format(self.ip))
self.enabled = True
else:
self.logger.error(msg="Failed to disable Privileged EXEC Mode on device '{}'".format(self.ip))
except ValueError as e:
self.logger.critical(msg="Could not enter enable mode: {}".format(repr(e)))
except Exception as e:
print(repr(e))
finally:
self.device = device
def disconnect(self):
"""
This function handles graceful disconnect from the device.
:return: ``None``
"""
if self.device is not None:
self.device.disconnect()
if not self.device.is_alive():
self.is_alive = False
self.logger.info(msg="Successfully disconnected from device {}".format(self.ip))
else:
self.is_alive = True
self.logger.error(msg="Failed to disconnect from device {}".format(self.ip))
else:
self.logger.info(msg="Device {} is not connected.".format(self.ip))
def _send_command(self, command, expect_string=None):
"""
:param str command: Command to send to device
:return: Plaintext output of command from device
"""
if (not self.device) or (not self.is_alive):
self.logger.error(msg="Device {} is not connected, cannot send command.".format(self.ip))
return None
self.logger.debug(msg="Sending command '{}' to device {} ({})".format(command, self.data["hostname"], self.ip))
output = None
try:
output = self.device.send_command(command_string=command, expect_string=expect_string)
except AttributeError:
self.logger.critical(msg="Connection to device {} has not been initialized.".format(self.ip))
except Exception as e:
self.logger.error(msg="Unhandled exception occurred when trying to send command. Exception: {}".format(repr(e)))
finally:
if output and self.store_outputs:
self.save_output(filename=command, data=output)
return output
def _send_commands(self, commands):
"""
Sends multiple commands to device.
:param list commands: List of commands to run
:return: Dictionary with key=command, value=output_of_the_command
"""
output = {}
for command in commands:
output[command] = self._send_command(command)
return output
def _command_handler(self, commands=None, action=None, out_filter=None, return_raw=False):
"""
This function tries to send multiple 'types' of given command and waits for correct output.
This should solve the problem with different command syntax, such as 'show mac address-table' vs
'show mac-address-table' on different versions of Cisco IOS.
When correct output is returned, it is then parsed and the result is returned.
:param str action: Action to perform - has to be key of self.command_mappings
:param list commands: List of command string to try, such as ['show mac-address-table', 'show mac address-table']
:param out_filter: Instance of Filter class
:param bool return_raw: If set to `True`, raw output will be returned.
:return: JSON representation of command output
"""
start_time = timeit.default_timer()
if commands is None:
commands = self.command_mappings[action]
command_output = ""
used_command = ""
parsed_output = []
for command in commands:
command_output = self._send_command(command)
if not command_output:
self.logger.error(msg="Could not retrieve any output. Possibly non-active connection.")
return []
if "% Invalid input detected at '^' marker." in command_output:
self.logger.error(msg="Device {} does not support command '{}'".format(self.ip, command))
elif "% Ambiguous command:" in command_output:
self.logger.error(msg="Device {}: Ambiguous command: '{}'".format(self.ip, command))
elif command_output == "":
self.logger.error(msg="Device {} returned empty output for command '{}'".format(self.ip, command))
else:
self.logger.debug(msg="Device {} returned output for command '{}'".format(self.ip, command))
used_command = command
break
if self.store_outputs and command_output != "":
self.save_output(filename=used_command, data=command_output)
if command_output == "" or command_output is None:
self.logger.error(msg="Device {} did not return output for any of the commands: {}".format(self.ip, commands))
if return_raw:
return ""
else:
return []
if return_raw:
return command_output
else:
# Try parsing the output
try:
parsed_output = self.parser.autoparse(command=commands[0], text=command_output)
if isinstance(out_filter, Filter):
parsed_output = out_filter.universal_cleanup(data=parsed_output)
if action is not None:
self.data[action[4:]] = parsed_output
except Exception as e:
print(repr(e))
self.logger.error(msg="Device {}: Failed to parse output of command '{}'".format(self.ip, used_command))
finally:
self.logger.debug(msg="Processing of action {} took {} seconds.".format(action, timeit.default_timer()-start_time))
return parsed_output
def store_raw_output(self, command, raw_output, ext="txt"):
"""
This function is used for storing the plaintext output of the commands called on the device in separate files. Used mainly for debugging and
development purposes. This function is only called if the `store_outputs` parameter is set to `True`.
:param str command: Command string executed on the device.
:param str raw_output: Plaintext output of the command.
:param str ext: Extension of the file, ".txt" by default.
:return: ``None``
"""
folder_name = str(self.ip)
if "hostname" in self.data.keys():
folder_name = "{}_{}".format(self.ip, self.data["hostname"])
write_output(path=folder_name, filename=command.replace(" ", "_"), data=raw_output, logger=self.logger)
"""
path = os.path.join(OUTPUT_PATH, self.ip)
path = check_path(path)
if path:
with open(os.path.join(path, "{}.{}".format(command, ext)), mode="w+") as f:
f.write(raw_output)
"""
def save_output(self, filename, data):
folder_name = str(self.ip)
if "hostname" in self.data.keys():
folder_name = "{}_{}".format(self.ip, self.data["hostname"])
write_output(path=folder_name, filename=filename.replace(" ", "_"), data=data, logger=self.logger)
def check_connection(self):
"""
This function can be used to check state of the connection. Returns `True` if the connection is active and `False` if it isn't.
:return: Bool value representing the connection state.
"""
if self.device is not None:
if self.device.is_alive():
self.is_alive = True
return True
else:
self.logger.error(msg="Connection is prepared, but not established.")
self.is_alive = False
return False
else:
self.logger.error(msg="Connection has not been initialized.")
self.is_alive = False
return False
def config_mode(self):
if self.check_connection():
if self.device.check_config_mode():
self.logger.debug(msg="Configuration mode is already enabled.")
return True
else:
self.device.config_mode()
if self.device.check_config_mode():
self.logger.info(msg="Configuration mode has been enabled.")
return True
else:
self.logger.error(msg="Failed to enter configuration mode.")
return False
else:
self.logger.error(msg="Could not enter device configuration mode. Reason: Connection is not established.")
return False
#####################
### GET Functions ###
#####################
def get_vlans(self):
"""
This function returns JSON representation of all VLANs enabled on the device, together with list of assigned interfaces. In Cisco terms, this represents
the `show vlan brief` command.
:return: List of dictionaries.
"""
return self._command_handler(action="get_vlans")
def get_inventory(self):
"""
This function return JSON representation of all installed modules and HW parts of the device. In Cisco terms, this represents the command `show inventory`.
:return: List of dictionaries.
"""
return self._command_handler(action="get_inventory")
def get_interfaces(self):
"""
This function returns JSON representation of all physical and virtual interfaces of the device, containing all available info about each interface.
In Cisco terms, this represents usage of command `show interfaces`.
:return: List of dictionaries.
"""
return self._command_handler(action="get_interfaces")
def get_portchannels(self):
"""
This function returns JSON representation of all logical bind interfaces (etherchannels, portchannels). In Cisco terms, this represents the
`show etherchannel summary` command.
:return: List of dictionaries.
"""
return self._command_handler(action="get_portchannels")
def get_license(self):
"""
This function return JSON representation of licenses activated or installed on the device. In Cisco terms, this represents the `show license` command.
:return: List of dictionaries.
"""
return self._command_handler(action="get_license")
def get_version(self):
    """
    Retrieve basic device information (vendor, platform, software version,
    etc.) as JSON. In Cisco terms this corresponds to `show version`.
    :return: List of dictionaries.
    """
    action = "get_version"
    return self._command_handler(action=action)
def get_mac_address_table(self):
    """
    Retrieve the device MAC address table as JSON. In Cisco terms this
    corresponds to `show mac address-table`.
    :return: List of dictionaries.
    """
    action = "get_mac_address_table"
    return self._command_handler(action=action)
def get_arp(self):
    """
    Retrieve the device ARP table as JSON. In Cisco terms this corresponds
    to `show ip arp`.
    :return: List of dictionaries.
    """
    action = "get_arp"
    return self._command_handler(action=action)
def __str__(self):
    """Human-readable identification of this connection."""
    return "[Connection -> %s]" % self.ip
def __repr__(self):
    """Debug representation; mirrors __str__."""
    return "[Connection -> %s]" % self.ip
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
A simple script for testing either in-built methods or newly added methods
"""
import torch
import numpy as np
from ptranking.ltr_global import ltr_seed
from ptranking.ltr_adhoc.eval.ltr import LTREvaluator
# Seed both numpy and torch with the shared project seed for reproducible runs.
np.random.seed(seed=ltr_seed)
torch.manual_seed(seed=ltr_seed)
if __name__ == '__main__':
    """
    Supported learning-to-rank models (optimization via Empirical Risk Minimization):
        Pointwise: RankMSE
        Pairwise : RankNet
        Listwise : LambdaRank, ListNet, ListMLE, RankCosine, ApproxNDCG,
                   WassRank, STListNet, LambdaLoss
    Supported datasets:
        LETOR       : MQ2007_Super, MQ2008_Super, MQ2007_Semi, MQ2008_Semi
        MSLRWEB     : MSLRWEB10K, MSLRWEB30K
        Yahoo_LTR   : Set1, Set2
        ISTELLA_LTR : Istella_S, Istella, Istella_X
    """
    cuda = None               # gpu id (e.g., 0 or 1); None means run on cpu
    debug = True              # debug mode merely checks that the model can operate
    config_with_json = False  # whether the run configuration comes from json files
    reproduce = False         # reproduce experiments given pre-trained models

    # Uncomment additional entries to evaluate more models in one run.
    models_to_run = [
        #'RankMSE',
        'RankNet',
        #'LambdaRank',
        #'ListNet',
        #'ListMLE',
        #'RankCosine',
        #'ApproxNDCG',
        #'WassRank',
        #'STListNet',
        #'LambdaLoss'
    ]

    evaluator = LTREvaluator(cuda=cuda)

    if config_with_json:
        # Directory holding the json configuration files; adjust per machine.
        dir_json = '/Users/solar/WorkBench/Dropbox/CodeBench/GitPool/wildltr_ptranking/testing/ltr_adhoc/json/'
        for model_id in models_to_run:
            evaluator.run(debug=debug, model_id=model_id, config_with_json=config_with_json, dir_json=dir_json)
    else:
        # Type of neural scoring function: 'pointsf' or 'listsf'.
        sf_id = 'pointsf'
        # Selected dataset id.
        data_id = 'MQ2008_Super'
        # Grid search explores the effects of different hyper-parameters of a model.
        grid_search = False
        # Location of the adopted data; adjust per machine.
        dir_data = '/Users/iimac/Workbench/Corpus/L2R/LETOR4.0/MQ2008/'
        # Output directory; adjust per machine.
        dir_output = '/Users/iimac/Workbench/CodeBench/Output/NeuralLTR/'
        for model_id in models_to_run:
            evaluator.run(debug=debug, model_id=model_id, sf_id=sf_id, grid_search=grid_search,
                          data_id=data_id, dir_data=dir_data, dir_output=dir_output, reproduce=reproduce)
|
# Regression test: an isoline graphics method must accept colour *names* mixed
# with [R, G, B] / [R, G, B, A] lists (components in percent) in `linecolors`.
import vcs, numpy, os, sys, cdms2, vcs.testing.regression as regression

x=regression.init()
f=cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data=f("clt",slice(0,1,))  # first time slice of the cloud-cover variable
gm = x.createisoline()
gm.levels = range(0,110,10)  # contour levels 0..100 step 10
# Mix of named colours and RGB(A) percentage lists.
gm.linecolors = ["green","red","blue","bisque","yellow","grey",
                 [100,0,0,50], [0,100,0],"salmon",[0,0,100,75]]
x.plot(data,gm,bg=True)  # bg=True: render off-screen
# Compare the rendered canvas against the stored baseline image.
regression.run(x, 'test_vcs_settings_color_name_rgba_isoline.png')
import torch
import torch.nn as nn
import math
import numpy as np
# Public factory functions exposed by this module (RegNetX variants).
__all__ = ['regnetx_002', 'regnetx_004', 'regnetx_006', 'regnetx_008', 'regnetx_016', 'regnetx_032',
           'regnetx_040', 'regnetx_064', 'regnetx_080', 'regnetx_120', 'regnetx_160', 'regnetx_320']
class ResStemIN(nn.Module):
    """ResNet stem for ImageNet: 7x7 conv, BN, ReLU, 3x3 max-pool."""

    def __init__(self, w_in, w_out):
        super(ResStemIN, self).__init__()
        self.conv = nn.Conv2d(w_in, w_out, 7, stride=2, padding=3, bias=False)
        self.bn = nn.BatchNorm2d(w_out)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        # Explicit pipeline: conv -> bn -> relu -> pool.
        return self.pool(self.relu(self.bn(self.conv(x))))
class SimpleStemIN(nn.Module):
    """Simple stem for ImageNet: 3x3 conv, BN, ReLU."""

    def __init__(self, in_w, out_w):
        super(SimpleStemIN, self).__init__()
        self.conv = nn.Conv2d(in_w, out_w, 3, stride=2, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(out_w)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Explicit pipeline: conv -> bn -> relu.
        return self.relu(self.bn(self.conv(x)))
class SE(nn.Module):
    """Squeeze-and-Excitation block: AvgPool, 1x1 FC, ReLU, 1x1 FC, Sigmoid."""

    def __init__(self, w_in, w_se):
        super(SE, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # Channel-gating branch computed on the pooled descriptor.
        self.f_ex = nn.Sequential(
            nn.Conv2d(w_in, w_se, 1, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(w_se, w_in, 1, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        gate = self.f_ex(self.avg_pool(x))
        return x * gate
class BottleneckTransform(nn.Module):
    """Bottleneck transform: 1x1, grouped 3x3 [+SE], 1x1."""

    def __init__(self, w_in, w_out, stride, bm, gw, se_r):
        super(BottleneckTransform, self).__init__()
        w_b = int(round(w_out * bm))  # bottleneck width
        g = w_b // gw                 # group count for the 3x3 conv
        self.a = nn.Conv2d(w_in, w_b, 1, stride=1, padding=0, bias=False)
        self.a_bn = nn.BatchNorm2d(w_b)
        self.a_relu = nn.ReLU(inplace=True)
        self.b = nn.Conv2d(w_b, w_b, 3, stride=stride, padding=1, groups=g, bias=False)
        self.b_bn = nn.BatchNorm2d(w_b)
        self.b_relu = nn.ReLU(inplace=True)
        if se_r:
            # SE width is computed relative to the *input* width.
            w_se = int(round(w_in * se_r))
            self.se = SE(w_b, w_se)
        self.c = nn.Conv2d(w_b, w_out, 1, stride=1, padding=0, bias=False)
        self.c_bn = nn.BatchNorm2d(w_out)
        self.c_bn.final_bn = True  # marker read by AnyNet's zero-gamma init

    def forward(self, x):
        x = self.a_relu(self.a_bn(self.a(x)))
        x = self.b_relu(self.b_bn(self.b(x)))
        if hasattr(self, "se"):
            x = self.se(x)
        return self.c_bn(self.c(x))
class ResBottleneckBlock(nn.Module):
    """Residual bottleneck block: x + F(x), with F the bottleneck transform."""

    def __init__(self, w_in, w_out, stride, bm=1.0, gw=1, se_r=None):
        super(ResBottleneckBlock, self).__init__()
        # A projection shortcut is needed whenever the output shape changes.
        self.proj_block = (w_in != w_out) or (stride != 1)
        if self.proj_block:
            self.proj = nn.Conv2d(w_in, w_out, 1, stride=stride, padding=0, bias=False)
            self.bn = nn.BatchNorm2d(w_out)
        self.f = BottleneckTransform(w_in, w_out, stride, bm, gw, se_r)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.bn(self.proj(x)) if self.proj_block else x
        return self.relu(shortcut + self.f(x))
class AnyHead(nn.Module):
    """AnyNet head: global average pool followed by a linear classifier."""

    def __init__(self, w_in, nc):
        super(AnyHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(w_in, nc, bias=True)

    def forward(self, x):
        pooled = self.avg_pool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(flat)
class AnyStage(nn.Module):
    """AnyNet stage: a sequence of d blocks sharing one output shape."""

    def __init__(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
        super(AnyStage, self).__init__()
        for i in range(d):
            # Only the first block may change resolution/width.
            b_stride = stride if i == 0 else 1
            b_w_in = w_in if i == 0 else w_out
            self.add_module("b{}".format(i + 1),
                            block_fun(b_w_in, w_out, b_stride, bm, gw, se_r))

    def forward(self, x):
        for block in self.children():
            x = block(x)
        return x
class AnyNet(nn.Module):
    """AnyNet model: stem -> stages (s1..sN) -> head.

    Expected keyword arguments (see ``_construct``):
        stem_w: stem output width.
        ds:     per-stage depths (blocks per stage).
        ws:     per-stage widths.
        ss:     per-stage strides.
        bms:    per-stage bottleneck multipliers (falsy -> dummy Nones).
        gws:    per-stage group widths (falsy -> dummy Nones).
        se_r:   Squeeze-and-Excitation ratio, or None to disable SE.
        nc:     number of output classes.
    """

    def __init__(self, **kwargs):
        super(AnyNet, self).__init__()
        if kwargs:
            self._construct(
                stem_w=kwargs["stem_w"],
                ds=kwargs["ds"],
                ws=kwargs["ws"],
                ss=kwargs["ss"],
                bms=kwargs["bms"],
                gws=kwargs["gws"],
                se_r=kwargs["se_r"],
                nc=kwargs["nc"],
            )
        # Weight initialization for every submodule.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Note that there is no bias due to BN
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                # BNs flagged with `final_bn` (last BN of each bottleneck) get
                # zero gamma, so residual blocks start out close to identity.
                zero_init_gamma = hasattr(m, "final_bn") and m.final_bn
                m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(mean=0.0, std=0.01)
                m.bias.data.zero_()

    def _construct(self, stem_w, ds, ws, ss, bms, gws, se_r, nc):
        """Build the stem, the stages s1..sN and the classification head."""
        # Generate dummy bot muls and gs for models that do not use them
        bms = bms if bms else [None for _d in ds]
        gws = gws if gws else [None for _d in ds]
        stage_params = list(zip(ds, ws, ss, bms, gws))
        self.stem = SimpleStemIN(3, stem_w)
        prev_w = stem_w
        for i, (d, w, s, bm, gw) in enumerate(stage_params):
            name = "s{}".format(i + 1)
            self.add_module(name, AnyStage(prev_w, w, s, d, ResBottleneckBlock, bm, gw, se_r))
            prev_w = w
        self.head = AnyHead(w_in=prev_w, nc=nc)

    def forward(self, x):
        # Registration order (stem, stages, head) defines execution order.
        for module in self.children():
            x = module(x)
        return x
def quantize_float(f, q):
    """Round *f* to the nearest integer multiple of *q* (e.g. 13, 8 -> 16)."""
    multiples = round(f / q)
    return int(multiples * q)
def adjust_ws_gs_comp(ws, bms, gs):
    """Adjust widths and group sizes so each bottleneck width is divisible by
    its group size.

    For every (w, b, g): the bottleneck width w*b is computed, g is clamped to
    it, the bottleneck width is snapped to a multiple of g, and the width is
    mapped back through the bottleneck multiplier. Returns (ws, gs).
    """
    new_ws, new_gs = [], []
    for w, b, g in zip(ws, bms, gs):
        w_bot = int(w * b)
        g = min(g, w_bot)
        w_bot = int(round(w_bot / g) * g)  # snap to a multiple of g
        new_ws.append(int(w_bot / b))
        new_gs.append(g)
    return new_ws, new_gs
def get_stages_from_blocks(ws, rs):
    """Derive per-stage widths and depths from per-block widths/values.

    A new stage starts wherever the width or the companion value changes
    between consecutive blocks; sentinel zeros mark both ends of the sequence.
    Returns (stage widths, stage depths).
    """
    shifted = zip(ws + [0], [0] + ws, rs + [0], [0] + rs)
    is_boundary = [w != w_prev or r != r_prev for w, w_prev, r, r_prev in shifted]
    s_ws = [w for w, new_stage in zip(ws, is_boundary[:-1]) if new_stage]
    starts = [idx for idx, new_stage in enumerate(is_boundary) if new_stage]
    s_ds = np.diff(starts).tolist()
    return s_ws, s_ds
def generate_regnet(w_a, w_0, w_m, d, q=8):
    """Generate per-block widths from RegNet design-space parameters.

    Widths grow linearly in a continuous space (slope w_a, intercept w_0),
    are snapped to powers of w_m, and finally quantized to multiples of q.

    Returns (per-block widths, number of distinct widths, max stage index + 1,
    continuous widths).
    """
    assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
    ws_cont = np.arange(d) * w_a + w_0                  # linear (continuous) widths
    ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))  # per-block stage index
    ws = w_0 * np.power(w_m, ks)                        # snap to powers of w_m
    ws = np.round(np.divide(ws, q)) * q                 # quantize to multiples of q
    num_stages = len(np.unique(ws))
    max_stage = ks.max() + 1
    ws = ws.astype(int).tolist()
    ws_cont = ws_cont.tolist()
    return ws, num_stages, max_stage, ws_cont
class RegNet(AnyNet):
    """RegNet model: an AnyNet whose stage widths/depths are derived from the
    RegNet design-space parameters (w_a, w_0, w_m, d).

    Note: incoming ``**kwargs`` are accepted for signature compatibility but
    are overwritten below before being forwarded to AnyNet.
    """

    def __init__(self, w_a, w_0, w_m, d, group_w, bot_mul, se_r=None, num_classes=1000, **kwargs):
        # Generate RegNet ws per block
        ws, num_stages, _, _ = generate_regnet(w_a, w_0, w_m, d)
        # Convert to per stage format
        s_ws, s_ds = get_stages_from_blocks(ws, ws)
        # Use the same gw, bm and ss for each stage
        s_gs = [group_w for _ in range(num_stages)]
        s_bs = [bot_mul for _ in range(num_stages)]
        s_ss = [2 for _ in range(num_stages)]
        # Adjust the compatibility of ws and gws
        s_ws, s_gs = adjust_ws_gs_comp(s_ws, s_bs, s_gs)
        # Get AnyNet arguments defining the RegNet
        kwargs = {
            "stem_w": 32,
            "ds": s_ds,
            "ws": s_ws,
            "ss": s_ss,
            "bms": s_bs,
            "gws": s_gs,
            "se_r": se_r,
            "nc": num_classes,
        }
        super(RegNet, self).__init__(**kwargs)
def regnetx_002(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=36.44, w_0=24, w_m=2.49, d=13, group_w=8, bot_mul=1, **kwargs)
def regnetx_004(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=24.48, w_0=24, w_m=2.54, d=22, group_w=16, bot_mul=1, **kwargs)
def regnetx_006(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=36.97, w_0=48, w_m=2.24, d=16, group_w=24, bot_mul=1, **kwargs)
def regnetx_008(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=35.73, w_0=56, w_m=2.28, d=16, group_w=16, bot_mul=1, **kwargs)
def regnetx_016(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=34.01, w_0=80, w_m=2.25, d=18, group_w=24, bot_mul=1, **kwargs)
def regnetx_032(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=26.31, w_0=88, w_m=2.25, d=25, group_w=48, bot_mul=1, **kwargs)
def regnetx_040(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=38.65, w_0=96, w_m=2.43, d=23, group_w=40, bot_mul=1, **kwargs)
def regnetx_064(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=60.83, w_0=184, w_m=2.07, d=17, group_w=56, bot_mul=1, **kwargs)
def regnetx_080(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=49.56, w_0=80, w_m=2.88, d=23, group_w=120, bot_mul=1, **kwargs)
def regnetx_120(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=73.36, w_0=168, w_m=2.37, d=19, group_w=112, bot_mul=1, **kwargs)
def regnetx_160(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=55.59, w_0=216, w_m=2.1, d=22, group_w=128, bot_mul=1, **kwargs)
def regnetx_320(**kwargs):
    """RegNetX variant; extra keyword arguments are forwarded to RegNet."""
    return RegNet(w_a=69.86, w_0=320, w_m=2.0, d=23, group_w=168, bot_mul=1, **kwargs)
|
class IdpConstants:
    """String constants used by the agentless IdP sample integration."""

    # endpoints
    PICKUP_ENDPOINT = "/ext/ref/pickup"
    DROPOFF_ENDPOINT = "/ext/ref/dropoff"
    AGENTLESS_BASE = "AgentlessIdPSample/app"
    START_SP_SSO = "/sp/startSSO.ping"
    # attribute keys
    SUBJECT = "subject"
    AUTH_INST = "authnInst"
    # POST keys
    USERNAME = "username"
    PASSWORD = "password"
    REF = "REF"
    RESUME_PATH = "resumePath"
    CURRENT_BASE_URL = "currentBaseUrl"
    # adapter configuration keys
    CONFIG_SECTION = "idpAdapterConfiguration"
    BASE_PF_URL = "basePfUrl"
    ADAPTER_USERNAME = "username"
    ADAPTER_PASSWORD = "passphrase"
    ADAPTER_ID = "adapterId"
    TARGET_URL = "targetURL"
    PARTNER_ENTITY_ID = "partnerEntityId"
    # PF headers (custom headers sent to PingFederate)
    PING_UNAME_HEADER = "ping.uname"
    PING_PASSWORD_HEADER = "ping.pwd"
    PING_ADAPTER_HEADER = "ping.instanceId"
|
####################################################################
######### Copyright 2016-2017 BigSQL ###########
####################################################################
import os
import platform
from twisted.internet.defer import inlineCallbacks, returnValue
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
import sys
import sqlite3
import json
from datetime import datetime
# Resolve BigSQL environment roots; empty strings when the env vars are unset.
PGC_HOME = os.getenv("PGC_HOME", "")
PGC_LOGS = os.getenv("PGC_LOGS", "")

# Make the hub scripts and their bundled libraries importable.
pgc_scripts_path = os.path.join(PGC_HOME, 'hub', 'scripts')
if pgc_scripts_path not in sys.path:
    sys.path.append(pgc_scripts_path)
pgclib_scripts_path = os.path.join(PGC_HOME, 'hub', 'scripts', 'lib')
if pgclib_scripts_path not in sys.path:
    sys.path.append(pgclib_scripts_path)

import util

# Add the platform-specific native library directory.
this_uname = str(platform.system())
if this_uname == "Darwin":
    sys.path.append(os.path.join(os.path.dirname(__file__), 'lib', 'osx'))
elif this_uname == "Linux":
    sys.path.append(os.path.join(os.path.dirname(__file__), 'lib', 'linux'))

current_path = os.path.dirname(os.path.realpath(__file__))
reports_path = os.path.join(current_path, "reports")
# SQLite database tracking locally installed components.
db_local = PGC_HOME + os.sep + "conf" + os.sep + "pgc_local.db"
class Reporting(object):
    """
    This class exposes all the actions for the components in the methods defined.
    Each public method publishes its result on a WAMP topic via self.session.
    (Python 2 module.)
    """
    def __init__(self, appsession=ApplicationSession):
        # WAMP session used to publish results back to subscribers.
        self.session = appsession

    def get_process_status(self, process_log_dir):
        """Read the 'status' file a background process writes into its log
        directory; returns a dict with 'pid' and 'exit_code' (exit_code is
        None while the process is still running), or {} if no status file."""
        process_dict = {}
        status_file = os.path.join(process_log_dir,"status")
        if os.path.exists(status_file):
            with open(status_file) as data_file:
                data = json.load(data_file)
                process_dict['exit_code'] = data.get("exit_code", None)
                process_dict['pid'] = data.get("pid")
        return process_dict

    @inlineCallbacks
    def generate_profiler_reports(self, hostname="localhost", username="postgres",
                                  port=5432, database="", password="", queries="",
                                  report_title="", report_desc="",
                                  action=None, comp=None):
        """Drive the PL/pgSQL profiler and publish the outcome on
        'com.bigsql.profilerReports'.

        action is one of: enable, disable, check, reset, profile_query,
        generate. When *comp* is given, connection defaults are taken from
        that component's environment file.
        """
        if comp:
            util.read_env_file(comp)
            username = os.environ.get("PGUSER")
            port = os.environ.get("PGPORT")
        result = {}
        result['action'] = action
        try:
            from ProfilerReport import ProfilerReport
            args = {}
            args['pgPass'] = password
            args['hostName'] = hostname
            args['pgDB'] = database
            args['pgUser'] = username
            args['pgPort'] = port
            plReport = ProfilerReport(args)
            report_file = ""
            result['error'] = 0
            if action=="enable":
                plReport.enableProfiler()
                result['msg'] = "Global profiling statistics has been enabled. Execute a PL/pgSQL workload before viewing the report."
            elif action=="disable":
                plReport.disableProfiler()
                result['msg'] = "Global profiling statistics has been disabled."
            elif action=="check":
                is_enabled=plReport.is_enabled()
                result['enabled']=is_enabled
                if is_enabled:
                    result['status'] = 'enabled'
                else:
                    result['status'] = 'disabled'
            elif action=="reset":
                plReport.resetSharedData()
                result['msg'] = "Global profiling statistics reset."
            elif action=="profile_query":
                report_file = plReport.generateQueryReports(queries,
                                                            report_title,
                                                            report_desc)
            elif action=="generate":
                if plReport.has_data():
                    report_file = plReport.generateGlobalReports(
                        report_title, report_desc)
                else:
                    # No data: report the most specific reason available.
                    result['error'] = 1
                    result['msg'] = "No profiling statistics available."
                    if not plReport.is_enabled():
                        result['msg'] = "Profiler is not enabled."
            result['report_file'] = report_file
        except Exception as e:
            import traceback
            print traceback.format_exc()
            print e
            result = {}
            result['error'] = 1
            result['msg'] = str(e)
        yield self.session.publish('com.bigsql.profilerReports', result)

    @inlineCallbacks
    def generate_badger_reports(self, log_files, db=None, jobs=None, log_prefix=None, title=None):
        """Kick off pgBadger report generation and publish progress/results
        on 'com.bigsql.badgerReports'."""
        result = {}
        try:
            from BadgerReport import BadgerReport
            badgerRpts = BadgerReport()
            report_file = badgerRpts.generateReports(log_files, db, jobs, log_prefix, title)
            process_log_dir = report_file['log_dir']
            report_status=self.get_process_status(process_log_dir)
            result['pid'] = report_status.get('pid')
            result['exit_code'] = report_status.get('exit_code')
            # No exit code yet means the generator is still running.
            if report_status.get('exit_code') is None:
                result['in_progress'] = True
            if report_file['error']:
                result['error'] = 1
                result['msg'] = report_file['error']
            else:
                result['error'] = 0
                result['report_file'] = report_file['file']
                report_file_path=os.path.join(reports_path,report_file['file'])
                if not os.path.exists(report_file_path):
                    result['error'] = 1
                    result['msg'] = "Check the parameters provided."
        except Exception as e:
            import traceback
            print traceback.format_exc()
            print e
            result = {}
            result['error'] = 1
            result['msg'] = str(e)
        yield self.session.publish('com.bigsql.badgerReports', result)

    @inlineCallbacks
    def get_pg_log_files(self, comp):
        """Collect the PostgreSQL log files of *comp* (newest first) and
        publish the JSON-encoded list on 'com.bigsql.log_files_list'."""
        jsonObj = {}
        files_list = []
        connL = sqlite3.connect(db_local)
        jsonDict = {}
        jsonList = []
        try:
            c = connL.cursor()
            # NOTE(review): comp is interpolated into the SQL text; input is
            # local/trusted here, but a parameterized query would be safer.
            sql = "SELECT component, logdir" + \
                  " FROM components " + \
                  " where logdir != '' and component='"+ comp +"' order by component desc"
            c.execute(sql)
            t_comp = c.fetchall()
            files_list = []
            connL.close()
            for comp in t_comp:
                log_dir = comp[1]
                if os.path.isdir(log_dir):
                    # Sort log files most-recently-modified first.
                    mtime = lambda f: os.stat(os.path.join(log_dir, f)).st_mtime
                    comp_dir_list = list(sorted(os.listdir(log_dir),
                                                key=mtime, reverse=True))
                    for d in comp_dir_list:
                        log_file_path = os.path.join(log_dir, d)
                        if not d.startswith("install") and d.endswith(".log"):
                            jsonDict['file']=d
                            jsonDict["log_file"] = log_file_path
                            file_size=os.path.getsize(log_file_path)
                            jsonDict["file_size"] = util.get_file_size(file_size)
                            jsonDict["component"] = comp[0]
                            mtime=os.stat(log_file_path).st_mtime
                            mdate=datetime.fromtimestamp(mtime).strftime('%Y-%m-%d %H:%M:%S')
                            jsonDict['mtime']=mdate
                            jsonList.append(jsonDict)
                            jsonDict = {}
                            files_list.append(log_file_path)
                    files_list.append(os.path.join(log_dir, "*"))
                    #jsonDict['file']="postgresql*.log"
                    #jsonDict["log_file"] = os.path.join(log_dir, "postgresql*.log")
                    #jsonDict["component"] = comp[0]
                    #jsonList.append(jsonDict)
                    #jsonDict = {}
            jsonObj = json.dumps(jsonList)
        except sqlite3.Error, e:
            print str(e)
        yield self.session.publish('com.bigsql.log_files_list', jsonObj)

    @inlineCallbacks
    def get_log_settings(self,comp):
        """Fetch the current logging settings for *comp* and publish them on
        'com.bigsql.logging_settings'."""
        try:
            result={}
            from BadgerReport import BadgerReport
            badger=BadgerReport()
            logging_settings=badger.getLoggingSettings(comp)
            result['error']=0
            result['settings']=logging_settings
            result['msg']=""
        except Exception as e:
            result={}
            result['error']=1
            result['msg']=str(e)
        yield self.session.publish('com.bigsql.logging_settings', result)

    @inlineCallbacks
    def change_log_params(self,comp, logdict):
        """Apply the logging parameter changes in *logdict* to *comp* and
        publish the outcome on 'com.bigsql.on_change_log_params'."""
        try:
            result={}
            from BadgerReport import BadgerReport
            badger=BadgerReport()
            change_status=badger.changeLoggingParams(comp, logdict)
            result['error']=0
            result['settings']=change_status
            result['msg']=""
        except Exception as e:
            result={}
            result['error']=1
            result['msg']=str(e)
        yield self.session.publish('com.bigsql.on_change_log_params', result)

    @inlineCallbacks
    def switch_log_file(self,comp, fileName=None):
        """Rotate the server log file of *comp* (optionally to *fileName*) and
        publish the outcome on 'com.bigsql.onSwitchLogfile'."""
        try:
            result={}
            from BadgerReport import BadgerReport
            badger=BadgerReport()
            switchStatus=badger.switchLogfile(comp, fileName)
            result['error']=0
            result['status']=switchStatus
            result['msg']=""
        except Exception as e:
            result={}
            result['error']=1
            result['msg']=str(e)
        yield self.session.publish('com.bigsql.onSwitchLogfile', result)
"""
Run all doctest examples inside the `polars` module using Python's built-in doctest module.
How to check examples: run this script; if it exits with code 0, all is good. Otherwise, the errors will be reported.
How to modify behaviour for doctests:
1. if you would like code to be run and output checked: add the output below the code block
2. if you would like code to be run (and thus checked whether it actually not fails), but output not be checked: add
`# doctest: +IGNORE_RESULT` to the code block. You may still add example output.
3. if you would not like code to run: add `#doctest: +SKIP`. You may still add example output.
Notes:
* Doctest does not have a built-in IGNORE_RESULT directive. We have a number of tests where we want to ensure that the
code runs, but the output may be random by design, or not interesting for us to check. To allow for this behaviour, a
custom output checker has been created, see below.
* The doctests depend on the exact string representation staying the same. This may not be true in the future. For
instance, in the past, the printout of dataframes has changed from rounded corners to less rounded corners. To
facilitate such a change, whilst not immediately having to add IGNORE_RESULT directives everywhere or changing all
outputs, set `IGNORE_RESULT_ALL=True` below. Do note that this does mean no output is being checked anymore.
* This script will always take the code from this repo (see `src_dir` below), but the module used to run the code is
determined by the import below (see `import polars as pl`). For example, in CI, the import will import the installed
package, not the code in the repo. This is similar to how pytest works.
"""
import doctest
import sys
from pathlib import Path
from typing import Any
import polars
# Show which polars installation the doctests will exercise
# (repo checkout vs installed package).
print(polars.__file__)
if __name__ == "__main__":
    # set to True to just run the code, and do not check any output. Will still report errors if the code is invalid
    IGNORE_RESULT_ALL = False

    # Below the implementation of the IGNORE_RESULT directive
    # You can ignore the result of a doctest by adding "doctest: +IGNORE_RESULT" into the code block
    # The difference with SKIP is that if the code errors on running, that will still be reported.
    IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
    OutputChecker = doctest.OutputChecker

    class CustomOutputChecker(OutputChecker):
        """Output checker that honours the custom IGNORE_RESULT option flag."""
        def check_output(self, want: str, got: str, optionflags: Any) -> bool:
            if IGNORE_RESULT_ALL:
                return True
            if IGNORE_RESULT & optionflags:
                return True
            else:
                return OutputChecker.check_output(self, want, got, optionflags)

    doctest.OutputChecker = CustomOutputChecker  # type: ignore

    # We want to be relaxed about whitespace, but strict on True vs 1
    # NOTE(review): these assignments rebind the doctest module's flag
    # *constants* rather than enabling options for a run; confirm that
    # passing `optionflags=1` below actually achieves the intended effect.
    doctest.NORMALIZE_WHITESPACE = True
    doctest.DONT_ACCEPT_TRUE_FOR_1 = True

    # If REPORT_NDIFF is turned on, it will report on line by line, character by character, differences
    # The disadvantage is that you cannot just copy the output directly into the docstring
    # doctest.REPORT_NDIFF = True

    results_list = []
    src_dir = Path(polars.__file__).parent  # __file__ returns the __init__.py
    print(src_dir)
    for file in src_dir.rglob("*.py"):
        pretty_file_name = file.relative_to(src_dir)
        print(file)
        print(f"===== Testing {pretty_file_name} =====")
        # The globs arg means we do not have to do `import polars as pl` on each example
        # optionflags=1 enables the NORMALIZE_WHITESPACE and other options above
        res = doctest.testfile(
            str(file), module_relative=False, globs={"pl": polars}, optionflags=1
        )
        results_list.append(
            {
                "name": str(pretty_file_name),
                "attempted": res.attempted,
                "failed": res.failed,
            }
        )

    results = polars.DataFrame(results_list)
    print(results.sort("attempted", reverse=True))

    # we define success as no failures, and at least one doctest having run
    success_flag = (results["failed"].sum() == 0) and (results["attempted"].sum() > 0)
    sys.exit(int(not success_flag))
|
#!/usr/bin/env python2
# coding: utf-8
import logging
import threading
import time
from collections import OrderedDict
import psutil
from geventwebsocket import Resource
from geventwebsocket import WebSocketApplication
from geventwebsocket import WebSocketError
from geventwebsocket import WebSocketServer
from pykit import threadutil
from pykit import utfjson
from pykit import jobq
logger = logging.getLogger(__name__)
# Keys used in the system-load dict reported to clients.
MEM_AVAILABLE = 'mem_available'
CPU_IDLE_PERCENT = 'cpu_idle_percent'
CLIENT_NUMBER = 'client_number'

# Default directory from which worker functions are imported.
JOBS_DIR = 'jobs'

# Client-tunable load thresholds. 'greater': True means the measured load
# must stay above the threshold; False means it must stay below it.
CHECK_LOAD_PARAMS = {
    'mem_low_threshold': {
        'load_name': MEM_AVAILABLE,
        'default': 500 * 1024 ** 2,  # 500M
        'greater': True,
    },
    'cpu_low_threshold': {
        'load_name': CPU_IDLE_PERCENT,
        'default': 3,  # 3%
        'greater': True,
    },
    'max_client_number': {
        'load_name': CLIENT_NUMBER,
        'default': 1000,
        'greater': False,
    },
}
class SystemOverloadError(Exception):
    """Raised when the host fails a client-requested system-load check."""
    pass
class JobError(Exception):
    """Base class for errors while handling a job request."""
    pass
class InvalidMessageError(JobError):
    """Raised when a client message is not valid JSON or has a wrong shape."""
    pass
class InvalidProgressError(InvalidMessageError):
    """Raised when the 'progress' field of a message is malformed."""
    pass
class LoadingError(JobError):
    """Raised when the requested worker module fails to import."""
    pass
class JobNotInSessionError(JobError):
    """Raised when a job cannot be found in the shared sessions table."""
    pass
class Job(object):
    """A background worker bound to a websocket channel.

    Class-level state shared by all channels/threads:
        lock     -- guards access to `sessions`.
        sessions -- maps ident -> Job for all currently running jobs.
    """
    lock = threading.RLock()
    sessions = {}

    def __init__(self, channel, msg, func):
        self.ident = msg['ident']
        self.channel = channel
        self.data = msg
        self.worker = func
        self.ctx = {}   # scratch space available to the worker function
        self.err = None  # exception raised by the worker, if any
        # Set by the worker to wake a progress sender early.
        self.progress_available = threading.Event()
        # Only one job per ident: if it already exists, do not start another.
        if self.ident in self.sessions:
            logger.info('job: %s already exists, created by chennel %s' %
                        (self.ident, repr(self.sessions[self.ident].channel)))
            return
        else:
            self.sessions[self.ident] = self
        logger.info(('inserted job: %s to sessions by channel %s, ' +
                     'there are %d jobs in sessions now') %
                    (self.ident, repr(self.channel), len(self.sessions)))
        self.thread = threadutil.start_thread(target=self.work, args=(),
                                              daemon=True)

    def work(self):
        """Thread body: run the worker; record any exception and always
        deregister the job on exit."""
        logger.info("job %s started, the data is: %s" %
                    (self.ident, self.data))
        try:
            self.worker(self)
        except Exception as e:
            logger.exception('job %s got exception: %s' %
                             (self.ident, repr(e)))
            self.err = e
        finally:
            logger.info('job %s ended' % self.ident)
            self.close()

    def close(self):
        """Remove this job from the shared sessions table."""
        with self.lock:
            del self.sessions[self.ident]
            logger.info(('removed job: %s from sessions, there are %d ' +
                         'jobs in sessions now') %
                        (self.ident, len(self.sessions)))
def get_or_create_job(channel, msg, func):
    """Create a Job for msg['ident'] unless one already exists (Job.__init__
    is a no-op for duplicates), then return the job currently registered
    under that ident. The whole operation happens under Job.lock."""
    with Job.lock:
        Job(channel, msg, func)
        return Job.sessions.get(msg['ident'])
def progress_sender(job, channel, interval=5, stat=None):
    """Periodically push job progress to the client over the websocket.

    Runs until the websocket errors out or the job thread has been dead for
    10 more rounds (so late progress still reaches the client). Each round
    sends `stat(job.data)`, and wakes early when the job signals
    `job.progress_available`.

    :param job: the Job whose `data` is reported and whose thread is watched.
    :param channel: websocket application; `channel.ws` is used for sending.
    :param interval: maximum seconds between two progress messages.
    :param stat: optional callable extracting the payload from job.data.
    """
    stat = stat or (lambda data: data)
    data = job.data
    remaining = 10
    try:
        while True:
            # if thread died due to some reason, still send 10 stats
            if not job.thread.is_alive():
                logger.info('job %s died: %s' % (job.ident, repr(job.err)))
                if remaining == 0:
                    channel.ws.close()
                    break
                remaining -= 1
            # Compute the payload once per round (original computed it twice:
            # once for logging and once for sending). Also fixes the 'jod'
            # typo in the log message.
            to_send = stat(data)
            logger.info('job %s on channel %s send progress: %s' %
                        (job.ident, repr(channel), repr(to_send)))
            if channel.report_system_load and isinstance(to_send, dict):
                to_send['system_load'] = channel.get_system_load()
            channel.ws.send(utfjson.dump(to_send))
            # Sleep up to `interval`, but wake early on fresh progress.
            if job.progress_available.wait(interval):
                job.progress_available.clear()
    except WebSocketError as e:
        if channel.ws.closed == True:
            logger.info('the client has closed the connection')
        else:
            logger.exception(('got websocket error when sending progress on' +
                              ' channel %s: %s') % (repr(channel), repr(e)))
    except Exception as e:
        logger.exception('got exception when sending progress on channel %s: %s'
                         % (repr(channel), repr(e)))
    channel.ws.close()
class JobdWebSocketApplication(WebSocketApplication):
jobq_mgr = None
def on_open(self):
    """geventwebsocket hook for a new client connection: log it and arm the
    one-shot guard so only the first message will be processed."""
    logger.info('on open, the channel is: ' + repr(self))
    self.ignore_message = False
def _parse_request(self, message):
    """Decode and validate the client's request message, then set up the job
    and (optionally) the progress reporter. Any failure is sent back to the
    client as an error message."""
    try:
        try:
            msg = utfjson.load(message)
        except Exception as e:
            raise InvalidMessageError(
                'message is not a vaild json string: %s' % message)
        self._check_msg(msg)
        # Per-connection reporting options supplied by the client.
        self.report_system_load = msg.get('report_system_load') == True
        self.cpu_sample_interval = msg.get('cpu_sample_interval', 0.02)
        if not isinstance(self.cpu_sample_interval, (int, long, float)):
            raise InvalidMessageError(
                'cpu_sample_interval is not a number')
        # Optional load check: refuse the request if the host is overloaded.
        check_load = msg.get('check_load')
        if type(check_load) == type({}):
            self._check_system_load(check_load)
        self.jobs_dir = msg.get('jobs_dir', JOBS_DIR)
        self._setup_response(msg)
        return
    except SystemOverloadError as e:
        logger.info('system overload on chennel %s, %s'
                    % (repr(self), repr(e)))
        self._send_err_and_close(e)
    except JobError as e:
        logger.info('error on channel %s while handling message, %s'
                    % (repr(self), repr(e)))
        self._send_err_and_close(e)
    except Exception as e:
        logger.exception(('exception on channel %s while handling ' +
                          'message, %s') % (repr(self), repr(e)))
        self._send_err_and_close(e)
def on_message(self, message):
    """geventwebsocket hook for an incoming message: only the first message
    on a channel is queued for processing; later ones are ignored."""
    logger.info('on message, the channel is: %s, the message is: %s' %
                (repr(self), message))
    if self.ignore_message:
        return
    else:
        self.ignore_message = True
    self.jobq_mgr.put((self, message))
def _send_err_and_close(self, err):
    """Serialize *err* (class name + args) and send it to the client.

    NOTE(review): despite the name, this method does not close the socket
    here — confirm whether closing is expected to happen elsewhere.
    """
    try:
        err_msg = {
            'err': err.__class__.__name__,
            'val': err.args,
        }
        self.ws.send(utfjson.dump(err_msg))
    except Exception as e:
        logger.error(('error on channel %s while sending back error '
                      + 'message, %s') % (repr(self), repr(e)))
def get_system_load(self):
    """Sample current host load: available memory (bytes), cpu idle percent
    (sampled over self.cpu_sample_interval seconds) and the number of
    connected websocket clients."""
    return {
        MEM_AVAILABLE: psutil.virtual_memory().available,
        CPU_IDLE_PERCENT: psutil.cpu_times_percent(
            self.cpu_sample_interval).idle,
        CLIENT_NUMBER: len(self.protocol.server.clients),
    }
def _check_system_load(self, check_load):
system_load = self.get_system_load()
for param_name, param_attr in CHECK_LOAD_PARAMS.iteritems():
param_value = check_load.get(param_name, param_attr['default'])
if not isinstance(param_value, (int, long, float)):
raise InvalidMessageError('%s is not a number' % param_name)
load_name = param_attr['load_name']
diff = system_load[load_name] - param_value
if not param_attr['greater']:
diff = 0 - diff
if diff < 0:
raise SystemOverloadError(
'%s: %d is %s than: %d' %
(load_name, system_load[load_name],
param_attr['greater'] and 'less' or 'greater',
param_value))
def _check_msg(self, msg):
if type(msg) != type({}):
raise InvalidMessageError("message is not dictionary")
if 'ident' not in msg:
raise InvalidMessageError("'ident' is not in message")
if 'func' not in msg:
raise InvalidMessageError("'func' is not in message")
def _setup_response(self, msg):
func = self._get_func_by_name(msg)
channel = self
job = get_or_create_job(channel, msg, func)
if job is None:
raise JobNotInSessionError(
'job not in sessions: ' + repr(Job.sessions))
progress = msg.get('progress', {})
if progress in (None, False):
return
if type(progress) != type({}):
raise InvalidProgressError(
'the progress in message is not a dictionary')
interval = progress.get('interval', 5)
progress_key = progress.get('key')
if progress_key is None:
lam = lambda r: r
else:
lam = lambda r: r.get(progress_key)
threadutil.start_thread(target=progress_sender,
args=(job, channel, interval, lam),
daemon=True)
def _get_func_by_name(self, msg):
mod_func = self.jobs_dir.split('/') + msg['func'].split('.')
mod_path = '.'.join(mod_func[:-1])
func_name = mod_func[-1]
try:
mod = __import__(mod_path)
except (ImportError, SyntaxError) as e:
raise LoadingError('failed to import %s: %s' % (mod_path, repr(e)))
for mod_name in mod_path.split('.')[1:]:
mod = getattr(mod, mod_name)
logger.info('mod imported from: ' + repr(mod.__file__))
try:
func = getattr(mod, func_name)
except AttributeError as e:
raise LoadingError("function not found: " + repr(func_name))
return func
def on_close(self, reason):
logger.info('on close, the channel is: ' + repr(self))
def _parse_request(args):
app, msg = args
app._parse_request(msg)
def run(ip='127.0.0.1', port=63482, jobq_thread_count=10):
    """Start the jobd websocket server (blocks forever).

    Creates the shared job-queue manager with `jobq_thread_count` worker
    threads feeding _parse_request, then serves websocket requests on
    (ip, port) with JobdWebSocketApplication mounted at '/'.
    """
    JobdWebSocketApplication.jobq_mgr = jobq.JobManager(
        [(_parse_request, jobq_thread_count)])
    WebSocketServer(
        (ip, port),
        Resource(OrderedDict({'/': JobdWebSocketApplication})),
    ).serve_forever()
|
from datetime import date

def categoria(idade):
    """Return the swim-league category name for a given age.

    Boundaries follow the original script: <=9 mirim, 10-14 infantil,
    15-19 junior, 20 senior, >20 master.
    NOTE(review): 'senior' covers only age 20 exactly, as in the original
    chained comparison (20 >= i > 19) — confirm this is the intended range.
    """
    if idade <= 9:
        return 'mirim'
    elif idade <= 14:
        return 'infantil'
    elif idade <= 19:
        return 'junior'
    elif idade <= 20:
        return 'senior'
    return 'master'

if __name__ == '__main__':
    a = int(input('Digite o ano de nascimento: '))
    # Use the current year instead of the hardcoded 2020 the script shipped
    # with, so the computed age does not go stale.
    i = date.today().year - a
    print('A categoria é ' + categoria(i))
|
from threading import Thread
import socket
import json
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
from junospyez_ossh_server.log import logger
__all__ = ['OutboundSSHServer']
def gather_basic_facts(device):
    """
    Using the provided Junos Device object, retrieve basic facts about the device.

    Parameters
    ----------
    device : Device
        The Junos device instance

    Returns
    -------
    dict
        A collection of basic facts about the device that will be stored in the database.
    """
    facts = {
        'os_version': device.facts['version'],
        'hostname': device.facts['hostname'],
        'device_sn': device.facts['serialnumber'],
        'device_model': device.facts['model'],
    }
    # Determine which server ip-address this device dialed out to.  Only the
    # first configured outbound-ssh server is considered; supporting multiple
    # configured clients would require matching on a specific client name.
    ossh_config = device.rpc.get_config(filter_xml='system/services/outbound-ssh')
    server_ipaddr = ossh_config.xpath('.//servers/name')[0].text
    # A route lookup toward that server yields the logical interface used for
    # management; the physical name is the part before the unit separator.
    route_info = device.rpc.get_route_information(destination=server_ipaddr)
    logical_ifname = route_info.xpath('.//via | .//nh-local-interface')[0].text
    facts['mgmt_interface'] = logical_ifname.partition('.')[0]
    # The ip-address assigned to the logical management interface.
    terse = device.rpc.get_interface_information(interface_name=logical_ifname, terse=True)
    facts['mgmt_ipaddr'] = terse.findtext('.//ifa-local').partition('/')[0].strip()
    # The MAC address of the physical management interface.
    media = device.rpc.get_interface_information(interface_name=facts['mgmt_interface'], media=True)
    facts['mgmt_macaddr'] = media.findtext('.//current-physical-address').strip()
    return facts
class OutboundSSHServer(object):
    """Accepts Junos outbound-ssh connections and processes each device on
    its own thread: establish NETCONF over the inbound socket, gather basic
    facts, and invoke the user-supplied callbacks."""
    NAME = 'outbound-ssh-server'
    DEFAULT_LISTEN_BACKLOG = 10
    logger = logger
    def __init__(self, ipaddr, port, login_user, login_password, on_device=None, on_error=None):
        """
        Parameters
        ----------
        ipaddr : str
            The server IP address
        port : int
            The server port to accept requests
        login_user : str
            The device login user name
        login_password : str
            The device login password
        on_device : callback
            User callback function that is invoked when the server has remote device NETCONF established
            and has retrieved basic facts. The callback takes two parameters, the PyEZ device instance,
            and a dictionary of gathered basic facts, for example:
            >>> import json
            >>>
            >>> def dump_facts(device, facts):
            >>>    print("GOT FACTS: ", json.dumps(facts, indent=3))
        on_error : callback
            User callback function that is invoked when an error occurs when attempting to
            connect or communicate with remote device. The callback takes two parameters, the PyEZ device
            instance (could be None) and the error exception instance, for example:
            >>> import json
            >>>
            >>> def dump_error(device, exc):
            >>>    print("GOT ERROR: ", str(exc))
        """
        self.thread = None
        self.socket = None
        self.login_user = login_user
        self.login_password = login_password
        self.bind_ipaddr = ipaddr
        self.bind_port = int(port)
        self.listen_backlog = OutboundSSHServer.DEFAULT_LISTEN_BACKLOG
        self._callbacks = dict()
        self.on_device = on_device    # callable also provided at :meth:`start`
        self.on_error = on_error      # callable also provided at :meth:`start`
    # ----------------------------------------------------------------------------------------------------------------
    # PROPERTIES
    # ----------------------------------------------------------------------------------------------------------------
    @property
    def name(self):
        return self.__class__.NAME
    @property
    def on_device(self):
        # Fall back to a no-op so callers can always invoke the callback.
        def no_op(device, facts):
            pass
        return self._callbacks['on_device'] or no_op
    @on_device.setter
    def on_device(self, callback):
        if callback and not callable(callback):
            raise ValueError('callback is not callable')
        self._callbacks['on_device'] = callback
    @property
    def on_error(self):
        # Fall back to a no-op so callers can always invoke the callback.
        def no_op(device, exc):
            pass
        return self._callbacks['on_error'] or no_op
    @on_error.setter
    def on_error(self, callback):
        if callback and not callable(callback):
            raise ValueError('callback is not callable')
        self._callbacks['on_error'] = callback
    # ----------------------------------------------------------------------------------------------------------------
    # PRIVATE METHODS
    # ----------------------------------------------------------------------------------------------------------------
    def _setup_server_socket(self):
        # SO_REUSEADDR lets the server restart without waiting for TIME_WAIT.
        s_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s_sock.bind((self.bind_ipaddr, self.bind_port))
        s_sock.listen(self.listen_backlog)
        self.socket = s_sock
    def _server_thread(self):
        """
        This is the running thread target for the outbound-ssh server. The purpose of this thread
        is to await inbound connections from the Junos devices and then spawn a specific thread for that device
        for future processing.
        """
        try:
            self._setup_server_socket()
        except Exception as exc:
            logger.error(f'{self.name}: failed to setup socket: %s' % str(exc))
            return
        while True:
            # await a device to make an outbound connection. The socket accept() returns a tuple
            # (socket, (device ipaddr, device port)). create a new thread to process the inbound with
            # this information
            try:
                in_sock, (in_addr, in_port) = self.socket.accept()
            except ConnectionAbortedError:
                # this triggers when the server socket is closed by the shutdown() method
                # NOTE(review): on some platforms closing the socket from
                # another thread raises a different OSError subclass — confirm.
                logger.info(f'{self.name} shutting down')
                return
            in_str = f'{in_addr}:{in_port}'
            dev_name = f'device-{in_str}'
            logger.info(f'{self.name}: accepted connection from {in_str}')
            # spawn a device-specific thread for further processing
            try:
                Thread(name=dev_name, target=self._device_thread,
                       kwargs=dict(in_sock=in_sock, in_addr=in_addr, in_port=in_port)).start()
            except RuntimeError as exc:
                # BUG FIX: was logger.err(), which does not exist and would
                # itself raise AttributeError inside this handler.
                logger.error(f'{self.name}: ERROR: failed to start processing {in_addr}: %s' % str(exc))
                in_sock.close()
                continue
        # NOT REACHABLE
        logger.critical('Unreachable code reached')
    def _device_thread(self, in_sock, in_addr, in_port):
        """
        This is a thread target function that is launched by the OSSH service. The purpose of this function
        is to make a NETCONF connection back to the device, gather basic facts, and store them into the database.
        If all goes well, the `facts` field in the database will contain the information about the device. If
        all does not go well, then there is an "error" field within the facts that the caller can examine. The
        most likely error reason is the provided user name and password values are not correct.
        Parameters
        ----------------
        in_addr: str
            the Junos device management IP address that connected to the OSSH service
        in_sock: socket
            the socket instance from the outbound connection.
        in_port: int
            the remote TCP port of the outbound connection.
        """
        via_str = f'{in_addr}:{in_port}'
        sock_fd = in_sock.fileno()
        # attempt to establish a NETCONF session over the already-open socket
        try:
            logger.info(f"establishing netconf to device via: {via_str}")
            dev = Device(sock_fd=sock_fd, user=self.login_user, password=self.login_password)
            dev.open()
        except ConnectError as exc:
            logger.error(f'Connection error to device via {via_str}: {exc.msg}')
            in_sock.close()
            return
        except Exception as exc:
            logger.error(f'unable to establish netconf to device via {via_str}: {str(exc)}')
            in_sock.close()
            # BUG FIX: without this return the thread fell through and used
            # `dev`, which may not even be bound if Device() itself raised.
            return
        try:
            logger.info(f"gathering basic facts from device via: {via_str}")
            facts = gather_basic_facts(dev)
            logger.info(json.dumps(facts, indent=3))
            # call user on-device callback
            self.on_device(dev, facts)
            logger.info(f"completed device with management IP address: {facts['mgmt_ipaddr']}")
            dev.close()
        except Exception as exc:
            error = f"ERROR: unable to process device {in_addr}:{in_port}: %s" % str(exc)
            logger.error(error)
            if self.on_error:
                self.on_error(dev, exc)
        finally:
            in_sock.close()
    # ----------------------------------------------------------------------------------------------------------------
    # PUBLIC METHODS
    # ----------------------------------------------------------------------------------------------------------------
    def start(self, on_device=None, on_error=None):
        """
        Start the ossh-server background thread.
        Examples
        --------
        Start the server, will use the existing server attributes.
        >>> ok, msg = server.start()
        Start the server, provide a new `on_device` callback.
        >>> import json
        >>>
        >>> def dump_facts(device, facts):
        >>>    print("GOT FACTS: ", json.dumps(facts, indent=3))
        >>>
        >>> ok, msg = server.start(on_device=dump_facts)
        Parameters
        ----------
        on_device : callback
            User callback function that is invoked when the server has remote device NETCONF established
            and has retrieved basic facts.
        on_error : callback
            User callback function that is invoked when error occurs when attempting to
            connect or communicate with remote device.
        Returns
        -------
        tuple
            ok : bool
                True if started ok, False otherwise
            msg : str
                message string
        """
        if self.socket:
            msg = f'{self.name} already running'
            logger.error(msg)
            return False, msg
        if on_device:
            self.on_device = on_device
        if on_error:
            self.on_error = on_error
        logger.info(f'{self.name}: starting on {self.bind_ipaddr}:{self.bind_port}')
        try:
            self.thread = Thread(name=self.name, target=self._server_thread)
            self.thread.start()
        except Exception as exc:
            msg = f'{self.name} unable to start: %s' % str(exc)
            logger.error(msg)
            return False, msg
        msg = f'{self.name}: started'
        logger.info(msg)
        return True, msg
    def stop(self):
        """
        Stops the ossh-server thread.  Safe to call even if the server was
        never started (previously this raised AttributeError on None.close()).
        Examples
        --------
        >>> server.stop()
        """
        if self.socket is None:
            logger.info(f'{self.name}: not running')
            return
        self.socket.close()
        self.thread = None
        self.socket = None
        logger.info(f'{self.name}: stopped')
|
from app import app
from config import RUN_HOST, RUN_PORT

# Start the development server only when this file is executed directly.
# Previously app.run() fired on import as well, which would block any module
# that merely imported this runner.
if __name__ == '__main__':
    app.run(host=RUN_HOST, port=RUN_PORT)
|
import multiprocessing
import queue
import threading
import json
from datetime import datetime
import enum
import logging
from sqlalchemy import MetaData, Table
from ..db.datamodel import db, app, project, portscanner, target, vulnLookupTable
from ..utils import Ports, PortStatus
class WorkerCommand(enum.Enum):
    """Control commands passed through the worker/reporter queues:
    STOP terminates a consumer loop, TEST requests a vuln test run."""
    STOP = 0
    TEST = 1
class Vulnscan():
    """Coordinates a vulnerability scan: spawns worker processes, feeds them
    (target, vuln) tasks from the database, and collects results through a
    reporter thread."""
    def __init__(self, project, vulns, vulnargs=None, procCnt=1, threadCnt=1):
        """
        project   -- project whose scans/targets are tested
        vulns     -- list of vuln plugin instances to run
        vulnargs  -- optional extra arguments handed to each vuln test.
                     BUG FIX: this parameter previously had no default, so
                     two-argument callers (e.g. the module __main__) raised
                     TypeError; defaulting to None is backward compatible.
        procCnt   -- number of worker processes
        threadCnt -- threads per worker process
        """
        self.project = project
        self.vulns = vulns
        self.vulnargs = vulnargs
        self.procCnt = procCnt
        self.threadCnt = threadCnt
        self.stopEvent = multiprocessing.Event()
        self.inQ = multiprocessing.Queue()
        self.outQ = multiprocessing.Queue()
        self.workers = []
        self.reporter = None
    def setup(self):
        # The reporter runs as a daemon thread in this process; workers are
        # separate processes created here but started in scan().
        self.reporter = VulnscanReporter(self.outQ, self.stopEvent, logObj = app.logger)
        self.reporter.daemon = True
        self.reporter.start()
        for i in range(self.procCnt):
            p = VulnscanWorker(self.inQ, self.outQ, self.stopEvent, threadCnt = self.threadCnt)
            p.daemon = True
            self.workers.append(p)
    def scan(self):
        """Run the full scan synchronously: enqueue every open-port target
        matching each vuln's trigger ports, then shut down workers and the
        reporter."""
        app.logger.debug('VULNSCAN scan starting')
        self.setup()
        app.logger.debug('VULNSCAN starting worker processes')
        for worker in self.workers:
            worker.start()
        app.logger.debug('VULNSCAN polling database for targets and starting scanning')
        for vuln in self.vulns:
            ports = Ports(vuln.plugindef.triggerPorts)
            for scan in self.project.scans.all():
                for t in scan.targets.filter(target.port.in_(ports.ports)).filter(target.port_status == PortStatus.OPEN).all():
                    # Each task gets a fresh vuln instance of the same class.
                    vulnclass = type(vuln)
                    vt = VulnscanTask(t, vulnclass(), WorkerCommand.TEST, args = self.vulnargs)
                    self.inQ.put(vt)
        app.logger.debug('VULNSCAN waiting for workers to finish...')
        # One STOP per worker thread across all processes.
        for i in range(self.procCnt * self.threadCnt):
            self.inQ.put(VulnscanTask(workerCmd = WorkerCommand.STOP))
        for worker in self.workers:
            worker.join()
        app.logger.debug('VULNSCAN waiting for reporter to finish...')
        self.outQ.put(VulnscanTask(workerCmd = WorkerCommand.STOP))
        self.reporter.join()
class LogEntry():
    """Lightweight log record shipped across the reporter queue:
    a level, the emitting component's name, and the message text."""
    def __init__(self, level, src, msg):
        self.level = level
        self.src = src
        self.msg = msg
class VulnscanTask():
    """Unit of work on the scan queues: a (target, vuln) pair plus a
    WorkerCommand and optional extra arguments for the test."""
    def __init__(self, target = None, vuln = None, workerCmd = None, args = None):
        self.workerCmd = workerCmd
        self.target = target
        self.vuln = vuln
        self.vulnargs = args
class VulnscanWorker(multiprocessing.Process):
    """Worker process: a pool of threads pulls VulnscanTask items off the
    input queue, runs each vuln test, and pushes results (and log entries)
    to the output queue."""
    def __init__(self, inQ, outQ, stopEvent, threadCnt = 1):
        multiprocessing.Process.__init__(self)
        self.name = 'VulnscanWorker'
        self.inQ = inQ
        self.outQ = outQ
        self.threadCnt = threadCnt
        self.stopEvent = stopEvent
        self.threads = []
    def log(self, level, msg):
        # Logging is funnelled through the output queue so the reporter in
        # the parent process can serialise it.
        self.outQ.put(LogEntry(level,self.name,msg))
    def setup(self):
        # Create (but do not start) the worker threads.
        for i in range(self.threadCnt):
            t = threading.Thread(target = self.work, args = ())
            t.daemon = True
            self.threads.append(t)
    def run(self):
        try:
            self.log(logging.INFO, 'Starting up...')
            self.setup()
            for t in self.threads:
                t.start()
            for t in self.threads:
                t.join()
        except Exception as e:
            self.log(logging.WARNING, 'Worker exception! Terminating! Reason: %s' % (str(e),))
    def work(self):
        """Thread main loop: consume tasks until STOP or stopEvent is set."""
        while not self.stopEvent.is_set():
            try:
                try:
                    vt = self.inQ.get(timeout = 1)
                except queue.Empty:
                    #this timeout exception is here to have a way of constantly checking the stopEvent
                    continue
                if vt.workerCmd == WorkerCommand.STOP:
                    return
                elif vt.workerCmd == WorkerCommand.TEST:
                    vt.vuln.target = vt.target
                    vt.vuln.args = vt.vulnargs
                    vt.vuln.test()
                    self.outQ.put(vt)
                else:
                    # BUG FIX: previously logged the constant WorkerCommand.TEST
                    # (and misspelled "Unkown") instead of the command received.
                    self.log(logging.WARNING, 'Unknown command! %s' % (str(vt.workerCmd), ))
                    continue
            except Exception as e:
                self.log(logging.WARNING, str(e))
                break
        self.log(logging.INFO, 'Stopping.')
class VulnscanReporter(threading.Thread):
    """Reporter thread: drains the output queue, forwarding LogEntry items
    to the logger and persisting completed VulnscanTask results."""
    def __init__(self, outQ, stopEvent, logObj = None):
        threading.Thread.__init__(self)
        self.outQ = outQ
        self.stopEvent = stopEvent
        self.logObj = logObj
        self.name = 'VulnscanReporter'
        self.dbmetadata = None
    def setup(self):
        # Placeholder for future database reflection.
        return
        #self.dbmetadata = db.MetaData()
        #self.dbmetadata.reflect(app=app)
    def log(self, level, msg):
        self.handleLog(LogEntry(level,self.name,msg))
    def run(self):
        self.log(logging.INFO, 'Starting up...')
        self.setup()
        while not self.stopEvent.is_set():
            try:
                try:
                    vt = self.outQ.get(timeout = 1)
                except queue.Empty:
                    #this timeout exception is here to have a way of constantly checking the stopEvent
                    continue
                if isinstance(vt, LogEntry):
                    self.handleLog(vt)
                elif isinstance(vt, VulnscanTask):
                    if vt.workerCmd == WorkerCommand.STOP:
                        return
                    elif vt.workerCmd == WorkerCommand.TEST:
                        self.handleVulnscan(vt)
                    else:
                        # BUG FIX: previously logged the constant
                        # WorkerCommand.TEST instead of the command received.
                        self.log(logging.INFO, 'Unknown command! %s' % (str(vt.workerCmd), ))
                        continue
                else:
                    self.log(logging.INFO, 'Unknown object landed in the outQ! Type is: %s' % (type(vt),))
            except Exception as e:
                self.log(logging.INFO, str(e))
                break
        self.log(logging.INFO, 'Stopping.')
    def handleLog(self, log):
        # Forward to the injected logger object (e.g. app.logger).
        self.logObj.log(log.level, '%s %s' % (log.src, log.msg))
    def handleVulnscan(self, vt):
        """Persist one completed vuln test result to the database."""
        self.log(logging.INFO, 'Result: %s' % (vt.vuln.toJSON(),))
        ### yeah, not the most elegant solution, probably there is a better way?
        vulnTable = vulnLookupTable[vt.vuln.plugindef.classname]
        t = vulnTable(vt.target, vt.vuln)
        db.session.add(t)
        db.session.commit()
# Ad-hoc manual test: run the SMB001 check against the first project.
# NOTE(review): the relative import below only works when the module is run
# with -m inside its package — confirm the intended invocation.
if __name__ == '__main__':
    from ..services.SMB.info.SMB001 import SMB001
    v = SMB001()
    #this is how you can override trigger ports
    #v.plugindef.triggerPorts = '443'
    p = project.query.first()
    # NOTE(review): Vulnscan.__init__ also takes a `vulnargs` parameter with
    # no default; this two-argument call raises TypeError as written — confirm.
    vs = Vulnscan(p,[v])
    vs.scan()
    print('Done!')
|
# Module with same api as built-in platform but different implementation of linux_distribution that uses distro's one.
from __future__ import absolute_import
from platform import *
def linux_distribution():
    """Return (id, version, codename) using the `distro` package, normalised
    to the legacy names the callers expect."""
    import distro
    dist_id, version, codename = distro.linux_distribution(full_distribution_name=False)
    # distro returns rhel instead of redhat and sles/suse_linux instead of
    # suse; oracle 5 returns enterpriseenterpriseserver.  Apply the aliases
    # in the same order as the original chained replaces.
    aliases = (
        ('rhel', 'redhat'),
        ('sles', 'suse'),
        ('suse_sap', 'suse'),
        ('suse_linux', 'suse'),
        ('enterpriseenterpriseserver', 'oracle'),
    )
    for old, new in aliases:
        dist_id = dist_id.replace(old, new)
    codename = codename.replace('Trusty Tahr', 'trusty')
    return (dist_id, version, codename)
|
"""
Implementation of a function the set the WIFI configuration.
This function is primarily developed for a Raspberry PI
"""
from cloudmesh.burn.wifi.raspberryos import Wifi as WifiRaspberryOs
from cloudmesh.burn.wifi.ubuntu import Wifi as WifiUbuntu
# noinspection PyPep8Naming
def Wifi(card_os="raspberry"):
    """Select the Wifi implementation class for the given card OS:
    Raspberry OS by default, Ubuntu otherwise."""
    return WifiRaspberryOs if card_os == "raspberry" else WifiUbuntu
|
from snuba.query.conditions import (
BooleanFunctions,
ConditionFunctions,
binary_condition,
get_first_level_and_conditions,
get_first_level_or_conditions,
is_binary_condition,
is_condition,
is_in_condition,
is_in_condition_pattern,
is_not_in_condition,
is_not_in_condition_pattern,
is_unary_condition,
unary_condition,
)
from snuba.query.dsl import literals_tuple
from snuba.query.expressions import Column, Expression, FunctionCall, Literal
from snuba.query.matchers import Column as ColumnPattern
from snuba.query.matchers import String
def test_expressions_from_basic_condition() -> None:
    """
    Iterates over the expressions in a basic condition
    f(t1.c1) = t1.c2
    """
    inner_col = Column(None, "t1", "c1")
    wrapped = FunctionCall(None, "f", (inner_col,))
    rhs_col = Column(None, "t1", "c2")
    cond = binary_condition(None, ConditionFunctions.EQ, wrapped, rhs_col)
    # Traversal is depth-first: children before their parents.
    assert list(cond) == [inner_col, wrapped, rhs_col, cond]
def test_aliased_expressions_from_basic_condition() -> None:
    """
    Iterates over the expressions in a basic condition when those expressions
    are aliased
    f(t1.c1) as a = t1.c2 as a2
    """
    inner_col = Column(None, "t1", "c1")
    aliased_func = FunctionCall("a", "f", (inner_col,))
    aliased_col = Column("a2", "t1", "c2")
    cond = binary_condition(None, ConditionFunctions.EQ, aliased_func, aliased_col)
    # Aliases do not alter traversal: children first, condition last.
    assert list(cond) == [inner_col, aliased_func, aliased_col, cond]
def test_map_expressions_in_basic_condition() -> None:
    """
    Change the column name over the expressions in a basic condition
    """
    c = Column(None, "t1", "c1")
    f1 = FunctionCall(None, "f", (c,))
    c2 = Column(None, "t1", "c2")
    c3 = Column(None, "t1", "c3")
    def replace_col(e: Expression) -> Expression:
        # Substitute c1 with c3; every other node passes through unchanged.
        if isinstance(e, Column) and e.column_name == "c1":
            return c3
        return e
    condition = binary_condition(None, ConditionFunctions.EQ, f1, c2)
    condition = condition.transform(replace_col)
    # The expected tree after substitution, built independently.
    condition_b = binary_condition(
        None, ConditionFunctions.EQ, FunctionCall(None, "f", (c3,)), c2,
    )
    ret = list(condition)
    expected = [c3, FunctionCall(None, "f", (c3,)), c2, condition_b]
    assert ret == expected
def test_nested_simple_condition() -> None:
    """
    Iterates and maps expressions over a complex Condition:
    (A=B OR A=B) AND (A=B OR A=B)
    """
    c1 = Column(None, "t1", "c1")
    c2 = Column(None, "t1", "c2")
    co1 = binary_condition(None, ConditionFunctions.EQ, c1, c2)
    c3 = Column(None, "t1", "c1")
    c4 = Column(None, "t1", "c2")
    co2 = binary_condition(None, ConditionFunctions.EQ, c3, c4)
    or1 = binary_condition(None, BooleanFunctions.OR, co1, co2)
    c5 = Column(None, "t1", "c1")
    c6 = Column(None, "t1", "c2")
    co4 = binary_condition(None, ConditionFunctions.EQ, c5, c6)
    c7 = Column(None, "t1", "c1")
    c8 = Column(None, "t1", "c2")
    co5 = binary_condition(None, ConditionFunctions.EQ, c7, c8)
    or2 = binary_condition(None, BooleanFunctions.OR, co4, co5)
    and1 = binary_condition(None, BooleanFunctions.AND, or1, or2)
    # Iteration is depth-first, left to right, children before parents.
    ret = list(and1)
    expected = [c1, c2, co1, c3, c4, co2, or1, c5, c6, co4, c7, c8, co5, or2, and1]
    assert ret == expected
    # Now transform every c2 reference into cX and compare against an
    # independently constructed expected tree.
    cX = Column(None, "t1", "cX")
    co1_b = binary_condition(None, ConditionFunctions.EQ, c1, cX)
    co2_b = binary_condition(None, ConditionFunctions.EQ, c3, cX)
    or1_b = binary_condition(None, BooleanFunctions.OR, co1_b, co2_b)
    co4_b = binary_condition(None, ConditionFunctions.EQ, c5, cX)
    co5_b = binary_condition(None, ConditionFunctions.EQ, c7, cX)
    or2_b = binary_condition(None, BooleanFunctions.OR, co4_b, co5_b)
    and1_b = binary_condition(None, BooleanFunctions.AND, or1_b, or2_b)
    def replace_col(e: Expression) -> Expression:
        # Substitute every reference to column c2 with cX.
        if isinstance(e, Column) and e.column_name == "c2":
            return cX
        return e
    and1 = and1.transform(replace_col)
    ret = list(and1)
    expected = [
        c1,
        cX,
        co1_b,
        c3,
        cX,
        co2_b,
        or1_b,
        c5,
        cX,
        co4_b,
        c7,
        cX,
        co5_b,
        or2_b,
        and1_b,
    ]
    assert ret == expected
def test_in_condition() -> None:
    """An IN condition is recognized and its pattern binds lhs and tuple."""
    tags_col = Column(None, None, "tags_key")
    rhs_tuple = literals_tuple(None, [Literal(None, "t1"), Literal(None, "t2")])
    in_condition = binary_condition(None, ConditionFunctions.IN, tags_col, rhs_tuple)
    assert is_in_condition(in_condition)
    pattern = is_in_condition_pattern(ColumnPattern(None, String("tags_key")))
    match = pattern.match(in_condition)
    assert match is not None
    assert match.expression("tuple") == rhs_tuple
    assert match.expression("lhs") == tags_col
def test_not_in_condition() -> None:
    """A NOT IN condition is recognized and its pattern binds lhs and tuple."""
    tags_col = Column(None, None, "tags_key")
    rhs_tuple = literals_tuple(None, [Literal(None, "t1"), Literal(None, "t2")])
    not_in_condition = binary_condition(
        None, ConditionFunctions.NOT_IN, tags_col, rhs_tuple
    )
    assert is_not_in_condition(not_in_condition)
    pattern = is_not_in_condition_pattern(ColumnPattern(None, String("tags_key")))
    match = pattern.match(not_in_condition)
    assert match is not None
    assert match.expression("tuple") == rhs_tuple
    assert match.expression("lhs") == tags_col
def test_is_x_condition_functions() -> None:
    # Binary-condition recognition is specific to the requested function.
    eq_condition = binary_condition(
        None, ConditionFunctions.EQ, Column(None, None, "test"), Literal(None, "1")
    )
    assert is_binary_condition(eq_condition, ConditionFunctions.EQ)
    assert not is_binary_condition(eq_condition, ConditionFunctions.NEQ)
    # Same for unary conditions; a binary condition is never a unary one.
    un_condition = unary_condition(
        None, ConditionFunctions.IS_NOT_NULL, Column(None, None, "test")
    )
    assert is_unary_condition(un_condition, ConditionFunctions.IS_NOT_NULL)
    assert not is_unary_condition(un_condition, ConditionFunctions.IS_NULL)
    assert not is_unary_condition(eq_condition, ConditionFunctions.IS_NOT_NULL)
    # A function that merely looks like a condition is not recognized as one.
    almost_condition = FunctionCall(None, "isNotNullish", (Column(None, None, "test"),))
    assert is_condition(eq_condition)
    assert is_condition(un_condition)
    assert not is_condition(almost_condition)
def test_first_level_conditions() -> None:
    c1 = binary_condition(
        None,
        ConditionFunctions.EQ,
        Column(None, "table1", "column1"),
        Literal(None, "test"),
    )
    c2 = binary_condition(
        None,
        ConditionFunctions.EQ,
        Column(None, "table2", "column2"),
        Literal(None, "test"),
    )
    c3 = binary_condition(
        None,
        ConditionFunctions.EQ,
        Column(None, "table3", "column3"),
        Literal(None, "test"),
    )
    # Nested ANDs are flattened into a single list of leaf conditions.
    cond = binary_condition(
        None,
        BooleanFunctions.AND,
        binary_condition(None, BooleanFunctions.AND, c1, c2),
        c3,
    )
    assert get_first_level_and_conditions(cond) == [c1, c2, c3]
    # An AND nested below an OR is kept intact by the OR accessor.
    cond = binary_condition(
        None,
        BooleanFunctions.OR,
        binary_condition(None, BooleanFunctions.AND, c1, c2),
        c3,
    )
    assert get_first_level_or_conditions(cond) == [
        binary_condition(None, BooleanFunctions.AND, c1, c2),
        c3,
    ]
|
# Windows-only demo: call the Microsoft C runtime's printf through ctypes.
from ctypes import *
# cdll.msvcrt loads msvcrt.dll; this attribute access fails on non-Windows hosts.
msvcrt = cdll.msvcrt
message = b"Hello, world!"
# printf takes byte strings; its output goes to the C stdout stream, which may
# not interleave predictably with Python's own buffered stdout.
msvcrt.printf(b"Testing: %s\n", message)
import matplotlib.pyplot as plt
import queue as Q
utilNum = 0
pktDrop = 0
# class for pcket
# class for packet
class DATAPACKET:
    """A single packet: identity, generation time, and queue timestamps."""
    def __init__(self, pid=0, gtime=0.0, sourceid=0):
        self.packetId = pid
        self.sourceId = sourceid
        self.gentime = gtime
        # BUG FIX: this attribute was misspelled `qInTIme`, so the `qInTime`
        # name read by the simulation was never initialised here.
        self.qInTime = -1    # time the packet enters the switch queue
        self.qOutTime = -1   # time the packet leaves the switch queue
# class for source
# class for source
class DATASOURCE:
    """A traffic source generating packets at a fixed rate onto a link of
    bandwidth `bs`."""
    def __init__(self, lamda, sid, bs):
        self.sourceId = sid
        self.genRate = lamda
        self.bs = bs
# class for switch
# class for switch
class DATASWITCH:
    """The switch: output-link bandwidth plus current queue occupancy."""
    def __init__(self, bwidth):
        self.qSize = 0        # queue starts empty
        self.bss = bwidth
# class for various Event
# class for various Event
class EVENT:
    """A simulation event: an event type id, the packet it concerns, and the
    time it occurs.  Ordering is by time so a PriorityQueue pops the
    earliest event first."""
    def __init__(self, eid, pid, t):
        self.curTime = t
        self.eId = eid
        self.pId = pid
    def __lt__(self, other):
        # Earliest-time-first ordering for queue.PriorityQueue.
        return self.curTime < other.curTime
# function to calculate pktLossRate
# function to calculate pktLossRate
def calculatePktLossRate(nSource,bs,bss,pktLength,source,simTime,fixedQueueSize):
    """Run one event-driven simulation and return the packet-drop ratio
    (dropped / arrived at the switch).  Also updates the module-level
    `pktDrop` counter as a side effect."""
    packet = []
    avgQueuingDelay = 0.0
    swich = DATASWITCH(bss)
    global pktDrop
    pktDrop = 0
    # pq is priority queue on the basis of event current time
    pq = Q.PriorityQueue()
    ttime = 0.0
    # generating first packet from every source at an fixed interval
    for i in range(nSource):
        packet.append(DATAPACKET(i, ttime, i))
        pq.put(EVENT(0, i, ttime))
        # tiny stagger so initial events do not share an identical timestamp
        ttime += 0.000001
    pcount = 0
    pktTot = nSource
    lastLeftTime = 0
    executionTime = 0.0
    packetarrived=0
    # Simulating for fixed time
    # Event0 = generation of packet
    # Event1 = reaching Queue time
    # Event2 = leaving queue time
    # Event3 = reaching sink time
    curTime = 0
    while (curTime < simTime):
        x = pq.get()
        pid = x.pId
        curTime = x.curTime
        #executionTime = max(executionTime,curTime)
        # Event 0 -> (Event0,Event1)
        if x.eId == 0:
            # schedule the source's next packet and this packet's queue arrival
            rate = source[packet[pid].sourceId].genRate
            pq.put(EVENT(0, pktTot, curTime + 1 / rate))
            packet.append(DATAPACKET(pktTot, curTime + 1 / rate, packet[pid].sourceId))
            pktTot = pktTot + 1
            pq.put(EVENT(1, pid, curTime + pktLength/bs))
            packet[pid].qInTime = curTime + pktLength / bs
        # Event 1 -> Event2,and if queue is full packet drop
        elif x.eId == 1:
            packetarrived = packetarrived+1
            # NOTE(review): `<=` lets occupancy reach fixedQueueSize + 1 —
            # confirm whether the boundary should be a strict `<`.
            if(swich.qSize<=fixedQueueSize):
                # rtime: time to drain the packets already queued ahead
                rtime = (swich.qSize * pktLength) / bss
                tx = 0
                if packet[pid].qInTime - lastLeftTime < (pktLength / bss):
                    # residual transmission time of the packet currently leaving
                    tx = max(0, (pktLength / bss) - (packet[pid].qInTime - lastLeftTime))
                if lastLeftTime == 0:
                    tx = 0
                pq.put(EVENT(2, pid, curTime + rtime + tx))
                packet[pid].qOutTime = curTime + rtime + tx
                swich.qSize = swich.qSize + 1
            else:
                #counting pkt drop
                pktDrop = pktDrop+1
        # Event2 -> Event3
        elif x.eId == 2:
            swich.qSize = swich.qSize - 1
            lastLeftTime = curTime
            sTime = curTime + pktLength / bss
            pq.put(EVENT(3, pid, sTime))
        else:
            # Event3: packet reached the sink; accumulate queuing delay
            avgQueuingDelay = packet[pid].qOutTime - packet[pid].qInTime + avgQueuingDelay
            pcount = pcount + 1
    #return pktDroprate
    return pktDrop/packetarrived
def main():
    """Prompt for simulation parameters (or use defaults), sweep the
    switch-to-sink bandwidth, and plot packet-loss rate versus the resulting
    utilization factor."""
    print("Enter 0 to use default value or 1 to use own")
    resp = int(input())
    if resp == 0:
        # nsource = number of source
        nSource = 4
        # bs = bandwidth between source and switch in bit
        bs = 1e2
        # bss = bandwidth between switch and sink in bit
        bsslow = 10
        bsshigh = 500
        # pktLength = size of each packet in bit
        pktLength = 2
        # grate = packet genrate
        grate = 20
        # simulation time
        simTime = 200
        #max queue size in switch
        fixedQueueSize = 50
    else :
        nSource = int(input("Enter Number of Source :"))
        bs = float(input("Enter bandwdth between Source and switch(bs) in bit:"))
        bsslow = float(input("Enter bandwidth(lower bound) between switch and sink(bss) in bit:"))
        bsshigh = float(input("Enter bandwidth(upper bound) between switch and sink(bss) in bit:"))
        pktLength = int(input("Enter packet length in bit(pktlength) :"))
        grate = int(input("Enter packet generation rate:"))
        # offered load per source must stay below the access-link bandwidth
        if pktLength * grate >= bs:
            print("pktLength*grater should be less than bs")
            return 0
        simTime = int(input("Enter Simulation time:"))
        fixedQueueSize = int(input("Enter Queue size in switch:"))
    # utilNum = numerator of utilization factor i.e arrival rate
    global utilNum
    # x and y holds value of delay and utilizationfactor
    x = []
    y = []
    source = []
    for i in range(nSource):
        source.append(DATASOURCE(grate, i, bs))
        utilNum = utilNum+grate
    # varying nbss i.e bss for plotting
    nbss = bsslow
    while(nbss<=bsshigh):
        loadfactor=(utilNum*pktLength)/nbss
        pktDropRate = calculatePktLossRate(nSource,bs,nbss,pktLength,source,simTime,fixedQueueSize)
        x.append(loadfactor)
        y.append(pktDropRate)
        print(loadfactor," ",pktDropRate)
        # 980 sweep points between the bandwidth bounds
        nbss = nbss+(bsshigh-bsslow)/980
    # plotting curve
    plt.plot(x,y)
    plt.xlabel("Utilization Factor")
    plt.ylabel("Packet loss rate")
    plt.text(x[len(x) - 1], y[len(y) - 1], "nSource=4,bs(bit)=1e2,lambda(gen rate)=20,pktlength=2 bit, \n bsslow(bit/s)=10,bsshigh=500")
    plt.title("Packet loss rate vs Utilization Factor")
    plt.show()
# run only when executed as a script
if __name__=="__main__":
    main()
|
import toml
import watcher
import signal
import watcher
import requests
import os
from argparse import ArgumentParser
# Command-line interface for the QueueWatch client.
parser = ArgumentParser(description="QueueWatchClient")
parser.add_argument("path_to_google_home", type=str, help="URL to Google Home")
parser.add_argument("--local", action='store_true', help="Run with using local redis server")
parser.add_argument("--debug", action='store_true', help="Run with debug mode")
parser.add_argument("--wait", type=int, help="Wait time between songs [sec]")
# TODO: Implements multi target selection available
# TODO: Implements dynamic address modification (based on HTTP request)
# TARGETS = []
# Google Home URL; populated from the CLI in the __main__ block below.
TARGET = None
# Number of HTTP attempts callback() makes before giving up.
TRYTIMES = 3
def callback(data):
    """
    callback function: push the song URL from a queue event to the Google
    Home endpoint, retrying up to TRYTIMES times.

    :param data: parsed json data (dict); data["song"]["music_url"] is sent
                 as the `text` query parameter
    :return: None on success
    :raises TimeoutError: when no attempt returned HTTP 200
    """
    # The request parameters do not change between retries — build them once
    # instead of on every loop iteration.
    params = {"text": data["song"]["music_url"]}
    for _ in range(TRYTIMES):
        res = requests.get(TARGET, params=params)
        if res.status_code == requests.codes.ok:
            return
    # Include context instead of raising a bare TimeoutError.
    raise TimeoutError("no successful response from %s after %d attempts"
                       % (TARGET, TRYTIMES))
if __name__ == '__main__':
    args = parser.parse_args()
    # The callback reads TARGET as a module-level global.
    TARGET = args.path_to_google_home
    # Redis connection settings live alongside this script in config.toml.
    conf = toml.load(os.path.join(os.path.dirname(__file__), "config.toml"))
    if args.local:
        host = conf["redis-local"]["ip"]
        port = conf["redis-local"]["port"]
        password = None
    else:
        host = conf["redis"]["ip"]
        port = conf["redis"]["port"]
        password = conf["redis"]["password"]
    # Blocks: the watcher loop runs in this thread (with_thread=False).
    w = watcher.PlayQueueWatcher(host, port, password=password, debug=args.debug, wait=args.wait)
    w.start(callback, with_thread=False)
#
# Contains the logic to decide, based on the policy, which graphs
# to transform to JitCodes or not.
#
from rpython.jit.codewriter import support
from rpython.jit.codewriter.jitcode import JitCode
from rpython.jit.codewriter.effectinfo import (VirtualizableAnalyzer,
QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze,
EffectInfo, CallInfoCollection)
from rpython.translator.simplify import get_funcobj, get_functype
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.translator.backendopt.canraise import RaiseAnalyzer
from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer
class CallControl(object):
    """Decides, based on the JIT policy, which RPython graphs are turned
    into JitCodes, and computes call descriptors/effect info for the calls
    between them.

    NOTE: this is RPython source targeting Python 2 (it uses `print`
    statements below); do not run it under Python 3.
    """
    virtualref_info = None # optionally set from outside
    has_libffi_call = False # default value
    # NOTE(review): mutable default argument `jitdrivers_sd=[]` is shared
    # across calls; harmless here only because it is never mutated.
    def __init__(self, cpu=None, jitdrivers_sd=[]):
        assert isinstance(jitdrivers_sd, list) # debugging
        self.cpu = cpu
        self.jitdrivers_sd = jitdrivers_sd
        self.jitcodes = {} # map {graph: jitcode}
        self.unfinished_graphs = [] # list of graphs with pending jitcodes
        self.callinfocollection = CallInfoCollection()
        if hasattr(cpu, 'rtyper'): # for tests
            self.rtyper = cpu.rtyper
            translator = self.rtyper.annotator.translator
            # Static analyzers used by getcalldescr() to build effect info.
            self.raise_analyzer = RaiseAnalyzer(translator)
            self.readwrite_analyzer = ReadWriteAnalyzer(translator)
            self.virtualizable_analyzer = VirtualizableAnalyzer(translator)
            self.quasiimmut_analyzer = QuasiImmutAnalyzer(translator)
            self.randomeffects_analyzer = RandomEffectsAnalyzer(translator)
        #
        for index, jd in enumerate(jitdrivers_sd):
            jd.index = index
    def find_all_graphs(self, policy):
        """Compute (and cache) the set of graphs the JIT will look inside,
        starting from the portal graphs and following 'regular' calls that
        the policy allows."""
        try:
            return self.candidate_graphs
        except AttributeError:
            pass
        is_candidate = policy.look_inside_graph
        assert len(self.jitdrivers_sd) > 0
        todo = [jd.portal_graph for jd in self.jitdrivers_sd]
        if hasattr(self, 'rtyper'):
            for oopspec_name, ll_args, ll_res in support.inline_calls_to:
                c_func, _ = support.builtin_func_for_spec(self.rtyper,
                                                          oopspec_name,
                                                          ll_args, ll_res)
                todo.append(c_func.value._obj.graph)
        candidate_graphs = set(todo)
        def callers():
            """Debug helper (Python 2 prints): show the chain of callers
            that led to the current `top_graph`."""
            graph = top_graph
            print graph
            while graph in coming_from:
                graph = coming_from[graph]
                print '<-', graph
        coming_from = {}
        # Breadth-less worklist traversal of the call graph.
        while todo:
            top_graph = todo.pop()
            for _, op in top_graph.iterblockops():
                if op.opname not in ("direct_call", "indirect_call", "oosend"):
                    continue
                kind = self.guess_call_kind(op, is_candidate)
                # use callers() to view the calling chain in pdb
                if kind != "regular":
                    continue
                for graph in self.graphs_from(op, is_candidate):
                    if graph in candidate_graphs:
                        continue
                    assert is_candidate(graph)
                    todo.append(graph)
                    candidate_graphs.add(graph)
                    coming_from[graph] = top_graph
        self.candidate_graphs = candidate_graphs
        return candidate_graphs
    def graphs_from(self, op, is_candidate=None):
        """Return the list of candidate graphs possibly called by `op`,
        or None for a residual call (no graph to look into)."""
        if is_candidate is None:
            is_candidate = self.is_candidate
        if op.opname == 'direct_call':
            funcobj = get_funcobj(op.args[0].value)
            graph = funcobj.graph
            if is_candidate(graph):
                return [graph] # common case: look inside this graph
        else:
            assert op.opname in ('indirect_call', 'oosend')
            if op.opname == 'indirect_call':
                graphs = op.args[-1].value
            else:
                v_obj = op.args[1].concretetype
                graphs = v_obj._lookup_graphs(op.args[0].value)
            #
            if graphs is None:
                # special case: handle the indirect call that goes to
                # the 'instantiate' methods.  This check is a bit imprecise
                # but it's not too bad if we mistake a random indirect call
                # for the one to 'instantiate'.
                from rpython.rtyper.lltypesystem import rclass
                CALLTYPE = op.args[0].concretetype
                if (op.opname == 'indirect_call' and len(op.args) == 2 and
                    CALLTYPE == rclass.OBJECT_VTABLE.instantiate):
                    graphs = list(self._graphs_of_all_instantiate())
            #
            if graphs is not None:
                result = []
                for graph in graphs:
                    if is_candidate(graph):
                        result.append(graph)
                if result:
                    return result # common case: look inside these graphs,
                                  # and ignore the others if there are any
        # residual call case: we don't need to look into any graph
        return None
    def _graphs_of_all_instantiate(self):
        """Yield the 'instantiate' graph of every known vtable."""
        for vtable in self.rtyper.lltype2vtable.values():
            if vtable.instantiate:
                yield vtable.instantiate._obj.graph
    def guess_call_kind(self, op, is_candidate=None):
        """Classify a call operation as 'recursive', 'residual', 'builtin'
        or 'regular' (the latter meaning: inline-able into jitcodes)."""
        if op.opname == 'direct_call':
            funcptr = op.args[0].value
            if self.jitdriver_sd_from_portal_runner_ptr(funcptr) is not None:
                return 'recursive'
            funcobj = get_funcobj(funcptr)
            if getattr(funcobj, 'graph', None) is None:
                return 'residual'
            targetgraph = funcobj.graph
            if hasattr(targetgraph, 'func'):
                # must never produce JitCode for a function with
                # _gctransformer_hint_close_stack_ set!
                if getattr(targetgraph.func,
                           '_gctransformer_hint_close_stack_', False):
                    return 'residual'
                if hasattr(targetgraph.func, 'oopspec'):
                    return 'builtin'
        elif op.opname == 'oosend':
            SELFTYPE, methname, opargs = support.decompose_oosend(op)
            if SELFTYPE.oopspec_name is not None:
                return 'builtin'
        if self.graphs_from(op, is_candidate) is None:
            return 'residual'
        return 'regular'
    def is_candidate(self, graph):
        # used only after find_all_graphs()
        return graph in self.candidate_graphs
    def grab_initial_jitcodes(self):
        """Create the main jitcode of each jitdriver's portal graph."""
        for jd in self.jitdrivers_sd:
            jd.mainjitcode = self.get_jitcode(jd.portal_graph)
            jd.mainjitcode.is_portal = True
    def enum_pending_graphs(self):
        """Yield (graph, jitcode) pairs whose jitcode body is not yet
        written; pops from the worklist, so new graphs discovered while
        writing are processed too."""
        while self.unfinished_graphs:
            graph = self.unfinished_graphs.pop()
            yield graph, self.jitcodes[graph]
    def get_jitcode(self, graph, called_from=None):
        """Return the (possibly freshly created, still empty) JitCode for
        `graph`, registering it in the pending worklist on first request."""
        # 'called_from' is only one of the callers, used for debugging.
        try:
            return self.jitcodes[graph]
        except KeyError:
            # must never produce JitCode for a function with
            # _gctransformer_hint_close_stack_ set!
            if hasattr(graph, 'func') and getattr(graph.func,
                    '_gctransformer_hint_close_stack_', False):
                raise AssertionError(
                    '%s has _gctransformer_hint_close_stack_' % (graph,))
            #
            fnaddr, calldescr = self.get_jitcode_calldescr(graph)
            jitcode = JitCode(graph.name, fnaddr, calldescr,
                              called_from=called_from)
            self.jitcodes[graph] = jitcode
            self.unfinished_graphs.append(graph)
            return jitcode
    def get_jitcode_calldescr(self, graph):
        """Return the calldescr that describes calls to the 'graph'.
        This returns a calldescr that is appropriate to attach to the
        jitcode corresponding to 'graph'. It has no extra effectinfo,
        because it is not needed there; it is only used by the blackhole
        interp to really do the call corresponding to 'inline_call' ops.
        """
        fnptr = self.rtyper.type_system.getcallable(graph)
        FUNC = get_functype(lltype.typeOf(fnptr))
        assert self.rtyper.type_system.name == "lltypesystem"
        fnaddr = llmemory.cast_ptr_to_adr(fnptr)
        NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void]
        calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS),
                                         FUNC.RESULT, EffectInfo.MOST_GENERAL)
        return (fnaddr, calldescr)
    def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE,
                     extraeffect=None):
        """Return the calldescr that describes all calls done by 'op'.
        This returns a calldescr that we can put in the corresponding
        call operation in the calling jitcode. It gets an effectinfo
        describing the effect of the call: which field types it may
        change, whether it can force virtualizables, whether it can
        raise, etc.
        """
        NON_VOID_ARGS = [x.concretetype for x in op.args[1:]
                         if x.concretetype is not lltype.Void]
        RESULT = op.result.concretetype
        # check the number and type of arguments
        FUNC = get_functype(op.args[0].concretetype)
        ARGS = FUNC.ARGS
        assert NON_VOID_ARGS == [T for T in ARGS if T is not lltype.Void]
        assert RESULT == FUNC.RESULT
        # ok
        # get the 'elidable' and 'loopinvariant' flags from the function object
        elidable = False
        loopinvariant = False
        call_release_gil_target = llmemory.NULL
        if op.opname == "direct_call":
            funcobj = get_funcobj(op.args[0].value)
            assert getattr(funcobj, 'calling_conv', 'c') == 'c', (
                "%r: getcalldescr() with a non-default call ABI" % (op,))
            func = getattr(funcobj, '_callable', None)
            elidable = getattr(func, "_elidable_function_", False)
            loopinvariant = getattr(func, "_jit_loop_invariant_", False)
            if loopinvariant:
                assert not NON_VOID_ARGS, ("arguments not supported for "
                                           "loop-invariant function!")
            if getattr(func, "_call_aroundstate_target_", None):
                call_release_gil_target = func._call_aroundstate_target_
                call_release_gil_target = llmemory.cast_ptr_to_adr(
                    call_release_gil_target)
        # build the extraeffect
        random_effects = self.randomeffects_analyzer.analyze(op)
        if random_effects:
            extraeffect = EffectInfo.EF_RANDOM_EFFECTS
        # random_effects implies can_invalidate
        can_invalidate = random_effects or self.quasiimmut_analyzer.analyze(op)
        if extraeffect is None:
            # No effect forced by the caller or by random effects:
            # derive the most precise one from the analyzers.
            if self.virtualizable_analyzer.analyze(op):
                extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
            elif loopinvariant:
                extraeffect = EffectInfo.EF_LOOPINVARIANT
            elif elidable:
                if self._canraise(op):
                    extraeffect = EffectInfo.EF_ELIDABLE_CAN_RAISE
                else:
                    extraeffect = EffectInfo.EF_ELIDABLE_CANNOT_RAISE
            elif self._canraise(op):
                extraeffect = EffectInfo.EF_CAN_RAISE
            else:
                extraeffect = EffectInfo.EF_CANNOT_RAISE
        #
        effectinfo = effectinfo_from_writeanalyze(
            self.readwrite_analyzer.analyze(op), self.cpu, extraeffect,
            oopspecindex, can_invalidate, call_release_gil_target)
        #
        assert effectinfo is not None
        if elidable or loopinvariant:
            assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
            # XXX this should also say assert not can_invalidate, but
            # it can't because our analyzer is not good enough for now
            # (and getexecutioncontext() can't really invalidate)
        #
        return self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS), RESULT,
                                    effectinfo)
    def _canraise(self, op):
        """Conservatively decide whether the call in `op` can raise."""
        if op.opname == 'pseudo_call_cannot_raise':
            return False
        try:
            return self.raise_analyzer.can_raise(op)
        except lltype.DelayedPointer:
            return True # if we need to look into the delayed ptr that is
                        # the portal, then it's certainly going to raise
    def calldescr_canraise(self, calldescr):
        """Whether a call with this calldescr can raise (from effectinfo)."""
        effectinfo = calldescr.get_extra_info()
        return effectinfo.check_can_raise()
    def jitdriver_sd_from_portal_graph(self, graph):
        """Find the jitdriver whose portal graph is `graph`, else None."""
        for jd in self.jitdrivers_sd:
            if jd.portal_graph is graph:
                return jd
        return None
    def jitdriver_sd_from_portal_runner_ptr(self, funcptr):
        """Find the jitdriver whose portal runner is `funcptr`, else None."""
        for jd in self.jitdrivers_sd:
            if funcptr is jd.portal_runner_ptr:
                return jd
        return None
    def jitdriver_sd_from_jitdriver(self, jitdriver):
        """Find the state dict wrapping `jitdriver`, else None."""
        for jd in self.jitdrivers_sd:
            if jd.jitdriver is jitdriver:
                return jd
        return None
    def get_vinfo(self, VTYPEPTR):
        """Return the unique virtualizable_info matching VTYPEPTR, or None.
        Asserts that at most one jitdriver matches."""
        seen = set()
        for jd in self.jitdrivers_sd:
            if jd.virtualizable_info is not None:
                if jd.virtualizable_info.is_vtypeptr(VTYPEPTR):
                    seen.add(jd.virtualizable_info)
        if seen:
            assert len(seen) == 1
            return seen.pop()
        else:
            return None
    def could_be_green_field(self, GTYPE, fieldname):
        """Whether (GTYPE, fieldname) is declared a green field by any
        jitdriver."""
        GTYPE_fieldname = (GTYPE, fieldname)
        for jd in self.jitdrivers_sd:
            if jd.greenfield_info is not None:
                if GTYPE_fieldname in jd.greenfield_info.green_fields:
                    return True
        return False
|
# Minimum Unavailable Coin Problem
# (30m, 128MB)
# The key is understanding the subset-generation logic.
# 1 <= N <= 1,000
n = int(input())
data = list(map(int, input().split()))
# Scratch include/exclude marks, one per coin, mutated by make_power_set().
flag = [0] * n
# Collects the sum of every enumerated subset (including the empty one).
power_set_sum_list = []
# The power set has 2^n - 1 non-empty subsets (2^n counting the empty set).
def make_power_set(flag, index):
    """Recursively enumerate every subset of the module-level ``data``.

    ``flag`` is a scratch 0/1 list marking which coins are included;
    ``index`` is the next position to decide.  For each complete subset,
    its sum is appended to the module-level ``power_set_sum_list``.

    Runs in O(2^n) time — fine for small n, infeasible near the stated
    upper bound of n = 1,000.
    """
    if n == index:
        # All n include/exclude choices made: record this subset's sum.
        # (The original accumulated into a variable named `sum`, shadowing
        # the builtin; use the builtin with a generator expression instead.)
        power_set_sum_list.append(sum(data[i] for i in range(n) if flag[i]))
        return
    # Branch 1: include coin `index`.
    flag[index] = 1
    make_power_set(flag, index+1)
    # Branch 2: exclude coin `index`.
    flag[index] = 0
    make_power_set(flag, index+1)
# Enumerate all subset sums, then find the smallest positive integer that
# is NOT a subset sum.
make_power_set(flag, 0)
power_set_sum_list = sorted(power_set_sum_list)
# O(1) membership test instead of a linear scan per candidate value.
attainable = set(power_set_sum_list)
max_sum = power_set_sum_list[-1]
# BUG FIX: the original scanned range(1, max_sum), so when every value in
# 1..max_sum is attainable it printed 0; the correct answer in that case
# is max_sum + 1.
result = max_sum + 1
for i in range(1, max_sum + 1):
    if i not in attainable:
        result = i
        break
print(result)
|
from torch import Tensor
import torch.nn as nn
from gnnff.nn.functional import shifted_softplus
from gnnff.nn.base import Dense
__all__ = ["OutputModuleError", "ForceMapping", "EnergyMapping"]
class OutputModuleError(Exception):
    """Raised for errors in the output-mapping modules of this package."""
class ForceMapping(nn.Module):
    """Predict interatomic force vectors from edge embeddings.

    A small MLP maps each edge embedding to a scalar force magnitude,
    which is then multiplied by the edge's unit vector and summed over
    neighbors to yield the force acting on each atom.

    Attributes
    ----------
    n_edge_feature : int
        dimension of the embedded edge features.
    n_layers : int, default=2
        number of output layers.
    activation : callable or None, default=gnnff.nn.activation.shifted_softplus
        activation applied to every hidden layer; the final layer applies
        no activation.
    """

    def __init__(
        self,
        n_edge_feature: int,
        n_layers: int = 2,
        activation=shifted_softplus,
    ) -> None:
        super().__init__()
        # Layer widths halve at each step (never dropping below 1) and
        # finish with a single output neuron.
        widths = []
        width = n_edge_feature
        for _ in range(n_layers):
            widths.append(width)
            width = max(1, width // 2)
        widths.append(1)
        mlp = [
            Dense(widths[k], widths[k + 1], activation=activation)
            for k in range(n_layers - 1)
        ]
        # Final projection: no activation on the output.
        mlp.append(Dense(widths[-2], widths[-1], activation=None))
        self.out_net = nn.Sequential(*mlp)

    def forward(self, last_edge_embedding: Tensor, unit_vecs: Tensor) -> Tensor:
        """
        Calculate the interatomic forces.

        B : Batch size
        At : Total number of atoms in the batch
        Nbr : Total number of neighbors of each atom

        Parameters
        ----------
        last_edge_embedding : torch.Tensor
            calculated edge embedding tensor of (B x At x Nbr x n_edge_features) shape.
        unit_vecs : torch.Tensor
            unit vectors of each edge.

        Returns
        -------
        predicted_forces : torch.Tensor
            predicted interatomic forces for each atom, of (B x At x 3) shape.
        """
        # Scalar magnitude per edge, broadcast to the 3 spatial components.
        magnitude = self.out_net(last_edge_embedding)
        magnitude = magnitude.expand(-1, -1, -1, 3)
        # Direction comes from the edge unit vectors; sum over neighbors.
        forces = magnitude * unit_vecs
        return forces.sum(dim=2)
class EnergyMapping(nn.Module):
    """Predict the total energy from node embeddings.

    A small MLP maps each node embedding to a per-atom energy; the atomic
    energies are summed over the batch's atoms to give the total energy.

    Attributes
    ----------
    n_node_feature : int
        dimension of the embedded node features.
    n_layers : int, default=2
        number of output layers.
    activation : callable or None, default=gnnff.nn.activation.shifted_softplus
        activation applied to every hidden layer; the final layer applies
        no activation.
    """

    def __init__(
        self,
        n_node_feature: int,
        n_layers: int = 2,
        activation=shifted_softplus,
    ) -> None:
        super().__init__()
        # Layer widths halve at each step (never dropping below 1) and
        # finish with a single output neuron.
        widths = []
        width = n_node_feature
        for _ in range(n_layers):
            widths.append(width)
            width = max(1, width // 2)
        widths.append(1)
        mlp = [
            Dense(widths[k], widths[k + 1], activation=activation)
            for k in range(n_layers - 1)
        ]
        # Final projection: no activation on the output.
        mlp.append(Dense(widths[-2], widths[-1], activation=None))
        self.out_net = nn.Sequential(*mlp)

    def forward(self, last_node_embedding: Tensor) -> Tensor:
        """
        Calculate the total energy of the cell.

        B : Batch size
        At : Total number of atoms in the batch

        Parameters
        ----------
        last_node_embedding : torch.Tensor
            calculated node embedding of (B x At x Nbr x n_node_features) shape.

        Returns
        -------
        predicted_energy : torch.Tensor
            predicted total energy with (B x 1) shape.
        """
        # Per-atom energies from the MLP, summed over atoms in the batch.
        atomic_energies = self.out_net(last_node_embedding)
        return atomic_energies.sum(dim=1)
|
r"""
\file se2_sam.py.
Created on: Jan 11, 2021
\author: Jeremie Deray
---------------------------------------------------------
This file is:
(c) 2021 Jeremie Deray
This file is part of `manif`, a C++ template-only library
for Lie theory targeted at estimation for robotics.
Manif is:
(c) 2021 Jeremie Deray
---------------------------------------------------------
---------------------------------------------------------
Demonstration example:
2D smoothing and mapping (SAM).
See se3_sam.py for a 3D version of this example.
See se2_localization.py for a simpler localization example using EKF.
------------------------------------------------------------
This demo corresponds to the application
in chapter V, section B, in the paper Sola-18,
[https://arxiv.org/abs/1812.01537].
The following is an abstract of the content of the paper.
Please consult the paper for better reference.
We consider a robot in 2D space surrounded by a small
number of punctual landmarks or _beacons_.
The robot receives control actions in the form of axial
and angular velocities, and is able to measure the location
of the beacons w.r.t its own reference frame.
The robot pose X_i is in SE(2) and the beacon positions b_k in R^2,
X_i = | R_i t_i | // position and orientation
| 0 1 |
b_k = (bx_k, by_k) // lmk coordinates in world frame
The control signal u is a twist in se(2) comprising longitudinal
velocity vx and angular velocity wz, with no other velocity
components, integrated over the sampling time dt.
u = (vx*dt, 0, w*dt)
The control is corrupted by additive Gaussian noise u_noise,
with covariance
Q = diagonal(sigma_v^2, sigma_s^2, sigma_yaw^2).
This noise accounts for possible lateral slippage
through non-zero values of sigma_s.
At the arrival of a control u, a new robot pose is created at
X_j = X_i * Exp(u) = X_i + u.
This new pose is then added to the graph.
Landmark measurements are of the range and bearing type,
though they are put in Cartesian form for simplicity,
y = (yx, yy) // lmk coordinates in robot frame
Their noise n is zero mean Gaussian, and is specified
with a covariances matrix R.
We notice the rigid motion action y_ik = h(X_i,b_k) = X_i^-1 * b_k
(see appendix D).
The world comprises 5 landmarks.
Not all of them are observed from each pose.
A set of pairs pose--landmark is created to establish which
landmarks are observed from each pose.
These pairs can be observed in the factor graph, as follows.
The factor graph of the SAM problem looks like this:
------- b1
b3 / |
| / b4 |
| / / \|
X0 ---- X1 ---- X2
| \ / \ /
| b0 b2
*
where:
- X_i are SE2 robot poses
- b_k are R^2 landmarks or beacons
- * is a pose prior to anchor the map and make the problem observable
- segments indicate measurement factors:
- motion measurements from X_i to X_j
- landmark measurements from X_i to b_k
- absolute pose measurement from X0 to * (the origin of coordinates)
We thus declare 9 factors pose---landmark, as follows:
poses --- lmks
x0 --- b0
x0 --- b1
x0 --- b3
x1 --- b0
x1 --- b2
x1 --- b4
x2 --- b1
x2 --- b2
x2 --- b4
The main variables are summarized again as follows
Xi : robot pose at time i, SE(2)
u : robot control, (v*dt; 0; w*dt) in se(2)
Q : control perturbation covariance
b : landmark position, R^2
y : Cartesian landmark measurement in robot frame, R^2
R : covariance of the measurement noise
We define the state to estimate as a manifold composite:
X in < SE2, SE2, SE2, R^2, R^2, R^2, R^2, R^2 >
X = < X0, X1, X2, b0, b1, b2, b3, b4 >
The estimation error dX is expressed
in the tangent space at X,
dX in < se2, se2, se2, R^2, R^2, R^2, R^2, R^2 >
~ < R^3, R^3, R^3, R^2, R^2, R^2, R^2, R^2 > = R^19
dX = [ dx0, dx1, dx2, db0, db1, db2, db3, db4 ] in R^19
with
dx_i: pose error in se(2) ~ R^3
db_k: landmark error in R^2
The prior, motion and measurement models are
- for the prior factor:
p_0 = X_0
- for the motion factors - motion expectation equation:
d_ij = X_j (-) X_i = log(X_i.inv * X_j)
- for the measurement factors - measurement expectation equation:
e_ik = h(X_i, b_k) = X_i^-1 * b_k
The algorithm below comprises first a simulator to
produce measurements, then uses these measurements
to estimate the state, using a graph representation
and Lie-based non-linear iterative least squares solver
that uses the pseudo-inverse method.
This file has plain code with only one main() function.
There are no function calls other than those involving `manif`.
Printing the prior state (before solving) and posterior state (after solving),
together with a ground-truth state defined by the simulator
allows for evaluating the quality of the estimates.
This information is complemented with the evolution of
the optimizer's residual and optimal step norms. This allows
for evaluating the convergence of the optimizer.
"""
from manifpy import SE2, SE2Tangent
import numpy as np
from numpy.linalg import inv, norm
Vector = np.array
def Jacobian():
    """Return a fresh DoF x DoF zero matrix to hold an SE2 Jacobian."""
    shape = (SE2.DoF, SE2.DoF)
    return np.zeros(shape)
def random(dim, s=0.1):
    """Random vector in R^dim with each entry uniform in [-s, s]."""
    lo = [-1 * s] * dim
    hi = [1 * s] * dim
    return np.random.uniform(lo, hi)
# Script entry point: simulate a 3-pose / 5-landmark SE(2) SAM problem,
# then solve it with Lie-based iterative least squares (pseudo-inverse).
if __name__ == '__main__':
    print()
    print('2D Smoothing and Mapping. 3 poses, 5 landmarks.')
    print('-----------------------------------------------')
    np.set_printoptions(precision=3, suppress=True)
    # START CONFIGURATION
    # some experiment constants
    DoF = SE2.DoF
    Dim = SE2.Dim
    NUM_POSES = 3
    NUM_LMKS = 5
    NUM_FACTORS = 9
    NUM_STATES = NUM_POSES * DoF + NUM_LMKS * Dim
    NUM_MEAS = NUM_POSES * DoF + NUM_FACTORS * Dim
    MAX_ITER = 20 # for the solver
    # Define the robot pose element
    Xi = SE2.Identity()
    X_simu = SE2.Identity()
    # nominal control: forward velocity + yaw rate, integrated over dt
    u_nom = Vector([0.1, 0.0, 0.05])
    u_sigmas = Vector([0.01, 0.01, 0.01])
    Q = np.diagflat(np.square(u_sigmas))
    W = np.diagflat(1./u_sigmas) # this is Q^(-T/2)
    # Declare the Jacobians of the motion wrt robot and control
    J_x = Jacobian()
    J_u = Jacobian()
    controls = []
    # Define five landmarks in R^2
    landmarks = [0] * NUM_LMKS
    landmarks_simu = [
        Vector([3.0, 0.0]),
        Vector([2.0, -1.0]),
        Vector([2.0, 1.0]),
        Vector([3.0, -1.0]),
        Vector([3.0, 1.0]),
    ]
    y_sigmas = Vector([0.001, 0.001])
    R = np.diagflat(np.square(y_sigmas))
    S = np.diagflat(1./y_sigmas) # this is R^(-T/2)
    # Declare some temporaries
    J_d_xi = Jacobian()
    J_d_xj = Jacobian()
    J_ix_x = Jacobian()
    J_r_p0 = Jacobian()
    J_e_ix = np.zeros((Dim, DoF))
    J_e_b = np.zeros((Dim, Dim))
    # full residual vector and problem Jacobian
    r = np.zeros(NUM_MEAS)
    J = np.zeros((NUM_MEAS, NUM_STATES))
    r"""
    The factor graph of the SAM problem looks like this:
    ------- b1
    b3 / |
    | / b4 |
    | / / \|
    X0 ---- X1 ---- X2
    | \ / \ /
    | b0 b2
    *
    where:
    - Xi are poses
    - bk are landmarks or beacons
    - * is a pose prior to anchor the map and make the problem observable
    Define pairs of nodes for all the landmark measurements
    There are 3 pose nodes [0..2] and 5 landmarks [0..4].
    A pair pose -- lmk means that the lmk was measured from the pose
    Each pair declares a factor in the factor graph
    We declare 9 pairs, or 9 factors, as follows:
    """
    # 0-0,1,3 | 1-0,2,4 | 2-1,2,4
    pairs = [[0, 1, 3], [0, 2, 4], [1, 2, 4]]
    # Define the beacon's measurements
    measurements = {
        0: {0: 0, 1: 0, 3: 0},
        1: {0: 0, 2: 0, 4: 0},
        2: {1: 0, 2: 0, 4: 0}
    }
    # END CONFIGURATION
    # Simulator
    poses_simu = []
    poses_simu.append(X_simu)
    poses = []
    poses.append(Xi + (SE2Tangent.Random()*0.1)) # use very noisy priors
    # Make NUM_POSES steps. Measure up to three landmarks each time.
    for i in range(NUM_POSES):
        # make measurements
        for k in pairs[i]:
            # simulate measurement
            b = landmarks_simu[k] # lmk coordinates in world frame
            y_noise = y_sigmas * random(Dim) # measurement noise
            y = X_simu.inverse().act(b) # landmark measurement, before adding noise
            # add noise and compute prior lmk from prior pose
            measurements[i][k] = y + y_noise # store noisy measurements
            b = Xi.act(y + y_noise) # mapped landmark with noise
            landmarks[k] = b + random(Dim) # use noisy priors
        # make motions
        # do not make the last motion since we're done after 3rd pose
        if i < NUM_POSES - 1:
            # move simulator, without noise
            X_simu = X_simu + SE2Tangent(u_nom)
            # move prior, with noise
            u_noise = u_sigmas * random(DoF)
            Xi = Xi + SE2Tangent(u_nom + u_noise)
            # store
            poses_simu.append(X_simu)
            poses.append(Xi + (SE2Tangent.Random()*0.1)) # use noisy priors
            controls.append(u_nom + u_noise)
    # Estimator
    # DEBUG INFO
    print('prior')
    for X in poses:
        print('pose : ', X.translation().transpose(), ' ', X.angle())
    for l in landmarks:
        print('lmk : ', l.transpose())
    print('-----------------------------------------------')
    # iterate
    for iteration in range(MAX_ITER):
        # Clear residual vector and Jacobian matrix
        r.fill(0)
        J.fill(0)
        row = 0
        col = 0
        """
        1. evaluate prior factor
        NOTE (see Chapter 2, Section E, of Sola-18):
        To compute any residual, we consider the following variables:
        r: residual
        e: expectation
        y: prior specification 'measurement'
        W: sqrt information matrix of the measurement noise.
        In case of a non-trivial prior measurement, we need to consider
        the nature of it: is it a global or a local specification?
        When prior information `y` is provided in the global reference,
        we need a left-minus operation (.-) to compute the residual.
        This is usually the case for pose priors, since it is natural
        to specify position and orientation wrt a global reference,
        r = W * (e (.-) y)
        = W * (e * y.inv).log()
        When `y` is provided as a local reference,
        then right-minus (-.) is required,
        r = W * (e (-.) y)
        = W * (y.inv * e).log()
        Notice that if y = Identity()
        then local and global residuals are the same.
        Here, expectation, measurement and info matrix are trivial,
        as follows
        expectation
        e = poses[0]; // first pose
        measurement
        y = SE2d::Identity() // robot at the origin
        info matrix:
        W = I // trivial
        residual uses left-minus since reference measurement is global
        r = W * (poses[0] (.-) measurement)
        = log(poses[0] * Id.inv) = poses[0].log()
        Jacobian matrix :
        J_r_p0 = Jr_inv(log(poses[0])) // see proof below
        Proof: Let p0 = poses[0] and y = measurement.
        We have the partials
        J_r_p0 = W^(T/2) * d(log(p0 * y.inv)/d(poses[0])
        with W = i and y = I.
        Since d(log(r))/d(r) = Jr_inv(r) for any r in the Lie algebra,
        we have
        J_r_p0 = Jr_inv(log(p0))
        residual and Jacobian.
        Notes:
        We have residual = expectation - measurement,
        in global tangent space
        We have the Jacobian in J_r_p0 = J[row:row+DoF, col:col+DoF]
        """
        r[row:row+DoF] = poses[0].lminus(SE2.Identity(), J_r_p0).coeffs()
        J[row:row+DoF, col:col+DoF] = J_r_p0
        row += DoF
        # loop poses
        for i in range(NUM_POSES):
            # 2. evaluate motion factors
            # do not make the last motion since we're done after 3rd pose
            if i < NUM_POSES - 1:
                j = i + 1 # this is next pose's id
                # recover related states and data
                Xi = poses[i]
                Xj = poses[j]
                u = SE2Tangent(controls[i])
                # expectation
                # (use right-minus since motion measurements are local)
                d = Xj.rminus(Xi, J_d_xj, J_d_xi) # expected motion = Xj (-) Xi
                # residual
                r[row:row+DoF] = W @ (d - u).coeffs() # residual
                # Jacobian of residual wrt first pose
                col = i * DoF
                J[row:row+DoF, col:col+DoF] = W @ J_d_xi
                # Jacobian of residual wrt second pose
                col = j * DoF
                J[row:row+DoF, col:col+DoF] = W @ J_d_xj
                # advance rows
                row += DoF
            # 3. evaluate measurement factors
            for k in pairs[i]:
                # recover related states and data
                Xi = poses[i]
                b = landmarks[k]
                y = measurements[i][k]
                # expectation
                e = Xi.inverse(J_ix_x).act(b, J_e_ix, J_e_b) # expected measurement = Xi.inv * bj
                J_e_x = J_e_ix @ J_ix_x # chain rule
                # residual
                r[row:row+Dim] = S @ (e - y)
                # Jacobian of residual wrt pose
                col = i * DoF
                J[row:row+Dim, col:col+DoF] = S @ J_e_x
                # Jacobian of residual wrt lmk
                col = NUM_POSES * DoF + k * Dim
                J[row:row+Dim, col:col+Dim] = S @ J_e_b
                # advance rows
                row += Dim
        # 4. Solve
        # compute optimal step
        # ATTENTION: This is an expensive step!!
        # ATTENTION: Use QR factorization and
        # column reordering for larger problems!!
        dX = - inv(J.transpose() @ J) @ J.transpose() @ r
        # update all poses
        for i in range(NUM_POSES):
            # we go very verbose here
            row = i * DoF
            size = DoF
            dx = dX[row:row+size]
            poses[i] = poses[i] + SE2Tangent(dx)
        # update all landmarks
        for k in range(NUM_LMKS):
            # we go very verbose here
            row = NUM_POSES * DoF + k * Dim
            size = Dim
            db = dX[row:row+size]
            landmarks[k] = landmarks[k] + db
        # DEBUG INFO
        print('residual norm: ', norm(r), ', step norm: ', norm(dX))
        # conditional exit
        if norm(dX) < 1e-6:
            break
    print('-----------------------------------------------')
    # Print results
    # solved problem
    print('posterior')
    for X in poses:
        print('pose : ', X.translation().transpose(), ' ', X.angle())
    for b in landmarks:
        print('lmk : ', b.transpose())
    print('-----------------------------------------------')
    # ground truth
    print('ground truth')
    for X in poses_simu:
        print('pose : ', X.translation().transpose(), ' ', X.angle())
    for b in landmarks_simu:
        print('lmk : ', b.transpose())
    print('-----------------------------------------------')
|
import re
from robobrowser import RoboBrowser

# Log in to the PATRIC web site with RoboBrowser.
# BUG FIX: base_url was referenced below but never defined (NameError);
# define it once and reuse it for both the open() call and the Referer.
base_url = 'https://www.patricbrc.org'
browser = RoboBrowser(history=True)
browser.open(base_url)
# Locate the login form and fill in the credentials.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file.
form = browser.get_form(action='/dijit__WidgetsInTemplateMixin_6/')
form["username"] = 'gresch'
form["password"] = 'sequencing'
browser.session.headers['Referer'] = base_url
browser.submit_form(form)
# BUG FIX: `browser.select` without arguments is a bound method, so the
# original printed its repr; print the parsed response page instead.
print(str(browser.parsed))
# Five-question quiz (Turkish prompts); each correct answer earns 20 points.
puan= 0
# question 1: minimum driving-licence age in Turkey
print("Türkiyede kaç yaşından itibaren ehliyet sahibi olabilirsiniz?")
cevap=input("Cevabı yazınız: ")
if cevap=="18":
    puan+=20
    print("Cevabınız doğrudur.")
    print("Mevcut puan durumunuz:"+" "+str(puan))
else:
    print("Cevabınız yanlış")
    print("Bu soru için hiç puan toplayamadınız.")
    print("Mevcut puan durumunuz: "+ " "+str(puan))
# question 2: first day of the week
print("Haftanın ilk günü nedir?")
cevap=input("Cevabı yazınız:")
if cevap=="Pazartesi":
    puan+=20
    print("Cevabınız doğrudur.")
    print("Mevcut puan durumunuz:"+" "+str(puan))
else:
    print("Cevabınız yanlış")
    print("Bu soru için hiç puan toplayamadınız.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
# question 3: how many seasons are there
print("Kaç mevsim vardır?")
cevap=input("Cevabı yazınız:")
# BUG FIX: the accepted answer was "12" (number of months), but the
# question asks for the number of seasons, which is 4.
if cevap=="4":
    puan+=20
    print("Cevabınız doğrudur.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
else:
    print("Cevabınız yanlış")
    print("Bu soru için hiç puan toplayamadınız.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
# question 4: how many days are in a year
print("Bir yıl kaç gündür?")
cevap=input("Cevabı yazınız:")
if cevap=="365":
    puan+=20
    print("Cevabınız doğrudur.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
else:
    print("Cevabınız yanlış")
    # BUG FIX: message typo "toplayamdınız" -> "toplayamadınız"
    # (consistent with the other branches).
    print("Bu soru için hiç puan toplayamadınız.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
# question 5: which province Cappadocia is in
print("Kapadokya hangi ilimizde bulunur?")
# BUG FIX: the input was assigned to a misspelled `cevaap`, so question 5
# was graded against question 4's stale answer.
cevap=input("Cevabı yazınız:")
if cevap=="Nevşehir":
    puan+=20
    print("Cevabınız doğrudur.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
else:
    print("Cevabınız yanlış")
    print("Bu soru için hiç puan toplayamadınız.")
    print("Mevcut puan durumunuz: "+" "+str(puan))
Son="Sınavımız bitmiştir.Kazanılan puan: "
# BUG FIX: print("Son") printed the literal word "Son"; show the closing
# message together with the final score instead.
print(Son + str(puan))
|
from collections import OrderedDict
from typing import Any, Optional, Union
import networkx as nx
import numpy as np
import pytorch_lightning as pl
import scipy.linalg as slin
from sklearn.metrics import precision_score, recall_score, accuracy_score, roc_auc_score
from xgboost import XGBClassifier
import torch
import torch.nn as nn
import decaf.logger as log
from opacus.accountants.rdp import RDPAccountant
from opacus.optimizers.optimizer import DPOptimizer
from opacus.grad_sample.grad_sample_module import GradSampleModule
from opacus.data_loader import shape_safe
'''
What do I need to do to be able to use all the features in a one-hot way
First, change the data processing to one-hotify all the categorical features but that shouldn't be too bad
The biggest change will be in the sequential function - what are all the places that rely on idx
Would need to pass in dictionary with features -> num_categories
In __init__, turn this into features -> idx_range as well DONE
In __init__, also create one_hot mask DONE
1) In iterating through the features - just get features from dag: DONE
2) In applying the mask - multiply by one_hot mask: DONE
3) In setting the data - dictionary with ranges for each feature: DONE
'''
class TraceExpm(torch.autograd.Function):
    """Autograd-aware trace of the matrix exponential: tr(expm(A)).

    Uses the identity d tr(expm(A)) / dA = expm(A)^T, so the matrix
    exponential computed in the forward pass is cached for backward.
    """

    @staticmethod
    def forward(ctx: Any, input: torch.Tensor) -> torch.Tensor:
        # scipy does the heavy lifting; detach before the NumPy round-trip.
        # NOTE(review): assumes a CPU tensor — .numpy() fails on CUDA.
        expm_np = slin.expm(input.detach().numpy())
        trace_val = np.trace(expm_np)
        # Cache expm(A) for the gradient computation.
        ctx.save_for_backward(torch.from_numpy(expm_np))
        return torch.as_tensor(trace_val, dtype=input.dtype)

    @staticmethod
    def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
        (expm_mat,) = ctx.saved_tensors
        return grad_output * expm_mat.t()
# Convenience alias: call as trace_expm(A) like a plain function.
trace_expm = TraceExpm.apply
# Shared activation instance used by the generator network below.
activation_layer = nn.Tanh()
class Generator_causal(nn.Module):
    """Causal generator: one sub-network per feature, masked by a DAG.

    Each feature is produced by its own input layer (``fc_i``) and output
    head (``fc_f``), sharing a common trunk (``shared``).  A one-hot mask
    derived from the DAG adjacency hides every column that is not a parent
    of the feature being generated.

    Parameters
    ----------
    z_dim : width of the noise vector (one coordinate per feature).
    x_dim : number of features (adjacency matrix is x_dim x x_dim).
    h_dim : hidden width of the per-feature and shared layers.
    use_mask : apply a DAG mask to the inputs of each feature head.
    f_scale : scale factor applied to the Xavier-initialised weights.
    dag_seed : list of [parent, child] edges seeding the adjacency matrix.
    feat_num : mapping feature index -> number of one-hot columns.
    """

    def __init__(
        self,
        z_dim: int,
        x_dim: int,
        h_dim: int,
        use_mask: bool = False,
        f_scale: float = 0.1,
        dag_seed: Optional[list] = None,
        feat_num: Optional[dict] = None,
    ) -> None:
        super().__init__()
        # Avoid mutable default arguments (shared state across instances).
        dag_seed = [] if dag_seed is None else dag_seed
        feat_num = {} if feat_num is None else feat_num
        # x_dim here is the *feature* count; categorical features may span
        # several one-hot columns each.
        self.x_dim = len(feat_num)

        def block(in_feat: int, out_feat: int, normalize: bool = False) -> list:
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.Tanh())
            return layers

        self.shared = nn.Sequential(*block(h_dim, h_dim), *block(h_dim, h_dim))

        # Feature bookkeeping is required unconditionally below (fc_i / fc_f
        # and sequential()), so compute it outside the use_mask branch; the
        # original only set it on the seeded-DAG path and crashed otherwise.
        self.feat_num = feat_num
        previous_idx = 0
        feat_ranges = {}
        for idx in sorted(feat_num.keys()):
            # Half-open column range [start, stop) of feature idx in the
            # one-hot expanded representation.
            feat_ranges[idx] = (previous_idx, previous_idx + feat_num[idx])
            previous_idx += feat_num[idx]
        self.feat_ranges = feat_ranges

        if use_mask:
            if len(dag_seed) > 0:
                # Adjacency seeded strictly from the parsed DAG edges (the
                # original built the zero matrix twice; once is enough).
                M_init = torch.zeros(x_dim, x_dim)
                for pair in dag_seed:
                    M_init[pair[0], pair[1]] = 1
                self.M = torch.nn.parameter.Parameter(M_init, requires_grad=False)
                print("Initialised adjacency matrix as parsed:\n", self.M)
                # One-hot mask: repeat each feature's parent-indicator row
                # once per one-hot column so it lines up with the expanded
                # input.
                oh_M = torch.empty((0, self.x_dim))
                for idx in sorted(feat_num.keys()):
                    oh_idx = torch.unsqueeze(M_init[idx, :], 0)
                    oh_idx = torch.tile(oh_idx, (feat_num[idx], 1))
                    oh_M = torch.cat([oh_M, oh_idx], 0)
                self.oh_M = torch.nn.parameter.Parameter(oh_M, requires_grad=False)
            else:
                # Learnable adjacency: small random init, zero diagonal.
                # NOTE(review): this branch defines no ``oh_M`` although
                # sequential() requires it -- only the seeded-DAG path is
                # currently usable end to end; confirm intended.
                M_init = torch.rand(x_dim, x_dim) * 0.2
                M_init[torch.eye(x_dim, dtype=bool)] = 0
                self.M = torch.nn.parameter.Parameter(M_init)
        else:
            self.M = torch.ones(x_dim, x_dim)

        # Per-feature input layers: full one-hot width plus one noise coord.
        self.fc_i = nn.ModuleList(
            [nn.Linear(sum(self.feat_num.values()) + 1, h_dim) for i in range(self.x_dim)]
        )
        # Per-feature output heads, sized by each feature's category count.
        self.fc_f = nn.ModuleList([nn.Linear(h_dim, feat_num[i]) for i in sorted(feat_num.keys())])

        # Scaled Xavier init.  The original iterated ``parameters()`` and
        # compared each tensor against ``nn.Linear`` (never true), so the
        # shared trunk was never initialised; iterate modules instead.
        for layer in self.shared.modules():
            if isinstance(layer, nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)
                layer.weight.data *= f_scale
        for i, layer in enumerate(self.fc_i):
            torch.nn.init.xavier_normal_(layer.weight)
            layer.weight.data *= f_scale
            # Nearly sever the self-loop of feature i at initialisation.
            layer.weight.data[:, i] = 1e-16
        for i, layer in enumerate(self.fc_f):
            torch.nn.init.xavier_normal_(layer.weight)
            layer.weight.data *= f_scale

    def sequential(
        self,
        x: torch.Tensor,
        z: torch.Tensor,
        gen_order: Union[list, dict, None] = None,
        biased_edges: Optional[dict] = None,
    ) -> torch.Tensor:
        """Generate the features one at a time along ``gen_order``.

        Parameters
        ----------
        x : (batch, one_hot_width) tensor used as the starting values.
        z : (batch, x_dim) noise; one coordinate is consumed per feature.
        gen_order : iteration order over features; defaults to 0..x_dim-1
            (callers normally pass a topological order of the DAG).
        biased_edges : mapping feature i -> list of one-hot columns j whose
            dependence on i should be destroyed by shuffling column j.
        """
        biased_edges = {} if biased_edges is None else biased_edges
        # Work on whatever device ``x`` already lives on; the original
        # forced ``.cuda()`` and broke CPU-only runs.
        out = x.clone().detach()
        if gen_order is None:
            gen_order = list(range(self.x_dim))
        for i in gen_order:
            # Hide every column that is not a parent of feature i.
            x_masked = out.clone() * self.oh_M[:, i]
            if i in biased_edges:
                # Debiasing: shuffle the parent column across the batch so
                # feature i can no longer depend on it.
                for j in biased_edges[i]:
                    x_j = x_masked[:, j].detach().cpu().numpy()
                    np.random.shuffle(x_j)
                    x_masked[:, j] = torch.from_numpy(x_j).to(x_masked.device)
            out_i = torch.tanh(
                self.fc_i[i](torch.cat([x_masked, z[:, i].unsqueeze(1)], axis=1))
            )
            lo, hi = self.feat_ranges[i]
            if self.feat_num[i] == 1:
                # Binary/continuous feature: single sigmoid unit.
                out[:, lo:hi] = nn.Sigmoid()(self.fc_f[i](self.shared(out_i)))
            else:
                # Categorical feature: softmax over its one-hot columns.
                out[:, lo:hi] = nn.Softmax(dim=1)(self.fc_f[i](self.shared(out_i)))
        return out
class Discriminator(nn.Module):
    """Three-layer tanh MLP critic returning an unbounded WGAN score per sample."""

    def __init__(self, x_dim: int, h_dim: int) -> None:
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(x_dim, h_dim),
            nn.Tanh(),
            nn.Linear(h_dim, h_dim),
            nn.Tanh(),
            nn.Linear(h_dim, 1),
        )
        # The original iterated ``parameters()`` and compared each tensor to
        # ``nn.Linear`` (never true), so the intended Xavier init was never
        # applied -- and had it run, it passed the *module* rather than its
        # weight tensor to xavier_normal_.  Iterate modules and initialise
        # the weight tensors.
        for layer in self.model.modules():
            if isinstance(layer, nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)

    def forward(self, x_hat: torch.Tensor) -> torch.Tensor:
        """Score a batch of (real or generated) samples; returns shape (N, 1)."""
        return self.model(x_hat)
class DECAF(pl.LightningModule):
    """DEbiasing CAusal Fairness GAN: a WGAN-GP whose generator emits
    features sequentially along a (seeded or learned) causal DAG.

    Optionally wraps the discriminator for differentially-private training
    via Opacus (RDP accountant + per-sample gradient hooks).
    """

    def __init__(
        self,
        input_dim: int,
        dag_seed: list = [],
        h_dim: int = 200,
        lr: float = 1e-3,
        b1: float = 0.5,
        b2: float = 0.999,
        batch_size: int = 32,
        lambda_gp: float = 10,
        lambda_privacy: float = 1,
        d_updates: int = 5,
        eps: float = 1e-8,
        alpha: float = 1,
        rho: float = 1,
        weight_decay: float = 1e-2,
        grad_dag_loss: bool = False,
        l1_g: float = 0,
        l1_W: float = 1,
        p_gen: float = -1,
        use_mask: bool = False,
        enable_dp: bool = False,
        delta: float = 1e-5,
        noise_multiplier: float = 0.,
        sample_rate: float = 0.,
        max_grad_norm: float = 2.,
        feat_num: dict = {}
    ):
        # NOTE(review): mutable default arguments (dag_seed=[], feat_num={})
        # are shared across instances -- consider None sentinels.
        super().__init__()
        self.save_hyperparameters()
        # Sanity counters of how often each optimizer actually stepped.
        self.iterations_d = 0
        self.iterations_g = 0
        log.info(f"dag_seed {dag_seed}")
        self.x_dim = input_dim
        # Width of the one-hot expanded representation.
        self.oh_x_dim = sum(feat_num.values())
        self.z_dim = self.x_dim
        log.info(
            f"Setting up network with x_dim = {self.x_dim}, z_dim = {self.z_dim}, h_dim = {h_dim}"
        )
        # networks
        self.generator = Generator_causal(
            z_dim=self.z_dim,
            x_dim=self.x_dim,
            h_dim=h_dim,
            use_mask=use_mask,
            dag_seed=dag_seed,
            feat_num = feat_num
        )
        # The discriminator sees the one-hot expanded samples.
        self.discriminator = Discriminator(x_dim=self.oh_x_dim, h_dim=h_dim)
        self.dag_seed = dag_seed
        self.feat_num = feat_num
        self.enable_dp = enable_dp
        if self.enable_dp:
            # Differential privacy: RDP accounting and per-sample gradient
            # hooks on the discriminator (Opacus).
            self.priv_acc = RDPAccountant()
            self.delta = delta
            self.batch_size = batch_size
            self.noise_multiplier = noise_multiplier
            self.sample_rate = sample_rate
            self.max_grad_norm = max_grad_norm
            self.discriminator = GradSampleModule(self.discriminator)
            # NOTE(review): manual optimization is enabled only on the DP
            # path, but the active training_step below still expects
            # automatic optimization (it receives optimizer_idx) -- the
            # manual variant is the commented-out training_step; confirm.
            self.automatic_optimization=False

    def forward(self, x: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
        """Delegate to the generator (non-sequential path)."""
        return self.generator(x, z)

    def gradient_dag_loss(self, x: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
        """
        Calculates the gradient of the output wrt the input. This is a better way to compute the DAG loss,
        but fairly slow atm
        """
        x.requires_grad = True
        z.requires_grad = True
        gen_x = self.generator(x, z)
        dummy = torch.ones(x.size(0))
        dummy = dummy.type_as(x)
        # W[i][j] accumulates |d gen_x_i / d x_j| over the batch.
        W = torch.zeros(x.shape[1], x.shape[1])
        W = W.type_as(x)
        for i in range(x.shape[1]):
            gradients = torch.autograd.grad(
                outputs=gen_x[:, i],
                inputs=x,
                grad_outputs=dummy,
                create_graph=True,
                retain_graph=True,
                only_inputs=True,
            )[0]
            W[i] = torch.sum(torch.abs(gradients), axis=0)
        # NOTEARS-style acyclicity penalty h(W) = tr(exp(W*W)) - d.
        # NOTE(review): hparams has no "x_dim" (the ctor arg is input_dim),
        # so this line would raise when grad_dag_loss is enabled -- likely
        # should be self.x_dim.
        h = trace_expm(W ** 2) - self.hparams.x_dim
        return 0.5 * self.hparams.rho * h * h + self.hparams.alpha * h

    def compute_gradient_penalty(
        self, real_samples: torch.Tensor, fake_samples: torch.Tensor
    ) -> torch.Tensor:
        """Calculates the gradient penalty loss for WGAN GP"""
        # Random weight term for interpolation between real and fake samples
        alpha = torch.rand(real_samples.size(0), 1)
        alpha = alpha.expand(real_samples.size())
        alpha = alpha.type_as(real_samples)
        # Get random interpolation between real and fake samples
        interpolates = (
            alpha * real_samples + ((1 - alpha) * fake_samples)
        ).requires_grad_(True)
        d_interpolates = self.discriminator(interpolates)
        fake = torch.ones(real_samples.size(0), 1)
        fake = fake.type_as(real_samples)
        # Get gradient w.r.t. interpolates
        gradients = torch.autograd.grad(
            outputs=d_interpolates,
            inputs=interpolates,
            grad_outputs=fake,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        gradients = gradients.view(gradients.size(0), -1)
        # Penalise deviation of the critic's gradient norm from 1.
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
        return gradient_penalty

    def privacy_loss(
        self, real_samples: torch.Tensor, fake_samples: torch.Tensor
    ) -> torch.Tensor:
        """ADS-GAN-style identifiability penalty: rewards generated samples
        for keeping their (RMS) distance from the matching real records."""
        return -torch.mean(
            torch.sqrt(
                torch.mean((real_samples - fake_samples) ** 2, axis=1)
                + self.hparams.eps
            )
        )

    def get_W(self) -> torch.Tensor:
        """Return the adjacency matrix: the explicit mask when use_mask is
        set, otherwise column norms of each feature head's input weights."""
        if self.hparams.use_mask:
            return self.generator.M
        else:
            W_0 = []
            for i in range(self.x_dim):
                weights = self.generator.fc_i[i].weight[
                    :, :-1
                ]  # don't take the noise variable's weights
                W_0.append(
                    torch.sqrt(
                        torch.sum((weights) ** 2, axis=0, keepdim=True)
                        + self.hparams.eps
                    )
                )
            return torch.cat(W_0, axis=0).T

    def dag_loss(self) -> torch.Tensor:
        """Acyclicity penalty (augmented-Lagrangian form) plus L1 sparsity."""
        W = self.get_W()
        h = trace_expm(W ** 2) - self.x_dim
        l1_loss = torch.norm(W, 1)
        return (
            0.5 * self.hparams.rho * h ** 2
            + self.hparams.alpha * h
            + self.hparams.l1_W * l1_loss
        )

    def sample_z(self, n: int) -> torch.Tensor:
        """Sample n noise vectors uniformly from [-1, 1)^z_dim."""
        return torch.rand(n, self.z_dim) * 2 - 1

    @staticmethod
    def l1_reg(model: nn.Module) -> float:
        """Sum of L1 norms of all weight tensors of *model*."""
        l1 = torch.tensor(0.0, requires_grad=True)
        for name, layer in model.named_parameters():
            if "weight" in name:
                l1 = l1 + layer.norm(p=1)
        return l1

    def gen_synthetic(
        self, x: torch.Tensor, gen_order: Optional[list] = None, biased_edges: dict = {}
    ) -> torch.Tensor:
        """Generate synthetic samples shaped like *x* with fresh noise."""
        return self.generator.sequential(
            x,
            self.sample_z(x.shape[0]).type_as(x),
            gen_order=gen_order,
            biased_edges=biased_edges,
        )

    def get_dag(self) -> np.ndarray:
        """Adjacency matrix as a rounded NumPy array."""
        return np.round(self.get_W().cpu().detach().numpy(), 3)

    def get_bi_dag(self) -> np.ndarray:
        """Symmetrised (upper-triangular) version of the adjacency matrix.

        NOTE(review): unlike get_dag, no ``.cpu()`` before ``.numpy()`` --
        would fail for CUDA tensors; confirm call sites.
        """
        dag = np.round(self.get_W().detach().numpy(), 3)
        bi_dag = np.zeros_like(dag)
        for i in range(len(dag)):
            for j in range(i, len(dag)):
                bi_dag[i][j] = dag[i][j] + dag[j][i]
        return np.round(bi_dag, 3)

    def get_gen_order(self) -> list:
        """Topological order of the thresholded (at 0.5) adjacency matrix."""
        dense_dag = np.array(self.get_dag())
        dense_dag[dense_dag > 0.5] = 1
        dense_dag[dense_dag <= 0.5] = 0
        # NOTE(review): nx.from_numpy_matrix was removed in networkx 3.x
        # (use from_numpy_array) -- pin networkx < 3 or migrate.
        G = nx.from_numpy_matrix(dense_dag, create_using=nx.DiGraph)
        gen_order = list(nx.algorithms.dag.topological_sort(G))
        return gen_order

    def training_step(
        self, batch: torch.Tensor, batch_idx: int, optimizer_idx: int
    ) -> OrderedDict:
        """One optimizer step: idx 0 trains the critic (WGAN-GP), idx 1 the
        generator (adversarial + ADS-GAN privacy + L1 + optional DAG loss)."""
        # sample noise
        z = self.sample_z(batch.shape[0])
        z = z.type_as(batch)
        if self.hparams.p_gen < 0:
            generated_batch = self.generator.sequential(batch, z, self.get_gen_order())
        else:  # train simultaneously
            raise ValueError(
                "we're not allowing simultaneous generation no more. Set p_gen negative"
            )
        # train generator
        if optimizer_idx == 0:
            self.iterations_d += 1
            # Measure discriminator's ability to classify real from generated samples
            # how well can it label as real?
            real_loss = torch.mean(self.discriminator(batch))
            fake_loss = torch.mean(self.discriminator(generated_batch.detach()))
            # discriminator loss
            d_loss = fake_loss - real_loss
            # add the gradient penalty
            d_loss += self.hparams.lambda_gp * self.compute_gradient_penalty(
                batch, generated_batch
            )
            tqdm_dict = {"d_loss": d_loss.detach()}
            output = OrderedDict(
                {"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict}
            )
            return output
        elif optimizer_idx == 1:
            # sanity check: keep track of G updates
            self.iterations_g += 1
            # adversarial loss (negative D fake loss)
            g_loss = -torch.mean(
                self.discriminator(generated_batch)
            )  # self.adversarial_loss(self.discriminator(self.generated_batch), valid)
            # add privacy loss of ADS-GAN
            g_loss += self.hparams.lambda_privacy * self.privacy_loss(
                batch, generated_batch
            )
            # add l1 regularization loss
            g_loss += self.hparams.l1_g * self.l1_reg(self.generator)
            # Only learn the DAG when no seed was supplied.
            if len(self.dag_seed) == 0:
                if self.hparams.grad_dag_loss:
                    g_loss += self.gradient_dag_loss(batch, z)
            tqdm_dict = {"g_loss": g_loss.detach()}
            output = OrderedDict(
                {"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict}
            )
            return output
        else:
            raise ValueError("should not get here")

    # Manual-optimization variant used for the DP path; kept for reference.
    # def training_step(self, batch: torch.Tensor, batch_idx: int) -> OrderedDict:
    #     # sample noise
    #     z = self.sample_z(batch.shape[0])
    #     z = z.type_as(batch)
    #     if self.hparams.p_gen < 0:
    #         generated_batch = self.generator.sequential(batch, z, self.get_gen_order())
    #     else:  # train simultaneously
    #         raise ValueError(
    #             "we're not allowing simultaneous generation no more. Set p_gen negative"
    #         )
    #     optimizer_idx = int(batch_idx % (1 + self.hparams.d_updates) == self.hparams.d_updates)
    #     opt = self.optimizers()
    #     #Zero-grad discriminator optimizer
    #     opt = opt[optimizer_idx]
    #     # train generator
    #     if optimizer_idx == 0:
    #         self.iterations_d += 1
    #         # Measure discriminator's ability to classify real from generated samples
    #         self.discriminator.remove_hooks()
    #         gp = self.hparams.lambda_gp * self.compute_gradient_penalty(batch, generated_batch)
    #         opt.zero_grad()
    #         self.manual_backward(gp, create_graph=True, retain_graph=True)
    #         self.discriminator.add_hooks()
    #         fake_output = self.discriminator(generated_batch.detach())
    #         fake_loss = torch.mean(fake_output)
    #         self.manual_backward(fake_loss)
    #         opt.step()
    #         real_output = self.discriminator(batch)
    #         real_loss = - torch.mean(real_output)
    #         opt.zero_grad()
    #         self.manual_backward(real_loss)
    #         opt.step()
    #         # opt.zero_grad()
    #         self.priv_acc.step(noise_multiplier=self.noise_multiplier, sample_rate=self.sample_rate)
    #         disc_loss = fake_loss - real_loss
    #         tqdm_dict = {"d_loss": disc_loss.detach()}
    #         output = OrderedDict(
    #             {"loss": disc_loss, "progress_bar": tqdm_dict, "log": tqdm_dict}
    #         )
    #         return output
    #     elif optimizer_idx == 1:
    #         # sanity check: keep track of G updates
    #         g_output = self.discriminator(generated_batch)
    #         g_loss = - torch.mean(g_output)
    #         opt.zero_grad()
    #         self.manual_backward(g_loss)
    #         opt.step()
    #         tqdm_dict = {"g_loss": g_loss.detach()}
    #         output = OrderedDict(
    #             {"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict}
    #         )
    #         self.iterations_g += 1
    #         return output
    #     else:
    #         raise ValueError("should not get here")

    # def training_epoch_end(self, training_step_outputs):
    #     eps = self.priv_acc.get_epsilon(self.delta)
    #     self.log("Epsilon", eps, prog_bar=True, on_epoch=True, logger=True)

    def validation_step(self, batch, batch_idx):
        '''
        Goal: want precision/recall/auroc metrics for training epochs
        a) Generate synthetic data
        b) Train classifier on synthetic data
        c) Evaluate precision/recall/AUROC for classifier on validation data
        validation_step: just return the batch
        validation_epoch_end: generate synthetic data, train XGB classifier on synth data, evaluate on v
        '''
        return batch

    def validation_epoch_end(self, validation_step_outputs):
        """Train an XGB classifier on freshly generated synthetic data and
        log precision/recall/AUROC on the collected validation batches.
        Assumes the last column is the binary label -- TODO confirm."""
        v_data = torch.cat(validation_step_outputs, 0).cpu().numpy().astype(np.uint8)
        v_x, v_y = v_data[:, :-1], v_data[:, -1]
        #THIS LENGTH IS HARDCODED IN - CHANGE
        # NOTE(review): 30162 is the dataset size hard-coded; also forces CUDA.
        synth_data = self.gen_synthetic(torch.ones((30162, self.oh_x_dim)).cuda()).cpu().numpy()
        X_synth, y_synth = synth_data[:, :-1], synth_data[:, -1]
        # Normalise the generated label column, then threshold to {0, 1}.
        y_synth = (y_synth)/(np.max(y_synth) - np.min(y_synth))
        y_synth = np.round(y_synth, 0).astype(np.uint8)
        try:
            clf = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss').fit(X_synth, y_synth)
        except:
            # NOTE(review): bare except hides the actual failure (e.g. a
            # single-class y_synth); zeros are logged as a sentinel.
            self.log("precision", 0., on_epoch=True, prog_bar=True, logger=True)
            self.log("recall", 0., on_epoch=True, prog_bar=True, logger=True)
            self.log("auroc", 0., on_epoch=True, prog_bar=True, logger=True)
            return
        y_pred = clf.predict(v_x)
        self.log("precision", precision_score(v_y, y_pred, zero_division=0), on_epoch=True, prog_bar=True, logger=True)
        self.log("recall", recall_score(v_y, y_pred), on_epoch=True, prog_bar=True, logger=True)
        try:
            self.log("auroc", roc_auc_score(v_y, y_pred), on_epoch=True, prog_bar=True, logger=True)
        except:
            # roc_auc_score raises when only one class is present.
            self.log("auroc", 0., on_epoch=True, prog_bar=True, logger=True)

    def configure_optimizers(self) -> tuple:
        """AdamW for both nets; the critic runs d_updates steps per G step
        and is wrapped in a DPOptimizer when DP is enabled."""
        lr = self.hparams.lr
        b1 = self.hparams.b1
        b2 = self.hparams.b2
        weight_decay = self.hparams.weight_decay
        opt_g = torch.optim.AdamW(
            self.generator.parameters(),
            lr=lr,
            betas=(b1, b2),
            weight_decay=weight_decay,
        )
        opt_d = torch.optim.AdamW(
            self.discriminator.parameters(),
            lr=lr,
            betas=(b1, b2),
            weight_decay=weight_decay,
        )
        if self.enable_dp:
            opt_d = DPOptimizer(opt_d, noise_multiplier=self.noise_multiplier, max_grad_norm=self.max_grad_norm, expected_batch_size=self.batch_size)
        return (
            {"optimizer": opt_d, "frequency": self.hparams.d_updates},
            {"optimizer": opt_g, "frequency": 1},
        )
|
from flask import Flask, render_template, request
import pickle
import numpy as np
# Load the trained model once at import time and reuse it per request.
# NOTE(review): pickle.load executes arbitrary code from the file -- only
# ship trusted model artifacts; the file handle is also never closed.
filename='HeartPatients-prediction-model.pkl'
classifier = pickle.load(open(filename, 'rb'))
app = Flask(__name__, static_folder='static')
@app.route('/')
def home():
    """Serve the landing page."""
    template = 'home.html'
    return render_template(template)
@app.route('/result', methods=['POST'])
def result():
    """Read the patient's form fields, run the classifier, and render
    either the no-disease page or the disease page with the stage."""
    age = int(request.form['age'])
    sex = int(request.form['sex'])
    trestbps = int(request.form['trestbps'])
    chol = int(request.form['chol'])
    restecg = int(request.form['restecg'])
    thalach = int(request.form['thalach'])
    thal = int(request.form['thal'])
    exang = int(request.form['exang'])
    cp = int(request.form['cp'])
    ca = int(request.form['ca'])
    fbs = int(request.form['fbs'])
    slope = int(request.form['slope'])
    oldpeak = float(request.form['oldpeak'])
    # Feature order must match the order used when the model was trained.
    arr = np.array([[age, sex, cp, trestbps, chol, fbs, restecg,
                     thalach, exang, ca, slope, thal, oldpeak]])
    # predict() returns an array; take the scalar for this single sample
    # (comparing the whole array with 0 relies on deprecated NumPy
    # truth-value behaviour).
    y = classifier.predict(arr)[0]
    # No heart disease
    if y == 0:
        return render_template('nodisease.html')
    # y = 1, 2, 3, 4 are stages of heart disease
    else:
        return render_template('heartdisease.html', stage=int(y))
@app.route('/about')
def about():
    """Serve the static about page."""
    template = 'about.html'
    return render_template(template)
# Run the Flask development server.
# NOTE(review): debug=True must never be enabled in production.
if __name__ == "__main__":
    app.run(debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
import json
def banner():
    """Print the sample input/output files, then redirect stdin to the
    sample input so the solution below reads from it instead of the TTY."""
    ban = '====' * 30
    print("{}\nSAMPLE INP:\n{}\n{}".format(ban,ban,open(ip, 'r').read()))
    print("{}\nSAMPLE OUT:\n{}\n{}".format(ban,ban,open(op, 'r').read()))
    print("{}\nSTART:\n{}".format(ban,ban))
    # All subsequent raw_input()/input() calls now read from the sample file.
    sys.stdin = open(ip, 'r')
cnt = -1
def comp(inp, ln):
    """Compare *inp* against the expected output line *ln*; raise on mismatch."""
    expected = output_arr[ln]
    if str(inp) != expected:
        raise Exception("Error input output: line {}, file: {}\ngot: {} expected: {}".format(ln, op, inp, expected))
# Sample input/output files shipped with the challenge.
ip = "./challenge_sample_input"
op = "./challenge_sample_output"
# Expected output lines; indexed by comp() (Python 2: map returns a list).
output_arr = map(str,open(op,'r').read().split('\n'))
banner()
# https://www.hackerrank.com/challenges/class-2-find-the-torsional-angle/problem
import math
class Points(object):
    """A 3-D point/vector supporting subtraction, dot and cross products,
    and the Euclidean norm."""

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

    def __sub__(self, no):
        """Component-wise difference, returned as a new Points."""
        return Points(self.x - no.x, self.y - no.y, self.z - no.z)

    def dot(self, no):
        """Scalar (dot) product with *no*."""
        return self.x * no.x + self.y * no.y + self.z * no.z

    def cross(self, no):
        """Vector (cross) product with *no*, returned as a new Points."""
        return Points(
            self.y * no.z - self.z * no.y,
            self.z * no.x - self.x * no.z,
            self.x * no.y - self.y * no.x,
        )

    def absolute(self):
        """Euclidean norm of the vector."""
        return (self.x ** 2 + self.y ** 2 + self.z ** 2) ** 0.5
if __name__ == '__main__':
    # Read four 3-D points (Python 2 syntax: raw_input and print statement).
    points = [map(float, raw_input().split()) for i in range(4)]
    a, b, c, d = Points(*points[0]), Points(*points[1]), Points(*points[2]), Points(*points[3])
    # Torsional (dihedral) angle between planes (a, b, c) and (b, c, d):
    # angle between the two plane normals.
    x = (b - a).cross(c - b)
    y = (c - b).cross(d - c)
    angle = math.acos(x.dot(y) / (x.absolute() * y.absolute()))
    print "%.2f" % math.degrees(angle)
|
#!/usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author: HJK
@file: env.py
@time: 2019-01-08

Global variables shared across the package (default HTTP headers, logging
level, and the OPTS command-line option store).
"""
import logging
# Browser-like default headers for ordinary page requests.
FAKE_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',  # noqa
    'Accept-Charset': 'UTF-8,*;q=0.5',
    'Accept-Encoding': 'gzip,deflate,sdch',
    'Accept-Language': 'en-US,en;q=0.8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101 Firefox/60.0',  # noqa
    'referer': 'https://www.google.com'
}
IOS_USERAGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
# QQ refuses music downloads without a User-Agent.
# Baidu refuses downloads when the User-Agent looks like a browser -- hence
# the wget-style headers below.
WGET_HEADERS = {
    "Accept": "*/*",
    "Accept-Encoding": "identity",
    "User-Agent": "Wget/1.19.5 (darwin17.5.0)"
}
# Logging level
LOG_LEVEL = logging.DEBUG
def init_option():
    """Initialise the global OPTS dict with the command-line defaults.

    Kept inside a function (rather than at module level) so importing the
    module never accidentally resets options.
    """
    global OPTS
    OPTS = {
        'source': 'qq netease kugou baidu xiami',  # -s --source: providers to query
        'count': 5,                                # -c --count: results per provider
        'outdir': '.',                             # -o --outdir: download directory
        'keyword': '',                             # search keyword
        'verbose': False,                          # show details
        'merge': False,                            # sort and de-duplicate results
        'proxies': None,                           # requests-style proxy mapping
    }
def set_option(opt, value):
    """Set a single global option (see init_option for the valid keys)."""
    OPTS[opt] = value
def get_option(opt):
    """Return the global option's value, or '' if it is unset."""
    return OPTS.get(opt, '')
|
from piece import Piece
from itertools import groupby
from move import Move
class Board(object):
    """Chess board: FEN setup, move input, make/undo moves, and
    check/checkmate/stalemate detection.

    Coordinates are (row, col) with row 0 at the top (Black's back rank),
    matching FEN rank order.  Colors are "W" and "B".
    """

    def __init__(self, size=8):
        # size x size grid of Piece or None.
        self.board = [[None]*size for _ in range(size)]
        self.size = size
        # Castling rights per color: "k" = kingside, "q" = queenside.
        self.castling = {
            "W": {"k": False, "q": False},
            "B": {"k": False, "q": False},
        }
        # (row, col) of the current en-passant target square, if any.
        self.enpassant = None
        # Move history for undo_move().
        self.moves = []

    def within_boundaries(self, x, y):
        """True if (x, y) is on the board."""
        return 0 <= x < self.size and 0 <= y < self.size

    def setup_fen(self, position_fen):
        """Populate the board from a FEN string.

        Reads placement, side to move, castling rights, and the en-passant
        square; the halfmove/fullmove fields are ignored.
        """
        position, turn, castling, enpassant, _, _ = position_fen.split(" ")
        if enpassant != "-":
            # Convert algebraic square (e.g. "e3") to (row, col).
            y, x = list(enpassant.lower())
            y = ord(y) - ord('a')
            x = self.size - int(x)
            self.enpassant = (x, y)
        for char in castling:
            if char == "K":
                self.castling["W"]["k"] = True
            elif char == "Q":
                self.castling["W"]["q"] = True
            elif char == "k":
                self.castling["B"]["k"] = True
            elif char == "q":
                self.castling["B"]["q"] = True
        self.current_player = turn.upper()
        for y, row in enumerate(position.split("/")):
            # Split the rank into runs of digits (empty squares) and letters.
            grouped_alpha = ["".join(items) for _, items in groupby(
                row, lambda x: str.isdigit(x))]
            group = []
            for item in grouped_alpha:
                if item.isdigit():
                    group.append(item)
                else:
                    group.extend(list(item))
            x = 0
            for item in group:
                if item.isdigit():
                    # A digit means that many consecutive empty squares.
                    x += int(item)
                elif item.isalpha():
                    # Lowercase letters are Black pieces, uppercase White.
                    if item.islower():
                        self.board[y][x] = Piece(item, "B")
                    else:
                        self.board[y][x] = Piece(item.lower(), "W")
                    x += 1

    def get_pieces(self, player=None):
        """Return the (row, col) positions of all of *player*'s pieces."""
        if not player:
            player = self.current_player
        player_pieces = []
        for x, row in enumerate(self.board):
            for y, piece in enumerate(row):
                if piece:
                    if piece.color == player:
                        player_pieces.append((x, y))
        return player_pieces

    def get_pinned_piece_positions(self, player=None):
        """Return positions of *player*'s pieces that are absolutely pinned.

        A piece is pinned if temporarily removing it increases the number
        of opponent attacks reaching the king.
        """
        if not player:
            player = self.current_player
        player_pieces = self.get_pieces(player)
        opponent_pieces = self.get_pieces("W" if player == "B" else "B")
        king_position = self.find_piece("k", player)
        # Baseline: how many opponent pieces currently hit the king.
        check_count = 0
        for opponent_position in opponent_pieces:
            for move_x, move_y in Piece.path(self, opponent_position):
                if (move_x, move_y) == king_position:
                    check_count += 1
        pinned_pieces = set()
        for x, y in player_pieces:
            if self.board[x][y].type == "k":
                continue
            # Lift the piece off the board and re-count attacks on the king.
            removed_piece = self.board[x][y]
            self.board[x][y] = None
            check_count_next = 0
            for opponent_position in opponent_pieces:
                for move_x, move_y in Piece.path(self, opponent_position):
                    if (move_x, move_y) == king_position:
                        check_count_next += 1
            if check_count_next > check_count:
                pinned_pieces.add((x, y))
            self.board[x][y] = removed_piece
        return pinned_pieces

    def print_board(self):
        """Print an ASCII rendering of the board ('.' = empty square)."""
        for row in self.board:
            fmt = ""
            for piece in row:
                if piece:
                    fmt += f" {str(piece)} "
                else:
                    fmt += " . "
            print(fmt)

    def switch_player(self):
        """Toggle the side to move."""
        self.current_player = "W" if self.current_player == "B" else "B"

    def input_move(self):
        """Interactively ask for a move.

        Returns ((x, y), (x2, y2), promotion_piece) where promotion_piece
        is None unless a pawn reaches the last rank.
        """
        x, y = None, None
        while True:
            input_position = input(
                f"{self.current_player}:Select piece to move (example: 1,2 for Black Pawn): ")
            x, y = map(int, input_position.split(","))
            # Must be the mover's own piece and have at least one legal move.
            if self.board[x][y] and self.board[x][y].color == self.current_player and self.board[x][y].moves(self, (x, y)):
                break
            else:
                print("Not a valid piece to move.")
        print(f"{self.board[x][y].type} selected")
        options = list(self.board[x][y].moves(self, (x, y)))
        print(f"your options are: {options}")
        x2, y2 = None, None
        while True:
            # The user picks a 1-based index into the options list.
            to = input(
                f"{self.current_player}:choose from list where you want to place it:")
            if int(to) >= 1 and int(to) <= len(options):
                x2, y2 = options[int(to)-1]
                break
            else:
                print("Not a valid option")
        promotion_piece = None
        # Pawn reaching either back rank must promote.
        if self.board[x][y].type == "p" and (x2 == 0 or x2 == self.size - 1):
            while True:
                choices = ("q", "r", "b", "n")
                promotion_piece = input("Enter which piece to promote to:")
                if promotion_piece in choices:
                    break
                else:
                    print("Enter correct promotion piece:")
        return ((x, y), (x2, y2), promotion_piece)

    def undo_move(self):
        """Revert the most recent move, restoring castling rights,
        the en-passant square, moved pieces and any captures."""
        move = self.moves.pop()
        # delta_castling records rights that were *turned off* by the move.
        for player, rights in move.delta_castling.items():
            for side, right in rights.items():
                self.castling[player][side] = not right
        self.enpassant = move.delta_enpassant_square
        for piece, (x, y), (x2, y2) in move.moved_pieces:
            if move.promotion_piece:
                # Demote the promoted piece back to a pawn.
                self.board[x][y] = self.board[x2][y2]
                self.board[x][y].type = "p"
                self.board[x2][y2] = None
            else:
                self.board[x][y] = self.board[x2][y2]
                self.board[x2][y2] = None
        for piece, (x, y) in move.captured_pieces:
            self.board[x][y] = piece

    def make_move(self, initial_position, final_position, promotion_piece):
        """Execute a (pre-validated) move, handling castling, en passant and
        promotion, and record everything needed for undo_move()."""
        move = Move()
        x, y = initial_position
        x2, y2 = final_position
        move.moved_pieces.append([self.board[x][y], (x, y), (x2, y2)])
        # Any king move forfeits both castling rights.
        if self.board[x][y].type == "k":
            if (self.castling[self.board[x][y].color]["k"]):
                move.delta_castling[self.board[x][y].color]["k"] = False
            if self.castling[self.board[x][y].color]["q"]:
                move.delta_castling[self.board[x][y].color]["q"] = False
            self.castling[self.board[x][y].color]["k"] = False
            self.castling[self.board[x][y].color]["q"] = False
        # A rook leaving its home square forfeits that side's right.
        if self.board[x][y].type == 'r':
            rook_initial_positions = {
                "W": {
                    "q": (self.size - 1, 0),
                    "k": (self.size-1, self.size-1)
                },
                "B": {
                    "q": (0, 0),
                    "k": (0, self.size - 1)
                }
            }[self.board[x][y].color]
            for side, rook_position in rook_initial_positions.items():
                if (x, y) == rook_position:
                    if self.castling[self.board[x][y].color][side]:
                        move.delta_castling[self.board[x]
                                            [y].color][side] = False
                    self.castling[self.board[x][y].color][side] = False
        if self.board[x][y].type == 'k':
            if abs(y2-y) == 2:
                # Castling: move the king two squares and hop the rook over.
                self.board[x2][y2] = self.board[x][y]
                self.board[x][y] = None
                direction = (y2 - y)//2
                rook_y = self.size-1 if direction > 0 else 0
                rook_y2 = y2 - direction
                self.board[x2][rook_y2] = self.board[x2][rook_y]
                self.board[x2][rook_y] = None
                move.moved_pieces.append(
                    [self.board[x2][rook_y2], (x2, rook_y), (x2, rook_y2)])
            else:
                if self.board[x2][y2]:
                    move.captured_pieces.append([self.board[x2][y2], (x2, y2)])
                self.board[x2][y2] = self.board[x][y]
                self.board[x][y] = None
        elif self.board[x][y].type == "p" and self.enpassant == (x2, y2):
            # En passant: the captured pawn sits beside the destination.
            self.board[x2][y2] = self.board[x][y]
            self.board[x][y] = None
            move.captured_pieces.append([self.board[x][y2], (x, y2)])
            self.board[x][y2] = None
        else:
            if self.board[x2][y2]:
                move.captured_pieces.append([self.board[x2][y2], (x2, y2)])
            self.board[x2][y2] = self.board[x][y]
            self.board[x][y] = None
        if promotion_piece:
            self.board[x2][y2].type = promotion_piece
            move.promotion_piece = promotion_piece
        # A pawn double push opens an en-passant square behind it.
        if self.board[x2][y2].type == "p" and abs(x2 - x) == 2:
            move.delta_enpassant_square = self.enpassant
            self.enpassant = ((x2 + (x-x2)//2, y))
        else:
            if self.enpassant:
                move.delta_enpassant_square = self.enpassant
            self.enpassant = None
        self.moves.append(move)

    def find_piece(self, type_of_piece, color_of_piece):
        """Return the (row, col) of the first matching piece, or None."""
        for x in range(self.size):
            for y in range(self.size):
                piece = self.board[x][y]
                if piece and piece.type == type_of_piece and piece.color == color_of_piece:
                    return(x, y)

    def is_check(self, player=None):
        """True if *player*'s king is attacked."""
        if not player:
            player = self.current_player
        king_position = self.find_piece("k", player)
        return king_position in self.get_attacking_squares("W" if player == "B" else "B")

    def cannot_move(self, player=None):
        """True if *player* has no legal move with any piece."""
        for x, y in self.get_pieces(player):
            if Piece.moves(self, (x, y)):
                return False
        return True

    def is_stalemate(self, player=None):
        """No legal moves while not in check."""
        if not player:
            player = self.current_player
        return self.cannot_move(player) and not self.is_check(player)

    def is_checkmate(self, player=None):
        """No legal moves while in check."""
        if not player:
            player = self.current_player
        return self.cannot_move(player) and self.is_check(player)

    def get_attacking_squares(self, player=None):
        """Return the set of squares attacked by *player*.

        Pawns are special-cased because they attack diagonally but do not
        attack the square they move to.
        """
        if not player:
            player = self.current_player
        pieces = self.get_pieces(player)
        attacking_squares = set()
        for piece_position in pieces:
            x, y = piece_position
            if self.board[x][y].type == "p":
                direction = -1 if self.board[x][y].color == "W" else +1
                if self.within_boundaries(x+direction, y-1):
                    attacking_squares.add((x+direction, y-1))
                if self.within_boundaries(x+direction, y+1):
                    attacking_squares.add((x+direction, y+1))
            else:
                attacking_squares.update(Piece.attack(self, piece_position))
        return attacking_squares
if __name__ == "__main__":
    board = Board()
    # board.setup_fen("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
    # King-and-pawn endgame position used for quick manual testing.
    board.setup_fen("8/2k3P1/8/8/2K5/8/8/8 w - - 0 1")
    board.print_board()
    # Interactive game loop: alternate players until checkmate.
    while not board.is_checkmate():
        print(f"Pinned pieces: {board.get_pinned_piece_positions()}")
        board.make_move(*board.input_move())
        board.switch_player()
        board.print_board()
    print("Game Over")
|
import frappe
from frappe.utils import now
def create_gl_entry(doc_type, account, dr, cr, party=None):
    """Insert a single GL Entry document for the given voucher.

    Falls back to the voucher's own party when none is supplied.
    """
    entry_fields = {
        'doctype': 'GL Entry',
        'voucher_type': doc_type.doctype,
        'voucher_no': doc_type.name,
        'posting_date': doc_type.posting_date,
        'account': account,
        'debit': dr,
        'credit': cr,
        'party': party if party else doc_type.party,
    }
    frappe.get_doc(entry_fields).insert()
def create_revere_gl_entry(voucher_type, voucher_no):
    """Cancel a voucher's GL entries and insert reversing entries.

    Marks all live entries of the voucher as cancelled, then inserts a
    mirror entry (debit and credit swapped) for each of them.
    """
    filters = {
        'voucher_type': voucher_type,
        'voucher_no': voucher_no,
    }
    gl_entries = frappe.get_all('GL Entry', filters=filters, fields=['*'])
    # Nothing to reverse; the original indexed gl_entries[0] unconditionally
    # and raised IndexError for vouchers without entries.
    if not gl_entries:
        return
    # Use the function arguments directly -- they are the same values the
    # entries were filtered by, without relying on the first row.
    frappe.db.sql("""
    UPDATE
        `tabGL Entry`
    SET
        is_cancelled = 1,
        modified=%s,
        modified_by=%s
    WHERE
        voucher_type=%s and
        voucher_no=%s and
        is_cancelled = 0
    """, (now(), frappe.session.user, voucher_type, voucher_no))
    for gl_entry in gl_entries:
        # NOTE(review): create_gl_entry writes 'debit'/'credit' while this
        # reads 'debit_amount'/'credit_amount' -- verify the field names
        # against the GL Entry doctype.
        debit_amount = gl_entry.debit_amount
        credit_amount = gl_entry.credit_amount
        gl_entry.name = None  # force a new document name on insert
        gl_entry.debit_amount = credit_amount
        gl_entry.credit_amount = debit_amount
        gl_entry.remarks = 'Cancelled'
        gl_entry.is_cancelled = 1
        new_gl_entry = frappe.new_doc('GL Entry')
        new_gl_entry.update(gl_entry)
        new_gl_entry.insert()
        new_gl_entry.submit()
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import UUID
from polyaxon.polyflow import V1RunKind
def get_fxt_templated_pipeline_with_upstream_run(run_uuid: UUID):
    """Fixture: a DAG operation whose 'run' op consumes an image output from
    its sibling 'build' op and an int output from a previous run
    (``runs.<run_uuid>``)."""
    return {
        "version": 1.1,
        "kind": "operation",
        "component": {
            "name": "test-build-run",
            "description": "testing a build and run pipeline",
            "tags": ["backend", "native"],
            "run": {
                "kind": V1RunKind.DAG,
                "operations": [
                    {
                        "dagRef": "build-template",
                        "name": "build",
                        "params": {
                            "env_vars": {
                                "value": [["env1", "value1"], ["env2", "value2"]]
                            }
                        },
                    },
                    {
                        "dagRef": "experiment-template",
                        "name": "run",
                        "dependencies": ["build"],
                        "params": {
                            "image": {
                                "ref": "ops.build",
                                "value": "outputs.docker-image",
                            },
                            "lr": {"value": 0.001},
                            "some-run": {
                                "value": "outputs.some-int",
                                "ref": "runs.{}".format(run_uuid.hex),
                            },
                        },
                    },
                ],
                "components": [
                    {
                        "name": "experiment-template",
                        "description": "experiment to predict something",
                        "tags": ["key", "value"],
                        "inputs": [
                            {
                                "name": "lr",
                                "type": "float",
                                "value": 0.1,
                                "isOptional": True,
                            },
                            {"name": "image", "type": "str"},
                            {"name": "some-run", "type": "int"},
                        ],
                        "termination": {"maxRetries": 2},
                        "run": {
                            "kind": V1RunKind.JOB,
                            "environment": {
                                "nodeSelector": {"polyaxon": "experiments"},
                                "serviceAccountName": "service",
                                "imagePullSecrets": ["secret1", "secret2"],
                            },
                            "container": {
                                "image": "{{ image }}",
                                "command": ["python3", "main.py"],
                                "args": "--lr={{ lr }}",
                                "resources": {"requests": {"cpu": 1}},
                            },
                        },
                    },
                    {
                        "name": "build-template",
                        "description": "build images",
                        "tags": ["backend", "kaniko"],
                        "inputs": [
                            {"name": "env_vars", "type": "list", "isList": "true"}
                        ],
                        "outputs": [{"name": "docker-image", "type": "str"}],
                        "termination": {"maxRetries": 2},
                        "run": {
                            "kind": V1RunKind.JOB,
                            "environment": {
                                "nodeSelector": {"polyaxon": "experiments"},
                                "serviceAccountName": "service",
                                "imagePullSecrets": ["secret1", "secret2"],
                            },
                            "container": {
                                "image": "base",
                                "resources": {"requests": {"cpu": 1}},
                            },
                            "init": [
                                {
                                    "dockerfile": {
                                        "image": "base",
                                        "env": "{{ env_vars }}",
                                    }
                                }
                            ],
                        },
                    },
                ],
            },
        },
    }
def get_fxt_build_run_pipeline():
    """Fixture: minimal two-step DAG (build A -> job B) with inline components."""
    return {
        "version": 1.1,
        "kind": "operation",
        "dependencies": ["foo", "bar"],
        "trigger": "all_succeeded",
        "component": {
            "name": "build_run_pipeline",
            "tags": ["foo", "bar"],
            "description": "testing a build and run pipeline",
            "run": {
                "kind": V1RunKind.DAG,
                "operations": [
                    {"dagRef": "build-template", "name": "A"},
                    {"dagRef": "job-template", "name": "B", "dependencies": ["A"]},
                ],
                "components": [
                    {
                        "name": "job-template",
                        "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
                    },
                    {
                        "name": "build-template",
                        "tags": ["backend", "kaniko"],
                        "run": {
                            "kind": V1RunKind.JOB,
                            "container": {"image": "test"},
                            "init": [{"connection": "foo", "git": {"revision": "dev"}}],
                        },
                    },
                ],
            },
        },
    }
def get_fxt_build_run_pipeline_with_inputs():
    """Fixture: the build/run DAG with component-level str inputs bound via
    operation params."""
    return {
        "version": 1.1,
        "kind": "operation",
        "dependencies": ["foo", "bar"],
        "params": {"param1": {"value": "foo"}, "param2": {"value": "bar"}},
        "trigger": "all_succeeded",
        "component": {
            "name": "my-pipe-test",
            "description": "testing a pipe",
            "tags": ["key", "value"],
            "inputs": [
                {"name": "param1", "type": "str"},
                {"name": "param2", "type": "str"},
            ],
            "run": {
                "kind": V1RunKind.DAG,
                "operations": [
                    {"dagRef": "build-template", "name": "A"},
                    {"dagRef": "job-template", "name": "B", "dependencies": ["A"]},
                ],
                "components": [
                    {
                        "name": "job-template",
                        "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
                    },
                    {
                        "name": "build-template",
                        "tags": ["backend", "kaniko"],
                        "run": {
                            "kind": V1RunKind.JOB,
                            "container": {"image": "test"},
                            "init": [{"connection": "foo", "git": {"revision": "dev"}}],
                        },
                    },
                ],
            },
        },
    }
def get_fxt_pipeline_params_env_termination():
    """Return a DAG operation fixture exercising params, env, and termination.

    The operation patches the DAG run with an environment section, sets a
    top-level termination policy, and forwards its two pipeline params into
    the inner operations via ``dag`` refs; each inner component carries its
    own termination settings so override/merge behavior can be tested.
    """
    # Inner operations: B depends on A; each pulls one pipeline param.
    op_a = {
        "dagRef": "build-template",
        "name": "A",
        "params": {
            "param2": {"ref": "dag", "value": "inputs.pipe_param2"}
        },
    }
    op_b = {
        "dagRef": "job-template",
        "name": "B",
        "dependencies": ["A"],
        "params": {
            "param1": {"ref": "dag", "value": "inputs.pipe_param1"}
        },
        "termination": {"maxRetries": 3},
        "runPatch": {"kind": V1RunKind.JOB},
    }
    # Components referenced above; both declare their own termination.
    job_component = {
        "name": "job-template",
        "inputs": [{"name": "param1", "type": "str"}],
        "termination": {"maxRetries": 1},
        "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
    }
    build_component = {
        "name": "build-template",
        "tags": ["backend", "kaniko"],
        "inputs": [{"name": "param2", "type": "str"}],
        "termination": {"maxRetries": 1},
        "run": {
            "kind": V1RunKind.JOB,
            "container": {"image": "test"},
            "init": [{"connection": "foo", "git": {"revision": "dev"}}],
        },
    }
    return {
        "version": 1.1,
        "name": "params_env_termination",
        "kind": "operation",
        "dependencies": ["foo", "bar"],
        "trigger": "all_succeeded",
        "params": {"pipe_param1": {"value": "foo"}, "pipe_param2": {"value": "bar"}},
        "termination": {"maxRetries": 2},
        "runPatch": {
            "kind": V1RunKind.DAG,
            "environment": {
                "nodeSelector": {"polyaxon": "experiments"},
                "serviceAccountName": "service",
                "imagePullSecrets": ["secret1", "secret2"],
            },
        },
        "component": {
            "inputs": [
                {"name": "pipe_param1", "type": "str"},
                {"name": "pipe_param2", "type": "str"},
            ],
            "run": {
                "kind": V1RunKind.DAG,
                "operations": [op_a, op_b],
                "components": [job_component, build_component],
            },
        },
    }
|
from ronglian_sms_sdk import SmsSDK
from celery_tasks.main import celery_app
from . import constants
# SECURITY NOTE(review): live-looking Ronglian (容联云) SMS credentials are
# hard-coded in source. They should be moved to environment variables or a
# settings module kept out of version control, and the exposed values rotated.
accId = '8aaf0708732220a60173b6f78b5542a6'  # Ronglian account SID
accToken = '02cfd485898847cca52798061c8703cf'  # Ronglian auth token (secret)
appId = '8a216da87ce04099017cfac0226105b4'  # Ronglian application ID
# Register as a named Celery task so the worker can discover and execute it
# asynchronously (bind=True gives access to ``self`` for retrying).
@celery_app.task(bind=True, name='send_sms_code', retry_backoff=3)
def send_sms_code(self, mobile, sms_code):
    """Send an SMS verification code to *mobile* via the Ronglian SMS SDK.

    The template receives the code and its validity window in minutes.
    On any provider failure the task is retried (up to 3 attempts, with
    exponential backoff configured on the task decorator).
    """
    try:
        sms_client = SmsSDK(accId, accToken, appId)
        # Template expects the validity period expressed in minutes.
        valid_minutes = constants.SEND_SMS_CODE_INTERVAL // 60
        result = sms_client.sendMessage(
            tid=constants.SEND_SMS_TEMPLATE_ID,
            mobile=mobile,
            datas=[sms_code, valid_minutes],
        )
    except Exception as exc:
        raise self.retry(exc=exc, max_retries=3)
    return result
from __future__ import unicode_literals
import django
from django.test import TestCase
from node.resources.exceptions import InvalidResources
from node.resources.json.resources import Resources
class TestResources(TestCase):
    """Unit tests for validation performed in ``Resources.__init__``."""

    def setUp(self):
        django.setup()

    def test_successful_validation(self):
        """Valid configurations construct without raising."""
        # Minimal acceptable configuration: no arguments at all.
        Resources()
        # A couple of numeric resources; the default schema version applies.
        resources = Resources({'resources': {'foo': 1.0, 'cpus': 2}})
        self.assertEqual(resources.get_dict()['version'], '1.0')

    def test_invalid_resources(self):
        """Blank or non-numeric resource values raise InvalidResources."""
        for bad_value in ('', 'my_string'):
            with self.assertRaises(InvalidResources):
                Resources({'resources': {'foo': bad_value}})
|
""" Font metrics for the Adobe core 14 fonts.
Font metrics are used to compute the boundary of each character
written with a proportional font.
The following data were extracted from the AFM files:
http://www.ctan.org/tex-archive/fonts/adobe/afm/
"""
### BEGIN Verbatim copy of the license part
#
# Adobe Core 35 AFM Files with 314 Glyph Entries - ReadMe
#
# This file and the 35 PostScript(R) AFM files it accompanies may be
# used, copied, and distributed for any purpose and without charge,
# with or without modification, provided that all copyright notices
# are retained; that the AFM files are not distributed without this
# file; that all modifications to this file or any of the AFM files
# are prominently noted in the modified file(s); and that this
# paragraph is not modified. Adobe Systems has no responsibility or
# obligation to support the use of the AFM files.
#
### END Verbatim copy of the license part
FONT_METRICS = {
'Courier': ({'FontName': 'Courier', 'Descent': -194.0, 'FontBBox': (-6.0, -249.0, 639.0, 803.0), 'FontWeight': 'Medium', 'CapHeight': 572.0, 'FontFamily': 'Courier', 'Flags': 64, 'XHeight': 434.0, 'ItalicAngle': 0.0, 'Ascent': 627.0}, {' ': 600, '!': 600, '"': 600, '#': 600, '$': 600, '%': 600, '&': 600, u"'": 600, '(': 600, ')': 600, '*': 600, '+': 600, ',': 600, '-': 600, '.': 600, '/': 600, '0': 600, '1': 600, '2': 600, '3': 600, '4': 600, '5': 600, '6': 600, '7': 600, '8': 600, '9': 600, ':': 600, ';': 600, '<': 600, '=': 600, '>': 600, '?': 600, '@': 600, 'A': 600, 'B': 600, 'C': 600, 'D': 600, 'E': 600, 'F': 600, 'G': 600, 'H': 600, 'I': 600, 'J': 600, 'K': 600, 'L': 600, 'M': 600, 'N': 600, 'O': 600, 'P': 600, 'Q': 600, 'R': 600, 'S': 600, 'T': 600, 'U': 600, 'V': 600, 'W': 600, 'X': 600, 'Y': 600, 'Z': 600, '[': 600, '\\': 600, ']': 600, '^': 600, '_': 600, '`': 600, 'a': 600, 'b': 600, 'c': 600, 'd': 600, 'e': 600, 'f': 600, 'g': 600, 'h': 600, 'i': 600, 'j': 600, 'k': 600, 'l': 600, 'm': 600, 'n': 600, 'o': 600, 'p': 600, 'q': 600, 'r': 600, 's': 600, 't': 600, 'u': 600, 'v': 600, 'w': 600, 'x': 600, 'y': 600, 'z': 600, '{': 600, '|': 600, '}': 600, '~': 600, '\xa1': 600, '\xa2': 600, '\xa3': 600, '\xa4': 600, '\xa5': 600, '\xa6': 600, '\xa7': 600, '\xa8': 600, '\xa9': 600, '\xaa': 600, '\xab': 600, '\xac': 600, '\xae': 600, '\xaf': 600, '\xb0': 600, '\xb1': 600, '\xb2': 600, '\xb3': 600, '\xb4': 600, '\xb5': 600, '\xb6': 600, '\xb7': 600, '\xb8': 600, '\xb9': 600, '\xba': 600, '\xbb': 600, '\xbc': 600, '\xbd': 600, '\xbe': 600, '\xbf': 600, '\xc0': 600, '\xc1': 600, '\xc2': 600, '\xc3': 600, '\xc4': 600, '\xc5': 600, '\xc6': 600, '\xc7': 600, '\xc8': 600, '\xc9': 600, '\xca': 600, '\xcb': 600, '\xcc': 600, '\xcd': 600, '\xce': 600, '\xcf': 600, '\xd0': 600, '\xd1': 600, '\xd2': 600, '\xd3': 600, '\xd4': 600, '\xd5': 600, '\xd6': 600, '\xd7': 600, '\xd8': 600, '\xd9': 600, '\xda': 600, '\xdb': 600, '\xdc': 600, '\xdd': 600, '\xde': 600, '\xdf': 600, 
'\xe0': 600, '\xe1': 600, '\xe2': 600, '\xe3': 600, '\xe4': 600, '\xe5': 600, '\xe6': 600, '\xe7': 600, '\xe8': 600, '\xe9': 600, '\xea': 600, '\xeb': 600, '\xec': 600, '\xed': 600, '\xee': 600, '\xef': 600, '\xf0': 600, '\xf1': 600, '\xf2': 600, '\xf3': 600, '\xf4': 600, '\xf5': 600, '\xf6': 600, '\xf7': 600, '\xf8': 600, '\xf9': 600, '\xfa': 600, '\xfb': 600, '\xfc': 600, '\xfd': 600, '\xfe': 600, '\xff': 600, '\u0100': 600, '\u0101': 600, '\u0102': 600, '\u0103': 600, '\u0104': 600, '\u0105': 600, '\u0106': 600, '\u0107': 600, '\u010c': 600, '\u010d': 600, '\u010e': 600, '\u010f': 600, '\u0110': 600, '\u0111': 600, '\u0112': 600, '\u0113': 600, '\u0116': 600, '\u0117': 600, '\u0118': 600, '\u0119': 600, '\u011a': 600, '\u011b': 600, '\u011e': 600, '\u011f': 600, '\u0122': 600, '\u0123': 600, '\u012a': 600, '\u012b': 600, '\u012e': 600, '\u012f': 600, '\u0130': 600, '\u0131': 600, '\u0136': 600, '\u0137': 600, '\u0139': 600, '\u013a': 600, '\u013b': 600, '\u013c': 600, '\u013d': 600, '\u013e': 600, '\u0141': 600, '\u0142': 600, '\u0143': 600, '\u0144': 600, '\u0145': 600, '\u0146': 600, '\u0147': 600, '\u0148': 600, '\u014c': 600, '\u014d': 600, '\u0150': 600, '\u0151': 600, '\u0152': 600, '\u0153': 600, '\u0154': 600, '\u0155': 600, '\u0156': 600, '\u0157': 600, '\u0158': 600, '\u0159': 600, '\u015a': 600, '\u015b': 600, '\u015e': 600, '\u015f': 600, '\u0160': 600, '\u0161': 600, '\u0162': 600, '\u0163': 600, '\u0164': 600, '\u0165': 600, '\u016a': 600, '\u016b': 600, '\u016e': 600, '\u016f': 600, '\u0170': 600, '\u0171': 600, '\u0172': 600, '\u0173': 600, '\u0178': 600, '\u0179': 600, '\u017a': 600, '\u017b': 600, '\u017c': 600, '\u017d': 600, '\u017e': 600, '\u0192': 600, '\u0218': 600, '\u0219': 600, '\u02c6': 600, '\u02c7': 600, '\u02d8': 600, '\u02d9': 600, '\u02da': 600, '\u02db': 600, '\u02dc': 600, '\u02dd': 600, '\u2013': 600, '\u2014': 600, '\u2018': 600, '\u2019': 600, '\u201a': 600, '\u201c': 600, '\u201d': 600, '\u201e': 600, '\u2020': 600, 
'\u2021': 600, '\u2022': 600, '\u2026': 600, '\u2030': 600, '\u2039': 600, '\u203a': 600, '\u2044': 600, '\u2122': 600, '\u2202': 600, '\u2206': 600, '\u2211': 600, '\u2212': 600, '\u221a': 600, '\u2260': 600, '\u2264': 600, '\u2265': 600, '\u25ca': 600, '\uf6c3': 600, '\ufb01': 600, '\ufb02': 600}),
'Courier-Bold': ({'FontName': 'Courier-Bold', 'Descent': -194.0, 'FontBBox': (-88.0, -249.0, 697.0, 811.0), 'FontWeight': 'Bold', 'CapHeight': 572.0, 'FontFamily': 'Courier', 'Flags': 64, 'XHeight': 434.0, 'ItalicAngle': 0.0, 'Ascent': 627.0}, {' ': 600, '!': 600, '"': 600, '#': 600, '$': 600, '%': 600, '&': 600, u"'": 600, '(': 600, ')': 600, '*': 600, '+': 600, ',': 600, '-': 600, '.': 600, '/': 600, '0': 600, '1': 600, '2': 600, '3': 600, '4': 600, '5': 600, '6': 600, '7': 600, '8': 600, '9': 600, ':': 600, ';': 600, '<': 600, '=': 600, '>': 600, '?': 600, '@': 600, 'A': 600, 'B': 600, 'C': 600, 'D': 600, 'E': 600, 'F': 600, 'G': 600, 'H': 600, 'I': 600, 'J': 600, 'K': 600, 'L': 600, 'M': 600, 'N': 600, 'O': 600, 'P': 600, 'Q': 600, 'R': 600, 'S': 600, 'T': 600, 'U': 600, 'V': 600, 'W': 600, 'X': 600, 'Y': 600, 'Z': 600, '[': 600, '\\': 600, ']': 600, '^': 600, '_': 600, '`': 600, 'a': 600, 'b': 600, 'c': 600, 'd': 600, 'e': 600, 'f': 600, 'g': 600, 'h': 600, 'i': 600, 'j': 600, 'k': 600, 'l': 600, 'm': 600, 'n': 600, 'o': 600, 'p': 600, 'q': 600, 'r': 600, 's': 600, 't': 600, 'u': 600, 'v': 600, 'w': 600, 'x': 600, 'y': 600, 'z': 600, '{': 600, '|': 600, '}': 600, '~': 600, '\xa1': 600, '\xa2': 600, '\xa3': 600, '\xa4': 600, '\xa5': 600, '\xa6': 600, '\xa7': 600, '\xa8': 600, '\xa9': 600, '\xaa': 600, '\xab': 600, '\xac': 600, '\xae': 600, '\xaf': 600, '\xb0': 600, '\xb1': 600, '\xb2': 600, '\xb3': 600, '\xb4': 600, '\xb5': 600, '\xb6': 600, '\xb7': 600, '\xb8': 600, '\xb9': 600, '\xba': 600, '\xbb': 600, '\xbc': 600, '\xbd': 600, '\xbe': 600, '\xbf': 600, '\xc0': 600, '\xc1': 600, '\xc2': 600, '\xc3': 600, '\xc4': 600, '\xc5': 600, '\xc6': 600, '\xc7': 600, '\xc8': 600, '\xc9': 600, '\xca': 600, '\xcb': 600, '\xcc': 600, '\xcd': 600, '\xce': 600, '\xcf': 600, '\xd0': 600, '\xd1': 600, '\xd2': 600, '\xd3': 600, '\xd4': 600, '\xd5': 600, '\xd6': 600, '\xd7': 600, '\xd8': 600, '\xd9': 600, '\xda': 600, '\xdb': 600, '\xdc': 600, '\xdd': 600, '\xde': 600, '\xdf': 
600, '\xe0': 600, '\xe1': 600, '\xe2': 600, '\xe3': 600, '\xe4': 600, '\xe5': 600, '\xe6': 600, '\xe7': 600, '\xe8': 600, '\xe9': 600, '\xea': 600, '\xeb': 600, '\xec': 600, '\xed': 600, '\xee': 600, '\xef': 600, '\xf0': 600, '\xf1': 600, '\xf2': 600, '\xf3': 600, '\xf4': 600, '\xf5': 600, '\xf6': 600, '\xf7': 600, '\xf8': 600, '\xf9': 600, '\xfa': 600, '\xfb': 600, '\xfc': 600, '\xfd': 600, '\xfe': 600, '\xff': 600, '\u0100': 600, '\u0101': 600, '\u0102': 600, '\u0103': 600, '\u0104': 600, '\u0105': 600, '\u0106': 600, '\u0107': 600, '\u010c': 600, '\u010d': 600, '\u010e': 600, '\u010f': 600, '\u0110': 600, '\u0111': 600, '\u0112': 600, '\u0113': 600, '\u0116': 600, '\u0117': 600, '\u0118': 600, '\u0119': 600, '\u011a': 600, '\u011b': 600, '\u011e': 600, '\u011f': 600, '\u0122': 600, '\u0123': 600, '\u012a': 600, '\u012b': 600, '\u012e': 600, '\u012f': 600, '\u0130': 600, '\u0131': 600, '\u0136': 600, '\u0137': 600, '\u0139': 600, '\u013a': 600, '\u013b': 600, '\u013c': 600, '\u013d': 600, '\u013e': 600, '\u0141': 600, '\u0142': 600, '\u0143': 600, '\u0144': 600, '\u0145': 600, '\u0146': 600, '\u0147': 600, '\u0148': 600, '\u014c': 600, '\u014d': 600, '\u0150': 600, '\u0151': 600, '\u0152': 600, '\u0153': 600, '\u0154': 600, '\u0155': 600, '\u0156': 600, '\u0157': 600, '\u0158': 600, '\u0159': 600, '\u015a': 600, '\u015b': 600, '\u015e': 600, '\u015f': 600, '\u0160': 600, '\u0161': 600, '\u0162': 600, '\u0163': 600, '\u0164': 600, '\u0165': 600, '\u016a': 600, '\u016b': 600, '\u016e': 600, '\u016f': 600, '\u0170': 600, '\u0171': 600, '\u0172': 600, '\u0173': 600, '\u0178': 600, '\u0179': 600, '\u017a': 600, '\u017b': 600, '\u017c': 600, '\u017d': 600, '\u017e': 600, '\u0192': 600, '\u0218': 600, '\u0219': 600, '\u02c6': 600, '\u02c7': 600, '\u02d8': 600, '\u02d9': 600, '\u02da': 600, '\u02db': 600, '\u02dc': 600, '\u02dd': 600, '\u2013': 600, '\u2014': 600, '\u2018': 600, '\u2019': 600, '\u201a': 600, '\u201c': 600, '\u201d': 600, '\u201e': 600, '\u2020': 600, 
'\u2021': 600, '\u2022': 600, '\u2026': 600, '\u2030': 600, '\u2039': 600, '\u203a': 600, '\u2044': 600, '\u2122': 600, '\u2202': 600, '\u2206': 600, '\u2211': 600, '\u2212': 600, '\u221a': 600, '\u2260': 600, '\u2264': 600, '\u2265': 600, '\u25ca': 600, '\uf6c3': 600, '\ufb01': 600, '\ufb02': 600}),
'Courier-BoldOblique': ({'FontName': 'Courier-BoldOblique', 'Descent': -194.0, 'FontBBox': (-49.0, -249.0, 758.0, 811.0), 'FontWeight': 'Bold', 'CapHeight': 572.0, 'FontFamily': 'Courier', 'Flags': 64, 'XHeight': 434.0, 'ItalicAngle': -11.0, 'Ascent': 627.0}, {' ': 600, '!': 600, '"': 600, '#': 600, '$': 600, '%': 600, '&': 600, u"'": 600, '(': 600, ')': 600, '*': 600, '+': 600, ',': 600, '-': 600, '.': 600, '/': 600, '0': 600, '1': 600, '2': 600, '3': 600, '4': 600, '5': 600, '6': 600, '7': 600, '8': 600, '9': 600, ':': 600, ';': 600, '<': 600, '=': 600, '>': 600, '?': 600, '@': 600, 'A': 600, 'B': 600, 'C': 600, 'D': 600, 'E': 600, 'F': 600, 'G': 600, 'H': 600, 'I': 600, 'J': 600, 'K': 600, 'L': 600, 'M': 600, 'N': 600, 'O': 600, 'P': 600, 'Q': 600, 'R': 600, 'S': 600, 'T': 600, 'U': 600, 'V': 600, 'W': 600, 'X': 600, 'Y': 600, 'Z': 600, '[': 600, '\\': 600, ']': 600, '^': 600, '_': 600, '`': 600, 'a': 600, 'b': 600, 'c': 600, 'd': 600, 'e': 600, 'f': 600, 'g': 600, 'h': 600, 'i': 600, 'j': 600, 'k': 600, 'l': 600, 'm': 600, 'n': 600, 'o': 600, 'p': 600, 'q': 600, 'r': 600, 's': 600, 't': 600, 'u': 600, 'v': 600, 'w': 600, 'x': 600, 'y': 600, 'z': 600, '{': 600, '|': 600, '}': 600, '~': 600, '\xa1': 600, '\xa2': 600, '\xa3': 600, '\xa4': 600, '\xa5': 600, '\xa6': 600, '\xa7': 600, '\xa8': 600, '\xa9': 600, '\xaa': 600, '\xab': 600, '\xac': 600, '\xae': 600, '\xaf': 600, '\xb0': 600, '\xb1': 600, '\xb2': 600, '\xb3': 600, '\xb4': 600, '\xb5': 600, '\xb6': 600, '\xb7': 600, '\xb8': 600, '\xb9': 600, '\xba': 600, '\xbb': 600, '\xbc': 600, '\xbd': 600, '\xbe': 600, '\xbf': 600, '\xc0': 600, '\xc1': 600, '\xc2': 600, '\xc3': 600, '\xc4': 600, '\xc5': 600, '\xc6': 600, '\xc7': 600, '\xc8': 600, '\xc9': 600, '\xca': 600, '\xcb': 600, '\xcc': 600, '\xcd': 600, '\xce': 600, '\xcf': 600, '\xd0': 600, '\xd1': 600, '\xd2': 600, '\xd3': 600, '\xd4': 600, '\xd5': 600, '\xd6': 600, '\xd7': 600, '\xd8': 600, '\xd9': 600, '\xda': 600, '\xdb': 600, '\xdc': 600, '\xdd': 600, 
'\xde': 600, '\xdf': 600, '\xe0': 600, '\xe1': 600, '\xe2': 600, '\xe3': 600, '\xe4': 600, '\xe5': 600, '\xe6': 600, '\xe7': 600, '\xe8': 600, '\xe9': 600, '\xea': 600, '\xeb': 600, '\xec': 600, '\xed': 600, '\xee': 600, '\xef': 600, '\xf0': 600, '\xf1': 600, '\xf2': 600, '\xf3': 600, '\xf4': 600, '\xf5': 600, '\xf6': 600, '\xf7': 600, '\xf8': 600, '\xf9': 600, '\xfa': 600, '\xfb': 600, '\xfc': 600, '\xfd': 600, '\xfe': 600, '\xff': 600, '\u0100': 600, '\u0101': 600, '\u0102': 600, '\u0103': 600, '\u0104': 600, '\u0105': 600, '\u0106': 600, '\u0107': 600, '\u010c': 600, '\u010d': 600, '\u010e': 600, '\u010f': 600, '\u0110': 600, '\u0111': 600, '\u0112': 600, '\u0113': 600, '\u0116': 600, '\u0117': 600, '\u0118': 600, '\u0119': 600, '\u011a': 600, '\u011b': 600, '\u011e': 600, '\u011f': 600, '\u0122': 600, '\u0123': 600, '\u012a': 600, '\u012b': 600, '\u012e': 600, '\u012f': 600, '\u0130': 600, '\u0131': 600, '\u0136': 600, '\u0137': 600, '\u0139': 600, '\u013a': 600, '\u013b': 600, '\u013c': 600, '\u013d': 600, '\u013e': 600, '\u0141': 600, '\u0142': 600, '\u0143': 600, '\u0144': 600, '\u0145': 600, '\u0146': 600, '\u0147': 600, '\u0148': 600, '\u014c': 600, '\u014d': 600, '\u0150': 600, '\u0151': 600, '\u0152': 600, '\u0153': 600, '\u0154': 600, '\u0155': 600, '\u0156': 600, '\u0157': 600, '\u0158': 600, '\u0159': 600, '\u015a': 600, '\u015b': 600, '\u015e': 600, '\u015f': 600, '\u0160': 600, '\u0161': 600, '\u0162': 600, '\u0163': 600, '\u0164': 600, '\u0165': 600, '\u016a': 600, '\u016b': 600, '\u016e': 600, '\u016f': 600, '\u0170': 600, '\u0171': 600, '\u0172': 600, '\u0173': 600, '\u0178': 600, '\u0179': 600, '\u017a': 600, '\u017b': 600, '\u017c': 600, '\u017d': 600, '\u017e': 600, '\u0192': 600, '\u0218': 600, '\u0219': 600, '\u02c6': 600, '\u02c7': 600, '\u02d8': 600, '\u02d9': 600, '\u02da': 600, '\u02db': 600, '\u02dc': 600, '\u02dd': 600, '\u2013': 600, '\u2014': 600, '\u2018': 600, '\u2019': 600, '\u201a': 600, '\u201c': 600, '\u201d': 600, '\u201e': 
600, '\u2020': 600, '\u2021': 600, '\u2022': 600, '\u2026': 600, '\u2030': 600, '\u2039': 600, '\u203a': 600, '\u2044': 600, '\u2122': 600, '\u2202': 600, '\u2206': 600, '\u2211': 600, '\u2212': 600, '\u221a': 600, '\u2260': 600, '\u2264': 600, '\u2265': 600, '\u25ca': 600, '\uf6c3': 600, '\ufb01': 600, '\ufb02': 600}),
'Courier-Oblique': ({'FontName': 'Courier-Oblique', 'Descent': -194.0, 'FontBBox': (-49.0, -249.0, 749.0, 803.0), 'FontWeight': 'Medium', 'CapHeight': 572.0, 'FontFamily': 'Courier', 'Flags': 64, 'XHeight': 434.0, 'ItalicAngle': -11.0, 'Ascent': 627.0}, {' ': 600, '!': 600, '"': 600, '#': 600, '$': 600, '%': 600, '&': 600, u"'": 600, '(': 600, ')': 600, '*': 600, '+': 600, ',': 600, '-': 600, '.': 600, '/': 600, '0': 600, '1': 600, '2': 600, '3': 600, '4': 600, '5': 600, '6': 600, '7': 600, '8': 600, '9': 600, ':': 600, ';': 600, '<': 600, '=': 600, '>': 600, '?': 600, '@': 600, 'A': 600, 'B': 600, 'C': 600, 'D': 600, 'E': 600, 'F': 600, 'G': 600, 'H': 600, 'I': 600, 'J': 600, 'K': 600, 'L': 600, 'M': 600, 'N': 600, 'O': 600, 'P': 600, 'Q': 600, 'R': 600, 'S': 600, 'T': 600, 'U': 600, 'V': 600, 'W': 600, 'X': 600, 'Y': 600, 'Z': 600, '[': 600, '\\': 600, ']': 600, '^': 600, '_': 600, '`': 600, 'a': 600, 'b': 600, 'c': 600, 'd': 600, 'e': 600, 'f': 600, 'g': 600, 'h': 600, 'i': 600, 'j': 600, 'k': 600, 'l': 600, 'm': 600, 'n': 600, 'o': 600, 'p': 600, 'q': 600, 'r': 600, 's': 600, 't': 600, 'u': 600, 'v': 600, 'w': 600, 'x': 600, 'y': 600, 'z': 600, '{': 600, '|': 600, '}': 600, '~': 600, '\xa1': 600, '\xa2': 600, '\xa3': 600, '\xa4': 600, '\xa5': 600, '\xa6': 600, '\xa7': 600, '\xa8': 600, '\xa9': 600, '\xaa': 600, '\xab': 600, '\xac': 600, '\xae': 600, '\xaf': 600, '\xb0': 600, '\xb1': 600, '\xb2': 600, '\xb3': 600, '\xb4': 600, '\xb5': 600, '\xb6': 600, '\xb7': 600, '\xb8': 600, '\xb9': 600, '\xba': 600, '\xbb': 600, '\xbc': 600, '\xbd': 600, '\xbe': 600, '\xbf': 600, '\xc0': 600, '\xc1': 600, '\xc2': 600, '\xc3': 600, '\xc4': 600, '\xc5': 600, '\xc6': 600, '\xc7': 600, '\xc8': 600, '\xc9': 600, '\xca': 600, '\xcb': 600, '\xcc': 600, '\xcd': 600, '\xce': 600, '\xcf': 600, '\xd0': 600, '\xd1': 600, '\xd2': 600, '\xd3': 600, '\xd4': 600, '\xd5': 600, '\xd6': 600, '\xd7': 600, '\xd8': 600, '\xd9': 600, '\xda': 600, '\xdb': 600, '\xdc': 600, '\xdd': 600, '\xde': 600, 
'\xdf': 600, '\xe0': 600, '\xe1': 600, '\xe2': 600, '\xe3': 600, '\xe4': 600, '\xe5': 600, '\xe6': 600, '\xe7': 600, '\xe8': 600, '\xe9': 600, '\xea': 600, '\xeb': 600, '\xec': 600, '\xed': 600, '\xee': 600, '\xef': 600, '\xf0': 600, '\xf1': 600, '\xf2': 600, '\xf3': 600, '\xf4': 600, '\xf5': 600, '\xf6': 600, '\xf7': 600, '\xf8': 600, '\xf9': 600, '\xfa': 600, '\xfb': 600, '\xfc': 600, '\xfd': 600, '\xfe': 600, '\xff': 600, '\u0100': 600, '\u0101': 600, '\u0102': 600, '\u0103': 600, '\u0104': 600, '\u0105': 600, '\u0106': 600, '\u0107': 600, '\u010c': 600, '\u010d': 600, '\u010e': 600, '\u010f': 600, '\u0110': 600, '\u0111': 600, '\u0112': 600, '\u0113': 600, '\u0116': 600, '\u0117': 600, '\u0118': 600, '\u0119': 600, '\u011a': 600, '\u011b': 600, '\u011e': 600, '\u011f': 600, '\u0122': 600, '\u0123': 600, '\u012a': 600, '\u012b': 600, '\u012e': 600, '\u012f': 600, '\u0130': 600, '\u0131': 600, '\u0136': 600, '\u0137': 600, '\u0139': 600, '\u013a': 600, '\u013b': 600, '\u013c': 600, '\u013d': 600, '\u013e': 600, '\u0141': 600, '\u0142': 600, '\u0143': 600, '\u0144': 600, '\u0145': 600, '\u0146': 600, '\u0147': 600, '\u0148': 600, '\u014c': 600, '\u014d': 600, '\u0150': 600, '\u0151': 600, '\u0152': 600, '\u0153': 600, '\u0154': 600, '\u0155': 600, '\u0156': 600, '\u0157': 600, '\u0158': 600, '\u0159': 600, '\u015a': 600, '\u015b': 600, '\u015e': 600, '\u015f': 600, '\u0160': 600, '\u0161': 600, '\u0162': 600, '\u0163': 600, '\u0164': 600, '\u0165': 600, '\u016a': 600, '\u016b': 600, '\u016e': 600, '\u016f': 600, '\u0170': 600, '\u0171': 600, '\u0172': 600, '\u0173': 600, '\u0178': 600, '\u0179': 600, '\u017a': 600, '\u017b': 600, '\u017c': 600, '\u017d': 600, '\u017e': 600, '\u0192': 600, '\u0218': 600, '\u0219': 600, '\u02c6': 600, '\u02c7': 600, '\u02d8': 600, '\u02d9': 600, '\u02da': 600, '\u02db': 600, '\u02dc': 600, '\u02dd': 600, '\u2013': 600, '\u2014': 600, '\u2018': 600, '\u2019': 600, '\u201a': 600, '\u201c': 600, '\u201d': 600, '\u201e': 600, '\u2020': 
600, '\u2021': 600, '\u2022': 600, '\u2026': 600, '\u2030': 600, '\u2039': 600, '\u203a': 600, '\u2044': 600, '\u2122': 600, '\u2202': 600, '\u2206': 600, '\u2211': 600, '\u2212': 600, '\u221a': 600, '\u2260': 600, '\u2264': 600, '\u2265': 600, '\u25ca': 600, '\uf6c3': 600, '\ufb01': 600, '\ufb02': 600}),
'Helvetica': ({'FontName': 'Helvetica', 'Descent': -207.0, 'FontBBox': (-166.0, -225.0, 1000.0, 931.0), 'FontWeight': 'Medium', 'CapHeight': 718.0, 'FontFamily': 'Helvetica', 'Flags': 0, 'XHeight': 523.0, 'ItalicAngle': 0.0, 'Ascent': 718.0}, {' ': 278, '!': 278, '"': 355, '#': 556, '$': 556, '%': 889, '&': 667, u"'": 191, '(': 333, ')': 333, '*': 389, '+': 584, ',': 278, '-': 333, '.': 278, '/': 278, '0': 556, '1': 556, '2': 556, '3': 556, '4': 556, '5': 556, '6': 556, '7': 556, '8': 556, '9': 556, ':': 278, ';': 278, '<': 584, '=': 584, '>': 584, '?': 556, '@': 1015, 'A': 667, 'B': 667, 'C': 722, 'D': 722, 'E': 667, 'F': 611, 'G': 778, 'H': 722, 'I': 278, 'J': 500, 'K': 667, 'L': 556, 'M': 833, 'N': 722, 'O': 778, 'P': 667, 'Q': 778, 'R': 722, 'S': 667, 'T': 611, 'U': 722, 'V': 667, 'W': 944, 'X': 667, 'Y': 667, 'Z': 611, '[': 278, '\\': 278, ']': 278, '^': 469, '_': 556, '`': 333, 'a': 556, 'b': 556, 'c': 500, 'd': 556, 'e': 556, 'f': 278, 'g': 556, 'h': 556, 'i': 222, 'j': 222, 'k': 500, 'l': 222, 'm': 833, 'n': 556, 'o': 556, 'p': 556, 'q': 556, 'r': 333, 's': 500, 't': 278, 'u': 556, 'v': 500, 'w': 722, 'x': 500, 'y': 500, 'z': 500, '{': 334, '|': 260, '}': 334, '~': 584, '\xa1': 333, '\xa2': 556, '\xa3': 556, '\xa4': 556, '\xa5': 556, '\xa6': 260, '\xa7': 556, '\xa8': 333, '\xa9': 737, '\xaa': 370, '\xab': 556, '\xac': 584, '\xae': 737, '\xaf': 333, '\xb0': 400, '\xb1': 584, '\xb2': 333, '\xb3': 333, '\xb4': 333, '\xb5': 556, '\xb6': 537, '\xb7': 278, '\xb8': 333, '\xb9': 333, '\xba': 365, '\xbb': 556, '\xbc': 834, '\xbd': 834, '\xbe': 834, '\xbf': 611, '\xc0': 667, '\xc1': 667, '\xc2': 667, '\xc3': 667, '\xc4': 667, '\xc5': 667, '\xc6': 1000, '\xc7': 722, '\xc8': 667, '\xc9': 667, '\xca': 667, '\xcb': 667, '\xcc': 278, '\xcd': 278, '\xce': 278, '\xcf': 278, '\xd0': 722, '\xd1': 722, '\xd2': 778, '\xd3': 778, '\xd4': 778, '\xd5': 778, '\xd6': 778, '\xd7': 584, '\xd8': 778, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 667, '\xde': 667, '\xdf': 
611, '\xe0': 556, '\xe1': 556, '\xe2': 556, '\xe3': 556, '\xe4': 556, '\xe5': 556, '\xe6': 889, '\xe7': 500, '\xe8': 556, '\xe9': 556, '\xea': 556, '\xeb': 556, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 556, '\xf1': 556, '\xf2': 556, '\xf3': 556, '\xf4': 556, '\xf5': 556, '\xf6': 556, '\xf7': 584, '\xf8': 611, '\xf9': 556, '\xfa': 556, '\xfb': 556, '\xfc': 556, '\xfd': 500, '\xfe': 556, '\xff': 500, '\u0100': 667, '\u0101': 556, '\u0102': 667, '\u0103': 556, '\u0104': 667, '\u0105': 556, '\u0106': 722, '\u0107': 500, '\u010c': 722, '\u010d': 500, '\u010e': 722, '\u010f': 643, '\u0110': 722, '\u0111': 556, '\u0112': 667, '\u0113': 556, '\u0116': 667, '\u0117': 556, '\u0118': 667, '\u0119': 556, '\u011a': 667, '\u011b': 556, '\u011e': 778, '\u011f': 556, '\u0122': 778, '\u0123': 556, '\u012a': 278, '\u012b': 278, '\u012e': 278, '\u012f': 222, '\u0130': 278, '\u0131': 278, '\u0136': 667, '\u0137': 500, '\u0139': 556, '\u013a': 222, '\u013b': 556, '\u013c': 222, '\u013d': 556, '\u013e': 299, '\u0141': 556, '\u0142': 222, '\u0143': 722, '\u0144': 556, '\u0145': 722, '\u0146': 556, '\u0147': 722, '\u0148': 556, '\u014c': 778, '\u014d': 556, '\u0150': 778, '\u0151': 556, '\u0152': 1000, '\u0153': 944, '\u0154': 722, '\u0155': 333, '\u0156': 722, '\u0157': 333, '\u0158': 722, '\u0159': 333, '\u015a': 667, '\u015b': 500, '\u015e': 667, '\u015f': 500, '\u0160': 667, '\u0161': 500, '\u0162': 611, '\u0163': 278, '\u0164': 611, '\u0165': 317, '\u016a': 722, '\u016b': 556, '\u016e': 722, '\u016f': 556, '\u0170': 722, '\u0171': 556, '\u0172': 722, '\u0173': 556, '\u0178': 667, '\u0179': 611, '\u017a': 500, '\u017b': 611, '\u017c': 500, '\u017d': 611, '\u017e': 500, '\u0192': 556, '\u0218': 667, '\u0219': 500, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 556, '\u2014': 1000, '\u2018': 222, '\u2019': 222, '\u201a': 222, '\u201c': 333, '\u201d': 333, '\u201e': 333, '\u2020': 556, 
'\u2021': 556, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 1000, '\u2202': 476, '\u2206': 612, '\u2211': 600, '\u2212': 584, '\u221a': 453, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 471, '\uf6c3': 250, '\ufb01': 500, '\ufb02': 500}),
'Helvetica-Bold': ({'FontName': 'Helvetica-Bold', 'Descent': -207.0, 'FontBBox': (-170.0, -228.0, 1003.0, 962.0), 'FontWeight': 'Bold', 'CapHeight': 718.0, 'FontFamily': 'Helvetica', 'Flags': 0, 'XHeight': 532.0, 'ItalicAngle': 0.0, 'Ascent': 718.0}, {' ': 278, '!': 333, '"': 474, '#': 556, '$': 556, '%': 889, '&': 722, u"'": 238, '(': 333, ')': 333, '*': 389, '+': 584, ',': 278, '-': 333, '.': 278, '/': 278, '0': 556, '1': 556, '2': 556, '3': 556, '4': 556, '5': 556, '6': 556, '7': 556, '8': 556, '9': 556, ':': 333, ';': 333, '<': 584, '=': 584, '>': 584, '?': 611, '@': 975, 'A': 722, 'B': 722, 'C': 722, 'D': 722, 'E': 667, 'F': 611, 'G': 778, 'H': 722, 'I': 278, 'J': 556, 'K': 722, 'L': 611, 'M': 833, 'N': 722, 'O': 778, 'P': 667, 'Q': 778, 'R': 722, 'S': 667, 'T': 611, 'U': 722, 'V': 667, 'W': 944, 'X': 667, 'Y': 667, 'Z': 611, '[': 333, '\\': 278, ']': 333, '^': 584, '_': 556, '`': 333, 'a': 556, 'b': 611, 'c': 556, 'd': 611, 'e': 556, 'f': 333, 'g': 611, 'h': 611, 'i': 278, 'j': 278, 'k': 556, 'l': 278, 'm': 889, 'n': 611, 'o': 611, 'p': 611, 'q': 611, 'r': 389, 's': 556, 't': 333, 'u': 611, 'v': 556, 'w': 778, 'x': 556, 'y': 556, 'z': 500, '{': 389, '|': 280, '}': 389, '~': 584, '\xa1': 333, '\xa2': 556, '\xa3': 556, '\xa4': 556, '\xa5': 556, '\xa6': 280, '\xa7': 556, '\xa8': 333, '\xa9': 737, '\xaa': 370, '\xab': 556, '\xac': 584, '\xae': 737, '\xaf': 333, '\xb0': 400, '\xb1': 584, '\xb2': 333, '\xb3': 333, '\xb4': 333, '\xb5': 611, '\xb6': 556, '\xb7': 278, '\xb8': 333, '\xb9': 333, '\xba': 365, '\xbb': 556, '\xbc': 834, '\xbd': 834, '\xbe': 834, '\xbf': 611, '\xc0': 722, '\xc1': 722, '\xc2': 722, '\xc3': 722, '\xc4': 722, '\xc5': 722, '\xc6': 1000, '\xc7': 722, '\xc8': 667, '\xc9': 667, '\xca': 667, '\xcb': 667, '\xcc': 278, '\xcd': 278, '\xce': 278, '\xcf': 278, '\xd0': 722, '\xd1': 722, '\xd2': 778, '\xd3': 778, '\xd4': 778, '\xd5': 778, '\xd6': 778, '\xd7': 584, '\xd8': 778, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 667, '\xde': 667, 
'\xdf': 611, '\xe0': 556, '\xe1': 556, '\xe2': 556, '\xe3': 556, '\xe4': 556, '\xe5': 556, '\xe6': 889, '\xe7': 556, '\xe8': 556, '\xe9': 556, '\xea': 556, '\xeb': 556, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 611, '\xf1': 611, '\xf2': 611, '\xf3': 611, '\xf4': 611, '\xf5': 611, '\xf6': 611, '\xf7': 584, '\xf8': 611, '\xf9': 611, '\xfa': 611, '\xfb': 611, '\xfc': 611, '\xfd': 556, '\xfe': 611, '\xff': 556, '\u0100': 722, '\u0101': 556, '\u0102': 722, '\u0103': 556, '\u0104': 722, '\u0105': 556, '\u0106': 722, '\u0107': 556, '\u010c': 722, '\u010d': 556, '\u010e': 722, '\u010f': 743, '\u0110': 722, '\u0111': 611, '\u0112': 667, '\u0113': 556, '\u0116': 667, '\u0117': 556, '\u0118': 667, '\u0119': 556, '\u011a': 667, '\u011b': 556, '\u011e': 778, '\u011f': 611, '\u0122': 778, '\u0123': 611, '\u012a': 278, '\u012b': 278, '\u012e': 278, '\u012f': 278, '\u0130': 278, '\u0131': 278, '\u0136': 722, '\u0137': 556, '\u0139': 611, '\u013a': 278, '\u013b': 611, '\u013c': 278, '\u013d': 611, '\u013e': 400, '\u0141': 611, '\u0142': 278, '\u0143': 722, '\u0144': 611, '\u0145': 722, '\u0146': 611, '\u0147': 722, '\u0148': 611, '\u014c': 778, '\u014d': 611, '\u0150': 778, '\u0151': 611, '\u0152': 1000, '\u0153': 944, '\u0154': 722, '\u0155': 389, '\u0156': 722, '\u0157': 389, '\u0158': 722, '\u0159': 389, '\u015a': 667, '\u015b': 556, '\u015e': 667, '\u015f': 556, '\u0160': 667, '\u0161': 556, '\u0162': 611, '\u0163': 333, '\u0164': 611, '\u0165': 389, '\u016a': 722, '\u016b': 611, '\u016e': 722, '\u016f': 611, '\u0170': 722, '\u0171': 611, '\u0172': 722, '\u0173': 611, '\u0178': 667, '\u0179': 611, '\u017a': 500, '\u017b': 611, '\u017c': 500, '\u017d': 611, '\u017e': 500, '\u0192': 556, '\u0218': 667, '\u0219': 556, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 556, '\u2014': 1000, '\u2018': 278, '\u2019': 278, '\u201a': 278, '\u201c': 500, '\u201d': 500, '\u201e': 500, 
'\u2020': 556, '\u2021': 556, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 1000, '\u2202': 494, '\u2206': 612, '\u2211': 600, '\u2212': 584, '\u221a': 549, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 494, '\uf6c3': 250, '\ufb01': 611, '\ufb02': 611}),
'Helvetica-BoldOblique': ({'FontName': 'Helvetica-BoldOblique', 'Descent': -207.0, 'FontBBox': (-175.0, -228.0, 1114.0, 962.0), 'FontWeight': 'Bold', 'CapHeight': 718.0, 'FontFamily': 'Helvetica', 'Flags': 0, 'XHeight': 532.0, 'ItalicAngle': -12.0, 'Ascent': 718.0}, {' ': 278, '!': 333, '"': 474, '#': 556, '$': 556, '%': 889, '&': 722, u"'": 238, '(': 333, ')': 333, '*': 389, '+': 584, ',': 278, '-': 333, '.': 278, '/': 278, '0': 556, '1': 556, '2': 556, '3': 556, '4': 556, '5': 556, '6': 556, '7': 556, '8': 556, '9': 556, ':': 333, ';': 333, '<': 584, '=': 584, '>': 584, '?': 611, '@': 975, 'A': 722, 'B': 722, 'C': 722, 'D': 722, 'E': 667, 'F': 611, 'G': 778, 'H': 722, 'I': 278, 'J': 556, 'K': 722, 'L': 611, 'M': 833, 'N': 722, 'O': 778, 'P': 667, 'Q': 778, 'R': 722, 'S': 667, 'T': 611, 'U': 722, 'V': 667, 'W': 944, 'X': 667, 'Y': 667, 'Z': 611, '[': 333, '\\': 278, ']': 333, '^': 584, '_': 556, '`': 333, 'a': 556, 'b': 611, 'c': 556, 'd': 611, 'e': 556, 'f': 333, 'g': 611, 'h': 611, 'i': 278, 'j': 278, 'k': 556, 'l': 278, 'm': 889, 'n': 611, 'o': 611, 'p': 611, 'q': 611, 'r': 389, 's': 556, 't': 333, 'u': 611, 'v': 556, 'w': 778, 'x': 556, 'y': 556, 'z': 500, '{': 389, '|': 280, '}': 389, '~': 584, '\xa1': 333, '\xa2': 556, '\xa3': 556, '\xa4': 556, '\xa5': 556, '\xa6': 280, '\xa7': 556, '\xa8': 333, '\xa9': 737, '\xaa': 370, '\xab': 556, '\xac': 584, '\xae': 737, '\xaf': 333, '\xb0': 400, '\xb1': 584, '\xb2': 333, '\xb3': 333, '\xb4': 333, '\xb5': 611, '\xb6': 556, '\xb7': 278, '\xb8': 333, '\xb9': 333, '\xba': 365, '\xbb': 556, '\xbc': 834, '\xbd': 834, '\xbe': 834, '\xbf': 611, '\xc0': 722, '\xc1': 722, '\xc2': 722, '\xc3': 722, '\xc4': 722, '\xc5': 722, '\xc6': 1000, '\xc7': 722, '\xc8': 667, '\xc9': 667, '\xca': 667, '\xcb': 667, '\xcc': 278, '\xcd': 278, '\xce': 278, '\xcf': 278, '\xd0': 722, '\xd1': 722, '\xd2': 778, '\xd3': 778, '\xd4': 778, '\xd5': 778, '\xd6': 778, '\xd7': 584, '\xd8': 778, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 
667, '\xde': 667, '\xdf': 611, '\xe0': 556, '\xe1': 556, '\xe2': 556, '\xe3': 556, '\xe4': 556, '\xe5': 556, '\xe6': 889, '\xe7': 556, '\xe8': 556, '\xe9': 556, '\xea': 556, '\xeb': 556, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 611, '\xf1': 611, '\xf2': 611, '\xf3': 611, '\xf4': 611, '\xf5': 611, '\xf6': 611, '\xf7': 584, '\xf8': 611, '\xf9': 611, '\xfa': 611, '\xfb': 611, '\xfc': 611, '\xfd': 556, '\xfe': 611, '\xff': 556, '\u0100': 722, '\u0101': 556, '\u0102': 722, '\u0103': 556, '\u0104': 722, '\u0105': 556, '\u0106': 722, '\u0107': 556, '\u010c': 722, '\u010d': 556, '\u010e': 722, '\u010f': 743, '\u0110': 722, '\u0111': 611, '\u0112': 667, '\u0113': 556, '\u0116': 667, '\u0117': 556, '\u0118': 667, '\u0119': 556, '\u011a': 667, '\u011b': 556, '\u011e': 778, '\u011f': 611, '\u0122': 778, '\u0123': 611, '\u012a': 278, '\u012b': 278, '\u012e': 278, '\u012f': 278, '\u0130': 278, '\u0131': 278, '\u0136': 722, '\u0137': 556, '\u0139': 611, '\u013a': 278, '\u013b': 611, '\u013c': 278, '\u013d': 611, '\u013e': 400, '\u0141': 611, '\u0142': 278, '\u0143': 722, '\u0144': 611, '\u0145': 722, '\u0146': 611, '\u0147': 722, '\u0148': 611, '\u014c': 778, '\u014d': 611, '\u0150': 778, '\u0151': 611, '\u0152': 1000, '\u0153': 944, '\u0154': 722, '\u0155': 389, '\u0156': 722, '\u0157': 389, '\u0158': 722, '\u0159': 389, '\u015a': 667, '\u015b': 556, '\u015e': 667, '\u015f': 556, '\u0160': 667, '\u0161': 556, '\u0162': 611, '\u0163': 333, '\u0164': 611, '\u0165': 389, '\u016a': 722, '\u016b': 611, '\u016e': 722, '\u016f': 611, '\u0170': 722, '\u0171': 611, '\u0172': 722, '\u0173': 611, '\u0178': 667, '\u0179': 611, '\u017a': 500, '\u017b': 611, '\u017c': 500, '\u017d': 611, '\u017e': 500, '\u0192': 556, '\u0218': 667, '\u0219': 556, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 556, '\u2014': 1000, '\u2018': 278, '\u2019': 278, '\u201a': 278, '\u201c': 500, '\u201d': 500, 
'\u201e': 500, '\u2020': 556, '\u2021': 556, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 1000, '\u2202': 494, '\u2206': 612, '\u2211': 600, '\u2212': 584, '\u221a': 549, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 494, '\uf6c3': 250, '\ufb01': 611, '\ufb02': 611}),
'Helvetica-Oblique': ({'FontName': 'Helvetica-Oblique', 'Descent': -207.0, 'FontBBox': (-171.0, -225.0, 1116.0, 931.0), 'FontWeight': 'Medium', 'CapHeight': 718.0, 'FontFamily': 'Helvetica', 'Flags': 0, 'XHeight': 523.0, 'ItalicAngle': -12.0, 'Ascent': 718.0}, {' ': 278, '!': 278, '"': 355, '#': 556, '$': 556, '%': 889, '&': 667, u"'": 191, '(': 333, ')': 333, '*': 389, '+': 584, ',': 278, '-': 333, '.': 278, '/': 278, '0': 556, '1': 556, '2': 556, '3': 556, '4': 556, '5': 556, '6': 556, '7': 556, '8': 556, '9': 556, ':': 278, ';': 278, '<': 584, '=': 584, '>': 584, '?': 556, '@': 1015, 'A': 667, 'B': 667, 'C': 722, 'D': 722, 'E': 667, 'F': 611, 'G': 778, 'H': 722, 'I': 278, 'J': 500, 'K': 667, 'L': 556, 'M': 833, 'N': 722, 'O': 778, 'P': 667, 'Q': 778, 'R': 722, 'S': 667, 'T': 611, 'U': 722, 'V': 667, 'W': 944, 'X': 667, 'Y': 667, 'Z': 611, '[': 278, '\\': 278, ']': 278, '^': 469, '_': 556, '`': 333, 'a': 556, 'b': 556, 'c': 500, 'd': 556, 'e': 556, 'f': 278, 'g': 556, 'h': 556, 'i': 222, 'j': 222, 'k': 500, 'l': 222, 'm': 833, 'n': 556, 'o': 556, 'p': 556, 'q': 556, 'r': 333, 's': 500, 't': 278, 'u': 556, 'v': 500, 'w': 722, 'x': 500, 'y': 500, 'z': 500, '{': 334, '|': 260, '}': 334, '~': 584, '\xa1': 333, '\xa2': 556, '\xa3': 556, '\xa4': 556, '\xa5': 556, '\xa6': 260, '\xa7': 556, '\xa8': 333, '\xa9': 737, '\xaa': 370, '\xab': 556, '\xac': 584, '\xae': 737, '\xaf': 333, '\xb0': 400, '\xb1': 584, '\xb2': 333, '\xb3': 333, '\xb4': 333, '\xb5': 556, '\xb6': 537, '\xb7': 278, '\xb8': 333, '\xb9': 333, '\xba': 365, '\xbb': 556, '\xbc': 834, '\xbd': 834, '\xbe': 834, '\xbf': 611, '\xc0': 667, '\xc1': 667, '\xc2': 667, '\xc3': 667, '\xc4': 667, '\xc5': 667, '\xc6': 1000, '\xc7': 722, '\xc8': 667, '\xc9': 667, '\xca': 667, '\xcb': 667, '\xcc': 278, '\xcd': 278, '\xce': 278, '\xcf': 278, '\xd0': 722, '\xd1': 722, '\xd2': 778, '\xd3': 778, '\xd4': 778, '\xd5': 778, '\xd6': 778, '\xd7': 584, '\xd8': 778, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 667, 
'\xde': 667, '\xdf': 611, '\xe0': 556, '\xe1': 556, '\xe2': 556, '\xe3': 556, '\xe4': 556, '\xe5': 556, '\xe6': 889, '\xe7': 500, '\xe8': 556, '\xe9': 556, '\xea': 556, '\xeb': 556, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 556, '\xf1': 556, '\xf2': 556, '\xf3': 556, '\xf4': 556, '\xf5': 556, '\xf6': 556, '\xf7': 584, '\xf8': 611, '\xf9': 556, '\xfa': 556, '\xfb': 556, '\xfc': 556, '\xfd': 500, '\xfe': 556, '\xff': 500, '\u0100': 667, '\u0101': 556, '\u0102': 667, '\u0103': 556, '\u0104': 667, '\u0105': 556, '\u0106': 722, '\u0107': 500, '\u010c': 722, '\u010d': 500, '\u010e': 722, '\u010f': 643, '\u0110': 722, '\u0111': 556, '\u0112': 667, '\u0113': 556, '\u0116': 667, '\u0117': 556, '\u0118': 667, '\u0119': 556, '\u011a': 667, '\u011b': 556, '\u011e': 778, '\u011f': 556, '\u0122': 778, '\u0123': 556, '\u012a': 278, '\u012b': 278, '\u012e': 278, '\u012f': 222, '\u0130': 278, '\u0131': 278, '\u0136': 667, '\u0137': 500, '\u0139': 556, '\u013a': 222, '\u013b': 556, '\u013c': 222, '\u013d': 556, '\u013e': 299, '\u0141': 556, '\u0142': 222, '\u0143': 722, '\u0144': 556, '\u0145': 722, '\u0146': 556, '\u0147': 722, '\u0148': 556, '\u014c': 778, '\u014d': 556, '\u0150': 778, '\u0151': 556, '\u0152': 1000, '\u0153': 944, '\u0154': 722, '\u0155': 333, '\u0156': 722, '\u0157': 333, '\u0158': 722, '\u0159': 333, '\u015a': 667, '\u015b': 500, '\u015e': 667, '\u015f': 500, '\u0160': 667, '\u0161': 500, '\u0162': 611, '\u0163': 278, '\u0164': 611, '\u0165': 317, '\u016a': 722, '\u016b': 556, '\u016e': 722, '\u016f': 556, '\u0170': 722, '\u0171': 556, '\u0172': 722, '\u0173': 556, '\u0178': 667, '\u0179': 611, '\u017a': 500, '\u017b': 611, '\u017c': 500, '\u017d': 611, '\u017e': 500, '\u0192': 556, '\u0218': 667, '\u0219': 500, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 556, '\u2014': 1000, '\u2018': 222, '\u2019': 222, '\u201a': 222, '\u201c': 333, '\u201d': 333, '\u201e': 
333, '\u2020': 556, '\u2021': 556, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 1000, '\u2202': 476, '\u2206': 612, '\u2211': 600, '\u2212': 584, '\u221a': 453, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 471, '\uf6c3': 250, '\ufb01': 500, '\ufb02': 500}),
'Symbol': ({'FontName': 'Symbol', 'FontBBox': (-180.0, -293.0, 1090.0, 1010.0), 'FontWeight': 'Medium', 'FontFamily': 'Symbol', 'Flags': 0, 'ItalicAngle': 0.0}, {' ': 250, '!': 333, '#': 500, '%': 833, '&': 778, '(': 333, ')': 333, '+': 549, ',': 250, '.': 250, '/': 278, '0': 500, '1': 500, '2': 500, '3': 500, '4': 500, '5': 500, '6': 500, '7': 500, '8': 500, '9': 500, ':': 278, ';': 278, '<': 549, '=': 549, '>': 549, '?': 444, '[': 333, ']': 333, '_': 500, '{': 480, '|': 200, '}': 480, '\xac': 713, '\xb0': 400, '\xb1': 549, '\xb5': 576, '\xd7': 549, '\xf7': 549, '\u0192': 500, '\u0391': 722, '\u0392': 667, '\u0393': 603, '\u0395': 611, '\u0396': 611, '\u0397': 722, '\u0398': 741, '\u0399': 333, '\u039a': 722, '\u039b': 686, '\u039c': 889, '\u039d': 722, '\u039e': 645, '\u039f': 722, '\u03a0': 768, '\u03a1': 556, '\u03a3': 592, '\u03a4': 611, '\u03a5': 690, '\u03a6': 763, '\u03a7': 722, '\u03a8': 795, '\u03b1': 631, '\u03b2': 549, '\u03b3': 411, '\u03b4': 494, '\u03b5': 439, '\u03b6': 494, '\u03b7': 603, '\u03b8': 521, '\u03b9': 329, '\u03ba': 549, '\u03bb': 549, '\u03bd': 521, '\u03be': 493, '\u03bf': 549, '\u03c0': 549, '\u03c1': 549, '\u03c2': 439, '\u03c3': 603, '\u03c4': 439, '\u03c5': 576, '\u03c6': 521, '\u03c7': 549, '\u03c8': 686, '\u03c9': 686, '\u03d1': 631, '\u03d2': 620, '\u03d5': 603, '\u03d6': 713, '\u2022': 460, '\u2026': 1000, '\u2032': 247, '\u2033': 411, '\u2044': 167, '\u20ac': 750, '\u2111': 686, '\u2118': 987, '\u211c': 795, '\u2126': 768, '\u2135': 823, '\u2190': 987, '\u2191': 603, '\u2192': 987, '\u2193': 603, '\u2194': 1042, '\u21b5': 658, '\u21d0': 987, '\u21d1': 603, '\u21d2': 987, '\u21d3': 603, '\u21d4': 1042, '\u2200': 713, '\u2202': 494, '\u2203': 549, '\u2205': 823, '\u2206': 612, '\u2207': 713, '\u2208': 713, '\u2209': 713, '\u220b': 439, '\u220f': 823, '\u2211': 713, '\u2212': 549, '\u2217': 500, '\u221a': 549, '\u221d': 713, '\u221e': 713, '\u2220': 768, '\u2227': 603, '\u2228': 603, '\u2229': 768, '\u222a': 768, '\u222b': 274, 
'\u2234': 863, '\u223c': 549, '\u2245': 549, '\u2248': 549, '\u2260': 549, '\u2261': 549, '\u2264': 549, '\u2265': 549, '\u2282': 713, '\u2283': 713, '\u2284': 713, '\u2286': 713, '\u2287': 713, '\u2295': 768, '\u2297': 768, '\u22a5': 658, '\u22c5': 250, '\u2320': 686, '\u2321': 686, '\u2329': 329, '\u232a': 329, '\u25ca': 494, '\u2660': 753, '\u2663': 753, '\u2665': 753, '\u2666': 753, '\uf6d9': 790, '\uf6da': 790, '\uf6db': 890, '\uf8e5': 500, '\uf8e6': 603, '\uf8e7': 1000, '\uf8e8': 790, '\uf8e9': 790, '\uf8ea': 786, '\uf8eb': 384, '\uf8ec': 384, '\uf8ed': 384, '\uf8ee': 384, '\uf8ef': 384, '\uf8f0': 384, '\uf8f1': 494, '\uf8f2': 494, '\uf8f3': 494, '\uf8f4': 494, '\uf8f5': 686, '\uf8f6': 384, '\uf8f7': 384, '\uf8f8': 384, '\uf8f9': 384, '\uf8fa': 384, '\uf8fb': 384, '\uf8fc': 494, '\uf8fd': 494, '\uf8fe': 494, '\uf8ff': 790}),
'Times-Bold': ({'FontName': 'Times-Bold', 'Descent': -217.0, 'FontBBox': (-168.0, -218.0, 1000.0, 935.0), 'FontWeight': 'Bold', 'CapHeight': 676.0, 'FontFamily': 'Times', 'Flags': 0, 'XHeight': 461.0, 'ItalicAngle': 0.0, 'Ascent': 683.0}, {' ': 250, '!': 333, '"': 555, '#': 500, '$': 500, '%': 1000, '&': 833, u"'": 278, '(': 333, ')': 333, '*': 500, '+': 570, ',': 250, '-': 333, '.': 250, '/': 278, '0': 500, '1': 500, '2': 500, '3': 500, '4': 500, '5': 500, '6': 500, '7': 500, '8': 500, '9': 500, ':': 333, ';': 333, '<': 570, '=': 570, '>': 570, '?': 500, '@': 930, 'A': 722, 'B': 667, 'C': 722, 'D': 722, 'E': 667, 'F': 611, 'G': 778, 'H': 778, 'I': 389, 'J': 500, 'K': 778, 'L': 667, 'M': 944, 'N': 722, 'O': 778, 'P': 611, 'Q': 778, 'R': 722, 'S': 556, 'T': 667, 'U': 722, 'V': 722, 'W': 1000, 'X': 722, 'Y': 722, 'Z': 667, '[': 333, '\\': 278, ']': 333, '^': 581, '_': 500, '`': 333, 'a': 500, 'b': 556, 'c': 444, 'd': 556, 'e': 444, 'f': 333, 'g': 500, 'h': 556, 'i': 278, 'j': 333, 'k': 556, 'l': 278, 'm': 833, 'n': 556, 'o': 500, 'p': 556, 'q': 556, 'r': 444, 's': 389, 't': 333, 'u': 556, 'v': 500, 'w': 722, 'x': 500, 'y': 500, 'z': 444, '{': 394, '|': 220, '}': 394, '~': 520, '\xa1': 333, '\xa2': 500, '\xa3': 500, '\xa4': 500, '\xa5': 500, '\xa6': 220, '\xa7': 500, '\xa8': 333, '\xa9': 747, '\xaa': 300, '\xab': 500, '\xac': 570, '\xae': 747, '\xaf': 333, '\xb0': 400, '\xb1': 570, '\xb2': 300, '\xb3': 300, '\xb4': 333, '\xb5': 556, '\xb6': 540, '\xb7': 250, '\xb8': 333, '\xb9': 300, '\xba': 330, '\xbb': 500, '\xbc': 750, '\xbd': 750, '\xbe': 750, '\xbf': 500, '\xc0': 722, '\xc1': 722, '\xc2': 722, '\xc3': 722, '\xc4': 722, '\xc5': 722, '\xc6': 1000, '\xc7': 722, '\xc8': 667, '\xc9': 667, '\xca': 667, '\xcb': 667, '\xcc': 389, '\xcd': 389, '\xce': 389, '\xcf': 389, '\xd0': 722, '\xd1': 722, '\xd2': 778, '\xd3': 778, '\xd4': 778, '\xd5': 778, '\xd6': 778, '\xd7': 570, '\xd8': 778, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 722, '\xde': 611, '\xdf': 
556, '\xe0': 500, '\xe1': 500, '\xe2': 500, '\xe3': 500, '\xe4': 500, '\xe5': 500, '\xe6': 722, '\xe7': 444, '\xe8': 444, '\xe9': 444, '\xea': 444, '\xeb': 444, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 500, '\xf1': 556, '\xf2': 500, '\xf3': 500, '\xf4': 500, '\xf5': 500, '\xf6': 500, '\xf7': 570, '\xf8': 500, '\xf9': 556, '\xfa': 556, '\xfb': 556, '\xfc': 556, '\xfd': 500, '\xfe': 556, '\xff': 500, '\u0100': 722, '\u0101': 500, '\u0102': 722, '\u0103': 500, '\u0104': 722, '\u0105': 500, '\u0106': 722, '\u0107': 444, '\u010c': 722, '\u010d': 444, '\u010e': 722, '\u010f': 672, '\u0110': 722, '\u0111': 556, '\u0112': 667, '\u0113': 444, '\u0116': 667, '\u0117': 444, '\u0118': 667, '\u0119': 444, '\u011a': 667, '\u011b': 444, '\u011e': 778, '\u011f': 500, '\u0122': 778, '\u0123': 500, '\u012a': 389, '\u012b': 278, '\u012e': 389, '\u012f': 278, '\u0130': 389, '\u0131': 278, '\u0136': 778, '\u0137': 556, '\u0139': 667, '\u013a': 278, '\u013b': 667, '\u013c': 278, '\u013d': 667, '\u013e': 394, '\u0141': 667, '\u0142': 278, '\u0143': 722, '\u0144': 556, '\u0145': 722, '\u0146': 556, '\u0147': 722, '\u0148': 556, '\u014c': 778, '\u014d': 500, '\u0150': 778, '\u0151': 500, '\u0152': 1000, '\u0153': 722, '\u0154': 722, '\u0155': 444, '\u0156': 722, '\u0157': 444, '\u0158': 722, '\u0159': 444, '\u015a': 556, '\u015b': 389, '\u015e': 556, '\u015f': 389, '\u0160': 556, '\u0161': 389, '\u0162': 667, '\u0163': 333, '\u0164': 667, '\u0165': 416, '\u016a': 722, '\u016b': 556, '\u016e': 722, '\u016f': 556, '\u0170': 722, '\u0171': 556, '\u0172': 722, '\u0173': 556, '\u0178': 722, '\u0179': 667, '\u017a': 444, '\u017b': 667, '\u017c': 444, '\u017d': 667, '\u017e': 444, '\u0192': 500, '\u0218': 556, '\u0219': 389, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 500, '\u2014': 1000, '\u2018': 333, '\u2019': 333, '\u201a': 333, '\u201c': 500, '\u201d': 500, '\u201e': 500, '\u2020': 500, 
'\u2021': 500, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 1000, '\u2202': 494, '\u2206': 612, '\u2211': 600, '\u2212': 570, '\u221a': 549, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 494, '\uf6c3': 250, '\ufb01': 556, '\ufb02': 556}),
'Times-BoldItalic': ({'FontName': 'Times-BoldItalic', 'Descent': -217.0, 'FontBBox': (-200.0, -218.0, 996.0, 921.0), 'FontWeight': 'Bold', 'CapHeight': 669.0, 'FontFamily': 'Times', 'Flags': 0, 'XHeight': 462.0, 'ItalicAngle': -15.0, 'Ascent': 683.0}, {' ': 250, '!': 389, '"': 555, '#': 500, '$': 500, '%': 833, '&': 778, u"'": 278, '(': 333, ')': 333, '*': 500, '+': 570, ',': 250, '-': 333, '.': 250, '/': 278, '0': 500, '1': 500, '2': 500, '3': 500, '4': 500, '5': 500, '6': 500, '7': 500, '8': 500, '9': 500, ':': 333, ';': 333, '<': 570, '=': 570, '>': 570, '?': 500, '@': 832, 'A': 667, 'B': 667, 'C': 667, 'D': 722, 'E': 667, 'F': 667, 'G': 722, 'H': 778, 'I': 389, 'J': 500, 'K': 667, 'L': 611, 'M': 889, 'N': 722, 'O': 722, 'P': 611, 'Q': 722, 'R': 667, 'S': 556, 'T': 611, 'U': 722, 'V': 667, 'W': 889, 'X': 667, 'Y': 611, 'Z': 611, '[': 333, '\\': 278, ']': 333, '^': 570, '_': 500, '`': 333, 'a': 500, 'b': 500, 'c': 444, 'd': 500, 'e': 444, 'f': 333, 'g': 500, 'h': 556, 'i': 278, 'j': 278, 'k': 500, 'l': 278, 'm': 778, 'n': 556, 'o': 500, 'p': 500, 'q': 500, 'r': 389, 's': 389, 't': 278, 'u': 556, 'v': 444, 'w': 667, 'x': 500, 'y': 444, 'z': 389, '{': 348, '|': 220, '}': 348, '~': 570, '\xa1': 389, '\xa2': 500, '\xa3': 500, '\xa4': 500, '\xa5': 500, '\xa6': 220, '\xa7': 500, '\xa8': 333, '\xa9': 747, '\xaa': 266, '\xab': 500, '\xac': 606, '\xae': 747, '\xaf': 333, '\xb0': 400, '\xb1': 570, '\xb2': 300, '\xb3': 300, '\xb4': 333, '\xb5': 576, '\xb6': 500, '\xb7': 250, '\xb8': 333, '\xb9': 300, '\xba': 300, '\xbb': 500, '\xbc': 750, '\xbd': 750, '\xbe': 750, '\xbf': 500, '\xc0': 667, '\xc1': 667, '\xc2': 667, '\xc3': 667, '\xc4': 667, '\xc5': 667, '\xc6': 944, '\xc7': 667, '\xc8': 667, '\xc9': 667, '\xca': 667, '\xcb': 667, '\xcc': 389, '\xcd': 389, '\xce': 389, '\xcf': 389, '\xd0': 722, '\xd1': 722, '\xd2': 722, '\xd3': 722, '\xd4': 722, '\xd5': 722, '\xd6': 722, '\xd7': 570, '\xd8': 722, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 611, '\xde': 611, 
'\xdf': 500, '\xe0': 500, '\xe1': 500, '\xe2': 500, '\xe3': 500, '\xe4': 500, '\xe5': 500, '\xe6': 722, '\xe7': 444, '\xe8': 444, '\xe9': 444, '\xea': 444, '\xeb': 444, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 500, '\xf1': 556, '\xf2': 500, '\xf3': 500, '\xf4': 500, '\xf5': 500, '\xf6': 500, '\xf7': 570, '\xf8': 500, '\xf9': 556, '\xfa': 556, '\xfb': 556, '\xfc': 556, '\xfd': 444, '\xfe': 500, '\xff': 444, '\u0100': 667, '\u0101': 500, '\u0102': 667, '\u0103': 500, '\u0104': 667, '\u0105': 500, '\u0106': 667, '\u0107': 444, '\u010c': 667, '\u010d': 444, '\u010e': 722, '\u010f': 608, '\u0110': 722, '\u0111': 500, '\u0112': 667, '\u0113': 444, '\u0116': 667, '\u0117': 444, '\u0118': 667, '\u0119': 444, '\u011a': 667, '\u011b': 444, '\u011e': 722, '\u011f': 500, '\u0122': 722, '\u0123': 500, '\u012a': 389, '\u012b': 278, '\u012e': 389, '\u012f': 278, '\u0130': 389, '\u0131': 278, '\u0136': 667, '\u0137': 500, '\u0139': 611, '\u013a': 278, '\u013b': 611, '\u013c': 278, '\u013d': 611, '\u013e': 382, '\u0141': 611, '\u0142': 278, '\u0143': 722, '\u0144': 556, '\u0145': 722, '\u0146': 556, '\u0147': 722, '\u0148': 556, '\u014c': 722, '\u014d': 500, '\u0150': 722, '\u0151': 500, '\u0152': 944, '\u0153': 722, '\u0154': 667, '\u0155': 389, '\u0156': 667, '\u0157': 389, '\u0158': 667, '\u0159': 389, '\u015a': 556, '\u015b': 389, '\u015e': 556, '\u015f': 389, '\u0160': 556, '\u0161': 389, '\u0162': 611, '\u0163': 278, '\u0164': 611, '\u0165': 366, '\u016a': 722, '\u016b': 556, '\u016e': 722, '\u016f': 556, '\u0170': 722, '\u0171': 556, '\u0172': 722, '\u0173': 556, '\u0178': 611, '\u0179': 611, '\u017a': 389, '\u017b': 611, '\u017c': 389, '\u017d': 611, '\u017e': 389, '\u0192': 500, '\u0218': 556, '\u0219': 389, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 500, '\u2014': 1000, '\u2018': 333, '\u2019': 333, '\u201a': 333, '\u201c': 500, '\u201d': 500, '\u201e': 500, '\u2020': 
500, '\u2021': 500, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 1000, '\u2202': 494, '\u2206': 612, '\u2211': 600, '\u2212': 606, '\u221a': 549, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 494, '\uf6c3': 250, '\ufb01': 556, '\ufb02': 556}),
'Times-Italic': ({'FontName': 'Times-Italic', 'Descent': -217.0, 'FontBBox': (-169.0, -217.0, 1010.0, 883.0), 'FontWeight': 'Medium', 'CapHeight': 653.0, 'FontFamily': 'Times', 'Flags': 0, 'XHeight': 441.0, 'ItalicAngle': -15.5, 'Ascent': 683.0}, {' ': 250, '!': 333, '"': 420, '#': 500, '$': 500, '%': 833, '&': 778, u"'": 214, '(': 333, ')': 333, '*': 500, '+': 675, ',': 250, '-': 333, '.': 250, '/': 278, '0': 500, '1': 500, '2': 500, '3': 500, '4': 500, '5': 500, '6': 500, '7': 500, '8': 500, '9': 500, ':': 333, ';': 333, '<': 675, '=': 675, '>': 675, '?': 500, '@': 920, 'A': 611, 'B': 611, 'C': 667, 'D': 722, 'E': 611, 'F': 611, 'G': 722, 'H': 722, 'I': 333, 'J': 444, 'K': 667, 'L': 556, 'M': 833, 'N': 667, 'O': 722, 'P': 611, 'Q': 722, 'R': 611, 'S': 500, 'T': 556, 'U': 722, 'V': 611, 'W': 833, 'X': 611, 'Y': 556, 'Z': 556, '[': 389, '\\': 278, ']': 389, '^': 422, '_': 500, '`': 333, 'a': 500, 'b': 500, 'c': 444, 'd': 500, 'e': 444, 'f': 278, 'g': 500, 'h': 500, 'i': 278, 'j': 278, 'k': 444, 'l': 278, 'm': 722, 'n': 500, 'o': 500, 'p': 500, 'q': 500, 'r': 389, 's': 389, 't': 278, 'u': 500, 'v': 444, 'w': 667, 'x': 444, 'y': 444, 'z': 389, '{': 400, '|': 275, '}': 400, '~': 541, '\xa1': 389, '\xa2': 500, '\xa3': 500, '\xa4': 500, '\xa5': 500, '\xa6': 275, '\xa7': 500, '\xa8': 333, '\xa9': 760, '\xaa': 276, '\xab': 500, '\xac': 675, '\xae': 760, '\xaf': 333, '\xb0': 400, '\xb1': 675, '\xb2': 300, '\xb3': 300, '\xb4': 333, '\xb5': 500, '\xb6': 523, '\xb7': 250, '\xb8': 333, '\xb9': 300, '\xba': 310, '\xbb': 500, '\xbc': 750, '\xbd': 750, '\xbe': 750, '\xbf': 500, '\xc0': 611, '\xc1': 611, '\xc2': 611, '\xc3': 611, '\xc4': 611, '\xc5': 611, '\xc6': 889, '\xc7': 667, '\xc8': 611, '\xc9': 611, '\xca': 611, '\xcb': 611, '\xcc': 333, '\xcd': 333, '\xce': 333, '\xcf': 333, '\xd0': 722, '\xd1': 667, '\xd2': 722, '\xd3': 722, '\xd4': 722, '\xd5': 722, '\xd6': 722, '\xd7': 675, '\xd8': 722, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 556, '\xde': 611, 
'\xdf': 500, '\xe0': 500, '\xe1': 500, '\xe2': 500, '\xe3': 500, '\xe4': 500, '\xe5': 500, '\xe6': 667, '\xe7': 444, '\xe8': 444, '\xe9': 444, '\xea': 444, '\xeb': 444, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 500, '\xf1': 500, '\xf2': 500, '\xf3': 500, '\xf4': 500, '\xf5': 500, '\xf6': 500, '\xf7': 675, '\xf8': 500, '\xf9': 500, '\xfa': 500, '\xfb': 500, '\xfc': 500, '\xfd': 444, '\xfe': 500, '\xff': 444, '\u0100': 611, '\u0101': 500, '\u0102': 611, '\u0103': 500, '\u0104': 611, '\u0105': 500, '\u0106': 667, '\u0107': 444, '\u010c': 667, '\u010d': 444, '\u010e': 722, '\u010f': 544, '\u0110': 722, '\u0111': 500, '\u0112': 611, '\u0113': 444, '\u0116': 611, '\u0117': 444, '\u0118': 611, '\u0119': 444, '\u011a': 611, '\u011b': 444, '\u011e': 722, '\u011f': 500, '\u0122': 722, '\u0123': 500, '\u012a': 333, '\u012b': 278, '\u012e': 333, '\u012f': 278, '\u0130': 333, '\u0131': 278, '\u0136': 667, '\u0137': 444, '\u0139': 556, '\u013a': 278, '\u013b': 556, '\u013c': 278, '\u013d': 611, '\u013e': 300, '\u0141': 556, '\u0142': 278, '\u0143': 667, '\u0144': 500, '\u0145': 667, '\u0146': 500, '\u0147': 667, '\u0148': 500, '\u014c': 722, '\u014d': 500, '\u0150': 722, '\u0151': 500, '\u0152': 944, '\u0153': 667, '\u0154': 611, '\u0155': 389, '\u0156': 611, '\u0157': 389, '\u0158': 611, '\u0159': 389, '\u015a': 500, '\u015b': 389, '\u015e': 500, '\u015f': 389, '\u0160': 500, '\u0161': 389, '\u0162': 556, '\u0163': 278, '\u0164': 556, '\u0165': 300, '\u016a': 722, '\u016b': 500, '\u016e': 722, '\u016f': 500, '\u0170': 722, '\u0171': 500, '\u0172': 722, '\u0173': 500, '\u0178': 556, '\u0179': 556, '\u017a': 389, '\u017b': 556, '\u017c': 389, '\u017d': 556, '\u017e': 389, '\u0192': 500, '\u0218': 500, '\u0219': 389, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 500, '\u2014': 889, '\u2018': 333, '\u2019': 333, '\u201a': 333, '\u201c': 556, '\u201d': 556, '\u201e': 556, '\u2020': 
500, '\u2021': 500, '\u2022': 350, '\u2026': 889, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 980, '\u2202': 476, '\u2206': 612, '\u2211': 600, '\u2212': 675, '\u221a': 453, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 471, '\uf6c3': 250, '\ufb01': 500, '\ufb02': 500}),
'Times-Roman': ({'FontName': 'Times-Roman', 'Descent': -217.0, 'FontBBox': (-168.0, -218.0, 1000.0, 898.0), 'FontWeight': 'Roman', 'CapHeight': 662.0, 'FontFamily': 'Times', 'Flags': 0, 'XHeight': 450.0, 'ItalicAngle': 0.0, 'Ascent': 683.0}, {' ': 250, '!': 333, '"': 408, '#': 500, '$': 500, '%': 833, '&': 778, u"'": 180, '(': 333, ')': 333, '*': 500, '+': 564, ',': 250, '-': 333, '.': 250, '/': 278, '0': 500, '1': 500, '2': 500, '3': 500, '4': 500, '5': 500, '6': 500, '7': 500, '8': 500, '9': 500, ':': 278, ';': 278, '<': 564, '=': 564, '>': 564, '?': 444, '@': 921, 'A': 722, 'B': 667, 'C': 667, 'D': 722, 'E': 611, 'F': 556, 'G': 722, 'H': 722, 'I': 333, 'J': 389, 'K': 722, 'L': 611, 'M': 889, 'N': 722, 'O': 722, 'P': 556, 'Q': 722, 'R': 667, 'S': 556, 'T': 611, 'U': 722, 'V': 722, 'W': 944, 'X': 722, 'Y': 722, 'Z': 611, '[': 333, '\\': 278, ']': 333, '^': 469, '_': 500, '`': 333, 'a': 444, 'b': 500, 'c': 444, 'd': 500, 'e': 444, 'f': 333, 'g': 500, 'h': 500, 'i': 278, 'j': 278, 'k': 500, 'l': 278, 'm': 778, 'n': 500, 'o': 500, 'p': 500, 'q': 500, 'r': 333, 's': 389, 't': 278, 'u': 500, 'v': 500, 'w': 722, 'x': 500, 'y': 500, 'z': 444, '{': 480, '|': 200, '}': 480, '~': 541, '\xa1': 333, '\xa2': 500, '\xa3': 500, '\xa4': 500, '\xa5': 500, '\xa6': 200, '\xa7': 500, '\xa8': 333, '\xa9': 760, '\xaa': 276, '\xab': 500, '\xac': 564, '\xae': 760, '\xaf': 333, '\xb0': 400, '\xb1': 564, '\xb2': 300, '\xb3': 300, '\xb4': 333, '\xb5': 500, '\xb6': 453, '\xb7': 250, '\xb8': 333, '\xb9': 300, '\xba': 310, '\xbb': 500, '\xbc': 750, '\xbd': 750, '\xbe': 750, '\xbf': 444, '\xc0': 722, '\xc1': 722, '\xc2': 722, '\xc3': 722, '\xc4': 722, '\xc5': 722, '\xc6': 889, '\xc7': 667, '\xc8': 611, '\xc9': 611, '\xca': 611, '\xcb': 611, '\xcc': 333, '\xcd': 333, '\xce': 333, '\xcf': 333, '\xd0': 722, '\xd1': 722, '\xd2': 722, '\xd3': 722, '\xd4': 722, '\xd5': 722, '\xd6': 722, '\xd7': 564, '\xd8': 722, '\xd9': 722, '\xda': 722, '\xdb': 722, '\xdc': 722, '\xdd': 722, '\xde': 556, '\xdf': 
500, '\xe0': 444, '\xe1': 444, '\xe2': 444, '\xe3': 444, '\xe4': 444, '\xe5': 444, '\xe6': 667, '\xe7': 444, '\xe8': 444, '\xe9': 444, '\xea': 444, '\xeb': 444, '\xec': 278, '\xed': 278, '\xee': 278, '\xef': 278, '\xf0': 500, '\xf1': 500, '\xf2': 500, '\xf3': 500, '\xf4': 500, '\xf5': 500, '\xf6': 500, '\xf7': 564, '\xf8': 500, '\xf9': 500, '\xfa': 500, '\xfb': 500, '\xfc': 500, '\xfd': 500, '\xfe': 500, '\xff': 500, '\u0100': 722, '\u0101': 444, '\u0102': 722, '\u0103': 444, '\u0104': 722, '\u0105': 444, '\u0106': 667, '\u0107': 444, '\u010c': 667, '\u010d': 444, '\u010e': 722, '\u010f': 588, '\u0110': 722, '\u0111': 500, '\u0112': 611, '\u0113': 444, '\u0116': 611, '\u0117': 444, '\u0118': 611, '\u0119': 444, '\u011a': 611, '\u011b': 444, '\u011e': 722, '\u011f': 500, '\u0122': 722, '\u0123': 500, '\u012a': 333, '\u012b': 278, '\u012e': 333, '\u012f': 278, '\u0130': 333, '\u0131': 278, '\u0136': 722, '\u0137': 500, '\u0139': 611, '\u013a': 278, '\u013b': 611, '\u013c': 278, '\u013d': 611, '\u013e': 344, '\u0141': 611, '\u0142': 278, '\u0143': 722, '\u0144': 500, '\u0145': 722, '\u0146': 500, '\u0147': 722, '\u0148': 500, '\u014c': 722, '\u014d': 500, '\u0150': 722, '\u0151': 500, '\u0152': 889, '\u0153': 722, '\u0154': 667, '\u0155': 333, '\u0156': 667, '\u0157': 333, '\u0158': 667, '\u0159': 333, '\u015a': 556, '\u015b': 389, '\u015e': 556, '\u015f': 389, '\u0160': 556, '\u0161': 389, '\u0162': 611, '\u0163': 278, '\u0164': 611, '\u0165': 326, '\u016a': 722, '\u016b': 500, '\u016e': 722, '\u016f': 500, '\u0170': 722, '\u0171': 500, '\u0172': 722, '\u0173': 500, '\u0178': 722, '\u0179': 611, '\u017a': 444, '\u017b': 611, '\u017c': 444, '\u017d': 611, '\u017e': 444, '\u0192': 500, '\u0218': 556, '\u0219': 389, '\u02c6': 333, '\u02c7': 333, '\u02d8': 333, '\u02d9': 333, '\u02da': 333, '\u02db': 333, '\u02dc': 333, '\u02dd': 333, '\u2013': 500, '\u2014': 1000, '\u2018': 333, '\u2019': 333, '\u201a': 333, '\u201c': 444, '\u201d': 444, '\u201e': 444, '\u2020': 500, 
'\u2021': 500, '\u2022': 350, '\u2026': 1000, '\u2030': 1000, '\u2039': 333, '\u203a': 333, '\u2044': 167, '\u2122': 980, '\u2202': 476, '\u2206': 612, '\u2211': 600, '\u2212': 564, '\u221a': 453, '\u2260': 549, '\u2264': 549, '\u2265': 549, '\u25ca': 471, '\uf6c3': 250, '\ufb01': 556, '\ufb02': 556}),
'ZapfDingbats': ({'FontName': 'ZapfDingbats', 'FontBBox': (-1.0, -143.0, 981.0, 820.0), 'FontWeight': 'Medium', 'FontFamily': 'ITC', 'Flags': 0, 'ItalicAngle': 0.0}, {u'\x01': 974, '\x02': 961, '\x03': 980, '\x04': 719, '\x05': 789, '\x06': 494, '\x07': 552, '\x08': 537, '\t': 577, '\n': 692, '\x0b': 960, '\x0c': 939, '\r': 549, '\x0e': 855, '\x0f': 911, '\x10': 933, '\x11': 945, '\x12': 974, '\x13': 755, '\x14': 846, '\x15': 762, '\x16': 761, '\x17': 571, '\x18': 677, '\x19': 763, '\x1a': 760, '\x1b': 759, '\x1c': 754, '\x1d': 786, '\x1e': 788, '\x1f': 788, ' ': 790, '!': 793, '"': 794, '#': 816, '$': 823, '%': 789, '&': 841, u"'": 823, '(': 833, ')': 816, '*': 831, '+': 923, ',': 744, '-': 723, '.': 749, '/': 790, '0': 792, '1': 695, '2': 776, '3': 768, '4': 792, '5': 759, '6': 707, '7': 708, '8': 682, '9': 701, ':': 826, ';': 815, '<': 789, '=': 789, '>': 707, '?': 687, '@': 696, 'A': 689, 'B': 786, 'C': 787, 'D': 713, 'E': 791, 'F': 785, 'G': 791, 'H': 873, 'I': 761, 'J': 762, 'K': 759, 'L': 892, 'M': 892, 'N': 788, 'O': 784, 'Q': 438, 'R': 138, 'S': 277, 'T': 415, 'U': 509, 'V': 410, 'W': 234, 'X': 234, 'Y': 390, 'Z': 390, '[': 276, '\\': 276, ']': 317, '^': 317, '_': 334, '`': 334, 'a': 392, 'b': 392, 'c': 668, 'd': 668, 'e': 732, 'f': 544, 'g': 544, 'h': 910, 'i': 911, 'j': 667, 'k': 760, 'l': 760, 'm': 626, 'n': 694, 'o': 595, 'p': 776, 'u': 690, 'v': 791, 'w': 790, 'x': 788, 'y': 788, 'z': 788, '{': 788, '|': 788, '}': 788, '~': 788, '\x7f': 788, '\x80': 788, '\x81': 788, '\x82': 788, '\x83': 788, '\x84': 788, '\x85': 788, '\x86': 788, '\x87': 788, '\x88': 788, '\x89': 788, '\x8a': 788, '\x8b': 788, '\x8c': 788, '\x8d': 788, '\x8e': 788, '\x8f': 788, '\x90': 788, '\x91': 788, '\x92': 788, '\x93': 788, '\x94': 788, '\x95': 788, '\x96': 788, '\x97': 788, '\x98': 788, '\x99': 788, '\x9a': 788, '\x9b': 788, '\x9c': 788, '\x9d': 788, '\x9e': 788, '\x9f': 788, '\xa0': 894, '\xa1': 838, '\xa2': 924, '\xa3': 1016, '\xa4': 458, '\xa5': 924, '\xa6': 918, '\xa7': 
927, '\xa8': 928, '\xa9': 928, '\xaa': 834, '\xab': 873, '\xac': 828, '\xad': 924, '\xae': 917, '\xaf': 930, '\xb0': 931, '\xb1': 463, '\xb2': 883, '\xb3': 836, '\xb4': 867, '\xb5': 696, '\xb6': 874, '\xb7': 760, '\xb8': 946, '\xb9': 865, '\xba': 967, '\xbb': 831, '\xbc': 873, '\xbd': 927, '\xbe': 970, '\xbf': 918, '\xc0': 748, '\xc1': 836, '\xc2': 771, '\xc3': 888, '\xc4': 748, '\xc5': 771, '\xc6': 888, '\xc7': 867, '\xc8': 696, '\xc9': 874, '\xca': 974, '\xcb': 762, '\xcc': 759, '\xcd': 509, '\xce': 410}),
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Parser for 'A D F' Calculations
######################################
This module provides the main parser (user facing) for parsing 'A D F' output
files. Although there are some natural sections within an 'A D F' output, such
as 'COMPUTATION', 'RESULTS', etc. the specific parsers are not organized in
terms of these sections. Each module within this directory provides a single
parser, specific to a given piece of data.
"""
#from __future__ import absolute_import
#from __future__ import print_function
#from __future__ import division
#import re
#try:
# from exa import Parser, Typed
#except ImportError:
# from exa import TypedMeta as Typed
# from exa import Editor as Parser
#
#
#
#
#class Output(Parser):
# """
# Parser for the 'A D F' calculation(s) of an ADF output file.
# """
# _start = re.compile(r"^\s*\*\s*\|\s*A D F\s*\|\s*\*")
# _end = re.compile(r"^\s*A D F E X I T")
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import xlsxwriter
import logging
import os
from xmind2testcase.utils import get_xmind_testcase_list_dict, get_absolute_path
"""
Convert XMind fie to qq testcase excel file
"""
def xmind_to_qqtestcase_file(xmind_file):
    """Convert an XMind file into a QQ-style test case Excel (.xlsx) workbook.

    One worksheet is created per product (top-level test suite) with the
    columns: number, module, test point, precondition, steps, expected
    result.  Test cases whose priority is "高" (high) are additionally
    collected and rendered on a final "冒烟用例" (smoke test) worksheet,
    one formatted multi-line text cell per case.

    :param xmind_file: path to the source .xmind file (resolved to an
        absolute path via ``get_absolute_path``).
    :return: path of the generated .xlsx file; if the target file already
        exists it is returned directly without regenerating it.
    """
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to qqtestcase file...', xmind_file)
    # {product_name: [testcase_dict, ...]} -- "testsutie" is a pre-existing typo.
    testsutie_dict = get_xmind_testcase_list_dict(xmind_file)
    # Column headers: number / module / test point / precondition / steps / expected result.
    fileheader = ["编号", "功能模块", "测试点", "前置条件", "操作步骤", "预期结果"]
    # NOTE(review): slicing off the last 6 chars assumes the path ends in
    # ".xmind"; os.path.splitext would be safer -- TODO confirm callers.
    qqtestcase_file = xmind_file[:-6] + '_qq' + '.xlsx'
    if os.path.exists(qqtestcase_file):
        logging.info('The qqtestcase file already exists, return it directly: %s', qqtestcase_file)
        return qqtestcase_file
    workbook = xlsxwriter.Workbook(qqtestcase_file)
    # Cell format: wrap text and align it to the top of the cell.
    style_text_wrap = workbook.add_format({'text_wrap': 1, 'valign':'top'})
    #
    # sheet1 = workbook.add_worksheet('README')
    # sheet1.write(0, 0, '测试用例内容请至第二页查看')  # 第0行第0列写入内容
    # sheet1.write(1, 0, '确认数量正确、内容正确后,可将此文件直接导入iWork系统', style1)  # 第1行第0列写入内容
    # Collects the high-priority ("高") cases per product for the smoke sheet.
    smoke_case_dict = {}
    for product in testsutie_dict:
        smoke_case_dict[product] = []
        sheet = workbook.add_worksheet(product)  # one worksheet per product
        sheet.set_column("A:B", 15)
        sheet.set_column("C:F", 30)
        # Header row.
        sheet.write(0, 0, fileheader[0])
        sheet.write(0, 1, fileheader[1])
        sheet.write(0, 2, fileheader[2])
        sheet.write(0, 3, fileheader[3])
        sheet.write(0, 4, fileheader[4])
        sheet.write(0, 5, fileheader[5])
        # Test case rows start on the second line.
        case_index = 1
        case_no = 0
        for testcase in testsutie_dict[product]:
            row_dict = gen_a_testcase_row_dict(testcase)  # full case info
            row_list = row_dict["case_row_list"]  # spreadsheet rows for this case
            # print("row_dict", row_dict)
            #####################
            smoke_case = {}
            if row_dict["case_priority"] == "高":
                # High priority: remember this case for the smoke-test sheet.
                # NOTE: "precontion" is a pre-existing typo kept for key consistency.
                smoke_case["module"] = row_dict["case_module"]
                smoke_case["name"] = row_dict["case_title"]
                smoke_case["precontion"] = row_dict["case_precontion"]
                smoke_case["case"] = []
            ##########################
            for row in row_list:
                # Only rows carrying a title (the first row of a case) get a number.
                if len(row[1]) > 0:
                    case_no += 1
                    sheet.write(case_index, 0, "No." + str(case_no), style_text_wrap)
                else:
                    sheet.write(case_index, 0, "", style_text_wrap)
                sheet.write(case_index, 1, row[0], style_text_wrap)
                sheet.write(case_index, 2, row[1], style_text_wrap)
                sheet.write(case_index, 3, row[2], style_text_wrap)
                sheet.write(case_index, 4, row[3], style_text_wrap)
                sheet.write(case_index, 5, row[4], style_text_wrap)
                case_index = case_index + 1
                # Accumulate [step, expected-result] pairs for the smoke case.
                if len(smoke_case) > 0:
                    smoke_case["case"].append([row[3], row[4]])
            if len(smoke_case) > 0:
                smoke_case_dict[product].append(smoke_case)
    # print(smoke_case_dict)
    # Write the smoke-test sheet.
    #############
    if len(smoke_case_dict) > 0:
        sheet2 = workbook.add_worksheet("冒烟用例")  # smoke-case sheet
        sheet2.set_column("A:A", 15)
        sheet2.set_column("B:B", 80)
        _case_index = 0
        # One section per product.
        for product in smoke_case_dict:
            if len(smoke_case_dict[product]) > 0:
                # Section header row.
                sheet2.write(_case_index, 0, "编号", style_text_wrap)
                sheet2.write(_case_index, 1, "%s 冒烟用例" % product, style_text_wrap)
                _case_index += 1
                _smoke_case_list = smoke_case_dict[product]
                _case_no = 0
                for _smoke_case in _smoke_case_list:
                    # Build one multi-line text cell describing the whole case.
                    _smoke_case_string = "测试点:\n"
                    _smoke_case_string += _smoke_case["name"] + "\n\n"
                    # print("_smoke_case", _smoke_case)
                    # Preconditions (skipped when empty or the literal "无" / "none").
                    if len(_smoke_case["precontion"]) > 0 and _smoke_case["precontion"] != '无':
                        _smoke_case_string += "前置条件:\n"
                        _smoke_case_string += _smoke_case["precontion"] + "\n\n"
                    _step_index = 0
                    for _step in _smoke_case["case"]:
                        # Step with an expected result.
                        if len(_step[0]) > 0 and len(_step[1]):
                            _step_index += 1
                            _smoke_case_string += "操作步骤" + str(_step_index) + ":\n"
                            _smoke_case_string += _step[0] + "\n\n"
                            _smoke_case_string += "预期结果" + str(_step_index) + ":\n"
                            _smoke_case_string += _step[1] + "\n\n"
                        # Step without an expected result.
                        elif len(_step[0]) > 0:
                            _step_index += 1
                            _smoke_case_string += "操作步骤" + str(_step_index) + ":\n"
                            _smoke_case_string += _step[0] + "\n\n"
                    # print(_smoke_case_string)
                    _case_no += 1
                    sheet2.write(_case_index, 0, "No." + str(_case_no), style_text_wrap)
                    sheet2.write(_case_index, 1, _smoke_case_string, style_text_wrap)
                    _case_index += 1
                # Leave a blank row between product sections.
                _case_index += 1
    ###################
    workbook.close()
    logging.info('Convert XMind file(%s) to a qqtestcase file(%s) successfully!', xmind_file, qqtestcase_file)
    return qqtestcase_file
#qqtestcase
def gen_a_testcase_row_dict(testcase_dict):
    """Convert one parsed XMind test case into the row structure used by the
    qqtestcase Excel writer.

    :param testcase_dict: parsed case with keys 'suite', 'name',
        'preconditions', 'steps' and 'importance'
    :return: dict with 'case_module', 'case_title', 'case_precontion',
        'case_priority' and 'case_row_list' (one 5-column list per step:
        [module, title, precondition, step, expected])
    """
    case_module = gen_case_module(testcase_dict['suite'])
    case_title = testcase_dict['name']
    case_precontion = testcase_dict['preconditions']
    case_step_and_expected_result_dict = gen_case_step_and_expected_result_dict(testcase_dict['steps'])
    case_priority = gen_case_priority(testcase_dict['importance'])
    # NOTE: the original also built a `case_depict` description string and
    # keyword/type/apply-phase fields, but none of them ever reached the
    # returned rows — that dead code has been removed.

    row_list_dict = {
        "case_module": case_module,
        "case_title": case_title,
        "case_precontion": case_precontion,
        "case_priority": case_priority,
        "case_row_list": []
    }

    row_list = []
    if not case_step_and_expected_result_dict:
        # Case without steps: emit a single row with empty step/expected cells.
        row_list.append([case_module, case_title, case_precontion, "", ""])
    else:
        first_row = True
        for step, expected in case_step_and_expected_result_dict.items():
            # Module/title/precondition are shown only on the first row of a
            # case; continuation rows leave those columns blank.
            module = case_module if first_row else ""
            title = case_title if first_row else ""
            precontion = case_precontion if first_row else ""
            first_row = False
            if step and expected:
                row_list.append([module, title, precontion, step, expected])
            elif step:
                row_list.append([module, title, precontion, step, ""])
            else:
                # No action text: drop any orphan expected result as well.
                row_list.append([module, title, precontion, "", ""])
    row_list_dict["case_row_list"].extend(row_list)
    return row_list_dict
def gen_case_module(module_name):
    """Normalize a module name for the export sheet.

    ASCII parentheses are converted to their full-width equivalents; an
    empty/None name falls back to '/'.
    """
    if not module_name:
        return '/'
    return module_name.replace('(', '(').replace(')', ')')
def gen_case_step_and_expected_result(steps):
    """Build numbered multi-line text blocks for steps and expected results.

    :param steps: list of dicts with 'step_number', 'actions' and optionally
        'expectedresults'
    :return: (steps_text, expected_text) — each a string of
        "<n>. <text>\n" lines; steps without a truthy expected result
        contribute nothing to the expected block
    """
    step_lines = []
    expected_lines = []
    for step in steps:
        number = str(step['step_number'])
        step_lines.append(number + '. ' + step['actions'].replace('\n', '').strip() + '\n')
        # Only emit an expected-result line when the field exists and is non-empty.
        if step.get('expectedresults', ''):
            expected_lines.append(number + '. ' + step['expectedresults'].replace('\n', '').strip() + '\n')
    return ''.join(step_lines), ''.join(expected_lines)
# Dictionary of steps mapped to expected results.
def gen_case_step_and_expected_result_dict(steps):
    """Map each step's cleaned action text to its stripped expected result.

    :param steps: list of dicts with 'actions' and optionally 'expectedresults'
    :return: dict {action_text: expected_text}; a missing 'expectedresults'
        key yields '' instead of raising KeyError (consistent with the
        ``.get`` guard used by ``gen_case_step_and_expected_result``)
    """
    total_dict = {}
    for step_dict in steps:
        action = step_dict['actions'].replace('\n', '').strip()
        # Expected results keep internal newlines on purpose (only stripped),
        # because one step may map to a multi-line expected-result block.
        total_dict[action] = step_dict.get('expectedresults', '').strip()
    return total_dict
def gen_case_priority(priority):
    """Map an XMind importance value to a Chinese priority label.

    :param priority: integer importance (1, 2 or 3) from the parser
    :return: '高' / '中' / '低'; unknown values default to '中'
    """
    mapping = {1: '高', 2: '中', 3: '低'}
    # dict.get with a default replaces the redundant `in mapping.keys()` check.
    return mapping.get(priority, '中')
def gen_case_type(case_type):
    """Map an XMind execution-type value to a Chinese case-type label.

    :param case_type: integer execution type (1 or 2) from the parser
    :return: '手动' (manual) / '自动' (automated); unknown values default to '手动'
    """
    mapping = {1: '手动', 2: '自动'}
    # dict.get with a default replaces the redundant `in mapping.keys()` check.
    return mapping.get(case_type, '手动')
if __name__ == '__main__':
    # Demo entry point: convert the bundled template XMind file.
    xmind_file = '../docs/zentao_testcase_template.xmind'
    qqtestcase_file = xmind_to_qqtestcase_file(xmind_file)
    # Bug fix: the original used print('... %s', path), which printed the
    # literal '%s' followed by the path — print() is not logging-style.
    # Interpolate with the % operator instead (typos also corrected).
    print('Convert the xmind file to a qqtestcase file successfully: %s' % qqtestcase_file)
from multipledispatch import dispatch
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
# Helpers
import optimus.create as op
from optimus.functions import filter_row_by_data_type as fbdt
from optimus.helpers.checkit import is_list_of_str_or_int
from optimus.helpers.constants import *
from optimus.helpers.decorators import *
from optimus.helpers.functions import validate_columns_names, parse_columns, one_list_to_val
def rows(self):
    """Row-level operations accessor for a Spark DataFrame.

    Each nested function below is registered on this ``rows`` function object
    via the project's ``add_attr`` decorator, so callers invoke them as
    ``df.rows.append(...)``, ``df.rows.sort(...)`` etc. ``self`` is the
    DataFrame the accessor was read from (see the ``property`` hookup at the
    bottom of the file).
    """
    @add_attr(rows)
    def append(row):
        """
        Append a row at the end of a dataframe
        :param row: List of values to be appended
        :return: Spark DataFrame
        """
        df = self
        assert isinstance(row, list), "Error: row must me a list"
        assert len(row) > 0, "Error: row list must be greater that 0"
        assert len(df.dtypes) == len(row), "Error row must be the same length of the dataframe"

        cols = []
        values = []
        # Pair each incoming value with the dataframe's (name, dtype) schema,
        # then build a one-row dataframe and union it onto the original.
        for d, r in zip(df.dtypes, row):
            col_name = d[0]
            data_type = d[1]
            # NOTE(review): values whose dtype is missing from
            # SPARK_DTYPES_DICT_OBJECTS are silently skipped, which would make
            # the new row narrower than the dataframe and break the union —
            # presumably every supported dtype is covered; confirm.
            if data_type in SPARK_DTYPES_DICT_OBJECTS:
                cols.append((col_name, (SPARK_DTYPES_DICT_OBJECTS[data_type]), True))
                values.append(r)
        values = [tuple(values)]
        new_row = op.Create.data_frame(cols, values)
        return df.union(new_row)

    @add_attr(rows)
    def select_by_dtypes(col_name, data_type=None):
        """
        This function has built in order to filter some type of row depending of the var type detected by python
        for Example if you have a column with
        | a |
        | 1 |
        | b |
        and you filter by type = integer you will get
        | 1 |
        :param col_name: Column to be filtered
        :param data_type: Datatype use filter values
        :return: Spark DataFrame
        """
        col_name = parse_columns(self, col_name)
        # fbdt builds a boolean column expression matching cells of data_type.
        return self.where(fbdt(col_name, data_type))

    @add_attr(rows)
    def select(*args, **kwargs):
        """
        Alias of Spark filter function. Return rows that match a expression
        :param args:
        :param kwargs:
        :return: Spark DataFrame
        """
        return self.filter(*args, **kwargs)

    # The three sort() overloads below are selected at call time by
    # multipledispatch based on the argument types; registration order matters.
    @add_attr(rows)
    @dispatch(str)
    def sort(columns):
        """
        Sort column by row
        """
        columns = parse_columns(self, columns)
        # Single column name: delegate to the list overload, descending.
        return self.rows.sort([(columns, "desc",)])

    @add_attr(rows)
    @dispatch(str, str)
    def sort(columns, order="desc"):
        """
        Sort column by row
        """
        columns = parse_columns(self, columns)
        return self.rows.sort([(columns, order,)])

    @add_attr(rows)
    @dispatch(list)
    def sort(col_sort):
        """
        Sort columns taking in account multiple columns
        :param col_sort: column and sort type combination (col_name, "asc")
        :type col_sort: list of tuples
        """
        # If a list of columns names are given order this by desc. If you need to specify the order of every
        # column use a list of tuples (col_name, "asc")
        t = []
        if is_list_of_str_or_int(col_sort):
            # Plain column names: default each one to descending order.
            for col_name in col_sort:
                t.append(tuple([col_name, "desc"]))
            col_sort = t

        func = []
        for cs in col_sort:
            col_name = one_list_to_val(cs[0])
            order = cs[1]
            if order == "asc":
                sort_func = F.asc
            elif order == "desc":
                sort_func = F.desc
            # NOTE(review): an order value other than "asc"/"desc" leaves
            # sort_func unbound (UnboundLocalError) or reuses the previous
            # tuple's function — looks unintended; confirm expected inputs.
            func.append(sort_func(col_name))
        df = self.sort(*func)
        return df

    @add_attr(rows)
    def drop(where=None):
        """
        Drop a row depending on a dataframe expression
        :param where: Expression used to drop the row
        :return: Spark DataFrame
        """
        # Keep the complement: rows matching `where` are removed.
        return self.where(~where)

    @add_attr(rows)
    def drop_by_dtypes(col_name, data_type=None):
        """
        Drop rows by cell data type
        :param col_name: Column in which the filter is going to be apllied
        :param data_type: filter by string, integer, float or boolean
        :return: Spark DataFrame
        """
        validate_columns_names(self, col_name)
        return self.rows.drop(fbdt(col_name, data_type))

    @add_attr(rows)
    def drop_na(columns, how="all"):
        """
        Removes rows with null values. You can choose to drop the row if 'all' values are nulls or if
        'any' of the values is null.

        :param columns:
        :param how: 'any' or 'all'. If 'any', drop a row if it contains any nulls. If 'all', drop a row only if all its
        values are null. The default is 'all'.
        :return: Returns a new DataFrame omitting rows with null values.
        """
        columns = parse_columns(self, columns)
        # `how` is passed positionally to Spark's DataFrame.dropna.
        return self.dropna(how, subset=columns)

    @add_attr(rows)
    def drop_duplicates(columns=None):
        """
        Drop duplicates values in a dataframe
        :param columns: List of columns to make the comparison, this only will consider this subset of columns,
        for dropping duplicates. The default behavior will only drop the identical rows.
        :return: Return a new DataFrame with duplicate rows removed
        """
        columns = parse_columns(self, columns)
        # Calls Spark's native DataFrame.drop_duplicates on self (not this
        # wrapper), so there is no recursion despite the same name.
        return self.drop_duplicates(subset=columns)

    @add_attr(rows)
    def drop_first():
        """
        Remove first row in a dataframe
        :return: Spark DataFrame
        """
        # NOTE(review): zipWithIndex/filter/map are RDD methods, not DataFrame
        # methods — this likely needs self.rdd (and a re-wrap into a
        # DataFrame) to work as documented; confirm.
        return self.zipWithIndex().filter(lambda tup: tup[1] > 0).map(lambda tup: tup[0])

    return rows


# Expose the accessor as `df.rows`; reading the property binds `self` and
# returns the decorated `rows` function with all operations attached.
DataFrame.rows = property(rows)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.