content stringlengths 5 1.05M |
|---|
import deepfilter as df
import numpy as np
import pickle as pkl
import os
# Script: render per-temperature training-loss plots for deepfilter checkpoints.
#temps = np.concatenate(([0.1], np.arange(0.5, 10.5, 0.5)))
temps = [10.0]
#compare_temps = [0.1, 0.5, 1.0, 5.0, 10.0]
N = 100  # NOTE(review): unused in the active code below -- confirm before deleting
# Checkpoint location and the naming components used to build directory names.
train_path = '/home/az396/project/deepfiltering/training/checkpoints'
train_date = '210604'
train_dset = '210602_df1_ch3'
train_model = 'df_conv6_fc2_3ch'
train_domain = 'freq'
train_multiclass_type = 'pa'
split = False
# Output directory is loop-invariant: hoisted out of the loop below.
plot_save_path = '/home/az396/project/deepfiltering/analysis/plotting/plots'
# plot individual training losses
for temp in temps:
    print(temp)
    #training_info = f'date{train_date}_dset_name{train_dset}_temp{temp}_model{train_model}_domain_{train_domain}'
    training_info = f'{train_date}_dset_name{train_dset}_class_{train_multiclass_type}_split_{split}_temp{temp}_model{train_model}_domain_{train_domain}'
    # NOTE(review): training_name omits the class/split fields present in
    # training_info -- confirm the plot file naming is intentional.
    training_name = f'{train_date}_training_info_dset_name{train_dset}_temp{temp}_model{train_model}_domain_{train_domain}'
    info_path = os.path.join(train_path, training_info, 'info.pkl')
    name = training_name + '.png'
    with open(info_path, 'rb') as infile:
        test_info = pkl.load(infile)
    df.plot.TrainingInfo(test_info, plot_save_path, name, epochs_per_xtick=6)
####
# compare training loss on same plot
#info_list = []
#name1 = date + '_compare_trainloss' + model + '.png'
#name2 = date + '_compare_trainacc' + model + '.png'
#name3 = date + '_compare_valacc' + model + '.png'
#for temp in compare_temps:
#    print(temp)
#    info_path = os.path.join(top, date + '_temp' + str(temp) + model + epochs_str, 'info.pkl')
#    with open(info_path, 'rb') as infile:
#        info_list.append(pkl.load(infile))
#
#save_path = '/home/az396/project/deepfiltering/analysis/plot/training_loss/compare_loss'
#df.plot.CompareTrainLoss(info_list, compare_temps, save_path, name1, epochs_per_xtick=20)
#df.plot.CompareTrainAccuracy(info_list, compare_temps, save_path, name2, epochs_per_xtick=20)
#df.plot.CompareValAccuracy(info_list, compare_temps, save_path, name3, epochs_per_xtick=20)
####
|
"""
Module with classes for rendering specifications and object hierarchies
"""
from hdmf.spec.spec import AttributeSpec, LinkSpec
from hdmf.spec import GroupSpec
from hdmf.spec import DatasetSpec
from hdmf.utils import docval, getargs
import warnings
class HierarchyDescription(dict):
    """
    Dictionary data structure used to describe the contents of the specification or HDMF object hierarchy.

    This simple helper data structure was designed to ease rendering of object hierarchies but may
    be useful for other purposes as well.

    Ultimately, this is a flattened version of a spec or namespace where all datasets, groups, attributes,
    and links are sorted into flat lists of dicts. The nesting of the objects is then described via
    a list of relationships between the objects. Each object has a unique name that is determined
    by the full path to the object plus the actual name or type of the object.

    TODO Instead of using our own dict datastructures to describe datasets, groups etc. we should use
    the standard spec datastructures provided by HDMF
    """
    # Human-readable descriptions of the supported relationship types
    RELATIONSHIP_TYPES = {'managed_by': 'Object managed by',
                          'link': 'Object links to',
                          'attribute_of': 'Object is attribute of'}

    def __init__(self):
        # Populate the five flat object lists via the parent's __setitem__,
        # because our own __setitem__ is deliberately disabled.
        super(HierarchyDescription, self).__init__()
        super(HierarchyDescription, self).__setitem__('datasets', [])
        super(HierarchyDescription, self).__setitem__('groups', [])
        super(HierarchyDescription, self).__setitem__('attributes', [])
        super(HierarchyDescription, self).__setitem__('relationships', [])
        super(HierarchyDescription, self).__setitem__('links', [])

    def __setitem__(self, key, value):
        """Direct item assignment is disabled; use the add_* methods instead."""
        raise ValueError("Explicit setting of objects not allowed. Use the add_* functions to add objects")

    def add_dataset(self, name, shape=None, dtype=None, data_type=None, size=None):
        """
        Add a dataset to the description

        :param name: Name of the dataset (full path)
        :param shape: Shape of the dataset
        :param dtype: Data type of the data
        :param data_type: object data_type (e.g., NWB neurodata_type)
        :param size: Size of the dataset in bytes, if known
        """
        self['datasets'].append({
            'name': name,
            'shape': shape,
            'dtype': dtype,
            'data_type': data_type,
            'size': size
        })

    def add_group(self, name, data_type=None):
        """
        Add a group to the description

        :param name: Name of the group (full path)
        :param data_type: object data type (e.g., NWB neurodata type)
        """
        self['groups'].append({
            'name': name,
            'data_type': data_type
        })

    def add_attribute(self, name, value, size=None):
        """
        Add an attribute

        :param name: Name of the attribute (Full name, including the path of the parent object)
        :param value: Value of the attribute
        :param size: Size of the attribute value in bytes, if known
        """
        self['attributes'].append({
            'name': name,
            'value': value,
            'size': size
        })

    def add_link(self, name, target_type):
        """
        Add a link

        :param name: Name of the link (full path)
        :param target_type: Type of object the link points to.
        """
        self['links'].append({'name': name,
                              'target_type': target_type})

    def add_relationship(self, source, target, name, rtype):
        """
        Add a relationship between two objects

        :param source: Name of the source object (full path)
        :param target: Name of the target object (full path)
        :param name: Name of the relationship
        :param rtype: Type of the relationship (see RELATIONSHIP_TYPES)
        """
        self['relationships'].append({
            'source': source,
            'target': target,
            'name': name,
            'type': rtype
        })

    @classmethod
    def from_spec(cls, spec):
        """
        Traverse the spec to compute spec related hierarchy data.

        :param spec: The specification object
        :type spec: GroupSpec, AttributeSpec, DatasetSpec
        :return: Instance of HierarchyDescription with the hierarchy of the objects
        """
        import os
        specstats = cls()

        def update_stats(obj, parent_name):
            """
            Recursively visit all items in the spec and update the specstats object.

            :param obj: The spec for the object
            :param parent_name: String with the full path of the parent in the hierarchy
            :return: The full path name assigned to obj
            """
            type_def_key = obj.def_key() if hasattr(obj, 'def_key') else 'data_type'
            type_inc_key = obj.inc_key() if hasattr(obj, 'inc_key') else 'data_type'
            # Pick the display name: explicit name, else defined type, else
            # included type, else (for links) the target type.
            if obj.get('name', None) is not None:
                obj_main_name = obj.name
            elif obj.get(type_def_key, None) is not None:
                obj_main_name = obj.get(type_def_key, None)
            elif obj.get(type_inc_key, None) is not None:
                obj_main_name = obj.get(type_inc_key, None)
            else:
                obj_main_name = obj['target_type']
            # Unnamed (typed) objects are rendered as <TypeName>
            if obj.get('name', None) is None:
                obj_main_name = '<' + obj_main_name + '>'
            obj_name = os.path.join(parent_name, obj_main_name)
            # Record the object itself, according to its spec type
            if isinstance(obj, GroupSpec):
                type_def_key = obj.def_key()
                type_inc_key = obj.inc_key()
                if obj.get(type_def_key, None) is not None:
                    nd = obj[type_def_key]
                else:
                    nd = obj.get(type_inc_key, None)
                specstats.add_group(name=obj_name,
                                    data_type=nd)
            elif isinstance(obj, DatasetSpec):
                type_def_key = obj.def_key()
                type_inc_key = obj.inc_key()
                if obj.get(type_def_key, None) is not None:
                    nd = obj[type_def_key]
                else:
                    nd = obj.get(type_inc_key, None)
                specstats.add_dataset(name=obj_name,
                                      shape=obj.shape,
                                      dtype=obj['type'] if hasattr(obj, 'type') else None,
                                      data_type=nd)
            elif isinstance(obj, AttributeSpec):
                specstats.add_attribute(name=obj_name,
                                        value=obj.value)
            elif isinstance(obj, LinkSpec):
                specstats.add_link(name=obj_name,
                                   target_type=obj['target_type'])
            # Recursively add all groups and datasets
            if isinstance(obj, GroupSpec):
                for d in obj.datasets:
                    dn = update_stats(d, obj_name)
                    specstats.add_relationship(source=obj_name,
                                               target=dn,
                                               name=dn + "_managed_by_" + obj_name,
                                               rtype='managed_by')
                for g in obj.groups:
                    gn = update_stats(g, obj_name)
                    specstats.add_relationship(source=obj_name,
                                               target=gn,
                                               name=gn + "_managed_by_" + obj_name,
                                               rtype='managed_by')
                for link in obj.links:
                    ln = update_stats(link, obj_name)
                    specstats.add_relationship(source=obj_name,
                                               target=ln,
                                               name=ln + "_managed_by_" + obj_name,
                                               rtype='managed_by')
            if isinstance(obj, GroupSpec) or isinstance(obj, DatasetSpec):
                for a in obj.attributes:
                    an = update_stats(a, obj_name)
                    specstats.add_relationship(source=obj_name,
                                               target=an,
                                               name=an + "_attribute_of_" + obj_name,
                                               rtype='attribute_of')
            return obj_name

        update_stats(spec, parent_name='/')
        return specstats

    @classmethod
    def from_hdf5(cls, hdf_object, root='/', data_type_attr_name='neurodata_type'):
        """
        Traverse the file to compute file object hierarchy data.

        :param hdf_object: The h5py.Group or h5py.File object. If a string is given then the function assumes
                           that this is a path to an HDF5 file and will open the file.
        :param root: String indicating the root object starting from which we should compute the file statistics.
                     Default value is "/", i.e., starting from the root itself
        :type root: String
        :param data_type_attr_name: Name of the attribute in the HDF5 file reserved for storing the name of
                                    object types, e.g., the neurodata_type in the case of NWB:N. Default is
                                    'neurodata_type' with NWB in mind.
        :return: Instance of HierarchyDescription with the hierarchy of the objects
        """
        import h5py
        import os
        filestats = cls()

        def update_stats(name, obj):
            """
            Callback function used in conjunction with the visititems function to compile
            statistics for the file

            :param name: the name of the object in the file
            :param obj: the hdf5 object itself
            """
            obj_name = os.path.join(root, name)
            # Group and dataset metadata
            if isinstance(obj, h5py.Dataset):
                ntype = None
                if data_type_attr_name in obj.attrs.keys():
                    ntype = obj.attrs[data_type_attr_name][:]
                filestats.add_dataset(name=obj_name,
                                      shape=obj.shape,
                                      dtype=obj.dtype,
                                      data_type=ntype,
                                      size=obj.size * obj.dtype.itemsize)
            elif isinstance(obj, h5py.Group):
                ntype = None
                if data_type_attr_name in obj.attrs.keys():
                    ntype = obj.attrs[data_type_attr_name][:]
                filestats.add_group(name=obj_name, data_type=ntype)
                # visititems does not visit any links. We need to add them here
                for objkey in obj.keys():
                    objval = obj.get(objkey, getlink=True)
                    if isinstance(objval, h5py.SoftLink) or isinstance(objval, h5py.ExternalLink):
                        try:
                            linktarget = obj[objkey]
                            if data_type_attr_name in linktarget.attrs.keys():
                                targettype = linktarget.attrs[data_type_attr_name][:]
                            else:
                                targettype = str(type(linktarget))
                        except KeyError:
                            # Broken/unresolvable link targets
                            warnings.warn('Unable to determine target type of link %s, %s' % (obj_name, objkey))
                            targettype = 'undefined'
                        linkname = os.path.join(obj_name, objkey)
                        filestats.add_link(linkname, target_type=targettype)
                        if isinstance(objval, h5py.SoftLink):
                            filestats.add_relationship(source=linkname,
                                                       target=objval.path,
                                                       name=linkname,
                                                       rtype='link')
            # Visit all attributes of the object
            for attr_name, attr_value in obj.attrs.items():
                attr_path = os.path.join(obj_name, attr_name)
                try:
                    size = attr_value.size * attr_value.dtype.itemsize
                # Fix: was a bare "except:", which also swallowed KeyboardInterrupt
                # and SystemExit; only non-array values (e.g. str) lack size/dtype.
                except (AttributeError, TypeError):
                    size = None
                filestats.add_attribute(name=attr_path, value=attr_value, size=size)
                filestats.add_relationship(source=attr_path,
                                           target=obj_name,
                                           name=attr_name + '_attribute_of_' + obj_name,
                                           rtype='attribute_of')
            # Create the relationship for the object
            if obj_name != '/':
                filestats.add_relationship(source=os.path.dirname(obj_name),
                                           target=obj_name,
                                           name=obj_name + '_managed_by_' + root,
                                           rtype='managed_by')

        # Determine the main HDF5 object
        if isinstance(hdf_object, str):
            main_hdf_object = h5py.File(hdf_object, 'r')
            close_main_hdf_object = True
        else:
            main_hdf_object = hdf_object
            close_main_hdf_object = False
        # Visit all items in the hdf5 object to compile the object statistics
        main_hdf_object[root].visititems(update_stats)
        # visititems does not visit the starting object itself; add the root explicitly
        if root == '/':
            update_stats(name='/', obj=main_hdf_object['/'])
        # Close the hdf5 object if we opened it
        if close_main_hdf_object:
            main_hdf_object.close()
            del main_hdf_object
        # Return the results
        return filestats
class NXGraphHierarchyDescription(object):
    """
    Description of the object hierarchy as an nx graph
    """

    @docval({'name': 'data', 'type': HierarchyDescription, 'doc': 'Data of the hierarchy'},
            {'name': 'include_groups', 'type': bool,
             'doc': 'Bool indicating whether we should include groups in the hierarchy', 'default': True},
            {'name': 'include_datasets', 'type': bool,
             'doc': 'Bool indicating whether we should include datasets in the hierarchy', 'default': True},
            {'name': 'include_attributes', 'type': bool,
             'doc': 'Bool indicating whether we should include attributes in the hierarchy', 'default': True},
            {'name': 'include_links', 'type': bool,
             'doc': 'Bool indicating whether we should include links in the hierarchy', 'default': True},
            {'name': 'include_relationships', 'type': bool,
             'doc': 'Bool or list of strings indicating the types of relationships to be included', 'default': True})
    def __init__(self, **kwargs):
        self.data, self.include_groups, self.include_datasets, self.include_attributes, \
            self.include_links, self.include_relationships = getargs('data', 'include_groups',
                                                                     'include_datasets', 'include_attributes',
                                                                     'include_links', 'include_relationships',
                                                                     kwargs)
        # Build the graph once and compute a default hierarchical layout for it
        self.graph = self.nxgraph_from_data(self.data,
                                            self.include_groups,
                                            self.include_datasets,
                                            self.include_attributes,
                                            self.include_links,
                                            self.include_relationships)
        self.pos = self.create_hierarchical_graph_layout(self.graph)

    def draw(self, **kwargs):
        """
        Draw the graph using the draw_graph method

        :param kwargs: Additional keyword arguments to be passed to the static draw_graph method
        :return: Matplotlib figure of the data (see draw_graph)
        """
        return self.draw_graph(graph=self.graph, pos=self.pos, data=self.data, **kwargs)

    @staticmethod
    def nxgraph_from_data(data,
                          include_groups=True,
                          include_datasets=True,
                          include_attributes=True,
                          include_links=True,
                          include_relationships=True):
        """
        Create a networkX representation of the objects in the HierarchyDescription stored in self.data

        :param data: Description of the object hierarchy
        :type data: HierarchyDescription
        :param include_groups: Bool indicating whether we should include groups in the hierarchy
        :param include_datasets: Bool indicating whether we should include datasets in the hierarchy
        :param include_attributes: Bool indicating whether we should include attributes in the hierarchy
        :param include_links: Bool indicating whether we should include links in the hierarchy
        :param include_relationships: Bool or list of strings indicating which types of relationships
                                      should be included
        :return: NXGraph from self.data
        """
        import networkx as nx
        graph = nx.Graph()  # nx.MultiDiGraph()
        # Add all nodes, one category at a time
        if include_datasets:
            for d in data['datasets']:
                graph.add_node(d['name'])
        if include_groups:
            for g in data['groups']:
                graph.add_node(g['name'])
        if include_attributes:
            for g in data['attributes']:
                graph.add_node(g['name'])
        if include_links:
            for link in data['links']:
                graph.add_node(link['name'])
        # Resolve which relationship types become edges: an explicit list, all
        # known types (include_relationships is True), or none at all.
        rel_list = include_relationships \
            if isinstance(include_relationships, list) \
            else data.RELATIONSHIP_TYPES \
            if include_relationships is True \
            else []
        all_nodes = graph.nodes(data=False)
        if len(rel_list) > 0:
            for r in data['relationships']:
                # Add only those relationships we were asked to include
                if r['type'] in rel_list:
                    # Add only relationships for which we have both the source and target in the graph
                    if r['source'] in all_nodes and r['target'] in all_nodes:
                        graph.add_edge(r['source'], r['target'])
        return graph

    @staticmethod
    def create_hierarchical_graph_layout(graph):
        """
        Given a networkX graph of file hierarchy, compute the positions of all nodes of the
        graph (i.e., groups and datasets) in a hierarchical layout

        :param graph: Network X graph of file objects
        :return: Dictionary where the keys are the names of the nodes in the graph and the values are
                 tuples with the floating point x and y coordinates for that node.
        """
        import numpy as np
        pos_hierarchy = {}
        allnodes = graph.nodes(data=False)
        # Count how many nodes live at each depth level (depth = path component count)
        nodes_at_level = {}
        for v in allnodes:
            xpos = len(v.split('/')) if v != '/' else 1
            try:
                nodes_at_level[xpos] += 1
            except KeyError:
                nodes_at_level[xpos] = 1
        curr_nodes_at_level = {i: 0 for i in nodes_at_level.keys()}
        # Spread the nodes of each level evenly along y; x grows quadratically
        # with depth to leave room for long labels.
        for v in np.sort(list(allnodes)):
            xpos = len(v.split('/')) if v != '/' else 1
            ypos = 1 - float(curr_nodes_at_level[xpos]) / nodes_at_level[xpos]
            curr_nodes_at_level[xpos] += 1
            pos_hierarchy[v] = np.asarray([np.power(xpos, 2), ypos])
        return pos_hierarchy

    @staticmethod
    def create_warped_hierarchial_graph_layout(graph):
        """
        Given a networkX graph of file hierarchy, compute the positions of all nodes of the
        graph (i.e., groups and datasets) in a hierarchical layout where the levels of the
        hierarchy are warped to follow a semi-circle shape.

        :param graph: Network X graph of file objects
        :return: Dictionary where the keys are the names of the nodes in the graph and the values are
                 tuples with the floating point x and y coordinates for that node.
        """
        import numpy as np
        pos_hierarchy = {}
        allnodes = graph.nodes(data=False)
        nodes_at_level = {}
        for v in allnodes:
            xpos = len(v.split('/')) if v != '/' else 1
            try:
                nodes_at_level[xpos] += 1
            except KeyError:
                nodes_at_level[xpos] = 1
        # Fix: initialize counters for the levels that actually occur; the previous
        # hard-coded range(7) raised KeyError for hierarchies deeper than 6 levels.
        curr_nodes_at_level = {i: 0 for i in nodes_at_level.keys()}
        # Fix: materialize the NodeView before sorting, consistent with
        # create_hierarchical_graph_layout (np.sort cannot sort a NodeView).
        for v in np.sort(list(allnodes)):
            xpos = len(v.split('/')) if v != '/' else 1
            ypos = float(curr_nodes_at_level[xpos]) / nodes_at_level[xpos]
            curr_nodes_at_level[xpos] += 1
            # Warp the deeper levels to follow a semi-circle shape
            if xpos > 3:
                xpos += np.sin(ypos * np.pi)
            xpos = np.power(xpos, 2)
            pos_hierarchy[v] = np.asarray([xpos, -ypos])
        return pos_hierarchy

    @staticmethod
    def normalize_graph_layout(graph_layout):
        """
        Normalize the positions in the given graph layout so that the x and y
        values have a range of [0,1]

        :param graph_layout: Dict where the keys are the names of the nodes in the graph
                             and the values are tuples with the (x,y) locations for the nodes
        :return: New graph layout with normalized coordinates
        """
        import numpy as np
        # Compute positions stats
        xpos = np.asarray([i[0] for i in graph_layout.values()])
        ypos = np.asarray([i[1] for i in graph_layout.values()])
        xmin = xpos.min()
        xmax = xpos.max()
        xr = xmax - xmin
        ymin = ypos.min()
        ymax = ypos.max()
        yr = ymax - ymin
        # Guard against degenerate layouts where all x (or all y) coincide
        xr = xr if xr != 0 else 1.0
        yr = yr if yr != 0 else 1.0
        # Fix: y values must be shifted by ymin (the original subtracted xmin,
        # pushing y outside [0,1] whenever xmin != ymin).
        normalized_layout = {k: np.asarray([(n[0] - xmin) / xr, (n[1] - ymin) / yr])
                             for k, n in graph_layout.items()}
        return normalized_layout

    def suggest_xlim(self):
        """
        Suggest xlimits for plotting

        :return: Tuple with min/max x values, padded by 20% of the x range
        """
        import numpy as np
        xpos = np.asarray([i[0] for i in self.pos.values()])
        xmin = xpos.min()
        xmax = xpos.max()
        xrange = np.abs(xmax - xmin)
        xmin -= xrange * 0.2
        xmax += xrange * 0.2
        return (xmin, xmax)

    def suggest_ylim(self):
        """
        Suggest ylimits for plotting

        :return: Tuple with min/max y values, padded by 10% of the y range
        """
        import numpy as np
        ypos = np.asarray([i[1] for i in self.pos.values()])
        ymin = ypos.min()
        ymax = ypos.max()
        yrange = np.abs(ymax - ymin)
        ymin -= yrange * 0.1
        ymax += yrange * 0.1
        return (ymin, ymax)

    def suggest_figure_size(self):
        """
        Suggest a figure size for a graph based on the number of rows and columns in the hierarchy

        :return: Tuple with the width and height for the figure
        """
        allnodes = self.graph.nodes(data=False)
        nodes_at_level = {}
        for v in allnodes:
            xpos = len(v.split('/')) if v != '/' else 1
            try:
                nodes_at_level[xpos] += 1
            except KeyError:
                nodes_at_level[xpos] = 1
        num_rows = max([v for v in nodes_at_level.values()])
        # num_cols = len(nodes_at_level)
        # w = num_cols + 1.5
        # h = num_rows * 0.5 + 1.5
        w = 8
        h = min(num_rows * 0.75, 8)
        return (w, h)

    @staticmethod
    def draw_graph(graph,
                   pos,
                   data,
                   show_labels=True,
                   relationship_types=None,
                   figsize=None,
                   label_offset=(0.0, 0.012),
                   label_font_size=8,
                   xlim=None,
                   ylim=None,
                   legend_location='lower left',
                   axis_on=False,
                   relationship_counts=True,
                   show_plot=True,
                   relationship_colors=None,
                   relationship_alpha=0.7,
                   node_colors=None,
                   node_alpha=1.0,
                   node_shape='o',
                   node_size=20):
        """
        Helper function used to render the file hierarchy and the inter-object relationships

        :param graph: The networkx graph
        :param pos: Dict with the position for each node generated, e.g., via nx.shell_layout(graph)
        :param data: Data about the hierarchy
        :type data: HierarchyDescription
        :param show_labels: Boolean indicating whether we should show the names of the nodes
        :param relationship_types: List of edge types that should be rendered. If None, then all edges
            will be rendered.
        :param figsize: The size of the matplotlib figure
        :param label_offset: Offsets for the node labels. This may be either: i) None,
            ii) Tuple with constant (x,y) offset for the text labels, or
            iii) Dict of tuples where the keys are the names of the nodes for which labels should be moved
            and the values are the (x,y) offsets for the given nodes.
        :param label_font_size: Font size for the labels
        :param xlim: The x limits to be used for the plot
        :param ylim: The y limits to be used for the plot
        :param legend_location: The legend location (e.g., 'upper left', 'lower right')
        :param axis_on: Boolean indicating whether the axes should be turned on or not.
        :param relationship_counts: Boolean indicating if edge/relationship counts should be shown.
        :param relationship_colors: Optional dict mapping relationship type to color name, or a single
            color string applied to all relationship types. Unspecified types keep built-in defaults.
        :param relationship_alpha: Float alpha in [0,1] for relationship edges, or a dict of
            per-relationship-type alpha values.
        :param node_colors: Dict mapping node type (e.g., 'typed_dataset', 'untyped_group', 'attribute',
            'link') to a color string, or a single color string for all node types.
        :param node_alpha: Float alpha in [0,1] for nodes, or a dict of per-node-type alpha values.
        :param node_shape: Shape string per node type (dict) or a single shape string. Default='o'
        :param node_size: Integer size per node type (dict) or a single int. Default=20
        :param show_plot: If true call show to display the figure. If False return the matplotlib figure
            without showing it.
        :return: Matplotlib figure of the data
        """
        from matplotlib import pyplot as plt
        import networkx as nx
        import os
        from copy import deepcopy
        from matplotlib.ticker import NullLocator
        fig = plt.figure(figsize=figsize)
        # Object names present in the graph, grouped by the styling category
        all_nodes = graph.nodes(data=False)
        n_names = {'typed_dataset': [i['name'] for i in data['datasets']
                                     if i['data_type'] is not None and i['name'] in all_nodes],
                   'untyped_dataset': [i['name'] for i in data['datasets']
                                       if i['data_type'] is None and i['name'] in all_nodes],
                   'typed_group': [i['name'] for i in data['groups']
                                   if i['data_type'] is not None and i['name'] in all_nodes],
                   'untyped_group': [i['name'] for i in data['groups']
                                     if i['data_type'] is None and i['name'] in all_nodes],
                   'attribute': [i['name'] for i in data['attributes']
                                 if i['name'] in all_nodes],
                   'link': [i['name'] for i in data['links']
                            if i['name'] in all_nodes]
                   }
        # Define the legend labels (with per-category node counts)
        n_legend = {'typed_dataset': 'Typed Dataset (%i)' % len(n_names['typed_dataset']),
                    'untyped_dataset': 'Untyped Dataset (%i)' % len(n_names['untyped_dataset']),
                    'typed_group': 'Typed Group (%i)' % len(n_names['typed_group']),
                    'untyped_group': 'Untyped Group (%i)' % len(n_names['untyped_group']),
                    'attribute': 'Attributes (%i)' % len(n_names['attribute']),
                    'link': 'Links (%i)' % len(n_names['link'])}
        # Define the node colors (defaults, possibly overridden by node_colors)
        n_colors = {'typed_dataset': 'blue',
                    'untyped_dataset': 'lightblue',
                    'typed_group': 'red',
                    'untyped_group': 'orange',
                    'attribute': 'gray',
                    'link': 'white'}
        if isinstance(node_colors, str):
            # A single color string applies to every node type
            n_colors = {k: node_colors for k in n_colors}
        elif node_colors is not None:
            # Fix: merge the user-supplied dict into the defaults (the original
            # called node_colors.update(node_colors), a no-op).
            n_colors.update(node_colors)
        # Define the node alpha
        n_alpha_base = node_alpha if isinstance(node_alpha, float) else 1.0
        n_alpha = {k: n_alpha_base for k in n_colors}
        if isinstance(node_alpha, dict):
            n_alpha.update(node_alpha)
        # Define the shape of each node type
        # Fix: derive the base shape from node_shape (the original tested
        # isinstance(node_alpha, float), letting a dict node_shape leak through).
        n_shape_base = node_shape if isinstance(node_shape, str) else 'o'
        n_shape = {k: n_shape_base for k in n_colors}
        if isinstance(node_shape, dict):
            n_shape.update(node_shape)
        # Define the size of each node type
        n_size_base = node_size if isinstance(node_size, float) or isinstance(node_size, int) else 20
        n_size = {k: n_size_base for k in n_colors}
        if isinstance(node_size, dict):
            n_size.update(node_size)
        # Draw all the nodes by type with the type-specific properties
        for ntype in n_names.keys():
            nx.draw_networkx_nodes(graph, pos,
                                   nodelist=n_names[ntype],
                                   node_color=n_colors[ntype],
                                   node_shape=n_shape[ntype],
                                   node_size=n_size[ntype],
                                   alpha=n_alpha[ntype],
                                   label=n_legend[ntype])
        # Define edge colors and alpha values
        rel_colors = {'shared_encoding': 'magenta',
                      'indexes_values': 'cyan',
                      'equivalent': 'gray',
                      'indexes': 'orange',
                      'user': 'green',
                      'shared_ascending_encoding': 'blue',
                      'order': 'lightblue',
                      'managed_by': 'steelblue',
                      'link': 'magenta',
                      'attribute_of': 'black'}
        if isinstance(relationship_colors, str):
            # A single color string applies to every relationship type
            for k in rel_colors:
                rel_colors[k] = relationship_colors
        # Fix: must be an elif -- the original fell through and called
        # dict.update() with the plain string, which raises ValueError.
        elif relationship_colors is not None:
            rel_colors.update(relationship_colors)
        rel_base_alpha = 0.6 if not isinstance(relationship_alpha, float) else relationship_alpha
        rel_alpha = {k: rel_base_alpha for k in rel_colors}
        if isinstance(relationship_alpha, dict):
            rel_alpha.update(relationship_alpha)
        # Re-sort edges by relationship type
        edge_by_type = {}
        for r in data['relationships']:
            if r['type'] in edge_by_type:
                edge_by_type[r['type']].append((r['source'], r['target']))
            else:
                edge_by_type[r['type']] = [(r['source'], r['target'])]
        # Determine the counts of relationships, i.e., how many relationships of each type do we have
        if relationship_counts:
            relationship_counts = {rt: len(rl) for rt, rl in edge_by_type.items()}
        else:
            relationship_counts = None
        # Draw the network edges
        for rt, rl in edge_by_type.items():
            if relationship_types is None or rt in relationship_types:
                try:
                    edge_label = rt if relationship_counts is None else (rt + ' (%i)' % relationship_counts[rt])
                    nx.draw_networkx_edges(graph,
                                           pos,
                                           edgelist=rl,
                                           width=1.0,
                                           alpha=rel_alpha[rt],
                                           edge_color=rel_colors[rt],
                                           arrows=False,
                                           style='solid' if rt != 'link' else 'dashed',
                                           label=edge_label
                                           )
                except KeyError:
                    # Relationship types without a registered color/alpha are skipped
                    pass
        if show_labels:
            # Create node labels (basename only; keep the full name for the root)
            labels = {i: os.path.basename(i) if len(os.path.basename(i)) > 0 else i
                      for i in graph.nodes(data=False)}
            # Determine label positions
            if label_offset is not None:
                # Move individual labels by the user-defined offsets
                if isinstance(label_offset, dict):
                    label_pos = deepcopy(pos)
                    for k, v in label_offset.items():
                        label_pos[k] += v
                # Move all labels by a single, user-defined offset
                else:
                    label_pos = {k: (v + label_offset) for k, v in pos.items()}
            else:
                # Use the node positions as label positions
                label_pos = pos
            # Draw the labels
            nx.draw_networkx_labels(graph, label_pos, labels, font_size=label_font_size)
        if axis_on:
            plt.axis('on')
        else:
            plt.axis('off')
        # Get rid of large whitespace around the figure
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        plt.legend(prop={'size': label_font_size}, loc=legend_location)
        plt.autoscale(True)
        plt.tight_layout()
        if xlim:
            plt.xlim(xlim)
        if ylim:
            plt.ylim(ylim)
        if show_plot:
            plt.show()
        return fig
|
import csv
from time import time
from time import sleep
def test_reader():
ts = time()
with open('csvreader.csv', 'r') as f:
reader = csv.reader(f)
for line in reader:
continue
te = time()
print(f'Python csv.reader parses {reader.line_num} lines '
f'in {(te-ts)*1000:.0f} milliseconds')
def test_dictreader():
    """Time csv.DictReader on csvreader.csv (pausing first so the runs are separated)."""
    # sleep for 3 seconds
    sleep(3)
    start = time()
    with open('csvreader.csv', 'r') as handle:
        dict_reader = csv.DictReader(handle)
        for _ in dict_reader:
            pass
    elapsed = time() - start
    print(f'Python csv.DictReader parses {dict_reader.line_num} lines '
          f'in {elapsed*1000:.0f} milliseconds')
if __name__ == '__main__':
    # Run both timing benchmarks back to back on the same input file.
    test_reader()
    test_dictreader()
def pytest_addoption(parser):
    """Register the --aws flag so AWS-lambda-dependent tests can be opted into."""
    parser.addoption(
        '--aws',
        action='store_true',
        dest="aws",
        default=False,
        help="enable tests needing aws lambda",
    )
def pytest_configure(config):
    """Exclude aws-marked tests unless --aws was supplied on the command line."""
    if config.option.aws:
        return
    setattr(config.option, 'markexpr', 'not aws')
|
###***********************************###
'''
Grade Notifier
File: initializegn.py
Author: Ehud Adler
Core Maintainers: Ehud Adler, Akiva Sherman,
Yehuda Moskovits
Copyright: Copyright 2019, Ehud Adler
License: MIT
'''
###***********************************###
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import mysql.connector
import argparse
import time
import os
import re
import requests
import getpass
import traceback
import subprocess
import cunyfirstapi
from helper import constants
from lxml import html
from helper.fileManager import create_dir
from helper.constants import log_path
from helper.constants import script_path, abs_repo_path
from helper.helper import print_to_screen
from helper.security import decrypt
from dotenv import load_dotenv
from os.path import join, dirname
"""Initialize Grade-Notifier
"""
__author__ = "Ehud Adler & Akiva Sherman"
__copyright__ = "Copyright 2018, The Punk Kids"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Ehud Adler & Akiva Sherman"
__email__ = "self@ehudadler.com"
__status__ = "Production"
# Load file.
load_dotenv('../../private/.env')
# Accessing variables.
DB_USERNAME = os.getenv('DB_USERNAME')
DB_PASSWORD = os.getenv('DB_PASSWORD')
DB_HOST = os.getenv('DB_HOST')
def add_to_db(username, encrypted_password, school, phone):
    """Insert a new user record into the GradeNotifier.Users table.

    :param username: canonical CUNYfirst username (lower-cased, suffix stripped)
    :param encrypted_password: the user's password, stored still encrypted
    :param school: school code, e.g. 'QNS01'
    :param phone: phone number to notify
    """
    myconnector = mysql.connector.Connect(user=DB_USERNAME,
                                          host=DB_HOST, passwd=DB_PASSWORD)
    # Fix: close the cursor and connection even on error (they previously leaked).
    try:
        myconnector.autocommit = True  # INSERT is committed immediately
        cursor = myconnector.cursor()
        try:
            cursor.execute('USE GradeNotifier')
            # Parameterized query -- values are escaped by the driver.
            query_string = ('INSERT INTO Users (username, password, school, phoneNumber) VALUES '
                            '(%s, %s, %s, %s);')
            data = (username, encrypted_password, school, phone)
            cursor.execute(query_string, data)
        finally:
            cursor.close()
    finally:
        myconnector.close()
def user_exists(username, school):
    """Return True when a user with this username/school combination is already registered.

    :param username: canonical CUNYfirst username (lower-cased, suffix stripped)
    :param school: school code, e.g. 'QNS01'
    :return: bool -- True when at least one matching row exists
    """
    myconnector = mysql.connector.Connect(user=DB_USERNAME,
                                          host=DB_HOST, passwd=DB_PASSWORD)
    # Fix: close the cursor and connection even on error (they previously leaked).
    try:
        myconnector.autocommit = True
        cursor = myconnector.cursor()
        try:
            cursor.execute('USE GradeNotifier')
            # test if in DB by checking count of records with that username and school combo
            query_string = ('SELECT COUNT(*) FROM Users WHERE '
                            'username = %s AND school = %s')
            data = (username, school)
            cursor.execute(query_string, data)
            # fetchone() is the documented cursor API (cursor.next() is non-standard)
            rows = cursor.fetchone()[0]
            return rows > 0
        finally:
            cursor.close()
    finally:
        myconnector.close()
def parse():
    """Build and evaluate the CLI argument parser for the grade notifier."""
    arg_parser = argparse.ArgumentParser(
        description='Specify commands for CUNY Grade Notifier Retriever v1.0')
    arg_parser.add_argument('--school', default="QNS01")
    arg_parser.add_argument('--list-codes', action='store_true')
    # Plain string-valued options (credentials and output file).
    for plain_flag in ('--username', '--password', '--phone', '--filename'):
        arg_parser.add_argument(plain_flag)
    # Production
    arg_parser.add_argument('--prod')
    # Development
    arg_parser.add_argument('--enable_phone')
    return arg_parser.parse_args()
def main():
    """Collect CUNYfirst credentials (from args or interactive prompts), validate
    them with a live login, and register the user in the grade-notifier DB."""
    args = parse()
    try:
        # Fall back to interactive prompts for anything not given on the CLI.
        username = input(
            "Enter username: ") if not args.username else args.username
        # NOTE(review): the value read here is treated as already encrypted
        # (it is decrypted below before the login attempt) -- confirm.
        encrypted_password = getpass.getpass(
            "Enter password: ") if not args.password else args.password
        number = input(
            "Enter phone number: ") if not args.phone else args.phone
        prod = False if not args.prod else True  # currently unused below
        # Strip the CUNY login suffix so usernames are stored in canonical form.
        username = re.sub(r'@login\.cuny\.edu', '', username).lower()
        if user_exists(username, args.school.upper()):
            # Refuse duplicate registrations for the same username/school pair.
            print_to_screen(
                "Seems that you already have a session running.\n" \
                + "If you think there is a mistake, contact me @ Ehud.Adler62@qmail.cuny.edu",
                "error",
                "Oh No!",
            )
            return
        password = decrypt(encrypted_password, '../../private/keys/private.pem')
        # Validate the credentials by actually logging in to CUNYfirst.
        api = cunyfirstapi.CUNYFirstAPI(username, password)
        api.login()
        if api.is_logged_in():
            # Credentials are valid: persist the (still encrypted) password.
            add_to_db(username, encrypted_password, args.school.upper(), number)
            print_to_screen(
                "Check your phone for a text!\n" \
                + "The service will check for new grades every 30 min and text you when anything changes.\n" \
                + "The service will continue for 2 weeks and then require you to sign-in again.\n" \
                + "Please only sign in once.\n" \
                + "Enjoy!",
                "ok",
                "Hold Tight!",
            )
            api.logout()
        else:
            print_to_screen(
                "The username/password combination you entered seems to be invalid.\n" \
                + "Please try again.",
                "error",
                "Oh No!",
            )
    except Exception as e:
        # Best-effort script: report the full traceback instead of crashing silently.
        traceback.print_exc()


if __name__ == '__main__':
    main()
|
from models.model_group import Group
import random
def test_modify_some_group(app, db, check_ui):
    """Modify a randomly chosen group and verify DB (and optionally UI) state."""
    # Make sure there is at least one group available to modify.
    if not db.get_group_list():
        app.group.create_new_group(Group(group_name='a', group_header='b', group_footer='c'))
    groups_before = db.get_group_list()
    target = random.choice(groups_before)
    replacement = Group(group_name='k', group_header='b', group_footer='y')
    replacement.group_id = target.group_id
    app.group.modify_group_by_id(replacement, target.group_id)
    groups_after = db.get_group_list()
    # Expected list: the old snapshot with the target swapped for the replacement.
    expected = list(groups_before)
    expected.remove(target)
    expected.append(replacement)
    assert sorted(expected, key=Group.id_or_max) == sorted(groups_after, key=Group.id_or_max)
    if check_ui:
        assert sorted(groups_after, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
|
from __future__ import division
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import numpy as np
import sys
sys.path.append('utils')
slim = tf.contrib.slim
from utils.model import deform_model
from utils.mnist_gen import get_gen
# --- Hyper-parameters and data pipelines ---------------------------------
batch_size = 64
learning_rate = 1e-4 # Decrease the learning rate by 10 for fine-tune.
steps_per_epoch = int(np.ceil(60000 / batch_size))  # 60k MNIST training images
validation_steps = 10000  # one pass over the 10k test images (batch_size=1)
# Generators yielding scaled-MNIST batches (see utils.mnist_gen.get_gen).
train_data_generator_scaled = get_gen('train', batch_size=batch_size, shuffle=True, scaled=True)
test_data_generator_scaled = get_gen('test', batch_size=1, shuffle=True, scaled=True)
# Graph inputs in "NCHW" layout: (batch, channel, height, width).
input_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, 1, 28, 28], name='input')
label_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='label')
is_training_placeholder = tf.placeholder(dtype=tf.bool, shape=[], name='is_training')
# Deformable model; backbone weights are frozen (trainable=False).
output_tensor = deform_model(input_placeholder, is_training_placeholder, bn=True, trainable=False)
# Loss and accuracy metrics.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=label_placeholder, logits=output_tensor))
correct_prediction = tf.equal(tf.argmax(output_tensor, 1), tf.argmax(label_placeholder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Ensure batch-norm update ops run together with each optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
init = tf.global_variables_initializer()
# Session configuration: grow GPU memory on demand, allow CPU fallback.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
# Restore all the variables from the regular model except for those related to offsets.
variables_to_restore = []
for var in tf.global_variables():
    if 'offset' not in var.name:
        variables_to_restore.append(var)
restorer = tf.train.Saver(variables_to_restore)
# Define saver (saves every variable, including the new offset variables).
saver = tf.train.Saver(tf.global_variables())
# Check if the trainable variables are correctly set.
print("Only the following variables are trainable:")
for var in tf.trainable_variables():
    print(var.name)
if __name__ == '__main__':
    with tf.Session(config=config) as sess:
        sess.run(init)
        # Initialize from the pre-trained regular model (offset vars excluded above).
        restorer.restore(sess, "checkpoint/regular_model")
        print("Weights restored from 'checkpoint/regular_model'.")
        # --- Fine-tune the offset variables for 10 epochs ---
        for i in range(10):
            acc_sum, loss_sum = 0., 0.
            batch_count = 0
            while batch_count < steps_per_epoch:
                # Renamed from `input` to avoid shadowing the builtin.
                batch_x, batch_y = next(train_data_generator_scaled)
                batch_x = np.transpose(batch_x, axes=[0, 3, 1, 2])  # to fit "NCHW" data format
                loss, acc, _ = sess.run([cross_entropy, accuracy, train_op],
                                        feed_dict={input_placeholder: batch_x,
                                                   label_placeholder: batch_y,
                                                   is_training_placeholder: False})
                # The moving average of batch normalization has to be frozen
                loss_sum += loss
                acc_sum += acc
                batch_count += 1
            print('Epoch {} training: loss={}, acc={}'.format(i + 1, loss_sum / batch_count, acc_sum / batch_count))
        # Save session into checkpoint file.
        # Bug fix: the previous try/os.mkdir/except: pass swallowed *all*
        # errors; exist_ok only tolerates an already-existing directory.
        os.makedirs('checkpoint', exist_ok=True)
        save_path = saver.save(sess, "checkpoint/deform_model")
        print("Model saved in path: %s" % save_path)
        # --- Testing on scaled MNIST ---
        acc_sum, loss_sum = 0., 0.
        batch_count = 0
        while batch_count < validation_steps:
            batch_x, batch_y = next(test_data_generator_scaled)
            batch_x = np.transpose(batch_x, axes=[0, 3, 1, 2])  # to fit "NCHW" data format
            loss, acc = sess.run([cross_entropy, accuracy],
                                 feed_dict={input_placeholder: batch_x,
                                            label_placeholder: batch_y,
                                            is_training_placeholder: False})
            loss_sum += loss
            acc_sum += acc
            batch_count += 1
        print('Testing for deform conv model on scaled MNIST: loss={}, acc={}'.format(loss_sum / batch_count,
                                                                                      acc_sum / batch_count))
|
# functions that work with the tracking data ....
import pandas as pd
import numpy as np
import math
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
from scipy.stats import circmean,circvar
import cv2
import sys
def pos_make_df(data_pos, box_size_cm, timebase_pos, time_stamps_sessions_pos, loop_n, divider_n):
    """Convert raw LED tracking data into a cleaned, smoothed position DataFrame.

    Cleans invalid samples (tracker sentinel value 1023), estimates the
    px-to-cm ratio from the base session, picks the more reliable LED,
    smooths positions, computes speed, corrects arena rotation, and
    derives a head-direction angle corrected for the LED offset.

    Args:
        data_pos: raw records with columns frame_counter, x1, y1, x2, y2.
        box_size_cm: side length of the (square) arena in cm.
        timebase_pos: position sampling rate (frames per second).
        time_stamps_sessions_pos: session boundary indices; entries [0]
            and [1] delimit the base session used for calibration.
        loop_n: number of initial samples used to locate the head center.
        divider_n: number of candidate divider positions between the LEDs.

    Returns:
        Tuple (data_pos_df, px_to_cm, head_offset, head_offset_var).
    """
    data_pos_df = pd.DataFrame(data_pos)
    data_pos_df['time'] = np.array(data_pos_df['frame_counter'], dtype=float)/float(timebase_pos)  # in sec
    data_pos_df.set_index('time', drop=True, append=False, inplace=True, verify_integrity=False)
    # find amount of invalid tracking (1023 is the tracker's failure value)
    x1_fail = np.sum(data_pos_df.x1.values == 1023)/float(len(data_pos_df))
    x2_fail = np.sum(data_pos_df.x2.values == 1023)/float(len(data_pos_df))
    y1_fail = np.sum(data_pos_df.y1.values == 1023)/float(len(data_pos_df))
    y2_fail = np.sum(data_pos_df.y2.values == 1023)/float(len(data_pos_df))
    # get rid of 1023 values ...
    data_pos_df['x1'].replace(to_replace=1023, inplace=True, method='ffill', axis=None) # ffill first
    data_pos_df['x1'].replace(to_replace=1023, inplace=True, method='bfill', axis=None) # then do bfill to get rid of 1023s at the end
    data_pos_df['x2'].replace(to_replace=1023, inplace=True, method='ffill', axis=None)
    data_pos_df['x2'].replace(to_replace=1023, inplace=True, method='bfill', axis=None)
    data_pos_df['y1'].replace(to_replace=1023, inplace=True, method='ffill', axis=None)
    data_pos_df['y1'].replace(to_replace=1023, inplace=True, method='bfill', axis=None)
    data_pos_df['y2'].replace(to_replace=1023, inplace=True, method='ffill', axis=None)
    data_pos_df['y2'].replace(to_replace=1023, inplace=True, method='bfill', axis=None)
    # get ratio (px to cm) ...
    # do the following calculations only on first session (base session)
    idx_start = int(time_stamps_sessions_pos[0]); idx_stop = int(time_stamps_sessions_pos[1])
    if np.diff(data_pos_df['frame_counter'].values[idx_stop-int(timebase_pos):idx_stop]).sum() == 0:
        # Frame counter frozen over the last second: drop those frames.
        idx_stop -= int(timebase_pos)
    first_session = data_pos_df.iloc[idx_start:idx_stop, :]
    deltax1 = np.max(first_session['x1'])-np.min(first_session['x1'])
    deltay1 = np.max(first_session['y1'])-np.min(first_session['y1'])
    deltax2 = np.max(first_session['x2'])-np.min(first_session['x2'])
    deltay2 = np.max(first_session['y2'])-np.min(first_session['y2'])
    px_to_cm = box_size_cm/np.mean([deltax1, deltay1, deltax2, deltay2])  # assuming square arena
    # find correct LED ...
    x_art_all = np.zeros((loop_n, divider_n))
    y_art_all = np.zeros((loop_n, divider_n))
    # between the two LEDs try to find the center point as the point of minimum movement
    # Bug fix: xrange is Python 2 only (NameError on Python 3) -> range.
    for i in range(loop_n):  # first loop_n position samples
        counter_divider = 0
        for divider in np.linspace(-1.5, 1.5, divider_n):
            art_point_x = divider*abs((first_session['x2'].values[i]-first_session['x1'].values[i]))
            art_point_y = divider*abs((first_session['y2'].values[i]-first_session['y1'].values[i]))
            if first_session['x1'].values[i] <= first_session['x2'].values[i]:
                x_art = first_session['x1'].values[i]+art_point_x
            if first_session['x1'].values[i] > first_session['x2'].values[i]:
                x_art = first_session['x1'].values[i]-art_point_x
            if first_session['y1'].values[i] <= first_session['y2'].values[i]:
                y_art = first_session['y1'].values[i]+art_point_y
            if first_session['y1'].values[i] > first_session['y2'].values[i]:
                y_art = first_session['y1'].values[i]-art_point_y
            x_art_all[i, counter_divider] = x_art
            y_art_all[i, counter_divider] = y_art
            counter_divider = counter_divider + 1
    dist_art_all = np.zeros((loop_n-1, divider_n))
    for divider in range(divider_n):  # Bug fix: xrange -> range
        dist_art_all[:, divider] = np.sqrt(np.square(np.diff(x_art_all[:, divider]))+np.square(np.diff(y_art_all[:, divider])))
    total_dist_art = np.cumsum(dist_art_all, axis=0)[-1, :]
    fraction = np.linspace(-1.5, 1.5, divider_n)[np.argmin(total_dist_art)]
    # Choose the LED pair with acceptable failure rate (< 30% invalid samples).
    if (fraction > 0.5):
        if (x1_fail < 0.3) and (y1_fail < 0.3):
            data_pos_df['correct_x'] = data_pos_df['x1']
            data_pos_df['correct_y'] = data_pos_df['y1']
        else:
            data_pos_df['correct_x'] = data_pos_df['x2']
            data_pos_df['correct_y'] = data_pos_df['y2']
    else:
        if (x2_fail < 0.3) and (y2_fail < 0.3):
            data_pos_df['correct_x'] = data_pos_df['x2']
            data_pos_df['correct_y'] = data_pos_df['y2']
        else:
            data_pos_df['correct_x'] = data_pos_df['x1']
            data_pos_df['correct_y'] = data_pos_df['y1']
    # smooth positions ...
    cols = ['x1', 'x2', 'y1', 'y2', 'correct_x', 'correct_y']
    for col in cols:
        #data_pos_df[col+'_inter'] = savgol_filter(data_pos_df[col], 25, 4) # Savitzky golay
        data_pos_df[col+'_inter'] = gaussian_filter1d(data_pos_df[col], 2, mode='nearest') # smoothed position with sigma = 2
    # Get speed ...
    dist = np.sqrt(np.square(np.diff(data_pos_df['correct_x_inter']))+np.square(np.diff(data_pos_df['correct_y_inter'])))
    time_diff = np.diff(data_pos_df.index)
    time_diff[time_diff == 0] = np.inf  # guard against duplicated timestamps
    speed = np.hstack((0, dist*px_to_cm/time_diff)) # cm/s
    speed_filtered = gaussian_filter1d(speed, 1) # smoothed speed with sigma = 1
    data_pos_df['speed'] = speed
    data_pos_df['speed_filtered'] = speed_filtered
    #######################################################################################################################
    # correction of arena and head direction offset
    # correct rotation of arena if it is not perfectly positioned at 90 degree to camera
    # renew first_session data (do calculations only on base sesssion)
    first_session = data_pos_df.iloc[idx_start:idx_stop, :]
    center_x = int((np.max(first_session['correct_x_inter']) - np.min(first_session['correct_x_inter'])))
    center_y = int((np.max(first_session['correct_y_inter']) - np.min(first_session['correct_y_inter'])))
    center = (center_x, center_y)
    first_session_coords = np.array(np.column_stack((first_session['correct_x_inter'], first_session['correct_y_inter'])), dtype=int)
    angle = cv2.minAreaRect(first_session_coords)[-1]
    if np.abs(angle) > 45:
        angle = 90 + angle
    sys.stdout.write('Detected a arena rotation angle of {:.2f} degree.\n'.format(angle))
    M = cv2.getRotationMatrix2D(center, angle, 1)
    # rotation matrix is applied in the form:
    # M00x + M01y + M02
    # M10x + M11y + M12
    keys_to_correct = [['x1', 'y1'], ['x2', 'y2'], ['x1_inter', 'y1_inter'], ['x2_inter', 'y2_inter'],
                       ['correct_x', 'correct_y'], ['correct_x_inter', 'correct_y_inter']]
    for pair in keys_to_correct:
        correct_xs, correct_ys = apply_rotation(data_pos_df, pair[0], pair[1], M)
        # write corrected coordinates to dataframe
        data_pos_df[pair[0]] = correct_xs
        data_pos_df[pair[1]] = correct_ys
    # Correct head direction / LED offset:
    # Get LED direction ...
    diff_x_led = data_pos_df['x2_inter']-data_pos_df['x1_inter']
    diff_y_led = data_pos_df['y2_inter']-data_pos_df['y1_inter']
    led_angle = np.array([math.atan2(list(x)[0], list(x)[1]) for x in zip(diff_x_led, diff_y_led)])
    led_angle = (led_angle + 2*np.pi) % (2*np.pi)  # wrap into [0, 2*pi)
    data_pos_df['led_angle'] = led_angle
    # Get moving direction ...
    diff_x_move = np.diff(data_pos_df['correct_x_inter'])
    diff_y_move = np.diff(data_pos_df['correct_y_inter'])
    mov_angle = np.array([math.atan2(list(x)[0], list(x)[1]) for x in zip(diff_x_move, diff_y_move)])
    mov_angle = np.hstack((mov_angle, 0))
    mov_angle = (mov_angle + 2*np.pi) % (2*np.pi)
    data_pos_df['mov_angle'] = mov_angle
    # Calculate head direction / LED offset
    # ... renew first_session df to calculate only over first session:
    first_session = data_pos_df.iloc[idx_start:idx_stop, :]
    mov_angle_first = first_session['mov_angle'][first_session['speed'] > 20].values # filter at 20 cm/s speed (that's quite random)
    led_angle_first = first_session['led_angle'][first_session['speed'] > 20].values
    diff_mov_led = mov_angle_first - led_angle_first
    diff_mov_led[diff_mov_led < 0] = 2*np.pi+diff_mov_led[diff_mov_led < 0]
    diff_mov_led[diff_mov_led > 2*np.pi] = diff_mov_led[diff_mov_led > 2*np.pi] - 2*np.pi
    head_offset = circmean(diff_mov_led)
    head_offset_var = circvar(diff_mov_led)
    sys.stdout.write('Head angle offset: {:.2f} degrees | Variance: {:.2f}\n'.format(math.degrees(head_offset), head_offset_var))
    if head_offset_var > 1:
        sys.stdout.write('Head angle offset variance > 1: This is not accurate.\n')
    # ... and correct LED angle:
    led_angle_corr = led_angle + head_offset
    led_angle_corr[led_angle_corr < 0] = 2*np.pi+led_angle_corr[led_angle_corr < 0]
    led_angle_corr[led_angle_corr > 2*np.pi] = led_angle_corr[led_angle_corr > 2*np.pi] - 2*np.pi
    data_pos_df['head_angle'] = led_angle_corr
    # NOTE: pandas has a little endian compiler issue when adding the angle
    # vector to the DataFrame; values can still be read though.
    return data_pos_df, px_to_cm, head_offset, head_offset_var
def apply_rotation(data_pos_df, xs, ys, M):
    """Apply a 2x3 affine rotation matrix to the coordinate columns `xs`, `ys`.

    Args:
        data_pos_df: mapping (DataFrame or dict) with columns `xs` and `ys`.
        xs, ys: names of the x and y coordinate columns.
        M: 2x3 affine matrix as returned by cv2.getRotationMatrix2D.

    Returns:
        Tuple (correct_xs, correct_ys): lists of transformed coordinates.
    """
    coords = np.array(np.column_stack((data_pos_df[xs], data_pos_df[ys])), dtype=float)
    # Vectorized affine transform: [x', y'] = M[:, :2] @ [x, y] + M[:, 2]
    # (replaces the per-point Python loop with one C-level matrix product).
    M = np.asarray(M, dtype=float)
    rotated = coords @ M[:, :2].T + M[:, 2]
    return rotated[:, 0].tolist(), rotated[:, 1].tolist()
# Import-time banner confirming this helper module was loaded.
print('Loaded analysis helpers: Tracking')
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from pylib.pc.custom_ops import compute_keys, build_grid_ds
from pylib.pc import PointCloud, AABB
class Grid:
    """ 2D regular grid of a point cloud.

    Args:
      point_cloud : A `PointCloud` instance to distribute in the grid.
      cell_sizes: A `float` `Tensor` of shape `[D]`, the sizes of the grid
        cells in each dimension.
      aabb: An `AABB` instance, the bounding box of the grid, if `None`
        the bounding box of `point_cloud` is used. (optional)
    """

    def __init__(self, point_cloud: PointCloud, cell_sizes, aabb=None,
                 name=None):
        with tf.compat.v1.name_scope(
                name, "constructor for point cloud regular grid",
                [self, point_cloud, aabb, cell_sizes]):
            cell_sizes = tf.cast(tf.convert_to_tensor(value=cell_sizes),
                                 tf.float32)
            # A scalar / single-element size applies to every dimension.
            if cell_sizes.shape == [] or cell_sizes.shape[0] == 1:
                cell_sizes = tf.repeat(cell_sizes, point_cloud._dimension)
            #Save the attributes.
            self._batch_size = point_cloud._batch_size
            self._cell_sizes = cell_sizes
            self._point_cloud = point_cloud
            # Bug fix: the `aabb` argument was accepted but silently
            # ignored; honor it as documented and only fall back to the
            # point cloud's own bounding box when it is None.
            self._aabb = aabb if aabb is not None else point_cloud.get_AABB()
            #Compute the number of cells in the grid.
            aabb_sizes = self._aabb._aabb_max - self._aabb._aabb_min
            batch_num_cells = tf.cast(
                tf.math.ceil(aabb_sizes / self._cell_sizes), tf.int32)
            self._num_cells = tf.maximum(
                tf.reduce_max(batch_num_cells, axis=0), 1)
            #Compute the key for each point.
            self._cur_keys = compute_keys(
                self._point_cloud, self._num_cells,
                self._cell_sizes)
            #Sort the keys.
            self._sorted_indices = tf.argsort(
                self._cur_keys, direction='DESCENDING')
            self._sorted_keys = tf.gather(self._cur_keys, self._sorted_indices)
            #Get the sorted points and batch ids.
            self._sorted_points = tf.gather(
                self._point_cloud._points, self._sorted_indices)
            self._sorted_batch_ids = tf.gather(
                self._point_cloud._batch_ids, self._sorted_indices)
            # Lazily-built fast access data structure (see get_DS).
            self._fast_DS = None

    def get_DS(self):
        """ Method to get the 2D-Grid datastructure.

        Note: By default the data structure is not built on initialization,
        but lazily on the first call to this method (then cached).

        Returns:
          A `int` `Tensor` of shape `[num_cells[0], num_cells[1], 2]`, where
          `[i,j,0]:[i,j,1]` is the range of points in cell `i,j`.
          The indices are with respect to the sorted points of the grid.
        """
        if self._fast_DS is None:
            #Build the fast access data structure.
            self._fast_DS = build_grid_ds(
                self._sorted_keys, self._num_cells, self._batch_size)
        return self._fast_DS
|
from builton_sdk.api_models import Company
def test_rest_decorators():
    """Company instances expose the REST helper methods added by decorators."""
    instance = Company("request", "props")
    for method_name in ("get", "refresh"):
        assert hasattr(instance, method_name)
def test_init_sets_api_path():
    """Constructing a Company sets its API path to 'companies'."""
    instance = Company("request", "props")
    assert instance.api_path == "companies"
|
from observer2 import Publisher, SubscriberOne, SubscriberTwo
# Demo of the observer pattern: a Publisher notifying registered subscribers.
pub = Publisher()
bob = SubscriberOne('Bob')
alice = SubscriberTwo('Alice')
john = SubscriberOne('John')
# Register each subscriber together with the callback to invoke on dispatch.
pub.register(bob, bob.update)
pub.register(alice, alice.receive)
# NOTE(review): no explicit callback here — presumably Publisher.register
# falls back to a default method; confirm against observer2.Publisher.
pub.register(john)
pub.dispatch("It's lunchtime!")
# John is unregistered, so only Bob and Alice receive the second message.
pub.unregister(john)
pub.dispatch("Time for dinner")
# Copyright (c) Hu Zhiming 2021/04/22 jimmyhu@pku.edu.cn All Rights Reserved.
import sys
sys.path.append('../')
from utils import LoadTrainingData, LoadTestData, RemakeDir, MakeDir, SeedTorch
from models import weight_init
from models.EHTaskModels import *
import torch
import torch.nn as nn
import torch.utils.data as data
import numpy as np
import time
import datetime
import argparse
import os
# Device configuration: run on the GPU when CUDA is available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# set the random seed to ensure reproducibility
SeedTorch(seed=0)
def main(args):
    """Train and/or evaluate the EHTask model.

    When ``args.trainFlag == 1`` a new model is trained and checkpoints are
    written to ``args.checkpoint``; afterwards every ``*.tar`` checkpoint in
    that directory is evaluated on the test split, reporting single-window
    accuracy and majority-voting accuracy per recording.
    """
    # Create the model
    print('\n==> Creating the model...')
    model = EHTask(args.eyeFeatureSize, args.headFeatureSize, args.gwFeatureSize, args.numClasses)
    model.apply(weight_init)
    model = torch.nn.DataParallel(model)
    if args.loss == 'CrossEntropy':
        criterion = nn.CrossEntropyLoss()
        print('\n==> Loss Function: CrossEntropy')
    else:
        # Bug fix: an unknown loss previously left `criterion` undefined,
        # producing a NameError deep inside the training loop.
        raise ValueError('Unsupported loss function: {}'.format(args.loss))
    # train the model
    if args.trainFlag == 1:
        # load the training data
        train_loader = LoadTrainingData(args.datasetDir, args.batchSize)
        # optimizer and learning-rate schedule
        lr = args.learningRate
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=args.weightDecay)
        expLR = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.gamma, last_epoch=-1)
        # training start epoch
        startEpoch = 0
        # remake checkpoint directory
        RemakeDir(args.checkpoint)
        # training
        localtime = time.asctime(time.localtime(time.time()))
        print('\nTraining starts at ' + localtime)
        # the number of training steps in an epoch
        stepNum = len(train_loader)
        numEpochs = args.epochs
        # Bug fix: guard against ZeroDivisionError when stepNum < lossFrequency.
        logStep = max(1, int(stepNum/args.lossFrequency))
        startTime = datetime.datetime.now()
        for epoch in range(startEpoch, numEpochs):
            # current learning rate (decayed each epoch by expLR)
            lr = expLR.optimizer.param_groups[0]["lr"]
            print('\nEpoch: {} | LR: {:.16f}'.format(epoch + 1, lr))
            for i, (features, labels) in enumerate(train_loader):
                # Move tensors to the configured device
                features = features.reshape(-1, args.inputSize).to(device)
                labels = labels.reshape(-1,).to(device)
                # Forward pass
                outputs = model(features)
                loss = criterion(outputs, labels)
                # Backward and optimize
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # output the loss periodically
                if (i+1) % logStep == 0:
                    print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                          .format(epoch+1, numEpochs, i+1, stepNum, loss.item()))
            # adjust learning rate
            expLR.step()
            endTime = datetime.datetime.now()
            # Bug fix: total_seconds() — .seconds drops the day component.
            totalTrainingTime = (endTime - startTime).total_seconds()/60
            print('\nEpoch [{}/{}], Total Training Time: {:.2f} min'.format(epoch+1, numEpochs, totalTrainingTime))
            # save the checkpoint every args.interval epochs
            if (epoch + 1) % args.interval == 0:
                savePath = os.path.join(args.checkpoint, "checkpoint_epoch_{}.tar".format(str(epoch+1).zfill(3)))
                torch.save({
                    'epoch': epoch+1,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss.item(),
                    'lr': lr,
                }, savePath)
        localtime = time.asctime(time.localtime(time.time()))
        print('\nTraining ends at ' + localtime)
    # test all the existing models
    if os.path.isdir(args.checkpoint):
        filelist = os.listdir(args.checkpoint)
        # checkpoints are stored as tar files
        checkpoints = [name for name in filelist
                       if os.path.splitext(name)[-1][1:] == 'tar']
        checkpointNum = len(checkpoints)
        # test the checkpoints
        if checkpointNum:
            print('\nCheckpoint Number : {}'.format(checkpointNum))
            checkpoints.sort()
            # load the test data and ground-truth labels
            test_loader = LoadTestData(args.datasetDir, args.batchSize)
            testY = np.load(args.datasetDir + 'testY.npy')
            testSize = testY.shape[0]
            # save the predictions
            if args.savePrd:
                prdDir = args.prdDir
                RemakeDir(prdDir)
            localtime = time.asctime(time.localtime(time.time()))
            print('\nTest starts at ' + localtime)
            for name in checkpoints:
                print("\n==> Test checkpoint : {}".format(name))
                # Bug fix: join the path explicitly instead of relying on
                # args.checkpoint ending with a separator.
                checkpointPath = os.path.join(args.checkpoint, name)
                if device == torch.device('cuda'):
                    checkpoint = torch.load(checkpointPath)
                    print('\nDevice: GPU')
                else:
                    checkpoint = torch.load(checkpointPath, map_location=lambda storage, loc: storage)
                    print('\nDevice: CPU')
                model.load_state_dict(checkpoint['model_state_dict'])
                epoch = checkpoint['epoch']
                # the model's predictions
                prdY = []
                # evaluate mode
                model.eval()
                startTime = datetime.datetime.now()
                for i, (features, labels) in enumerate(test_loader):
                    # Move tensors to the configured device
                    features = features.reshape(-1, args.inputSize).to(device)
                    # Forward pass
                    outputs = model(features)
                    _, predictions = torch.max(outputs.data, 1)
                    # accumulate the predictions
                    predictions_npy = predictions.data.cpu().detach().numpy()
                    if len(prdY) > 0:
                        prdY = np.concatenate((prdY, predictions_npy))
                    else:
                        prdY = predictions_npy
                endTime = datetime.datetime.now()
                # average predicting time for a single sample.
                # Bug fix: total_seconds() keeps sub-second precision;
                # .seconds truncated runs shorter than 1 s to 0 ms.
                avgTime = (endTime - startTime).total_seconds() * 1000/testSize
                print('\nAverage prediction time: {:.8f} ms'.format(avgTime))
                # Calculate the prediction accuracy
                chanceAccuracy = 1/args.numClasses*100
                print('Chance Level Accuracy: {:.1f}%'.format(chanceAccuracy))
                prdY = prdY.reshape(-1, 1)
                correct = (testY == prdY).sum()
                accuracy = correct/testSize*100
                print('Epoch: {}, Single Window Prediction Accuracy: {:.1f}%'.format(epoch, accuracy))
                # Majority voting over the whole recording
                testRecordingLabel = np.load(args.datasetDir + 'testRecordingLabel.npy')
                itemLabel = np.unique(testRecordingLabel)
                itemNum = itemLabel.shape[0]
                testY_MV = np.zeros(itemNum)
                prdY_MV = np.zeros(itemNum)
                for i in range(itemNum):
                    # most frequent label among all windows of this recording
                    index = np.where(testRecordingLabel == itemLabel[i])
                    testY_MV[i] = np.argmax(np.bincount(testY[index]))
                    prdY_MV[i] = np.argmax(np.bincount(prdY[index]))
                correct = (testY_MV == prdY_MV).sum()
                accuracy = correct/itemNum*100
                print('Epoch: {}, Majority Voting Prediction Accuracy: {:.1f}%'.format(epoch, accuracy))
                # save the prediction results
                if args.savePrd:
                    prdDir = args.prdDir + 'predictions_epoch_{}/'.format(str(epoch).zfill(3))
                    MakeDir(prdDir)
                    predictionResults = np.zeros(shape=(testSize, 3))
                    predictionResults[:, 0] = testY.reshape(-1,)
                    predictionResults[:, 1] = prdY.reshape(-1,)
                    predictionResults[:, 2] = testRecordingLabel.reshape(-1,)
                    np.savetxt(prdDir + 'predictions.txt', predictionResults, fmt="%d")
            localtime = time.asctime(time.localtime(time.time()))
            print('\nTest ends at ' + localtime)
        else:
            print('\n==> No valid checkpoints in directory {}'.format(args.checkpoint))
    else:
        print('\n==> Invalid checkpoint directory: {}'.format(args.checkpoint))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='EHTask Model')
    # the number of input features
    parser.add_argument('--inputSize', default=1500, type=int,
                        help='the size of input features (default: 1500)')
    # the size of eye-in-head features
    parser.add_argument('--eyeFeatureSize', default=500, type=int,
                        help='the size of eye-in-head features (default: 500)')
    # the size of head features
    parser.add_argument('--headFeatureSize', default=500, type=int,
                        help='the size of head features (default: 500)')
    # the size of gaze-in-world features
    parser.add_argument('--gwFeatureSize', default=500, type=int,
                        help='the size of gaze-in-world features (default: 500)')
    # the number of classes to predict
    parser.add_argument('--numClasses', default=4, type=int,
                        help='the number of classes to predict (default: 4)')
    # the directory that saves the dataset
    parser.add_argument('-d', '--datasetDir', default='../../TaskDataset/EHTask_Cross_User_5_Fold/Test_Fold_1/', type=str,
                        help='the directory that saves the dataset')
    # trainFlag = 1 means train new models; trainFlag = 0 means test existing models
    parser.add_argument('-t', '--trainFlag', default=1, type=int, help='set the flag to train the model (default: 1)')
    # path to save checkpoint
    parser.add_argument('-c', '--checkpoint', default='../checkpoint/EHTask_Cross_User_5_Fold/Test_Fold_1/', type=str,
                        help='path to save checkpoint')
    # save the prediction results or not
    # Bug fix: help text claimed "(default: 0)" but the default is 1.
    parser.add_argument('--savePrd', default=1, type=int, help='save the prediction results (1) or not (0) (default: 1)')
    # the directory that saves the prediction results
    parser.add_argument('-p', '--prdDir', default='../predictions/EHTask_Cross_User_5_Fold/Test_Fold_1/', type=str,
                        help='the directory that saves the prediction results')
    # the number of total epochs to run
    parser.add_argument('-e', '--epochs', default=30, type=int,
                        help='number of total epochs to run (default: 30)')
    # the batch size
    parser.add_argument('-b', '--batchSize', default=256, type=int,
                        help='the batch size (default: 256)')
    # the interval that we save the checkpoint
    parser.add_argument('-i', '--interval', default=30, type=int,
                        help='the interval that we save the checkpoint (default: 30 epochs)')
    # the initial learning rate.
    parser.add_argument('--learningRate', default=1e-2, type=float,
                        help='initial learning rate (default: 1e-2)')
    parser.add_argument('--weightDecay', '--wd', default=1e-4, type=float,
                        help='weight decay (default: 1e-4)')
    parser.add_argument('--gamma', type=float, default=0.75,
                        help='Used to decay learning rate (default: 0.75)')
    # the loss function.
    parser.add_argument('--loss', default="CrossEntropy", type=str,
                        help='Loss function to train the network (default: CrossEntropy)')
    # the frequency that we output the loss in an epoch.
    parser.add_argument('--lossFrequency', default=3, type=int,
                        help='the frequency that we output the loss in an epoch (default: 3)')
    main(parser.parse_args())
|
#!/usr/bin/python
import sys
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from HOG_detector import HOGDetector
class HOG:
    """ROS node helper: runs a HOG detector on frames from an image topic.

    Every incoming frame is converted to an OpenCV BGR image, passed
    through the HOG detector, and shown in a display window.
    (This file is Python 2 / rospy code.)
    """
    def __init__(self, topic):
        # Echo the topic we are about to subscribe to.
        print topic
        self.hog = HOGDetector()
        # Bridge converting ROS Image messages to OpenCV images.
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber(topic, Image, self.callback)
    def callback(self, data):
        # Invoked by rospy for every frame published on the topic.
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            img = self.hog.detect(cv_image)
            cv2.imshow("window", img)
            # waitKey also pumps the HighGUI event loop so the window updates.
            cv2.waitKey(20)
        except CvBridgeError as e:
            print e
def main(args):
rospy.init_node('HOG', anonymous=True)
HOG(args[1])
try:
rospy.spin()
except KeyboardInterrupt:
print "Shutting down"
cv2.DestroyAllWindows()
# Launch the node when executed as a script, forwarding the CLI args.
if __name__ == '__main__':
    main(sys.argv)
|
import sys
# Print a Django requirement pin for the given version (argv[1]):
# "Django>=X.Y,<X.Y+1". URLs (e.g. git archives) pass through unchanged.
version = sys.argv[1]
if version.startswith('http'):
    print(version)
else:
    # Bump the last numeric component: '2.2' -> '2.3', '2.19' -> '2.20'.
    # Bug fix: the previous char-based bump (version[:-1] + last_digit+1)
    # produced '2.110' for '2.19'.
    parts = version.split('.')
    parts[-1] = str(int(parts[-1]) + 1)
    next_version = '.'.join(parts)
    print('Django>=%s,<%s' % (version, next_version))
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from edb.ir import ast as irast
from edb.schema import name as sn
from edb.pgsql import ast as pgast
from edb.pgsql import common
from edb.pgsql import types as pgtypes
from . import astutils
from . import context
def range_for_material_objtype(
        typeref: irast.TypeRef,
        path_id: irast.PathId, *,
        include_overlays: bool=True,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Return a range var over the table backing a material object type.

    If DML overlays are registered for the type (and `include_overlays`
    is set), the result is a UNION of the base table with the overlay
    CTEs instead of a plain relation range.
    """
    from . import pathctx  # XXX: fix cycle

    # Always range over the concrete (material) type's table.
    if typeref.material_type is not None:
        typeref = typeref.material_type

    table_schema_name, table_name = common.get_objtype_backend_name(
        typeref.id, typeref.module_id, catenate=False)

    if typeref.name_hint.module in {'schema', 'cfg', 'sys'}:
        # Redirect all queries to schema tables to edgedbss
        table_schema_name = 'edgedbss'

    relation = pgast.Relation(
        schemaname=table_schema_name,
        name=table_name,
        path_id=path_id,
    )

    rvar = pgast.RangeVar(
        relation=relation,
        alias=pgast.Alias(
            aliasname=env.aliases.get(typeref.name_hint.name)
        )
    )

    overlays = env.rel_overlays.get(str(typeref.id))
    if overlays and include_overlays:
        set_ops = []

        # The base relation becomes the first UNION branch.
        qry = pgast.SelectStmt()
        qry.from_clause.append(rvar)
        pathctx.put_path_value_rvar(qry, path_id, rvar, env=env)
        pathctx.put_path_bond(qry, path_id)

        set_ops.append(('union', qry))

        for op, cte in overlays:
            rvar = pgast.RangeVar(
                relation=cte,
                alias=pgast.Alias(
                    aliasname=env.aliases.get(hint=cte.name)
                )
            )

            qry = pgast.SelectStmt(
                from_clause=[rvar],
            )

            pathctx.put_path_value_rvar(qry, path_id, rvar, env=env)
            pathctx.put_path_bond(qry, path_id)

            if op == 'replace':
                # A 'replace' overlay supersedes everything accumulated
                # so far, so restart the UNION from this branch.
                op = 'union'
                set_ops = []
            set_ops.append((op, qry))

        rvar = range_from_queryset(set_ops, typeref.name_hint, env=env)

    return rvar
def range_for_typeref(
        typeref: irast.TypeRef,
        path_id: irast.PathId, *,
        include_overlays: bool=True,
        common_parent: bool=False,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Return a range var for the given type reference.

    Leaf types range over their material table; union types either range
    over the common parent (when `common_parent` is set) or over a UNION
    of their children's ranges.
    """
    from . import pathctx  # XXX: fix cycle

    if not typeref.children:
        # Leaf type: plain material-table range.
        rvar = range_for_material_objtype(
            typeref, path_id, include_overlays=include_overlays, env=env)
    elif common_parent:
        rvar = range_for_material_objtype(
            typeref.common_parent, path_id,
            include_overlays=include_overlays, env=env)
    else:
        # Union object types are represented as a UNION of selects
        # from their children, which is, for most purposes, equivalent
        # to SELECTing from a parent table.
        set_ops = []

        for child in typeref.children:
            c_rvar = range_for_typeref(
                child, path_id=path_id,
                include_overlays=include_overlays, env=env)

            qry = pgast.SelectStmt(
                from_clause=[c_rvar],
            )

            pathctx.put_path_value_rvar(qry, path_id, c_rvar, env=env)
            if path_id.is_objtype_path():
                pathctx.put_path_source_rvar(qry, path_id, c_rvar, env=env)

            pathctx.put_path_bond(qry, path_id)

            set_ops.append(('union', qry))

        rvar = range_from_queryset(set_ops, typeref.name_hint, env=env)

    rvar.query.path_id = path_id

    return rvar
def range_for_set(
        ir_set: irast.Set, *,
        include_overlays: bool=True,
        common_parent: bool=False,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Return a range var for the given IR set.

    Thin wrapper delegating to range_for_typeref with the set's type
    reference and path id.
    """
    typeref = ir_set.typeref
    path_id = ir_set.path_id
    return range_for_typeref(
        typeref, path_id,
        include_overlays=include_overlays,
        common_parent=common_parent, env=env)
def table_from_ptrref(
        ptrref: irast.PointerRef, *,
        env: context.Environment) -> pgast.RangeVar:
    """Return a Table corresponding to a given Link."""
    schema_name, table_name = common.get_pointer_backend_name(
        ptrref.id, ptrref.module_id, catenate=False)

    # Redirect all queries to schema tables to edgedbss.
    if ptrref.shortname.module in {'schema', 'cfg', 'sys'}:
        schema_name = 'edgedbss'

    rel = pgast.Relation(schemaname=schema_name, name=table_name)
    return pgast.RangeVar(
        relation=rel,
        alias=pgast.Alias(
            aliasname=env.aliases.get(ptrref.shortname.name)))
def range_for_ptrref(
        ptrref: irast.BasePointerRef, *,
        include_overlays: bool=True,
        only_self: bool=False,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Return a Range subclass corresponding to a given ptr step.

    The return value may potentially be a UNION of all tables
    corresponding to a set of specialized links computed from the given
    `ptrref` taking source inheritance into account.
    """
    # Note: the original docstring opened with four quotes (`""""`),
    # leaving a stray quote in the text; fixed here.
    tgt_col = pgtypes.get_ptrref_storage_info(
        ptrref, resolve_type=False, link_bias=True).column_name

    cols = [
        'source',
        tgt_col
    ]

    set_ops = []

    if only_self:
        ptrrefs = {ptrref}
    else:
        # Include every specialized (descendant) pointer's table.
        ptrrefs = {ptrref} | ptrref.descendants

    for src_ptrref in ptrrefs:
        table = table_from_ptrref(src_ptrref, env=env)

        qry = pgast.SelectStmt()
        qry.from_clause.append(table)
        qry.rptr_rvar = table

        # Make sure all property references are pulled up properly
        for colname in cols:
            selexpr = pgast.ColumnRef(
                name=[table.alias.aliasname, colname])
            qry.target_list.append(
                pgast.ResTarget(val=selexpr, name=colname))

        set_ops.append(('union', qry))

        # Overlays are CTEs produced by DML earlier in the query that
        # must be unioned/excepted with the stored table contents.
        overlays = env.rel_overlays.get(src_ptrref.shortname)
        if overlays and include_overlays:
            for op, cte in overlays:
                rvar = pgast.RangeVar(
                    relation=cte,
                    alias=pgast.Alias(
                        aliasname=env.aliases.get(cte.name)
                    )
                )

                qry = pgast.SelectStmt(
                    target_list=[
                        pgast.ResTarget(
                            val=pgast.ColumnRef(
                                name=[col]
                            )
                        )
                        for col in cols
                    ],
                    from_clause=[rvar],
                )
                set_ops.append((op, qry))

    rvar = range_from_queryset(set_ops, ptrref.shortname, env=env)
    return rvar
def range_for_pointer(
        pointer: irast.Pointer, *,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Return a range var for the table(s) backing *pointer*."""
    base = pointer.ptrref
    # Derived pointer refs are backed by the tables of their origin.
    if base.derived_from_ptr is not None:
        base = base.derived_from_ptr
    return range_for_ptrref(base, env=env)
def range_from_queryset(
        set_ops: typing.Sequence[typing.Tuple[str, pgast.BaseRelation]],
        objname: sn.Name, *,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Combine (set-operator, query) pairs into a single range var.

    A single entry is returned as its own FROM-clause rvar; multiple
    entries are folded into a left-deep UNION/EXCEPT tree wrapped in a
    RangeSubselect aliased after *objname*.
    """
    if len(set_ops) > 1:
        # More than one class table, generate a UNION/EXCEPT clause.
        qry = pgast.SelectStmt(
            all=True,
            larg=set_ops[0][1]
        )

        for op, rarg in set_ops[1:]:
            qry.op, qry.rarg = op, rarg
            # Wrap the accumulated tree so the next operator attaches
            # on its right; the final (empty) wrapper is discarded below.
            qry = pgast.SelectStmt(
                all=True,
                larg=qry
            )

        # Drop the trailing wrapper that never received op/rarg.
        qry = qry.larg

        rvar = pgast.RangeSubselect(
            subquery=qry,
            alias=pgast.Alias(
                aliasname=env.aliases.get(objname.name),
            )
        )
    else:
        # Just one class table, so return it directly
        rvar = set_ops[0][1].from_clause[0]

    return rvar
def find_column_in_subselect_rvar(rvar: pgast.BaseRangeVar, name: str) -> int:
    """Return the position of output column *name* in a subselect rvar."""
    # For UNION/EXCEPT queries inspect the leftmost simple query:
    # every branch of a set operation shares the same output layout.
    subquery = astutils.get_leftmost_query(rvar.subquery)
    for idx, restarget in enumerate(subquery.target_list):
        if restarget.name == name:
            return idx

    raise RuntimeError(f'cannot find {name!r} in {rvar} output')
def get_column(
        rvar: pgast.BaseRangeVar,
        colspec: typing.Union[str, pgast.ColumnRef], *,
        nullable: bool=None) -> pgast.ColumnRef:
    """Return a ColumnRef addressing *colspec* through *rvar*'s alias.

    When *nullable* is not given explicitly, nullability (and
    serialization safety) is inferred from the kind of range var.
    """
    if isinstance(colspec, pgast.ColumnRef):
        colname = colspec.name[-1]
    else:
        colname = colspec

    ser_safe = False

    if nullable is None:
        if isinstance(rvar, pgast.RangeVar):
            # Range over a relation, we cannot infer nullability in
            # this context, so assume it's true.
            nullable = True
        elif isinstance(rvar, pgast.RangeSubselect):
            col_idx = find_column_in_subselect_rvar(rvar, colname)
            if astutils.is_set_op_query(rvar.subquery):
                # Set operation: the column is nullable if ANY branch
                # may yield NULL, and ser_safe only if ALL branches are.
                nullables = []
                ser_safes = []

                astutils.for_each_query_in_set(
                    rvar.subquery,
                    lambda q:
                        (nullables.append(q.target_list[col_idx].nullable),
                         ser_safes.append(q.target_list[col_idx].ser_safe))
                )

                nullable = any(nullables)
                ser_safe = all(ser_safes)
            else:
                rt = rvar.subquery.target_list[col_idx]
                nullable = rt.nullable
                ser_safe = rt.ser_safe
        elif isinstance(rvar, pgast.RangeFunction):
            # Range over a function.
            # TODO: look into the possibility of inspecting coldeflist.
            nullable = True
        elif isinstance(rvar, pgast.JoinExpr):
            raise RuntimeError(
                f'cannot find {colname!r} in unexpected {rvar!r} range var')

    name = [rvar.alias.aliasname, colname]
    return pgast.ColumnRef(name=name, nullable=nullable, ser_safe=ser_safe)
def rvar_for_rel(
        rel: pgast.BaseRelation, *,
        lateral: bool=False,
        colnames: typing.Optional[typing.List[str]]=None,
        env: context.Environment) -> pgast.BaseRangeVar:
    """Wrap *rel* into a range var with a freshly generated alias.

    Queries become a RangeSubselect (honoring *lateral*), plain
    relations become a RangeVar.
    """
    # BUGFIX: the original default was a shared mutable list (`=[]`);
    # use None as the sentinel to avoid cross-call aliasing.  Callers
    # that pass colnames explicitly are unaffected.
    if colnames is None:
        colnames = []

    if isinstance(rel, pgast.Query):
        alias = env.aliases.get(rel.name or 'q')

        rvar = pgast.RangeSubselect(
            subquery=rel,
            alias=pgast.Alias(aliasname=alias, colnames=colnames),
            lateral=lateral,
        )
    else:
        alias = env.aliases.get(rel.name)

        rvar = pgast.RangeVar(
            relation=rel,
            alias=pgast.Alias(aliasname=alias, colnames=colnames)
        )

    return rvar
def get_rvar_var(
        rvar: pgast.BaseRangeVar,
        var: pgast.OutputVar) -> pgast.OutputVar:
    """Resolve output var *var* through range var *rvar*.

    Tuple vars are rebuilt element-by-element (resolving each element
    recursively); plain vars resolve to a column on *rvar*'s alias.
    """
    assert isinstance(var, pgast.OutputVar)

    if isinstance(var, pgast.TupleVar):
        elements = []

        for el in var.elements:
            # el.name may itself be an OutputVar (nested tuple).
            val = get_rvar_var(rvar, el.name)
            elements.append(
                pgast.TupleElement(
                    path_id=el.path_id, name=el.name, val=val))

        fieldref = pgast.TupleVar(elements, named=var.named)
    else:
        fieldref = get_column(rvar, var)

    return fieldref
def strip_output_var(
        var: pgast.OutputVar, *,
        optional: typing.Optional[bool]=None,
        nullable: typing.Optional[bool]=None) -> pgast.OutputVar:
    """Return a copy of *var* stripped of rvar qualification.

    Only the final component of column names is kept.  *optional* and
    *nullable*, when given, override the source var's flags.
    """
    if isinstance(var, pgast.TupleVar):
        elements = []

        for el in var.elements:
            if isinstance(el.name, str):
                val = pgast.ColumnRef(name=[el.name])
            else:
                # Nested output var: strip recursively.
                val = strip_output_var(el.name)

            elements.append(
                pgast.TupleElement(
                    path_id=el.path_id, name=el.name, val=val))

        result = pgast.TupleVar(elements, named=var.named)
    else:
        result = pgast.ColumnRef(
            name=[var.name[-1]],
            optional=optional if optional is not None else var.optional,
            nullable=nullable if nullable is not None else var.nullable,
        )

    return result
def add_rel_overlay(
        typeid: str, op: str, rel: pgast.BaseRelation, *,
        env: context.Environment) -> None:
    """Record (*op*, *rel*) as an overlay for type *typeid* in *env*."""
    env.rel_overlays[typeid].append((op, rel))
def cte_for_query(
        rel: pgast.Query, *,
        env: context.Environment) -> pgast.CommonTableExpr:
    """Wrap *rel* into a CommonTableExpr with a freshly aliased name."""
    alias = pgast.Alias(aliasname=env.aliases.get(rel.name))
    return pgast.CommonTableExpr(query=rel, alias=alias)
|
# Make the homework package importable before any project imports
# (presumably add_homework_path inserts a directory derived from this
# file's location onto sys.path -- TODO confirm in utils).
from utils import add_homework_path
add_homework_path(__file__)
|
# @yifan
# 2021.01.12
#
import numpy as np
from sklearn import cluster
class myKMeans():
    """K-means codebook with optional feature truncation.

    Wraps sklearn's KMeans.  If ``trunc`` is not -1, feature dimensions
    from index ``trunc`` onward are zeroed (in place) before fitting.
    """

    def __init__(self, n_clusters=-1, trunc=-1):
        self.KM = cluster.KMeans(n_clusters=n_clusters, n_init=11)
        self.cent = []
        self.trunc = trunc

    def truncate(self, X):
        # Zero the trailing feature dimensions in place; no-op if
        # truncation is disabled (trunc == -1).
        if self.trunc != -1:
            X[:, self.trunc:] *= 0
        return X

    def fit(self, X):
        """Fit on X flattened to (n_samples, n_features); returns self."""
        flat = X.reshape(-1, X.shape[-1])
        self.truncate(flat)
        self.KM.fit(flat)
        self.cent = np.array(self.KM.cluster_centers_)
        return self

    def predict(self, X):
        """Cluster labels for X, reshaped to X's leading dimensions."""
        out_shape = list(X.shape)
        out_shape[-1] = -1
        labels = self.KM.predict(X.reshape(-1, X.shape[-1]))
        return labels.reshape(out_shape)

    def inverse_predict(self, idx):
        """Map cluster indices back to centroid vectors."""
        out_shape = list(idx.shape)
        out_shape[-1] = -1
        return self.cent[idx.reshape(-1)].reshape(out_shape)
|
import cv2
import numpy as np
import glob
import pdb
fps = 30.0
#raspi_ids = [0,1]#[2,3,4]#range(2)
raspi_ids = [4]

# Input/output locations.
#dataDir = '/Users/philipp/Documents/theater/push_up/tv_project/'
dataDir = '/home/pmueller/pushup/'
imgInDir = dataDir+'images_raw/'
videoInDir = dataDir+'videos_raw/'
#outDir = dataDir+'videos_out/'
outDir = '/BS/body-language2/archive00/pushup/videos_out/'

scene3Dir = videoInDir+'scene3/'
allVideoPaths = glob.glob(scene3Dir+'*.mp4')

# BUGFIX: the original used map(), which on Python 3 is a one-shot
# iterator: len() fails on it and it is exhausted after a single pass.
# Build a real list so the paths can be indexed and reused.
getPaths = lambda fnames: [scene3Dir+fname for fname in fnames]

# Per-raspi playlist for scene 3: a fixed intro clip, then random clips
# at normal speed, then random clips fast-forwarded 64x.
scene3Descr = {0:[{'speed':1,'videoPaths':getPaths(['keyboard_neu_cut.mp4']),'length':10},
                  {'speed':1,'videoPaths':allVideoPaths,'length':10},
                  {'speed':64,'videoPaths':allVideoPaths,'length':1}],
               1:[{'speed':1,'videoPaths':getPaths(['Kinderarbeit_cut.mp4']),'length':10},
                  {'speed':1,'videoPaths':allVideoPaths,'length':10},
                  {'speed':64,'videoPaths':allVideoPaths,'length':1}],
               2:[{'speed':1,'videoPaths':getPaths(['Crowd-Supermarked_cut.mp4']),'length':10},
                  {'speed':1,'videoPaths':allVideoPaths,'length':10},
                  {'speed':64,'videoPaths':allVideoPaths,'length':1}],
               3:[{'speed':1,'videoPaths':getPaths(['Dr.Oetker2_cut.mp4']),'length':10},
                  {'speed':1,'videoPaths':allVideoPaths,'length':10},
                  {'speed':64,'videoPaths':allVideoPaths,'length':1}],
               4:[{'speed':1,'videoPaths':getPaths(['Fische_cut.mp4']),'length':10},
                  {'speed':1,'videoPaths':allVideoPaths,'length':10},
                  {'speed':64,'videoPaths':allVideoPaths,'length':1}]}

# Output frame size (a black bottom half is appended later, so actual
# written frames are out_height*2 tall).
out_width = 656
out_height = 512
def addBlackPadding(out,secs=10*60):
    """Write `secs` seconds worth of black frames to video writer `out`."""
    # One all-zero BGR frame, reused for every write.  Frames are two
    # screens tall (out_height*2) to match the writer's frame size.
    black = np.zeros((out_height*2,out_width,3),dtype='uint8')
    n_frames = int(secs*fps)
    for _ in range(n_frames):
        out.write(black)
    return out
def padImage(img,direction,n_pix):
    """Pad `img` with black pixels along one axis.

    `n_pix` total pixels are split floor/ceil between the two sides.
    """
    before = int(np.floor(n_pix/2.0))
    after = int(np.ceil(n_pix/2.0))
    if direction=='vertical':
        top = np.zeros((before,out_width,3),dtype='uint8')
        bottom = np.zeros((after,out_width,3),dtype='uint8')
        img = np.concatenate([top,img,bottom],axis=0)
    if direction=='horizontal':
        left = np.zeros((out_height,before,3),dtype='uint8')
        right = np.zeros((out_height,after,3),dtype='uint8')
        img = np.concatenate([left,img,right],axis=1)
    return img
def resizeImage(img):
    """Letterbox `img` into out_width x out_height, then append a black
    bottom half, yielding an (out_height*2, out_width, 3) frame."""
    imgRatio = float(img.shape[0])/img.shape[1]
    screenRatio = float(out_height)/out_width
    if imgRatio<screenRatio: # image too wide
        # Fit to width, pad the remaining rows.
        size_multiplier = float(out_width)/img.shape[1]
        new_height = int(img.shape[0]*size_multiplier)
        img = cv2.resize(img,(out_width,new_height))
        # pad top and bottom
        img = padImage(img,'vertical',n_pix=out_height-new_height)
    else:
        # Fit to height, pad the remaining columns.
        size_multiplier = float(out_height)/img.shape[0]
        new_width = int(img.shape[1]*size_multiplier)
        img = cv2.resize(img,(new_width,out_height))
        # pad left and right
        img = padImage(img,'horizontal',n_pix=out_width-new_width)
    # padd bottom with size of image to avoid displaying omxplayers messages on the screen
    lowerPadding = np.zeros((out_height,out_width,3),dtype='uint8')
    img = np.concatenate([img,lowerPadding],axis=0)
    return img
def addStaticImg(out,imPath,secs=10*60):
    """Show the image at `imPath` for `secs` seconds on writer `out`."""
    img = cv2.imread(imPath)
    # resize image to dimensions of video
    # img = np.swapaxes(img,0,1)
    img = resizeImage(img)
    for i in range(int(secs*fps)):
        out.write(img)
    return out
# open out video files and add initial black padding
# OpenCV 2.4 exposes the fourcc constructor under the legacy cv2.cv module.
if cv2.__version__[:3]=='2.4':
    fourcc = cv2.cv.CV_FOURCC(*'XVID')
else:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

videoWriters = {}
for raspi_id in raspi_ids:
    out_path = outDir+'raspi'+str(raspi_id)+'.avi'
    # Frame height is doubled: resizeImage() appends a black bottom
    # half to keep omxplayer's on-screen messages off the visible area.
    out = cv2.VideoWriter(out_path,
        fourcc,fps,(out_width,out_height*2))
    print('add black padding, raspi '+str(raspi_id))
    out = addBlackPadding(out,60*60) # TODO: change to 120
    videoWriters[raspi_id] = out
# --------------------------------------------
# STATIC IMGS FOR SCENE 1
# --------------------------------------------
# Each static image runs for addStaticImg's default of 10 minutes.
for raspi_id in raspi_ids:
    # get video writer for raspi_id
    out = videoWriters[raspi_id]
    # first add image for scene 1.4
    imPath = imgInDir+'14_'+str(raspi_id)+'.jpg'
    out = addStaticImg(out,imPath)
    # then for scene 1.6
    imPath = imgInDir+'16_'+str(raspi_id)+'.jpg'
    out = addStaticImg(out,imPath)
    # out = addBlackPadding(out)
    videoWriters[raspi_id] = out
# --------------------------------------------
# VIDEOS FOR SCENE 2
# --------------------------------------------
fade_in_time = 3 # seconds of linear fade in
for raspi_id in raspi_ids:
    print('writing videos for scene 2, raspi = '+str(raspi_id))
    out = videoWriters[raspi_id]
    # videoPath = videoInDir+'s2_'+str(raspi_id)+'.mp4'
    videoPath = videoInDir+'s2_0.mp4'
    cap = cv2.VideoCapture(videoPath)
    # light leak video for 5 minutes, starting at different position for every raspi
    # (skip raspi_id * 30 seconds of frames to desynchronize the raspis)
    for i in range(int(raspi_id*30*fps)):
        ret,frame = cap.read()
    for i in range(int(5*60*fps)):
        ret,frame = cap.read()
        if i<fade_in_time*fps:
            # linear ramp from black over the first fade_in_time seconds
            fade_in_multiplier = float(i)/(fade_in_time*fps)
            frame = (frame*fade_in_multiplier).astype('uint8')
        frame = resizeImage(frame)
        out.write(frame)
    # then noise for 10 minutes
    for i in range(int(10*60*fps)):
        # low-res grayscale noise replicated across the 3 BGR channels
        frame = np.random.randint(0,255,(120,180,1)).astype('uint8')
        frame = np.concatenate([frame]*3,axis=2)
        frame = resizeImage(frame)
        out.write(frame)
def writeClips_scene3(out,videoPaths,speed,length=10):
    """Write `length` minutes of footage from `videoPaths` to `out`.

    `speed` > 1 fast-forwards by keeping only every `speed`-th frame.
    When a clip runs out, a new clip is drawn at random.
    """
    def _pick_path():
        # BUGFIX: the original used np.random.randint(len(videoPaths)-1),
        # whose exclusive upper bound could never select the LAST clip
        # (and required a special case for single-element lists, since
        # randint(0) raises).  randint(len(videoPaths)) covers every index.
        return videoPaths[np.random.randint(len(videoPaths))]

    cap = cv2.VideoCapture(_pick_path())
    for i in range(int(length*60*fps)):
        # Skip speed-1 frames, keep the speed-th one.
        for _ in range(speed):
            ret,frame = cap.read()
        if not ret:
            # Current clip exhausted -- have to start with next video.
            cap = cv2.VideoCapture(_pick_path())
            ret,frame = cap.read()
        try:
            frame = resizeImage(frame)
        except Exception:
            pdb.set_trace()  # debugging aid kept from the original
        out.write(frame)
# --------------------------------------------
# VIDEOS FOR SCENE 3
# --------------------------------------------
for raspi_id in raspi_ids:
    print('writing videos for scene 3, raspi = '+str(raspi_id))
    out = videoWriters[raspi_id]
    # Play the raspi's playlist entries in order (clip set, speed, length).
    for videoInfo in scene3Descr[raspi_id]:
        # print('speed = '+str(speed))
        speed = videoInfo['speed']
        videoPaths = videoInfo['videoPaths']
        length = videoInfo['length']
        writeClips_scene3(out,videoPaths,speed,length)

# finally, close all video out files
for raspi_id in raspi_ids:
    videoWriters[raspi_id].release()
|
import functools
import numpy as np
from scipy.ndimage import map_coordinates
def uv_meshgrid(w, h):
    """Spherical (u, v) coordinates of every pixel center of a w x h pano.

    Returns an (h, w, 2) float64 array with u in (-pi, pi) and
    v in (-pi/2, pi/2).
    """
    cols, rows = np.meshgrid(range(w), range(h))
    uv = np.stack([cols, rows], axis=-1).astype(np.float64)
    # Map pixel centers (x + 0.5, y + 0.5) onto the sphere.
    uv[..., 0] = ((uv[..., 0] + 0.5) / w - 0.5) * 2 * np.pi
    uv[..., 1] = ((uv[..., 1] + 0.5) / h - 0.5) * np.pi
    return uv
@functools.lru_cache()
def _uv_tri(w, h):
    """Cached sin(u), cos(u), tan(v) grids for a w x h panorama."""
    uv = uv_meshgrid(w, h)
    return np.sin(uv[..., 0]), np.cos(uv[..., 0]), np.tan(uv[..., 1])


def uv_tri(w, h):
    """Like _uv_tri, but returns copies so callers may mutate them
    without corrupting the lru_cache'd arrays."""
    sin_u, cos_u, tan_v = _uv_tri(w, h)
    return sin_u.copy(), cos_u.copy(), tan_v.copy()
def coorx2u(x, w=1024):
    """Image column -> longitude u in (-pi, pi)."""
    return ((x + 0.5) / w - 0.5) * 2 * np.pi


def coory2v(y, h=512):
    """Image row -> latitude v in (-pi/2, pi/2)."""
    return ((y + 0.5) / h - 0.5) * np.pi


def u2coorx(u, w=1024):
    """Longitude u -> image column (inverse of coorx2u)."""
    return (u / (2 * np.pi) + 0.5) * w - 0.5


def v2coory(v, h=512):
    """Latitude v -> image row (inverse of coory2v)."""
    return (v / np.pi + 0.5) * h - 0.5
def uv2xy(u, v, z=-50):
    """Project spherical (u, v) onto the horizontal plane at height z.

    Returns the (x, y) plane coordinates of the intersection of the
    viewing ray with the plane.
    """
    # Horizontal distance from the camera axis at which the ray hits
    # the plane.
    c = z / np.tan(v)
    x = c * np.cos(u)
    y = c * np.sin(u)
    return x, y
def pano_connect_points(p1, p2, z=-50, w=1024, h=512):
    """Trace the image-space curve of the 3D segment between two corner
    pixels lying on the horizontal plane at height `z`.

    p1, p2: (x, y) pixel coordinates; returns an [N, 2] array with one
    sample per integer column between the two points.
    """
    if p1[0] == p2[0]:
        # Same column: the edge projects to a vertical segment.
        return np.array([p1, p2], np.float32)

    u1 = coorx2u(p1[0], w)
    v1 = coory2v(p1[1], h)
    u2 = coorx2u(p2[0], w)
    v2 = coory2v(p2[1], h)

    # Segment endpoints on the z-plane.
    x1, y1 = uv2xy(u1, v1, z)
    x2, y2 = uv2xy(u2, v2, z)

    if abs(p1[0] - p2[0]) < w / 2:
        pstart = np.ceil(min(p1[0], p2[0]))
        pend = np.floor(max(p1[0], p2[0]))
    else:
        # The shorter arc wraps around the panorama's x boundary.
        pstart = np.ceil(max(p1[0], p2[0]))
        pend = np.floor(min(p1[0], p2[0]) + w)

    coorxs = (np.arange(pstart, pend + 1) % w).astype(np.float64)
    vx = x2 - x1
    vy = y2 - y1

    # For each sampled column, intersect its viewing direction with the
    # segment on the plane, then recover the row from the distance.
    us = coorx2u(coorxs, w)
    ps = (np.tan(us) * x1 - y1) / (vy - np.tan(us) * vx)
    cs = np.sqrt((x1 + ps * vx) ** 2 + (y1 + ps * vy) ** 2)
    vs = np.arctan2(z, cs)
    coorys = v2coory(vs)

    return np.stack([coorxs, coorys], axis=-1)
def pano_stretch(img, mask, corners, kx, ky, order=1):
    '''
    img: [H, W, C]
    mask: [H, W, C], warped with the same mapping as img
    corners: [N, 2] in image coordinate (x, y) format
    kx: Stretching along front-back direction
    ky: Stretching along left-right direction
    order: Interpolation order. 0 for nearest-neighbor. 1 for bilinear.
    Returns (stretched_img, stretched_mask, stretched_corners).
    '''
    # Process image
    sin_u, cos_u, tan_v = uv_tri(img.shape[1], img.shape[0])
    # Inverse mapping: for each output pixel, the (u0, v0) direction in
    # the source panorama.
    u0 = np.arctan2(sin_u * kx / ky, cos_u)
    v0 = np.arctan(tan_v * np.sin(u0) / sin_u * ky)

    # Source pixel coordinates to sample from.
    refx = (u0 / (2 * np.pi) + 0.5) * img.shape[1] - 0.5
    refy = (v0 / np.pi + 0.5) * img.shape[0] - 0.5

    # [TODO]: using opencv remap could probably speedup the process a little
    stretched_img = np.stack([
        map_coordinates(img[..., i], [refy, refx], order=order, mode='wrap')
        for i in range(img.shape[-1])
    ], axis=-1)

    stretched_mask = np.stack([
        map_coordinates(mask[..., i], [refy, refx], order=order, mode='wrap')
        for i in range(mask.shape[-1])
    ], axis=-1)

    #stretched_label = np.stack([
    #    map_coordinates(label[..., i], [refy, refx], order=order, mode='wrap')
    #    for i in range(label.shape[-1])
    #], axis=-1)

    # Process corners
    # Forward mapping for point coordinates (kx/ky roles are swapped
    # relative to the image's inverse mapping above).
    corners_u0 = coorx2u(corners[:, 0], img.shape[1])
    corners_v0 = coory2v(corners[:, 1], img.shape[0])
    corners_u = np.arctan2(np.sin(corners_u0) * ky / kx, np.cos(corners_u0))
    corners_v = np.arctan(np.tan(corners_v0) * np.sin(corners_u) / np.sin(corners_u0) / ky)
    cornersX = u2coorx(corners_u, img.shape[1])
    cornersY = v2coory(corners_v, img.shape[0])
    stretched_corners = np.stack([cornersX, cornersY], axis=-1)

    return stretched_img, stretched_mask, stretched_corners
def visualize_pano_stretch(stretched_img, stretched_cor, title):
    '''
    Helper function for visualizing the effect of pano_stretch
    '''
    thickness = 2
    color = (0, 255, 0)

    def _draw(xys):
        # When a polyline wraps around the panorama's x boundary, the x
        # coordinates jump backwards; split at the jump before drawing.
        wrap = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
        if len(wrap) == 0:
            cv2.polylines(stretched_img, [xys], False, color, thickness)
        else:
            cut = wrap[0] + 1
            cv2.polylines(stretched_img, [xys[:cut]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[cut:]], False, color, thickness)

    # Ceiling boundary (z=-50): even-indexed corners 0,2,4,6.
    for i in range(4):
        xys = pano_connect_points(stretched_cor[i*2],
                                  stretched_cor[(i*2+2) % 8], z=-50)
        _draw(xys.astype(int))

    # Floor boundary (z=50): odd-indexed corners 1,3,5,7.
    for i in range(4):
        xys = pano_connect_points(stretched_cor[i*2+1],
                                  stretched_cor[(i*2+3) % 8], z=50)
        _draw(xys.astype(int))

    cv2.putText(stretched_img, title, (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (0, 0, 0), 2, cv2.LINE_AA)

    return stretched_img.astype(np.uint8)
if __name__ == '__main__':
    import argparse
    from PIL import Image
    import cv2

    parser = argparse.ArgumentParser()
    parser.add_argument('--i', default='data/valid/img/pano_abpohapclcyuuz.png')
    parser.add_argument('--i_gt', default='data/valid/label_cor/pano_abpohapclcyuuz.txt')
    parser.add_argument('--o', default='sample_stretched_pano.png')
    parser.add_argument('--kx', default=2, type=float,
                        help='Stretching along front-back direction')
    parser.add_argument('--ky', default=1, type=float,
                        help='Stretching along left-right direction')
    args = parser.parse_args()

    img = np.array(Image.open(args.i), np.float64)
    with open(args.i_gt) as f:
        cor = np.array([line.strip().split() for line in f], np.int32)

    # BUGFIX: pano_stretch takes (img, mask, corners, kx, ky) and returns
    # three values; the original call omitted the mask and unpacked only
    # two results, which failed at runtime.  A zero mask is supplied here
    # since this demo only visualizes the image.  (Unused `import time`
    # was also dropped.)
    mask = np.zeros_like(img)
    stretched_img, _, stretched_cor = pano_stretch(img, mask, cor, args.kx, args.ky)

    title = 'kx=%3.2f, ky=%3.2f' % (args.kx, args.ky)
    visual_stretched_img = visualize_pano_stretch(stretched_img, stretched_cor, title)
    Image.fromarray(visual_stretched_img).save(args.o)
|
from rest_framework import serializers
from play.models import Simulation
class SimulationSerializer(serializers.Serializer):
    """Validates a poker simulation request and exposes its results.

    Card fields are 2-3 character strings (presumably rank+suit codes,
    e.g. "2h" / "10s" -- TODO confirm against the Simulation model);
    blank strings are allowed and appear to mean "deal randomly".
    """

    # Number of simulated games to run.
    runs = serializers.IntegerField(min_value=1)
    # The requesting user's hole cards (up to 2).
    user_hand = serializers.ListField(
        child=serializers.CharField(max_length=3, min_length=2, allow_blank=True),
        min_length=0,
        max_length=2,
        required=False
    )
    # Number of simulated opponents (1-7).
    additional_players = serializers.IntegerField(max_value=7, min_value=1)
    # Optional fixed hands for (some of) the opponents.
    additional_hands = serializers.ListField(
        child=serializers.ListField(
            child=serializers.CharField(max_length=3, min_length=2, allow_blank=True),
            min_length=0,
            max_length=3,
            required=False),
        min_length=0,
        max_length=4,
        required=False
    )
    # Optional fixed community cards.
    flop_cards = serializers.ListField(
        child=serializers.CharField(max_length=3, min_length=2, allow_blank=True),
        min_length=0,
        max_length=3,
        required=False
    )
    turn_card = serializers.CharField(max_length=3, min_length=2, required=False, allow_blank=True)
    river_card = serializers.CharField(max_length=3, min_length=2, required=False, allow_blank=True)

    #Read only section
    # Outcome tallies keyed per hand category (read-only output fields).
    results = serializers.DictField(
        child=serializers.DictField(
            child=serializers.IntegerField(min_value=0)
        ),
        read_only=True
    )
    wins = serializers.IntegerField(min_value=0, read_only=True)
    ties = serializers.IntegerField(min_value=0, read_only=True)
    losses = serializers.IntegerField(min_value=0, read_only=True)

    def create(self, validated_data):
        # Build the (unsaved) Simulation instance from validated input.
        return Simulation(**validated_data)

    def validate(self, data):
        """Reject requests that name the same card more than once."""
        all_starting_hands = []
        if data.get('user_hand'):
            all_starting_hands = data['user_hand'].copy()
        if data.get('additional_hands'):
            for hand in data['additional_hands']:
                all_starting_hands += hand
        # Non-blank cards appearing more than once across all hands.
        count_list = [card for card in all_starting_hands if all_starting_hands.count(card) > 1 and card]
        if count_list:
            duplicates = ' '.join(list(set(count_list)))
            raise serializers.ValidationError("Cannot submit same card more than once (" + duplicates + ")")
        return data
|
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    Adopted from https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
    """
    if use_07_metric:
        # 11-point interpolation: average the maximum precision at
        # recall thresholds 0.0, 0.1, ..., 1.0.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            mask = rec >= t
            p = np.max(prec[mask]) if np.any(mask) else 0
            ap += p / 11.
        return ap

    # Exact area under the precision-recall curve.
    # Sentinel values bracket the curve at recall 0 and 1.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # Make precision monotonically non-increasing (the envelope).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # Sum rectangle areas at the points where recall changes.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def iou(bbox_1, bbox_2):
    """
    Get IoU value of two bboxes
    :param bbox_1: (x1, y1, x2, y2), inclusive pixel coordinates
    :param bbox_2: (x1, y1, x2, y2), inclusive pixel coordinates
    :return: IoU
    """
    # Intersection rectangle (may be empty, hence the max(0, ...) clamps).
    inter_left = max(bbox_1[0], bbox_2[0])
    inter_top = max(bbox_1[1], bbox_2[1])
    inter_right = min(bbox_1[2], bbox_2[2])
    inter_bottom = min(bbox_1[3], bbox_2[3])
    inter_area = (max(0, inter_right - inter_left + 1)
                  * max(0, inter_bottom - inter_top + 1))

    # Inclusive coordinates: widths/heights include both endpoints.
    area_1 = (bbox_1[2] - bbox_1[0] + 1) * (bbox_1[3] - bbox_1[1] + 1)
    area_2 = (bbox_2[2] - bbox_2[0] + 1) * (bbox_2[3] - bbox_2[1] + 1)

    return inter_area * 1.0 / (area_1 + area_2 - inter_area)
def viou(traj_1, duration_1, traj_2, duration_2):
    """ compute the voluminal Intersection over Union
    for two trajectories, each of which is represented
    by a duration [fstart, fend) and a list of bounding
    boxes (i.e. traj) within the duration.
    """
    # No temporal overlap implies no volume overlap.
    if duration_1[0] >= duration_2[1] or duration_1[1] <= duration_2[0]:
        return 0.

    # Offsets of the common time window inside each trajectory.
    window_start = max(duration_1[0], duration_2[0])
    window_end = min(duration_1[1], duration_2[1])
    head_1 = window_start - duration_1[0]
    head_2 = window_start - duration_2[0]
    n_common = window_end - window_start

    # Accumulate frame-wise box intersection areas over the window
    # (inclusive pixel coordinates, hence the +1 terms).
    v_overlap = 0
    for i in range(n_common):
        roi_1 = traj_1[head_1 + i]
        roi_2 = traj_2[head_2 + i]
        left = max(roi_1[0], roi_2[0])
        top = max(roi_1[1], roi_2[1])
        right = min(roi_1[2], roi_2[2])
        bottom = min(roi_1[3], roi_2[3])
        v_overlap += max(0, right - left + 1) * max(0, bottom - top + 1)

    # Total volumes over each trajectory's full duration.
    v1 = sum((b[2] - b[0] + 1) * (b[3] - b[1] + 1) for b in traj_1)
    v2 = sum((b[2] - b[0] + 1) * (b[3] - b[1] + 1) for b in traj_2)

    return float(v_overlap) / (v1 + v2 - v_overlap)
def viou_sx(traj_1, duration_1, traj_2, duration_2, frame_thresh=0.5):
    """ compute the voluminal Intersection over Union
    for two trajectories, each of which is represented
    by a duration [fstart, fend) and a list of bounding
    boxes (i.e. traj) within the duration.

    Counts the frames whose per-frame box IoU reaches `frame_thresh`
    and normalizes by the union of the two durations.
    """
    if duration_1[0] >= duration_2[1] or duration_1[1] <= duration_2[0]:
        # No temporal overlap.
        return 0.
    elif duration_1[0] <= duration_2[0]:
        head_1 = duration_2[0] - duration_1[0]
        head_2 = 0
        if duration_1[1] < duration_2[1]:
            tail_1 = duration_1[1] - duration_1[0]
            tail_2 = duration_1[1] - duration_2[0]
        else:
            tail_1 = duration_2[1] - duration_1[0]
            tail_2 = duration_2[1] - duration_2[0]
    else:
        head_1 = 0
        head_2 = duration_1[0] - duration_2[0]
        if duration_1[1] < duration_2[1]:
            tail_1 = duration_1[1] - duration_1[0]
            tail_2 = duration_1[1] - duration_2[0]
        else:
            tail_1 = duration_2[1] - duration_1[0]
            tail_2 = duration_2[1] - duration_2[0]

    v_overlap = 0
    for i in range(tail_1 - head_1):
        roi_1 = traj_1[head_1 + i]
        roi_2 = traj_2[head_2 + i]
        left = max(roi_1[0], roi_2[0])
        top = max(roi_1[1], roi_2[1])
        right = min(roi_1[2], roi_2[2])
        bottom = min(roi_1[3], roi_2[3])
        roi_i_area = max(0, right - left + 1) * max(0, bottom - top + 1)
        # BUGFIX: the original computed the per-frame areas from
        # traj_k[i] instead of the temporally aligned boxes
        # traj_k[head_k + i], giving wrong per-frame IoUs whenever the
        # two durations are offset.
        roi_1_area = (roi_1[2] - roi_1[0] + 1) * (roi_1[3] - roi_1[1] + 1)
        roi_2_area = (roi_2[2] - roi_2[0] + 1) * (roi_2[3] - roi_2[1] + 1)
        iou = float(roi_i_area) / (roi_1_area + roi_2_area - roi_i_area)
        if iou >= frame_thresh:
            v_overlap += 1

    # Normalize by the length of the union of the two durations.
    return float(v_overlap) / (max(duration_1[1], duration_2[1]) - min(duration_1[0], duration_2[0]))
|
#!/usr/bin/python
# Pulls RGB rows from a public Google Sheet (gviz endpoint) and streams
# them to an Arduino over serial, lighting one randomly-chosen pixel
# per new spreadsheet row.
START_DELAY = 2 # seconds
REFRESH_FREQ = 0.5 # seconds
NUMPIXELS = 30 # number of pixels, must match with arduino

import urllib
import json
import serial
import time
import random

# Sets active pixel counter to 0
counter = 0
# Opens serial port
ser = serial.Serial('/dev/ttyACM0', 9600)
# Delay to allow Arduino startup
time.sleep(START_DELAY)
# Generates randomly shuffled list of pixels.  list() so this also
# works on Python 3, where range() is lazy and random.shuffle() fails.
random_list = list(range(NUMPIXELS))
random.shuffle(random_list)

# Enters loop to continuously pull JSON from server
while True:
    try:
        # Pulls JSON from URL by visiting webapp, URL parameters
        jraw = urllib.urlopen('https://docs.google.com/spreadsheets/d/1IE2KSebrycnvjFtUtkZdEslcSd63natw7JA25weiq64/gviz/tq?tq=select%20C%2C%20D%2C%20E%2C%20A')
        jt = jraw.read()
    except Exception:
        # BUGFIX: the original fell through with `jt` undefined (first
        # iteration) or stale (later ones) after a failed fetch.
        # Wait out the refresh period and retry instead.
        print("error opening link")
        time.sleep(REFRESH_FREQ)
        continue
    # Process raw JSON .txt file: strip the gviz JSONP wrapper
    # ("...setResponse(<json>);") down to the bare JSON payload.
    jstr = jt.split("setResponse", 1)[1].split(";", 1)[0]
    jstr = jstr[1:len(jstr)-1]
    loaded = json.loads(jstr)
    # Access data in JSON
    values = loaded['table']['rows']
    rows = len(values)
    if rows != counter:
        # New rows arrived: send one pixel update per new row.
        for i in range(rows - counter):
            # Concatenates to "<pixel>^<r>,<g>#<b>"
            rgb_row = str(str(random_list[(counter+i) % NUMPIXELS]) + '^' + str(int(values[rows-1-i]['c'][0]['f'])) + ',' + str(int(values[rows-1-i]['c'][1]['f'])) + '#' + str(int(values[rows-1-i]['c'][2]['f'])))
            # Writes to serial.
            # NOTE(review): on Python 3 pyserial requires bytes here
            # (rgb_row.encode()); this script targets Python 2's urllib API.
            ser.write(rgb_row)
            print(rgb_row)
        print("executed")
        # Updates counter
        counter = rows
    else:
        print("skipped")
    # Sets delay
    time.sleep(REFRESH_FREQ)
import pytest
import re
import numpy as np
import pandas as pd
from formulae.matrices import design_matrices
from formulae.parser import ParseError
# TODO: See interaction names.. they don't always work as expected
@pytest.fixture(scope="module")
def data():
    """Synthetic test dataset: numeric predictors (x1-x3), two binary
    categoricals (f, g) and a uniform response y; seeded for
    reproducibility."""
    np.random.seed(1234)
    size = 20
    data = pd.DataFrame(
        {
            "y": np.random.uniform(size=size),
            "x1": np.random.uniform(size=size),
            "x2": np.random.uniform(size=size),
            "x3": [1, 2, 3, 4] * 5,
            "f": np.random.choice(["A", "B"], size=size),
            "g": np.random.choice(["A", "B"], size=size),
        }
    )
    return data
@pytest.fixture(scope="module")
def pixel():
    """
    X-ray pixel intensities over time dataset from R nlme package.
    The output is a subset of this dataframe.
    """
    from os.path import dirname, join

    data_dir = join(dirname(__file__), "data")
    data = pd.read_csv(join(data_dir, "Pixel.csv"))
    data["Dog"] = data["Dog"].astype("category")
    data["day"] = data["day"].astype("category")
    # Keep a small deterministic subset: dogs 1-3 on days 2, 4 and 6,
    # sorted so row order is stable across pandas versions.
    data = data[data["Dog"].isin([1, 2, 3])]
    data = data[data["day"].isin([2, 4, 6])]
    data = data.sort_values(["Dog", "Side", "day"])
    data = data.reset_index(drop=True)
    return data
def compare_dicts(d1, d2):
    """Recursively compare two dicts for strict equality.

    Values must match in type as well as value (so 1 != 1.0 here);
    numpy arrays are compared element-wise with matching shapes, and
    nested dicts are compared recursively.
    """
    if len(d1) != len(d2):
        return False
    if set(d1.keys()) != set(d2.keys()):
        return False
    for key in d1:
        v1, v2 = d1[key], d2[key]
        # Strict type match, intentionally stricter than `==`.
        if type(v1) != type(v2):
            return False
        if isinstance(v1, dict):
            if not compare_dicts(v1, v2):
                return False
        elif isinstance(v1, np.ndarray):
            # BUGFIX: the original used `all(v1 == v2)`, which raises
            # on multi-dimensional arrays (ambiguous row truth values)
            # and misbehaves on shape mismatches; np.array_equal
            # handles both correctly.
            if not np.array_equal(v1, v2):
                return False
        elif v1 != v2:
            return False
    return True
def test_empty_formula(data):
    # An empty formula string is rejected outright.
    with pytest.raises(ValueError):
        design_matrices("", data)
def test_empty_model(data):
    # "y ~ 0" removes the intercept and adds no predictors, so both
    # the common and group-specific design matrices are absent.
    dm = design_matrices("y ~ 0", data)
    assert dm.common == None
    assert dm.group == None
def test_common_intercept_only_model(data):
    # "y ~ 1" yields a single all-ones intercept column and no
    # group-specific part.
    dm = design_matrices("y ~ 1", data)
    assert len(dm.common.terms_info) == 1
    assert dm.common.terms_info["Intercept"]["kind"] == "intercept"
    assert dm.common.terms_info["Intercept"]["full_names"] == ["Intercept"]
    assert all(dm.common.design_matrix == 1)
    assert dm.group == None
def test_group_specific_intercept_only(data):
    # "(1|g)" creates a group-specific intercept with one column per
    # level of g; the common part is suppressed by "0".
    dm = design_matrices("y ~ 0 + (1|g)", data)
    assert len(dm.group.terms_info) == 1
    assert dm.group.terms_info["1|g"]["kind"] == "intercept"
    assert dm.group.terms_info["1|g"]["groups"] == ["A", "B"]
    assert dm.group.terms_info["1|g"]["full_names"] == ["1|g[A]", "1|g[B]"]
    assert dm.common == None
def test_common_predictor(data):
    # Numeric predictor: a single column named after the variable.
    dm = design_matrices("y ~ x1", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "x1"]
    assert dm.common.terms_info["x1"]["kind"] == "numeric"
    assert dm.common.terms_info["x1"]["full_names"] == ["x1"]

    # uses alphabetic order
    # reference is the first value by default
    # reduced because we included intercept
    dm = design_matrices("y ~ f", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "f"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())[1:]
    ]
def test_categoric_encoding(data):
    """Check full vs. reduced encodings of categoric terms.

    A categoric term gets a "full" encoding (one column per level) when the
    lower-order terms it would alias are absent, and a "reduced" encoding
    (reference level dropped) when an intercept or lower-order term already
    spans that column space.
    """
    # No intercept, one categoric predictor
    dm = design_matrices("y ~ 0 + f", data)
    assert list(dm.common.terms_info.keys()) == ["f"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "full"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())
    ]
    assert dm.common.design_matrix.shape == (20, 2)
    # Intercept, one categoric predictor
    dm = design_matrices("y ~ 1 + f", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "f"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())[1:]
    ]
    assert dm.common.design_matrix.shape == (20, 2)
    # No intercept, two additive categoric predictors
    # Only the first categoric term can be full; the second is reduced.
    dm = design_matrices("y ~ 0 + f + g", data)
    assert list(dm.common.terms_info.keys()) == ["f", "g"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["g"]["levels"] == sorted(list(data["g"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["g"]["reference"] == sorted(list(data["g"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "full"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())
    ]
    assert dm.common.terms_info["g"]["full_names"] == [
        f"g[{l}]" for l in sorted(data["g"].unique())[1:]
    ]
    assert dm.common.design_matrix.shape == (20, 3)
    # Intercept, two additive categoric predictors
    dm = design_matrices("y ~ 1 + f + g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "f", "g"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["g"]["levels"] == sorted(list(data["g"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["g"]["reference"] == sorted(list(data["g"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())[1:]
    ]
    assert dm.common.terms_info["g"]["full_names"] == [
        f"g[{l}]" for l in sorted(data["g"].unique())[1:]
    ]
    assert dm.common.design_matrix.shape == (20, 3)
    # No intercept, two categoric predictors with interaction
    dm = design_matrices("y ~ 0 + f + g + f:g", data)
    assert list(dm.common.terms_info.keys()) == ["f", "g", "f:g"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["f:g"]["kind"] == "interaction"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["g"]["levels"] == sorted(list(data["g"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["g"]["reference"] == sorted(list(data["g"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "full"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())
    ]
    assert dm.common.terms_info["g"]["full_names"] == [
        f"g[{l}]" for l in sorted(data["g"].unique())[1:]
    ]
    assert dm.common.terms_info["f:g"]["full_names"] == ["f[B]:g[B]"]
    assert dm.common.design_matrix.shape == (20, 4)
    # Intercept, two categoric predictors with interaction
    dm = design_matrices("y ~ 1 + f + g + f:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "f", "g", "f:g"]
    assert dm.common.terms_info["f"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["f:g"]["kind"] == "interaction"
    assert dm.common.terms_info["f"]["levels"] == sorted(list(data["f"].unique()))
    assert dm.common.terms_info["g"]["levels"] == sorted(list(data["g"].unique()))
    assert dm.common.terms_info["f"]["reference"] == sorted(list(data["f"].unique()))[0]
    assert dm.common.terms_info["g"]["reference"] == sorted(list(data["g"].unique()))[0]
    assert dm.common.terms_info["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f"]["full_names"] == [
        f"f[{l}]" for l in sorted(data["f"].unique())[1:]
    ]
    assert dm.common.terms_info["g"]["full_names"] == [
        f"g[{l}]" for l in sorted(data["g"].unique())[1:]
    ]
    assert dm.common.terms_info["f:g"]["full_names"] == ["f[B]:g[B]"]
    assert dm.common.design_matrix.shape == (20, 4)
    # No intercept, interaction between two categorics
    # Both components are full: the interaction spans all level combinations.
    dm = design_matrices("y ~ 0 + f:g", data)
    assert list(dm.common.terms_info.keys()) == ["f:g"]
    assert dm.common.terms_info["f:g"]["kind"] == "interaction"
    assert dm.common.terms_info["f:g"]["terms"]["f"]["encoding"] == "full"
    assert dm.common.terms_info["f:g"]["terms"]["g"]["encoding"] == "full"
    assert dm.common.terms_info["f:g"]["full_names"] == [
        "f[A]:g[A]",
        "f[A]:g[B]",
        "f[B]:g[A]",
        "f[B]:g[B]",
    ]
    assert dm.common.design_matrix.shape == (20, 4)
    # Intercept, interaction between two categorics
    # It adds "g" -> It uses Patsy algorithm... look there if you're curious.
    dm = design_matrices("y ~ 1 + f:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "g", "f:g"]
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["kind"] == "interaction"
    assert dm.common.terms_info["f:g"]["terms"]["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["g"]["encoding"] == "full"
    assert dm.common.terms_info["f:g"]["full_names"] == ["f[B]:g[A]", "f[B]:g[B]"]
    assert dm.common.design_matrix.shape == (20, 4)
    # Same than before
    dm = design_matrices("y ~ 1 + g + f:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "g", "f:g"]
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["kind"] == "interaction"
    assert dm.common.terms_info["f:g"]["terms"]["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["g"]["encoding"] == "full"
    assert dm.common.terms_info["f:g"]["full_names"] == ["f[B]:g[A]", "f[B]:g[B]"]
    assert dm.common.design_matrix.shape == (20, 4)
def test_categoric_encoding_with_numeric_interaction():
    """Categoric encodings inside interactions that also contain numerics.

    A lower-order categoric ("g", "j") is auto-added and reduced; within the
    interaction the first categoric is reduced and the remaining one is full.
    """
    np.random.seed(1234)
    size = 20
    data = pd.DataFrame(
        {
            "y": np.random.uniform(size=size),
            "x1": np.random.uniform(size=size),
            "x2": np.random.uniform(size=size),
            "x3": [1, 2, 3, 4] * 5,
            "f": np.random.choice(["A", "B"], size=size),
            "g": np.random.choice(["A", "B"], size=size),
            "h": np.random.choice(["A", "B"], size=size),
            "j": np.random.choice(["A", "B"], size=size),
        }
    )
    dm = design_matrices("y ~ x1 + x2 + f:g + h:j:x2", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "x1", "x2", "g", "f:g", "j", "h:j:x2"]
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["kind"] == "interaction"
    assert dm.common.terms_info["f:g"]["terms"]["f"]["encoding"] == "reduced"
    assert dm.common.terms_info["f:g"]["terms"]["g"]["encoding"] == "full"
    assert dm.common.terms_info["f:g"]["full_names"] == ["f[B]:g[A]", "f[B]:g[B]"]
    assert dm.common.terms_info["j"]["encoding"] == "reduced"
    assert dm.common.terms_info["h:j:x2"]["terms"]["h"]["encoding"] == "reduced"
    assert dm.common.terms_info["h:j:x2"]["terms"]["j"]["encoding"] == "full"
    assert dm.common.terms_info["h:j:x2"]["terms"]["x2"]["kind"] == "numeric"
def test_interactions(data):
    """'*' expands to main effects plus interaction, and the encoding of a
    categoric inside an interaction depends on which lower-order terms are
    present elsewhere in the formula.
    """
    # These two models are the same
    dm = design_matrices("y ~ f * g", data)
    dm2 = design_matrices("y ~ f + g + f:g", data)
    assert compare_dicts(dm2.common.terms_info, dm.common.terms_info)
    # When no intercept too
    dm = design_matrices("y ~ 0 + f * g", data)
    dm2 = design_matrices("y ~ 0 + f + g + f:g", data)
    assert compare_dicts(dm2.common.terms_info, dm.common.terms_info)
    # Mix of numeric/categoric
    # "g" in "g" -> reduced
    # "g" in "x1:g" -> reduced because x1 is present in formula
    dm = design_matrices("y ~ x1 + g + x1:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "x1", "g", "x1:g"]
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["x1:g"]["terms"]["g"]["encoding"] == "reduced"
    # "g" in "g" -> reduced
    # "g" in "x1:g" -> full because x1 is not present in formula
    dm = design_matrices("y ~ g + x1:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "g", "x1:g"]
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["x1:g"]["terms"]["g"]["encoding"] == "full"
    # "g" in "x1:x2:g" is full, because x1:x2 is a new group and we don't have x1:x2 in the model
    dm = design_matrices("y ~ x1 + g + x1:g + x1:x2:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "x1", "g", "x1:g", "x1:x2:g"]
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["x1:g"]["terms"]["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["x1:x2:g"]["terms"]["g"]["encoding"] == "full"
    # "g" in "x1:x2:g" is reduced, because x1:x2 is a new group and we have x1:x2 in the model
    dm = design_matrices("y ~ x1 + g + x1:x2 + x1:g + x1:x2:g", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "x1", "g", "x1:x2", "x1:g", "x1:x2:g"]
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["x1:g"]["terms"]["g"]["encoding"] == "reduced"
    assert dm.common.terms_info["x1:x2:g"]["terms"]["g"]["encoding"] == "reduced"
    # And now, since we don't have intercept, x1 and x1:x2 all "g" are full
    dm = design_matrices("y ~ 0 + g + x1:g + x1:x2:g", data)
    assert list(dm.common.terms_info.keys()) == ["g", "x1:g", "x1:x2:g"]
    assert dm.common.terms_info["g"]["kind"] == "categoric"
    assert dm.common.terms_info["g"]["encoding"] == "full"
    assert dm.common.terms_info["x1:g"]["terms"]["g"]["encoding"] == "full"
    assert dm.common.terms_info["x1:x2:g"]["terms"]["g"]["encoding"] == "full"
    # Two numerics: the interaction column is the elementwise product.
    dm = design_matrices("y ~ x1:x2", data)
    assert "x1:x2" in dm.common.terms_info.keys()
    assert np.allclose(dm.common["x1:x2"][:, 0], data["x1"] * data["x2"])
def test_built_in_transforms(data):
    """Built-in stateful transforms: I() / {...}, center(), scale()/standardize()
    and C() (categoric conversion with optional reference and levels).
    """
    # {...} gets translated to I(...)
    dm = design_matrices("y ~ {x1 + x2}", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "I(x1 + x2)"]
    assert dm.common.terms_info["I(x1 + x2)"]["kind"] == "numeric"
    assert np.allclose(
        dm.common["I(x1 + x2)"], np.atleast_2d((data["x1"] + data["x2"]).to_numpy()).T
    )
    dm2 = design_matrices("y ~ I(x1 + x2)", data)
    assert compare_dicts(dm.common.terms_info, dm2.common.terms_info)
    # center(): zero mean
    dm = design_matrices("y ~ center(x1)", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "center(x1)"]
    assert dm.common.terms_info["center(x1)"]["kind"] == "numeric"
    assert np.allclose(dm.common["center(x1)"].mean(), 0)
    # scale(): zero mean and unit standard deviation
    dm = design_matrices("y ~ scale(x1)", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "scale(x1)"]
    assert dm.common.terms_info["scale(x1)"]["kind"] == "numeric"
    assert np.allclose(dm.common["scale(x1)"].mean(), 0)
    assert np.allclose(dm.common["scale(x1)"].std(), 1)
    # standardize(), alias of scale()
    dm = design_matrices("y ~ standardize(x1)", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "standardize(x1)"]
    assert dm.common.terms_info["standardize(x1)"]["kind"] == "numeric"
    assert np.allclose(dm.common["standardize(x1)"].mean(), 0)
    assert np.allclose(dm.common["standardize(x1)"].std(), 1)
    # C()
    # Intercept, no extra arguments, reference is first value observed
    dm = design_matrices("y ~ C(x3)", data)
    assert list(dm.common.terms_info.keys()) == ["Intercept", "C(x3)"]
    assert dm.common.terms_info["C(x3)"]["kind"] == "categoric"
    assert dm.common.terms_info["C(x3)"]["encoding"] == "reduced"
    assert dm.common.terms_info["C(x3)"]["reference"] == 1
    assert dm.common.terms_info["C(x3)"]["levels"] == [1, 2, 3, 4]
    assert dm.common.terms_info["C(x3)"]["full_names"] == ["C(x3)[2]", "C(x3)[3]", "C(x3)[4]"]
    # No intercept, no extra arguments
    dm = design_matrices("y ~ 0 + C(x3)", data)
    assert list(dm.common.terms_info.keys()) == ["C(x3)"]
    assert dm.common.terms_info["C(x3)"]["kind"] == "categoric"
    assert dm.common.terms_info["C(x3)"]["encoding"] == "full"
    assert dm.common.terms_info["C(x3)"]["reference"] == 1
    assert dm.common.terms_info["C(x3)"]["levels"] == [1, 2, 3, 4]
    assert dm.common.terms_info["C(x3)"]["full_names"] == [
        "C(x3)[1]",
        "C(x3)[2]",
        "C(x3)[3]",
        "C(x3)[4]",
    ]
    # Specify levels, different to observed; the first given level is reference
    lvls = [3, 2, 4, 1]
    dm = design_matrices("y ~ C(x3, levels=lvls)", data)
    assert dm.common.terms_info["C(x3, levels = lvls)"]["kind"] == "categoric"
    assert dm.common.terms_info["C(x3, levels = lvls)"]["reference"] == 3
    assert dm.common.terms_info["C(x3, levels = lvls)"]["levels"] == lvls
    # Pass a reference not in the data
    with pytest.raises(ValueError):
        dm = design_matrices("y ~ C(x3, 5)", data)
    # Pass categoric, remains unchanged
    dm = design_matrices("y ~ C(f)", data)
    dm2 = design_matrices("y ~ f", data)
    d1 = dm.common.terms_info["C(f)"]
    d2 = dm2.common.terms_info["f"]
    assert d1["kind"] == d2["kind"]
    assert d1["levels"] == d2["levels"]
    assert d1["reference"] == d2["reference"]
    assert d1["encoding"] == d2["encoding"]
    assert not d1["full_names"] == d2["full_names"]  # because one is 'C(f)' and other is 'f'
    assert all(dm.common["C(f)"] == dm2.common["f"])
def test_external_transforms(data):
    """Formulas may call functions from the calling environment: a library
    function (np.exp) and a locally defined one.
    """
    dm = design_matrices("y ~ np.exp(x1)", data)
    assert np.allclose(dm.common["np.exp(x1)"][:, 0], np.exp(data["x1"]))

    def add_ten(x):
        return 10 + x

    dm = design_matrices("y ~ add_ten(x1)", data)
    assert np.allclose(dm.common["add_ten(x1)"][:, 0], 10 + data["x1"])
def test_non_syntactic_names():
    """Backtick-quoted names allow arbitrary characters in formulas."""
    data = pd.DataFrame(
        {
            "My response": np.random.normal(size=10),
            "$$#1@@": np.random.normal(size=10),
            "-- ! Hi there!": np.random.normal(size=10),
        }
    )
    dm = design_matrices("`My response` ~ `$$#1@@`*`-- ! Hi there!`", data)
    assert list(dm.common.terms_info.keys()) == [
        "Intercept",
        "$$#1@@",
        "-- ! Hi there!",
        "$$#1@@:-- ! Hi there!",
    ]
    assert np.allclose(dm.common["$$#1@@"][:, 0], data["$$#1@@"])
    # This assertion was accidentally duplicated in a previous revision.
    assert np.allclose(dm.common["-- ! Hi there!"][:, 0], data["-- ! Hi there!"])
    assert np.allclose(
        dm.common["$$#1@@:-- ! Hi there!"][:, 0], data["$$#1@@"] * data["-- ! Hi there!"]
    )
def test_categoric_group_specific():
    """Categoric expressions in group-specific terms expand into per-level terms."""
    data = pd.DataFrame(
        {
            "BP": np.random.normal(size=30),
            "BMI": np.random.normal(size=30),
            "age_grp": np.random.choice([0, 1, 2], size=30),
        }
    )
    # BUG FIX: these two comparisons previously lacked the `assert` keyword,
    # so their results were silently discarded and the checks never ran.
    # With an implicit intercept, the first level is absorbed into "1|BMI".
    dm = design_matrices("BP ~ 0 + (C(age_grp)|BMI)", data)
    assert list(dm.group.terms_info.keys()) == ["1|BMI", "C(age_grp)[1]|BMI", "C(age_grp)[2]|BMI"]
    # Without an intercept, every level gets its own term.
    dm = design_matrices("BP ~ 0 + (0 + C(age_grp)|BMI)", data)
    assert list(dm.group.terms_info.keys()) == [
        "C(age_grp)[0]|BMI",
        "C(age_grp)[1]|BMI",
        "C(age_grp)[2]|BMI",
    ]
def test_interactions_in_group_specific(pixel):
    """Group-specific terms with categoric expressions/factors and interactions."""
    # We have group specific terms with the following characteristics
    # 1. expr=categoric, factor=categoric
    # 2. expr=intercept, factor=categoric
    # 3. expr=intercept, factor=interaction between categorics
    # The design matrices used for the comparison are loaded from text files.
    # The encoding is implicitly checked when comparing names.
    from os.path import dirname, join

    data_dir = join(dirname(__file__), "data/group_specific")
    slope_by_dog_original = np.loadtxt(join(data_dir, "slope_by_dog.txt"))
    intercept_by_side_original = np.loadtxt(join(data_dir, "intercept_by_side.txt"))
    intercept_by_side_dog_original = np.loadtxt(join(data_dir, "intercept_by_side_dog.txt"))
    dog_and_side_by_day_original = np.loadtxt(join(data_dir, "dog_and_side_by_day.txt"))
    dm = design_matrices("pixel ~ day + (0 + day | Dog) + (1 | Side/Dog)", pixel)
    slope_by_dog = dm.group["day|Dog"]
    intercept_by_side = dm.group["1|Side"]
    intercept_by_side_dog = dm.group["1|Side:Dog"]
    # Assert values in the design matrix
    assert (slope_by_dog == slope_by_dog_original).all()
    assert (intercept_by_side == intercept_by_side_original).all()
    assert (intercept_by_side_dog == intercept_by_side_dog_original).all()
    # Assert full names
    names = [f"day[{d}]|{g}" for g in [1, 2, 3] for d in [2, 4, 6]]
    assert dm.group.terms_info["day|Dog"]["full_names"] == names
    names = [f"1|Side[{s}]" for s in ["L", "R"]]
    assert dm.group.terms_info["1|Side"]["full_names"] == names
    names = [f"1|Side:Dog[{s}:{d}]" for s in ["L", "R"] for d in [1, 2, 3]]
    assert dm.group.terms_info["1|Side:Dog"]["full_names"] == names
    # Another design matrix
    dm = design_matrices("(0 + Dog:Side | day)", pixel)
    dog_and_side_by_day = dm.group["Dog:Side|day"]
    # Assert values in the design matrix
    assert (dog_and_side_by_day == dog_and_side_by_day_original).all()
    # Assert full names
    names = [f"Dog[{d}]:Side[{s}]|{g}" for g in [2, 4, 6] for d in [1, 2, 3] for s in ["L", "R"]]
    assert dm.group.terms_info["Dog:Side|day"]["full_names"] == names
def test_prop_response():
    """prop(successes, trials) builds a two-column proportion response.

    Column 0 holds successes, column 1 trials, and successes never exceed
    trials. 'proportion' and 'p' are aliases of 'prop', and the trials
    argument may be a constant integer instead of a variable.
    """
    data = pd.DataFrame(
        {
            "x": np.array([1.6907, 1.7242, 1.7552, 1.7842, 1.8113, 1.8369, 1.8610, 1.8839]),
            "n": np.array([59, 60, 62, 56, 63, 59, 62, 60]),
            "y": np.array([6, 13, 18, 28, 52, 53, 61, 60]),
        }
    )
    # Variable trials, constant trials, and both aliases all behave identically.
    formulas = [
        "prop(y, n) ~ x",
        "prop(y, 62) ~ x",
        "proportion(y, n) ~ x",
        "p(y, n) ~ x",
    ]
    for formula in formulas:
        response = design_matrices(formula, data).response
        assert response.kind == "proportion"
        assert response.design_vector.shape == (8, 2)
        assert (np.less_equal(response.design_vector[:, 0], response.design_vector[:, 1])).all()
def test_prop_response_fails():
    """prop() rejects x > n, non-integer components, and non-name arguments."""
    small = pd.DataFrame({"x": [2, 3], "n": [1, 2]})
    # Successes larger than trials
    with pytest.raises(ValueError):
        design_matrices("prop(x, n) ~ 1", small)
    # Either component not an integer
    with pytest.raises(ValueError):
        design_matrices("prop(x, n) ~ 1", pd.DataFrame({"x": [2, 3.3], "n": [4, 4]}))
    with pytest.raises(ValueError):
        design_matrices("prop(x, n) ~ 1", pd.DataFrame({"x": [2, 3], "n": [4.3, 4]}))
    # First argument must be a variable name, not a literal
    with pytest.raises(ValueError):
        design_matrices("prop(10, n) ~ 1", small)
    # A constant number of trials must be an integer, not a float
    with pytest.raises(ValueError):
        design_matrices("prop(x, 3.4) ~ 1", small)
def test_categoric_responses():
    """Categoric responses: multi-level vs. binary, and bracket subsetting
    ('y[level]') to pick the success level.
    """
    data = pd.DataFrame(
        {
            "y1": np.random.choice(["A", "B", "C"], size=30),
            "y2": np.random.choice(["A", "B"], size=30),
            "y3": np.random.choice(["Hi there", "Bye bye", "What??"], size=30),
            "x": np.random.normal(size=30),
        }
    )
    # Multi-level response
    response = design_matrices("y1 ~ x", data).response
    assert list(np.unique(response.design_vector)) == [0, 1, 2]
    assert response.levels == ["A", "B", "C"]
    assert response.binary is False
    assert response.baseline == "A"
    assert response.success is None
    # Multi-level response, explicitly converted to binary
    response = design_matrices("y1['A'] ~ x", data).response
    assert list(np.unique(response.design_vector)) == [0, 1]
    assert response.levels == ["A", "B", "C"]
    assert response.binary is True
    assert response.baseline is None
    assert response.success == "A"
    # Default binary response
    response = design_matrices("y2 ~ x", data).response
    assert list(np.unique(response.design_vector)) == [0, 1]
    assert response.levels == ["A", "B"]
    assert response.binary is True
    assert response.baseline is None
    assert response.success == "A"
    # Binary response with explicit level
    response = design_matrices("y2['B'] ~ x", data).response
    assert list(np.unique(response.design_vector)) == [0, 1]
    assert response.levels == ["A", "B"]
    assert response.binary is True
    assert response.baseline is None
    assert response.success == "B"
    # Binary response with explicit level passed as identifier
    response = design_matrices("y2[B] ~ x", data).response
    assert list(np.unique(response.design_vector)) == [0, 1]
    assert response.levels == ["A", "B"]
    assert response.binary is True
    assert response.baseline is None
    assert response.success == "B"
    # Binary response with explicit level with spaces
    response = design_matrices("y3['Bye bye'] ~ x", data).response
    assert list(np.unique(response.design_vector)) == [0, 1]
    assert response.levels == ["Bye bye", "Hi there", "What??"]
    assert response.binary is True
    assert response.baseline is None
    assert response.success == "Bye bye"
    # Users trying to use nested brackets (WHY?)
    with pytest.raises(ParseError, match=re.escape("Are you using nested brackets? Why?")):
        design_matrices("y3[A[B]] ~ x", data)
    # Users try to pass a number to use a number
    with pytest.raises(
        ParseError, match=re.escape("Subset notation only allows a string or an identifer")
    ):
        design_matrices("y3[1] ~ x", data)
def test_binary_function():
    """binary(var, value) builds a 0/1 indicator for var == value.

    The success value can be a string or number literal, a variable from the
    environment, or omitted (then the first sorted unique value is used).
    """
    size = 100
    data = pd.DataFrame(
        {
            "y": np.random.randint(0, 5, size=size),
            "x": np.random.randint(5, 10, size=size),
            "g": np.random.choice(["a", "b", "c"], size=size),
        }
    )
    # String value
    term = design_matrices("y ~ binary(g, 'c')", data).common["binary(g, c)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["g"] == "c"))
    # Numeric value
    term = design_matrices("y ~ binary(x, 7)", data).common["binary(x, 7)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["x"] == 7))
    # Variable name
    # string
    m = "b"
    term = design_matrices("y ~ binary(g, m)", data).common["binary(g, m)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["g"] == m))
    # numeric
    z = 8
    term = design_matrices("y ~ binary(x, z)", data).common["binary(x, z)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["x"] == z))
    # Pass nothing: defaults to the smallest observed value (5 here)
    term = design_matrices("y ~ binary(x)", data).common["binary(x)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["x"] == 5))
    # Values not found in the variable
    with pytest.raises(ValueError):
        design_matrices("y ~ binary(g, 'Not found')", data)
    with pytest.raises(ValueError):
        design_matrices("y ~ binary(x, 999)", data)
def test_B_function():
    """B() is an alias of binary(); mirrors test_binary_function exactly."""
    size = 100
    data = pd.DataFrame(
        {
            "y": np.random.randint(0, 5, size=size),
            "x": np.random.randint(5, 10, size=size),
            "g": np.random.choice(["a", "b", "c"], size=size),
        }
    )
    # String value
    term = design_matrices("y ~ B(g, 'c')", data).common["B(g, c)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["g"] == "c"))
    # Numeric value
    term = design_matrices("y ~ B(x, 7)", data).common["B(x, 7)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["x"] == 7))
    # Variable name
    # string
    m = "b"
    term = design_matrices("y ~ B(g, m)", data).common["B(g, m)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["g"] == m))
    # numeric
    z = 8
    term = design_matrices("y ~ B(x, z)", data).common["B(x, z)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["x"] == z))
    # Pass nothing: defaults to the smallest observed value (5 here)
    term = design_matrices("y ~ B(x)", data).common["B(x)"].squeeze()
    assert np.array_equal(np.where(term == 1), np.where(data["x"] == 5))
    # Values not found in the variable
    with pytest.raises(ValueError):
        design_matrices("y ~ B(g, 'Not found')", data)
    with pytest.raises(ValueError):
        design_matrices("y ~ B(x, 999)", data)
def test_C_function():
    """C() converts to categoric; a reference value or a 'levels' list reorders
    the levels (the first level is always the reference). Passing both a
    reference and 'levels' is an error.
    """
    size = 100
    data = pd.DataFrame(
        {
            "y": np.random.randint(0, 5, size=size),
            "x": np.random.randint(5, 10, size=size),
            "g": np.random.choice(["a", "b", "c"], size=size),
        }
    )
    term = design_matrices("y ~ C(x)", data).common.terms_info["C(x)"]
    assert term["kind"] == "categoric"
    assert term["levels"] == [5, 6, 7, 8, 9]
    assert term["reference"] == 5
    # A reference value moves that level to the front.
    term = design_matrices("y ~ C(x, 7)", data).common.terms_info["C(x, 7)"]
    assert term["kind"] == "categoric"
    assert term["levels"] == [7, 5, 6, 8, 9]
    assert term["reference"] == 7
    # NOTE: `l` is referenced by name from inside the formula string, so the
    # local name and the "levels = l" key must stay in sync.
    l = [6, 8, 5, 7, 9]
    term = design_matrices("y ~ C(x, levels=l)", data).common.terms_info["C(x, levels = l)"]
    assert term["kind"] == "categoric"
    assert term["levels"] == l
    assert term["reference"] == 6
    term = design_matrices("y ~ C(g)", data).common.terms_info["C(g)"]
    assert term["kind"] == "categoric"
    assert term["levels"] == ["a", "b", "c"]
    assert term["reference"] == "a"
    term = design_matrices("y ~ C(g, 'c')", data).common.terms_info["C(g, c)"]
    assert term["kind"] == "categoric"
    assert term["levels"] == ["c", "a", "b"]
    assert term["reference"] == "c"
    l = ["b", "c", "a"]
    term = design_matrices("y ~ C(g, levels=l)", data).common.terms_info["C(g, levels = l)"]
    assert term["kind"] == "categoric"
    assert term["levels"] == l
    assert term["reference"] == "b"
    with pytest.raises(ValueError):
        design_matrices("y ~ C(g, 'c', levels=l)", data)
def test_offset():
    """offset() yields an 'offset' term that passes the variable through
    unchanged; it only accepts numeric variables and cannot be a response.
    """
    n_rows = 100
    data = pd.DataFrame(
        {
            "y": np.random.randint(0, 5, size=n_rows),
            "x": np.random.randint(5, 10, size=n_rows),
            "g": np.random.choice(["a", "b", "c"], size=n_rows),
        }
    )
    dm = design_matrices("y ~ offset(x)", data)
    info = dm.common.terms_info["offset(x)"]
    assert info["kind"] == "offset"
    assert info["full_names"] == ["offset(x)"]
    assert (dm.common["offset(x)"].flatten() == data["x"]).all()
    # Categoric variables are rejected...
    with pytest.raises(ValueError):
        design_matrices("y ~ offset(g)", data)
    # ...and so is an offset on the response side.
    with pytest.raises(ValueError):
        design_matrices("offset(y) ~ x", data)
def test_predict_prop():
    """_evaluate_new_data on a proportion response returns the trials column."""
    data = pd.DataFrame(
        {
            "x": np.array([1.6907, 1.7242, 1.7552, 1.7842, 1.8113, 1.8369, 1.8610, 1.8839]),
            "n": np.array([59, 60, 62, 56, 63, 59, 62, 60]),
            "y": np.array([6, 13, 18, 28, 52, 53, 61, 60]),
        }
    )
    new_data = pd.DataFrame({"n": [10, 10, 30, 30]})
    # Variable trials: the new dataset must supply that variable.
    dm = design_matrices("prop(y, n) ~ x", data)
    result = dm.response._evaluate_new_data(new_data)
    assert (result == np.array([10, 10, 30, 30])[:, np.newaxis]).all()
    # Constant trials: the constant is repeated for every new row.
    dm = design_matrices("prop(y, 70) ~ x", data)
    result = dm.response._evaluate_new_data(new_data)
    assert (result == np.array([70, 70, 70, 70])[:, np.newaxis]).all()
def test_predict_offset():
    """_evaluate_new_data on offset terms mirrors the proportion behavior."""
    data = pd.DataFrame(
        {
            "x": np.array([1.6907, 1.7242, 1.7552, 1.7842, 1.8113, 1.8369, 1.8610, 1.8839]),
            "n": np.array([59, 60, 62, 56, 63, 59, 62, 60]),
            "y": np.array([6, 13, 18, 28, 52, 53, 61, 60]),
        }
    )
    new_data = pd.DataFrame({"x": [1, 2, 3]})
    # Variable offset: taken from the new dataset.
    common = design_matrices("y ~ x + offset(x)", data).common
    result = common._evaluate_new_data(new_data)["offset(x)"]
    assert (result == np.array([1, 2, 3])[:, np.newaxis]).all()
    # Constant offset: the constant is repeated for every new row.
    common = design_matrices("y ~ x + offset(10)", data).common
    result = common._evaluate_new_data(new_data)["offset(10)"]
    assert (result == np.array([10, 10, 10])[:, np.newaxis]).all()
|
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
########################################################################
import math
from region import error
from region import geometry as mygeo
from generate.gen import cluster
def cos(invec, outvec):
    """
    Desc: Return the cosine of the angle between an incoming and an outgoing
          vector. For a node, invec is the vector entering the node and outvec
          is the vector leaving it.
    Args:
        invec : incoming vector, tuple (inx, iny)
        outvec : outgoing vector, tuple (outx, outy)
    Return:
        cosine of the angle from invec to outvec, in [-1, 1]
    Raise:
        error.RegionError : if either vector is the zero vector
    """
    (inx, iny) = invec
    (outx, outy) = outvec
    if (inx == 0 and iny == 0) or (outx == 0 and outy == 0):
        raise error.RegionError("vector is zero")
    # math.hypot is clearer and more robust than sqrt(x*x + y*y)
    # against intermediate overflow/underflow.
    inlen = math.hypot(inx, iny)
    outlen = math.hypot(outx, outy)
    return (1.0 * inx * outx + 1.0 * iny * outy) / (inlen * outlen)
def side(invec, outvec):
    """
    Desc: Return which side of invec the vector outvec lies on.
    Args:
        invec: incoming vector, (inx, iny)
        outvec: outgoing vector, (outx, outy)
    Returns:
        1 if outvec is to the left of invec (collinear same direction also
        returns 1); -1 if outvec is to the right (collinear opposite
        direction also returns -1).
    Raises:
        error.RegionError : if either vector is the zero vector
    """
    (inx, iny) = invec
    (outx, outy) = outvec
    if (inx == 0 and iny == 0) or (outx == 0 and outy == 0):
        raise error.RegionError("vector is zero")
    # The sign of the 2D cross product decides left/right.
    cross = inx * outy - outx * iny
    if cross != 0:
        return 1 if cross > 0 else -1
    # Collinear vectors: same direction counts as left, opposite as right.
    dot = inx * outx + iny * outy
    return 1 if dot > 0 else -1
class Node(mygeo.Point):
    """
    Desc: A point used during region generation that also tracks the links
          incident to it.
    """
    def __init__(self, x, y):
        """
        Desc: Create a node at (x, y) with empty link lists.
        Args:
            self : self
            x : x coordinate
            y : y coordinate
        Return:
            None
        Raise:
            None
        """
        mygeo.Point.__init__(self, x, y)
        # Populated by Link.__init__ when links touch this node.
        self.out_links = []
        self.in_links = []
class Link(mygeo.Segment):
    """
    Desc: Directed segment used during region generation. Creating a Link
          registers it on both endpoint nodes.
    """
    def __init__(self, start, end):
        """
        Desc: Build a link from start to end and register it on both nodes
              (appended to start.out_links and end.in_links).
        Args:
            self : self
            start : Node
            end : Node
        Return:
            None
        Raise:
            None
        """
        mygeo.Segment.__init__(self, start, end)
        start.out_links.append(self)
        end.in_links.append(self)
        # Set to True once this link has been consumed by a region walk.
        self.used = False
    def leftest(self):
        """
        Desc: Among the links leaving this link's end node, return the one that
              turns the most to the left relative to this link's direction.

              Ranking key: cs = side * cos + (1 - side).
              For a left turn (side == 1), cs == cos, in [-1, 1]; for a right
              turn (side == -1), cs == 2 - cos, in [1, 3]. Thus every left turn
              ranks before every right turn, and a sharper left turn (smaller
              cos) ranks first. The initial min_cs of 5.0 exceeds the maximum
              possible cs of 3, so any candidate beats it.
        Args:
            self : self
        Return:
            Link object
        Raise:
            error.RegionError : if the end node has no outgoing links
        """
        min_cs = 5.0
        min_link = None
        for link in self.end.out_links:
            out_vec = link.vec()
            in_vec = self.vec()
            side1 = side(in_vec, out_vec)
            cos1 = cos(in_vec, out_vec)
            cs = side1 * cos1 + (1 - side1)
            if cs < min_cs:
                min_cs = cs
                min_link = link
        if min_link is None:
            raise error.RegionError("there is not a leftest link")
        return min_link
    def vec(self):
        """
        Desc: Vector represented by this segment: end - start.
        Args:
            self : self
        Return:
            vec, tuple of (x, y)
        Raise:
            error.RegionError : if the segment is empty
        """
        if self.is_empty():
            raise error.RegionError("empty object has not vec")
        return (self.end.x - self.start.x, self.end.y - self.start.y)
    def reverse(self):
        """
        Desc: Return a new Link with start and end swapped (the reverse link).
        Args:
            self : self
        Return:
            a new link
        Raise:
            None
        """
        return Link(self.end, self.start)
class ValueDict(object):
    """
    Desc: Dict-like registry that inserts a default value whenever a looked-up
          key is missing.
    """
    def __init__(self):
        """
        Desc: Start with an empty backing dict.
        Args:
            self : self
        Return:
            None
        Raise:
            None
        """
        self.value_dict = {}
    def find(self, key, value=None):
        """
        Desc: Return the value stored under key. On a miss, store value
              (or key itself when value is None) and return it.
        Args:
            key : key
            value : value to store on a miss; key itself if None
        Return:
            the value associated with key
        Raise:
            None
        """
        if key in self.value_dict:
            return self.value_dict[key]
        stored = key if value is None else value
        self.value_dict[key] = stored
        return stored
    def is_in(self, key, value=None):
        """
        Desc: Membership test with insert-on-miss. On a miss, store value
              (or key itself when value is None).
        Args:
            key : key
            value : value to store on a miss; key itself if None
        Return:
            True if key was already present, False otherwise (and the
            key/value pair is added)
        Raise:
            None
        """
        if key in self.value_dict:
            return True
        self.value_dict[key] = key if value is None else value
        return False
class RegionGenerator(object):
    """
    Desc: builds closed regions (outer ring plus holes) from a set of
          segments by tracing left-most turns through a directed link graph
    """
    def __init__(self, segs):
        """
        Desc: initialize; turn every segment into a forward and reverse link
        Args:
            self : self
            segs : segments with no duplicates; apart from segment endpoints
                   they share no other intersection points
        Return:
            None
        Raise:
            None
        """
        self.links = []
        # Maps point keys to canonical Node objects so shared endpoints
        # resolve to the same Node instance.
        self.node_dict = ValueDict()
        # Tracks which undirected segments already produced links (dedupe).
        self.link_dict = {}
        for seg in segs:
            start = self.node_dict.find(seg.start.askey(), Node(seg.start.x, seg.start.y))
            end = self.node_dict.find(seg.end.askey(), Node(seg.end.x, seg.end.y))
            if start == end:  # seg.start and seg.end are effectively the same point
                continue
            link = self.__get_link(start, end)
            if link is not None:
                # Register both directions of travel along this segment.
                self.links.append(link)
                self.links.append(link.reverse())
    def __get_link(self, start, end):
        """
        Desc: build a Link from the start and end nodes, deduplicated per
              undirected segment
        Args:
            self : self
            start : Node
            end : Node
        Return:
            Link, or None when this segment was already seen
        Raise:
            None
        """
        ret = None
        segstr = mygeo.Segment(start, end).askey()
        if segstr not in self.link_dict:
            self.link_dict[segstr] = 1
            ret = Link(start, end)
        return ret
    def run(self):
        """
        Desc: generate all regions; whenever a walk revisits a node a ring
              is closed
        Args:
            self : self
        Return:
            list of mygeo.Region
        Raise:
            None
        """
        ret = []
        for link in self.links:
            if not link.used:
                reg = self.get_a_region(link)
                if reg is not None:
                    ret.append(reg)
        return ret
    def get_a_region(self, link):
        """
        Desc: trace one region starting from the given link
        Args:
            self : self
            link : starting link of the region walk
        Return:
            mygeo.Region, or None when no counter-clockwise outer ring found
        Raise:
            error.RegionError
        """
        # Follow left-most turns until an already-used link is reached;
        # this produces a closed walk of nodes.
        nodes = [link.start]
        next = link
        while not next.used:
            nodes.append(next.end)
            next.used = True
            next = next.leftest()
        if not nodes[0] == nodes[-1]:
            raise error.RegionError("first node is not equal to end node")
        points = None    # outer (counter-clockwise) ring, at most one allowed
        holes = []       # clockwise rings with positive area become holes
        node_dict = {}   # node key -> index of its first occurrence in nodes
        temp_holes = []  # NOTE(review): zero-area rings collected but never used
        i = 0
        while i < len(nodes):
            nd = nodes[i]
            ndstr = nd.askey()
            if ndstr in node_dict:
                # Node repeated: nodes[start_idx:i] closes a ring.
                start_idx = node_dict[ndstr]
                sub_nodes = nodes[start_idx: i]
                if len(sub_nodes) >= 3:
                    region = mygeo.Region(sub_nodes)
                    if region.area > 0:
                        if mygeo.is_counter_clockwise(sub_nodes):
                            # Only a single counter-clockwise ring (the outer
                            # boundary) is permitted per traced region.
                            if points is None:
                                points = sub_nodes
                            else:
                                raise error.RegionError("too many ring is counter clockwise")
                        else:
                            holes.append(sub_nodes)
                    else:
                        temp_holes.append(sub_nodes)
                # Cut the closed ring out of the walk and resume scanning
                # from where it started.
                nodes = nodes[:start_idx] + nodes[i:]
                i = start_idx
            else:
                node_dict[ndstr] = i
            i += 1
        if points is not None:
            return mygeo.Region(points, holes)
        else:
            return None
def segments_to_points(segments):
    """Collect the unique endpoints of the given segments.

    Args:
        segments : list of mygeo.Segment.

    Returns:
        list of mygeo.Point, in first-seen order, without duplicates.
    """
    unique_points = []
    seen = ValueDict()
    for segment in segments:
        for endpoint in (segment.start, segment.end):
            if not seen.is_in(endpoint.askey(), endpoint):
                unique_points.append(endpoint)
    return unique_points
def segments_to_cluster_points(segments):
    """Collect the unique endpoints of the given segments as cluster points.

    Args:
        segments : list of mygeo.Segment.

    Returns:
        list of cluster.Point, in first-seen order, without duplicates.
    """
    result = []
    seen = ValueDict()
    for segment in segments:
        for endpoint in (segment.start, segment.end):
            if not seen.is_in(endpoint.askey(), endpoint):
                result.append(cluster.Point(endpoint.x, endpoint.y))
    return result
def regions_to_points(regions):
    """Collect the unique vertices of the given regions.

    Covers both the outer ring and every hole of each region.

    Args:
        regions : iterable of regions.

    Returns:
        list of mygeo.Point, in first-seen order, without duplicates.
    """
    result = []
    seen = ValueDict()

    def _collect(ring):
        # Append each not-yet-seen vertex of one ring.
        for pt in ring:
            if not seen.is_in(pt.askey(), pt):
                result.append(pt)

    for region in regions:
        _collect(region.points)
        for hole in region.holes:
            _collect(hole)
    return result
def regions_to_cluster_points(regions):
    """Collect the unique vertices of the given regions as cluster points.

    Each point is tagged with a 1-based id of the region it came from
    (outer ring and holes share the same id).

    Args:
        regions : iterable of regions.

    Returns:
        list of cluster.Point, in first-seen order, without duplicates.
    """
    result = []
    seen = ValueDict()

    def _collect(ring, region_id):
        # Append each not-yet-seen vertex of one ring, tagged with its region.
        for pt in ring:
            if not seen.is_in(pt.askey(), pt):
                result.append(cluster.Point(pt.x, pt.y, region_id))

    for region_id, region in enumerate(regions, start=1):
        _collect(region.points, region_id)
        for hole in region.holes:
            _collect(hole, region_id)
    return result
def clusters_to_pointmap(clusters):
    """Build a point map from clusters, as a ValueDict.

    Every member point key of a cluster is mapped to that cluster's center.

    Args:
        clusters : iterable of cluster objects.

    Returns:
        ValueDict mapping point keys to cluster centers.
    """
    point_map = ValueDict()
    # Renamed loop variable so it no longer shadows the 'cluster' module.
    for clu in clusters:
        center = clu.center()
        for point_key in clu.points:
            # is_in inserts point_key -> center when absent; the returned
            # bool is intentionally discarded.
            point_map.is_in(point_key, center)
    return point_map
def __segment_2_newseg(segment, point_dict, seg_dict):
    """Map a segment's endpoints through the point map and deduplicate.

    Both endpoints are replaced by their corrected positions from
    point_dict and truncated in place.

    Args:
        segment : the segment to correct.
        point_dict : ValueDict mapping point keys to corrected points.
        seg_dict : ValueDict used to drop segments produced before.

    Returns:
        A new mygeo.Segment, or None when the corrected segment degenerates
        to a single point or has already been produced.
    """
    corrected = []
    for node in (segment.start, segment.end):
        mapped = point_dict.find(node.askey(), node)
        mapped.trunc()
        corrected.append(mapped)
    start, end = corrected
    if start == end:
        return None
    candidate = mygeo.Segment(start, end)
    if seg_dict.is_in(candidate.askey(), candidate):
        return None
    return candidate
def simplify_by_pointmap(segments, regions, point_dict):
    """Rewrite segments (and region boundary segments) through the point map.

    Each segment's endpoints are replaced by their corrected positions from
    point_dict; degenerate and duplicate segments are dropped.

    Args:
        segments : iterable of mygeo.Segment.
        regions : iterable of regions whose boundary segments are also
            included, or None to skip them.
        point_dict : ValueDict mapping point keys to corrected points.

    Returns:
        list of unique, valid segments.
    """
    simplified = []
    seg_dict = ValueDict()

    def _append_corrected(seg):
        # Correct one segment and keep it when it survives deduplication.
        corrected = __segment_2_newseg(seg, point_dict, seg_dict)
        if corrected is not None:
            simplified.append(corrected)

    for seg in segments:
        _append_corrected(seg)
    if regions is not None:
        for region in regions:
            for seg in region.segments():
                _append_corrected(seg)
    return simplified
class RegionAttract(object):
    """
    Desc: snaps region vertices onto nearby segments within a distance
          threshold, using a spatial grid for candidate lookup
    """
    def __init__(self, regions, width):
        """
        Desc: initialize
        Args:
            self : self
            regions : list of regions whose vertices should be snapped
            width : snapping distance threshold, must be an int
        Return:
            None
        Raise:
            None
        """
        grid_dict = {}
        points = regions_to_points(regions)
        point_map = ValueDict()
        for pt in points:
            grid = pt.grid(width)
            # item layout: [point, snapped_point_or_None, best_distance].
            # The SAME list object is shared between point_map and every grid
            # cell it is registered in, so run() can update it in place.
            item = [pt, None, width]
            if point_map.is_in(pt.askey(), item):
                continue  # point already registered
            # Register the item in the 3x3 neighbourhood of its grid cell so
            # segments falling in adjacent cells can still reach it.
            for grid_x in range(grid[0] - 1, grid[0] + 2):
                for grid_y in range(grid[1] - 1, grid[1] + 2):
                    key_grid = (grid_x, grid_y)
                    if key_grid in grid_dict:
                        grid_dict[key_grid].append(item)
                    else:
                        grid_dict[key_grid] = [item]
        self.grid_dict = grid_dict
        self.regions = regions
        self.point_map = point_map
        self.width = width
    def __modify_ring(self, points):
        """
        Desc: replace every point of one ring with its snapped position,
              when one was found
        Args:
            self : self
            points : the ring points to correct
        Return:
            list of mygeo.Point
        Raise:
            None
        """
        new_points = []
        for pt in points:
            # NOTE(review): find() with value=None inserts the key string as
            # its own value for unknown points; all ring points are expected
            # to have been registered in __init__ -- confirm.
            item = self.point_map.find(pt.askey(), None)
            if item is None or item[1] is None:
                new_points.append(pt)  # no snap target; keep the original
            else:
                new_points.append(item[1])
        return new_points
    def __project(self, seg, point):
        """
        Desc: compute the projection of a point onto a segment
        Args:
            seg : mygeo.Segment
            point : mygeo.Point
        Return:
            the projected mygeo.Point on the segment
        Raise:
            None
        """
        l1 = seg.linestring()
        d1 = l1.project(point.point())
        pt = l1.interpolate(d1)
        return mygeo.Point(pt.x, pt.y)
    def run(self, segs):
        """
        Desc: snap the stored regions onto the given segments
        Args:
            self : self
            segs : segments to snap onto
        Return:
            list of new regions
        Raise:
            None
        """
        # For every registered point near each segment, remember the closest
        # projection found so far; items are shared lists, so these in-place
        # updates are visible through point_map as well.
        for seg in segs:
            for grid in seg.grids(self.width):
                if grid not in self.grid_dict:
                    continue
                for item in self.grid_dict[grid]:
                    (pt, spt, min_dist) = item
                    dist = mygeo.pt_2_seg_dist(pt, seg)
                    if dist < min_dist:
                        item[1] = self.__project(seg, pt)
                        item[2] = dist
        # Rebuild each region from the snapped points in point_map.
        ret = []
        for reg in self.regions:
            points = self.__modify_ring(reg.points)
            holes = []
            for hole in reg.holes:
                holes.append(self.__modify_ring(hole))
            newreg = None
            try:
                newreg = mygeo.Region(points, holes)
            except error.RegionError:
                error.debug("region not valid: %s" % (str(reg)))
            # Fall back to the original region when snapping produced an
            # invalid polygon.
            if newreg is not None and newreg.polygon().is_valid:
                ret.append(newreg)
            else:
                ret.append(reg)
        return ret
|
import unittest
import http.client
import urllib.parse
class TestMongoDB(unittest.TestCase):
    """Integration test that POSTs form data to the /mongodb endpoint.

    Requires an HTTP server listening on host:port; this performs real
    network I/O.
    """
    host = "localhost"
    port = 80

    def test_post(self):
        """POST urlencoded text and expect an HTTP 200 reply."""
        params = urllib.parse.urlencode({'text': 'lorem ipsum'})
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
        conn = http.client.HTTPConnection(self.host, self.port)
        try:
            conn.request("POST", "/mongodb", params, headers)
            response = conn.getresponse()
            print(response.status, response.reason)
            self.assertEqual(response.status, 200)
            data = response.read()
            print(data)
        finally:
            # Close the connection even when the request or assertion fails;
            # the original leaked it on any test failure.
            conn.close()
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python2
import urllib
import urllib2
import os.path
import utils
"""A Python script to help download a series of Isochrone files from
an OpenTripPlanner server"""
def buildRequestStringRaster(server_url, routing_params, date, time, lon_lat,
        img_bbox, raster_res, otp_router_id=None):
    """Build the OTP WMS request URL for a travel-time raster (GeoTIFF).

    Args:
        server_url: base URL of the OpenTripPlanner server.
        routing_params: dict of general OTP routing parameters.
        date, time: departure date and time strings.
        lon_lat: (lon, lat) tuple used for both fromPlace and toPlace.
        img_bbox: [(min_lon, min_lat), (max_lon, max_lat)] output bounds.
        raster_res: raster resolution value.
        otp_router_id: optional OTP router id.

    Returns:
        The complete request URL string.
    """
    # General OTP routing parameters come first.
    query = "&".join(["%s=%s" % (name, urllib2.quote(str(val)))
                      for name, val in routing_params.iteritems()])
    place = str(lon_lat[1]) + ',' + str(lon_lat[0])
    extras = [
        'fromPlace=' + place,
        'toPlace=' + place,
        'time=' + date + 'T' + urllib2.quote(time),
        # Parameters specific to the raster (WMS) output.
        'format=' + "image/geotiff",
        'srs=' + "EPSG:4326",
        'resolution=' + str(raster_res),
        'bbox=' + ','.join(str(ii) for ii in img_bbox[0] + img_bbox[1]),
    ]
    if otp_router_id is not None:
        extras.append('routerId=' + otp_router_id)
    for extra in extras:
        query += '&' + extra
    return server_url + "/opentripplanner-api-webapp/ws" + "/wms" + '?' + query
def buildRequestStringVector(server_url, routing_params, date, time, lon_lat,
        time_radius, vec_type, otp_router_id=None):
    """Build the OTP isochrone (vector) request URL.

    Args:
        server_url: base URL of the OpenTripPlanner server.
        routing_params: dict of general OTP routing parameters.
        date, time: departure date and time strings.
        lon_lat: (lon, lat) tuple used for both fromPlace and toPlace.
        time_radius: walkTime radius for the isochrone.
        vec_type: requested vector output format.
        otp_router_id: optional OTP router id.

    Returns:
        The complete request URL string.
    """
    # General OTP routing parameters come first.
    query = "&".join(["%s=%s" % (name, urllib2.quote(str(val)))
                      for name, val in routing_params.iteritems()])
    place = str(lon_lat[1]) + ',' + str(lon_lat[0])
    extras = [
        'fromPlace=' + place,
        'toPlace=' + place,
        'time=' + date + 'T' + urllib2.quote(time),
        # Parameters specific to the isochrone (vector) output.
        'walkTime=' + str(time_radius),
        'output=' + vec_type,
    ]
    if otp_router_id is not None:
        extras.append('routerId=' + otp_router_id)
    for extra in extras:
        query += '&' + extra
    return server_url + "/opentripplanner-api-webapp/ws" + "/iso" + '?' + query
def saveIsosForLocations(server_url, otp_router_id, save_path,
        save_suffix, locations, date, times,
        save_nearby_times, nearby_minutes, num_each_side,
        routing_params,
        raster_bounding_buf, raster_res,
        iso_inc, iso_max, vec_types, re_download=False):
    """Download travel-time rasters (and, when enabled, isochrone vectors)
    for each location and departure time, writing them under save_path.

    Existing files are skipped unless re_download is True.
    """
    if os.path.exists(save_path) is False:
        os.makedirs(save_path)
    for loc in locations:
        loc_name_orig = loc[0]
        lon_lat = loc[1]
        # Bounding box of the output raster, centred on the location.
        img_buf = raster_bounding_buf
        img_bbox = [(lon_lat[0] - img_buf[0], lon_lat[1] - img_buf[1]),
            (lon_lat[0] + img_buf[0], lon_lat[1] + img_buf[1])]
        print "Saving info for location %s" % loc_name_orig
        for time in times:
            print "For time %s:" % time
            # Optionally fetch rasters at several departure times around
            # the nominal one.
            if save_nearby_times is None:
                mins_diffs = 0
            else:
                mins_diffs = utils.get_nearby_min_diffs(nearby_minutes,
                    num_each_side)
            date_time_str_set = utils.get_date_time_string_set(date, time,
                mins_diffs)
            fname_set = utils.get_raster_filenames(loc_name_orig,
                date_time_str_set, save_path, save_suffix)
            print "About to save rasters at dates and times, to files:"
            # First pass: only announce what will be downloaded.
            for date_time_tuple, fname in zip(date_time_str_set, fname_set):
                date_mod, time_mod = date_time_tuple
                if re_download or not os.path.exists(fname):
                    print " %s - %s -> %s" % (date_mod, time_mod, fname)
            # Second pass: perform the raster downloads.
            for date_time_tuple, fname in zip(date_time_str_set, fname_set):
                if re_download or not os.path.exists(fname):
                    date_mod, time_mod = date_time_tuple
                    url = buildRequestStringRaster(server_url, routing_params,
                        date_mod, time_mod, lon_lat, img_bbox, raster_res,
                        otp_router_id)
                    print url
                    response = urllib2.urlopen(url)
                    data = response.read()
                    f = open(fname, "w")
                    f.write(data)
                    f.close()
            # Now get the vectors, at different time radius.
            # TODO: Remove once iso issue debugged successfully on NECTAR
            # server.
            # NOTE(review): this 'continue' deliberately skips the vector
            # download below (see TODO above); the rest of the loop body is
            # unreachable until it is removed.
            continue
            print "About to save vectors:"
            isochrones = range(iso_inc, iso_max+1, iso_inc)
            for iso in isochrones:
                for vec_type in vec_types:
                    vec_fname = utils.vectorName(loc_name_orig, time, iso, vec_type,
                        save_path, save_suffix)
                    if re_download or not os.path.exists(vec_fname):
                        url = buildRequestStringVector(server_url, routing_params,
                            date, time, lon_lat, iso, vec_type, otp_router_id)
                        print url
                        response = urllib2.urlopen(url)
                        data = response.read()
                        f = open(vec_fname, "w")
                        f.write(data)
                        f.close()
    print "DONE!\n"
    return
def save_isos(multi_graph_iso_set, re_download=False):
    """Run saveIsosForLocations once per configured server/graph entry.

    Args:
        multi_graph_iso_set: iterable of
            (server_url, otp_router_id, save_path, save_suffix, isos_spec)
            tuples, where isos_spec is a kwargs dict for
            saveIsosForLocations.
        re_download: when True, re-fetch files that already exist.
    """
    for entry in multi_graph_iso_set:
        server_url, otp_router_id, save_path, save_suffix, isos_spec = entry
        saveIsosForLocations(server_url, otp_router_id, save_path,
            save_suffix, re_download=re_download, **isos_spec)
def load_locations_from_shpfile(shpfile_name):
    """Desired output format is a list of tuples containing a location name,
    and a lon, lat pair, e.g.:
    ("MONASH UNI CLAYTON", (145.13163, -37.91432))

    NOTE(review): relies on osr/ogr (GDAL), sys, and the constants
    OTP_ROUTER_EPSG and LOCATION_NAME_FIELD, none of which are imported or
    defined in this module -- presumably star-imported via 'utils' or simply
    missing imports; confirm.
    """
    locations = []
    # Target spatial reference: the router's EPSG code.
    output_srs = osr.SpatialReference()
    output_srs.ImportFromEPSG(OTP_ROUTER_EPSG)
    locations_shp = ogr.Open(shpfile_name, 0)  # 0 = open read-only
    if locations_shp is None:
        print "Error, input locations shape file given, %s , failed to open." \
            % (shpfile_name)
        sys.exit(1)
    locations_lyr = locations_shp.GetLayer(0)
    locations_srs = locations_lyr.GetSpatialRef()
    # Only reproject when the shapefile SRS differs from the target SRS.
    transform = None
    if not locations_srs.IsSame(output_srs):
        transform = osr.CoordinateTransformation(locations_srs, output_srs)
    locations = []  # NOTE(review): re-initialised; first assignment above is redundant
    for loc_feat in locations_lyr:
        loc_name = loc_feat.GetField(LOCATION_NAME_FIELD)
        loc_geom = loc_feat.GetGeometryRef()
        if transform:
            loc_geom.Transform(transform)
        locations.append((loc_name, loc_geom.GetPoint_2D(0)))
    return locations
|
"""Utility functions used by signals to attach Ratings to Comments"""
try:
from django.contrib.comments.models import Comment
except ImportError:
from django_comments.models import Comment
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.utils.encoding import smart_str
from livesettings.functions import config_value
from models import ProductRating
from product.models import Product
from satchmo_utils import url_join
import logging
from django.conf import settings
log = logging.getLogger('productratings')
def save_rating(comment=None, request=None, **kwargs):
    """Create a rating and save with the comment

    Intended as a comment-posted signal handler: pulls the 'rating' field
    out of the POST data and stores it as a ProductRating attached to the
    comment. Silently ignores non-product comments and unparseable ratings.
    """
    # should always be true
    if request.method != "POST":
        return
    data = request.POST.copy()
    if 'rating' not in data:
        return  # no rating submitted with this comment
    raw = data['rating']
    try:
        rating = int(raw)
    except ValueError:
        log.error('Could not parse rating from posted rating: %s', raw)
        return
    # Ratings only apply to comments on Product objects.
    if comment.content_type.app_label == "product" and comment.content_type.model == "product":
        if hasattr(comment, 'rating'):
            log.debug('editing existing comment %s, setting rating=%i', comment, rating)
            productrating = comment.rating
            productrating.rating = rating
        else:
            log.debug("Creating new rating for comment: %s = %i", comment, rating)
            # NOTE(review): p is otherwise unused; the lookup apparently just
            # validates that the product exists (raises DoesNotExist
            # otherwise) -- confirm intent.
            p = Product.objects.get(pk=comment.object_pk)
            productrating = ProductRating(comment=comment, rating=rating)
        productrating.save()
    else:
        log.debug('Not saving rating for comment on a %s object', comment.content_type.model)
def one_rating_per_product(comment=None, request=None, **kwargs):
    """Delete the requesting user's other public comments on the same product.

    Intended as a comment-posted signal handler so each user keeps at most
    one public product comment (and hence one rating) per product.
    """
    current_site = Site.objects.get_current()
    existing_comments = Comment.objects.filter(
        object_pk__exact=comment.object_pk,
        content_type__app_label__exact='product',
        content_type__model__exact='product',
        site__exact=current_site,
        is_public__exact=True,
        user__exact=request.user)
    for existing in existing_comments:
        # Keep only the comment that triggered this handler.
        if not existing == comment:
            existing.delete()
def check_with_akismet(comment=None, request=None, **kwargs):
    """Run the comment through Akismet and unpublish it when flagged as spam.

    Intended as a comment-posted signal handler. Does nothing unless the
    PRODUCT.AKISMET_ENABLE livesetting is on and an API key is configured
    and accepted by the Akismet service.
    """
    if config_value("PRODUCT", "AKISMET_ENABLE"):
        akismet_key = config_value("PRODUCT", "AKISMET_KEY")
        if akismet_key:
            site = Site.objects.get_current()
            shop = urlresolvers.reverse('satchmo_shop_home')
            # Imported lazily so the akismet package is only required when
            # the feature is actually enabled.
            from akismet import Akismet
            akismet = Akismet(
                key=akismet_key,
                blog_url='http://%s' % url_join(site.domain, shop))
            if akismet.verify_key():
                akismet_data = { 'comment_type': 'comment',
                    'referrer': request.META.get('HTTP_REFERER', ""),
                    'user_ip': comment.ip_address,
                    'user_agent': '' }
                if akismet.comment_check(smart_str(comment.comment), data=akismet_data, build_data=True):
                    # Spam: hide the comment rather than deleting it.
                    comment.is_public=False
                    comment.save()
                    log.info("Akismet marked comment #%i as spam", comment.id)
                else:
                    log.debug("Akismet accepted comment #%i", comment.id)
            else:
                log.warn("Akismet key '%s' not accepted by akismet service.", akismet_key)
        else:
            log.info("Akismet enabled, but no key found. Please put in your admin settings.")
|
import copy
from PyQt5 import QtCore
from PyQt5.QtWidgets import QCheckBox, QDialog, QDoubleSpinBox, QGridLayout, QLabel, QMessageBox, QPushButton, QSizePolicy, QSpinBox
class EditModifierDialog(QDialog):
def __init__(self, Parent, CharacterWindow, StatModifier, StatModifierDescription="Stat Modifier"):
super().__init__(parent=Parent)
# Store Parameters
self.CharacterWindow = CharacterWindow
self.StatModifier = StatModifier
# Variables
self.StatModifierOriginalState = copy.deepcopy(self.StatModifier)
self.UnsavedChanges = False
self.Cancelled = False
# Inputs Size Policy
self.InputsSizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
# Prompt Label
self.PromptLabel = QLabel("Edit " + StatModifierDescription + ":")
self.PromptLabel.setAlignment(QtCore.Qt.AlignCenter)
# Base AC
if "Base AC" in self.StatModifier:
self.BaseACLabel = QLabel("Base AC")
self.BaseACLabel.setAlignment(QtCore.Qt.AlignCenter)
self.BaseACLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.BaseACLabel.setMargin(5)
self.BaseACSpinBox = QSpinBox()
self.BaseACSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.BaseACSpinBox.setButtonSymbols(self.BaseACSpinBox.NoButtons)
self.BaseACSpinBox.setRange(0, 1000000000)
self.BaseACSpinBox.setValue(StatModifier["Base AC"])
self.BaseACSpinBox.valueChanged.connect(self.UpdateStatModifier)
# Multipliers List
self.MultipliersList = []
# Multiplier Header Labels
self.StatLabel = QLabel("Stat")
self.StatLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StatLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.StatLabel.setMargin(5)
self.MultiplierLabel = QLabel("Multiplier")
self.MultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.MultiplierLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.MultiplierLabel.setMargin(5)
self.RoundUpLabel = QLabel("Round Up")
self.RoundUpLabel.setAlignment(QtCore.Qt.AlignCenter)
self.RoundUpLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.RoundUpLabel.setMargin(5)
self.MinLabel = QLabel("Min")
self.MinLabel.setAlignment(QtCore.Qt.AlignCenter)
self.MinLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.MinLabel.setMargin(5)
self.MaxLabel = QLabel("Max")
self.MaxLabel.setAlignment(QtCore.Qt.AlignCenter)
self.MaxLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.MaxLabel.setMargin(5)
self.MultipliersList.append((self.StatLabel, self.MultiplierLabel, self.RoundUpLabel, self.MinLabel, self.MaxLabel))
# Strength Multiplier
self.StrengthMultiplierLabel = QLabel("Strength")
self.StrengthMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StrengthMultiplierSpinBox = QDoubleSpinBox()
self.StrengthMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.StrengthMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.StrengthMultiplierSpinBox.setButtonSymbols(self.StrengthMultiplierSpinBox.NoButtons)
self.StrengthMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.StrengthMultiplierSpinBox.setValue(StatModifier["Strength Multiplier"])
self.StrengthMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.StrengthMultiplierRoundUpCheckBox = QCheckBox()
self.StrengthMultiplierRoundUpCheckBox.setChecked(StatModifier["Strength Multiplier Round Up"])
self.StrengthMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.StrengthMinSpinBox = QSpinBox()
self.StrengthMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.StrengthMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.StrengthMinSpinBox.setButtonSymbols(self.StrengthMinSpinBox.NoButtons)
self.StrengthMinSpinBox.setRange(-1, 1000000000)
self.StrengthMinSpinBox.setSpecialValueText("None")
self.StrengthMinSpinBox.setValue(StatModifier["Strength Min"] if StatModifier["Strength Min"] is not None else -1)
self.StrengthMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.StrengthMaxSpinBox = QSpinBox()
self.StrengthMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.StrengthMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.StrengthMaxSpinBox.setButtonSymbols(self.StrengthMaxSpinBox.NoButtons)
self.StrengthMaxSpinBox.setRange(-1, 1000000000)
self.StrengthMaxSpinBox.setSpecialValueText("None")
self.StrengthMaxSpinBox.setValue(StatModifier["Strength Max"] if StatModifier["Strength Max"] is not None else -1)
self.StrengthMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.StrengthMultiplierLabel, self.StrengthMultiplierSpinBox, self.StrengthMultiplierRoundUpCheckBox, self.StrengthMinSpinBox, self.StrengthMaxSpinBox))
# Dexterity Multiplier
self.DexterityMultiplierLabel = QLabel("Dexterity")
self.DexterityMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.DexterityMultiplierSpinBox = QDoubleSpinBox()
self.DexterityMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.DexterityMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.DexterityMultiplierSpinBox.setButtonSymbols(self.DexterityMultiplierSpinBox.NoButtons)
self.DexterityMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.DexterityMultiplierSpinBox.setValue(StatModifier["Dexterity Multiplier"])
self.DexterityMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.DexterityMultiplierRoundUpCheckBox = QCheckBox()
self.DexterityMultiplierRoundUpCheckBox.setChecked(StatModifier["Dexterity Multiplier Round Up"])
self.DexterityMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.DexterityMinSpinBox = QSpinBox()
self.DexterityMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.DexterityMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.DexterityMinSpinBox.setButtonSymbols(self.DexterityMinSpinBox.NoButtons)
self.DexterityMinSpinBox.setRange(-1, 1000000000)
self.DexterityMinSpinBox.setSpecialValueText("None")
self.DexterityMinSpinBox.setValue(StatModifier["Dexterity Min"] if StatModifier["Dexterity Min"] is not None else -1)
self.DexterityMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.DexterityMaxSpinBox = QSpinBox()
self.DexterityMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.DexterityMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.DexterityMaxSpinBox.setButtonSymbols(self.DexterityMaxSpinBox.NoButtons)
self.DexterityMaxSpinBox.setRange(-1, 1000000000)
self.DexterityMaxSpinBox.setSpecialValueText("None")
self.DexterityMaxSpinBox.setValue(StatModifier["Dexterity Max"] if StatModifier["Dexterity Max"] is not None else -1)
self.DexterityMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.DexterityMultiplierLabel, self.DexterityMultiplierSpinBox, self.DexterityMultiplierRoundUpCheckBox, self.DexterityMinSpinBox, self.DexterityMaxSpinBox))
# Constitution Multiplier
self.ConstitutionMultiplierLabel = QLabel("Constitution")
self.ConstitutionMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ConstitutionMultiplierSpinBox = QDoubleSpinBox()
self.ConstitutionMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ConstitutionMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ConstitutionMultiplierSpinBox.setButtonSymbols(self.ConstitutionMultiplierSpinBox.NoButtons)
self.ConstitutionMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.ConstitutionMultiplierSpinBox.setValue(StatModifier["Constitution Multiplier"])
self.ConstitutionMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.ConstitutionMultiplierRoundUpCheckBox = QCheckBox()
self.ConstitutionMultiplierRoundUpCheckBox.setChecked(StatModifier["Constitution Multiplier Round Up"])
self.ConstitutionMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.ConstitutionMinSpinBox = QSpinBox()
self.ConstitutionMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ConstitutionMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ConstitutionMinSpinBox.setButtonSymbols(self.ConstitutionMinSpinBox.NoButtons)
self.ConstitutionMinSpinBox.setRange(-1, 1000000000)
self.ConstitutionMinSpinBox.setSpecialValueText("None")
self.ConstitutionMinSpinBox.setValue(StatModifier["Constitution Min"] if StatModifier["Constitution Min"] is not None else -1)
self.ConstitutionMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.ConstitutionMaxSpinBox = QSpinBox()
self.ConstitutionMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ConstitutionMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ConstitutionMaxSpinBox.setButtonSymbols(self.ConstitutionMaxSpinBox.NoButtons)
self.ConstitutionMaxSpinBox.setRange(-1, 1000000000)
self.ConstitutionMaxSpinBox.setSpecialValueText("None")
self.ConstitutionMaxSpinBox.setValue(StatModifier["Constitution Max"] if StatModifier["Constitution Max"] is not None else -1)
self.ConstitutionMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.ConstitutionMultiplierLabel, self.ConstitutionMultiplierSpinBox, self.ConstitutionMultiplierRoundUpCheckBox, self.ConstitutionMinSpinBox, self.ConstitutionMaxSpinBox))
# Intelligence Multiplier
self.IntelligenceMultiplierLabel = QLabel("Intelligence")
self.IntelligenceMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.IntelligenceMultiplierSpinBox = QDoubleSpinBox()
self.IntelligenceMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.IntelligenceMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.IntelligenceMultiplierSpinBox.setButtonSymbols(self.IntelligenceMultiplierSpinBox.NoButtons)
self.IntelligenceMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.IntelligenceMultiplierSpinBox.setValue(StatModifier["Intelligence Multiplier"])
self.IntelligenceMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.IntelligenceMultiplierRoundUpCheckBox = QCheckBox()
self.IntelligenceMultiplierRoundUpCheckBox.setChecked(StatModifier["Intelligence Multiplier Round Up"])
self.IntelligenceMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.IntelligenceMinSpinBox = QSpinBox()
self.IntelligenceMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.IntelligenceMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.IntelligenceMinSpinBox.setButtonSymbols(self.IntelligenceMinSpinBox.NoButtons)
self.IntelligenceMinSpinBox.setRange(-1, 1000000000)
self.IntelligenceMinSpinBox.setSpecialValueText("None")
self.IntelligenceMinSpinBox.setValue(StatModifier["Intelligence Min"] if StatModifier["Intelligence Min"] is not None else -1)
self.IntelligenceMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.IntelligenceMaxSpinBox = QSpinBox()
self.IntelligenceMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.IntelligenceMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.IntelligenceMaxSpinBox.setButtonSymbols(self.IntelligenceMaxSpinBox.NoButtons)
self.IntelligenceMaxSpinBox.setRange(-1, 1000000000)
self.IntelligenceMaxSpinBox.setSpecialValueText("None")
self.IntelligenceMaxSpinBox.setValue(StatModifier["Intelligence Max"] if StatModifier["Intelligence Max"] is not None else -1)
self.IntelligenceMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.IntelligenceMultiplierLabel, self.IntelligenceMultiplierSpinBox, self.IntelligenceMultiplierRoundUpCheckBox, self.IntelligenceMinSpinBox, self.IntelligenceMaxSpinBox))
# Wisdom Multiplier
self.WisdomMultiplierLabel = QLabel("Wisdom")
self.WisdomMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.WisdomMultiplierSpinBox = QDoubleSpinBox()
self.WisdomMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.WisdomMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.WisdomMultiplierSpinBox.setButtonSymbols(self.WisdomMultiplierSpinBox.NoButtons)
self.WisdomMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.WisdomMultiplierSpinBox.setValue(StatModifier["Wisdom Multiplier"])
self.WisdomMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.WisdomMultiplierRoundUpCheckBox = QCheckBox()
self.WisdomMultiplierRoundUpCheckBox.setChecked(StatModifier["Wisdom Multiplier Round Up"])
self.WisdomMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.WisdomMinSpinBox = QSpinBox()
self.WisdomMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.WisdomMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.WisdomMinSpinBox.setButtonSymbols(self.WisdomMinSpinBox.NoButtons)
self.WisdomMinSpinBox.setRange(-1, 1000000000)
self.WisdomMinSpinBox.setSpecialValueText("None")
self.WisdomMinSpinBox.setValue(StatModifier["Wisdom Min"] if StatModifier["Wisdom Min"] is not None else -1)
self.WisdomMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.WisdomMaxSpinBox = QSpinBox()
self.WisdomMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.WisdomMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.WisdomMaxSpinBox.setButtonSymbols(self.WisdomMaxSpinBox.NoButtons)
self.WisdomMaxSpinBox.setRange(-1, 1000000000)
self.WisdomMaxSpinBox.setSpecialValueText("None")
self.WisdomMaxSpinBox.setValue(StatModifier["Wisdom Max"] if StatModifier["Wisdom Max"] is not None else -1)
self.WisdomMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.WisdomMultiplierLabel, self.WisdomMultiplierSpinBox, self.WisdomMultiplierRoundUpCheckBox, self.WisdomMinSpinBox, self.WisdomMaxSpinBox))
# Charisma Multiplier
self.CharismaMultiplierLabel = QLabel("Charisma")
self.CharismaMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.CharismaMultiplierSpinBox = QDoubleSpinBox()
self.CharismaMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.CharismaMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.CharismaMultiplierSpinBox.setButtonSymbols(self.CharismaMultiplierSpinBox.NoButtons)
self.CharismaMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.CharismaMultiplierSpinBox.setValue(StatModifier["Charisma Multiplier"])
self.CharismaMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.CharismaMultiplierRoundUpCheckBox = QCheckBox()
self.CharismaMultiplierRoundUpCheckBox.setChecked(StatModifier["Charisma Multiplier Round Up"])
self.CharismaMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.CharismaMinSpinBox = QSpinBox()
self.CharismaMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.CharismaMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.CharismaMinSpinBox.setButtonSymbols(self.CharismaMinSpinBox.NoButtons)
self.CharismaMinSpinBox.setRange(-1, 1000000000)
self.CharismaMinSpinBox.setSpecialValueText("None")
self.CharismaMinSpinBox.setValue(StatModifier["Charisma Min"] if StatModifier["Charisma Min"] is not None else -1)
self.CharismaMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.CharismaMaxSpinBox = QSpinBox()
self.CharismaMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.CharismaMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.CharismaMaxSpinBox.setButtonSymbols(self.CharismaMaxSpinBox.NoButtons)
self.CharismaMaxSpinBox.setRange(-1, 1000000000)
self.CharismaMaxSpinBox.setSpecialValueText("None")
self.CharismaMaxSpinBox.setValue(StatModifier["Charisma Max"] if StatModifier["Charisma Max"] is not None else -1)
self.CharismaMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.CharismaMultiplierLabel, self.CharismaMultiplierSpinBox, self.CharismaMultiplierRoundUpCheckBox, self.CharismaMinSpinBox, self.CharismaMaxSpinBox))
# Proficiency Multiplier
self.ProficiencyMultiplierLabel = QLabel("Proficiency")
self.ProficiencyMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ProficiencyMultiplierSpinBox = QDoubleSpinBox()
self.ProficiencyMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ProficiencyMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ProficiencyMultiplierSpinBox.setButtonSymbols(self.ProficiencyMultiplierSpinBox.NoButtons)
self.ProficiencyMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.ProficiencyMultiplierSpinBox.setValue(StatModifier["Proficiency Multiplier"])
self.ProficiencyMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.ProficiencyMultiplierRoundUpCheckBox = QCheckBox()
self.ProficiencyMultiplierRoundUpCheckBox.setChecked(StatModifier["Proficiency Multiplier Round Up"])
self.ProficiencyMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.ProficiencyMinSpinBox = QSpinBox()
self.ProficiencyMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ProficiencyMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ProficiencyMinSpinBox.setButtonSymbols(self.ProficiencyMinSpinBox.NoButtons)
self.ProficiencyMinSpinBox.setRange(-1, 1000000000)
self.ProficiencyMinSpinBox.setSpecialValueText("None")
self.ProficiencyMinSpinBox.setValue(StatModifier["Proficiency Min"] if StatModifier["Proficiency Min"] is not None else -1)
self.ProficiencyMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.ProficiencyMaxSpinBox = QSpinBox()
self.ProficiencyMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ProficiencyMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ProficiencyMaxSpinBox.setButtonSymbols(self.ProficiencyMaxSpinBox.NoButtons)
self.ProficiencyMaxSpinBox.setRange(-1, 1000000000)
self.ProficiencyMaxSpinBox.setSpecialValueText("None")
self.ProficiencyMaxSpinBox.setValue(StatModifier["Proficiency Max"] if StatModifier["Proficiency Max"] is not None else -1)
self.ProficiencyMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.ProficiencyMultiplierLabel, self.ProficiencyMultiplierSpinBox, self.ProficiencyMultiplierRoundUpCheckBox, self.ProficiencyMinSpinBox, self.ProficiencyMaxSpinBox))
# Level Multiplier
if "Level Multiplier" in self.StatModifier:
self.LevelMultiplierLabel = QLabel("Level")
self.LevelMultiplierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.LevelMultiplierSpinBox = QDoubleSpinBox()
self.LevelMultiplierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.LevelMultiplierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.LevelMultiplierSpinBox.setButtonSymbols(self.LevelMultiplierSpinBox.NoButtons)
self.LevelMultiplierSpinBox.setRange(-1000000000.0, 1000000000.0)
self.LevelMultiplierSpinBox.setValue(StatModifier["Level Multiplier"])
self.LevelMultiplierSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.LevelMultiplierRoundUpCheckBox = QCheckBox()
self.LevelMultiplierRoundUpCheckBox.setChecked(StatModifier["Level Multiplier Round Up"])
self.LevelMultiplierRoundUpCheckBox.stateChanged.connect(self.UpdateStatModifier)
self.LevelMinSpinBox = QSpinBox()
self.LevelMinSpinBox.setSizePolicy(self.InputsSizePolicy)
self.LevelMinSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.LevelMinSpinBox.setButtonSymbols(self.LevelMinSpinBox.NoButtons)
self.LevelMinSpinBox.setRange(-1, 1000000000)
self.LevelMinSpinBox.setSpecialValueText("None")
self.LevelMinSpinBox.setValue(StatModifier["Level Min"] if StatModifier["Level Min"] is not None else -1)
self.LevelMinSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.LevelMaxSpinBox = QSpinBox()
self.LevelMaxSpinBox.setSizePolicy(self.InputsSizePolicy)
self.LevelMaxSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.LevelMaxSpinBox.setButtonSymbols(self.LevelMaxSpinBox.NoButtons)
self.LevelMaxSpinBox.setRange(-1, 1000000000)
self.LevelMaxSpinBox.setSpecialValueText("None")
self.LevelMaxSpinBox.setValue(StatModifier["Level Max"] if StatModifier["Level Max"] is not None else -1)
self.LevelMaxSpinBox.valueChanged.connect(self.UpdateStatModifier)
self.MultipliersList.append((self.LevelMultiplierLabel, self.LevelMultiplierSpinBox, self.LevelMultiplierRoundUpCheckBox, self.LevelMinSpinBox, self.LevelMaxSpinBox))
# Manual Modifier
self.ManualModifierLabel = QLabel("Manual Modifier")
self.ManualModifierLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ManualModifierLabel.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
self.ManualModifierLabel.setMargin(5)
self.ManualModifierSpinBox = QSpinBox()
self.ManualModifierSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ManualModifierSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ManualModifierSpinBox.setButtonSymbols(self.ManualModifierSpinBox.NoButtons)
self.ManualModifierSpinBox.setRange(-1000000000, 1000000000)
self.ManualModifierSpinBox.setValue(StatModifier["Manual Modifier"])
self.ManualModifierSpinBox.valueChanged.connect(self.UpdateStatModifier)
# Buttons
self.DoneButton = QPushButton("Done")
self.DoneButton.clicked.connect(self.Done)
self.CancelButton = QPushButton("Cancel")
self.CancelButton.clicked.connect(self.Cancel)
# Layout
self.Layout = QGridLayout()
self.Layout.addWidget(self.PromptLabel, 0, 0, 1, 2)
if "Base AC" in self.StatModifier:
self.Layout.addWidget(self.BaseACLabel, 1, 0, 1, 2)
self.Layout.addWidget(self.BaseACSpinBox, 2, 0, 1, 2)
self.MultipliersLayout = QGridLayout()
for Row in range(len(self.MultipliersList)):
RowWidgets = self.MultipliersList[Row]
self.MultipliersLayout.addWidget(RowWidgets[0], Row, 0)
self.MultipliersLayout.addWidget(RowWidgets[1], Row, 1)
self.MultipliersLayout.addWidget(RowWidgets[2], Row, 2, QtCore.Qt.AlignCenter)
self.MultipliersLayout.addWidget(RowWidgets[3], Row, 3)
self.MultipliersLayout.addWidget(RowWidgets[4], Row, 4)
for Row in range(1, len(self.MultipliersList)):
self.MultipliersLayout.setRowStretch(Row, 1)
for Column in [1, 3, 4]:
self.MultipliersLayout.setColumnStretch(Column, 1)
self.Layout.addLayout(self.MultipliersLayout, 3, 0, 1, 2)
self.Layout.addWidget(self.ManualModifierLabel, 4, 0, 1, 2)
self.Layout.addWidget(self.ManualModifierSpinBox, 5, 0, 1, 2)
self.Layout.addWidget(self.DoneButton, 6, 0)
self.Layout.addWidget(self.CancelButton, 6, 1)
self.Layout.setRowStretch(3, 1)
self.setLayout(self.Layout)
# Set Window Title and Icon
self.setWindowTitle(self.CharacterWindow.ScriptName)
self.setWindowIcon(self.CharacterWindow.WindowIcon)
# Update Display
self.UpdateDisplay()
# Execute Dialog
self.exec_()
def UpdateStatModifier(self):
    """Copy every input widget's current value back into self.StatModifier.

    Aborts without changes when the current inputs are invalid (a min above
    its max).  A min/max spin box value of -1 is the "None" sentinel (the
    boxes use setSpecialValueText("None")) and is stored as None.
    """
    if not self.ValidInput():
        return
    # Base AC (only present for some modifier dicts)
    if "Base AC" in self.StatModifier:
        self.StatModifier["Base AC"] = self.BaseACSpinBox.value()
    # All stats share the same widget naming scheme
    # (<Stat>MultiplierSpinBox, <Stat>MultiplierRoundUpCheckBox,
    # <Stat>MinSpinBox, <Stat>MaxSpinBox), so iterate instead of
    # repeating the same four assignments per stat.
    Stats = ["Strength", "Dexterity", "Constitution", "Intelligence", "Wisdom", "Charisma", "Proficiency"]
    if "Level Multiplier" in self.StatModifier:
        Stats.append("Level")
    for Stat in Stats:
        self.StatModifier[Stat + " Multiplier"] = getattr(self, Stat + "MultiplierSpinBox").value()
        self.StatModifier[Stat + " Multiplier Round Up"] = getattr(self, Stat + "MultiplierRoundUpCheckBox").isChecked()
        MinValue = getattr(self, Stat + "MinSpinBox").value()
        self.StatModifier[Stat + " Min"] = MinValue if MinValue != -1 else None
        MaxValue = getattr(self, Stat + "MaxSpinBox").value()
        self.StatModifier[Stat + " Max"] = MaxValue if MaxValue != -1 else None
    # Flat manual modifier
    self.StatModifier["Manual Modifier"] = self.ManualModifierSpinBox.value()
    # Mark dirty and refresh the sign-coloring of the inputs
    self.UnsavedChanges = True
    self.UpdateDisplay()
def UpdateDisplay(self):
    """Color each value spin box by the sign of its value.

    Positive values get a dark green background, negative values dark red,
    and zero reverts to the default style.
    """
    Boxes = [self.StrengthMultiplierSpinBox, self.DexterityMultiplierSpinBox, self.ConstitutionMultiplierSpinBox, self.IntelligenceMultiplierSpinBox, self.WisdomMultiplierSpinBox, self.CharismaMultiplierSpinBox, self.ProficiencyMultiplierSpinBox, self.ManualModifierSpinBox]
    if "Base AC" in self.StatModifier:
        Boxes.append(self.BaseACSpinBox)
    if "Level Multiplier" in self.StatModifier:
        Boxes.append(self.LevelMultiplierSpinBox)
    for Box in Boxes:
        # The stylesheet selector must match the widget's concrete class.
        Prefix = "QDoubleSpinBox " if type(Box) is QDoubleSpinBox else "QSpinBox "
        Value = Box.value()
        if Value > 0.0:
            Box.setStyleSheet(Prefix + "{background-color: darkgreen;}")
        elif Value < 0.0:
            Box.setStyleSheet(Prefix + "{background-color: darkred;}")
        else:
            Box.setStyleSheet(Prefix + "{}")
def Done(self):
    """Close the dialog, but only when the current inputs pass validation."""
    if not self.ValidInput(Alert=True):
        return
    self.close()
def Cancel(self):
    """Discard all edits: restore the modifier's original state and close."""
    self.UnsavedChanges = False
    self.Cancelled = True
    # Roll the working dict back to the snapshot taken before editing.
    self.StatModifier.update(self.StatModifierOriginalState)
    self.close()
def ValidInput(self, Alert=False):
    """Return True when every stat's min does not exceed its max.

    When Alert is True, shows a warning message box on the first violation.
    """
    MinMaxPairs = [
        (self.StrengthMinSpinBox, self.StrengthMaxSpinBox),
        (self.DexterityMinSpinBox, self.DexterityMaxSpinBox),
        (self.ConstitutionMinSpinBox, self.ConstitutionMaxSpinBox),
        (self.IntelligenceMinSpinBox, self.IntelligenceMaxSpinBox),
        (self.WisdomMinSpinBox, self.WisdomMaxSpinBox),
        (self.CharismaMinSpinBox, self.CharismaMaxSpinBox),
        (self.ProficiencyMinSpinBox, self.ProficiencyMaxSpinBox),
    ]
    if "Level Multiplier" in self.StatModifier:
        MinMaxPairs.append((self.LevelMinSpinBox, self.LevelMaxSpinBox))
    for MinBox, MaxBox in MinMaxPairs:
        if MinBox.value() <= MaxBox.value():
            continue
        if Alert:
            self.CharacterWindow.DisplayMessageBox("Multiplier minimums must be less than or equal to maximums.", Icon=QMessageBox.Warning, Parent=self)
        return False
    return True
|
#!/usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
import math
import time
from std_srvs.srv import Empty
X = 0.0
Y = 0.0
yaw = 0.0
def pose_callback(pose):
    """Subscriber callback: cache the turtle's latest pose in module globals."""
    global X, Y, yaw
    rospy.loginfo("X=%f, Y=%f\n", pose.x, pose.y)
    X, Y, yaw = pose.x, pose.y, pose.theta
def move(speed, distance, is_forward):
    """Drive the turtle in a straight line for `distance` units.

    speed: magnitude of the linear velocity (sign is ignored).
    distance: how far to travel, measured from the pose at call time.
    is_forward: True drives forward, False backward.
    Progress is measured against the live X/Y globals updated by
    pose_callback, then the turtle is stopped.
    """
    global X, Y
    start_x, start_y = X, Y
    velocity_message = Twist()
    velocity_message.linear.x = abs(speed) if is_forward else -abs(speed)
    loop_rate = rospy.Rate(10)  # publish at 10 Hz
    velocity_publisher = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
    distance_moved = 0.0
    while True:
        rospy.loginfo("Turtlesim moves forward")
        velocity_publisher.publish(velocity_message)
        loop_rate.sleep()
        # rospy.loginfo("%f %f %f %f", X,Y,X0,Y0)
        distance_moved = math.sqrt(((X - start_x) ** 2) + ((Y - start_y) ** 2))
        print(distance_moved, X, Y, start_x, start_y)
        if not (distance_moved < distance):
            rospy.loginfo("reached")
            rospy.logwarn("Stopping the Robot")
            break
    # Stop the turtle once the target distance is covered.
    velocity_message.linear.x = 0
    velocity_publisher.publish(velocity_message)
def rotate(angular_speed_degree, relative_angle_degree, clockwise):
    """Rotate the turtle in place by relative_angle_degree.

    angular_speed_degree: rotation speed in degrees/second (sign ignored).
    relative_angle_degree: how many degrees to turn.
    clockwise: truthy rotates clockwise (negative z), falsy counter-clockwise.
    The traveled angle is estimated open-loop from elapsed time, not from
    pose feedback.
    """
    global yaw
    velocity_message = Twist()
    velocity_message.linear.x = 0
    velocity_message.angular.z = 0
    theta0 = yaw
    angular_speed = math.radians(abs(angular_speed_degree))
    if clockwise:
        velocity_message.angular.z = -abs(angular_speed)
    else:
        velocity_message.angular.z = abs(angular_speed)
    angle_moved = 0.0
    loop_rate = rospy.Rate(10)  # publish at 10 Hz
    # BUG FIX: this variable was misspelled "and_vel_topic", so the
    # Publisher below silently resolved cmd_vel_topic from the __main__
    # global and raised NameError when this module was imported instead.
    cmd_vel_topic = '/turtle1/cmd_vel'
    velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)
    t0 = rospy.Time.now().to_sec()
    while (True):
        rospy.loginfo("Turtlesim rotates")
        velocity_publisher.publish(velocity_message)
        t1 = rospy.Time.now().to_sec()
        current_angle_degree = (t1 - t0) * angular_speed_degree
        loop_rate.sleep()
        if current_angle_degree > relative_angle_degree:
            rospy.loginfo("reached")
            break
    # Stop rotating once the target angle is exceeded.
    velocity_message.angular.z = 0
    velocity_publisher.publish(velocity_message)
def go_to_goal(x_goal, y_goal):
    """Drive the turtle to (x_goal, y_goal) with a proportional controller.

    Linear speed is proportional to the remaining distance (gain 0.5) and
    angular speed to the heading error (gain 4.0); loops until within 0.01
    units of the goal.

    BUG FIX: the original relied on a `velocity_publisher` global created
    only in the __main__ block; the publisher is now created locally so the
    function also works when imported.
    """
    global X
    global Y, yaw
    velocity_message = Twist()
    cmd_vel_topic = '/turtle1/cmd_vel'
    velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)
    while True:
        K_linear = 0.5  # proportional gain for linear speed
        distance = abs(math.sqrt(((x_goal - X) ** 2) + ((y_goal - Y) ** 2)))
        linear_speed = distance * K_linear
        K_angular = 4.0  # proportional gain for heading correction
        desired_angle_goal = math.atan2(y_goal - Y, x_goal - X)
        angular_speed = (desired_angle_goal - yaw) * K_angular
        velocity_message.linear.x = linear_speed
        velocity_message.angular.z = angular_speed
        velocity_publisher.publish(velocity_message)
        print('x=', X, 'y=', Y)
        if distance < 0.01:
            break
def setDesiredOrientation(desired_angle_radians):
    """Rotate the turtle to an absolute heading of desired_angle_radians.

    The shortest signed offset from the current yaw decides the direction;
    the magnitude is handed to rotate() in degrees at 30 deg/s.
    """
    relative_angle_radians = desired_angle_radians - yaw
    clockwise = 1 if relative_angle_radians < 0 else 0
    print(relative_angle_radians)
    print(desired_angle_radians)
    rotate(30, math.degrees(abs(relative_angle_radians)), clockwise)
def hexagon(side_length):
    """Trace a regular hexagon with sides of `side_length` units.

    BUG FIX: the original hard-coded a side of 3.0 and silently ignored its
    side_length parameter.
    """
    for _ in range(6):
        move(1.0, side_length, True)
        rotate(10, 60, False)  # 60 degrees = exterior angle of a regular hexagon
if __name__ == '__main__':
    try:
        # Node setup: init, a cmd_vel publisher, and a pose subscriber that
        # keeps the module-level X/Y/yaw globals up to date.
        rospy.init_node('turtlesim_motion_pose', anonymous=True)
        cmd_vel_topic = '/turtle1/cmd_vel'
        velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)
        position_topic = '/turtle1/pose'
        rospy.Subscriber(position_topic, Pose, pose_callback)
        # Give the subscriber a moment to receive the first pose update.
        time.sleep(1)
        hexagon(3.0)
    except rospy.ROSInterruptException:
        rospy.loginfo("node terminated")
import sys
from twisted.python import usage
from twisted.internet import reactor
from twisted.internet.defer import (
maybeDeferred, Deferred, DeferredQueue, inlineCallbacks, returnValue)
from twisted.internet.task import deferLater
from vumi.service import Worker, WorkerCreator
from vumi.servicemaker import VumiOptions
import yaml
from go.apps.dialogue.vumi_app import dialogue_js_config
from go.apps.jsbox.outbound import mk_inbound_push_trigger
from go.apps.jsbox.utils import jsbox_js_config
from go.vumitools.api import VumiApi
class ScriptError(Exception):
    """
    An error to be caught and displayed nicely by a script handler.

    Raised for user-facing failures (e.g. a missing or unsupported
    conversation) rather than programming errors.
    """
class JsBoxSendOptions(VumiOptions):
    """Command-line options for the jsbox send script."""

    optParameters = [
        ["user-account-key", None, None,
         "User account that owns the conversation."],
        ["conversation-key", None, None,
         "Conversation to send messages to."],
        ["vumigo-config", None, None,
         "File containing persistence configuration."],
        ["hz", None, "60.0",
         "Maximum number of messages to send per second."],
        ["exclude-addresses-file", None, None,
         "File containing addresses to exclude, one per line."],
    ]

    def postOptions(self):
        """Validate the required options and coerce 'hz' to a positive float."""
        VumiOptions.postOptions(self)
        if not self['vumigo-config']:
            raise usage.UsageError(
                "Please provide the vumigo-config parameter.")
        if not self['user-account-key']:
            raise usage.UsageError(
                "Please provide the user-account-key parameter.")
        if not self['conversation-key']:
            raise usage.UsageError(
                "Please provide the conversation-key parameter.")
        try:
            hz = float(self['hz'])
        except (TypeError, ValueError):
            hz_okay = False
        else:
            hz_okay = bool(hz > 0)
        if not hz_okay:
            raise usage.UsageError(
                "Please provide a positive float for hz")
        self['hz'] = hz

    def get_vumigo_config(self):
        """Load and return the YAML persistence configuration as a dict."""
        # BUG FIX: this used the Python 2-only builtin file(); open() is
        # equivalent on Python 2 and also works on Python 3.
        with open(self['vumigo-config'], 'r') as stream:
            return yaml.safe_load(stream)
class Ticker(object):
    """
    An object that limits calls to a fixed number per second.
    :param float hz:
        Times per second that :meth:``tick`` may be called.
    """
    # Overridable clock (anything with .seconds() and .callLater()) —
    # useful for tests.
    clock = reactor

    def __init__(self, hz):
        self._hz = hz
        self._min_dt = 1.0 / hz  # minimum seconds allowed between ticks
        self._last = None  # clock time of the previous tick; None before the first

    def tick(self):
        """Return a Deferred that fires once the rate limit allows another call."""
        d = Deferred()
        delay = 0
        if self._last is None:
            # First tick fires immediately.
            self._last = self.clock.seconds()
        else:
            now = self.clock.seconds()
            dt = now - self._last
            delay = 0 if (dt > self._min_dt) else (self._min_dt - dt)
            # NOTE(review): _last is set to `now` rather than `now + delay`,
            # so consecutive delayed ticks can fire slightly faster than hz
            # overall — confirm whether that drift is acceptable.
            self._last = now
        self.clock.callLater(delay, d.callback, None)
        return d
class JsBoxSendWorker(Worker):
    """Worker that pushes inbound trigger messages into a jsbox or dialogue
    conversation, rate-limited via Ticker."""

    # startWorker() enqueues the ready worker here so main() can wait on it.
    WORKER_QUEUE = DeferredQueue()
    stdout = sys.stdout
    stderr = sys.stderr
    # Per-conversation-type loaders for the JS sandbox config.
    JSBOX_CONFIG = {
        'jsbox': lambda conv: jsbox_js_config(conv.config),
        'dialogue': dialogue_js_config,
    }
    SUPPORTED_APPS = tuple(JSBOX_CONFIG.keys())
    SEND_DELAY = 0.01  # No more than 100 msgs/second to the queue.

    def send_inbound_push_trigger(self, to_addr, conversation):
        """Publish a single inbound push trigger message for to_addr."""
        self.emit('Starting %r [%s] -> %s' % (
            conversation.name, conversation.key, to_addr))
        msg = mk_inbound_push_trigger(to_addr, conversation)
        return self.send_to_conv(conversation, msg)

    @inlineCallbacks
    def send_jsbox(self, user_account_key, conversation_key, hz=60,
                   addr_exclude_path=None):
        """Send a push trigger to every opted-in contact address,
        rate-limited to at most `hz` messages per second."""
        conv = yield self.get_conversation(user_account_key, conversation_key)
        delivery_class = self.get_delivery_class(conv)
        excluded_addrs = self.get_excluded_addrs(addr_exclude_path)
        to_addrs = yield self.get_contact_addrs_for_conv(
            conv, delivery_class, excluded_addrs)
        ticker = Ticker(hz=hz)
        for i, to_addr in enumerate(to_addrs):
            yield self.send_inbound_push_trigger(to_addr, conv)
            if (i + 1) % 100 == 0:
                # Progress report every 100 messages.
                self.emit("Messages sent: %s / %s" % (i + 1, len(to_addrs)))
            yield ticker.tick()

    def get_delivery_class(self, conv):
        """Look up the conversation's delivery class from its js config."""
        config_loader = self.JSBOX_CONFIG[conv.conversation_type]
        config = config_loader(conv)
        return config.get('delivery_class')

    def get_excluded_addrs(self, addr_exclude_path):
        """Return the set of addresses listed (one per line) in the exclude
        file, or an empty set when no file was given."""
        if addr_exclude_path is None:
            return set()
        excluded_addrs = set()
        with open(addr_exclude_path, 'r') as exclude_file:
            for line in exclude_file.readlines():
                line = line.strip()
                if line:
                    excluded_addrs.add(line)
        return excluded_addrs

    @inlineCallbacks
    def get_contact_addrs_for_conv(self, conv, delivery_class, excluded_addrs):
        """Collect opted-in contact addresses for the conversation, minus
        the excluded ones."""
        addrs = []
        for contacts in (yield conv.get_opted_in_contact_bunches(
                delivery_class)):
            for contact in (yield contacts):
                addr = contact.addr_for(delivery_class)
                if addr not in excluded_addrs:
                    addrs.append(addr)
        self.emit("Addresses collected: %s" % (len(addrs),))
        returnValue(addrs)

    @inlineCallbacks
    def send_to_conv(self, conv, msg):
        """Publish msg on the conversation type's publisher and pause briefly."""
        publisher = self._publishers[conv.conversation_type]
        yield publisher.publish_message(msg)
        # Give the reactor time to actually send the message.
        yield deferLater(reactor, self.SEND_DELAY, lambda: None)

    @inlineCallbacks
    def make_publisher(self, conv_type):
        """Create and cache a publisher for conv_type's inbound routing key."""
        routing_key = '%s_transport.inbound' % (conv_type,)
        self._publishers[conv_type] = yield self.publish_to(routing_key)

    @inlineCallbacks
    def get_conversation(self, user_account_key, conversation_key):
        """Fetch the wrapped conversation, raising ScriptError when it is
        missing or of an unsupported type."""
        user_api = self.vumi_api.get_user_api(user_account_key)
        conv = yield user_api.get_wrapped_conversation(conversation_key)
        if conv is None:
            raise ScriptError("Conversation not found: %s" % (
                conversation_key,))
        if conv.conversation_type not in self.SUPPORTED_APPS:
            raise ScriptError("Unsupported conversation type: %s" % (
                conv.conversation_type,))
        returnValue(conv)

    @inlineCallbacks
    def startWorker(self):
        """Set up the API and publishers, then announce readiness on WORKER_QUEUE."""
        self.vumi_api = yield VumiApi.from_config_async(
            self.config, self._amqp_client)
        self._publishers = {}
        for conv_type in self.SUPPORTED_APPS:
            yield self.make_publisher(conv_type)
        self.WORKER_QUEUE.put(self)

    def emit(self, obj, err=False):
        """Write obj as a line to stdout (or stderr when err is True)."""
        msg = '%s\n' % (obj,)
        if err:
            self.stderr.write(msg)
        else:
            self.stdout.write(msg)
@inlineCallbacks
def main(options):
    """Start a JsBoxSendWorker, run the jsbox send, then stop the reactor."""
    worker_creator = WorkerCreator(options.vumi_options)
    service = worker_creator.create_worker_by_class(
        JsBoxSendWorker, options.get_vumigo_config())
    service.startService()
    # startWorker() puts the fully set-up worker on this queue.
    worker = yield JsBoxSendWorker.WORKER_QUEUE.get()
    yield worker.send_jsbox(
        options['user-account-key'], options['conversation-key'],
        options['hz'], options['exclude-addresses-file'])
    reactor.stop()
if __name__ == '__main__':
    try:
        options = JsBoxSendOptions()
        options.parseOptions()
    except usage.UsageError, errortext:  # Python 2 except syntax
        print '%s: %s' % (sys.argv[0], errortext)
        print '%s: Try --help for usage details.' % (sys.argv[0])
        sys.exit(1)
    def _eb(f):
        # Print any failure's traceback so errors aren't silently dropped.
        f.printTraceback()
    def _main():
        maybeDeferred(main, options).addErrback(_eb)
    # Defer _main until the reactor is actually running.
    reactor.callLater(0, _main)
    reactor.run()
|
from ..mockingjay.hubconf import mockingjay_local as audio_albert_local
from ..mockingjay.hubconf import mockingjay_gdriveid as audio_albert_gdriveid
def audio_albert(refresh=False, *args, **kwargs):
    """
    The default model.

    Args:
        refresh (bool): whether to download ckpt/config again if existed

    Raises:
        NotImplementedError: always — no default checkpoint is published yet.
    """
    # The eventual implementation is expected to look like:
    #   kwargs['ckpt'] = '<default checkpoint id>'
    #   return audio_albert_gdriveid(refresh=refresh, *args, **kwargs)
    # (kept as a comment: the original left this as unreachable dead code
    # after the raise.)
    raise NotImplementedError
|
# Generated by Django 3.2.5 on 2021-07-18 23:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the login app: Config and User tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Config',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.CharField(max_length=10)),
                # BUG FIX: max_length is ignored on integer fields (Django
                # system check W122) and was dropped; it has no schema effect.
                ('level', models.SmallIntegerField()),
                ('token', models.CharField(max_length=3)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nick', models.CharField(max_length=25, unique=True)),
                # NOTE(review): password is a plain 16-char CharField — confirm
                # hashing happens elsewhere before this is stored.
                ('password', models.CharField(max_length=16)),
                # BUG FIX: max_length dropped here too (see W122 above).
                ('age', models.PositiveSmallIntegerField(blank=True)),
                ('config', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='login.config')),
            ],
        ),
    ]
|
# Project libraries
from . import BaseResource
from .app import App
from .build import Build
class PostDeploy(BaseResource):
    """Result of an app-setup postdeploy script run."""
    _strs = ["output"]  # string attributes: the captured script output
    _ints = ["exit_code"]  # integer attributes: the script's exit status
    def __init__(self):
        super(PostDeploy, self).__init__()
class AppSetup(BaseResource):
    """An app-setup resource combining app, build and postdeploy data."""
    # String attributes; the "postdeploy:*" entries presumably flatten the
    # nested postdeploy fields — confirm against BaseResource's handling.
    _strs = ["id", "failure_message", "postdeploy:exit_code", "postdeploy:output", "resolved_success_url", "status"]
    # Nested sub-resources and the classes used to wrap them.
    _map = {"app": App, "build": Build, "postdeploy": PostDeploy}
    _dates = ["created_at", "updated_at"]  # date-typed attributes
    _pks = ["id"]  # primary key attribute(s)
    def __init__(self):
        super(AppSetup, self).__init__()
    def __repr__(self):
        return "<appsetup '{0}' - '{1}'>".format(self.id, self.status)
|
# BUG FIX: removed a stray trailing "i" that made this line a SyntaxError.
print("hello, directory 1")
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import xml.dom.minidom
from python.Log import *
class XmlFileUtil:
    """Utility for reading and writing Android strings.xml resource files."""

    @staticmethod
    def writeToFile(keys, values, directory, filename):
        """Write parallel key/value lists as <string> resources to directory/filename.

        Entries whose value is None or empty are logged and skipped.  Keys
        and values are stripped of surrounding whitespace before writing.
        The directory is created if it does not exist.
        """
        if not os.path.exists(directory):
            os.makedirs(directory)
        # BUG FIX: the file handle used to leak if a write raised; the
        # with-block guarantees it is closed.
        with open(os.path.join(directory, filename), "wb") as fo:
            fo.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n".encode())
            for x in range(len(keys)):
                if values[x] is None or values[x] == '':
                    Log.error("Key:" + keys[x] +
                              "\'s value is None. Index:" + str(x + 1))
                    continue
                content = " <string name=\"" + keys[x].strip() + "\">" + values[x].strip() + "</string>\n"
                fo.write(content.encode())
            fo.write("</resources>".encode())

    @staticmethod
    def getKeysAndValues(path):
        """Parse `path` and return (keys, values) from its <string> elements.

        Returns None (after logging an error) when path is None.
        """
        if path is None:
            Log.error('file path is None')
            return
        dom = xml.dom.minidom.parse(path)
        root = dom.documentElement
        keys = []
        values = []
        for item in root.getElementsByTagName('string'):
            keys.append(item.getAttribute("name"))
            # BUG FIX: an empty <string></string> element has no text child;
            # treat it as "" instead of raising AttributeError on None.
            values.append(item.firstChild.data if item.firstChild is not None else "")
        return (keys, values)
|
"""Convex hull tester."""
from itertools import tee
import cv2
import convexhull
COLOR = (0, 0, 255) #BGR
def pairwise(iterable):
    """Return an iterator of overlapping adjacent pairs.

    s -> (s0,s1), (s1,s2), (s2,s3), ...  Empty and single-element inputs
    yield no pairs.

    Parameters
    ----------
    iterable : iterable object
        object to iterate over

    Returns
    -------
    iterable object
        iterable with each entry being one adjacent pair
    """
    left, right = tee(iterable)
    next(right, None)  # advance the second iterator by one element
    return zip(left, right)
def test(image, thicc=3):
    """Colored visualization of the convex hull of the image.

    Parameters
    ----------
    image : numpy.array
        binary image to be processed
    thicc : int, optional
        line thickness in pixels for the drawn hull edges (default 3)

    Returns
    -------
    numpy.array
        BGR image with convex hull drawn over it
    """
    result = convexhull.convex_hull(image)
    # Convert the single-channel input to BGR so the hull can be drawn in color.
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    for point_a, point_b in pairwise(result):
        cv2.line(image, point_a, point_b, COLOR, int(thicc))
    return image
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:mgboy time:2020/8/2
import win32api
import win32con, winreg, os, sys
def Judge_Key(key_name=None,
              reg_root=win32con.HKEY_CURRENT_USER,  # registry root node
              reg_path=r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run",  # path to the key
              abspath=None
              ):
    """Check whether a Run-key value exists and still points at abspath.

    Returns:
        int: 0 = value exists and matches abspath; 1 = missing or the stored
        path changed; 2 = permission denied; 3 = any other error.
    """
    reg_flags = win32con.WRITE_OWNER | win32con.KEY_WOW64_64KEY | win32con.KEY_ALL_ACCESS
    try:
        key = winreg.OpenKey(reg_root, reg_path, 0, reg_flags)
        # Renamed local from `type` to avoid shadowing the builtin.
        location, value_type = winreg.QueryValueEx(key, key_name)
        print("键存在", "location(数据):", location, "type:", value_type)
        feedback = 0
        if location != abspath:
            feedback = 1
            print('键存在,但程序位置发生改变')
    except FileNotFoundError as e:
        print("键不存在", e)
        feedback = 1
    except PermissionError as e:
        print("权限不足", e)
        feedback = 2
    # BUG FIX: narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt and SystemExit.
    except Exception:
        print("Error")
        feedback = 3
    return feedback
def AutoRun(switch="open",  # "open" enables auto-run, "close" disables it
            key_name=None,
            abspath=os.path.abspath(sys.argv[0])):
    """Add or remove an HKCU ...\\Run value so the program starts at login.

    switch: "open" adds/keeps the value, "close" deletes it.
    key_name: name of the registry value to manage.
    abspath: path stored in the registry. If no custom path is given,
    os.path.abspath(sys.argv[0]) resolves the main program's path (the .exe
    path once the program is packaged). NOTE(review): this default is
    evaluated once at import time — confirm that is intended.
    """
    judge_key = Judge_Key(reg_root=win32con.HKEY_CURRENT_USER,
                          reg_path=r"Software\Microsoft\Windows\CurrentVersion\Run",  # path to the key
                          key_name=key_name,
                          abspath=abspath)
    # Registry key holding per-user auto-run entries.
    KeyName = r'Software\Microsoft\Windows\CurrentVersion\Run'
    key = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, KeyName, 0, win32con.KEY_ALL_ACCESS)
    if switch == "open":
        try:
            if judge_key == 0:
                print("已经开启了,无需再开启")
            elif judge_key == 1:
                win32api.RegSetValueEx(key, key_name, 0, win32con.REG_SZ, abspath)
                win32api.RegCloseKey(key)
                print('开机自启动添加成功!')
        # BUG FIX: narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        except Exception:
            print('添加失败')
    elif switch == "close":
        try:
            if judge_key == 0:
                win32api.RegDeleteValue(key, key_name)  # delete the value
                win32api.RegCloseKey(key)
                print('成功删除键!')
            elif judge_key == 1:
                print("键不存在")
            elif judge_key == 2:
                print("权限不足")
            else:
                print("出现错误")
        # BUG FIX: narrowed from a bare `except:` (see above).
        except Exception:
            print('删除失败')
def check(state=1):
    """Toggle start-at-login: state == 1 enables it, anything else disables it.

    The key name should be distinctive — at minimum not clash with an
    existing auto-start entry's name.
    """
    action = 'open' if state == 1 else 'close'
    AutoRun(switch=action, key_name='wallzoom')
if __name__ == "__main__":
    # Demo entry point: state=0 removes the auto-start registry entry.
    check(state=0)
|
"""
A simple factory.
"""
import logger
class Factory(object):
    """Minimal name -> class registry with creation helpers."""

    def __init__(self):
        self._reg = {}  # registered name -> class
        self.log = logger.get()

    def register(self, name, cls):
        """Register cls under name; raise if the name is already taken."""
        if name not in self._reg:
            self.log.info('Registering class "{}"'.format(name), verbose=2)
            self._reg[name] = cls
        else:
            # BUG FIX: the original message was missing '.format(name)', so
            # the literal placeholder '{}' was raised instead of the name.
            raise Exception('Class "{}" already registered'.format(name))

    def create(self, _clsname, **kwargs):
        """Instantiate the registered class, forwarding kwargs to it."""
        if _clsname in self._reg:
            return self._reg[_clsname](**kwargs)
        else:
            raise Exception('Class "{}" not registered'.format(_clsname))

    def create_from_main(self, _clsname, **kwargs):
        """Instantiate the registered class and return its init_from_main() result."""
        if _clsname in self._reg:
            return self._reg[_clsname](**kwargs).init_from_main()
        else:
            raise Exception('Class "{}" not registered'.format(_clsname))
|
# source: https://github.com/TheAILearner/Snake-Game-using-OpenCV-Python/blob/master/snake_game_using_opencv.ipynb
import numpy as np
import cv2
import random
import time
def collision_with_apple(apple_position, score):
    """Respawn the apple at a random grid cell and award one point.

    Note: the incoming apple_position is ignored; a fresh position on the
    10-pixel grid inside the 500x500 board is always generated.
    Returns (new_apple_position, incremented_score).
    """
    new_position = [random.randrange(1, 50) * 10, random.randrange(1, 50) * 10]
    return new_position, score + 1
def collision_with_boundaries(snake_head):
    """Return 1 if the head [x, y] lies outside the 500x500 board, else 0."""
    x, y = snake_head[0], snake_head[1]
    inside = 0 <= x < 500 and 0 <= y < 500
    return 0 if inside else 1
def collision_with_self(snake_position):
    """Return 1 if the head (first segment) overlaps any body segment, else 0."""
    head, body = snake_position[0], snake_position[1:]
    return 1 if head in body else 0
img = np.zeros((500,500,3),dtype='uint8')  # 500x500 BGR play field
# Initial Snake and Apple position
snake_position = [[250,250],[240,250],[230,250]]  # head first, 10px segments
apple_position = [random.randrange(1,50)*10,random.randrange(1,50)*10]
score = 0
prev_button_direction = 1  # last accepted direction; used to block 180-degree turns
button_direction = 1  # 0-Left, 1-Right, 2-Down, 3-Up
snake_head = [250,250]
# Main game loop: draw the frame, poll the keyboard for ~50ms, step the
# snake one cell, then check apple/boundary/self collisions.
while True:
    cv2.imshow('a',img)
    cv2.waitKey(1)
    img = np.zeros((500,500,3),dtype='uint8')
    # Display Apple
    cv2.rectangle(img,(apple_position[0],apple_position[1]),(apple_position[0]+10,apple_position[1]+10),(0,0,255),3)
    # Display Snake
    for position in snake_position:
        cv2.rectangle(img,(position[0],position[1]),(position[0]+10,position[1]+10),(0,255,0),3)
    # Takes step after fixed time
    t_end = time.time() + 0.05
    k = -1
    # Poll until the 50ms frame time elapses, keeping only the first key
    # pressed during the frame.
    while time.time() < t_end:
        if k == -1:
            k = cv2.waitKey(1)
        else:
            continue
    # 0-Left, 1-Right, 3-Up, 2-Down, q-Break
    # a-Left, d-Right, w-Up, s-Down
    # A key reversing the current direction is ignored.
    if k == ord('a') and prev_button_direction != 1:
        button_direction = 0
    elif k == ord('d') and prev_button_direction != 0:
        button_direction = 1
    elif k == ord('w') and prev_button_direction != 2:
        button_direction = 3
    elif k == ord('s') and prev_button_direction != 3:
        button_direction = 2
    elif k == ord('q'):
        break
    else:
        button_direction = button_direction
    prev_button_direction = button_direction
    # Change the head position based on the button direction
    if button_direction == 1:
        snake_head[0] += 10
    elif button_direction == 0:
        snake_head[0] -= 10
    elif button_direction == 2:
        snake_head[1] += 10
    elif button_direction == 3:
        snake_head[1] -= 10
    # Increase Snake length on eating apple
    if snake_head == apple_position:
        apple_position, score = collision_with_apple(apple_position, score)
        snake_position.insert(0,list(snake_head))
    else:
        snake_position.insert(0,list(snake_head))
        snake_position.pop()
    # On collision kill the snake and print the score
    if collision_with_boundaries(snake_head) == 1 or collision_with_self(snake_position) == 1:
        font = cv2.FONT_HERSHEY_SIMPLEX
        img = np.zeros((500,500,3),dtype='uint8')
        cv2.putText(img,'Your Score is {}'.format(score),(140,250), font, 1,(255,255,255),2,cv2.LINE_AA)
        cv2.imshow('a',img)
        cv2.waitKey(0)
        break
cv2.destroyAllWindows()
import argparse, subprocess, os, sys, glob, re
# Command-line interface: every option has a sensible default so the script
# can be run bare inside a directory of videos.
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video_file', nargs = '?',
                    help = 'The relative path(s) to the video file(s). Default is all files in the current directory that end in .mov or .mp4.')
parser.add_argument('-o', '--output_file', nargs = '?', default = '',
                    help = 'Where to save the file(s) with the hard-coded subtitles. Default are the video file names + "-subbed" in mp4 format.')
parser.add_argument('-s', '--subtitle_file', nargs = '?', default = '',
                    help = 'Relative path to the srt file(s). Default assumes they\'re the same name as the video file(s) but with an .srt extension.')
parser.add_argument('-r', '--rename', default = False, action = 'store_true',
                    help = 'Whether to automatically rename files to avoid overwriting. The default prompts for whether to overwrite for each output file that already exists.')
parser.add_argument('-c', '--color', default = 'ffffff',
                    help = 'The color (in hex) to use for the subtitles. Default is "ffffff" (white).')
parser.add_argument('-si', '--size', default = '24',
                    help = 'The font size to use for the subtitles in pt. Default is 24.')
parser.add_argument('-b', '--border_style', default = '3',
                    help = 'The border style to use for the subtitles. Default is 3 (opaque box). 1 uses an outline + drop shadow.')
parser.add_argument('-af', '--additional_formatting', default = '',
                    help = 'Additional formatting to use for the subtitles, specified using SubStation Alpha style fields. Default is empty.')
parser.add_argument('-se', '--soft_embed', default = False, action = 'store_true',
                    help = 'Set this if you want to soft-embed the subtitles instead (in which case rendering options can be set by the video playback software).')
parser.add_argument('-vc', '--video_codec', default = 'libx264',
                    help = 'Set this to change the video codec. Default is libx264. The other valid option is libx265.')
parser.add_argument('-comp', '--compression', default = '22',
                    help = 'Set this to change the compression ratio for the output video. The default is 22. Lower numbers mean less compression.')
args = parser.parse_args()
# Resolve defaults: no video given -> every .mov/.mp4 in the cwd.
if not args.video_file:
    args.video_file = glob.glob('*.mov') + glob.glob('*.mp4')
else:
    args.video_file = glob.glob(args.video_file)
# Subtitles default to the video names with the extension swapped to .srt.
# NOTE(review): when globs are used, the three lists are paired by index and
# glob ordering is filesystem-dependent — confirm files pair up as intended.
if not args.subtitle_file:
    args.subtitle_file = [f'{file[:-4]}.srt' for file in args.video_file]
else:
    args.subtitle_file = glob.glob(args.subtitle_file)
# Outputs default to "<video>-subbed.mp4"; explicit outputs are colon-separated.
if not args.output_file:
    args.output_file = [f'{file[:-4]}-subbed.mp4' for file in args.video_file]
else:
    args.output_file = args.output_file.split(':')
# Extra ASS style fields are appended after a comma inside force_style.
if args.additional_formatting:
    args.additional_formatting = ',' + args.additional_formatting
if not len(args.video_file) == len(args.subtitle_file) == len(args.output_file):
    print('Error: number of files do not match. Exiting...')
    sys.exit(1)
# Build the ffmpeg codec argument for the selected encoder.
if args.video_codec == 'libx264':
    codec_string = f'libx264 -crf {args.compression}'
elif args.video_codec == 'libx265':
    codec_string = f'libx265 -x265-params log-level=error:crf={args.compression}'
else:
    print('Error: addsub only supports libx264 and libx265 codecs. Choose a valid codec and rerun.')
    # BUG FIX: previously execution fell through here with `codec_string`
    # undefined, crashing later with a NameError instead of exiting cleanly.
    sys.exit(1)
# Process each (video, subtitle, output) triple.
for vf, sf, of in tuple(zip(args.video_file, args.subtitle_file, args.output_file)):
    if not os.path.isfile(vf) or not os.path.isfile(sf):
        print(f'Video or subtitle file not found: {vf} (video), {sf} (subtitle). Skipping...')
        continue
    # Avoid clobbering an existing output: prompt, or auto-rename with -r.
    if os.path.isfile(of) and not args.rename:
        overwrite = input(f'File "{of}" already exists. Overwrite? [y/n/rename] ').lower()
        if not overwrite in ['y', 'rename', 'r']:
            print('Skipping...')
            continue
        elif overwrite in ['r', 'rename']:
            # Append "-1", "-2", ... before the extension until unused.
            counter = 1
            of = f'{of[:-4]}-{counter}{of[-4:]}'
            while os.path.isfile(of):
                of = f'{of[:(-5-len(str(counter - 1)))]}-{counter}{of[-4:]}'
                counter += 1
    elif os.path.isfile(of):
        # --rename given: same auto-numbering without prompting.
        counter = 1
        of = f'{of[:-4]}-{counter}{of[-4:]}'
        while os.path.isfile(of):
            of = f'{of[:(-5-len(str(counter - 1)))]}-{counter}{of[-4:]}'
            counter += 1
    # Convert sbv to srt to add them
    if os.path.splitext(sf)[1] == '.sbv':
        from captionstransformer.sbv import Reader
        from captionstransformer.srt import Writer
        with open(sf) as r:
            reader = Reader(r)
            with open(f'{os.path.splitext(sf)[0]}.srt', 'w') as w:
                writer = Writer(w)
                writer.set_captions(reader.read())
                writer.write()
                writer.close()
    print(f'Adding subtitles to {vf}...')
    if not args.soft_embed:
        # Burn the subtitles into the video stream with the requested style.
        subprocess.call(f'ffmpeg -hide_banner -loglevel warning -i "{vf}" -vf "subtitles=\'{sf}\':force_style=\'Fontsize={args.size},PrimaryColour=&H{args.color}&,BorderStyle={args.border_style}{args.additional_formatting}\'" -c:v {codec_string} -c:a copy "{of}"', cwd = os.getcwd(), shell = True)
    else:
        # Soft-embed as a selectable mov_text subtitle track.
        subprocess.call(f'ffmpeg -hide_banner -loglevel warning -i "{vf}" -i "{sf}" -c:v {codec_string} -c:a copy -c:s mov_text -disposition:s:0 default "{of}"', cwd = os.getcwd(), shell = True)
    # If the subtitle started off as sbv, try to delete the temporarily needed srt file
    # This isn't hugely important, so just pass if it fails
    if os.path.splitext(sf)[1] == '.sbv':
        try:
            os.remove(f'{os.path.splitext(sf)[0]}.srt')
        except:
            pass
print('Done!')
"""
[2015-08-19] Challenge #228 [Intermediate] Use a Web Service to Find Bitcoin Prices
https://www.reddit.com/r/dailyprogrammer/comments/3hj4o2/20150819_challenge_228_intermediate_use_a_web/
# Description
Modern web services are the core of the net. One website can leverage 1 or more other sites for rich data and mashups.
Some notable examples include the Google maps API which has been layered with crime data, bus schedule apps, and more.
Today's a bit of a departure from the typical challenge, there's no puzzle to solve but there is code to write. For
this challenge, you'll be asked to implement a call to a simple RESTful web API for Bitcoin pricing. This API was
chosen because it's freely available and doesn't require any signup or an API key. Furthermore, it's a simple GET
request to get the data you need. Other APIs work in much the same way but often require API keys for use.
The Bitcoin API we're using is documented here: http://bitcoincharts.com/about/markets-api/ Specifically we're
interested in the `/v1/trades.csv` endpoint.
Your native code API (e.g. the code you write and run locally) should take the following parameters:
- The short name of the bitcoin market. Legitimate values are (choose one):
bitfinex
bitstamp
btce
itbit
anxhk
hitbtc
kraken
bitkonan
bitbay
rock
cbx
cotr
vcx
- The short name of the currency you wish to see the price for Bitcoin in. Legitimate values are (choose one):
KRW
NMC
IDR
RON
ARS
AUD
BGN
BRL
BTC
CAD
CHF
CLP
CNY
CZK
DKK
EUR
GAU
GBP
HKD
HUF
ILS
INR
JPY
LTC
MXN
NOK
NZD
PEN
PLN
RUB
SAR
SEK
SGD
SLL
THB
UAH
USD
XRP
ZAR
The API call you make to the bitcoincharts.com site will yield a plain text response of the most recent trades,
formatted as CSV with the following fields: UNIX timestamp, price in that currency, and amount of the trade. For
example:
1438015468,349.250000000000,0.001356620000
Your API should return the current value of Bitcoin according to that exchange in that currency. For example, your API
might look like this (in F# notation to show types and args):
val getCurrentBitcoinPrice : exchange:string -> currency:string -> float
Which basically says take two string args to describe the exchange by name and the currency I want the price in and
return the latest price as a floating point value. In the above example my code would return `349.25`.
Part of today's challenge is in understanding the API documentation, such as the format of the URL and what endpoint to
contact.
# Note
Many thanks to /u/adrian17 for finding this API for this challenge - it doesn't require any signup to use.
"""
def main():
    """Entry point placeholder for the Bitcoin price challenge."""
    # Intentionally a no-op; the solution is not implemented yet.
    return None


if __name__ == "__main__":
    main()
|
import sys
import pandas as pd
import numpy as np
import pickle
from sqlalchemy import create_engine
# import tokenize_function
from models.tokenizer_function import Tokenizer
# import sklearn
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.externals import joblib
def load_data(database_filepath):
    """
    Load messages and category labels from the sqlite database.
    Args:
        database_filepath: the path of the sqlite database file
    Returns:
        X (Series): messages
        Y (DataFrame): One-hot encoded categories
        category_names (Index): names of the category columns
    """
    # BUG FIX: the path argument was ignored and a hard-coded relative
    # database path was loaded instead; build the engine URL from the
    # caller-supplied path so the CLI argument actually takes effect.
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql_table('DisasterResponse', engine)
    X = df['message']
    # Everything except the id/text/genre columns is a one-hot label.
    Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
    category_names = Y.columns
    return X, Y, category_names
def build_model():
    """
    Build the NLP pipeline (token counts, tf-idf, multi-output random
    forest) wrapped in a grid search over the forest hyper-parameters.
    Args:
        None
    Returns:
        a GridSearchCV object ready to be fitted
    """
    # Text processing followed by a multi-label random forest classifier.
    pipeline = Pipeline([
        ('tokenizer', Tokenizer()),
        ('vec', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators=100))),
    ])
    # Search over feature sampling strategy and forest size.
    param_grid = {
        'clf__estimator__max_features': ['sqrt', 0.5],
        'clf__estimator__n_estimators': [50, 100],
    }
    return GridSearchCV(estimator=pipeline, param_grid=param_grid, cv=5, n_jobs=10)
def evaluate_model(model, X_test, Y_test, category_names):
    """
    Evaluate the model's per-category f1-score, precision and recall.
    Args:
        model: the fitted model to be evaluated
        X_test: test messages
        Y_test: test label DataFrame
        category_names: category names list defined in load_data
    Returns:
        performances (DataFrame) indexed by category
    """
    predictions = model.predict(X_test)
    # One [f1, precision, recall] row per output category.
    rows = [
        [f1_score(Y_test.iloc[:, col].values, predictions[:, col], average='micro'),
         precision_score(Y_test.iloc[:, col].values, predictions[:, col], average='micro'),
         recall_score(Y_test.iloc[:, col].values, predictions[:, col], average='micro')]
        for col in range(len(category_names))
    ]
    return pd.DataFrame(rows,
                        columns=['f1 score', 'precision', 'recall'],
                        index=category_names)
def save_model(model, model_filepath):
    """
    Persist the trained model to disk as a pickle file.
    Args:
        model: the (fitted) estimator to serialize
        model_filepath: destination path for the pickle file
    """
    # BUG FIX: the file object handed to joblib.dump was never closed,
    # leaking the handle. Use a context manager, and serialize with the
    # stdlib pickle module already imported by this script instead of the
    # deprecated sklearn.externals.joblib (joblib.load reads plain pickles).
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
def main():
    """CLI entry point: load data, train, evaluate and persist the model."""
    if len(sys.argv) != 3:
        # Guard clause: bail out early with usage help on wrong arity.
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    # Hold out 20% of the data for evaluation.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, Y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""05_Groupby.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Y4LOqH8fPH5Ez3UYmoRzw86G7wcBfeNe
# Introduction to Pandas

Pandas is an open-source, BSD-licensed Python library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. You can think of pandas as an extremely powerful version of Excel, with a lot more features.
## **About iPython Notebooks**
iPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing "SHIFT"+"ENTER" or by clicking on "Run Cell" (denoted by a play symbol) in the left bar of the cell.
**In this notebook you will learn -**
* Series
* DataFrames
* Missing Data
* GroupBy
* Merging, Joining and Concatenating
* Operations
* Data Input and Output
## Importing Pandas
To import Pandas simply write the following:
"""
import numpy as np
import pandas as pd

"""# Groupby
The groupby method allows you to group rows of data together and call aggregate functions.
"""

import pandas as pd

# Create dataframe
data = {'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],
        'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'],
        'Sales':[200,120,340,124,243,350]}
df = pd.DataFrame(data)
df

"""** Now you can use the .groupby() method to group rows together based off of a column name. For instance let's group based off of Company. This will create a DataFrameGroupBy object:**"""

# Lazy: no aggregation happens until a method is called on the object.
df.groupby('Company')

"""You can save this object as a new variable:"""

by_comp = df.groupby("Company")
by_comp

"""And then call aggregate methods off the object:"""

# Mean of the numeric columns per company.
by_comp.mean()
df.groupby('Company').mean()

"""More examples of aggregate methods:"""

by_comp.std()
by_comp.min()
by_comp.max()
by_comp.count()
by_comp.describe()
# transpose() flips the summary so companies become columns.
by_comp.describe().transpose()
by_comp.describe().transpose()['GOOG']

"""# Great Job!"""
import numpy as np
import matplotlib.pyplot as plt
# Randomly choose the mixture size: 2-10 components, each with its own
# sample count (40-199), sharing a common 2-50 dimensional space.
n_component=np.random.randint(2,11)
n_samples = np.random.randint(40,200,size=n_component)
n_dimension=np.random.randint(2,51)
# Per-component mean in [0, 100) and per-dimension scale in [0, 10).
random_mu=100*np.random.random((n_component,n_dimension))
random_sigma=10*np.random.random((n_component,n_dimension))
# Accumulate samples row-wise and integer component labels alongside.
X=np.zeros((0,n_dimension))
label_y=np.zeros(0)
for component_index in range(n_component):
    # Draw standard normals, then scale and shift into this component.
    X=np.concatenate((
        X,random_sigma[component_index]*np.random.randn(
            n_samples[component_index],n_dimension)+\
        random_mu[component_index]),axis=0)
    label_y=np.concatenate((
        label_y,component_index*np.ones(
            n_samples[component_index])
    ))
# only show 2 dimension
plt.scatter(X[:,0],X[:,1],c=label_y,s=3)
|
from tokenizers import BertWordPieceTokenizer
def train(data):
    """Train a BERT WordPiece tokenizer on the given training file(s).

    Args:
        data: path (or list of paths) forwarded to tokenizer.train's
              `files` argument.
    Returns:
        The trained BertWordPieceTokenizer instance.
    """
    tokenizer = BertWordPieceTokenizer()
    # 52k subwords; a token must occur at least twice to enter the vocab.
    tokenizer.train(files=data, vocab_size=52_000, min_frequency=2)
    return tokenizer
if __name__ == "__main__":
    import argparse
    import sys
    import os

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_data", type=str, help="Path to training data")
    parser.add_argument("--output_dir_name", type=str, help="Output directory")
    args = parser.parse_args()

    tokenizer = train(args.train_data)
    # os.path.join handles trailing separators portably; makedirs with
    # exist_ok creates missing parent directories and, unlike the previous
    # isdir-then-mkdir dance, does not race or fail on re-runs.
    tok_dir = os.path.join(args.output_dir_name, "tokenizers")
    os.makedirs(tok_dir, exist_ok=True)
    tokenizer.save_model(tok_dir)
|
import tempfile
from otoole.visualise import create_res
# Zipped sample datapackage used as the RES input.
url = "tests/fixtures/simplicity-v0.2.1.zip"


def test_create_res():
    """create_res should render the RES to a PDF at the given path."""
    import os
    # BUG FIX: mkstemp returns an *open* OS-level file descriptor; the
    # original discarded it, leaking the descriptor on every run. Close it,
    # and remove the temp file afterwards so test runs don't accumulate files.
    fd, path_to_resfile = tempfile.mkstemp(suffix=".pdf")
    os.close(fd)
    try:
        create_res(url, path_to_resfile)
    finally:
        os.remove(path_to_resfile)
|
# Copyright the Karmabot authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'karmabot' and is distributed under the BSD license.
# See LICENSE for more details.
import re
from karmabot.core import storage
# TODO: regular expressions in this module should be
# replaced with something more robust and more efficient.
# TODO: stripping listen commands such as --/++
class Command(object):
    """A single chat command: a format string plus the handler to invoke.

    `format` may contain `{name}` placeholders which to_regex() turns into
    named capture groups; `exclusive` commands stop further matching and
    `visible` controls whether the command appears in help output.
    """

    def __init__(self, parent, format, handler,
                 help=None, visible=True, exclusive=False):
        self.parent = parent
        self.format = format
        self.handler = handler
        self.help = help
        self.visible = visible
        self.exclusive = exclusive
        self.state = None

    def to_regex(self):
        """Translate the command format string into a regex pattern.

        `{subject}` becomes a group matching either a parenthesised phrase
        or a single word; any other placeholder matches greedily.
        """
        def replace_placeholder(match):
            field = match.group(1)
            if field == "subject":
                body = r"(?:\([^()]+\))|[#!\w]+"
            else:
                # This regex may come back to haunt me.
                body = r".+"
            return r"(?P<{name}>{regex})".format(name=field, regex=body)

        # Escape literal '+' (e.g. "{subject}++") before placeholder expansion.
        escaped = self.format.replace("+", r"\+")
        return re.sub(r"{(\w+)}", replace_placeholder, escaped)
class CommandParser(object):
    """Matches incoming chat text against registered commands and
    dispatches their handlers, recursing on text substitutions."""

    def __init__(self, command_infos):
        # Each entry is a dict with at least "re" (compiled regex),
        # "command" (a Command) and "exclusive" (bool).
        self.command_infos = command_infos

    def __call__(self, text, context, handled=False):
        return self.handle_command(text, context, handled)

    def handle_command(self, text, context, handled=False):
        # Try each command in registration order against the text.
        for command_info in self.command_infos:
            match = command_info["re"].search(text)
            if match:
                instance = None
                match_group = match.groupdict()
                subject = match_group.get('subject', None)
                command = command_info['command']
                match_group.update({'context': context})
                if subject:
                    # Resolve the subject name to its stored object, then
                    # pick the facet owned by the handler's module (the last
                    # component of the handler's module path).
                    match_group.update(
                        {'subject': storage.db.get(subject)})
                    handler_cls = command.handler.__module__.split('.').pop()
                    instance = match_group['subject'].facets.get(handler_cls)
                substitution = self.dispatch_command(command,
                                                     instance, match_group)
                handled = True
                if substitution:
                    # Start over with the new string
                    newtext = ''.join([text[:match.start()], substitution,
                                       text[match.end():]])
                    return self.handle_command(newtext, context, True)
                if command_info["exclusive"]:
                    break
        return (handled, text)

    def dispatch_command(self, command, instance, kw):
        # Facet-bound commands mutate the instance and persist it; they
        # return None so no text substitution happens. Unbound commands may
        # return replacement text for the matched span.
        if instance:
            context = kw.get('context')
            command.handler(instance, **kw)
            if context:
                storage.db.set(instance.subject.key,
                               instance.subject)
            return None
        else:
            return command.handler(command, **kw)
|
from board import boards
# Error handling
def get_input_int(string):
    """Prompt the user until they enter a valid integer.

    Args:
        string: the prompt shown to the user.
    Returns:
        The entered value as an int.
    Exits the program after 10 invalid attempts.
    """
    # BUG FIX: the original nested for/else construct silently returned None
    # after ten bad inputs — its "Too many failed attempts." branch was
    # unreachable — and its bare `except:` also swallowed Ctrl-C. A single
    # bounded loop catching only ValueError implements the evident intent.
    for _ in range(10):
        try:
            return int(input(string))
        except ValueError:
            print('Invalid Input')
    print('Too many failed attempts.')
    exit()
def play_game(guesses, user_guess, rand, choice, letters, misses, word_length, show, b=boards):
    """Run the hangman game loop until the word is found or guesses run out.

    Args:
        guesses: remaining wrong guesses (the art list is indexed by 6-guesses)
        user_guess: the player's latest guess (seeded by the caller)
        rand: the secret word
        choice: letter/word mode selector (overwritten each turn)
        letters: accumulator of correctly guessed letters
        misses: accumulator of wrong guesses
        word_length: length of the secret word, used in the prompts
        show: display list of revealed letters / placeholders
        b: the ASCII hangman art frames (defaults to boards)
    """
    # Loop which occurs the entirety of the game
    while guesses > 0 and user_guess != rand:
        # User enters if they would like to guess letter or word
        choice = input(f"Please enter 'l' if you would like to guess a letter or 'w' if you would like to guess the {word_length} letter word: ")
        if choice == "l":
            user_guess = input('Please guess a letter: ')
            # Checks if letter is in the word
            if user_guess[0] in rand:
                letters.append(user_guess)
                print('Correct')
                print(b[6-guesses])
                # Creates a list of indices that point to the guessed letter in the word
                hold = [i for i, x in enumerate(rand) if x==user_guess[0]]
                for x in hold:
                    show[x] = user_guess
                print(' '.join(show))
            # Checks if letter is not in the word
            elif user_guess[0] not in rand:
                misses.append(user_guess)
                print('That letter is not in the word.')
                guesses -= 1
                print(b[6-guesses])
                print("Guesses: " + ', '.join(misses))
                print(f'You have {guesses} guesses left.')
                if guesses == 0:
                    print(f"You lose...\nThe word was {rand}.")
        elif choice == "w":
            user_guess = input(f'Please guess the {word_length} letter word: ')
            # Checks if guessed word is the word
            if user_guess == rand:
                print("You Win!")
                exit()
            # This is for if the guessed word was incorrect
            else:
                print("That is incorrect.")
                guesses -= 1
                print(b[6-guesses])
                print("Guesses: " + ', '.join(misses))
                print(f'You have {guesses} guesses left.')
                if guesses == 0:
                    print(f"You lose...\nThe word was {rand}.")
        else:
            print('Invalid Input')
class FinancialSession(object):
    ''' The main entry point of the API.

    Stub interface: concrete backends are expected to override these
    methods; all bodies are currently placeholders.
    '''
    def begin(self):
        # Open/authenticate the session (not yet implemented).
        pass

    def list_accounts(self):
        # Return the available Account objects (not yet implemented).
        pass

    def list_transactions(self, account):
        # Return Transaction objects for the given account (not yet implemented).
        pass

    def end(self):
        # Close the session (not yet implemented).
        pass
class Account(object):
    ''' A bank account identified by its name, number and clearing number.
    '''
    def __init__(self, name, number, clearing_number):
        # Store the identifying fields verbatim.
        self.name, self.number, self.clearing_number = (
            name, number, clearing_number)
class Transaction(object):
    ''' A single transaction on an account.
    '''
    def __init__(self, message, amount, timestamp):
        # Keep the three fields exactly as supplied by the backend.
        self.message, self.amount, self.timestamp = message, amount, timestamp
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.api.validation import parameter_types
from tacker.sol_refactored.api.schemas import common_types
# SOL003 5.5.2.3
# Body of the "create VNF identifier" request.
CreateVnfRequest_V200 = {
    'type': 'object',
    'properties': {
        'vnfdId': common_types.Identifier,
        'vnfInstanceName': {'type': 'string', 'maxLength': 255},
        'vnfInstanceDescription': {'type': 'string', 'maxLength': 1024},
        'metadata': parameter_types.keyvalue_pairs,
    },
    'required': ['vnfdId'],
    'additionalProperties': True,
}

# SOL003 5.5.2.4
# Body of the "instantiate VNF" LCM operation request.
InstantiateVnfRequest_V200 = {
    'type': 'object',
    'properties': {
        'flavourId': common_types.IdentifierInVnfd,
        'instantiationLevelId': common_types.IdentifierInVnfd,
        'extVirtualLinks': {
            'type': 'array',
            'items': common_types.ExtVirtualLinkData},
        'extManagedVirtualLinks': {
            'type': 'array',
            'items': common_types.ExtManagedVirtualLinkData},
        # Map keyed by arbitrary VIM connection ids.
        'vimConnectionInfo': {
            'type': 'object',
            'patternProperties': {
                '^.*$': common_types.VimConnectionInfo
            },
        },
        'localizationLanguage': {'type': 'string', 'maxLength': 255},
        'additionalParams': parameter_types.keyvalue_pairs,
        'extensions': parameter_types.keyvalue_pairs,
        'vnfConfigurableProperties': parameter_types.keyvalue_pairs
    },
    'required': ['flavourId'],
    'additionalProperties': True,
}

# SOL003 5.5.2.8
# Body of the "terminate VNF" LCM operation request.
TerminateVnfRequest_V200 = {
    'type': 'object',
    'properties': {
        'terminationType': {
            'type': 'string',
            'enum': [
                'FORCEFUL',
                'GRACEFUL']
        },
        # Only meaningful for GRACEFUL termination.
        'gracefulTerminationTimeout': {
            'type': 'integer', 'minimum': 1
        },
        'additionalParams': parameter_types.keyvalue_pairs,
    },
    'required': ['terminationType'],
    'additionalProperties': True,
}
# SOL013 8.3.4
# Credentials used when delivering notifications to the subscriber's
# callback URI.
_SubscriptionAuthentication = {
    'type': 'object',
    'properties': {
        'authType': {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': [
                    'BASIC',
                    'OAUTH2_CLIENT_CREDENTIALS',
                    'TLS_CERT']
            }
        },
        'paramsBasic': {
            'type': 'object',
            'properties': {
                'userName': {'type': 'string'},
                'password': {'type': 'string'}
            },
            # NOTE: must be specified since the way to specify them out of
            # band is not supported.
            'required': ['userName', 'password']
        },
        'paramsOauth2ClientCredentials': {
            'type': 'object',
            'properties': {
                'clientId': {'type': 'string'},
                'clientPassword': {'type': 'string'},
                'tokenEndpoint': {'type': 'string'}
            },
            # NOTE: must be specified since the way to specify them out of
            # band is not supported.
            'required': ['clientId', 'clientPassword', 'tokenEndpoint']
        }
    },
    'required': ['authType'],
    'additionalProperties': True,
}
# SOL003 4.4.1.5 inner
_VnfProductVersions = {
'type': 'array',
'items': {
'type': 'objects',
'properties': {
'vnfSoftwareVersion': {'type': 'string'},
'vnfdVersions': {
'type': 'array',
'items': {'type': 'string'}
}
},
'required': ['vnfSoftwareVersion'],
'additionalProperties': True,
}
}
# SOL003 4.4.1.5 inner
# Products of one provider, each with its version list.
_VnfProducts = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'vnfProductName': {'type': 'string'},
            'versions': _VnfProductVersions
        },
        'required': ['vnfProductName'],
        'additionalProperties': True,
    }
}
# SOL003 4.4.1.5 inner
# Per-provider product filters.
_VnfProductsFromProviders = {
    'type': 'array',
    'items': {
        'type': 'object',
        # BUG FIX: 'type'/'properties' were nested one level too deep (they
        # appeared *inside* 'properties'), so items were treated as objects
        # with literal "type"/"properties" keys and the required
        # 'vnfProvider' field was never actually described.
        'properties': {
            'vnfProvider': {'type': 'string'},
            'vnfProducts': _VnfProducts
        },
        'required': ['vnfProvider'],
        'additionalProperties': True,
    }
}
# SOL003 4.4.1.5
# Filter selecting VNF instances by vnfd id, product/provider, instance id
# or instance name.
_VnfInstanceSubscriptionFilter = {
    'type': 'object',
    'properties': {
        'vnfdIds': {
            'type': 'array',
            'items': common_types.Identifier
        },
        'vnfProductsFromProviders': _VnfProductsFromProviders,
        'vnfInstanceIds': {
            'type': 'array',
            'items': common_types.Identifier
        },
        'vnfInstanceNames': {
            'type': 'array',
            'items': {'type': 'string'}
        }
    },
    'additionalProperties': True,
}

# SOL003 5.5.3.12
# Notification filter: which instances, notification types, LCM operation
# types and operation states the subscriber wants to hear about.
_LifecycleChangeNotificationsFilter = {
    'type': 'object',
    'properties': {
        'vnfInstanceSubscriptionFilter': _VnfInstanceSubscriptionFilter,
        'notificationTypes': {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': [
                    'VnfLcmOperationOccurrenceNotification',
                    'VnfIdentifierCreationNotification',
                    'VnfIdentifierDeletionNotification']
            }
        },
        'operationTypes': {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': [
                    'INSTANTIATE',
                    'SCALE',
                    'SCALE_TO_LEVEL',
                    'CHANGE_FLAVOUR',
                    'TERMINATE',
                    'HEAL',
                    'OPERATE',
                    'CHANGE_EXT_CONN',
                    'MODIFY_INFO']
            }
        },
        'operationStates': {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': [
                    'STARTING',
                    'PROCESSING',
                    'COMPLETED',
                    'FAILED_TEMP',
                    'FAILED',
                    'ROLLING_BACK',
                    'ROLLED_BACK']
            }
        }
    },
    'additionalProperties': True,
}

# SOL003 5.5.2.15
# Body of the "create LCM notification subscription" request.
LccnSubscriptionRequest_V200 = {
    'type': 'object',
    'properties': {
        'filter': _LifecycleChangeNotificationsFilter,
        'callbackUri': {'type': 'string', 'maxLength': 255},
        'authentication': _SubscriptionAuthentication,
        # Verbosity of delivered notifications: FULL or SHORT payloads.
        'verbosity': {
            'type': 'string',
            'enum': ['FULL', 'SHORT']
        }
    },
    'required': ['callbackUri'],
    'additionalProperties': True,
}
|
# Generated by Django 3.2.5 on 2021-09-14 10:46
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``level`` field from
    ``ParticipantCert`` (follows 0032_participantcert_verified)."""

    dependencies = [
        ('participant_profile', '0032_participantcert_verified'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='participantcert',
            name='level',
        ),
    ]
|
import os
import pytest
from dagster import (
PipelineDefinition,
ModeDefinition,
execute_pipeline,
ResourceDefinition,
DagsterInvalidDefinitionError,
)
from dagster_aws.s3.resources import S3Resource
from dagster.core.storage.file_cache import fs_file_cache, LocalFileHandle
from dagster.seven import mock
from dagster.utils.test import get_temp_dir
from dagster_examples.airline_demo.cache_file_from_s3 import cache_file_from_s3
def execute_solid_with_resources(solid_def, resources, environment_dict):
    """Wrap a single solid in a throwaway pipeline and execute it."""
    mode = ModeDefinition(resources=resources)
    pipeline = PipelineDefinition(
        name='{}_solid_test'.format(solid_def.name),
        solid_defs=[solid_def],
        mode_definitions=[mode],
    )
    return execute_pipeline(pipeline, environment_dict)
def test_cache_file_from_s3_basic():
    # The S3 client is mocked; only the call pattern is verified.
    s3_session = mock.MagicMock()
    with get_temp_dir() as temp_dir:
        pipeline_result = execute_solid_with_resources(
            cache_file_from_s3,
            resources={
                'file_cache': fs_file_cache,
                's3': ResourceDefinition.hardcoded_resource(S3Resource(s3_session)),
            },
            environment_dict={
                'solids': {
                    'cache_file_from_s3': {
                        'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}}
                    }
                },
                'resources': {'file_cache': {'config': {'target_folder': temp_dir}}},
            },
        )

        # assert the download occurred
        assert s3_session.download_file.call_count == 1

        assert pipeline_result.success

        solid_result = pipeline_result.result_for_solid('cache_file_from_s3')
        assert solid_result.success

        # The solid emits a single expectation confirming the cached file exists.
        expectation_results = solid_result.expectation_results_during_compute
        assert len(expectation_results) == 1
        expectation_result = expectation_results[0]
        assert expectation_result.success
        assert expectation_result.label == 'file_handle_exists'
        path_in_metadata = expectation_result.metadata_entries[0].entry_data.path
        assert isinstance(path_in_metadata, str)
        assert os.path.exists(path_in_metadata)

        # The returned local handle's path echoes the S3 key by default.
        assert isinstance(solid_result.result_value(), LocalFileHandle)
        assert 'some-key' in solid_result.result_value().path_desc
def test_cache_file_from_s3_specify_target_key():
    # Same as the basic test, but the solid config overrides the cache key.
    s3_session = mock.MagicMock()
    with get_temp_dir() as temp_dir:
        pipeline_result = execute_solid_with_resources(
            cache_file_from_s3,
            resources={
                'file_cache': fs_file_cache,
                's3': ResourceDefinition.hardcoded_resource(S3Resource(s3_session)),
            },
            environment_dict={
                'solids': {
                    'cache_file_from_s3': {
                        'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}},
                        'config': {'file_key': 'specified-file-key'},
                    }
                },
                'resources': {'file_cache': {'config': {'target_folder': temp_dir}}},
            },
        )

        # assert the download occurred
        assert s3_session.download_file.call_count == 1

        assert pipeline_result.success

        solid_result = pipeline_result.result_for_solid('cache_file_from_s3')
        assert solid_result.success
        # The handle is named after the configured key, not the S3 key.
        assert isinstance(solid_result.result_value(), LocalFileHandle)
        assert 'specified-file-key' in solid_result.result_value().path_desc
def test_cache_file_from_s3_skip_download():
    # Two runs against the same cache folder: the second must hit the cache
    # and never touch S3.
    with get_temp_dir() as temp_dir:
        s3_session_one = mock.MagicMock()
        pipeline_result_one = execute_solid_with_resources(
            cache_file_from_s3,
            resources={
                'file_cache': fs_file_cache,
                's3': ResourceDefinition.hardcoded_resource(S3Resource(s3_session_one)),
            },
            environment_dict={
                'solids': {
                    'cache_file_from_s3': {
                        'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}}
                    }
                },
                'resources': {'file_cache': {'config': {'target_folder': temp_dir}}},
            },
        )

        assert pipeline_result_one.success

        # assert the download occurred
        assert s3_session_one.download_file.call_count == 1

        s3_session_two = mock.MagicMock()
        pipeline_result_two = execute_solid_with_resources(
            cache_file_from_s3,
            resources={
                'file_cache': fs_file_cache,
                's3': ResourceDefinition.hardcoded_resource(S3Resource(s3_session_two)),
            },
            environment_dict={
                'solids': {
                    'cache_file_from_s3': {
                        'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}}
                    }
                },
                'resources': {'file_cache': {'config': {'target_folder': temp_dir}}},
            },
        )

        assert pipeline_result_two.success

        # assert the download did not occur because file is already there
        assert s3_session_two.download_file.call_count == 0
def test_cache_file_from_s3_overwrite():
with get_temp_dir() as temp_dir:
s3_session_one = mock.MagicMock()
pipeline_result_one = execute_solid_with_resources(
cache_file_from_s3,
resources={
'file_cache': fs_file_cache,
's3': ResourceDefinition.hardcoded_resource(S3Resource(s3_session_one)),
},
environment_dict={
'solids': {
'cache_file_from_s3': {
'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}}
}
},
'resources': {
'file_cache': {'config': {'target_folder': temp_dir, 'overwrite': True}}
},
},
)
assert pipeline_result_one.success
# assert the download occured
assert s3_session_one.download_file.call_count == 1
s3_session_two = mock.MagicMock()
pipeline_result_two = execute_solid_with_resources(
cache_file_from_s3,
resources={
'file_cache': fs_file_cache,
's3': ResourceDefinition.hardcoded_resource(s3_session_two),
},
environment_dict={
'solids': {
'cache_file_from_s3': {
'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}}
}
},
'resources': {
'file_cache': {'config': {'target_folder': temp_dir, 'overwrite': True}}
},
},
)
assert pipeline_result_two.success
# assert the download did not occur because file is already there
assert s3_session_two.download_file.call_count == 0
def test_missing_resources():
    # Omitting the 's3' resource required by the solid must fail at
    # pipeline definition time.
    with pytest.raises(DagsterInvalidDefinitionError):
        with get_temp_dir() as temp_dir:
            execute_solid_with_resources(
                cache_file_from_s3,
                resources={'file_cache': fs_file_cache},
                environment_dict={
                    'solids': {
                        'cache_file_from_s3': {
                            'inputs': {'bucket_data': {'bucket': 'some-bucket', 'key': 'some-key'}}
                        }
                    },
                    'resources': {'file_cache': {'config': {'target_folder': temp_dir}}},
                },
            )
|
from twisted.plugin import IPlugin
from heufybot.moduleinterface import IBotModule
from heufybot.modules.commandinterface import BotCommand
from zope.interface import implements
class ChatmapCommand(BotCommand):
implements(IPlugin, IBotModule)
name = "DBChatmap"
chatmapBaseUrl = "https://chatmap.raptorpond.com/"
def triggers(self):
    """Return the chat commands this module responds to."""
    command_words = ["chatmap", "addmap", "remmap"]
    return command_words
def actions(self):
    # Also react when another module updates or deletes a stored user
    # location, keeping the chatmap in sync automatically.
    return super(ChatmapCommand, self).actions() + [ ("userlocation-updated", 1, self.setLocation),
                                                     ("userlocation-deleted", 1, self.deleteLocation) ]
def load(self):
    """Set the help texts and read the chatmap API key from bot storage."""
    self.help = "Commands: chatmap, addmap, remmap | View the Desert Bus Chatmap or add or remove your location to" \
                " or from it."
    self.commandHelp = {
        "chatmap": "chatmap | View the Desert Bus Chatmap.",
        "addmap": "addmap | Add your location to the Desert Bus Chatmap. Your location needs to already exist in storage.",
        "remmap": "remmap | Remove your location from the Desert Bus Chatmap."
    }
    # The API key is optional; commands that need it bail out politely
    # in execute() when it is absent.
    self.apiKey = None
    if "dbchatmap" in self.bot.storage["api-keys"]:
        self.apiKey = self.bot.storage["api-keys"]["dbchatmap"]
def execute(self, server, source, command, params, data):
if command == "chatmap":
self.replyPRIVMSG(server, source, "Desert Bus Chatmap: {}".format(self.chatmapBaseUrl))
return
if not self.apiKey:
self.replyPRIVMSG(server, source, "No Desert Bus Chatmap API key found.")
return
if command == "addmap":
loc = self.bot.moduleHandler.runActionUntilValue("userlocation", server, source, data["user"].nick, True)
if not loc or not loc["success"]:
return
self.replyPRIVMSG(server, source, self.setLocation(data["user"].nick, loc["place"], False))
elif command == "remmap":
self.replyPRIVMSG(server, source, self.deleteLocation(data["user"].nick))
def setLocation(self, nick, location, checkExists = True):
url = "{}api/chatizen/{}".format(self.chatmapBaseUrl, nick.lower())
extraHeaders = { "Cookie": "password={}".format(self.apiKey) }
if checkExists:
result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, None, extraHeaders)
if not result or result.status_code == 404:
return
userloc = self.bot.moduleHandler.runActionUntilValue("geolocation-place", location)
data = "{{ \"lat\": {}, \"lon\": {} }}".format(userloc["latitude"], userloc["longitude"])
setResult = self.bot.moduleHandler.runActionUntilValue("post-url", url, data, extraHeaders)
if setResult and setResult.status_code == 204:
return "Your location has been added to the chatmap."
else:
self.bot.log.warn(setResult)
return "Something went wrong while adding your location to the chatmap."
def deleteLocation(self, nick):
url = "{}api/chatizen/{}".format(self.chatmapBaseUrl, nick.lower())
extraHeaders = {"Cookie": "password={}".format(self.apiKey) }
result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, None, extraHeaders)
if not result or result.status_code == 404:
return "Your location on the chatmap could not be determined."
deleteResult = self.bot.moduleHandler.runActionUntilValue("delete-url", url, extraHeaders)
if deleteResult.status_code == 204:
return "Your location has been removed from the chatmap."
else:
self.bot.log.warn(deleteResult)
return "Something went wrong while removing your location from the chatmap."
chatmapCommand = ChatmapCommand()
|
import glob
import json
import optparse
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
from common.utils import DATA_DIR
def count_min_posts(folder, min_posts):
    """Count users in *folder* with at least *min_posts* posts, per class.

    Relies on the module-level globals ``dataset`` and ``patients_label``
    assigned in the __main__ block.
    """
    # optparse delivers command-line values as strings; coerce explicitly
    # (the original compared str to int, a TypeError on Python 3).
    min_posts = int(min_posts)
    json_pattern = os.path.join('..', DATA_DIR, 'pos_tags_{}_embeds_filtered'.format(dataset), folder, '*.json')
    json_files = [pos_json for pos_json in glob.glob(json_pattern) if pos_json.endswith('.json')]
    sum_controls = 0
    sum_patients = 0
    for jfile in json_files:
        with open(jfile, encoding='utf-8') as f:
            pos_tags = json.load(f)
        label = pos_tags['label']
        if len(pos_tags['tokens']) < min_posts:
            continue
        if label == 'control':
            sum_controls += 1
        elif label == patients_label:
            # BUG FIX: the label was hard-coded to 'schizophrenia' while the
            # counter was printed as 'depression'; use the dataset-dependent
            # patients_label like the sibling counters do.
            sum_patients += 1
    print('Overall controls num: {} and patients num: {}'.format(sum_controls, sum_patients))
def count_all_posts():
    """Print the total number of posts across every user JSON file.

    Uses the module-level global ``dataset`` set in the __main__ block.
    """
    pattern = os.path.join('..', DATA_DIR, 'pos_tags_{}_embeds'.format(dataset), '*.json')
    total = 0
    for path in glob.glob(pattern):
        if not path.endswith('.json'):
            continue
        with open(path, encoding='utf-8') as handle:
            user = json.load(handle)
        # 'tokens' holds one list of tokens per post.
        total += len(user['tokens'])
    print('Overall posts num: {}\n'.format(total))
def count_users_with_embeddings():
    """Print how many users with embeddings belong to each class.

    Uses the module-level globals ``dataset`` and ``patients_label``.
    """
    pattern = os.path.join('..', DATA_DIR, 'pos_tags_{}_embeds'.format(dataset), '*.json')
    controls, patients = 0, 0
    for path in glob.glob(pattern):
        if not path.endswith('.json'):
            continue
        with open(path, encoding='utf-8') as handle:
            user = json.load(handle)
        label = user['label']
        if label == 'control':
            controls += 1
        elif label == patients_label:
            patients += 1
        else:
            print("Unidentified label: {}".format(label))
    print('Overall controls num: {} and patients num: {}'.format(controls, patients))
def print_svd():
    """Project all post embeddings to 2-D with truncated SVD and save a scatter plot.

    Uses the module-level global ``dataset`` set in the __main__ block.
    Output is written to 'dbscan_svd.png' in the working directory.
    """
    print("*******Starting to run!*******")
    json_pattern = os.path.join('..', DATA_DIR, 'pos_tags_{}_embeds'.format(dataset), '*.json')
    json_files = [pos_json for pos_json in glob.glob(json_pattern) if pos_json.endswith('.json')]
    all_embeddings = []
    for i, file in enumerate(json_files):
        with open(file, encoding='utf-8') as f:
            pos_tags = json.load(f)
        posts_list = pos_tags['embeddings']
        all_embeddings.extend(posts_list)
        if i % 150 == 0:
            print("Finished loading {} users".format(i))
        # NOTE(review): stops after 7 users — looks like a debugging leftover;
        # remove this break to plot the full dataset. Confirm before changing.
        if i == 6:
            break
    print("*******Finished loading all of the vectors*******")
    # standardize the data before SVD
    X = StandardScaler().fit_transform(all_embeddings)
    svd = TruncatedSVD(n_components=2).fit_transform(X)
    # colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, X.shape[0])]
    plt.scatter(svd[:, 0], svd[:, 1], s=0.5, linewidths=0.5, alpha=0.7)
    plt.savefig('dbscan_svd.png')
def count_participants_datafile():
    """Count controls and patients in the all_data_<dataset>.csv file.

    Uses the module-level globals ``dataset``, ``patients_label`` and
    ``options`` assigned in the __main__ block.
    """
    from tqdm import tqdm
    import pandas as pd
    import sys
    data = pd.read_csv(os.path.join('..', DATA_DIR, 'all_data_{}.csv'.format(dataset)))
    # data = data.head(10_000)
    controls = 0
    patients = 0
    # BUG FIX: the loop variable was also named 'data', shadowing the
    # DataFrame being iterated; renamed to 'row' (and dropped unused user_id).
    for _, row in tqdm(data.iterrows(), file=sys.stdout, total=len(data), leave=False, desc='Users'):
        label = row['label']
        if label == 'control':
            controls += 1
        elif label == patients_label:
            patients += 1
    print('There are {} controls and {} patients in {}'.format(controls, patients, options.dataset))
if __name__ == "__main__":
    parser = optparse.OptionParser()
    parser.add_option('--dataset', choices=['rsdd', 'smhd', 'tssd'], default='rsdd', action="store")
    parser.add_option('--svd', default=False, action="store_true")
    parser.add_option('--count_posts', default=False, action="store_true")
    parser.add_option('--count_users', default=False, action="store_true")
    parser.add_option('--count_datafile', default=False, action="store_true")
    parser.add_option('--count_min_posts', default=False, action="store_true")
    parser.add_option('--folder', default='45_100', action="store")
    # BUG FIX: without type='int' a command-line value arrives as a string and
    # the `len(...) < min_posts` comparison raises TypeError on Python 3.
    parser.add_option('--min_posts', default=20, type='int', action="store")
    options, _ = parser.parse_args()
    # Globals read by the counting functions above.
    dataset = options.dataset
    # rsdd contains depression patients; smhd/tssd contain schizophrenia.
    patients_label = 'depression' if options.dataset == 'rsdd' else 'schizophrenia'
    if options.svd:
        print_svd()
    if options.count_posts:
        count_all_posts()
    if options.count_users:
        count_users_with_embeddings()
    if options.count_datafile:
        count_participants_datafile()
    if options.count_min_posts:
        count_min_posts(options.folder, options.min_posts)
|
# -*- coding: utf-8 -*-
"""Ultralight math notebook.
Auto-print top-level expressions, auto-assign last result as _.
"""
# This is the kind of thing thinking with macros does to your program. ;)
from ast import Expr
from macropy.core.quotes import macros, q, ast_literal
def nb(body, args):
    """Macro: rewrite a block so every top-level expression is auto-printed
    and its value rebound to ``_`` (ultralight notebook semantics).

    ``args[0]``, when given, is an AST for a custom print function that
    replaces the built-in ``print``.
    """
    p = args[0] if args else q[print]  # custom print function hook
    newbody = []
    # Prologue: initialise _ and bind the chosen print function once.
    with q as init:
        _ = None
        theprint = ast_literal[p]
    newbody.extend(init)
    for stmt in body:
        # Non-expression statements pass through untouched.
        if type(stmt) is not Expr:
            newbody.append(stmt)
            continue
        # Expression statement: capture its value in _ and print it
        # unless it evaluated to None.
        with q as newstmts:
            _ = ast_literal[stmt.value]
            if _ is not None:
                theprint(_)
        newbody.extend(newstmts)
    return newbody
|
import timeit
class Runtime:
    """Decorator class that measures the wall-clock runtime of a function.

    ``report_name`` is either a string or a callable computing the metric
    name from the call arguments; ``meter`` is the reporting backend.
    """
    def __init__(self, report_name, meter):
        self.report_name = report_name
        self.meter = meter

    def __call__(self, fn):
        def wrapper_bench(*args, **kwargs):
            # BUG FIX: timeit.timeit() *benchmarks a `pass` statement* — it is
            # not a clock, so end-start was meaningless. Use the monotonic
            # default timer to measure elapsed time.
            start = timeit.default_timer()
            result = fn(*args, **kwargs)
            elapsed = timeit.default_timer() - start
            report_name = self.report_name
            # callable() replaces isinstance(..., type(lambda: 1)), which only
            # matched plain functions and missed other callables.
            if callable(report_name):
                report_name = report_name(*args, **kwargs)
            # get_client().timing(report_name, elapsed)
            return result
        return wrapper_bench
class Catch:
    """Decorator class that reports any exception raised by the wrapped
    function to the meter before re-raising it unchanged."""
    def __init__(self, report_name, meter):
        self.report_name = report_name
        self.meter = meter

    def __call__(self, fn):
        def wrapper_catch(*args, **kwargs):
            try:
                result = fn(*args, **kwargs)
            except Exception as err:
                # Surface the failure to the meter, then propagate it.
                self.meter.message(err)
                raise err
            return result
        return wrapper_catch
class Decorators:
    """Mixin exposing the measurement decorators bound to this meter instance."""
    def runtime(self, report_name):
        """Return a Runtime decorator reporting under *report_name*."""
        return Runtime(report_name, self)

    def catch(self, report_name):
        """Return a Catch decorator reporting under *report_name*."""
        return Catch(report_name, self)
|
import dash_carbon_components as dca
import dash_html_components as html
from ... import LOA_REPO_URL, REPO_URL
# Landing page layout: a single card with the demo description and two
# repository link buttons.
landing_layout = dca.Grid(
    style={
        'padding': '16px',
        'height': 'calc(100% - 75px)',
        'overflow': 'auto',
        'width': '75%'
    },
    className='bx--grid--narrow bx--grid--full-width',
    children=[
        dca.Row(children=[
            dca.Column(columnSizes=['sm-4'], children=[
                dca.Card(
                    id='landing_card',
                    children=[
                        html.H1("NeSA Demo",
                                style={
                                    'padding-top': '10px',
                                    'padding-bottom': '10px',
                                }),
                        html.P(
                            # Typo fix: 'Sybmolic' -> 'Symbolic'.
                            "Welcome to Neuro-Symbolic Agent (NeSA) Demo, "
                            "where you can explore, understand and interact "
                            "with NeSA which is Logical Optimal Action (LOA).",
                            className="lead",
                            style={
                                'padding-top': '10px',
                                'padding-bottom': '10px',
                            }
                        ),
                        html.Hr(className="my-2"),
                        html.P(
                            "Click the buttons below to find the code for "
                            "NeSA Demo and LOA.",
                            style={
                                'padding-top': '10px',
                                'padding-bottom': '10px',
                            }
                        ),
                        dca.Button(
                            id='learn_more_button',
                            size='sm',
                            children='NeSA Demo Repo',
                            kind='primary',
                            href=REPO_URL,
                            style={
                                'padding': '10px',
                                'right': '10px',
                                'left': '0px',
                            }
                        ),
                        dca.Button(
                            # BUG FIX: both buttons shared id='learn_more_button';
                            # duplicate component ids break Dash callbacks/rendering.
                            id='loa_repo_button',
                            size='sm',
                            children='LOA Repo',
                            kind='primary',
                            href=LOA_REPO_URL,
                            style={
                                'padding': '10px',
                                'right': '0px',
                                'left': '10px',
                            }
                        ),
                    ]
                ),
            ]),
        ]),
    ])
|
# Generated by Django 3.1.5 on 2021-03-05 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Profile.bio a non-empty default text."""

    dependencies = [
        ('blog', '0015_post_header_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(default='This user has not written anything here.'),
        ),
    ]
|
"""
Plot the relationship (mean of heads) for global Transformer.
Plot the relationship of local-transformer and global-transformer.
python3 plot_relation2.py --id 26 --point_id 10 --stage 0 --save
"""
import argparse
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import random
import os
import numpy as np
from collections import OrderedDict
import h5py
import math
import sys
sys.path.append("..")
from data import ModelNet40
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
# import models as models
from plot21 import plot21H
from utils import set_seed
def parse_args():
    """Parse command-line arguments for the point-cloud plotting script."""
    parser = argparse.ArgumentParser('training')
    parser.add_argument('--num_points', type=int, default=5000, help='Point Number')
    # for ploting 26 airplane
    parser.add_argument('--id', default=800, type=int, help='ID of the example 2468')
    # BUG FIX: the help strings below were copy-pasted from an unrelated
    # normals option ('use normals besides x,y,z'); they now describe the flags.
    parser.add_argument('--save', action='store_true', default=False, help='save the figure to disk')
    parser.add_argument('--show', action='store_true', default=True, help='show the figure interactively')
    return parser.parse_args()
def plot_xyz(xyz, args, name="figure.pdf" ):  # xyz: [n,3] selected_xyz:[3]
    """Render a point cloud colored by x+y+z and optionally show/save it.

    :param xyz: (n, 3) array-like of point coordinates.
    :param args: parsed CLI args; only args.show and args.save are read.
    :param name: output path used when args.save is set.
    """
    fig = pyplot.figure()
    ax = Axes3D(fig)
    # ax = fig.gca(projection='3d')
    x_vals = xyz[:, 0]
    y_vals = xyz[:, 1]
    z_vals = xyz[:, 2]
    # NOTE(review): multiplying both limits by 0.9 shrinks them toward zero,
    # which *widens* the range for negative minima — confirm the crop is intended.
    ax.set_xlim3d(min(x_vals)*0.9, max(x_vals)*0.9)
    ax.set_ylim3d(min(y_vals)*0.9, max(y_vals)*0.9)
    ax.set_zlim3d(min(z_vals)*0.9, max(z_vals)*0.9)
    # Color each point by the sum of its coordinates, normalized over the cloud.
    color = x_vals+y_vals+z_vals
    norm = pyplot.Normalize(vmin=min(color), vmax=max(color))
    ax.scatter(x_vals, y_vals, z_vals, c=color, cmap='hsv', norm=norm)
    ax.set_axis_off()
    ax.get_xaxis().get_major_formatter().set_useOffset(False)
    # pyplot.tight_layout()
    if args.show:
        pyplot.show()
    if args.save:
        fig.savefig(name, bbox_inches='tight', pad_inches=0.00, transparent=True)
    pyplot.close()
def main():
    """Load one ModelNet40 test example by id and plot its point cloud."""
    args = parse_args()
    # print(f"args: {args}")
    # Avoid HDF5 file-lock errors on shared filesystems.
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
    print('==> Preparing data ...')
    # train_set =ModelNet40(partition='train', num_points=args.num_points)
    test_set = ModelNet40(partition='test', num_points=args.num_points)
    data, label = test_set.__getitem__(args.id)
    plot_xyz(data, args, name=f"forstructure/Image-{args.id}-{args.num_points}.pdf" )

if __name__ == '__main__':
    set_seed(32)  # must
    main()
|
#
# Input: {4, 2, -3, 1, 6}
# Output: true
# Input: {4, 2, 0, 1, 6}
# Output: true
# Input: {-3, 2, 3, 1, 6}
# Output: false
#given a list return true if zero sum is achievable with any of the elements from the list
#return false if not achievable
def zerosum(inputlist):
    """Return True if some contiguous subarray of *inputlist* sums to zero.

    Prefix-sum trick: a zero-sum subarray exists exactly when two prefixes
    share the same running sum (the empty prefix, sum 0, included).
    O(n) time, O(n) space.

    BUG FIX: the original never updated its accumulator, so it only detected
    a literal 0 element, contradicting the examples in the header comments
    (e.g. {4, 2, -3, 1, 6} must be True because 2 + -3 + 1 == 0).
    """
    seen = {0}  # running sums seen so far; 0 covers subarrays starting at index 0
    running = 0
    for num in inputlist:
        running += num
        if running in seen:
            return True
        seen.add(running)
    return False

zerosum([4,2,-3,1,6, 9])
|
"""
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
import os
from flask import Flask, request, jsonify, url_for, send_from_directory
from flask_migrate import Migrate
from flask_swagger import swagger
from flask_cors import CORS
from api.utils import APIException, generate_sitemap
from api.models import db
from api.routes import api
from api.admin import setup_admin
from api.models import User
from flask_jwt_extended import JWTManager
ENV = os.getenv("FLASK_ENV")
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../public/')
app = Flask(__name__)
app.url_map.strict_slashes = False
# Setup the Flask-JWT-Extended extension
app.config["JWT_SECRET_KEY"] = os.environ.get('JWT_SECRET', 'sample key')
jwt = JWTManager(app)
# database configuration
db_url = os.getenv("DATABASE_URL")
if db_url is not None:
    # SQLAlchemy 1.4+ rejects the legacy 'postgres://' scheme some hosts emit.
    app.config['SQLALCHEMY_DATABASE_URI'] = db_url.replace("postgres://", "postgresql://")
else:
    # No DATABASE_URL set: fall back to a throwaway local SQLite database.
    app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:////tmp/test.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
MIGRATE = Migrate(app, db)
db.init_app(app)
# Allow CORS requests to this API
CORS(app)
# add the admin
setup_admin(app)
# Add all endpoints from the API with an "api" prefix
app.register_blueprint(api, url_prefix='/api')
# Handle/serialize errors like a JSON object
@app.errorhandler(APIException)
def handle_invalid_usage(error):
    """Serialize APIException instances into a JSON error response."""
    payload = error.to_dict()
    return jsonify(payload), error.status_code
# generate sitemap with all your endpoints
@app.route('/')
def sitemap():
    """Serve the endpoint sitemap in development, the SPA entry point otherwise."""
    if ENV != "development":
        return send_from_directory(static_file_dir, 'index.html')
    return generate_sitemap(app)
# any other endpoint will try to serve it like a static file
@app.route('/<path:path>', methods=['GET'])
def serve_any_other_file(path):
    """Serve static files, falling back to index.html for unknown paths."""
    target = os.path.join(static_file_dir, path)
    if not os.path.isfile(target):
        path = 'index.html'
    response = send_from_directory(static_file_dir, path)
    response.cache_control.max_age = 0  # avoid cache memory
    return response
##### USERS #####
# All users
@app.route('/user', methods=['GET', 'POST'])
def handle_users():
    """List all users (GET) or create a new user (POST).

    POST expects a JSON body with non-empty 'user_name', 'email' and
    'password'; raises APIException(400) otherwise.
    """
    # GET all users
    if request.method == 'GET':
        users = User.query.all()
        all_users = [user.serialize() for user in users]
        return jsonify(all_users), 200
    # Create (POST) a new user
    if request.method == 'POST':
        user_to_add = request.json
        if user_to_add is None:
            raise APIException("You need to specify the request body as a json object", status_code=400)
        # Field -> error message for missing/empty values (messages kept
        # identical to the original per-field checks).
        required = (
            ('user_name', 'You need to specify the username'),
            ('email', 'You need to specify the email'),
            ('password', 'You need to create a valid password'),
        )
        for field, message in required:
            if field not in user_to_add or user_to_add[field] == "":
                raise APIException(message, status_code=400)
        new_user = User(user_name=user_to_add["user_name"], email=user_to_add["email"], password=user_to_add["password"])
        db.session.add(new_user)
        db.session.commit()
        # NOTE(review): 200 kept for compatibility; 201 is conventional for creation.
        return jsonify(new_user.serialize()), 200
    return "Invalid Method", 404
# Get, Edit or delete a specific user
@app.route('/user/<int:user_id>', methods=['PUT', 'GET', 'DELETE'])
def handle_single_user(user_id):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single user by id.

    Raises APIException(404) when the user does not exist.
    """
    user = User.query.get(user_id)
    if user is None:
        raise APIException('User not found in data base', status_code=404)
    # Modify (PUT) a user
    if request.method == 'PUT':
        request_body = request.json
        # Copy only the updatable fields that were actually sent.
        for field in ('user_name', 'email', 'password', 'is_active'):
            if field in request_body:
                setattr(user, field, request_body[field])
        db.session.commit()
        return jsonify(user.serialize()), 200
    # GET a user
    elif request.method == 'GET':
        return jsonify(user.serialize()), 200
    # DELETE a user
    elif request.method == 'DELETE':
        db.session.delete(user)
        db.session.commit()
        return "User deleted", 200
    return "Invalid Method", 404
# this only runs if `$ python src/main.py` is executed
if __name__ == '__main__':
    PORT = int(os.environ.get('PORT', 3001))
    # NOTE(review): debug=True must not be enabled in production.
    app.run(host='0.0.0.0', port=PORT, debug=True)
from app import db
class Note(db.Model):
    """SQLAlchemy model for a stored note with delivery metadata."""
    # 16-character string key used as the primary identifier.
    id_ = db.Column(db.String(16), primary_key=True)
    # Body of the note.
    text = db.Column(db.Text)
    response = db.Column(db.String(100))
    created_at = db.Column(db.DateTime)
    read_at = db.Column(db.DateTime)
    # NOTE(review): 64 chars suggests a hex digest (e.g. SHA-256) rather than a
    # plaintext password — confirm against the code that populates it.
    password = db.Column(db.String(64))
|
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #
import libmeme as ml
import libmeme_engine as engine
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #
class my_script(ml.script_object):
    """Minimal script object with no-op lifecycle callbacks."""
    def __init__(self, **kwargs):
        # BUG FIX: the original called super(...).__init__(self, **kwargs) —
        # the bound super proxy already supplies self, so self was passed
        # twice (once as an unexpected positional argument).
        super(my_script, self).__init__(**kwargs)

    def on_activate(self): return
    def on_deactivate(self): return
    def on_tick(self): return
    def on_timer(self): return
# Instantiate and exercise the script object once.
test = my_script(enabled = True)
test.call("on_tick")
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #
# Load an image asset, then create a texture from it.
engine.assets.load("image", "my_image", "../foo.png")
engine.assets.load("texture", "my_texture", "my_image")
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #
# 4.https://the-internet.herokuapp.com/dropdown
# si pe pagina de heruko selectezi o optiune si apoi verifici ca e selectata cea care trebuie
# cu un asert
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
def test_drop_down():
    """Select option 1 in the herokuapp dropdown and assert it is selected."""
    driver = webdriver.Chrome('C:/Users/dan_1/PycharmProjects/Automationproject/chromedriver.exe')
    try:
        driver.get('https://the-internet.herokuapp.com/dropdown')
        time.sleep(2)
        dropdown_list = Select(driver.find_element(By.ID, 'dropdown'))
        dropdown_list.select_by_value('1')
        time.sleep(2)
        option1 = driver.find_element(By.CSS_SELECTOR, "#dropdown > option:nth-child(2)")
        assert option1.is_selected(), "Option 1 is not selected"
        print("Option 1 is selected!")
        time.sleep(2)
    finally:
        # BUG FIX: quit in finally so the browser is closed even when the
        # assertion (or any selenium call) fails — the original leaked it.
        driver.quit()
|
"""A machine learning model to recognize the centers of circles.
The general outline is that we can create arbitrary circles and pass them into
a tensorflow model which will locate their centers. This should be a relatively
simple task, since at first at least there will be no partial circles (meaning
that the entire model can be described as learning the average function over
our 2d input space).
Unsurprisingly, this model performs (very) poorly.
"""
import sys
import matplotlib.pyplot as plt
import seaborn
import tensorflow as tf
from utils.circles import random_boxed_circle
# 100x100 greyscale image, flattened to a 10000-vector per example
x = tf.placeholder(tf.float32, [None, 10000])
# Single linear layer mapping the image to a 2-vector (the predicted center).
W = tf.Variable(tf.zeros([10000, 2]))
b = tf.Variable(tf.zeros([2]))
# NOTE(review): softmax constrains the two outputs to sum to 1, a poor fit
# for regressing (x, y) coordinates — consistent with the module docstring's
# admission that this model performs very poorly.
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_prime = tf.placeholder(tf.float32, [None, 2])
mse = tf.reduce_mean(tf.squared_difference(y, y_prime))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(mse)
if __name__ == "__main__":
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    mses = []
    for step in range(10000):
        # zip(*batch) transposes [(img, (x, y)), ...] into
        # ([img, ...], [(x, y), ...]).
        batch = [random_boxed_circle(100, 100, 5) for _ in range(50)]
        xs, ys = zip(*batch)
        xs = [img.flatten() for img in xs]
        sess.run(train_step, feed_dict={x: xs, y_prime: ys})
        # Record the training MSE every 100 steps for the final plot.
        if step % 100 == 0:
            mses.append(sess.run(mse, feed_dict={x: xs, y_prime: ys}))
    plt.plot(mses)
    plt.show()
|
# -*- coding: utf-8 -*-
"""
(c) 2019 - Copyright Red Hat Inc
Authors:
Michal Konecny <mkonecny@redhat.com>
"""
from __future__ import unicode_literals, absolute_import
import unittest
import sys
import os
import json
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.plugins as plugins # noqa: E402
import pagure.lib.query # noqa: E402
import tests # noqa: E402
class PagureFlaskApiPluginViewProjecttests(tests.Modeltests):
    """Tests for the flask API of pagure for viewing enabled plugins on project"""

    @patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
    def setUp(self):
        """Set up the environment; run before every test."""
        super(PagureFlaskApiPluginViewProjecttests, self).setUp()
        tests.create_projects(self.session)

    def test_view_plugin_on_project(self):
        """Test viewing plugins on a project."""
        # Install and activate the Mail plugin on the 'test' project.
        repo = pagure.lib.query.get_authorized_project(self.session, "test")
        plugin = plugins.get_plugin("Mail")
        plugin.set_up(repo)
        dbobj = plugin.db_object()
        dbobj.active = True
        dbobj.project_id = repo.id
        dbobj.mail_to = "serg@wh40k.com"
        plugin.install(repo, dbobj)
        self.session.add(dbobj)
        self.session.commit()
        # Retrieve all plugins on project
        output = self.app.get("/api/0/test/settings/plugins")
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.get_data(as_text=True))
        self.assertEqual(
            data,
            {
                "plugins": [{"Mail": {"mail_to": "serg@wh40k.com"}}],
                "total_plugins": 1,
            },
        )

    def test_viewing_plugin_on_project_no_plugin(self):
        """Test viewing plugins on a project, which doesn't
        have any installed.
        """
        # Retrieve all plugins on project
        output = self.app.get("/api/0/test/settings/plugins")
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.get_data(as_text=True))
        self.assertEqual(data, {"plugins": [], "total_plugins": 0})
# Run the tests verbosely when the file is executed directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)
|
# Copyright (c) 2019 Yubico AB
# Copyright (c) 2019 Oleg Moiseenko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from .ctap import CtapDevice, CtapError, STATUS
from .hid import CAPABILITY, CTAPHID
from .pcsc import PCSCDevice
from smartcard.Exceptions import CardConnectionException
from threading import Event
import struct
import six
AID_FIDO = b'\xa0\x00\x00\x06\x47\x2f\x00\x01'
SW_SUCCESS = (0x90, 0x00)
SW_UPDATE = (0x91, 0x00)
SW1_MORE_DATA = 0x61
class CardSelectException(Exception):
    """Raised when the U2F/FIDO2 applet cannot be selected on the card."""
    pass
class CtapNfcDevice(CtapDevice):
    """
    CtapDevice implementation using the pcsc NFC transport.
    """
    def __init__(self, dev):
        self._dev = dev
        self._dev.connect()
        self._capabilities = 0
        # Select the FIDO applet; anything but SW 9000 means no usable card.
        result, sw1, sw2 = self._dev.select_applet(AID_FIDO)
        if (sw1, sw2) != SW_SUCCESS:
            raise CardSelectException('Select error')
        # The U2F version string advertises CTAP1 message support.
        if result == b'U2F_V2':
            self._capabilities |= CAPABILITY.NMSG
        try:  # Probe for CTAP2 by calling GET_INFO
            self.call(CTAPHID.CBOR, b'\x04')
            self._capabilities |= CAPABILITY.CBOR
        except CtapError:
            pass

    @property
    def pcsc_device(self):
        # Underlying PCSCDevice transport.
        return self._dev

    def __repr__(self):
        return 'CtapNfcDevice(%s)' % self._dev.reader.name

    @property
    def version(self):
        """CTAP NFC protocol version.
        :rtype: int
        """
        return 2 if self._capabilities & CAPABILITY.CBOR else 1

    @property
    def capabilities(self):
        """Capabilities supported by the device."""
        return self._capabilities

    def _chain_apdus(self, cla, ins, p1, p2, data=b''):
        """Exchange an APDU, chunking long commands with command chaining and
        reassembling responses continued via SW1=0x61 (GET RESPONSE).

        Returns (response_data, sw1, sw2).
        """
        # Send all but the last <=250-byte chunk with the chaining CLA.
        # NOTE(review): the chaining class byte is hardcoded to 0x90 rather
        # than derived from `cla` — confirm intended for non-0x80 classes.
        while len(data) > 250:
            to_send, data = data[:250], data[250:]
            header = struct.pack('!BBBBB', 0x90, ins, p1, p2, len(to_send))
            resp, sw1, sw2 = self._dev.apdu_exchange(header + to_send)
            if (sw1, sw2) != SW_SUCCESS:
                return resp, sw1, sw2
        # Final (or only) chunk, with Le=0x00 appended to request a response.
        apdu = struct.pack('!BBBB', cla, ins, p1, p2)
        if data:
            apdu += struct.pack('!B', len(data)) + data
        resp, sw1, sw2 = self._dev.apdu_exchange(apdu + b'\x00')
        # SW1=0x61 means more response data is available; fetch it with
        # GET RESPONSE until the card reports a final status.
        while sw1 == SW1_MORE_DATA:
            apdu = b'\x00\xc0\x00\x00' + struct.pack('!B', sw2)  # sw2 == le
            lres, sw1, sw2 = self._dev.apdu_exchange(apdu)
            resp += lres
        return resp, sw1, sw2

    def _call_apdu(self, apdu):
        """Execute one CTAP1 APDU (short or extended form) and return the
        response data with the two status bytes appended."""
        if len(apdu) >= 7 and six.indexbytes(apdu, 4) == 0:
            # Extended APDU: 2-byte length field follows the zero marker.
            data_len = struct.unpack('!H', apdu[5:7])[0]
            data = apdu[7:7+data_len]
        else:
            # Short APDU: single-byte length field.
            data_len = six.indexbytes(apdu, 4)
            data = apdu[5:5+data_len]
        (cla, ins, p1, p2) = six.iterbytes(apdu[:4])
        resp, sw1, sw2 = self._chain_apdus(cla, ins, p1, p2, data)
        return resp + struct.pack('!BB', sw1, sw2)

    def _call_cbor(self, data=b'', event=None, on_keepalive=None):
        """Execute a CTAP2 CBOR command, polling NFCCTAP_GETRESPONSE while the
        card reports keepalive (SW 9100) and honoring cancellation via *event*."""
        event = event or Event()
        # NFCCTAP_MSG
        resp, sw1, sw2 = self._chain_apdus(0x80, 0x10, 0x80, 0x00, data)
        last_ka = None
        while not event.is_set():
            while (sw1, sw2) == SW_UPDATE:
                # First response byte carries the keepalive status; report
                # changes to the callback.
                ka_status = six.indexbytes(resp, 0)
                if on_keepalive and last_ka != ka_status:
                    try:
                        ka_status = STATUS(ka_status)
                    except ValueError:
                        pass  # Unknown status value
                    last_ka = ka_status
                    on_keepalive(ka_status)
                # NFCCTAP_GETRESPONSE
                resp, sw1, sw2 = self._chain_apdus(0x80, 0x11, 0x00, 0x00, b'')
            if (sw1, sw2) != SW_SUCCESS:
                raise CtapError(CtapError.ERR.OTHER)  # TODO: Map from SW error
            return resp
        raise CtapError(CtapError.ERR.KEEPALIVE_CANCEL)

    def call(self, cmd, data=b'', event=None, on_keepalive=None):
        """Dispatch a CTAPHID-style command over NFC: MSG -> raw APDU,
        CBOR -> CTAP2; anything else is invalid."""
        if cmd == CTAPHID.MSG:
            return self._call_apdu(data)
        elif cmd == CTAPHID.CBOR:
            return self._call_cbor(data, event, on_keepalive)
        else:
            raise CtapError(CtapError.ERR.INVALID_COMMAND)

    @classmethod  # selector='CL'
    def list_devices(cls, selector='', pcsc_device=PCSCDevice):
        """
        Returns list of readers in the system. Iterator.
        :param selector:
        :param pcsc_device: device to work with. PCSCDevice by default.
        :return: iterator. next reader
        """
        for d in pcsc_device.list_devices(selector):
            try:
                yield cls(d)
            except CardConnectionException:
                # Skip readers whose card cannot be connected to.
                pass
|
import sys
def mapper():
    # Hadoop-streaming mapper (Python 2): emit "<column 1>\t<column 6>"
    # for every CSV line read from stdin.
    # NOTE(review): the header is skipped only when the line starts with a
    # comma — confirm the input's header row actually begins with ','.
    for line in sys.stdin:
        # your code here
        if line.startswith(','): #skip header
            continue
        data = line.split(',')
        print "{0}\t{1}".format(data[1], data[6])

mapper()
SECRET_KEY = 'UJenkeiWSQpF3X5_KO3g9A' |
import os,sys
import argparse
from chinn.models import PartialDeepSeaModel, NNClassifier
from chinn import train
import torch
def get_args():
    """Parse command-line arguments for testing distance-matched models."""
    parser = argparse.ArgumentParser(description="Train distance matched models")
    # BUG FIX: the help string had an unbalanced parenthesis.
    parser.add_argument('data_name', help='The prefix of the data (without _[train|valid|test].hdf5)')
    parser.add_argument('model_name', help="The prefix of the model.")
    parser.add_argument('model_dir', help='Directory for storing the models.')
    parser.add_argument('-s', '--sigmoid', action='store_true', default=False,
                        help='Use Sigmoid at end of feature extraction. Tanh will be used by default. Default: False.')
    parser.add_argument('-d', '--distance', action='store_true', default=False,
                        help='Include distance as a feature for classifier. Default: False.')
    return parser.parse_args()
if __name__=='__main__':
    args = get_args()
    legacy = True
    # Feature extractor: 4-layer partial DeepSea model with weight-sum pooling.
    deepsea_model = PartialDeepSeaModel(4, use_weightsum=True, leaky=True, use_sigmoid=args.sigmoid)
    # Classifier input width: last conv layer's filters x 4 (+1 for distance).
    n_filters = deepsea_model.num_filters[-1]*4
    if args.distance:
        n_filters += 1
    classifier = NNClassifier(n_filters, legacy=legacy)
    # NOTE(review): use_distance is hard-coded to False even though n_filters
    # was widened when --distance is set — confirm whether
    # use_distance=args.distance was intended here.
    train.test(deepsea_model, classifier, args.model_name, args.data_name, False, data_set='test',
               save_probs=True, use_distance=False, model_dir=args.model_dir, legacy=legacy, plot=False)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class H5bench(CMakePackage):
    """A benchmark suite for measuring HDF5 performance."""

    homepage = 'https://github.com/hpc-io/h5bench'
    git = 'https://github.com/hpc-io/h5bench.git'

    maintainers = ['jeanbez', 'sbyna']

    version('latest', branch='master', submodules=True)
    version('develop', branch='develop', submodules=True)
    version('1.2', commit='866af6777573d20740d02acc47a9080de093e4ad', submodules=True)
    version('1.1', commit='1276530a128025b83a4d9e3814a98f92876bb5c4', submodules=True)
    version('1.0', commit='9d3438c1bc66c5976279ef203bd11a8d48ade724', submodules=True)

    # Optional benchmarks, available from 1.2 onwards.
    variant('metadata', default=False, when='@1.2:', description='Enables metadata benchmark')
    variant('amrex', default=False, when='@1.2:', description='Enables AMReX benchmark')
    variant('exerciser', default=False, when='@1.2:', description='Enables exerciser benchmark')
    variant('openpmd', default=False, when='@1.2:', description='Enables OpenPMD benchmark')
    variant('e3sm', default=False, when='@1.2:', description='Enables E3SM benchmark')
    variant('all', default=False, when='@1.2:', description='Enables all h5bench benchmarks')

    depends_on('cmake@3.10:', type='build')
    depends_on('mpi')
    depends_on('hdf5+mpi@1.12.0:1,develop-1.12:')
    # The E3SM benchmark (also enabled via +all) needs PnetCDF.
    depends_on('parallel-netcdf', when='+e3sm')
    depends_on('parallel-netcdf', when='+all')

    @run_after('install')
    def install_config(self):
        # Ship the sample pattern configs and the metadata-stress INI
        # alongside the installed binaries.
        install_tree('h5bench_patterns/sample_config',
                     self.prefix.share.patterns)
        install('metadata_stress/hdf5_iotest.ini',
                self.prefix.share)

    def setup_build_environment(self, env):
        # h5bench's CMake locates HDF5 through this variable.
        env.set('HDF5_HOME', self.spec['hdf5'].prefix)

    def cmake_args(self):
        """Map each variant onto its H5BENCH_* CMake option."""
        args = [
            self.define_from_variant('H5BENCH_METADATA', 'metadata'),
            self.define_from_variant('H5BENCH_AMREX', 'amrex'),
            self.define_from_variant('H5BENCH_EXERCISER', 'exerciser'),
            self.define_from_variant('H5BENCH_OPENPMD', 'openpmd'),
            self.define_from_variant('H5BENCH_E3SM', 'e3sm'),
            self.define_from_variant('H5BENCH_ALL', 'all')
        ]
        return args
|
# Generated by Django 3.0.7 on 2020-10-26 02:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add descripcion, mision and vision text fields to the Empresa model."""

    dependencies = [
        ('store', '0018_auto_20201024_1557'),
    ]

    # NOTE(review): all three fields reuse verbose_name='Dirección', which
    # looks copy-pasted from an address field — confirm the intended labels
    # (a follow-up migration would be needed to change them).
    operations = [
        migrations.AddField(
            model_name='empresa',
            name='descripcion',
            field=models.TextField(blank=True, max_length=500, null=True, verbose_name='Dirección'),
        ),
        migrations.AddField(
            model_name='empresa',
            name='mision',
            field=models.TextField(blank=True, max_length=500, null=True, verbose_name='Dirección'),
        ),
        migrations.AddField(
            model_name='empresa',
            name='vision',
            field=models.TextField(blank=True, max_length=500, null=True, verbose_name='Dirección'),
        ),
    ]
|
import torch
from functools import reduce
from torchtext import data
from tqdm.autonotebook import tqdm
from abc import ABC, abstractmethod
import logging
import os.path
import json
import csv
from transformers import BertTokenizer, BertModel
logging.basicConfig(level=logging.DEBUG)
module_logger = logging.getLogger('data_loading')
class Dataset(ABC):
    """Abstract base class for TSV-backed datasets that are tokenized with a
    BERT tokenizer, run through a BERT model, and cached as tensors.

    Subclasses implement load() (parse self.filename into self.data) and
    _compute_encoded() (build self.encoded_data from self.data).
    """

    # File names used for the cached tensors under cache/<run_name>[/<sub>].
    sentences_filename = 'sentences.pt'
    inputs_filename = 'inputs.pt'
    indices_filename = 'indices.pt'
    pools_filename = 'pools.pt'

    def get_raw(self):
        """Return the raw TSV rows as lists of column strings."""
        with open(self.filename, 'r') as f:
            out = [x for x in csv.reader(f, delimiter='\t', quotechar=None, strict=True)]
        return out

    def get_data(self):
        """Lazily load and return the parsed dataset."""
        if self.data is None:
            self.load()
        return self.data

    def get_encoded(self):
        """Lazily compute and return the encoded dataset."""
        # NOTE(review): self.encoded_data is only initialised by subclasses;
        # the base __init__ does not set it — confirm all subclasses do.
        if self.encoded_data is None:
            self._compute_encoded()
        return self.encoded_data

    def __init__(self, filename, model_label, batch_size, run_name):
        """
        Args:
            filename: path to the TSV source file.
            model_label: pretrained BERT identifier passed to transformers.
            batch_size: batch size used during embedding extraction.
            run_name: name of this run's cache folder under cache/.
        """
        self.run_name = run_name
        self.filename = filename
        self.model_label = model_label
        self.batch_size = batch_size
        self.tokenizer = BertTokenizer.from_pretrained(model_label, do_lower_case=True)
        self.data = None
        # Field holding a BERT-encoded sentence, padded with the tokenizer's pad id.
        self.encoded_sentence_field = ('sentence',
            data.Field(use_vocab=False, tokenize=self.encode, pad_token=self.tokenizer.pad_token_id))
        # Field carrying each example's original position, used to restore order later.
        self.index_field = ('index', data.LabelField(use_vocab=False))
        self.encoded_fields = [self.encoded_sentence_field, self.index_field]

    def encode(self, sentence, second_sentence=None):
        """Encode a sentence (or a sentence pair) to token ids, adding special tokens."""
        return self.tokenizer.encode(sentence, second_sentence, add_special_tokens=True)

    def decode(self, sentence):
        """Decode a sequence of token ids back into a string."""
        return self.tokenizer.decode(sentence)

    def get_decoded_tokens(self, ids):
        """Map token ids to token strings without joining them."""
        return self.tokenizer.convert_ids_to_tokens(ids)

    @abstractmethod
    def load(self):
        """Parse self.filename into self.data (subclass responsibility)."""
        raise Exception("Not implemented on for this class")

    @abstractmethod
    def _compute_encoded(self):
        """Build self.encoded_data from self.data (subclass responsibility)."""
        raise Exception("Not implemented on for this class")

    def bert_iter(self, encoded_data, batch_size):
        """Yield ({'input_ids', 'attention_mask'}, indices) batches for BertModel.

        BucketIterator transposes the sentence data, we have to transpose it
        back.  Pair ID data does not need to be transposed (perhaps because it
        is one dimensional?).
        """
        return (({'input_ids': tensor,
                  'attention_mask': (tensor != self.tokenizer.pad_token_id) * 1}, indices)
                for tensor, indices in ((x.sentence.transpose(0, 1), x.index)
                                        for x in data.BucketIterator(encoded_data, batch_size, sort_key=lambda x: len(x.sentence))))

    @staticmethod
    def aggregate_data(sentences, batch_sentences, indices, batch_indices, inputs, batch_inputs, pools, batch_pools):
        """Append one batch of model outputs to the running accumulators.

        Batches can have different sequence lengths, so the shorter side is
        right-padded before concatenation.
        """
        # padding solution is pretty hacky and probably not the most space
        # efficient, but it works for what we want to do
        if sentences is None and indices is None and inputs is None and pools is None:
            # First batch: the accumulators are simply the batch tensors.
            sentences = batch_sentences
            indices = batch_indices
            inputs = batch_inputs
            pools = batch_pools
        else:
            indices = torch.cat((indices, batch_indices))
            if sentences.shape[1] > batch_sentences.shape[1]:
                # existing tensor is bigger, pad inputs
                batch_sentences = torch.nn.functional.pad(batch_sentences,
                    (0, 0, 0, sentences.shape[1] - batch_sentences.shape[1]))
                batch_inputs = torch.nn.functional.pad(batch_inputs, (0, inputs.shape[1] - batch_inputs.shape[1]))
            elif sentences.shape[1] < batch_sentences.shape[1]:
                # inputs are bigger, pad existing
                sentences = torch.nn.functional.pad(sentences,
                    (0, 0, 0, batch_sentences.shape[1] - sentences.shape[1]))
                inputs = torch.nn.functional.pad(inputs, (0, batch_inputs.shape[1] - inputs.shape[1]))
            sentences = torch.cat((sentences, batch_sentences))
            inputs = torch.cat((inputs, batch_inputs))
            pools = torch.cat((pools, batch_pools))
        return sentences, indices, inputs, pools

    def _save(self, tensor, name, folder):
        """Serialise one tensor into the cache folder."""
        module_logger.info('Caching {} in {}'.format(name, folder))
        torch.save(tensor, os.path.join(folder, name))

    def _load(self, name, folder):
        """Load one cached tensor from the cache folder."""
        module_logger.info('Loading {} from {}'.format(name, folder))
        return torch.load(os.path.join(folder, name))

    def get_metadata(self):
        """Describe this run; stored as JSON next to the cached tensors."""
        return {
            'file': self.filename,
            'model': self.model_label,
            'batch_size': self.batch_size,
            'run_name': self.run_name
        }

    def save_computed_embeddings(self, sentences, inputs, indices, pools, metadata, save_sub_location=None):
        """Write the four embedding tensors (plus optional metadata JSON) to the cache."""
        if save_sub_location:
            folder = os.path.join('cache', self.run_name, save_sub_location)
        else:
            folder = os.path.join('cache', self.run_name)
        module_logger.info('Caching info for this run in {}'.format(folder))
        module_logger.info('Please pass this folder in to future invocations to use cached data')
        if not os.path.exists(folder):
            os.makedirs(folder)
        if metadata is not None:
            with open(os.path.join(folder, 'metadata.json'), 'w+') as metadata_file:
                metadata_file.write(json.dumps(metadata)+'\n')
        self._save(sentences, self.sentences_filename, folder)
        self._save(inputs, self.inputs_filename, folder)
        self._save(indices, self.indices_filename, folder)
        self._save(pools, self.pools_filename, folder)

    def load_saved_embeddings(self, folder):
        """Return (sentences, inputs, indices, pools) previously cached in *folder*."""
        module_logger.info('Loading embedding data from {}...'.format(folder))
        return self._load(self.sentences_filename, folder), self._load(self.inputs_filename, folder),\
               self._load(self.indices_filename, folder), self._load(self.pools_filename, folder)

    def bert_word_embeddings(self, encoded_data, save_sub_location=None):
        """Run BERT over *encoded_data*, cache and return the ordered embeddings.

        Returns:
            (sentences, inputs, indices, pools), reordered to original dataset order.
        """
        module_logger.info("Loading '{}' model".format(self.model_label))
        bert_model = BertModel.from_pretrained(self.model_label)
        sentences = None
        indices = None
        inputs = None
        pools = None
        for batch, batch_indices in tqdm(self.bert_iter(encoded_data, self.batch_size), desc="Feature extraction"):
            with torch.no_grad():
                model_out = bert_model(**batch)
                # model_out[0]: per-token hidden states; model_out[1]: pooled output.
                batch_sentences = model_out[0]
                batch_pools = model_out[1]
            batch_inputs = batch['input_ids']
            sentences, indices, inputs, pools = self.aggregate_data(sentences, batch_sentences, indices,
                batch_indices, inputs, batch_inputs, pools, batch_pools)
            module_logger.info('processed {}/{} sentences, batch max sentence length {}, total max sentence length {}'
                .format(sentences.shape[0], len(encoded_data), batch_sentences.shape[1], sentences.shape[1]))
        ordered_sentences, ordered_inputs, ordered_indices, ordered_pools = self.reorder(sentences, inputs, indices, pools)
        self.save_computed_embeddings(ordered_sentences, ordered_inputs, ordered_indices, ordered_pools, self.get_metadata(), save_sub_location)
        return ordered_sentences, ordered_inputs, ordered_indices, ordered_pools

    @staticmethod
    def reorder(sentences, inputs, indices, pools):
        """Undo BucketIterator's shuffling: place row i of the batched output
        back at its original dataset position given by indices[i]."""
        if len(indices.shape) > 1:
            grouping_length = indices.shape[1]
            # dealing with a grouping of sentences, like pairs
            ordered_sentences = torch.zeros([sentences.shape[0] // grouping_length, grouping_length,
                sentences.shape[1], sentences.shape[2]])
            ordered_inputs = torch.zeros(inputs.shape[0] // grouping_length, grouping_length, inputs.shape[1])
            ordered_pools = torch.zeros(pools.shape[0] // grouping_length, grouping_length, pools.shape[1])
            indices = [tuple(x) for x in indices]
            ordered_indices = sorted(indices)
        else:
            ordered_sentences = torch.zeros(sentences.shape)
            ordered_inputs = torch.zeros(inputs.shape)
            ordered_pools = torch.zeros(pools.shape)
            ordered_indices = range(0, sentences.shape[0])
        for current_index, original_index in enumerate(indices):
            # original_index is either a scalar position or a (pair, member) tuple.
            ordered_sentences[original_index] = sentences[current_index]
            ordered_inputs[original_index] = inputs[current_index]
            ordered_pools[original_index] = pools[current_index]
        return ordered_sentences, ordered_inputs, ordered_indices, ordered_pools

    def aggregate_sentence_embeddings(self, ordered_sentences, ordered_inputs, ordered_indices,
                                      aggregation_metric=torch.mean):
        """Collapse per-token embeddings into one vector per sentence.

        Special tokens (CLS/SEP/padding ids) are masked out before applying
        *aggregation_metric* over the token axis.
        """
        selection_matrix = reduce(lambda x,y: x & y, (ordered_inputs != x for x in self.tokenizer.all_special_ids))
        output_dimensions = list(ordered_sentences.shape)
        del output_dimensions[-2]  # remove the second last to dimensions, which is the token count
        output = torch.zeros(torch.Size(output_dimensions))
        for index in ordered_indices:
            output[index] = aggregation_metric(ordered_sentences[index][selection_matrix[index]], axis=0)
        return output
# for MSR paraphrase data and our paraphrase data
class ParaphraseDataset(Dataset):
    """Labelled sentence-pair dataset for paraphrase detection.

    Each TSV row holds a label and two sentences; *indices* gives the column
    positions of (label, sentence_1, sentence_2).
    """

    label_field_name = "label"
    sent1_field_name = "sentence_1"
    sent2_field_name = "sentence_2"

    def __init__(self, filename, model_label, batch_size, run_name, indices=(0, 1, 2)):
        super().__init__(filename, model_label, batch_size, run_name)
        self.flattened_encoded_data = None
        self.encoded_data = None
        self.labels = None
        # Column positions of (label, sentence_1, sentence_2) in the TSV.
        self.indices = indices

    def get_raw_for_output(self):
        """Return raw rows rearranged to (label, sentence_1, sentence_2, extras...),
        with a matching header row prepended."""
        indices = self.indices
        raw_data = self.get_raw()
        row_1 = ('true_label', 'sentence_1', 'sentence_2') + tuple(x for index, x in enumerate(raw_data[0])
                                                                   if index not in indices)
        return [row_1] + [
            (row[indices[0]], row[indices[1]], row[indices[2]]) +
            tuple(x for index, x in enumerate(row) if index not in indices) for row in raw_data[1:]
        ]

    def get_flattened_encoded(self):
        """Lazily build and return the single-sentence (flattened) encoded dataset."""
        if self.flattened_encoded_data is None:
            self._compute_flattened_encoded()
        return self.flattened_encoded_data

    def get_labels(self):
        """Return the pair labels as an (N, 1) float tensor."""
        if self.labels is None:
            # labels must be floats (not ints) or the function to compute loss gags
            self.labels = torch.tensor([x.label for x in self.get_data()], dtype=torch.float32).unsqueeze(1)
        return self.labels

    def load(self):
        """Parse the TSV into self.data, mapping only the three known columns."""
        indices = self.indices
        tokenized_field = data.Field(use_vocab=False, tokenize=lambda x: self.tokenizer.tokenize(x))
        label_field = data.LabelField(preprocessing=lambda x: int(x), use_vocab=False)
        # Columns not named in *indices* are dropped via ('unused', None).
        field_array = [('unused', None)] * (max(indices) + 1)
        field_array[indices[0]] = (self.label_field_name, label_field)
        field_array[indices[1]] = (self.sent1_field_name, tokenized_field)
        field_array[indices[2]] = (self.sent2_field_name, tokenized_field)
        self.data = data.TabularDataset(
            path=self.filename,
            format="tsv", skip_header=True,
            fields=field_array,
            csv_reader_params={'strict': True, 'quotechar':None}
        )

    # take X examples of sentence pairs and convert them into 2X rows of encoded single sentences with pair IDS so they
    # can be processed separately by bert
    def _compute_flattened_encoded(self):
        paraphrase_data = self.get_data()
        self.flattened_encoded_data = data.Dataset(
            [data.Example.fromlist([self.encode(row.sentence_1), [index, 0]], self.encoded_fields) for index, row in
             enumerate(paraphrase_data)] +
            [data.Example.fromlist([self.encode(row.sentence_2), [index, 1]], self.encoded_fields) for index, row in
             enumerate(paraphrase_data)],
            self.encoded_fields)

    def _compute_encoded(self):
        """Encode each pair as one sequence (both sentences passed to the tokenizer together)."""
        paraphrase_data = self.get_data()
        self.encoded_data = data.Dataset(
            [data.Example.fromlist([self.encode(row.sentence_1, row.sentence_2), index], self.encoded_fields)
             for index, row in enumerate(paraphrase_data)],
            self.encoded_fields)

    @staticmethod
    def combine_sentence_embeddings(sentence_embedding_pairs, combination_metric=torch.sub):
        """Combine each (emb1, emb2) pair into one vector (default: elementwise difference)."""
        return torch.stack([combination_metric(pair[0], pair[1]) for pair in sentence_embedding_pairs])

    @staticmethod
    def bert_cls_embeddings(sentence_embeddings):
        """Return the first-token (CLS position) embedding of every sentence."""
        return sentence_embeddings[:,0]
# for Paige's word vector similarity
class WordInspectionDataset(Dataset):
    """Dataset of (sentence, word) rows used to inspect individual word vectors."""

    def __init__(self, filename, model_label, batch_size, run_name):
        super().__init__(filename, model_label, batch_size, run_name)
        self.data = None
        self.encoded_data = None

    def load(self):
        """Parse the five-column inspection TSV into a torchtext TabularDataset."""
        token_field = data.Field(use_vocab=False, tokenize=lambda text: self.tokenizer.tokenize(text))
        int_field = data.LabelField(preprocessing=lambda raw: int(raw), use_vocab=False)
        column_fields = [
            ('sentence_id', int_field),
            ('pair_id', int_field),
            ('sentence', token_field),
            ('word', token_field),
            ('figurative', int_field),
        ]
        self.data = data.TabularDataset(
            path=self.filename,
            format="tsv",
            skip_header=True,
            fields=column_fields,
            csv_reader_params={'strict': True, 'quotechar': None},
        )

    def _compute_encoded(self):
        """Encode every sentence to BERT ids, pairing each with its row index."""
        rows = self.get_data()
        examples = [
            data.Example.fromlist([self.encode(row.sentence), position], self.encoded_fields)
            for position, row in enumerate(rows)
        ]
        self.encoded_data = data.Dataset(examples, self.encoded_fields)
class SentenceParaphraseInspectionDataset(ParaphraseDataset):
    """Paraphrase dataset whose rows also carry a classifier's probability and
    judgment, used to inspect model decisions."""

    def __init__(self, filename, model_label, batch_size, run_name):
        super().__init__(filename, model_label, batch_size, run_name, None)  # indices unused

    def load(self):
        """Parse the six-column inspection TSV into a torchtext TabularDataset."""
        token_field = data.Field(use_vocab=False, tokenize=lambda text: self.tokenizer.tokenize(text))
        int_field = data.LabelField(preprocessing=lambda raw: int(raw), use_vocab=False)
        float_field = data.LabelField(preprocessing=lambda raw: float(raw), use_vocab=False)
        column_fields = [
            ('classifier_prob', float_field),
            ('classifier_judgment', int_field),
            (self.label_field_name, int_field),
            (self.sent1_field_name, token_field),
            (self.sent2_field_name, token_field),
            ('Idiom', token_field),
        ]
        self.data = data.TabularDataset(
            path=self.filename,
            format="tsv",
            skip_header=True,
            fields=column_fields,
            csv_reader_params={'strict': True, 'quotechar': None},
        )
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from subprocess import call, Popen, PIPE
from wa import Command
from wa.framework import pluginloader
from wa.framework.configuration.core import MetaConfiguration, RunConfiguration
from wa.framework.exception import NotFoundError
from wa.framework.target.descriptor import list_target_descriptions
from wa.utils.types import caseless_string
from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
get_params_rst, underline)
from wa.utils.misc import which
from devlib.utils.misc import escape_double_quotes
class ShowCommand(Command):

    name = 'show'
    description = 'Display documentation for the specified plugin (workload, instrument, etc.).'

    def initialize(self, context):
        self.parser.add_argument('plugin', metavar='PLUGIN',
                                 help='The name of the plugin to display documentation for.')

    def execute(self, state, args):
        """Resolve *args.plugin* to rst documentation and display it.

        Resolution order: the special name 'settings', then a loaded plugin
        (or plugin alias), then a target description.  The output is rendered
        through pandoc + man when pandoc is available, otherwise printed raw.

        Raises:
            NotFoundError: if the name matches nothing.
        """
        name = args.plugin
        rst_output = None

        if name == caseless_string('settings'):
            rst_output = get_rst_for_global_config()
            rst_output += get_rst_for_envars()
            plugin_name = name.lower()
            kind = 'global:'
        else:
            plugin = get_plugin(name)
            if plugin:
                rst_output = get_rst_from_plugin(plugin)
                plugin_name = plugin.name
                kind = '{}:'.format(plugin.kind)
            else:
                target = get_target_description(name)
                if target:
                    rst_output = get_rst_from_target(target)
                    plugin_name = target.name
                    kind = 'target:'

        if not rst_output:
            raise NotFoundError('Could not find plugin or alias "{}"'.format(name))

        if which('pandoc'):
            p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            # NOTE(review): on Python 3, communicate() needs bytes unless the
            # pipes are opened in text mode — confirm rst_output's type here.
            output, _ = p.communicate(rst_output)
            # Make sure to double escape back slashes
            output = output.replace('\\', '\\\\\\')
            # Correctly format the title and page number of the man page
            title, body = output.split('\n', 1)
            title = '.TH {}{} 1'.format(kind, plugin_name)
            output = '\n'.join([title, body])
            call('echo "{}" | man -l -'.format(escape_double_quotes(output)), shell=True)
        else:
            # Fix: was the Python 2 statement `print rst_output`, which is a
            # SyntaxError on Python 3.
            print(rst_output)
def get_plugin(name):
    """Return the loaded plugin whose name or one of whose aliases equals
    *name*; returns None when nothing matches."""
    for candidate in pluginloader.list_plugins():
        if name == candidate.name:
            return candidate
        for alias in getattr(candidate, 'alias', ()):
            if name == alias:
                return candidate
def get_target_description(name):
    """Return the target description matching *name*, or None when absent."""
    return next((target for target in list_target_descriptions()
                 if name == target.name), None)
def get_rst_from_target(target):
    """Render reStructuredText documentation for a target description."""
    parts = [underline(target.name, '~')]
    if hasattr(target, 'description'):
        parts.append(strip_inlined_text(target.description or ''))
    parts.append(underline('Device Parameters:', '-'))
    for params in (target.conn_params, target.platform_params,
                   target.target_params, target.assistant_params):
        parts.append(get_params_rst(params))
    parts.append('.. Note: For available runtime parameters please see the documentation')
    return ''.join(parts) + '\n'
def get_rst_for_global_config():
    """Render rst documentation for WA's global configuration points."""
    intro = ('These parameters control the behaviour of WA/run as a whole, they '
             'should be set inside a config file (either located in $WA_USER_DIRECTORY/config.yaml '
             'or one which is specified with -c), or into config/global section of the agenda.\n\n')
    config_points = MetaConfiguration.config_points + RunConfiguration.config_points
    return underline('Global Configuration') + intro + get_params_rst(config_points)
def get_rst_for_envars():
    """Render documentation for the environment variables recognised by WA."""
    envar_doc = '''WA_USER_DIRECTORY: str
This is the location WA will look for config.yaml, plugins, dependencies,
and it will also be used for local caches, etc. If this variable is not set,
the default location is ``~/.workload_automation`` (this is created when WA
is installed).
.. note.. This location must be writable by the user who runs WA.'''
    return underline('Environment Variables') + envar_doc
|
import datetime
import os
import random
from pathlib import Path
import discord
from discord.ext import commands
from util import post_stats_log
from util.var import website, get_blacklist_servers
class BotEvents(commands.Cog):
    """Cog handling guild join/leave announcements, bot-mention replies, and
    centralised command-error reporting."""

    def __init__(self, bot):
        self.bot = bot
        self.posting = post_stats_log.PostStats(self.bot)
        # Repository root; util/images_list.txt is resolved against it.
        self.base_dir = Path(__file__).resolve().parent.parent

    def _random_image_url(self):
        """Pick a random image URL (one per line) from util/images_list.txt.

        Fix: the original called open(...).readlines() inline and never closed
        the file handle; the context manager closes it deterministically.
        """
        with open(self.base_dir / os.path.join('util', 'images_list.txt'), 'r') as images:
            return random.choice(images.readlines())

    def _guild_summary_embed(self, guild, description, color):
        """Build the join/leave log embed shared by on_guild_join and
        on_guild_remove (was duplicated verbatim in both handlers)."""
        embed = discord.Embed(
            title=f'{guild.name}',
            color=color,
            description=description,
            timestamp=datetime.datetime.utcnow()
        )
        if guild.icon:
            embed.set_thumbnail(url=guild.icon_url)
        if guild.banner:
            embed.set_image(url=guild.banner_url_as(format="png"))
        embed.add_field(name='**Total Members**', value=guild.member_count)
        embed.add_field(name='**Bots**',
                        value=sum(1 for member in guild.members if member.bot))
        embed.add_field(name="**Region**",
                        value=str(guild.region).capitalize(), inline=True)
        embed.add_field(name="**Server ID**", value=guild.id, inline=True)
        return embed

    @staticmethod
    def _simple_error_embed(ctx, error):
        """Small 'Command Error!' embed used by several error branches
        (was duplicated four times)."""
        embed = discord.Embed(
            title="Command Error!", description=f"`{error}`", color=discord.Color.random())
        embed.set_footer(text=f"{ctx.author.name}")
        return embed

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        """Send a welcome message to the new guild and log the join."""
        try:
            message = 'The prefix is **m$** ,A full list of all commands is available by typing ```m$help```'
            e = discord.Embed(
                color=discord.Color.random(),
                title=self.bot.description,
                description=message,
                timestamp=datetime.datetime.utcnow()
            )
            e.set_image(url=self._random_image_url())
            e.set_thumbnail(url=self.bot.user.avatar_url)
            e.set_author(name='Hatsune Miku', url=website)
            await guild.system_channel.send(embed=e)
        except Exception:
            # Best effort: the guild may lack a system channel or deny sending.
            # (Fix: was a bare `except:`, which also swallowed SystemExit etc.)
            pass
        e34 = self._guild_summary_embed(guild, 'Added', discord.Color.green())
        c = self.bot.get_channel(844548967127973888)
        await c.send(embed=e34)
        await c.send(f'We are now currently at **{len(self.bot.guilds)} servers**')
        await self.posting.post_guild_stats_all()

    # when bot leaves the server
    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        """Log the departure in the guild-log channel and update posted stats."""
        e34 = self._guild_summary_embed(guild, 'Left', discord.Color.red())
        c = self.bot.get_channel(844548967127973888)
        await c.send(embed=e34)
        await c.send(f'We are now currently at **{len(self.bot.guilds)} servers**')
        await self.posting.post_guild_stats_all()

    # on message event
    @commands.Cog.listener()
    async def on_message(self, message):
        """Reply with the prefix when the bot itself is mentioned.

        NOTE(review): because `and` binds tighter than `or`, the clause after
        `or` bypasses the mentioned_in/mention_everyone checks — preserved
        as-is, but confirm the intended precedence.
        """
        if self.bot.user.mentioned_in(message) and message.mention_everyone is False and message.content.lower() in ('<@!840276343946215516>', '<@840276343946215516>') or message.content.lower() in ('<@!840276343946215516> prefix', '<@840276343946215516> prefix'):
            if not message.author.bot:
                await message.channel.send('The prefix is **m$** ,A full list of all commands is available by typing ```m$help```')

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Route command errors to user-facing embeds; unrecognised errors are
        additionally forwarded to the developer log channel."""
        if isinstance(error, commands.CommandOnCooldown):
            await ctx.channel.send(embed=self._simple_error_embed(ctx, error), delete_after=3)
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=self._simple_error_embed(ctx, error), delete_after=3)
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.channel.send(embed=self._simple_error_embed(ctx, error), delete_after=2)
        elif isinstance(error, commands.CommandNotFound):
            # Stay silent in blacklisted servers.
            if ctx.guild.id not in get_blacklist_servers():
                await ctx.channel.send(embed=self._simple_error_embed(ctx, error), delete_after=3)
        elif isinstance(error, commands.CommandInvokeError):
            e7 = discord.Embed(title="Oh no, I guess I have not been given proper access! Or some internal error",
                               description=f"`{error}`", color=discord.Color.random())
            e7.add_field(name="Command Error Caused By:",
                         value=f"{ctx.command}")
            e7.add_field(name="By", value=f"{ctx.author.name}")
            e7.set_thumbnail(url=self._random_image_url())
            e7.set_footer(text=f"{ctx.author.name}")
            await ctx.channel.send(embed=e7, delete_after=5)
        else:
            # Unknown error: tell the user and forward a report to the dev channel.
            c = self.bot.get_channel(844539081979592724)
            haaha = ctx.author.avatar_url
            e9 = discord.Embed(title="Oh no there was some error",
                               description=f"`{error}`", color=discord.Color.random())
            e9.add_field(name="**Command Error Caused By**",
                         value=f"{ctx.command}")
            e9.add_field(
                name="**By**", value=f"**ID** : {ctx.author.id}, **Name** : {ctx.author.name}")
            e9.set_thumbnail(url=f"{haaha}")
            e9.set_footer(text=f"{ctx.author.name}")
            await ctx.channel.send(embed=e9, delete_after=2)
            await c.send(embed=e9)
            await ctx.send('**Sending the error report info to my developer**', delete_after=2)
            e = discord.Embed(
                title=f'In **{ctx.guild.name}**', description=f'User affected {ctx.message.author}', color=discord.Color.red())
            if ctx.guild.icon:
                e.set_thumbnail(url=ctx.guild.icon_url)
            if ctx.guild.banner:
                e.set_image(url=ctx.guild.banner_url_as(format="png"))
            e.add_field(name='**Total Members**', value=ctx.guild.member_count)
            e.add_field(
                name='**Bots**', value=sum(1 for member in ctx.guild.members if member.bot))
            e.add_field(name="**Region**",
                        value=str(ctx.guild.region).capitalize(), inline=True)
            e.add_field(name="**Server ID**", value=ctx.guild.id, inline=True)
            await ctx.send('**Error report was successfully sent**', delete_after=2)
            await c.send(embed=e)
await c.send(embed=e)
def setup(bot):
    """discord.py extension entry point: register the BotEvents cog."""
    bot.add_cog(BotEvents(bot))
|
from matplotlib import pyplot as plt
__author__ = 'tonnpa'
class Oddball:
    """Egonet feature extraction and scatter plot in the spirit of OddBall.

    For each node, the egonet (the node plus its neighbours) is extracted and
    its node/edge counts recorded; plot() shows edge count vs. node count with
    the star (E = N - 1) and clique (E = N(N-1)/2) boundary curves, labelling
    nodes that fall in the extreme region of either axis.
    """

    def __init__(self, graph):
        """graph: a networkx-style graph (needs nodes(), neighbors(), subgraph())."""
        self._graph = graph
        self._egonetworks = dict((node, {}) for node in graph.nodes())
        self._run()

    @property
    def egonetworks(self):
        # node -> {'nodes': ..., 'edges': ...} feature dict
        return self._egonetworks

    @property
    def graph(self):
        return self._graph

    def _run(self):
        """Compute per-node egonet features, then show the plot."""
        for node in self.nodes():
            # extract egonetwork
            # Fix: neighbors() returns an iterator in networkx >= 2.0, so it
            # must be materialised before list concatenation.
            egonetwork = self.graph.subgraph(list(self.graph.neighbors(node)) + [node])
            self._egonetworks[node]['nodes'] = len(egonetwork.nodes())  # number of nodes in egonet
            self._egonetworks[node]['edges'] = len(egonetwork.edges())  # number of edges in egonet
        self.plot()

    def e_count(self, node):
        """Number of edges in *node*'s egonet."""
        return int(self.egonetworks[node]['edges'])

    def n_count(self, node):
        """Number of nodes in *node*'s egonet."""
        return int(self.egonetworks[node]['nodes'])

    def nodes(self):
        """All nodes of the underlying graph."""
        return self.graph.nodes()

    def plot(self, threshold=0.75):
        """Scatter-plot edge vs. node counts; nodes beyond *threshold* of
        either axis maximum are annotated with their label."""
        labels, nfeat, efeat = [], [], []
        for node in self.nodes():
            labels.append(node)
            nfeat.append(self.n_count(node))
            efeat.append(self.e_count(node))
        plt.title('Node vs. Edge Feature')
        plt.xlabel('#Nodes')
        plt.ylabel('#Edges')
        x_max = int(max(nfeat)*1.2)
        y_max = int(max(efeat)*1.2)
        plt.axis([0, x_max, 0, y_max])
        # thresholds identifying extremes: star lower bound and clique upper bound
        plt.plot([i for i in range(1, x_max)], [i-1 for i in range(1, x_max)])
        plt.plot([i for i in range(1, x_max)], [(i-1)*i/2 for i in range(1, x_max)])
        plt.scatter(nfeat, efeat, c='c')
        for label, x, y in zip(labels, nfeat, efeat):
            if x > x_max*threshold or y > y_max*threshold:
                plt.annotate(
                    label,
                    xy=(x, y), xytext=(-15, 15),
                    textcoords='offset points',
                    horizontalalignment='right',
                    verticalalignment='bottom',
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
                )
        plt.show()
def rank(numbers):
    """Return the 1-based ascending rank of each element of *numbers*.

    The smallest value gets rank 1.  Assumes the values are distinct, as in
    the sample calls below — TODO confirm against the exercise statement.
    (Fix: the original left the body unimplemented — a `def` line followed
    only by comments, which is a SyntaxError.)
    """
    ordered = sorted(numbers)
    return [ordered.index(value) + 1 for value in numbers]


# Write your code inside the rank function's block, minding indentation.
# You can use the lines below to test your code.
print(rank([21, 4, 13, 5, 8, 17, 23, 6, 22]))
print(rank([21, 4, 13, 5, 8, 17, 22, 6, 23]))
|
# some calculations about corona values
# start date somewhen in 3/2020
# added multiprocessing in 6/2020
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Process
# Process files with these suffixes: each one selects an input file named
# coronaData_<suffix>.csv (one region/country per file).
localSuffixes = ["BW", "BY", "GER", "WORLD", "US"]
# flexinterval: window length in days for the flexible reproduction-rate estimate
flexInt = 7
def processWorker(localSuffix):
print (f"------ processing {localSuffix} -----\n")
#set the filename to process
dataFile = "coronaData_" + localSuffix + ".csv"
# read the data
total = pd.read_csv(dataFile)
# calculate the difference of totals
i = 0
# StillInfected - no of people infected but not curated
# Diff - no of new infections from last value
# percent - percentage of new infections from StillInfected (ratio)
# DiffDiff - change of change (difference of changes from last change value to today)
AktDiff = pd.DataFrame(columns=['StillInfected', 'Diff', 'Percent', 'DiffDiff'])
lastDiff = 0
# go through all values and calculate the addiional kpis for each day
for x in range(len(total)):
# get actual Number of infected people
aktval=total.iloc[i]['Total']
if (i != 0):
lastval=total.iloc[i-1]['Total']
else:
lastval = 0
# get actual Number of curated people
aktCurated=total.iloc[i]['Curated']
if (i != 0):
lastCurated=total.iloc[i-1]['Curated']
else:
lastCurated = 0
# number of deaths
aktDeaths=total.iloc[i]['Deaths']
# calculate number of still Infected
stillInfected = aktval - aktCurated - aktDeaths
# calculate difference between last value and actual value
diff=aktval-lastval
lastStillInfected = lastval - lastCurated
# calculate percentage of change (how much new infected people in relation to still infected) in relation to last value
if (lastStillInfected != 0):
percent=(diff) / lastStillInfected *100
else:
#if first value, set to zero
percent=0
# calculate difference of difference (2nd diff)
DiffDiff = diff-lastDiff
lastDiff = diff
# calculate relative rate to last week (assumption: incubation time approx 5 days, people who are positive will not infect more)
if (i > 7):
infectedLastWeek = AktDiff.iloc[i-5]['StillInfected']
if (infectedLastWeek != 0):
relInfectedToLastWeek = diff / infectedLastWeek *100
else:
relInfectedToLastWeek = 0
else:
infectedLastWeek = 0
relInfectedToLastWeek = 0
# copy last 30 days in container relInfectedToLastWeek30
if (i > len(total)-20):
relInfectedToLastWeek20 = relInfectedToLastWeek
else:
relInfectedToLastWeek20 = 0
# calculate "reproduction" rate
if (i > 7):
#sum of new Infections day -8 to -5
sum4Infected1 = 0
#sum of new Infections day -4 to -1
sum4Infected2 = 0
# calculate the sum of new infections day -8 to -5 (they might not know that they are infected the day before)
# be careful with the index! we are in day -1, because the array is not yet written so index (i) is today, but not present
for k in range(4):
sum4Infected1 =sum4Infected1 + AktDiff.iloc[i-k-5]['Diff']
# calculate the sum of new infections day -4 to -1 (they might not know that they are infected the day before)
for k in range(3):
sum4Infected2 =sum4Infected2 + AktDiff.iloc[i-k-1]['Diff']
# add the number for today
sum4Infected2 = sum4Infected2 + diff
# calculate reproduction rate
reproRate = sum4Infected2 / sum4Infected1
else:
reproRate = 0
# copy last 30 days in container reproRate30
if (i > len(total)-20):
reproRate20 = reproRate
else:
reproRate20 = 0
# calculate "reproductionflex" rate
if (i > (2*flexInt)-1):
#sum of new Infections day -2xflexint to -flexint
sumFlexInfected1 = 0
#sum of new Infections day flexint to -1
sumFlexInfected2 = 0
# calculate the sum of new infections day -8 to -5 (they might not know that they are infected the day before)
# be careful with the index! we are in day -1, because the array is not yet written so index (i) is today, but not present
for k in range(flexInt):
sumFlexInfected1 =sumFlexInfected1 + AktDiff.iloc[i-k-flexInt-1]['Diff']
# calculate the sum of new infections day -4 to -1 (they might not know that they are infected the day before)
for k in range(flexInt-1):
sumFlexInfected2 =sumFlexInfected2 + AktDiff.iloc[i-k-1]['Diff']
# add the number for today
sumFlexInfected2 = sumFlexInfected2 + diff
# calculate reproduction rate
reproFlexRate = sumFlexInfected2 / sumFlexInfected1
else:
reproFlexRate = 0
# copy last 30 days in container reproRate30
if (i > len(total)-20):
reproFlexRate20 = reproFlexRate
else:
reproFlexRate20 = 0
AktDiff = AktDiff.append({'reproRate' : reproRate, 'reproRate20' : reproRate20,'reproFlexRate' : reproFlexRate, 'reproFlexRate20' : reproFlexRate20, 'relInfectedToLastWeek' : relInfectedToLastWeek, 'relInfectedToLastWeek20' : relInfectedToLastWeek20, 'StillInfected' : stillInfected, 'Diff' : diff, 'Percent': percent, 'DiffDiff' : DiffDiff}, ignore_index=True)
i=i+1
# add the calculated kpis to the values matrix
outValue = pd.concat([total , AktDiff], axis=1)
# create some nice charts
SMALL_SIZE = 6
MEDIUM_SIZE = 8
BIGGER_SIZE = 8
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# get values of last date in strings
lastDate = str(outValue.iloc[-1]['Date'])
lastTotal = str(outValue.iloc[-1]['Total'])
lastCurated = str(outValue.iloc[-1]['Curated'])
lastDeaths = str(outValue.iloc[-1]['Deaths'])
lastStillInfected = f"{outValue.iloc[-1]['StillInfected']:.0f}"
print(f"Last date: {lastDate}\n")
# ------------ first chart (containing of 4 subplots) ----------------
fig, axes = plt.subplots(4,1)
axes[0].xaxis.set_visible(False)
axes[1].xaxis.set_visible(False)
axes[2].xaxis.set_visible(False)
# subchart 1: total
outValue.plot(x='Date', y='Total', ax=axes[0], title='data taken from Berliner Morgenpost - ' + dataFile + " (" + lastDate + ")" +'\n\nAnzahl Infizierte (akt. Wert: ' + lastTotal + ")")
axes[0].get_legend().remove()
# subchart 2: curated
outValue.plot(x='Date', y='Curated', color='green', ax=axes[1], title='Anzahl wieder gesund (akt. Wert: ' + lastCurated + ")")
axes[1].get_legend().remove()
# subchart 3: deaths
outValue.plot(x='Date', y='Deaths', color='black', ax=axes[2], title='Anzahl Tote (akt. Wert: ' + lastDeaths + ")")
axes[2].get_legend().remove()
# subchart 4: stillInfected
outValue.plot(x='Date', y='StillInfected', color='red', ax=axes[3], title='Anzahl noch infiziert (akt. Wert: ' + lastStillInfected + ")")
axes[3].get_legend().remove()
# autoformat the layout to fit
plt.tight_layout()
# save it in a file
plt.savefig("Absolute_Values_" + localSuffix + ".png", dpi=300)
# ------------ second chart (containing of 3 subplots): the calculated values -------------
# get values of last date in strings
lastPercent = f"{outValue.iloc[-1]['Percent']:.2f}"
lastDiff = f"{outValue.iloc[-1]['Diff']:.0f}"
lastDiffDiff = f"{outValue.iloc[-1]['DiffDiff']:.0f}"
fig, axes = plt.subplots(3,1)
axes[0].xaxis.set_visible(False)
axes[1].xaxis.set_visible(False)
outValue.plot.bar(x='Date', y='Percent', color='red', ax = axes[0], title='data taken from Berliner Morgenpost - ' + dataFile +" (" + lastDate + ")"+ '\n\nNeuinfektionen in Prozent zur Gesamtzahl der Infizierten (akt. Wert: ' + lastPercent + ")")
outValue.plot.bar(x='Date', y='Diff', color='blue', ax = axes[1], title='Unterschied zum Vortag (absolut) (akt. Wert: ' + lastDiff + ")")
outValue.plot.bar(x='Date', y='DiffDiff', color='blue', ax = axes[2], title='Unterschiedsänderung zum Vortag (absolut) (akt. Wert: ' + lastDiffDiff + ")")
plt.tight_layout()
# save it in a file
plt.savefig("Relative_Values_" + localSuffix + ".png", dpi=300)
##
#fig, axes = plt.subplots(2,1)
# subplots with full nd second with only last 30 day due to scaling
fig, axes = plt.subplots(4,1)
axes[0].xaxis.set_visible(False)
axes[1].xaxis.set_visible(False)
axes[2].xaxis.set_visible(False)
lastRelInfectedToLastWeek = f"{outValue.iloc[-1]['relInfectedToLastWeek']:.2f}"
reproRate = f"{outValue.iloc[-1]['reproRate']:.2f}"
outValue.plot.bar(x='Date', y='relInfectedToLastWeek', ax = axes[0] ,color='red', title='data taken from Berliner Morgenpost - ' + dataFile +" (" + lastDate + ")" +'\n\nRelativer Wert der Änderung zum Wert vor 5 Tagen (akt. Wert: ' + lastRelInfectedToLastWeek + ")")
outValue.plot.bar(x='Date', y='relInfectedToLastWeek20', ax = axes[1] ,color='red', title='Relativer Wert der Änderung zum Wert vor 5 Tagen (letzte 20 Tageswerte) (akt. Wert: ' + lastRelInfectedToLastWeek + ")")
outValue.plot.bar(x='Date', y='reproRate', ax = axes[2] ,color='blue', title='Reproduktionsrate Intervall 4 Tage (akt. Wert: ' + reproRate + ")")
outValue.plot.bar(x='Date', y='reproRate20', ax = axes[3] ,color='blue', title='Reproduktionsrate Intervall 4 Tage (letzte 20 Tage)(akt. Wert: ' + reproRate + ")")
plt.tight_layout()
# save it in a file
plt.savefig("Relative_Values_LastWeek" + localSuffix + ".png", dpi=300)
# reprorates
fig, axes = plt.subplots(4,1)
axes[0].xaxis.set_visible(False)
axes[1].xaxis.set_visible(False)
axes[2].xaxis.set_visible(False)
reproRate = f"{outValue.iloc[-1]['reproRate']:.2f}"
reproFlexRate = f"{outValue.iloc[-1]['reproFlexRate']:.2f}"
outValue.plot.bar(x='Date', y='reproFlexRate', ax = axes[0] ,color='red', title='data taken from Berliner Morgenpost - ' + dataFile +" (" + lastDate + ")" +'\n\nReprorate ' + " (Interval: " + f"{flexInt} Tage) :" + reproFlexRate )
outValue.plot.bar(x='Date', y='reproFlexRate20', ax = axes[1] ,color='red', title='Reproduktionsrate Intervall ' + f"{flexInt}" + ' Tage (letzte 20 Tageswerte)')
outValue.plot.bar(x='Date', y='reproRate', ax = axes[2] ,color='blue', title='Reproduktionsrate Intervall 4 Tage (akt. Wert: ' + reproRate + ")")
outValue.plot.bar(x='Date', y='reproRate20', ax = axes[3] ,color='blue', title='Reproduktionsrate Intervall 4 Tage (letzte 20 Tage)(akt. Wert: ' + reproRate + ")")
plt.tight_layout()
# save it in a file
plt.savefig("Reprorate" + localSuffix + ".png", dpi=300)
print(f"Suffix {localSuffix} done.")
return
if __name__ == '__main__':
    # Fan out one worker process per configured suffix, then wait for all.
    workers = []
    for localSuffix in localSuffixes:
        print(f"starting process for suffix {localSuffix}")
        worker = Process(target=processWorker, args=(localSuffix,))
        worker.start()
        workers.append(worker)
    # Every worker is running; block until each one has finished.
    for worker in workers:
        worker.join()
|
# -*- coding: utf-8 -*
from urllib.request import Request, urlopen
import json
# All MiningPoolHub coin-pool subdomains this script can query, in the order
# presented by menu_coin() (index 33 == "zcoin", the default below).
Coin=("adzcoin", "auroracoin-qubit", "bitcoin", "bitcoin-cash", "bitcoin-gold", "dash", "digibyte-groestl",
"digibyte-qubit", "digibyte-skein", "electroneum", "ethereum", "ethereum-classic", "expanse", "feathercoin",
"gamecredits", "geocoin", "globalboosty", "groestlcoin", "litecoin","maxcoin", "monacoin","monero","musicoin",
"myriadcoin-groestl","myriadcoin-skein", "myriadcoin-yescrypt", "sexcoin", "siacoin", "startcoin", "verge-scrypt",
"vertcoin", "zcash", "zclassic", "zcoin", "zencash")
# Personal API key (left empty here); appended verbatim to every request URL.
ApiData= ""
Api= "&api_key="+ApiData
# MiningPoolHub API action names, in the order presented by menu_action()
# (index 18 == "getuserbalance", the default below).
Action=("getminingandprofitsstatistics", "getautoswitchingandprofitsstatistics", "getuserallbalances", "getblockcount","getblocksfound",
"getblockstats", "getcurrentworkers","getdashboarddata","getdifficulty", "getestimatedtime", "gethourlyhashrates","getnavbardata",
"getpoolhashrate", "getpoolinfo", "getpoolsharerate", "getpoolstatus", "gettimesincelastblock" ,"gettopcontributors", "getuserbalance",
"getuserhashrate", "getusersharerate", "getuserstatus", "getusertransactions", "getuserworkers", "public")
def menu_coin():
    """Choose a coin index into ``Coin``, unless a module-level default exists.

    Uses a NameError probe on the global ``c``: if ``c`` was pre-assigned at
    module level the menu is skipped and the default is announced instead.
    """
    global c
    try:
        c  # NameError here means no default coin was pre-set
    except NameError:
        for index, group in enumerate(Coin):
            print("%s: %s" % (index, group))
        c = int(input("coin to choose: "))
        print("selected: ", Coin[c])
    else:
        print("default coin: ", Coin[c])
def menu_action():
    """Choose an API action index into ``Action``, unless a default exists.

    Same NameError-probe pattern as menu_coin(), but on the global ``a``.
    """
    global a
    try:
        a  # NameError here means no default action was pre-set
    except NameError:
        for index, group in enumerate(Action):
            print("%s: %s" % (index, group))
        a = int(input("action: "))
        print("selected:",Action[a])
    else:
        print("default Action: ", Action[a])
def fonction(c):
    """Build the MiningPoolHub URL for coin ``c`` and the globally selected
    action ``a``, fetch it, and print the parsed JSON response."""
    Url="https://"+Coin[c]+".miningpoolhub.com/index.php?page=api&action="+Action[a]+Api
    print("url:", Url)
    # A browser-like User-Agent avoids being rejected as a script client.
    Req = Request(Url, headers={'User-Agent': 'Mozilla/5.0'})
    Webpage = urlopen(Req).read()
    jsonToPython = json.loads(Webpage)
    print(jsonToPython)
# Hard-coded defaults: Coin[33] == "zcoin", Action[18] == "getuserbalance".
# Removing either assignment re-enables the corresponding interactive menu.
c=33 #comment to enable coin menu selection
a=18 #comment to enable action menu selection
menu_coin()
menu_action()
print()
fonction(c)
# -*- coding: utf-8 -*-
import logging
import click
from ..hello import HelloBase, HelloHTML
@click.command()
@click.option(
    "-f",
    "--format",
    "output_format",
    type=click.Choice(
        ["plain", "html"],
        case_sensitive=False
    ),
    help=(
        "Select output format, default to 'plain'."
    ),
    default="plain",
)
@click.option(
    "-c",
    "--container",
    help=(
        "Define the HTML container name to use around text. "
        "Default to 'p' to make a paragraph."
    ),
    default=None,
)
@click.argument("name", required=False)
@click.pass_context
def greet_command(context, output_format, container, name):
    """
    Greet someone or something.
    """
    logger = logging.getLogger("video-registry")
    # Lazy %-style arguments avoid formatting work when DEBUG is disabled.
    logger.debug("Required format: %s", output_format)
    logger.debug("Required container: %s", container)
    # A container is an HTML concept; warn when it is combined with plain text.
    if output_format == "plain" and container:
        logger.warning("Defining a HTML container in plain format has no sense.")
    if name == "ass":
        logger.critical("Please do not be so crude.")
        raise click.Abort()
    # Pick the greeting builder matching the requested output format.
    builder_cls = HelloHTML if output_format == "html" else HelloBase
    builder = builder_cls(
        name=name,
        container=container,
    )
    click.echo(builder.greet())
|
import os
from subprocess import PIPE, Popen
from time import sleep
from mininet.net import Mininet
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from p4_mininet import P4Switch, P4Host
from p4runtime_switch import P4RuntimeSwitch
import conf
from topology import Topology
from compile_switch import run_compile_bmv2
def configure_switch(queue_rate):
    """Set the bmv2 egress queue rate by piping a command to simple_switch_CLI.

    :param queue_rate: value passed to the CLI's ``set_queue_rate`` command.
    """
    info(' ')
    info("Configuring switch")
    # universal_newlines=True lets communicate() accept a str on Python 3
    # (where a bare PIPE otherwise requires bytes and this call would raise
    # a TypeError); it is harmless under Python 2.
    proc = Popen(["simple_switch_CLI"], stdin=PIPE, universal_newlines=True)
    proc.communicate(input="set_queue_rate " + str(queue_rate))
    info("Configuration complete")
    info(' ')
def run():
    """Build the P4 topology, start mininet, set static ARP/routes on every
    host, configure the switch queue rate, then drop into the mininet CLI.

    NOTE(review): uses ``xrange``, so this module targets Python 2.
    """
    # output_file = run_compile_bmv2(conf.SWITCH_PROGRAM_PATH)
    num_hosts = conf.NUM_HOSTS
    topo = Topology(conf.BEHAVIORAL_EXE,
                    None,
                    conf.LOG_FILE,
                    conf.THRIFT_PORT,
                    conf.PCAP_DUMP,
                    num_hosts,
                    conf.NOTIFICATIONS_ADDR)
    info('Topology generated\n')
    net = Mininet(topo = topo,
                  host = P4Host,
                  switch = P4RuntimeSwitch,
                  controller = None)
    info('Network configuration generated\n')
    info('Starting network')
    net.start()
    info('Network started\n')
    # One static gateway MAC/IP per host: host h(n+1) gets ARP entry for
    # 10.0.n.1 / 00:aa:bb:00:00:nn and a default route via that address.
    sw_mac = ["00:aa:bb:00:00:%02x" % n for n in xrange(num_hosts)]
    sw_addr = ["10.0.%d.1" % n for n in xrange(num_hosts)]
    for n in xrange(num_hosts):
        h = net.get('h%d' % (n + 1))
        h.setARP(sw_addr[n], sw_mac[n])
        h.setDefaultRoute("dev %s via %s" % (h.defaultIntf().name, sw_addr[n]))
        h.describe(sw_addr[n], sw_mac[n])
    # Give the switch a moment to come up before talking to its CLI.
    sleep(1)
    configure_switch(10)
    CLI( net )
    print('hello')
    net.stop()
# Script entry: enable mininet 'info' logging, then build and run the network.
setLogLevel('info')
run()
|
'''
Multithreaded realtime detection uses a VideoStream class
For some reason, inference time is slower than gstreamer.
'''
from edgetpu.detection.engine import DetectionEngine
from PIL import Image
import argparse
import imutils
from threading import Thread
from queue import LifoQueue
import time
import cv2
# default confidence threshold is 0.4
# CLI arguments: model path, label map path and detection confidence cut-off.
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model",
    default="models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite",
    help="path to TensorFlow Lite object detection model")
ap.add_argument("-l", "--labels",
    default="models/coco_labels.txt",
    help="path to labels file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# Video Stream class, creates a LIFO queue
class VideoStream:
    """Threaded webcam reader backed by a LIFO queue.

    A LIFO queue means read() returns the most recently captured frame, so
    slow consumers always see fresh frames rather than a growing backlog.
    Side effect: __init__ publishes the capture FPS/width/height through the
    module-level globals capfps/capwid/caphei.
    """
    # initialize the file video stream
    def __init__(self, queueSize=128):
        global capfps
        global capwid
        global caphei
        # Device 0 == default camera.
        cap = cv2.VideoCapture(0)
        capfps = cap.get(cv2.CAP_PROP_FPS)
        capwid = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        caphei = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.stream = cap
        self.stopped = False
        # initialize the queue
        self.Q = LifoQueue(maxsize=queueSize)
    # thread to read frames from stream
    def start(self):
        """Start the background reader thread; returns self for chaining."""
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        """Reader loop: push frames into the queue until stopped or EOF."""
        while True:
            if self.stopped:
                return
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
                # stop video if end of video file
                if not grabbed:
                    self.stop()
                    return
                # add the frame to the queue
                self.Q.put(frame)
    def read(self):
        # return next frame in the queue (blocks if the queue is empty)
        return self.Q.get()
    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0
    def clearQ(self):
        # empty the queue so it doesn't hit max size
        # (touches the queue internals directly, guarded by its mutex)
        with self.Q.mutex:
            self.Q.queue.clear()
        return self.Q.empty()
    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
# parse labels file, load into directory.
print("[INFO] parsing class labels...")
labels = {}
# Each line is "<numeric id> <label>"; maxsplit=1 keeps multi-word labels intact.
for row in open(args["labels"]):
    (classID, label) = row.strip().split(maxsplit=1)
    labels[int(classID)] = label.strip()
# load the Google Coral object detection model
print("[INFO] loading Coral model...")
model = DetectionEngine(args["model"])
# initialize the pi camera video stream`
vs = VideoStream().start()
# Give the camera sensor time to warm up before consuming frames.
time.sleep(2.0)
# loop over the frames from the video stream
print("[INFO] looping over frames...")
# NOTE(review): the loop ends as soon as the queue is momentarily empty
# (vs.more() is False), which can happen if inference outpaces capture.
while vs.more():
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 500 pixels
    # also, clear the queue
    frame = vs.read()
    # vs.clearQ()
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5)
    orig = frame.copy()
    # prepare the frame for object detection by converting (1) it
    # from BGR to RGB channel ordering and then (2) from a NumPy
    # array to PIL image format
    # NOTE(review): the BGR->RGB conversion is commented out, so the model
    # receives BGR data -- confirm this is intended.
    #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = Image.fromarray(frame)
    # make predictions on the input frame
    start = time.time()
    results = model.detect_with_image(frame, threshold=args["confidence"],
        keep_aspect_ratio=True, relative_coord=False)
    end = time.time()
    print("Detection time: " + str(end-start))
    # loop over the results
    for r in results:
        # extract the bounding box and box and predicted class label
        box = r.bounding_box.flatten().astype("int")
        (startX, startY, endX, endY) = box
        label = labels[r.label_id]
        # draw the bounding box and label on the image
        cv2.rectangle(orig, (startX, startY), (endX, endY),
            (0, 255, 0), 2)
        # place the text above the box, or below it near the top edge
        y = startY - 15 if startY - 15 > 15 else startY + 15
        text = "{}: {:.2f}%".format(label, r.score * 100)
        cv2.putText(orig, text, (startX, y),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    # show the output frame and wait for a key press
    cv2.imshow("Frame", orig)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
cv2.destroyAllWindows()
vs.stop()
|
class Flower:
    """A flower whose colour defaults to 'unknown' at class level."""
    color = 'unknown'

# Two instances whose colour overrides the class default.
rose = Flower()
rose.color = "red"
violet = Flower()
violet.color = "green"
this_pun_is_for_you = "I don't sleep at night cause I'm thinking of you"
print(f"Roses are {rose.color},")
print(f"violets are {violet.color},")
print(this_pun_is_for_you)
# A class represents and defines a concept, while an object is a specific
# instance of a class.
class Furniture:
    """A piece of furniture described by its colour and material."""
    color = ""
    material = ""

table = Furniture()
table.color = "brown"
table.material = "wood"
couch = Furniture()
couch.color = "red"
couch.material = "leather"

def describe_furniture(piece):
    """Return a sentence naming the piece's colour and material."""
    return f"This piece of furniture is made of {piece.color} {piece.material}"

print(describe_furniture(table))
# Should be "This piece of furniture is made of brown wood"
print(describe_furniture(couch))
# Should be "This piece of furniture is made of red leather"
class Dog:
    """A dog whose age is stored in human years."""
    years = 0

    def dog_years(self):
        """Convert the dog's age to dog years (seven per human year)."""
        return 7 * self.years

fido = Dog()
fido.years = 3
print(fido.dog_years())
class Apple:
    """An apple described by its colour and flavour."""

    def __init__(self, color, flavor):
        """Store the apple's colour and flavour."""
        self.color = color
        self.flavor = flavor

    # Without __str__, print() would fall back to the default repr, e.g.
    # jonagold <__main__.Apple object at 0x0000024E4DE5AA00>,
    # which shows the object's position in memory.
    def __str__(self):
        """Return a human-readable description of the apple."""
        return f"This apple is {self.color} and its flavor is {self.flavor}"

jonagold = Apple("red", "sweet")
print('jonagold.color', jonagold.color)  # correct
print('jonagold', jonagold)
class Person:
    """A person known by a single name."""

    def __init__(self, name):
        self.name = name

    def greeting(self):
        """Outputs a message with the name of the person"""
        # Should return "hi, my name is " followed by the name of the Person.
        return f"hi, my name is {self.name}"

# Create a new instance with a name of your choice
some_person = Person("Ruslan")
# Call the greeting method
print(some_person.greeting())
#print(help(Person))
class Animal:
    """Base class: an animal that announces itself with its sound."""
    sound = ""

    def __init__(self, name):
        self.name = name

    def speak(self):
        """Print the animal's sound on both sides of its name."""
        print(f"{self.sound} I'm {self.name}! {self.sound}")

class Piglet(Animal):
    sound = "Oink!"

hamlet = Piglet("Hamlet")
hamlet.speak()

class Cow(Animal):
    sound = "Moooo"

milke = Cow("Milky White")
milke.speak()
class Clothing:
    """Base class for a named piece of clothing made of some material."""
    material = ""

    def __init__(self, name):
        self.name = name

    def checkmaterial(self):
        """Print which material this piece of clothing is made of."""
        print(f"This {self.name} is made of {self.material}")

class Shirt(Clothing):
    material = "Cotton"

polo = Shirt("Polo")
polo.checkmaterial()
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order File records by title and add nullable pixel-dimension fields."""
    dependencies = [
        ('media', '0003_file_alt_text'),
    ]
    operations = [
        # Default queryset ordering for the File model.
        migrations.AlterModelOptions(
            name='file',
            options={'ordering': ['title']},
        ),
        # Image dimensions; nullable so existing rows need no backfill.
        migrations.AddField(
            model_name='file',
            name='height',
            field=models.PositiveSmallIntegerField(null=True, blank=True, default=0),
        ),
        migrations.AddField(
            model_name='file',
            name='width',
            field=models.PositiveSmallIntegerField(null=True, blank=True, default=0),
        ),
    ]
|
# Documentation can be found via
# https://microbit-micropython.readthedocs.io
# Load routines that allow us to use the microbit
from microbit import *
# load radio library if we need it
# import radio
# load neopixel library if we need it
# import neopixel
# --- User hooks: fill these in; they are invoked from the main loop below.
def forever():
    """
    Code to run again and again
    """
    pass
def on_button_A():
    """
    Code to run when button A is pressed on its own
    """
    pass
def on_button_B():
    """
    Code to run when button B is pressed on its own
    """
    pass
def on_button_AB():
    """
    Code to run when button A+B are pressed together
    """
    pass
def on_shake():
    """
    Code to run when the accelerometer detects the
    microbit is being shaken
    """
    pass
# Code to run on start
display.scroll('Hello!')
# end of on start code
# main program loop
while True:
    # forever code
    forever()
    # buttons -- the combined A+B case is tested first so it is not
    # misreported as a single-button press
    if button_a.is_pressed() and button_b.is_pressed():
        on_button_AB()
    elif button_a.is_pressed():
        on_button_A()
    elif button_b.is_pressed():
        on_button_B()
    # shake
    if accelerometer.is_gesture('shake'):
        on_shake()
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Row, Column
class DemoForm(forms.Form):
    """Single-field demo form rendered with a crispy-forms grid layout."""

    email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'E-mail address'}))

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Row(
                # Layout field names must match the form field name exactly:
                # the field is declared as 'email', so 'Email' (the original
                # value) would not bind to any field.
                Column('email', css_class='form-group col-md-6 mb-0'),
            )
        )
|
"""
Audio decoder implemented with BASS dll.
"""
import ctypes
import ctypes.util
from typing import io
import numpy
# Load the BASS shared library from the platform's standard search path.
BASS = ctypes.cdll.LoadLibrary(ctypes.util.find_library('bass'))
class AudioProcessError(Exception):
    """Raised when decoding audio through the BASS library fails."""
def decode_audio(audio_file: io):
    """
    Decode audio from file-like object `audio_file`.

    The whole file is handed to BASS as an in-memory stream and decoded to
    32-bit float samples at 44.1 kHz.

    Parameters
    ----------
    audio_file
        Binary file-like object supporting ``read()``.
        NOTE(review): the ``io`` annotation comes from ``from typing import
        io``, which is not a usable type (removed in Python 3.12); consider
        ``typing.BinaryIO`` instead.

    Returns
    -------
    numpy.ndarray
        1-D float32 sample array.

    Raises
    ------
    AudioProcessError
        On BASS init/stream/length failure or non-44.1 kHz input.
    """
    data = audio_file.read()
    # NOTE(review): BASS_Init normally takes (device, freq, flags, ...);
    # only one argument is passed here -- confirm against the BASS headers.
    if not BASS.BASS_Init(0):
        raise AudioProcessError("BASS Error: Initialization failed.")
    # Flags are Float, Mono, Decode, Prescan
    bass_stream = BASS.BASS_StreamCreateFile(
        True, data, 0, len(data), 0x220102)
    if bass_stream == 0:
        raise AudioProcessError("BASS Error: Failed to create stream with error code {}."
                                .format(BASS.BASS_ErrorGetCode()))
    # Flag is Frequency
    freq = ctypes.c_float()
    BASS.BASS_ChannelGetAttribute(bass_stream, 0x1, ctypes.byref(freq))
    if abs(freq.value - 44100) > 1e-3:
        raise AudioProcessError("Audio with non-44.1k sample rate not supported.")
    # Flag is Bytes
    decoded_len = BASS.BASS_ChannelGetLength(bass_stream, 0)
    if decoded_len == -1:
        raise AudioProcessError("BASS Error: Failed to get decoded length with error code {}."
                                .format(BASS.BASS_ErrorGetCode()))
    # 4 bytes per float sample; require a C-contiguous writable buffer so
    # BASS can fill it directly.
    result = numpy.require(numpy.zeros(decoded_len // 4),
                           dtype='f', requirements=['A', 'O', 'W', 'C'])
    BASS.BASS_ChannelGetData(
        bass_stream, result.ctypes.data_as(ctypes.c_void_p), decoded_len)
    BASS.BASS_StreamFree(bass_stream)
    BASS.BASS_Free()
    return result
|
import numpy as np
import jsonpickle
class NumpyHandler(jsonpickle.handlers.BaseHandler):
    """
    Serialise numpy arrays/matrices by storing their repr().

    NOTE(review): the original docstring said "numpy float", but this
    handler is registered for np.ndarray and np.matrix.
    """
    def flatten(self, obj, data):
        # Store the printable repr; restore() eval()s it back.
        # NOTE(review): repr() truncates large arrays, so big arrays will
        # not round-trip losslessly.
        data['repr'] = repr(obj)
        return data
    def restore(self, data):
        # SECURITY NOTE(review): eval() on serialised input executes
        # arbitrary code if the JSON comes from an untrusted source.
        return eval('np.' + data['repr'])
class NumpyFloatHandler(jsonpickle.handlers.BaseHandler):
    """
    Automatic conversion of numpy float to python floats
    Required for jsonpickle to work correctly
    """
    def flatten(self, obj, data):
        # Store the numpy scalar as a plain Python float.
        data['value'] = float(obj)
        return data
    def restore(self, data):
        # Rebuild an np.float64 from the stored Python float.
        return np.float64(float(data['value']))
def register_handlers():
    """Register the numpy handlers with jsonpickle's global registry.

    Returns True once all three handlers are registered.
    """
    # Was `print '!!'` -- Python 2 print-statement syntax, which is a
    # SyntaxError on Python 3. Kept as a call so the debug marker still
    # appears on stdout.
    print('!!')
    jsonpickle.handlers.registry.register(np.ndarray, NumpyHandler)
    jsonpickle.handlers.registry.register(np.matrix, NumpyHandler)
    jsonpickle.handlers.registry.register(np.float64, NumpyFloatHandler)
    return True
|
import typing
import jk_prettyprintobj
from .MWPageContent import MWPageContent
from .MWTimestamp import MWTimestamp
from .MWPageRevision import MWPageRevision
from .MWNamespaceInfo import MWNamespaceInfo
class MWPage(jk_prettyprintobj.DumpMixin):
    """A MediaWiki page: title, namespace, page ID and its main revision."""

    def __init__(self, title:str, searchTitle:typing.Union[str,None], namespace:MWNamespaceInfo, pageID:int, mainRevision:MWPageRevision):
        assert isinstance(title, str)
        self.title = title
        # Fall back to the display title when no separate search title is given.
        if searchTitle is None:
            self.searchTitle = title
        else:
            assert isinstance(searchTitle, str)
            self.searchTitle = searchTitle
        assert isinstance(namespace, MWNamespaceInfo)
        self.namespace = namespace
        assert isinstance(pageID, int)
        self.pageID = pageID
        assert isinstance(mainRevision, MWPageRevision)
        self.mainRevision = mainRevision
    #

    def _dumpVarNames(self) -> list:
        """Attribute names that jk_prettyprintobj should dump."""
        return [
            "title",
            "searchTitle",
            "namespace",
            "pageID",
            "mainRevision",
        ]
    #
#
|
# Generated by Django 3.0.5 on 2020-05-16 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a free-text notes field to StockItemTestResult."""
    dependencies = [
        ('stock', '0040_stockitemtestresult'),
    ]
    operations = [
        migrations.AddField(
            model_name='stockitemtestresult',
            name='notes',
            field=models.CharField(blank=True, help_text='Test notes', max_length=500, verbose_name='Notes'),
        ),
    ]
|
from unittest import TestCase
# Module metadata.
__author__ = 'pdoren'
__project__ = 'DeepEnsemble'
class TestITLFunctions(TestCase):
    """Numerical checks for deepensemble's ITL (information-theoretic
    learning) utilities against hand-computed numpy estimators.

    The ITLFunctions results are symbolic (they expose .eval()), so each
    test compares an .eval()'d value with a plain-numpy re-implementation.
    """
    # noinspection PyStringFormat,PyTypeChecker
    def test_cross_information_potential(self):
        from deepensemble.utils.utils_functions import ITLFunctions
        import numpy as np
        N = 50
        n_classes = 2
        # Random binary targets; y2 is y1 with the first 80% of labels flipped.
        y1 = np.squeeze(np.random.binomial(1, 0.5, (N, n_classes)))
        y2 = y1.copy()
        m = int(0.8 * N)
        y2[:m] = 1 - y2[:m]
        # Silverman's rule-of-thumb kernel bandwidth.
        s = 1.06 * np.std(y1) * (len(y1)) ** (-0.2)
        if n_classes > 1:
            Y = [y1, y2]
        else:
            Y = [y1[:, np.newaxis], y2[:, np.newaxis]]
        # Pairwise sample-difference tensors, one per target set.
        DY = []
        for y in Y:
            dy = np.tile(y, (len(y), 1, 1))
            dy = dy - np.transpose(dy, axes=(1, 0, 2))
            DY.append(dy)
        # Gaussian kernel evaluated on each pairwise-difference tensor.
        DYK = []
        for dy in DY:
            DYK.append(ITLFunctions.kernel_gauss_diff(dy, np.sqrt(2) * s).eval())
        p1 = np.prod(np.array([dyk for dyk in DYK]), axis=0)
        self.assertTrue(p1.size == N ** 2, 'Problem V_J2 (%g != %g)' % (p1.size, N ** 2))
        # V_J: joint information potential (mean over all N^2 pairs).
        V_J2 = np.mean(p1)
        V_k_i = []
        for dyk in DYK:
            V_k_i.append(np.mean(dyk, axis=0))
        V_k = [np.mean(V_i) for V_i in V_k_i]
        p2 = np.prod(V_k_i, axis=0)
        self.assertTrue(p2.size == N, 'Problem V_nc2 (%g != %g)' % (p2.size, N))
        # V_nc: cross information potential; V_M: product of marginal potentials.
        V_nc2 = np.mean(p2)
        V_M2 = np.prod(V_k)
        V_nc1, V_J1, V_M1 = ITLFunctions.get_cip([Y[1]], Y[0], s=s)
        self.assertTrue(abs(V_nc1.eval() - V_nc2) < 0.00001, 'Problem V_nc (%g != %g)' % (V_nc1.eval(), V_nc2))
        self.assertTrue(abs(V_J1.eval() - V_J2) < 0.00001, 'Problem V_J (%g != %g)' % (V_J1.eval(), V_J2))
        self.assertTrue(abs(V_M1.eval() - V_M2) < 0.00001, 'Problem V_M (%g != %g)' % (V_M1.eval(), V_M2))
        # Cauchy-Schwarz-normalised cross information potential.
        V_c2 = V_nc2 ** 2 / (V_J2 * V_M2)
        V_c1 = ITLFunctions.cross_information_potential([Y[1]], Y[0], s=s, dist='CS')
        self.assertTrue(abs(V_c1.eval() - V_c2) < 0.00001, 'Problem V_c (%g != %g)' % (V_c1.eval(), V_c2))
    # noinspection PyTypeChecker
    def test_mutual_information_cs(self):
        from deepensemble.utils.utils_functions import ITLFunctions
        import numpy as np
        from sklearn.metrics import mutual_info_score
        y1 = np.array([1, 1, 0, 0, 1, 1, 0])
        y2 = np.array([0, 0, 1, 1, 0, 0, 1])
        Y = [y1[:, np.newaxis], y2[:, np.newaxis]]
        s = 1.06 * np.std(y1) * (len(y1)) ** (-0.2)
        Ics = ITLFunctions.mutual_information_cs([Y[1]], Y[0], s=max(s, 0.00001))
        I = mutual_info_score(y1, y2)
        # NOTE(review): assertFalse asserts the CS estimate is NOT close to
        # Shannon MI -- presumably intentional (different definitions), but
        # confirm; an assertTrue typo would invert this test's meaning.
        self.assertFalse(abs(Ics.eval() - I) < 0.01, 'Problem Ics and I')
    # noinspection PyTypeChecker
    def test_mutual_information_ed(self):
        from deepensemble.utils.utils_functions import ITLFunctions
        import numpy as np
        from sklearn.metrics import mutual_info_score
        y1 = np.array([1, 1, 0, 0, 1, 1, 0])
        y2 = np.array([0, 0, 1, 1, 0, 0, 1])
        Y = [y1[:, np.newaxis], y2[:, np.newaxis]]
        s = 1.06 * np.std(y1) * (len(y1)) ** (-0.2)
        Ied = ITLFunctions.mutual_information_ed([Y[1]], Y[0], s=max(s, 0.00001))
        I = mutual_info_score(y1, y2)
        # NOTE(review): same inverted-assertion pattern as the CS test above.
        self.assertFalse(abs(Ied.eval() - I) < 0.01, 'Problem Ied and I')
|
"""The venstar component."""
import logging
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
)
from homeassistant.const import STATE_ON
# Home Assistant integration domain.
DOMAIN = "venstar"
# Extra state-attribute keys exposed by the climate entity.
ATTR_FAN_STATE = "fan_state"
ATTR_HVAC_STATE = "hvac_mode"
# Config option toggling humidifier support.
CONF_HUMIDIFIER = "humidifier"
DEFAULT_SSL = False
# Fan and HVAC modes this integration accepts.
VALID_FAN_STATES = [STATE_ON, HVAC_MODE_AUTO]
VALID_THERMOSTAT_MODES = [HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_OFF, HVAC_MODE_AUTO]
# Hold-mode identifiers used by the thermostat.
HOLD_MODE_OFF = "off"
HOLD_MODE_TEMPERATURE = "temperature"
# Request timeout (seconds) and delay between consecutive API calls (seconds).
VENSTAR_TIMEOUT = 5
VENSTAR_SLEEP = 1.0
_LOGGER = logging.getLogger(__name__)
|
'''
TODO user test cases
'''
import asyncio
from fastapi import status
from fastapi.testclient import TestClient
from requests.auth import HTTPBasicAuth
from app.tests.fixtures import client, event_loop # noqa: F401
from app.schemas.user import UserInput
from app.repositories.user import UserRepository
from app.usecases.user import UserUseCase
from app.core.config import settings
# from app.core.security import get_auth_header
def get_url(path: str) -> str:
    """Prefix *path* with the configured API v1 root."""
    return "{}{}".format(settings.API_V1_STR, path)
def test_register_user(
    client: TestClient, event_loop: asyncio.AbstractEventLoop  # noqa: F811
):
    '''
    Test registering a user
    '''
    url = get_url("/register")
    user_input = UserInput(
        email="email@email.com", first_name="F", last_name="L", password="123"
    )
    response = client.post(url, json=user_input.dict())
    # Successful registration returns no response body.
    assert response.status_code == status.HTTP_204_NO_CONTENT
    # Confirm the user actually landed in the repository.
    async def check_user():
        return await UserRepository.check_user_exists(user_input.email)
    assert event_loop.run_until_complete(check_user())
def test_read_current_user(
    client: TestClient, event_loop: asyncio.AbstractEventLoop  # noqa: F811
):
    '''
    Test that a registered user can read its own authenticated credentials
    '''
    user_input = UserInput(
        email="email2@email.com", first_name="F", last_name="L", password="123"
    )
    # Seed the user through the use case rather than the HTTP endpoint.
    async def add_user():
        return await UserUseCase.register_user(user_input)
    event_loop.run_until_complete(add_user())
    url = get_url("/__user__")
    # Authenticate with HTTP Basic using the just-created credentials.
    basic_auth = HTTPBasicAuth(
        username=user_input.email, password=user_input.password
    )
    response = client.get(url, auth=basic_auth)
    assert response.status_code == status.HTTP_200_OK
    assert response.json()['username'] == user_input.email
|
import os
from gym.envs.registration import register
# Register each autoencoder-wrapped Heist environment with gym so callers
# can instantiate them via gym.make(<id>).
register(
    id='heist-auto-unpool-v0',
    entry_point='gym_autoencoder.heist.envs:AutoencoderUnpoolEnv',
)
register(
    id='heist-auto-maxpool-v0',
    entry_point='gym_autoencoder.heist.envs:AutoencoderMaxPoolEnv',
)
register(
    id='heist-auto-maxpool-big-v0',
    entry_point='gym_autoencoder.heist.envs:AutoencoderMaxPoolBigEnv',
)
register(
    id='heist-auto-no-bottleneck-v0',
    entry_point='gym_autoencoder.heist.envs:AutoencoderNoBottleneckEnv',
)
# Variational-autoencoder variants.
register(
    id='heist-vae-alex-v0',
    entry_point='gym_autoencoder.heist.envs:VaritionalAlexEnv',
)
register(
    id='heist-vae-paper-v0',
    entry_point='gym_autoencoder.heist.envs:VaritionalPaperEnv',
)
|
import numpy as np
from nnmnkwii.io import hts
from os.path import join
from glob import glob
from tqdm import tqdm
# For every HTS full-context label file, collect the frame indices where a
# mora ends (vowel, devoiced vowel, geminate 'cl' or moraic nasal 'N'),
# drop silence frames, and save the surviving indices as one CSV per file.
paths = sorted(glob(join('../data/basic5000', "label_phone_align", "*.lab")))
for i, filepath in tqdm(enumerate(paths)):
    label = hts.load(filepath)
    end_times_ = label.end_times
    end_times = []
    for j, end_time in enumerate(end_times_):
        str_tmp = str(label[j])
        # Keep phones whose centre phoneme is mora-final: a/i/u/e/o, the
        # devoiced A/I/U/E/O, the geminate 'cl' or the moraic nasal 'N'.
        if '-a+' in str_tmp or '-i+' in str_tmp or '-u+' in str_tmp or '-e+' in str_tmp or '-o+' in str_tmp or '-cl+' in str_tmp or '-N+' in str_tmp or '-A+' in str_tmp or '-I+' in str_tmp or '-U+' in str_tmp or '-E+' in str_tmp or '-O+' in str_tmp:
            end_times.append(end_time-1)
    # HTS label times are in 100 ns units; 50000 units == one 5 ms frame.
    end_index = np.array(end_times) / 50000
    # Frame flags: 1 where a mora ends, else 0.
    # Changes from the original: the dead `mora_index = np.array(...)`
    # pre-assignment (immediately overwritten) is removed; membership is
    # tested against a set built once instead of a list rebuilt per frame
    # (O(n) vs O(n^2)); and the loop variable is `t`, not `i`, so it cannot
    # shadow the file index used in the output filename below.
    mora_frames = set(end_index.astype(int).tolist())
    mora_index = [1 if t in mora_frames else 0 for t in range(end_index[-1].astype(int)+1)]
    # Remove frames that fall inside silence segments.
    indices = label.silence_frame_indices().astype(int)
    mora_index = np.delete(mora_index, indices, axis=0)
    mora_index = mora_index.nonzero()
    np.savetxt('../data/basic5000/mora_index/squeezed_mora_index_' + '0'*(4-len(str(i+1))) + str(i+1) + '.csv', mora_index)
|
from explainer.real_time.pytorch_fixes import SaliencyModel
from explainer.real_time.resnet_encoder import resnet50encoder
import torch.nn.functional as F
from torch.autograd import Variable
def get_pretrained_saliency_fn(model_dir, cuda=True, return_classification_logits=False):
    ''' returns a saliency function that takes images and class selectors as inputs. If cuda=True then places the model on a GPU.
    You can also specify model_confidence - smaller values (~0) will show any object in the image that even slightly resembles the specified class
    while higher values (~5) will show only the most salient parts.
    Params of the saliency function:
    images - input images of shape (C, H, W) or (N, C, H, W) if in batch. Can be either a numpy array, a Tensor or a Variable
    selectors - class ids to be masked. Can be either an int or an array with N integers. Again can be either a numpy array, a Tensor or a Variable
    model_confidence - a float, 6 by default, you may want to decrease this value to obtain more complete saliency maps.
    returns a Variable of shape (N, 1, H, W) with one saliency maps for each input image.
    '''
    saliency = SaliencyModel(resnet50encoder(pretrained=True), 5, 64, 3, 64, fix_encoder=True, use_simple_activation=False, allow_selector=True)
    # Load the pretrained saliency weights from model_dir.
    saliency.minimialistic_restore(model_dir)
    # Eval mode: disable dropout / batch-norm updates.
    saliency.train(False)
    if cuda:
        saliency = saliency.cuda()
    def fn(images, selectors, model_confidence=6):
        selectors = Variable(selectors)
        # NOTE(review): `images * 2` is an input-scaling step presumably
        # matching the encoder's expected range -- confirm before changing.
        masks, _, cls_logits = saliency(images * 2, selectors, model_confidence=model_confidence)
        # Upsample the mask back to the input resolution.
        # NOTE: F.upsample is deprecated in newer torch; F.interpolate is
        # the drop-in replacement.
        sal_map = F.upsample(masks, (images.size(2), images.size(3)), mode='bilinear')
        if not return_classification_logits:
            return sal_map
        return sal_map, cls_logits
    return fn
class RealTimeSaliencyExplainer(object):
    """Thin wrapper exposing the pretrained saliency function as an explainer."""

    def __init__(self, model_dir, cuda=True, return_classification_logits=False):
        self.saliency_fn = get_pretrained_saliency_fn(
            model_dir, cuda, return_classification_logits
        )

    def explain(self, inp, ind):
        """Return the raw saliency mask tensor for input `inp`, class `ind`."""
        saliency_map = self.saliency_fn(inp, ind)
        return saliency_map.data
|
import torch
from torch.autograd import Variable as V
import torchvision.models as models
from torchvision import transforms as trn
from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Sequential, Module, ModuleList, LogSoftmax, Softmax, functional as F
import os
from PIL import Image
import numpy as np
import sys
sys.path.append('../../')
from torch.utils.data import DataLoader
from data_processor.train_dataset import ImageDataset, PlaceDataset, transform as aug_tf
# th architecture to use
arch = 'resnet50'
# load the pre-trained weights
model_file = '%s_places365.pth.tar' % arch
# Download the checkpoint when not already available locally.
# NOTE(review): os.access(..., os.W_OK) tests writability, not existence --
# an existing read-only checkpoint would be re-downloaded.
if not os.access(model_file, os.W_OK):
    weight_url = 'http://places2.csail.mit.edu/models_places365/' + model_file
    os.system('wget ' + weight_url)
def fix(s):
    """Normalise checkpoint state-dict key names.

    NOTE(review): the bare ``return s`` on the first line short-circuits
    the whole function, so the remapping below is dead code. It looks like
    the remap was deliberately disabled when switching models (checkpoint
    loading below is also commented out) -- confirm before deleting or
    re-enabling.
    """
    return s
    # --- unreachable remapping logic kept for reference ---
    s = s.split('.')
    for i in range(len(s)):
        if len(s[i]) == 1:
            try:
                int(s[i])
                s[i-1] = s[i-1] + s[i]
                del s[i]
                break
            except:
                pass
    return ".".join(s)
from alexnet import places_alexnet
from resnet2 import resnet50
# Build the model; the commented alternatives are other backbones that were
# being compared. Checkpoint loading is currently disabled.
model =places_alexnet() #models.__dict__[arch](num_classes=365)#places_resnet()#
# checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
# state_dict = {fix(str.replace(k,'module.','')): v for k,v in checkpoint['state_dict'].items()}
# model.load_state_dict(state_dict)
model.eval()
# model = model.to('cuda:0')
# print(model)
test_data = PlaceDataset("../../data/places/places365_standard","../../data/places/places365_standard/val.txt", "../../data/places/places365_standard/names.txt")
test_data_loader = DataLoader(test_data, 16, True, num_workers = 0)
# Running top-1 / top-5 accuracy over the validation set.
total = 0
correct = 0
correct5 = 0
for images, labels in test_data_loader:
    total += labels.size(0)
    # labels = labels.unsqueeze(1)
    # images = images.to('cuda:0')
    # out, _= model.forward(images)
    out = model.forward(images)
    out = out.to('cpu').detach()
    h_x = F.softmax(out, 1).data.squeeze()
    _, predicted = torch.max(out, 1)
    # NOTE(review): this sorts along dim 0 (the batch axis) and the results
    # are never used -- probably leftover debugging.
    probs, idx = out.sort(0, True)
    correct += (predicted == labels).sum().item()
    # NOTE(review): correct5 is updated *after* this print, so the running
    # top-5 figure shown here lags one batch behind; the final print is exact.
    print("Top 5 acc:", round(correct5/total, 2), "Top 1 acc:", round(correct/total, 2), end="\r")
    # print(out.shape, labels.shape)
    correct5 += np.equal(np.argsort(h_x)[:, -5:], labels[:, None]).any(axis=1).sum().item()
print("Top 1 acc:", correct/total)
print("Top 5 acc:", correct5/total)
"""
Milestone 3 Code (P7 & P8) - Batch User Interface
Team 64: Py Eaters
Team Leader: Bardia Parmoun
Team Members: Hao Lin
Benjamin Richards
Ian Holmes
Authors: Benjamin Richards and Hao Lin
Submitted on 02/04/2020
"""
# Libraries
from Cimpl import (copy, create_color, set_color, Image, get_width, get_height,
choose_file, load_image, show, get_color, create_image,
save_as)
from T64_image_filters import (red_channel, green_channel, blue_channel,
combine,two_tone, three_tone, sepia, posterize,
extreme_contrast, detect_edges,
detect_edges_better, flip_horizontal,
flip_vertical)
# Functions
def _find_filter(command: str) -> int:
    """ Author: Bardia Parmoun
    Type annotation: str -> function

    Map a one-letter batch command to the index of its filter in FILTERS.

    ACCEPTED_INPUTS and FILTERS are parallel, except that the first two
    entries ("L", "S") and the last one ("Q") of ACCEPTED_INPUTS have no
    filter, so each filter's index is its command's position minus two.

    >>> _find_filter("X")
    2
    """
    # Scan only the command letters that actually map to a filter.
    for offset, letter in enumerate(ACCEPTED_INPUTS[2:-1]):
        if letter == command:
            return offset
def apply_filter(image: Image, command: str) -> Image:
    """ Author: Benjamin Richards
    Type annotation: Cimpl.Image, str -> Cimpl.Image
    This functions takes the image and the command and applies the specific
    filter on the image.
    >>> apply_filter(original_image, "X")
    (applies the extreme contrast filter to the original image)
    """
    # Finds the filter using the _find_filter function
    # NOTE(review): an unrecognised command makes _find_filter return None,
    # and FILTERS[None] raises TypeError -- commands are assumed valid.
    selected_filter = FILTERS[_find_filter(command)]
    # For detect_edges and detect_edges_better the threshold value is 10
    if selected_filter == detect_edges:
        image = detect_edges(image, THRESHOLD)
    elif selected_filter == detect_edges_better:
        image = detect_edges_better(image, THRESHOLD)
    # For two_tone and three_tone there are preset colour values
    elif selected_filter == two_tone:
        image = two_tone(image, COLOUR1, COLOUR3)
    elif selected_filter == three_tone:
        image = three_tone(image, COLOUR1, COLOUR2, COLOUR3)
    else:
        # All remaining filters take only the image.
        image = selected_filter(image)
    return image
def read_file() -> list:
    """ Author: Hao Lin
    Type annotation: (None) -> List of strings
    Reads the list of commands from a text file (whose name is asked for
    interactively) and returns them as a 2D list, one inner list per line.
    The format of the text file:
    (name of the original image) (name of the final image) (the commands)
    Possible commands: '2','3','X','T','P','E','I','V','H'
    >>> read_file()
    [['miss_sullivan.jpg', 'test1.jpg', '2', 'X', 'P'],
    ['miss_sullivan.jpg', 'test2.jpg', 'V', 'H']]
    """
    filename = input("Enter your filename (with the .txt extension): ")
    # BUGFIX: use a context manager so the file handle is closed even if
    # reading raises (the original open()/close() pair leaked the handle
    # on error).
    with open(filename, "r") as file:
        # Split every line on whitespace, producing a 2D list where each
        # inner list is [source image, output image, command, command, ...].
        commands = [line.split() for line in file]
    return commands
# Main Code
# Assumptions: Cimpl.py, T64_image_filters.py, and simple_Cimpl_filters.py are
# in the same folder as this program
commands = read_file()

# Constants
THRESHOLD = 10
COLOUR1 = "yellow"
COLOUR2 = "magenta"
COLOUR3 = "cyan"
# Every command the batch file may contain ("L"/"S"/"Q" are non-filter ones).
ACCEPTED_INPUTS = ["L","S","2","3","X","T","P","E","I","V","H","Q"]
# The filter functions, ordered to line up with ACCEPTED_INPUTS[2:-1].
FILTERS = [two_tone, three_tone, extreme_contrast, sepia, posterize,
           detect_edges, detect_edges_better, flip_vertical, flip_horizontal]

for line in commands:
    # Element 0 names the source image; element 1 names the output file.
    original_image = load_image(line[0])
    # Every remaining element is a one-letter filter command, applied in order.
    for command in line[2:]:
        original_image = apply_filter(original_image, command)
    save_as(original_image, line[1])
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CommonConfig(AppConfig):
    """Django application configuration for the geotrek.common app."""
    name = 'geotrek.common'
    # Translatable label (shown e.g. in the Django admin).
    verbose_name = _("Common")
    def ready(self):
        """Import custom lookups for their registration side effects."""
        import geotrek.common.lookups # NOQA
|
import os
from pymongo import MongoClient
import time
import uuid
import web
# Global variables
import os  # NOTE(review): duplicate of the top-of-file "import os"; harmless
# Absolute directory of this module, used to anchor the upload directory.
here = os.path.dirname(__file__)
filedir = os.path.join(here, 'static/pub') # File storage directory
class DBManager():
    """Thin wrapper around the MongoDB 'nics.multimedia' collection.

    Uploaded files are stored on disk under ``filedir``; their metadata
    (plus the generated filename and a timestamp) is stored in Mongo.
    """

    def __init__(self):
        # Connects to the default local MongoDB instance.
        self.client = MongoClient()
        self.db = self.client.nics
        self.collection = self.db.multimedia

    def find(self, search_parameters = None):
        """Return matching documents as a list, newest first."""
        return list(self.collection.find(search_parameters).sort("timestamp", -1))

    def insert(self, file_pointer, metadata):
        """Store an uploaded file on disk and its metadata in Mongo.

        file_pointer -- web.py upload field (has .filename and .file);
                        if None, nothing is stored and False is returned.
        metadata     -- dict of user-supplied fields; TITLE and AUTHOR get
                        defaults and empty-string values are dropped.
        Returns the generated hex id on success, False otherwise.
        """
        file_id = uuid.uuid4().hex
        if file_pointer is None:
            return False
        # Normalize Windows path separators, then keep only the basename;
        # prefix with the hex id so stored names cannot collide.
        filepath = file_pointer.filename.replace('\\', '/')
        filename = file_id + filepath.split('/')[-1]
        # BUGFIX: open in binary mode ('wb', was 'w') -- uploads are raw
        # bytes and text mode corrupts them.  "with" also guarantees the
        # handle is closed if the write fails (original leaked it).
        with open(os.path.abspath(os.path.join(filedir, filename)), 'wb') as fout:
            fout.write(file_pointer.file.read())
        if metadata.get('TITLE', '') == '':
            metadata['TITLE'] = time.strftime("%c")
        if metadata.get('AUTHOR', '') == '':
            metadata['AUTHOR'] = "Anonymous"
        # BUGFIX: iterate over a snapshot of the keys -- deleting from a
        # dict while iterating its live key view raises RuntimeError on
        # Python 3 (the original only worked on Python 2).
        for k in list(metadata.keys()):
            if metadata[k] == "":
                del metadata[k]
        datagram = dict()
        datagram["_id"] = file_id
        datagram["filename"] = filename
        datagram["timestamp"] = time.time()
        datagram["metadata"] = metadata
        # Was the Python-2-only statement "print datagram"; this form
        # behaves identically on Python 2 and 3.
        print(datagram)
        self.collection.insert(datagram)
        return file_id

    def delete(self, id, filename = None):
        """Remove the document with the given id and its file on disk.

        If filename is not supplied it is looked up from the database.
        Returns True.  NOTE(review): raises IndexError if id is unknown.
        """
        if filename is None:
            filename = self.find({'_id': id})[0]['filename']
        # NOTE(review): Collection.remove() is deprecated in modern pymongo
        # (delete_one is the replacement); left as-is for the old driver.
        self.collection.remove({"_id": id})
        os.remove(os.path.abspath(os.path.join(filedir, filename)))
        return True

    def update(self, id, metadata):
        """Replace the stored metadata for the given id (upserts)."""
        # The lookup doubles as an existence check: it raises IndexError
        # when the id is unknown, so keep it even though the filename
        # itself is unused.
        filename = self.find({'_id': id})[0]['filename']
        # NOTE(review): Collection.update() is deprecated in modern pymongo
        # (update_one is the replacement); left as-is for the old driver.
        self.collection.update(
            {"_id": id},
            {"$set":
                {"metadata": metadata}},
            upsert = True
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.