from faker import Faker
import attr
from fixtures.base import BaseClass
fake = Faker()
@attr.s
class RegisterUser(BaseClass):
username: str = attr.ib(default=None)
password: str = attr.ib(default=None)
@staticmethod
def random():
return RegisterUser(username=fake.email(), password=fake.password())
@attr.s
class RegisterUserResponse:
message: str = attr.ib(validator=attr.validators.instance_of(str))
uuid: int = attr.ib(validator=attr.validators.instance_of(int))
@attr.s
class RegisterUserInvalidResponse:
message: str = attr.ib(validator=attr.validators.instance_of(str))
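# Hedged usage sketch (illustrative, not part of the fixtures module; assumes
# only the attrs API used above):
#
#     user = RegisterUser.random()
#     payload = attr.asdict(user)  # contains the generated username/password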
|
import numpy as np
import ast
import json
from utils import regresiveSustitution
from utils import rowOps
from utils import getMultipliers
from utils import swapRows
from utils import swapCols
from utils import isSquared
def gaussTotal(A,b):
A = ast.literal_eval(A)
b = ast.literal_eval(b)
res = {}
pivots = []
# Convert inputs into NumPy arrays
A = np.array(A).astype(float)
b = np.array(b).astype(float)
# Append b as the last column of A (build the augmented matrix)
A = np.concatenate([A, b.reshape((A.shape[0], 1))], axis=1)
# Validate that the coefficient matrix is square
if not isSquared(np.delete(A, -1, axis=1)):
res["status"] = 'Not square + 1 col matrix!'
res["error"] = True
return res
# Abort if the determinant is 0 (singular system)
if (np.linalg.det(np.delete(A, -1, axis=1)) == 0):
res["status"] = 'Determinant is 0'
res["error"] = True
return res
times = A[:, 0].size - 1
indexes = np.arange(0, times+1)
for nCol in range(0,times):
absMat = np.absolute(A[nCol:,nCol:-1])
mVal = np.amax(absMat)
mRow = np.where(absMat == mVal)[0][0]
mCol = np.where(absMat == mVal)[1][0]
if (A[nCol][nCol] < mVal):
if(nCol + mRow != nCol):
A, indexes = swapRows(A, nCol, mRow, indexes)
if(nCol + mCol != nCol):
A = swapCols(A, nCol, mCol)
multipliers = getMultipliers(A, nCol)
# Apply the row operations only if at least one multiplier is nonzero
if (not np.count_nonzero(multipliers) == 0):
A = rowOps(A, nCol, multipliers)
pivots.append({"step": nCol, "matrix": json.dumps(A.tolist())})
values = regresiveSustitution(A,times,indexes)
res["pivots"] = pivots
res["values"] = values
res["error"] = False
return res
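# Hedged usage sketch (illustrative only; assumes the `utils` helpers imported
# above are available). gaussTotal expects A and b as Python-literal strings,
# since they are parsed with ast.literal_eval. The returned "values" should
# correspond to numpy.linalg.solve's solution, up to the unknown ordering
# tracked in `indexes` by the column swaps.
if __name__ == '__main__':
    A_str = "[[4, -2, 1], [3, 6, -4], [2, 1, 8]]"
    b_str = "[12, -25, 32]"
    result = gaussTotal(A_str, b_str)
    print(result["values"])
    print(np.linalg.solve(np.array(ast.literal_eval(A_str), dtype=float),
                          np.array(ast.literal_eval(b_str), dtype=float)))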
|
import math
import cupy
from cupy.core import internal
from cupyx.scipy import fft
from cupyx.scipy.ndimage import filters
from cupyx.scipy.ndimage import _util
def _check_conv_inputs(in1, in2, mode, convolution=True):
if in1.ndim == in2.ndim == 0:
return in1 * (in2 if convolution else in2.conj())
if in1.ndim != in2.ndim:
raise ValueError('in1 and in2 should have the same dimensionality')
if in1.size == 0 or in2.size == 0:
return cupy.array([], dtype=in1.dtype)
if mode not in ('full', 'same', 'valid'):
raise ValueError('acceptable modes are "valid", "same", or "full"')
return None
def _direct_correlate(in1, in2, mode='full', output=float, convolution=False,
boundary='constant', fillvalue=0.0, shift=False):
if in1.ndim != 1 and (in1.dtype.kind == 'b' or
(in1.dtype.kind == 'f' and in1.dtype.itemsize < 4)):
raise ValueError('unsupported type in SciPy')
# Swaps inputs so smaller one is in2:
# NOTE: when mode != 'valid' we can only swap with a constant-0 boundary
swapped_inputs = False
orig_in1_shape = in1.shape
if _inputs_swap_needed(mode, in1.shape, in2.shape) or (
in2.size > in1.size and boundary == 'constant' and fillvalue == 0):
in1, in2 = in2, in1
swapped_inputs = True
# Due to several optimizations, the second array can only be 2 GiB
if in2.nbytes >= (1 << 31):
raise RuntimeError('smaller array must be 2 GiB or less, '
'use method="fft" instead')
# At this point, in1.size > in2.size
# (except some cases when boundary != 'constant' or fillvalue != 0)
# Figure out the output shape and the origin of the kernel
if mode == 'full':
out_shape = tuple(x1+x2-1 for x1, x2 in zip(in1.shape, in2.shape))
offsets = tuple(x-1 for x in in2.shape)
elif mode == 'valid':
out_shape = tuple(x1-x2+1 for x1, x2 in zip(in1.shape, in2.shape))
offsets = (0,) * in1.ndim
else: # mode == 'same':
# In correlate2d: When using "same" mode with even-length inputs, the
# outputs of correlate and correlate2d differ: There is a 1-index
# offset between them.
# This is dealt with by using "shift" parameter.
out_shape = orig_in1_shape
if orig_in1_shape == in1.shape:
offsets = tuple((x-shift)//2 for x in in2.shape)
else:
offsets = tuple((2*x2-x1-(not convolution)+shift)//2
for x1, x2 in zip(in1.shape, in2.shape))
# Check the output
if not isinstance(output, cupy.ndarray):
output = cupy.empty(out_shape, output)
elif output.shape != out_shape:
raise ValueError('out has wrong shape')
# Get and run the CuPy kernel
int_type = _util._get_inttype(in1)
kernel = filters._get_correlate_kernel(
boundary, in2.shape, int_type, offsets, fillvalue)
in2 = _reverse(in2) if convolution else in2.conj()
if not swapped_inputs or convolution:
kernel(in1, in2, output)
elif output.dtype.kind != 'c':
# Avoids one array copy
kernel(in1, in2, _reverse(output))
else:
kernel(in1, in2, output)
output = cupy.ascontiguousarray(_reverse(output))
if swapped_inputs and (mode != 'valid' or not shift):
cupy.conjugate(output, out=output)
return output
def _reverse(x):
# Reverse array `x` in all dimensions
return x[(slice(None, None, -1),) * x.ndim]
def _inputs_swap_needed(mode, shape1, shape2, axes=None):
# See scipy's documentation in scipy.signal.signaltools
if mode != 'valid' or not shape1:
return False
if axes is None:
axes = tuple(range(len(shape1)))
not_ok1 = any(shape1[i] < shape2[i] for i in axes)
not_ok2 = any(shape1[i] > shape2[i] for i in axes)
if not_ok1 and not_ok2:
raise ValueError('For "valid" mode, one must be at least '
'as large as the other in every dimension')
return not_ok1
def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):
# See scipy's documentation in scipy.signal.signaltools
s1, s2 = in1.shape, in2.shape
axes = _init_nd_and_axes(in1, axes)
# Length-1 axes can rely on broadcasting rules, no fft needed
axes = [ax for ax in axes if s1[ax] != 1 and s2[ax] != 1]
if sorted_axes:
axes.sort()
# Check that unused axes are either 1 (broadcast) or the same length
for ax, (dim1, dim2) in enumerate(zip(s1, s2)):
if ax not in axes and dim1 != dim2 and dim1 != 1 and dim2 != 1:
raise ValueError('incompatible shapes for in1 and in2:'
' {} and {}'.format(s1, s2))
# Check that input sizes are compatible with 'valid' mode.
if _inputs_swap_needed(mode, s1, s2, axes=axes):
# Convolution is commutative
in1, in2 = in2, in1
return in1, in2, tuple(axes)
def _init_nd_and_axes(x, axes):
# See documentation in scipy.fft._helper._init_nd_shape_and_axes
# except shape argument is always None and doesn't return new shape
axes = internal._normalize_axis_indices(axes, x.ndim, sort_axes=False)
if not len(axes):
raise ValueError('when provided, axes cannot be empty')
if any(x.shape[ax] < 1 for ax in axes):
raise ValueError('invalid number of data points specified')
return axes
def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):
# See scipy's documentation in scipy.signal.signaltools
real = (in1.dtype.kind != 'c' and in2.dtype.kind != 'c')
fshape = ([fft.next_fast_len(shape[a], real) for a in axes]
if calc_fast_len else shape)
fftn, ifftn = (fft.rfftn, fft.irfftn) if real else (fft.fftn, fft.ifftn)
# Perform the convolution
sp1 = fftn(in1, fshape, axes=axes)
sp2 = fftn(in2, fshape, axes=axes)
out = ifftn(sp1 * sp2, fshape, axes=axes)
return out[tuple(slice(x) for x in shape)] if calc_fast_len else out
def _apply_conv_mode(full, s1, s2, mode, axes):
# See scipy's documentation in scipy.signal.signaltools
if mode == 'full':
return cupy.ascontiguousarray(full)
if mode == 'valid':
s1 = [full.shape[a] if a not in axes else s1[a] - s2[a] + 1
for a in range(full.ndim)]
starts = [(cur-new)//2 for cur, new in zip(full.shape, s1)]
slices = tuple(slice(start, start+length)
for start, length in zip(starts, s1))
return cupy.ascontiguousarray(full[slices])
__EXP_N1 = 0.36787944117144232159553 # exp(-1)
def _optimal_oa_block_size(overlap):
"""
Computes the optimal block size for the OA method given the overlap size.
Computed as ``ceil(-overlap*W(-1/(2*e*overlap)))`` where ``W(z)`` is the
Lambert W function solved as per ``scipy.special.lambertw(z, -1)`` with a
fixed 4 iterations.
Returned size should still be given to ``cupyx.scipy.fft.next_fast_len()``.
"""
# This function is 10x faster in Cython (but only 1.7us in Python). Can be
# easily moved to Cython by:
# * adding `DEF` before `__EXP_N1`
# * changing `import math` to `from libc cimport math`
# * adding `@cython.cdivision(True)` before the function
# * adding `Py_ssize_t` as the type for the `overlap` argument
# * adding a cast `<Py_ssize_t>` or `int(...)` to the return value
# * adding the following type declarations:
# cdef double z, w, ew, wew, wewz
# cdef int i
# Compute W(-1/(2*e*overlap))
z = -__EXP_N1/(2*overlap) # value to compute for
w = -1 - math.log(2*overlap) # initial guess
for i in range(4):
ew = math.exp(w)
wew = w*ew
wewz = wew - z
w -= wewz/(wew + ew - (w + 2)*wewz/(2*w + 2))
return math.ceil(-overlap*w)
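# Hedged cross-check (illustrative, not part of this module): the fixed
# 4-iteration solve above should closely match SciPy's reference Lambert W on
# the lower branch. Assumes SciPy is installed; kept as a comment so nothing
# runs on import.
#
#     from scipy.special import lambertw
#     overlap = 127
#     w_ref = lambertw(-__EXP_N1 / (2 * overlap), -1).real
#     print(_optimal_oa_block_size(overlap), math.ceil(-overlap * w_ref))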
def _calc_oa_lens(s1, s2):
# See scipy's documentation in scipy.signal.signaltools
# Set up the arguments for the conventional FFT approach.
fallback = (s1+s2-1, None, s1, s2)
# Use conventional FFT convolve if sizes are same.
if s1 == s2 or s1 == 1 or s2 == 1:
return fallback
# Make s1 the larger size
swapped = s2 > s1
if swapped:
s1, s2 = s2, s1
# There cannot be a useful block size if s2 is more than half of s1.
if s2 >= s1//2:
return fallback
# Compute the optimal block size from the overlap
overlap = s2-1
block_size = fft.next_fast_len(_optimal_oa_block_size(overlap))
# Use conventional FFT convolve if there is only going to be one block.
if block_size >= s1:
return fallback
# Get step size for each of the blocks
in1_step, in2_step = block_size-s2+1, s2
if swapped:
in1_step, in2_step = in2_step, in1_step
return block_size, overlap, in1_step, in2_step
def _oa_reshape_inputs(in1, in2, axes, shape_final,
block_size, overlaps, in1_step, in2_step):
# Figure out the number of steps and padding.
# This would get too complicated in a list comprehension.
nsteps1 = []
nsteps2 = []
pad_size1 = []
pad_size2 = []
for i in range(in1.ndim):
if i not in axes:
pad_size1 += [(0, 0)]
pad_size2 += [(0, 0)]
continue
curnstep1, curpad1, curnstep2, curpad2 = 1, 0, 1, 0
if in1.shape[i] > in1_step[i]:
curnstep1 = math.ceil((in1.shape[i]+1)/in1_step[i])
if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:
curnstep1 += 1
curpad1 = curnstep1*in1_step[i] - in1.shape[i]
if in2.shape[i] > in2_step[i]:
curnstep2 = math.ceil((in2.shape[i]+1)/in2_step[i])
if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:
curnstep2 += 1
curpad2 = curnstep2*in2_step[i] - in2.shape[i]
nsteps1 += [curnstep1]
nsteps2 += [curnstep2]
pad_size1 += [(0, curpad1)]
pad_size2 += [(0, curpad2)]
# Pad array to a size that can be reshaped to desired shape if necessary
if not all(curpad == (0, 0) for curpad in pad_size1):
in1 = cupy.pad(in1, pad_size1, mode='constant', constant_values=0)
if not all(curpad == (0, 0) for curpad in pad_size2):
in2 = cupy.pad(in2, pad_size2, mode='constant', constant_values=0)
# We need to put each new dimension before the corresponding dimension
# being reshaped in order to get the data in the right layout at the end.
reshape_size1 = list(in1_step)
reshape_size2 = list(in2_step)
for i, iax in enumerate(axes):
reshape_size1.insert(iax+i, nsteps1[i])
reshape_size2.insert(iax+i, nsteps2[i])
return in1.reshape(*reshape_size1), in2.reshape(*reshape_size2)
|
"""GUI"""
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from shiftscheduler.gui import lower_frame as lower
from shiftscheduler.gui import upper_frame as upper
from shiftscheduler.gui import util
from shiftscheduler.i18n import gettext
_ = gettext.GetTextFn('gui/gui')
def CreateGUI():
root = tk.Tk()
root.minsize(900, 600)
root.maxsize(900, 600)
root.title(_('Shift Scheduler v0.1'))
util.SetGridWeights(root, row_weights=(1, 4))
lower_frame = lower.LowerFrame(root)
util.SetGrid(lower_frame, 1, 0)
upper_frame = upper.UpperFrame(root, lower_frame)
util.SetGrid(upper_frame, 0, 0)
return root
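# Hedged entry-point sketch (assumption: this module may also be run directly
# rather than only imported): build the window and start the Tk main loop.
if __name__ == '__main__':
    CreateGUI().mainloop()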
|
"""Work with the graph of FamPlex entities and relations."""
from typing import Container, Dict, Generator, List, Tuple
from collections import defaultdict, deque
from famplex.load import load_entities, load_equivalences, load_relations
class FamplexGraph(object):
"""Provides methods for working with graph of FamPlex entities and relations
FamPlex is an ontology of protein families and complexes. Individual terms
are genes/proteins. There are higher level terms for families and
complexes. Terms can be connected by isa or partof relationships.
X isa Y expressing that X is a member of the Y family; Z partof W
expressing that Z is a constituent of the W complex.
Each term in the FamPlex ontology exists within a namespace and has an
identifier which is unique within that namespace. Individual genes and
proteins have either HGNC or Uniprot as a namespace. FamPlex has its own
namespace for families and complexes and the unique identifiers are
designed to be human readable. Identifiers for Uniprot are simply Uniprot
IDs. For HGNC the HGNC Symbol is used instead of the HGNC unique ID.
If X isa Y or X partof Y we say that X is a child of Y and Y is a parent of
X. This is contrary to the terminology used within graph theory for trees
where X is a child of Y if there is an edge from Y to X. However, this is
consistent with how these terms are often used for the hierarchical
relationships within ontologies.
We say Y is above X in the FamPlex ontology if there is a path of isa and
partof edges from X to Y. We also say that Y is an ancestor of X.
X is then below Y in the FamPlex ontology and we also say X is a descendant
of Y.
Attributes
----------
root_classes : set
Set of top level families and complexes in the FamPlex ontology
"""
def __init__(self):
# Graphs are stored internally as a dictionary mapping tuples of
# the form (namespace, id) to a list of tuples of the form
# (namespace, id, relation_type). This is a variant of the adjacency
# list representation of a graph but allowing for multiple edge types.
# Contains forward isa and partof relationships between terms
graph = defaultdict(list)
# Contains reversed isa and partof relationships
reverse_graph = defaultdict(list)
relations = load_relations()
left_set = set()
right_set = set()
# Loop through table populating edges of the above graphs.
# By looking at the set of all terms that appear on the right in the
# relations.csv table which do not appear on the left we can identify
# the top level families and complexes within famplex.
for namespace1, id1, relation, namespace2, id2 in relations:
graph[(namespace1, id1)].append((namespace2, id2, relation))
reverse_graph[(namespace2, id2)].\
append((namespace1, id1, relation))
left_set.add((namespace1, id1))
right_set.add((namespace2, id2))
graph = dict(graph)
reverse_graph = dict(reverse_graph)
# Sort edges in adjacency lists in alphabetical order
for node, edges in graph.items():
graph[node] = sorted(edges, key=lambda x: (x[0].lower(),
x[1].lower()))
for node, edges in reverse_graph.items():
reverse_graph[node] = sorted(edges,
key=lambda x: (x[0].lower(),
x[1].lower()))
self._graph: Dict[Tuple[str, str], List[Tuple[str, str, str]]] = graph
self._reverse_graph: Dict[Tuple[str, str],
List[Tuple[str, str, str]]] = reverse_graph
root_class_mapping = defaultdict(list)
root_classes = sorted(right_set - left_set, key=lambda x: x[1].lower())
# Build up a dictionary mapping terms to the top level families
# or complexes to which they belong. Families and complexes can overlap
# so there can be multiple top level terms above a given term.
for entry in root_classes:
for node in self.traverse(entry, ['isa', 'partof'],
direction='down'):
root_class_mapping[node].append(entry)
root_class_mapping = dict(root_class_mapping)
entities = load_entities()
for entity in entities:
entry = ('FPLX', entity)
if entry not in root_class_mapping:
root_class_mapping[entry] = [entry]
for node, roots in root_class_mapping.items():
root_class_mapping[node] = sorted(roots,
key=lambda x: (x[0].lower(),
x[1].lower()))
equivalences = defaultdict(list)
reverse_equivalences = defaultdict(list)
for ns, id_, fplx_id in load_equivalences():
equivalences[fplx_id].append((ns, id_))
reverse_equivalences[(ns, id_)].append(fplx_id)
equivalences = dict(equivalences)
reverse_equivalences = dict(reverse_equivalences)
# Blank lines are to aid in reading of type hints
self.root_classes: List[Tuple[str, str]] = root_classes
self._root_class_mapping: Dict[Tuple[str, str],
List[Tuple[str, str]]] = \
root_class_mapping
self._equivalences: Dict[str, List[Tuple[str, str]]] = equivalences
self._reverse_equivalences: Dict[Tuple[str, str], List[str]] = \
reverse_equivalences
self.__error_message = 'Given input is not in the FamPlex ontology.'
def in_famplex(self, namespace: str, id_: str) -> bool:
"""Returns True if input term is a member of the FamPlex ontology.
Parameters
----------
namespace : str
Namespace for a term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id_ : str
Identifier for a term within namespace. See the FamplexGraph
class Docstring for more info.
Returns
-------
bool
"""
return (namespace, id_) in self._root_class_mapping
def raise_value_error_if_not_in_famplex(self, namespace: str,
id_: str) -> None:
"""Raise a value error if input is not in FamPlex ontology
This can be used in functions where we desire an exception to be
raised when the input is not in the FamPlex ontology but the
most natural way of writing the function will not lead to any
exceptions being raised.
Parameters
----------
namespace : str
Namespace for a term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id_ : str
Identifier for a term within namespace. See the FamplexGraph
class Docstring for more info.
Raises
------
ValueError
If (namespace, id_) does not correspond to a term in FamPlex.
"""
if not self.in_famplex(namespace, id_):
raise ValueError(self.__error_message)
def parent_edges(self, namespace: str,
id_: str) -> List[Tuple[str, str, str]]:
"""Returns node and relation type for all parents of input
Parameters
----------
namespace : str
Namespace for a term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id_ : str
Identifier for a term within namespace. See the FamplexGraph
class Docstring for more info.
Returns
-------
list
List of all tuples of the form (namespace, id, relation_type) where
(namespace, id) is a parent of the input and relation_type is the
type of relation connecting them.
Raises
------
ValueError
If (namespace, id_) does not correspond to a term in FamPlex.
"""
edges = self._graph.get((namespace, id_))
if edges is None:
self.raise_value_error_if_not_in_famplex(namespace, id_)
return []
return edges
def child_edges(self, namespace: str,
id_: str) -> List[Tuple[str, str, str]]:
"""Returns node and relation type for all children of input
Parameters
----------
namespace : str
Namespace for a term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id_ : str
Identifier for a term within namespace. See the FamplexGraph
class Docstring for more info.
Returns
-------
list
List of all tuples of the form (namespace, id, relation_type) where
(namespace, id) is a child of the input and relation_type is the
type of relation connecting them.
Raises
------
ValueError
If (namespace, id_) does not correspond to a term in FamPlex.
"""
edges = self._reverse_graph.get((namespace, id_))
if edges is None:
self.raise_value_error_if_not_in_famplex(namespace, id_)
return []
return edges
def root_terms(self, namespace: str, id_: str) -> List[Tuple[str, str]]:
"""Returns top level terms above the input term
Parameters
----------
namespace : str
Namespace for a term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id_ : str
Identifier for a term within namespace. See the FamplexGraph
class Docstring for more info.
Returns
-------
list
List of terms above the input that are top level families and/or
complexes within the FamPlex ontology. Values are sorted in case
insensitive alphabetical order, first by namespace and then by id.
Raises
------
ValueError
If (namespace, id_) does not correspond to a term in FamPlex.
"""
roots = self._root_class_mapping.get((namespace, id_))
if roots is None:
raise ValueError(self.__error_message)
return roots
def equivalences(self, fplx_id: str) -> List[Tuple[str, str]]:
"""Return list of equivalent terms from other namespaces.
Parameters
----------
fplx_id : str
A valid Famplex ID
Returns
-------
list
List of tuples of the form (namespace, id) of equivalent terms
from other namespaces.
Raises
------
ValueError
If fplx_id is not an ID in the FamPlex ontology.
"""
self.raise_value_error_if_not_in_famplex('FPLX', fplx_id)
equiv = self._equivalences.get(fplx_id)
if equiv is None:
return []
return equiv
def reverse_equivalences(self, namespace: str, id_: str) -> List[str]:
"""Get equivalent FamPlex terms to a given term from another namespace
Parameters
----------
namespace : str
Namespace of a term
id_ : str
id_ of a term
Returns
-------
list
List of FamPlex IDs for families or complexes equivalent to the
term given by (namespace, id_)
"""
equiv = self._reverse_equivalences.get((namespace, id_))
equiv = [] if equiv is None else equiv
return equiv
def relation(self, namespace1: str, id1: str,
namespace2: str, id2: str,
relation_types: Container[str]) -> bool:
"""General function for determining if two entities are related
Parameters
----------
namespace1 : str
Namespace of first term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id1 : str
Identifier of first term.
namespace2 : str
Namespace of second term. This should be one of 'HGNC', 'FPLX' for
FamPlex, or 'UP' for Uniprot.
id2 : str
Identifier of second term.
relation_types : container
Function returns True if the first term is connected to the second
by one of the relations in this container. Valid relations are
'isa', and 'partof'.
Returns
-------
bool
True if the term given by (namespace1, id1) has one of the
specified relations with the term given by (namespace2, id2). Will
return False if either of (namespace1, id1) or (namespace2, id2) is
not in the FamPlex ontology.
"""
roots1 = self._root_class_mapping.get((namespace1, id1))
roots2 = self._root_class_mapping.get((namespace2, id2))
if roots1 is None or roots2 is None:
return False
if set(roots1) & set(roots2):
node1, node2 = (namespace1, id1), (namespace2, id2)
for node in self.traverse(node1, relation_types,
direction='up'):
if node2 == node:
return True
return False
def traverse(self, source: Tuple[str, str],
relation_types: Container[str],
direction: str) -> Generator[Tuple[str, str], None, None]:
"""Function for traversing FampPlex graph in breadth first order
Parameters
----------
source : tuple
Tuple of the form (namespace, id) specifying where traversal is to
begin.
relation_types : container
Traversal will follow edges from these specified relation_types.
Valid relation types are isa and partof.
direction : str
One of 'up' or 'down'. If 'up' traversal will follow isa and partof
edges in breadth first order to nodes above the source. If 'down'
traversal will follow reversed edges to nodes below the source.
Returns
-------
generator
Generator iterating through nodes in the traversal. The source node
is included in the traversal.
"""
if direction == 'down':
graph = self._reverse_graph
elif direction == 'up':
graph = self._graph
else:
raise ValueError
visited = {source}
queue = deque([source])
while queue:
node = queue.pop()
try:
children = graph[node]
except KeyError:
children = []
for ns, id_, rel in children:
if (ns, id_) not in visited and rel in relation_types:
queue.appendleft((ns, id_))
visited.add((ns, id_))
yield node
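# Hedged usage sketch (illustrative, not part of the module): build the graph
# and walk upward from a term. The 'FPLX'/'AMPK' identifier below is only an
# example and is assumed to exist in the installed FamPlex resources; the
# in_famplex guard keeps the sketch safe if it does not.
if __name__ == '__main__':
    fg = FamplexGraph()
    if fg.in_famplex('FPLX', 'AMPK'):
        print(fg.parent_edges('FPLX', 'AMPK'))
        print(list(fg.traverse(('FPLX', 'AMPK'), ['isa', 'partof'],
                               direction='up')))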
|
import mock
from appdaemon.plugins.hass.hassapi import Hass
def patch_hass():
"""
Patch the Hass API and returns a tuple of:
- The patched functions (as Dict)
- A callback to un-patch all functions
"""
#### Non-actionable functions ####
# Patch the __init__ method to skip Hass initialisation
patch___init__ = mock.patch.object(Hass, '__init__')
patched___init__ = patch___init__.start()
patched___init__.return_value = None
# Patch the log method
patch_log = mock.patch.object(Hass, 'log')
_patched_log = patch_log.start()
#### Actionable functions ####
# Callback registrations functions
patch_run_daily = mock.patch.object(Hass, 'run_daily')
patch_run_in = mock.patch.object(Hass, 'run_in')
patch_listen_event = mock.patch.object(Hass, 'listen_event')
patch_listen_state = mock.patch.object(Hass, 'listen_state')
# State functions / attr
patch_set_state = mock.patch.object(Hass, 'set_state')
patch_get_state = mock.patch.object(Hass, 'get_state')
patch_time = mock.patch.object(Hass, 'time')
patch_passed_args = mock.patch.object(Hass, 'args', create=True)
# Interactions functions
patch_call_service = mock.patch.object(Hass, 'call_service')
patch_turn_on = mock.patch.object(Hass, 'turn_on')
patch_turn_off = mock.patch.object(Hass, 'turn_off')
## Initialize patches
patched_run_daily = patch_run_daily.start()
patched_run_in = patch_run_in.start()
patched_listen_event = patch_listen_event.start()
patched_listen_state = patch_listen_state.start()
patched_set_state = patch_set_state.start()
patched_get_state = patch_get_state.start()
patched_time = patch_time.start()
patched_passed_args = patch_passed_args.start()
patched_call_service = patch_call_service.start()
patched_turn_on = patch_turn_on.start()
patched_turn_off = patch_turn_off.start()
## Setup un-patch callback
def unpatch_callback():
patch___init__.stop()
patch_log.stop()
patch_run_daily.stop()
patch_run_in.stop()
patch_listen_event.stop()
patch_listen_state.stop()
patch_set_state.stop()
patch_get_state.stop()
patch_time.stop()
patch_passed_args.stop()
patch_call_service.stop()
patch_turn_off.stop()
patch_turn_on.stop()
return ({
'run_daily': patched_run_daily,
'run_in': patched_run_in,
'listen_event': patched_listen_event,
'listen_state': patched_listen_state,
'set_state': patched_set_state,
'get_state': patched_get_state,
'time': patched_time,
'passed_args': patched_passed_args,
'call_service': patched_call_service,
'turn_on': patched_turn_on,
'turn_off': patched_turn_off
}, unpatch_callback)
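# Hedged usage sketch (illustrative): a test patches Hass, configures the mock
# return values it needs, exercises the app under test, and always un-patches.
#
#     patched, unpatch = patch_hass()
#     try:
#         patched['get_state'].return_value = 'on'
#         # ... instantiate the AppDaemon app under test, call its
#         # initialize()/callbacks, then assert on the mocks, e.g.
#         # patched['call_service'].assert_called()
#     finally:
#         unpatch()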
|
import csv
import jieba
import pymongo
import re
from findpath import nlppath
from parseutil import parse, parseNum
genset = set(('一代', '二代', '三代', '四代'))
number_re = re.compile(r'(\d+\.\d+|\d*)[wkq]?\d*')
id_re = re.compile(r'[a-z0-9]{3,}')
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["paopao"]
items = mydb['accounts'].find()
_items = [x['unparsed']['content'] for x in items]
items = [parse(x, dirty = True) for x in _items]
items = [x for x in items if x != '']
content = '\n'.join(items)
words = mydb['dicts'].find()
words = [x for x in words]
word_map = dict([[x['name'], x['alias']] for x in words])
allf = [x['name'] for x in words if len(x['name']) >= 1]
f = [x['name'] for x in words if len(x['name']) >= 2]
def mycut(x):
x = jieba.cut(x)
y = []
z = []
for i in x:
if i not in genset:
if len(z) == 0:
y.append(i)
else:
for j in z:
y.append(j + i)
z = []
else:
z.append(i)
return y
words = [re.sub(r'\s', '', x.split(' ')[0]) for x in f]
ws = set(words)
allws = set(allf)
words = [x for x in ws]
edge = {}
degree = {}
for x in words:
edge[x] = []
degree[x] = 0
for x in words:
for y in words:
if x.count(y) > 0 and x != y:
edge[x].append(y)
degree[y] += 1
orders = [x for x in words if degree[x] == 0]
for i in range(len(words)):
x = orders[i]
for y in edge[x]:
degree[y] -= 1
if degree[y] == 0:
orders.append(y)
f = open(nlppath('dict.txt'), 'w')
nw = []
for x in orders:
cnt = content.count(x)
if cnt == 0: continue
nw.append([x, cnt + 5])
content = re.sub(x, ' ', content)
nw.sort(key=lambda x: len(x[0]) * 100000 - x[1])
s = ''
for x in nw:
s += x[0] + ' ' + str(x[1]) + '\n'
f.write(s)
f.close()
jieba.load_userdict(nlppath('dict.txt'))
content = '\n'.join(items)
cuts = [[y for y in mycut(x) if y != ' ' and y !=
'' and y != '\n'] for x in items]
current = 0
total = 0
left = []
number = ''
for sentence in cuts:
for i in sentence:
if i in allws:
current += len(i)
else:
m = re.match(number_re, i)
if m != None and m.span()[1] == len(i):
number = number + ', ' + i
elif re.match(id_re, i) != None:
continue
else:
left.append(i)
total += len(i)
print('coverage rate: ', current * 1.0 / total)
cnt = {}
for i in left:
if i not in cnt:
cnt[i] = 0
cnt[i] += 1
for i in words:
if i in cnt:
del cnt[i]
words = [[i, cnt[i]] for i in cnt]
words.sort(key=lambda x: -x[1])
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 13:13:19 2019
@author: Florian Ulrich Jehn
"""
import pandas as pd
import os
import numpy as np
import math
# Get the location of this file
file_dir = os.path.dirname(os.path.abspath(__file__))
def read_attributes():
att_df = pd.read_csv("all_catchment_attributes.csv", encoding='latin-1', sep=";", index_col=1)
return att_df
def elongation_ratio(area_catchment, max_basin_length):
"""
Calculates the elongation ratio for a given catchment
Watershed-based Morphometric Analysis: A Review S Sukristiyanti et al
"""
diameter_catchment_circle = math.sqrt(area_catchment/math.pi) * 2
return diameter_catchment_circle / max_basin_length
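# Hedged worked example (illustrative numbers only): a catchment of 1e7 m^2
# with a maximum flow length of 6000 m gives an equivalent-circle diameter of
# 2 * sqrt(1e7 / pi) ~= 3568 m, so elongation_ratio(1e7, 6000) ~= 0.59.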
def calculate_elongation(att_df):
"""Calculate the elongation ratio for all catchments"""
att_df["elongation_ratio"] = list(map(elongation_ratio, att_df["area_m2_watershed"], att_df["max_flow_len"]))
return att_df
def calculate_yearly_means(att_df):
for data_type in ["et", "dis", "prec"]:
if data_type == "et":
df = read_df(data_type+"_mm_1991_2018_corrected.csv")
else:
df = read_df(data_type+"_mm_1991_2018.csv")
means = df.groupby(df.index.year).sum().mean()
means.name = data_type + "_mean"
means.index = means.index.astype(int)
att_df = att_df.join(means)
return att_df
def read_df(name):
os.chdir(os.path.abspath(os.path.join(file_dir, os.pardir+os.sep))+os.sep+"cleaned_data")
df = pd.read_csv(name, sep=";", index_col=0)
df.index = pd.to_datetime(df.index, format='%Y-%m-%d')
return df
# Get the data, add some and sort it
att_df = read_attributes()
att_df = calculate_elongation(att_df)
att_df = calculate_yearly_means(att_df)
att_df["runoff_ratio"] = att_df["dis_mean"] / att_df["prec_mean"]
# Only use categories
cleaned_cat = ['gauge',
'durchlässigkeit_huek250', 'dominating_soil_type_bk500',
'land_use_corine',
'area_m2_watershed_cat',
'grundwasserneubildung_gwn_1000_cat',
'greundigkeit_physgru_1000_cat',
'slope_mean_dem_40_cat',
'elongation_ratio_cat', "et_mean_cat",
"dis_mean_cat", "prec_mean_cat", "runoff_ratio_cat"]
cleaned_num = []
for item in cleaned_cat:
if "cat" in item:
cleaned_num.append(item[:-4])
else:
cleaned_num.append(item)
# Go to the cleaned_data folder
os.chdir(os.path.abspath(os.path.join(file_dir, os.pardir+os.sep))+os.sep+"cleaned_data")
# Save
att_df[cleaned_num].to_csv("cleaned_catchment_attributes_num.csv", sep=";")
|
#!flask/bin/python
from flask import Flask, jsonify, abort
from dictionary_reader import search
import sys
import pika
import logging
import platform
app = Flask(__name__)
@app.route('/dictionary/api/v1.0/words/<string:word>', methods=['GET'])
def getSentimentValue(word):
logger = logging.getLogger('Dictionary Service')
if len(word.strip()) == 0: abort(404)
word_value = search(word)
if word_value[0]["value"] == 'unknown':
#send_to_unknown(word)
word_value[0]["value"] = 0
logger.info("DICTIONARY: word: " + word + " - hostname: " + platform.node())
return jsonify(word_value)
@app.errorhandler(404)
def not_found(error):
return jsonify({'error': 'Word cannot be empty'}), 404
def send_to_unknown(word):
message = word
connection = pika.BlockingConnection(pika.ConnectionParameters(host='172.17.0.5'))
channel = connection.channel()
channel.queue_declare(queue='unknown')
channel.basic_publish(exchange='', routing_key='unknown', body=message)
print(" [x] Sent " + message)
connection.close()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
app.run(host= '0.0.0.0', debug=True)
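# Hedged usage example (assumes the service is running locally on Flask's
# default port 5000; the exact response fields depend on dictionary_reader.search):
#
#     curl http://localhost:5000/dictionary/api/v1.0/words/happy
#
# returns a JSON list whose entries carry the sentiment "value" for the word,
# with unknown words mapped to a value of 0 by the handler above.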
|
from django.contrib import admin
from meetings.models import Meeting, Template
@admin.register(Meeting)
class MeetingAdmin(admin.ModelAdmin):
pass
@admin.register(Template)
class TemplateAdmin(admin.ModelAdmin):
pass
|
"""Base callback."""
import abc
import numpy as np
import mzcn as mz
class BaseCallback(abc.ABC):
"""
DataGenerator callback base class.
To build your own callbacks, inherit `mz.data_generator.callbacks.Callback`
and override the corresponding methods.
A batch is processed in the following way:
- slice data pack based on batch index
- handle `on_batch_data_pack` callbacks
- unpack data pack into x, y
- handle `on_batch_x_y` callbacks
- return x, y
"""
def on_batch_data_pack(self, data_pack: mz.DataPack):
"""
`on_batch_data_pack`.
:param data_pack: a sliced DataPack before unpacking.
"""
@abc.abstractmethod
def on_batch_unpacked(self, x: dict, y: np.ndarray):
"""
`on_batch_unpacked`.
:param x: unpacked x.
:param y: unpacked y.
"""
|
# import modules ----------------------------------------
import trimesh
from shapely.geometry import LineString
import numpy as np
import matplotlib.pyplot as plt
# Load in STL file --------------------------------------
stl_mesh = trimesh.load_mesh("3DBenchy.stl")
# Generate Intersection Rays -----------------------------
num = 1200 # number of rays
ray_origins = np.zeros([num,3])
ray_origins[:,0] = 30 # x coordinates
ray_origins[:,1] = np.linspace(-15,15,num) # y coordinates
ray_origins[:,2] = 20 # z coordinates
ray_directions = np.zeros([num,3])
ray_directions[:,0] = -1 # x component
ray_directions[:,1] = 0 # y component
ray_directions[:,2] = 0 # z component
# stack rays into line segments for visualization as Path3D
ray_visualize = trimesh.load_path(np.hstack((ray_origins, ray_origins + ray_directions*5.0)).reshape(-1, 2, 3))
# create a visualization scene with rays, hits, and mesh
scene = trimesh.Scene([stl_mesh, ray_visualize])
# show the visualization
scene.show() # Show rays with 3D render of STL model
# create Cross section ------------------------------
stl_slice = stl_mesh.section(plane_origin=[0,0,20], plane_normal=[0,0,1])
stl_slice.show()
# Calculate intersection points ---------------------
locations, index_ray, index_tri = stl_mesh.ray.intersects_location( ray_origins=ray_origins, ray_directions=ray_directions)
# Plot cross section overlayed with intersections ---
for ray in range(num):
plt.plot(locations[index_ray==ray,0],locations[index_ray==ray,1],"*")
plt.plot(stl_slice._vertices[:,0],stl_slice._vertices[:,1],".")
plt.show()
# Create Objective Function --------------------------
obj_func = np.zeros(num)
check = np.zeros(num)
for ray in range(num):
check[ray] = np.sum(index_ray==ray)
obj_func[ray] = np.sqrt(np.sum(locations[index_ray==ray,0]**2 + locations[index_ray==ray,1]**2))
if (np.any(check%2==1)):
print("intersetion is odd... fuck")
plt.plot(obj_func)
plt.show()
# stop code but keep param (debugging)
print("end")
|
""" file: test_history_cython.py
"""
import unittest
import numpy as np
from maxr.integrator.history import coefficients
from maxr.ext.coefficients import IntegratorCoeffs
class BaseHarness(unittest.TestCase):
"Base test class for history integrators"
order = 1
def test_equal_py_cy(self):
"Python and Cython implementations should give the same coefficients"
for length in range(1, 40):
self.assertTrue(np.allclose(
IntegratorCoeffs(length, self.order).as_array(),
coefficients(length, self.order)))
class TestFirstOrderIntegrator(BaseHarness):
order = 1
class TestSecondOrderIntegrator(BaseHarness):
order = 2
class TestThirdOrderIntegrator(BaseHarness):
order = 3
del BaseHarness
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
####################################################################
# This module 'knightsmove' simulates a knight's move #
# from point A -> B such that it takes minimum number of steps #
# between A and B. It finds all the possible solutions. #
# Here are the stats: #
# #
# BoardSize #Sol TimeTaken A->B #StepCount #
# ========= ===== ========= ========= ========= #
# 10x10 57 0m, 0.038s 0,0->9,9 6 #
# 20x20 179763 0m, 5.301s 0,0->19,19 15 #
# 30x30 2721049 1m,23.055s 0,0->29,29 21 #
# 40x40 10400727 5m,44.751s 0,0->39,39 27 #
####################################################################
import sys
# a list of lists, intended to be used as a C-styled
# 2D array. The inner lists will hold tuples representing
# a square.
board = []
# a 2D matrix of cost from each of the squares of the board to
# (dx,dy). This map is unique for each new value of (dx, dy).
# That means every time one invokes this program this cost matrix
# is generated again.
# It has the same dimensions as the board itself; the only difference
# is that instead of a tuple, each cell stores a number representing
# the number of knight's steps (cost) to reach the destination square.
costmap = []
# L1: the edge of the smallest square enclosing the square (dx,dy)
# the numbers represent the costs from the respective squares to (dx, dy)
l1cost = [2, 3, 2]
# L2: edge next after L1
l2cost = [4, 1, 2 ,1, 4]
SAME_ROW= 0
SAME_COL = 1
# A line L or edge is represented as [(x1,y1), (x2, y2)]. Given
# two lines L1, L2 this function returns (x,y), the intersection.
# For more info read about Cramer's rule
#
# Note:
# p and q are line segments, not infinite lines. The approach works on
# lines but we pass line segments, so after the function
# calculates (x,y), the point of intersection, we need to additionally
# check that the point lies on both segments. Remember lines are infinite and
# line segments are finite, so even with this approach we may get an
# (x,y) that lies outside the line segments.
# For that reason this function assumes that p represents
# the board edges and q the enclosing-square edges. To be clear,
# p should be the actual board edge and q the enclosing-square edge.
def intersection(p, q):
A = (p[0][1] - p[1][1])
B = (p[1][0] - p[0][0])
C = (p[0][0]*p[1][1] - p[1][0]*p[0][1])
D = (q[0][1] - q[1][1])
E = (q[1][0] - q[0][0])
F = (q[0][0]*q[1][1] - q[1][0]*q[0][1])
L1 = (A, B, -C)
L2 = (D, E, -F)
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
# The x,y represent the point where p, q should meet if they
# are lines (infinite), which they are not. They are segments(finite)
# so we need to validate x,y if it is present on both the edges
if p[0][0] <= x <= p[1][0] and p[0][1] <= y <= p[1][1]:
if q[0][0] <= x <= q[1][0] and q[0][1] <= y <= q[1][1]:
#print p, " and ", q, "intersect at", x,y
return x,y
else:
return False
else:
return False
#
# returns: how many iterations are to be done.
def get_num_iters(t, n):
r, c = t[0], t[1]
rn = n - 1 - r
cn = n - 1 - c
limit = max(r,c)
limitn = max(rn, cn)
return(max(limit, limitn))
# Based on tuple 't' and board size 'n', this
# returns a list of tuples each representing the
# possible squares a knight can reach from t.
def make_knights_moves(t, n):
D1, D2 = [1, -1], [2, -2] #D1 is displacement1
knight_t= []
for d1 in D1:
for d2 in D2:
if ((t[0] + d1 >= 0) and (t[1] + d2) < n and
(t[0] + d1 < n) and (t[1] + d2) >= 0):
knight_t.append((t[0] + d1,t[1] + d2),)
if ((t[0] + d2 >= 0) and (t[1] + d1) < n and
(t[0] + d2 < n) and (t[1] + d1) >= 0):
knight_t.append((t[0] + d2,t[1] + d1),)
#print "From Origin:", t, "Possible KM:", knight_t
return knight_t
# Given two rectangles R1 = [ (r1,c1), (r1,c2), (r2,c1), (r2,c2) ]
# R2 = [ (r3,c3), (r3,c4), (r4,c3), (r4,c4) ]
# clips R1 w.r.t R2 and returns R3 such that R3 forms the intersection
# rectangle.
# The order of tuples in R is very important. The order is [1,2,3,4]
# 1 2
# +----+
# | |
# | |
# +----+
# 3 4
# where 1,2,3,4 are the tuples representing the vertex of the rectangle.
# algo used:
# 1. form the set of all the intersection points, say s1
# 2. form the set of all the points of R1 inside R2, say s2
# 3. form the set of all the points of R2 inside R1, say s3
# 4. Finally, R = s1 U s2 U s3
#
# Returns: R
# Please also remember that the return value is again an R but the tuples
# may not be in the same order. If no overlap, R is empty.
def clip_square(paper_sq, scissor_sq):
# n is north, e is east ...
# _e is edge, h is horizontal, v is vertical
# _p is paper, _s is scissor
# do ee_s is eastedge_scissors
#print "Paper", paper_sq
#print "scissor", scissor_sq
clipped_sq = []
ne_p = [paper_sq[0], paper_sq[1]]
se_p = [paper_sq[2], paper_sq[3]]
ee_p = [paper_sq[1], paper_sq[3]]
we_p = [paper_sq[0], paper_sq[2]]
ne_s = [scissor_sq[0], scissor_sq[1]]
se_s = [scissor_sq[2], scissor_sq[3]]
ee_s = [scissor_sq[1], scissor_sq[3]]
we_s = [scissor_sq[0], scissor_sq[2]]
# First we find all the intersection points.
clipped_sq = []
for paper_edge in [ne_p, se_p]:
for scissor_edge in [ee_s, we_s]:
common = intersection(paper_edge, scissor_edge)
if common:
clipped_sq.append(common)
for paper_edge in [ee_p, we_p]:
for scissor_edge in [ne_s, se_s]:
common = intersection(paper_edge, scissor_edge)
if common:
clipped_sq.append(common)
#print "Intersection", clipped_sq
for paper_vertex in paper_sq:
if (paper_vertex[0] > scissor_sq[0][0] and
paper_vertex[1] > scissor_sq[0][1] and
paper_vertex[0] < scissor_sq[3][0] and
paper_vertex[1] < scissor_sq[3][1]):
#print "Including PV", paper_vertex
clipped_sq.append(paper_vertex)
for scissors_vertex in scissor_sq:
if (scissors_vertex[0] > paper_sq[0][0] and
scissors_vertex[1] > paper_sq[0][1] and
scissors_vertex[0] < paper_sq[3][0] and
scissors_vertex[1] < paper_sq[3][1]):
#print "Including SV", scissors_vertex
clipped_sq.append(scissors_vertex)
# possible duplicates in case of overlapped edges.
# We remove them before returning.
return list(set(clipped_sq))
def get_overlapping_edges(big_sq, lil_sq):
#print "big sq", big_sq
#print "lil_sq", lil_sq
lil_h = []
lil_v = []
for i in xrange(4):
if lil_sq[i][0][0] == lil_sq[i][1][0]:
lil_h.append(lil_sq[i])
else:
lil_v.append(lil_sq[i])
aligned_edge = []
offset_list = []
# hle : horizontal lil edge
# vbe : vertical big edge
for hle in lil_h:
for hbe in [big_sq[0], big_sq[3]]:
# we compare horizontal edges on both the squares.
# the x coordinate is common if they overlap
#print "check h b/w", hle, hbe
if hbe[0][0] == hle[0][0]:
#print "appending hle", hle
aligned_edge.append(hle)
hlil_min_x = abs(min(hle[0][1], hle[1][1]))
hlil_max_x = abs(max(hle[0][1], hle[1][1]))
hbig_min_x = abs(min(hbe[0][1], hbe[1][1]))
hbig_max_x = abs(max(hbe[0][1], hbe[1][1]))
#print "hlil_min_x:", hlil_min_x, "hlil_max_x:", hlil_max_x
#print "hbig_min_x:", hbig_min_x, "hbig_max_x:", hbig_max_x
up_offset = abs(hlil_min_x - hbig_min_x)
down_offset= abs(hbig_max_x - hlil_max_x)
#print "up_offset:", up_offset, "down_offset:",down_offset
offset_list.append((up_offset, down_offset),)
for vle in lil_v:
for vbe in [big_sq[1], big_sq[2]]:
#print "check v b/w", vle, vbe
if vle[0][1] == vbe[0][1]:
#print "appending vle", vle
aligned_edge.append(vle)
vlil_min_y = abs(min(vle[0][0], vle[1][0]))
vlil_max_y = abs(max(vle[0][0], vle[1][0]))
vbig_min_y = abs(min(vbe[0][0], vbe[1][0]))
vbig_max_y = abs(max(vbe[0][0], vbe[1][0]))
#print "vlil_min_y:", vlil_min_y, "vlil_max_y:", vlil_max_y
#print "vbig_min_y:", vbig_min_y, "vbig_max_y:", vbig_max_y
left_offset = abs(vbig_min_y - vlil_min_y)
right_offset= abs(vbig_max_y - vlil_max_y)
#print "left_offset:", left_offset, "right_offset:", right_offset
offset_list.append((left_offset,right_offset),)
#print "aligned edge", aligned_edge
#print "offset_list", offset_list
return aligned_edge, offset_list
# Given a tuple t, level l and board size n, this builds the enclosing square
# l levels out from t, then determines which enclosing-square edge is the
# longest one lying inside the board of size n. It also calculates what part
# of that edge lies inside the board.
# returns:
# [(x1,y1),(x2,y2)], length, left_offset, right_offset
#
def find_biggest_edge_inside_board(t, l, n):
r, c = t[0], t[1]
es = 2 * l + 1
e_t = ()
enclosure_sq = [(r - l, c - l), (r - l, c + l),
(r + l, c - l), (r + l, c + l)]
board_sq = [(0, 0), (0, n - 1),
(n - 1, 0), (n - 1, n - 1)]
#print "find_biggest_edge_inside_board:", t, "level", l
#print "Enclosure Square: ", enclosure_sq
#print "board_sq: ", board_sq
# clip the enclosing square w.r.t board
clipped_tup_list = clip_square (enclosure_sq, board_sq)
#print "Clipped tuple list:", clipped_tup_list
# What clip_square() returned is a list of 4 tuples that form
# the vertices of the 'intersecting' rectangle. We do not know the
# order, so we now form the edges based on the x and y coordinates.
clipped_edges = []
for i in xrange(len(clipped_tup_list)):
for j in xrange(i + 1, len(clipped_tup_list), 1):
if (clipped_tup_list[i][0] == clipped_tup_list[j][0] or
clipped_tup_list[i][1] == clipped_tup_list[j][1]):
clipped_edges.append([clipped_tup_list[i],clipped_tup_list[j]])
if len(clipped_edges) != 4:
print "Something wrong with clipping, Aborting"
print "clipped tupple List", clipped_tup_list
print "clipped_edges", clipped_edges
sys.exit()
# order: N W E S
enclosure_edges = [[enclosure_sq[0],enclosure_sq[1]], [enclosure_sq[0], enclosure_sq[2]],
[enclosure_sq[1], enclosure_sq[3]], [enclosure_sq[2], enclosure_sq[3]]]
#print "Enclosure Edges:", enclosure_edges, "\nClipped edges: ", clipped_edges
# We have the enclosure-square edges and also the intersecting-square edges. We need to
# see which edge can be used. Basically, the longest edge inside the intersecting square
# that is aligned with/overlapping an enclosure_sq edge is our candidate. We also need to
# compute the extent of alignment, i.e. the offsets of the clipped edge w.r.t. the
# corresponding enclosing-square edge.
encsq_clipped_edge, encsq_edge_offset = get_overlapping_edges(enclosure_edges, clipped_edges)
#print "Enclosure Sq clipped edge", encsq_clipped_edge
edge_len_list = []
for i in xrange(len(encsq_clipped_edge)):
a = 0
if encsq_clipped_edge[i][0][0] != encsq_clipped_edge[i][1][0]:
edge_len_list.append(abs(encsq_clipped_edge[i][0][0] - encsq_clipped_edge[i][1][0]))
else:
edge_len_list.append(abs(encsq_clipped_edge[i][0][1] - encsq_clipped_edge[i][1][1]))
#print "Clipped Edges lenghts:", edge_len_list
# Finally we have our eligible set of edges which form that part of enclosure
# square that is inside the board. Lets calculate the largest of such edge and
# return that.
max_len_index = edge_len_list.index(max(edge_len_list))
#print "LONGEST EDGE INSIDE", encsq_clipped_edge[max_len_index]
#print "WITH PRE-OFFSET:", encsq_edge_offset[max_len_index][0], " POST-OFFSET:", encsq_edge_offset[max_len_index][1]
return encsq_clipped_edge[max_len_index], max(edge_len_list),encsq_edge_offset[max_len_index][0], encsq_edge_offset[max_len_index][1]
def put_cost_list_on_egde(rc, costList, n, direction):
r,c = rc[0], rc[1]
#print "CL:", costList, "to be copied at", rc, "length: ", len(costList), "Direction", direction, "(0: SAME_ROW, 1: SAME_COL)"
if direction == SAME_ROW:
if r >= 0 and r < n: # copy direction is left to right; SAME ROW
for i in xrange(len(costList)):
if (c+i) >=0 and (c+i) < n:
#print "SAMEROW placing @ [",r,"][",c+i,"] copy: ", costList[i]
costmap[r][c+i] = costList[i]
elif c >= 0 and c < n: # SAME COLUMN; direction is DOWN
for i in xrange(len(costList)):
if (r+i) >=0 and (r+i) < n:
#print "SAMECOL placing @ [",r+i,"][",c,"] copy: ", costList[i]
costmap[r+i][c] = costList[i]
#display_board(costmap, n)
# We make the cost list for levels l > 2 onwards.
# The aim is to pick a corner square that lies inside the board,
# find all the locations the knight could possibly have moved from to reach
# "this"/the present location, and get the cost of all those locations.
# The minimum of all those costs, plus one,
# is the minimum cost to (dx, dy) from the present location.
#
# t: tuple i.e. (dx,dy)
# l: level, i.e. which enclosing square around (dx,dy) this edge belongs to;
# edge size at each level is 2*l + 1
# n: board size
def make_edge_cost_list(t, l, n):
#es : edge size
es = 2 * l + 1
costList = []
r, c = t[0], t[1]
# need to find out which side is feasible
# obviously we dont want to straddle across
# boundary during calculation.
edge, length, l_off, r_off = find_biggest_edge_inside_board(t, l ,n)
#print "Level ", l, "Max Edge:", edge, "Lenght:", length, "l_off:", l_off, "r_off:", r_off
if length != 0:
costList = [n+1 for x in xrange(es)]
# This is a small hack, as we do not know whether
# the edge tuples are properly ordered in
# increasing order of their dimension.
r, c = 0, 0
if edge[0][0] == edge[1][0]:
loop_direction = SAME_ROW
r, c = edge[0][0], min(edge[0][1], edge[1][1])
else:
loop_direction = SAME_COL
r, c = min(edge[0][0], edge[1][0]), edge[0][1]
for i in xrange(length + 1):
cost = n + 1
if loop_direction == SAME_ROW:
for krc in make_knights_moves((r , c + i),n):
if costmap[krc[0]][krc[1]] > n:
continue
if costmap[krc[0]][krc[1]] < cost:
cost = costmap[krc[0]][krc[1]] + 1
costList[l_off + i] = cost
#print "SR costList", costList
else:
for krc in make_knights_moves((r + i , c ),n):
if costmap[krc[0]][krc[1]] > n:
continue
if costmap[krc[0]][krc[1]] < cost:
cost = costmap[krc[0]][krc[1]] + 1
costList[l_off + i] = cost
#print "SC costList", costList
# using the symmetry to make costList complete at
# those points where it still contains n+1
mid = es / 2
for i in xrange(mid, es, 1):
if costList[i] < costList[-i-1]:
costList[-i-1] = costList[i]
else:
costList[i] = costList[-i-1]
return costList
# t is (dx,dy) i.e dest tupple always
def make_next_cost_sq(t, l, n):
costList = []
if l == 1:
costList = l1cost
elif l == 2:
costList = l2cost
else:
costList = make_edge_cost_list(t, l , n)
rc = (t[0] - l, t[1] - l) # NW corner tupple
put_cost_list_on_egde(rc, costList, n, SAME_ROW)
rc = (t[0] - l, t[1] - l) # NW corner tupple
put_cost_list_on_egde(rc, costList, n, SAME_COL)
rc = (t[0] + l, t[1] - l) # SW corner tupple
put_cost_list_on_egde(rc, costList, n, SAME_ROW)
rc = (t[0] - l, t[1] + l) # NE corner tupple
put_cost_list_on_egde(rc, costList, n, SAME_COL)
def fix_corner_cost(n):
four_corners = [(0,0),(0,n-1),(n-1,0),(n-1,n-1)]
for t in four_corners:
r,c = t[0],t[1]
for sq_r in range(r-1, r+2,1):
for sq_c in range(c-1, c+2, 1):
if sq_r < 0 or sq_r >= n or sq_c < 0 or sq_c >=n:
continue
#print "Check corner:", (sq_r, sq_c)
if costmap[sq_r][sq_c] == 0:
continue
next_moves = make_knights_moves((sq_r,sq_c),n)
cost_next_moves = [costmap[m[0]][m[1]] for m in next_moves]
if min(cost_next_moves) >= costmap[sq_r][sq_c]:
print "Fixing: cost: ", costmap[sq_r][sq_c], "@(",sq_r,",",sq_c,")", " to ",
costmap[sq_r][sq_c] = min(cost_next_moves) + 1
print costmap[sq_r][sq_c]
def make_cost_board(t, n):
# l, is level, d is direction
l = get_num_iters(t, n)
#print "Total Level", l
for l in xrange(1, l + 1, 1):
#print "Lvl: ", l
make_next_cost_sq(t, l, n)
#display_board(costmap, n)
# There is a roughly 1/1000 possibility that the chosen square
# is a corner, as source or destination,
# and the other square is its diagonal neighbour. The cost on
# the source is then set to 2 but is actually 4; we need
# to check that condition and fix it. If we don't address
# this, chances are the solution may not converge at that
# point.
#fix_corner_cost(t,n)
fix_corner_cost(n)
def display_board(board, n):
print "Board:\n"
for r in xrange(n):
print board[r]
print
print
def initialise_board(n):
b = []
for r in xrange(n):
rowlist = []
for c in xrange(n):
rowlist.append((r,c),)
b.append(rowlist)
return b
# Given source tuple s and destination tuple d on a chess board
# of size n x n, finds all the minimum-cost move sequences from s to d.
def find_knight_min_moves(s, path, n):
sr, sc = s[0], s[1]
next_moves = make_knights_moves((s),n)
cost_next_moves = [costmap[m[0]][m[1]] for m in next_moves]
min_indices = [i for i, v in enumerate(cost_next_moves) if v == min(cost_next_moves)]
#print "Source", s
#print "next_moves", next_moves
#print "cost ", cost_next_moves
#print "mins ", min_indices
for i, v in enumerate(min_indices):
next_path = list(path)
if cost_next_moves[v] == 0:
next_path.append(next_moves[min_indices[i]])
print "MIN PATH", next_path
return
else:
#print "= Existing Path:", path
#print "= Attempt with New Source:", next_moves[min_indices[i]]
next_path.append(next_moves[min_indices[i]])
find_knight_min_moves(next_moves[min_indices[i]], next_path, n)
def initialise_costmap(n):
l = [[ n+1 for x in xrange(n)] for i in xrange(n)]
return l
def usage():
print "usage:\n", sys.argv[0], "<n> <dx> <dy> <sx> <sy> \n\n"
print "To test all possible permutations: (debug)"
print sys.argv[0], "<n> <dx> <dy> <sx> <sy> 1"
sys.exit(0)
if len(sys.argv) < 6:
usage()
check_all = 0
bsize = int(sys.argv[1])
dx = int(sys.argv[2])
dy = int(sys.argv[3])
sx = int(sys.argv[4])
sy = int(sys.argv[5])
try:
check_all = int(sys.argv[6])
except IndexError:
pass
if sx < 0 or sy < 0 or sx >= bsize or sy >= bsize:
print "Dont worry!, it works horizontally. Give me sane input\n"
usage()
if dx < 0 or dy < 0 or dx >= bsize or dy >= bsize:
print "Dont worry!, it works horizontally. Give me sane input\n"
usage()
# This check_all flag is there to test the code for all
# possible costmaps for a board size.
# Just append an extra 1 to the arguments to trigger this code.
# Please note that the rest of the arguments won't
# be considered then, as it tests all permutations.
if check_all:
error_rc = []
for r in xrange(bsize):
print "Creating and checking",bsize," Costmaps at row:", r,
for c in xrange(bsize):
#board = initialise_board(bsize)
costmap = initialise_costmap(bsize)
costmap[r][c] = 0
make_cost_board((r,c), bsize)
for er in xrange(bsize ):
if bsize + 1 in costmap[er]:
error_rc.append((r,c))
print "Error @", r,c
display_board(costmap, bsize)
break
print ".........[Done]"
if not len(error_rc):
print "\n\nAll Clean :)\n"
else:
print "Errors", set(error_rc)
else:
#board = initialise_board(bsize)
costmap = initialise_costmap(bsize)
costmap[dx][dy] = 0
make_cost_board((dx,dy), bsize)
display_board(costmap, bsize)
find_knight_min_moves((sx,sy), [(sx,sy)] ,bsize)
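# Hedged example invocation (matches usage() and the stats table in the header):
#   ./knightsmove.py 10 0 0 9 9
# builds the cost map for destination (0,0) on a 10x10 board and prints every
# minimum-length knight path from (9,9) to (0,0).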
|
# -*- coding: utf-8 -*-
from typing import Any, Dict
from dateutil.relativedelta import relativedelta
from datetime import datetime
def compute_freelance_skills(payload: Dict[str, Any]) -> Dict[str, Any]:
"""
Receives the freelance payload and count the months experience with every skill
"""
result = {'freelance': {'id': payload['freelance']['id'], 'computed_skills': []}}
skills = {}
for professional_experience in payload['freelance']['professional_experiences']:
for skill in professional_experience['skills']:
if skill['id'] not in skills:
skills[skill['id']] = {
'id': skill['id'],
'name': skill['name'],
'months': {},
}
current: datetime = professional_experience['start_date']
last: datetime = professional_experience['end_date']
last = last.replace(day=1)
while current < last:
year_month = current.strftime('%Y%m')
skills[skill['id']]['months'][year_month] = None
current += relativedelta(months=1)
for skill in skills.values():
computed_skill = {
'id': skill['id'],
'name': skill['name'],
'duration_in_months': len(skill['months']),
}
result['freelance']['computed_skills'].append(computed_skill)
return result
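# Hedged usage sketch (illustrative payload; field names follow the code above
# and the dates are datetime objects, as the type hints suggest):
if __name__ == '__main__':
    payload = {
        'freelance': {
            'id': 1,
            'professional_experiences': [{
                'start_date': datetime(2020, 1, 15),
                'end_date': datetime(2020, 4, 10),
                'skills': [{'id': 7, 'name': 'Python'}],
            }],
        },
    }
    # Months 2020-01 .. 2020-03 are counted, so duration_in_months == 3
    print(compute_freelance_skills(payload))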
|
import os
import logging
from django.core.management.base import BaseCommand
from manti_by.apps.blog.models import Post
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Check posts state"
def handle(self, *args, **options):
checked = 0
for post in Post.objects.all():
try:
post.ogg_release_ready = (
os.path.exists(post.release_ogg_file)
and os.path.getsize(post.release_ogg_file) > 0
)
post.mp3_preview_ready = (
os.path.exists(post.preview_mp3_file)
and os.path.getsize(post.preview_mp3_file) > 0
)
post.ogg_preview_ready = (
os.path.exists(post.preview_ogg_file)
and os.path.getsize(post.preview_ogg_file) > 0
)
self.stdout.write(
"Checked post #%d: %d %d %d"
% (
post.id,
1 if post.ogg_release_ready else 0,
1 if post.mp3_preview_ready else 0,
1 if post.ogg_preview_ready else 0,
)
)
post.save()
checked += 1
except Exception as e:
logger.error(e)
self.stdout.write("Checked %d posts" % checked)
|
from multiprocessing import Pool, TimeoutError
import json
import pika
import sys
from raw_replayer import RawReplayerFactory
from summary_replayer import SummaryReplayerFactory
from transfer_summary import TransferSummaryFactory
import toml
import argparse
import logging
class OverMind:
"""
Top level class that listens to for requests
"""
def __init__(self, configuration):
self._pool = None
# Import the configuration
self._config = {}
with open(configuration, 'r') as config_file:
self._config = toml.loads(config_file.read())
logging.basicConfig(level=logging.DEBUG)
def run(self):
"""
Event Loop
"""
# Start up the pool processes
self._pool = Pool(processes=4)
self.createConnection()
self._chan.basic_consume(self._receiveMsg, 'gracc.osg.requests')
# The library gives us an event loop built-in, so lets use it!
# This program only responds to messages on the rabbitmq, so no
# reason to listen to anything else.
try:
self._chan.start_consuming()
except KeyboardInterrupt:
self._chan.stop_consuming()
sys.exit(1)
def createConnection(self):
self.parameters = pika.URLParameters(self._config['AMQP']['url'])
self._conn = pika.adapters.blocking_connection.BlockingConnection(self.parameters)
self._chan = self._conn.channel()
# Create the exchange, if it doesn't already exist
        # TODO: capture exit codes on all these calls
self._chan.exchange_declare(exchange=self._config["AMQP"]['exchange'], exchange_type='direct')
self._chan.queue_declare(queue=self._config["AMQP"]['queue'])
self._chan.queue_bind(self._config["AMQP"]['queue'], self._config["AMQP"]['exchange'])
#self._chan.queue_declare(queue="request_raw", durable=True, auto_delete=False, exclusive=False)
def _receiveMsg(self, channel, method_frame, header_frame, body):
"""
Receive messages from the RabbitMQ queue
"""
msg_body = {}
try:
msg_body = json.loads(body)
        except ValueError:
logging.warning("Unable to json parse the body of the message")
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
return
logging.debug("Incoming Message:")
logging.debug(str(msg_body))
# TODO: some sort of whitelist, authentication?
if msg_body['kind'] == 'raw':
logging.debug("Received raw message, dispatching")
self._pool.apply_async(RawReplayerFactory, (msg_body, self.parameters, self._config))
elif msg_body['kind'] == 'summary':
logging.debug("Received summary message, dispatching")
self._pool.apply_async(SummaryReplayerFactory, (msg_body, self.parameters, self._config))
elif msg_body['kind'] == 'transfer_summary':
logging.debug("Received transfer_summary message, dispatching")
self._pool.apply_async(TransferSummaryFactory, (msg_body, self.parameters, self._config))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def main():
# Parse arguments
parser = argparse.ArgumentParser(description="GRACC Request Daemon")
parser.add_argument("-c", "--configuration", help="Configuration file location",
default="/etc/graccreq/config.toml", dest='config')
args = parser.parse_args()
# Create and run the OverMind
overmind = OverMind(args.config)
overmind.run()
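# Example configuration sketch (illustrative; these are just the keys this daemon
# reads, actual values are deployment-specific):
#
#   [AMQP]
#   url = "amqp://user:password@localhost:5672/%2F"
#   exchange = "gracc.osg"
#   queue = "gracc.osg.requests"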
|
#####
#
# visualize_incoming_annotations.py
#
# Spot-check the annotations received from iMerit by visualizing annotated bounding
# boxes on a sample of images and display them in HTML.
#
# Modified in 2021 March to use the new format (iMerit batch 12 onwards), which is a
# COCO formatted JSON with relative coordinates for the bbox.
#
#####
import argparse
import io
import json
import os
from collections import defaultdict
from random import sample
from tqdm import tqdm
from write_html_image_list import write_html_image_list # Assumes ai4eutils is on the path
#from data_management.megadb.schema import sequences_schema_check
from data_management.megadb.megadb_utils import MegadbUtils
from data_management.cct_json_utils import IndexedJsonDb
from visualization import visualization_utils as vis_utils
def get_image_rel_path(dataset_seq_images, dataset_name, seq_id, frame_num):
images = dataset_seq_images[dataset_name][seq_id]
for im in images:
if im['frame_num'] == frame_num:
return im['file']
return None
def visualize_incoming_annotations(args):
print('Connecting to MegaDB to get the datasets table...')
megadb_utils = MegadbUtils()
datasets_table = megadb_utils.get_datasets_table()
print('Loading the MegaDB entries...')
with open(args.megadb_entries) as f:
sequences = json.load(f)
print(f'Total number of sequences: {len(sequences)}')
dataset_seq_images = defaultdict(dict)
for seq in sequences:
dataset_seq_images[seq['dataset']][seq['seq_id']] = seq['images']
print('Loading incoming annotation entries...')
incoming = IndexedJsonDb(args.incoming_annotation)
print(f'Number of images in this annotation file: {len(incoming.image_id_to_image)}')
if args.num_to_visualize != -1 and args.num_to_visualize <= len(incoming.image_id_to_image):
incoming_id_to_anno = sample(list(incoming.image_id_to_annotations.items()),
args.num_to_visualize)
else:
incoming_id_to_anno = incoming.image_id_to_annotations.items()
# The file_name field in the incoming json looks like alka_squirrels.seq2020_05_07_25C.frame119221.jpg
# we need to use the dataset, sequence and frame info to find the actual path in blob storage
# using the sequences
images_html = []
for image_id, annotations in tqdm(incoming_id_to_anno):
if args.trim_to_images_bboxes_labeled and annotations[0]['category_id'] == 5:
# category_id 5 is No Object Visible
continue
anno_file_name = incoming.image_id_to_image[image_id]['file_name']
parts = anno_file_name.split('.')
dataset_name = parts[0]
seq_id = parts[1].split('seq')[1]
frame_num = int(parts[2].split('frame')[1])
im_rel_path = get_image_rel_path(dataset_seq_images, dataset_name, seq_id, frame_num)
if im_rel_path is None:
print(f'Not found in megadb entries: dataset {dataset_name},'
f' seq_id {seq_id}, frame_num {frame_num}')
continue
im_full_path = megadb_utils.get_full_path(datasets_table, dataset_name, im_rel_path)
# download the image
container_client = megadb_utils.get_storage_client(datasets_table, dataset_name)
downloader = container_client.download_blob(im_full_path)
image_file = io.BytesIO()
blob_props = downloader.download_to_stream(image_file)
image = vis_utils.open_image(image_file)
boxes = [anno['bbox'] for anno in annotations]
classes = [anno['category_id'] for anno in annotations]
vis_utils.render_iMerit_boxes(boxes, classes, image)
file_name = '{}_gtbbox.jpg'.format(os.path.splitext(anno_file_name)[0].replace('/', '~'))
image = vis_utils.resize_image(image, args.output_image_width)
image.save(os.path.join(args.output_dir, 'rendered_images', file_name))
images_html.append({
'filename': '{}/{}'.format('rendered_images', file_name),
'title': '{}, number of boxes: {}'.format(anno_file_name, len([b for b in boxes if len(b) > 0])),
'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
})
# Write to HTML
images_html = sorted(images_html, key=lambda x: x['filename'])
write_html_image_list(
filename=os.path.join(args.output_dir, 'index.html'),
images=images_html,
options={
'headerHtml': '<h1>Sample annotations from {}</h1>'.format(args.incoming_annotation)
})
print('Visualized {} images.'.format(len(images_html)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'incoming_annotation', type=str,
help='Path to a json in the COCO format with relative coordinates for the bbox from annotators')
parser.add_argument(
'megadb_entries', type=str,
help='Path to a json list of MegaDB entries to look up image path in blob storage')
parser.add_argument(
'output_dir', action='store', type=str,
help='Output directory for html and rendered images')
parser.add_argument(
'--trim_to_images_bboxes_labeled', action='store_true',
help='Only include images that have been sent for bbox labeling (but '
'may be actually empty). Turn this on if QAing annotations.')
parser.add_argument(
'--num_to_visualize', action='store', type=int, default=200,
help='Number of images to visualize. If trim_to_images_bboxes_labeled, there may be fewer than specified')
parser.add_argument(
'-w', '--output_image_width', type=int, default=700,
help='an integer indicating the desired width in pixels of the output '
'annotated images. Use -1 to not resize.')
args = parser.parse_args()
assert 'COSMOS_ENDPOINT' in os.environ and 'COSMOS_KEY' in os.environ
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'rendered_images'), exist_ok=True)
visualize_incoming_annotations(args)
if __name__ == '__main__':
main()
|
from enum import IntFlag
from .chinese.extractors import *
from recognizers_text import *
from .english.extractors import *
from .english.parsers import *
from .models import *
from .parsers import *
class SequenceOptions(IntFlag):
NONE = 0
class SequenceRecognizer(Recognizer[SequenceOptions]):
def __init__(self, target_culture: str = None, options: SequenceOptions = SequenceOptions.NONE,
lazy_initialization: bool = True):
if options < SequenceOptions.NONE or options > SequenceOptions.NONE:
raise ValueError()
super().__init__(target_culture, options, lazy_initialization)
def initialize_configuration(self):
self.register_model('PhoneNumberModel', Culture.English,
lambda options: PhoneNumberModel(PhoneNumberParser(),
BasePhoneNumberExtractor(EnglishPhoneNumberExtractorConfiguration())))
self.register_model('EmailModel', Culture.English,
lambda options: EmailModel(EmailParser(), EnglishEmailExtractor()))
self.register_model('PhoneNumberModel', Culture.Chinese,
lambda options: PhoneNumberModel(PhoneNumberParser(),
BasePhoneNumberExtractor(ChinesePhoneNumberExtractorConfiguration())))
self.register_model('IpAddressModel', Culture.English,
lambda options: IpAddressModel(IpParser(), EnglishIpExtractor()))
self.register_model('MentionModel', Culture.English,
lambda options: MentionModel(MentionParser(), EnglishMentionExtractor()))
self.register_model('HashtagModel', Culture.English,
lambda options: HashtagModel(HashtagParser(), EnglishHashtagExtractor()))
self.register_model('URLModel', Culture.English,
lambda options: URLModel(
URLParser(), BaseURLExtractor(EnglishURLExtractorConfiguration(options)))
)
self.register_model('URLModel', Culture.Chinese,
lambda options: URLModel(
URLParser(), BaseURLExtractor(ChineseURLExtractorConfiguration(options)))
)
self.register_model('GUIDModel', Culture.English,
lambda options: GUIDModel(GUIDParser(), EnglishGUIDExtractor()))
def get_phone_number_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
if culture and (culture.lower().startswith("zh-") or culture.lower().startswith("ja-")):
return self.get_model('PhoneNumberModel', Culture.Chinese, fallback_to_default_culture)
return self.get_model('PhoneNumberModel', culture, fallback_to_default_culture)
def get_ip_address_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
return self.get_model('IpAddressModel', culture, fallback_to_default_culture)
def get_mention_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
return self.get_model('MentionModel', culture, fallback_to_default_culture)
def get_hashtag_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
return self.get_model('HashtagModel', culture, fallback_to_default_culture)
def get_url_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
if culture and (culture.lower().startswith("zh-") or culture.lower().startswith("ja-")):
return self.get_model('URLModel', Culture.Chinese, fallback_to_default_culture)
return self.get_model('URLModel', culture, fallback_to_default_culture)
def get_guid_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
return self.get_model('GUIDModel', culture, fallback_to_default_culture)
def get_email_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
return self.get_model('EmailModel', culture, fallback_to_default_culture)
def recognize_phone_number(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_phone_number_model(culture, fallback_to_default_culture)
return model.parse(query)
def recognize_email(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_email_model(culture, fallback_to_default_culture)
return model.parse(query)
def recognize_ip_address(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_ip_address_model(culture, fallback_to_default_culture)
return model.parse(query)
def recognize_mention(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_mention_model(culture, fallback_to_default_culture)
return model.parse(query)
def recognize_hashtag(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_hashtag_model(culture, fallback_to_default_culture)
return model.parse(query)
def recognize_url(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_url_model(culture, fallback_to_default_culture)
return model.parse(query)
def recognize_guid(query: str, culture: str, options: SequenceOptions = SequenceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = SequenceRecognizer(culture, options)
model = recognizer.get_guid_model(culture, fallback_to_default_culture)
return model.parse(query)
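# Usage sketch (illustrative; result fields follow recognizers_text ModelResult conventions):
#
#   results = recognize_phone_number("call me at 425-555-0100", Culture.English)
#   for result in results:
#       print(result.text, result.type_name)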
|
from config import *
# Position Evaluation Matrices
wp = [900, 900, 900, 900, 900, 900, 900, 900,
50, 50, 50, 50, 50, 50, 50, 50,
10, 10, 20, 30, 30, 20, 10, 10,
5, 5, 10, 25, 25, 10, 5, 5,
0, 0, 0, 20, 20, 0, 0, 0,
5, -5,-10, 0, 0,-10, -5, 5,
5, 10, 10,-20,-20, 10, 10, 5,
0, 0, 0, 0, 0, 0, 0, 0]
wk = [-50,-40,-30,-30,-30,-30,-40,-50,
-40,-20, 0, 0, 0, 0,-20,-40,
-30, 0, 10, 15, 15, 10, 0,-30,
-30, 5, 15, 20, 20, 15, 5,-30,
-30, 0, 15, 20, 20, 15, 0,-30,
-30, 5, 10, 15, 15, 10, 5,-30,
-40,-20, 0, 5, 5, 0,-20,-40,
-50,-40,-30,-30,-30,-30,-40,-50]
wb = [-20,-10,-10,-10,-10,-10,-10,-20,
-10, 0, 0, 0, 0, 0, 0,-10,
-10, 0, 5, 10, 10, 5, 0,-10,
-10, 5, 5, 10, 10, 5, 5,-10,
-10, 0, 10, 10, 10, 10, 0,-10,
-10, 10, 10, 10, 10, 10, 10,-10,
-10, 5, 0, 0, 0, 0, 5,-10,
-20,-10,-10,-10,-10,-10,-10,-20]
wr = [ 0, 0, 0, 0, 0, 0, 0, 0,
5, 10, 10, 10, 10, 10, 10, 5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
0, 0, 0, 5, 5, 0, 0, 0]
wq = [-20,-10,-10, -5, -5,-10,-10,-20,
-10, 0, 0, 0, 0, 0, 0,-10,
-10, 0, 5, 5, 5, 5, 0,-10,
-5, 0, 5, 5, 5, 5, 0, -5,
0, 0, 5, 5, 5, 5, 0, -5,
-10, 5, 5, 5, 5, 5, 0,-10,
-10, 0, 5, 0, 0, 0, 0,-10,
-20,-10,-10, -5, -5,-10,-10,-20]
bp = [0, 0, 0, 0, 0, 0, 0, 0,
5, 10, 10,-20,-20, 10, 10, 5,
5, -5,-10, 0, 0,-10, -5, 5,
0, 0, 0, 20, 20, 0, 0, 0,
5, 5, 10, 25, 25, 10, 5, 5,
10, 10, 20, 30, 30, 20, 10, 10,
50, 50, 50, 50, 50, 50, 50, 50,
900, 900, 900, 900, 900, 900, 900, 900]
bk = [-50,-40,-30,-30,-30,-30,-40,-50,
-40,-20, 0, 0, 0, 0,-20,-40,
-30, 0, 10, 15, 15, 10, 0,-30,
      -30,  5, 15, 20, 20, 15,  5,-30,
-30, 0, 15, 20, 20, 15, 0,-30,
      -30,  5, 10, 15, 15, 10,  5,-30,
-40,-20, 0, 5, 5, 0,-20,-40,
-50,-40,-30,-30,-30,-30,-40,-50]
bb = [-20,-10,-10,-10,-10,-10,-10,-20,
-10, 5, 0, 0, 0, 0, 5,-10,
-10, 10, 10, 10, 10, 10, 10,-10,
-10, 0, 10, 10, 10, 10, 0,-10,
      -10,  5,  5, 10, 10,  5,  5,-10,
-10, 0, 5, 10, 10, 5, 0,-10,
-10, 0, 0, 0, 0, 0, 0,-10,
-20,-10,-10,-10,-10,-10,-10,-20]
br = [  0,  0,  0,  5,  5,  0,  0,  0,
       -5,  0,  0,  0,  0,  0,  0, -5,
       -5,  0,  0,  0,  0,  0,  0, -5,
       -5,  0,  0,  0,  0,  0,  0, -5,
       -5,  0,  0,  0,  0,  0,  0, -5,
       -5,  0,  0,  0,  0,  0,  0, -5,
        5, 10, 10, 10, 10, 10, 10,  5,
        0,  0,  0,  0,  0,  0,  0,  0]
bq = [-20,-10,-10, -5, -5,-10,-10,-20,
-10, 0, 0, 0, 0, 0, 0,-10,
-10, 0, 5, 5, 5, 5, 0,-10,
-5, 0, 5, 5, 5, 5, 0, -5,
-5, 0, 5, 5, 5, 5, 0, -5,
-10, 0, 5, 5, 5, 5, 0,-10,
-10, 0, 0, 0, 0, 0, 0,-10,
-20,-10,-10, -5, -5,-10,-10,-20]
def piece_values(piece, position, style='traditional'):
if style == 'traditional':
if piece[2:] == 'pawn':
return 1
elif piece[2:] == 'knight':
return 3
elif piece[2:] == 'bishop':
return 3
elif piece[2:] == 'rook':
return 5
elif piece[2:] == 'queen':
return 9
elif piece[2:] == 'king':
return 90
file_num = ord(position[0]) - 65
rank = -(int(position[1]) - 8)
    # The try/except below falls back to traditional piece values if a table lookup ever goes out of range.
try:
if style == 'advanced':
#return a value which is the piece value plus any value it has for its current position
if piece == 'w_pawn':
return 1 + wp[(rank*8)+file_num]/100.0
elif piece == 'w_knight':
return 3 + wk[(rank*8)+file_num]/100.0
elif piece == 'w_bishop':
return 3 + wb[(rank*8)+file_num]/100.0
elif piece == 'w_rook':
return 5 + wr[(rank*8)+file_num]/100.0
elif piece == 'w_queen':
return 9 + wq[(rank*8)+file_num]/100.0
elif piece == 'w_king':
return 90
elif piece == 'b_pawn':
return 1 + bp[(rank*8)+file_num]/100.0
elif piece == 'b_knight':
return 3 + bk[(rank*8)+file_num]/100.0
elif piece == 'b_bishop':
return 3 + bb[(rank*8)+file_num]/100.0
elif piece == 'b_rook':
return 5 + br[(rank*8)+file_num]/100.0
elif piece == 'b_queen':
return 9 + bq[(rank*8)+file_num]/100.0
elif piece == 'b_king':
return 90
    except IndexError:
return piece_values(piece, position, 'traditional') - 1
return 0
def calc_score(inBoard, style='traditional'):
score = 0
for key in board:
if inBoard[key] == chess_pieces['w_pawn']: score += piece_values('w_pawn', key, style)
elif inBoard[key] == chess_pieces['w_knight']: score += piece_values('w_knight', key, style)
elif inBoard[key] == chess_pieces['w_bishop']: score += piece_values('w_bishop', key, style)
elif inBoard[key] == chess_pieces['w_rook']: score += piece_values('w_rook', key, style)
elif inBoard[key] == chess_pieces['w_queen']: score += piece_values('w_queen', key, style)
elif inBoard[key] == chess_pieces['w_king']: score += piece_values('w_king', key, style)
elif inBoard[key] == chess_pieces['b_pawn']: score -= piece_values('b_pawn', key, style)
elif inBoard[key] == chess_pieces['b_knight']: score -= piece_values('b_knight', key, style)
elif inBoard[key] == chess_pieces['b_bishop']: score -= piece_values('b_bishop', key, style)
elif inBoard[key] == chess_pieces['b_rook']: score -= piece_values('b_rook', key, style)
elif inBoard[key] == chess_pieces['b_queen']: score -= piece_values('b_queen', key, style)
elif inBoard[key] == chess_pieces['b_king']: score -= piece_values('b_king', key, style)
return score
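# Usage sketch (illustrative; `board` and `chess_pieces` are expected to come from
# config, imported above). A positive calc_score() favors White, a negative one Black:
#
#   material_score = calc_score(board, style='traditional')
#   positional_score = calc_score(board, style='advanced')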
def check_insuf_mat():
Wpieces = [0,0,0,0,0]
Bpieces = [0,0,0,0,0]
for key in board:
if board[key] == chess_pieces['w_pawn']: Wpieces[0] += 1
elif board[key] == chess_pieces['w_knight']: Wpieces[1] += 1
elif board[key] == chess_pieces['w_bishop']: Wpieces[2] += 1
elif board[key] == chess_pieces['w_rook']: Wpieces[3] += 1
elif board[key] == chess_pieces['w_queen']: Wpieces[4] += 1
elif board[key] == chess_pieces['b_pawn']: Bpieces[0] += 1
elif board[key] == chess_pieces['b_knight']: Bpieces[1] += 1
elif board[key] == chess_pieces['b_bishop']: Bpieces[2] += 1
elif board[key] == chess_pieces['b_rook']: Bpieces[3] += 1
elif board[key] == chess_pieces['b_queen']: Bpieces[4] += 1
print(Wpieces)
if Wpieces[0] == 0 and Wpieces[3] == 0 and Wpieces[4] == 0 \
and Bpieces[0] == 0 and Bpieces[3] == 0 and Bpieces[4] == 0 \
and (Wpieces[1] + Wpieces[2] < 2) and (Bpieces[1] + Bpieces[2] < 2):
return True
return False
|
from dedoc.attachments_handler.attachments_handler import AttachmentsHandler
from dedoc.converters.concrete_converters.docx_converter import DocxConverter
from dedoc.converters.concrete_converters.excel_converter import ExcelConverter
from dedoc.converters.concrete_converters.pptx_converter import PptxConverter
from dedoc.converters.file_converter import FileConverterComposition
from dedoc.metadata_extractor.concreat_metadata_extractors.base_metadata_extractor import BaseMetadataExtractor
from dedoc.metadata_extractor.concreat_metadata_extractors.docx_metadata_extractor import DocxMetadataExtractor
from dedoc.metadata_extractor.metadata_extractor_composition import MetadataExtractorComposition
from dedoc.readers.csv_reader.csv_reader import CSVReader
from dedoc.readers.docx_reader.docx_reader import DocxReader
from dedoc.readers.excel_reader.excel_reader import ExcelReader
from dedoc.readers.json_reader.json_reader import JsonReader
from dedoc.readers.pptx_reader.pptx_reader import PptxReader
from dedoc.readers.reader_composition import ReaderComposition
from dedoc.readers.txt_reader.raw_text_reader import RawTextReader
from dedoc.structure_constructor.concreat_structure_constructors.linear_constructor import LinearConstructor
from dedoc.structure_constructor.concreat_structure_constructors.tree_constructor import TreeConstructor
from dedoc.structure_constructor.structure_constructor_composition import StructureConstructorComposition
"""MANAGER SETTINGS"""
def get_manager_config(config: dict) -> dict:
return dict(
converter=FileConverterComposition(converters=[DocxConverter(config=config),
ExcelConverter(config=config),
PptxConverter(config=config)]),
reader=ReaderComposition(readers=[DocxReader(config=config),
ExcelReader(config=config),
PptxReader(),
CSVReader(),
RawTextReader(config=config),
JsonReader(),
]),
structure_constructor=StructureConstructorComposition(
extractors={"linear": LinearConstructor(), "tree": TreeConstructor()},
default_extractor=LinearConstructor()
),
document_metadata_extractor=MetadataExtractorComposition(extractors=[
DocxMetadataExtractor(),
BaseMetadataExtractor()
]),
attachments_extractor=AttachmentsHandler(config=config)
)
|
__author__ = 'thatcher'
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
from .cityscapes_evaluationJ_forHumanEval import CityscapesSemSegEvaluatorJ_forHumanEval  # i.21.4.22.10:12) Added to evaluate human results (to compare human vs. model).
from .coco_evaluation import COCOEvaluator
from .rotated_coco_evaluation import RotatedCOCOEvaluator
from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
from .lvis_evaluation import LVISEvaluator
from .panoptic_evaluation import COCOPanopticEvaluator
from .panoptic_evaluationJ_forHumanEval import COCOPanopticEvaluatorJ_forHumanEval  # i.21.4.21.21:23) Added to evaluate human results (to compare human vs. model).
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
from .sem_seg_evaluation import SemSegEvaluator
from .testing import print_csv_format, verify_results
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
import collections
import torch
import copy
import torch.nn as nn
def intable(value):
try:
int(value)
return True
    except (TypeError, ValueError):
return False
# change with _module
def get_object(model, name):
if isinstance(
model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
):
model = model.module
if name == "self":
return model
attributes = name.split(".")
if len(attributes) == 1:
raise RuntimeError("Can not find the " + name + " in model")
current = model
for i in range(1, len(attributes)):
name = attributes[i]
try:
if intable(name):
current = current[int(name)]
else:
current = getattr(current, name)
except Exception as e:
raise RuntimeError("Can not find the " + name + " in model")
return current
def is_type(object, object_class):
if isinstance(object_class, list):
if type(object) in object_class:
return True
if type(object) == object_class:
return True
return False
def set_object(model, name, nn_module):
base_ptr = model
if isinstance(
model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
):
model = model.module
if name == "self":
return nn_module
attributes = name.split(".")
prefix = ".".join(attributes[:-1])
prefix_object = get_object(model, prefix)
setattr(prefix_object, attributes[-1], nn_module)
return base_ptr
def get_exclusion_names(graph, object_class, attribute_name, attribute_dim):
modules = graph.get_modules()
exclusion_dict = collections.OrderedDict()
for _, module in modules.items():
if isinstance(module.nn_object, object_class):
cut_dict = module.cut_analysis(attribute_name, [0], attribute_dim)
terminal_names = cut_dict["terminal"]
terminal_names = list(terminal_names)
terminal_names.sort()
exclusion_key = "_".join(terminal_names)
exclusion_dict[exclusion_key] = module.name
exclusion_list = []
for key in exclusion_dict.keys():
exclusion_list.append(exclusion_dict[key])
return exclusion_list
# change setting with _modules
def get_names_by_class(model, object_class, include_super_class=True):
name_list = []
stack = []
stack.append([model, "self"])
while len(stack) != 0:
pop_object, object_name = stack.pop()
if include_super_class and isinstance(pop_object, object_class):
name_list.append(object_name)
continue
if is_type(pop_object, object_class):
name_list.append(object_name)
continue
# if the nn.Module is a Sequential or ModuleList
if isinstance(pop_object, (torch.nn.Sequential, torch.nn.ModuleList)):
for index in range(0, len(pop_object)):
if isinstance(pop_object[index], torch.nn.Module):
stack.append([pop_object[index], object_name + "." + str(index)])
if isinstance(pop_object, torch.nn.ModuleDict):
for key in pop_object.keys():
if isinstance(pop_object[key], torch.nn.Module):
stack.append([pop_object[key], object_name + "." + str(key)])
attributes = dir(pop_object)
for attribute in attributes:
sub_object = getattr(pop_object, attribute)
if isinstance(sub_object, torch.nn.Module):
stack.append([sub_object, object_name + "." + attribute])
return name_list
def _find_match_module(
modules,
prev_node,
object_class_list,
current_name_list,
group_name_list,
include_super_class,
):
if len(current_name_list) == len(object_class_list):
group_name_list.append(copy.deepcopy(current_name_list))
return
for _, module in modules.items():
if (
include_super_class
and isinstance(module.nn_object, object_class_list[len(current_name_list)])
) or is_type(module.nn_object, object_class_list[len(current_name_list)]):
if prev_node is None or prev_node in module.in_data:
if len(module.out_data) != 0:
current_name_list.append(module.name)
_find_match_module(
modules,
module.out_data[0],
object_class_list,
current_name_list,
group_name_list,
include_super_class,
)
current_name_list.pop()
def get_name_groups_by_classes(graph, object_class_list, include_super_class=True):
modules = graph.modules
group_name_list = []
current_name_list = []
_find_match_module(
modules,
None,
object_class_list,
current_name_list,
group_name_list,
include_super_class,
)
return group_name_list
def replace_object_by_name_groups(model, group_name_list, replace_function):
for group_name in group_name_list:
objs = []
for name in group_name:
obj = get_object(model, name)
objs.append(obj)
new_objs = replace_function(group_name, objs)
for name, obj in zip(group_name, new_objs):
model = set_object(model, name, obj)
return model
def replace_object_by_names(model, name_list, replace_function):
for name in name_list:
obj = get_object(model, name)
model = set_object(model, name, replace_function(name, obj))
return model
def replace_object_by_class(
model, object_class, replace_function, include_super_class=True
):
name_list = get_names_by_class(model, object_class, include_super_class)
return replace_object_by_names(model, name_list, replace_function)
def normalize_onnx_parameters(**kwargs):
torch_version = torch.__version__.split(".")
    if int(torch_version[0]) >= 2 or (len(torch_version) > 1 and int(torch_version[1]) >= 10):
kwargs.pop("_retain_param_name", None)
return kwargs
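# Usage sketch (illustrative, not part of the original module): replace every
# nn.ReLU in a toy model with nn.LeakyReLU via replace_object_by_class().
if __name__ == "__main__":
    demo_model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2), nn.ReLU())
    demo_model = replace_object_by_class(
        demo_model, nn.ReLU, lambda name, obj: nn.LeakyReLU()
    )
    print(demo_model)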
|
from typing import List
import unittest
from src.profile import Profile
class ProfileTest(unittest.TestCase):
def test_calculate_simple_motif(self):
profile = Profile("A", "name")
self.assertDictEqual({
"A": [1],
"C": [0],
"G": [0],
"T": [0]
}, profile.get_frequency_profile())
def test_calculate_complex_motif(self):
profile = Profile("ACGTAA", "name")
self.assertDictEqual({
"A": [1, 0, 0, 0, 1, 1],
"C": [0, 1, 0, 0, 0, 0],
"G": [0, 0, 1, 0, 0, 0],
"T": [0, 0, 0, 1, 0, 0]
}, profile.get_frequency_profile())
def test_combine_profiles(self):
profile_a = Profile("ACGTAA", "name")
profile_b = Profile("ACGTAA", "name")
profile_c = profile_a.combine(profile_b)
self.assertDictEqual({
"A": [1, 0, 0, 0, 1, 1],
"C": [0, 1, 0, 0, 0, 0],
"G": [0, 0, 1, 0, 0, 0],
"T": [0, 0, 0, 1, 0, 0]
}, profile_c.get_frequency_profile())
def test_combine_different_profiles(self):
profile_a = Profile("ACGT", "name")
profile_b = Profile("TGCA", "name")
profile_c = profile_a.combine(profile_b)
self.assertDictEqual({
"A": [0.5, 0, 0, 0.5],
"C": [0, 0.5, 0.5, 0],
"G": [0, 0.5, 0.5, 0],
"T": [0.5, 0, 0, 0.5]
}, profile_c.get_frequency_profile())
def test_profile_with_multiple_motifs(self):
profile = Profile(["ACGT", "TGCA"], "name")
self.assertDictEqual({
"A": [0.5, 0, 0, 0.5],
"C": [0, 0.5, 0.5, 0],
"G": [0, 0.5, 0.5, 0],
"T": [0.5, 0, 0, 0.5]
}, profile.get_frequency_profile())
def test_distance_between_profiles_works(self):
profile_a, profile_b, profile_c = self.__node_factory(["AAA", "AAA", "TTT"])
self.assertTrue(profile_a.log_distance(profile_b) < profile_a.log_distance(profile_c))
self.assertEqual(profile_a.log_distance(profile_b), 1)
self.assertEqual(profile_a.log_distance(profile_c), 1)
def __node_factory(self, dna_strings: List[str]) -> List[Profile]:
profiles = [Profile(string, name=string) for string in dna_strings]
return profiles
|
from django.urls import path, include
from .views import home_view
app_name = 'pages'
urlpatterns = [
# path('', home_view, name='home_view'),
]
|
from physDBD import Params0Gauss, ImportHelper, Params0GaussTraj
import numpy as np
import os
import tensorflow as tf
class TestParams0Gauss:
fnames = [
"../data_test/0000.txt",
"../data_test/0001.txt",
"../data_test/0002.txt",
"../data_test/0003.txt",
"../data_test/0004.txt"
]
species = ["ca2i","ip3"]
nv = 2
def import_params(self, time: float) -> Params0Gauss:
data = ImportHelper.import_gillespie_ssa_at_time(
fnames=self.fnames,
time=time,
species=self.species
)
params = Params0Gauss.fromData(data)
return params
def create_params_traj(self) -> Params0GaussTraj:
return Params0GaussTraj(
times=np.array([0.2,0.3,0.4,0.5,0.6,0.7]),
params0_traj=[
self.import_params(0.2),
self.import_params(0.3),
self.import_params(0.4),
self.import_params(0.5),
self.import_params(0.6),
self.import_params(0.7)
]
)
def test_params(self):
params = self.import_params(0.4)
def test_export(self):
pt = self.create_params_traj()
fname = "cache_params.txt"
pt.export(fname)
# import back
pt_back = Params0GaussTraj.fromFile(fname,nv=self.nv)
# Check
assert len(pt.params0_traj) == len(pt_back.params0_traj)
for i in range(0,len(pt.params0_traj)):
assert pt.params0_traj[i] == pt_back.params0_traj[i]
if os.path.exists(fname):
os.remove(fname)
def test_tf_input(self):
params = self.import_params(0.4)
input0 = params.get_tf_input(tpt=0)
tf.debugging.assert_equal(tf.constant(params.mu_v, dtype="float32"), input0["mu_v"].astype("float32"))
tf.debugging.assert_equal(tf.constant(params.chol_v, dtype="float32"), input0["chol_v"].astype("float32"))
pt = self.create_params_traj()
inputs = pt.get_tf_inputs()
assert len(inputs["mu_v"]) == len(pt.times)-1
for i in range(0,len(inputs["mu_v"])):
tf.debugging.assert_equal(tf.constant(pt.params0_traj[i].mu_v, dtype="float32"), inputs["mu_v"][i].astype("float32"))
tf.debugging.assert_equal(tf.constant(pt.params0_traj[i].chol_v, dtype="float32"), inputs["chol_v"][i].astype("float32"))
|
import os, glob
import json
import re
import numpy as np
from shutil import copy
from AlphagoZero.ai import MCTSPlayer
import AlphagoZero.go as go
from AlphagoZero.models.policy_value import PolicyValue
from AlphagoZero.util import flatten_idx, pprint_board
class Human(object):
def __init__(self,board_size):
self.is_human = True
self.board_size = board_size
def get_move(self, state):
while True:
query = raw_input("Your move: ")
if len(query)==0:
return go.PASS_MOVE
else:
try:
alphabet, number = re.match(r"([a-z]+)([0-9]+)", query, re.I).groups()
y = ord(alphabet.upper()) - ord('A')
x = self.board_size - int(number)
return ((x,y))
except:
print("The input should have the form like 'a1' or 'A1'.")
continue
def run_a_game(alphago_player, human_player, boardsize):
    '''Run a single game between the AlphagoZero player and the human player,
    alternating moves until the game ends, then print the result.
    '''
board_size = boardsize
state = go.GameState(size=board_size, komi=0)
    # Randomly assign the human player's color.
human_color = np.random.choice([go.BLACK, go.WHITE])
if human_color == go.BLACK:
current = human_player
other = alphago_player
print("Your color is black.")
else:
current = alphago_player
other = human_player
print("Your color is white.")
pprint_board(state.board)
while not state.is_end_of_game:
move = current.get_move(state)
try:
state.do_move(move)
except:
print("Illegal move!")
continue
if other == alphago_player:
other.mcts.update_with_move(move)
current, other = other, current
pprint_board(state.board)
winner = state.get_winner()
if winner == human_color:
print("You won.")
elif winner == 0:
print("Tie.")
else:
print("AlphagoZero won")
def run_play(cmd_line_args=None):
import argparse
parser = argparse.ArgumentParser(description='Play a game with the current best neural network checkpoint.') # noqa: E501
parser.add_argument("--model_json", help="Path to policy value model JSON.", default='network.json')
parser.add_argument("--best_directory", help="Path to folder where the model params and metadata will be saved after each evaluation.", default='/../ckpt/best/weights.*.hdf5') # noqa: E501/
parser.add_argument("--optimized_directory", help="Path to folder where optimized weights are saved", default="/../ckpt/optimized/weights.*.hdf5"),
parser.add_argument("--n_playout", help="number of playout", default=7, type=int)
# Baseline function (TODO) default lambda state: 0 (receives either file
# paths to JSON and weights or None, in which case it uses default baseline 0)
if cmd_line_args is None:
args = parser.parse_args()
else:
args = parser.parse_args(cmd_line_args)
best_model_json = os.path.dirname(__file__) + os.path.join(os.path.dirname(args.best_directory), args.model_json)
args.optimized_directory = os.path.dirname(__file__) + args.optimized_directory
args.best_directory = os.path.dirname(__file__) + args.best_directory
while True:
best_weight_list = glob.glob(args.best_directory)
if len(best_weight_list) == 0:
while True:
optimized_weight_list = glob.glob(args.optimized_directory)
if len(optimized_weight_list) != 0:
optimized_weight_list.sort()
candid_weight_path = optimized_weight_list[-1]
copy(candid_weight_path, os.path.dirname(args.best_directory))
copy(os.path.join(os.path.dirname(args.optimized_directory), args.model_json), os.path.dirname(args.best_directory))
print("The first neural network!")
best_weight_path = os.path.join(os.path.dirname(args.best_directory), os.path.basename(candid_weight_path))
                    with open(best_model_json, 'r') as f:
                        js = json.load(f)
                    js["weights_file"] = best_weight_path
                    with open(best_model_json, 'w') as f:
                        json.dump(js, f)
                    break
else:
break
best_weight_list.sort()
best_weight_path = best_weight_list[-1]
print("Playing with weights {}".format(best_weight_path))
# Set initial conditions
policy = PolicyValue.load_model(best_model_json)
policy.model.load_weights(best_weight_path)
boardsize = policy.model.input_shape[-1]
best_player = MCTSPlayer(policy.eval_value_state, policy.eval_policy_state, n_playout=args.n_playout, evaluating=True)
human_player = Human(boardsize)
run_a_game(best_player, human_player, boardsize)
if __name__ == '__main__':
run_play()
|
class RPY:
to_card = {(False, False): "!", (False, True): "+", (True, False): "?", (True, True): "*"}
from_card = {"!": (False, False), "+": (False, True), "?": (True, False), "*": (True, True)}
symbol_unique = '-u'
symbol_private = '-p'
symbol_static = '-s'
@classmethod
def rpy_to_cfg(cls, rpy: str) -> dict:
config = {}
config["name"] = rpy[1:].split('[', 1)[0]
config["type_"] = rpy.split('[', 1)[1].split(']', 1)[0]
optional, multiple = cls.from_card[rpy[0]]
if optional:
config["optional"] = optional
if multiple:
config["multiple"] = multiple
if cls.symbol_unique in rpy:
config["unique"] = True
if cls.symbol_private in rpy:
config["private"] = True
if cls.symbol_static in rpy:
config["static"] = True
return config
@classmethod
def cfg_to_rpy(cls, **config):
card = cls.to_card[(config.get('optional', False), config.get('multiple', False))]
u = f" {cls.symbol_unique}" if config.get('unique', False) else ""
p = f" {cls.symbol_private}" if config.get('private', False) else ""
s = f" {cls.symbol_static}" if config.get('static', False) else ""
return card + config['name'] + "[" + config['type'] + "]" + u + p + s
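# Usage sketch (illustrative):
#   RPY.rpy_to_cfg("?name[int] -u")
#   -> {'name': 'name', 'type_': 'int', 'optional': True, 'unique': True}
# Note that cfg_to_rpy() reads the type from the key 'type' (not 'type_'):
#   RPY.cfg_to_rpy(name='name', type='int', optional=True, unique=True)
#   -> "?name[int] -u"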
|
def aumentar(n, porcentagem, f=False):
nv = n * (1 + porcentagem/100)
if f:
nv = moeda(nv)
return nv
def diminuir(n=0, porcentagem=0, f=False):
nv = n * (1 - porcentagem/100)
if f:
nv = moeda(nv)
return nv
def dobro(n=0, f=False):
n *= 2
if f:
n = moeda(n)
return n
def metade(n=0, f=False):
n /= 2
if f:
n = moeda(n)
return n
def moeda(n=0.0, m='R$'):
return f'{m}{n:.2f}'.replace('.', ',')
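# Usage sketch (illustrative):
#   moeda(1234.5)           -> 'R$1234,50'
#   aumentar(100, 10, True) -> 'R$110,00'
#   metade(7, True)         -> 'R$3,50'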
|
# SPDX-FileCopyrightText: 2020 Luke Granger-Brown
#
# SPDX-License-Identifier: Apache-2.0
import scapy.packet
def tagged_layer(parent_packet, tag_name):
def tagged(tag):
def _wrap(cls):
scapy.packet.bind_layers(parent_packet, cls, **{tag_name: tag.value})
return cls
return _wrap
return tagged
class NoPayloadPacket(scapy.packet.Packet):
def extract_padding(self, s):
return b"", s
|
from flask import jsonify
from lin.redprint import Redprint
from app.libs.jwt_api import member_login_required, get_current_member
from app.libs.utils import offset_limit
from app.models.classic import Classic
from app.models.like import Like
classic_api = Redprint('classic')
@classic_api.route('/latest', methods=['GET'])
# @member_login_required
def get_latest():
classic = Classic.get_latest()
# classic_with_like_status(classic)
return jsonify(classic)
@classic_api.route('/<int:index>/next', methods=['GET'])
# @member_login_required
def get_next(index):
classic = Classic.get_next(index)
# classic_with_like_status(classic)
return jsonify(classic)
@classic_api.route('/<int:index>/previous', methods=['GET'])
# @member_login_required
def get_previous(index):
classic = Classic.get_previous(index)
# classic_with_like_status(classic)
return jsonify(classic)
@classic_api.route('/<int:type>/<int:id>')
# @member_login_required
def get_detail(type, id):
classic = Classic.get_detail(type, id)
return jsonify(classic)
@classic_api.route('/<int:type>/<int:id>/favor')
@member_login_required
def get_like(type, id):
member_id = get_current_member().id
like = Like.get_like(type, id, member_id)
return jsonify(like)
@classic_api.route('/favor')
@member_login_required
def get_favor():
start, count = offset_limit()
member_id = get_current_member().id
classic = Classic.get_favor(member_id, start, count)
for model in classic['models']:
model.like_status = True
model._fields = ['id', 'type', 'summary', 'image', 'fav_nums', 'like_status']
return jsonify(classic)
def classic_with_like_status(classic):
like_status = Like.get_like_status_by_member(get_current_member().id, classic.type, classic.id)
classic.like_status = like_status
classic._fields.append('like_status')
|
import os
__all__ = ['NvidiaSMI', 'DebugCUDA']
class NvidiaSMI(object):
def __init__(self):
if 'PROGRAMFILES' in os.environ.keys():
nvidia_smi_path = os.path.join(
os.environ['PROGRAMFILES'],
'NVIDIA Corporation',
'NVSMI'
)
if nvidia_smi_path not in os.environ['PATH']:
os.environ['PATH'] = os.environ['PATH'] + ";" + nvidia_smi_path
class DebugCUDA(object):
def __init__(self):
os.environ['CUDA_LAUNCH_BLOCKING'] = str(1)
|
"""Additional helper functions dealing with transient-CW F(t0,tau) maps.
See Prix, Giampanis & Messenger (PRD 84, 023007, 2011):
https://arxiv.org/abs/1104.1704
for the algorithm in general
and Keitel & Ashton (CQG 35, 205003, 2018):
https://arxiv.org/abs/1805.05652
for a detailed discussion of the GPU implementation.
"""
import numpy as np
import os
import logging
from time import time
# optional imports
import importlib as imp
def _optional_import(modulename, shorthand=None):
"""
    Import a module/submodule only if it is available.

    We use importlib instead of __import__
    because the latter does not handle submodules.
    The module also includes a special check to fail more gracefully
    when CUDA_DEVICE is set to too high a number.
"""
if shorthand is None:
shorthand = modulename
shorthandbit = ""
else:
shorthandbit = " as " + shorthand
try:
globals()[shorthand] = imp.import_module(modulename)
logging.debug("Successfully imported module %s%s." % (modulename, shorthandbit))
success = True
except ImportError:
logging.debug("Failed to import module {:s}.".format(modulename))
success = False
return success
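# Example: _optional_import("pycuda.driver", "drv") binds the submodule to the
# module-level name "drv" (if importable) and returns True or False accordingly.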
class pyTransientFstatMap:
"""
Simplified object class for a F(t0,tau) F-stat map.
This is based on LALSuite's transientFstatMap_t type,
replacing the gsl matrix with a numpy array.
Here, `t0` is a transient start time,
`tau` is a transient duration parameter,
and `F(t0,tau)` is the F-statistic (not 2F)! evaluated
for a signal with those parameters
(and an implicit window function, which is not stored inside this object).
The 'map' covers a range of different `(t0,tau)` pairs.
Attributes
----------
F_mn: np.ndarray
2D array of F values (not 2F!),
with `m` an index over start-times `t0`,
and `n` an index over duration parameters `tau`,
in steps of `dt0` in `[t0, t0+t0Band]`,
and `dtau` in `[tau, tau+tauBand]`.
maxF: float
Maximum of F (not 2F!) over the array.
t0_ML: float
Maximum likelihood estimate of the transient start time `t0`.
tau_ML: float
Maximum likelihood estimate of the transient duration `tau`.
"""
def __init__(self, N_t0Range=None, N_tauRange=None, transientFstatMap_t=None):
"""
The class can be initialized with the following:
Parameters
----------
N_t0Range: int
Number of `t0` values covered.
N_tauRange: int
Number of `tau` values covered.
transientFstatMap_t: lalpulsar.transientFstatMap_t
pre-allocated matrix from lalpulsar.
"""
if transientFstatMap_t:
self._init_from_lalpulsar_type(transientFstatMap_t)
else:
if not N_t0Range or not N_tauRange:
raise ValueError(
"Need either a transientFstatMap_t or a pair of (N_t0Range, N_tauRange)!."
)
self.F_mn = np.zeros((N_t0Range, N_tauRange), dtype=np.float32)
# Initializing maxF to a negative value ensures
# that we always update at least once and hence return
# sane t0_d_ML, tau_d_ML
# even if there is only a single bin where F=0 happens.
self.maxF = float(-1.0)
self.t0_ML = float(0.0)
self.tau_ML = float(0.0)
def _init_from_lalpulsar_type(self, transientFstatMap_t):
"""This essentially just strips out a redundant member level from the lalpulsar structure."""
self.F_mn = transientFstatMap_t.F_mn.data
self.maxF = transientFstatMap_t.maxF
self.t0_ML = transientFstatMap_t.t0_ML
self.tau_ML = transientFstatMap_t.tau_ML
def get_maxF_idx(self):
"""Gets the 2D-unravelled index pair of the maximum of the F_mn map
Returns
-------
idx: tuple
The m,n indices of the map entry with maximal F value.
"""
return np.unravel_index(np.argmax(self.F_mn), self.F_mn.shape)
def write_F_mn_to_file(self, tCWfile, windowRange, header=[]):
"""Format a 2D transient-F-stat matrix over `(t0,tau)` and write as a text file.
Apart from the optional extra header lines,
the format is consistent with lalpulsar.write_transientFstatMap_to_fp().
Parameters
----------
tCWfile: str
Name of the file to write to.
windowRange: lalpulsar.transientWindowRange_t
A lalpulsar structure containing the transient parameters.
header: list
A list of additional header lines
to print at the start of the file.
"""
with open(tCWfile, "w") as tfp:
for hline in header:
tfp.write("# {:s}\n".format(hline))
tfp.write("# t0[s] tau[s] 2F\n")
for m, F_m in enumerate(self.F_mn):
this_t0 = windowRange.t0 + m * windowRange.dt0
for n, this_F in enumerate(F_m):
this_tau = windowRange.tau + n * windowRange.dtau
tfp.write(
" %10d %10d %- 11.8g\n" % (this_t0, this_tau, 2.0 * this_F)
)
fstatmap_versions = {
"lal": lambda multiFstatAtoms, windowRange: lalpulsar_compute_transient_fstat_map(
multiFstatAtoms, windowRange
),
"pycuda": lambda multiFstatAtoms, windowRange: pycuda_compute_transient_fstat_map(
multiFstatAtoms, windowRange
),
}
"""Dictionary of the actual callable transient F-stat map functions this module supports.
Actual runtime availability depends on the corresponding external modules
being available.
"""
def init_transient_fstat_map_features(wantCuda=False, cudaDeviceName=None):
"""Initialization of available modules (or 'features') for computing transient F-stat maps.
Currently, two implementations are supported and checked for
through the `_optional_import()` method:
1. `lal`: requires both `lal` and `lalpulsar` packages to be importable.
2. `pycuda`: requires the `pycuda` package to be importable
along with its modules
`driver`, `gpuarray`, `tools` and `compiler`.
Parameters
----------
wantCuda: bool
Only if this is True and it was possible to import pycuda,
a CUDA device context is created and returned.
cudaDeviceName: str or None
Request a CUDA device with this name.
Partial matches are allowed.
Returns
-------
features: dict
A dictionary of available method names, to match `fstatmap_versions`.
Each key's value is set to `True` only if
all required modules are importable on this system.
gpu_context: pycuda.driver.Context or None
A CUDA device context object, if assigned.
"""
features = {}
have_lal = _optional_import("lal")
have_lalpulsar = _optional_import("lalpulsar")
features["lal"] = have_lal and have_lalpulsar
# import GPU features
have_pycuda = _optional_import("pycuda")
have_pycuda_drv = _optional_import("pycuda.driver", "drv")
have_pycuda_gpuarray = _optional_import("pycuda.gpuarray", "gpuarray")
have_pycuda_tools = _optional_import("pycuda.tools", "cudatools")
have_pycuda_compiler = _optional_import("pycuda.compiler", "cudacomp")
features["pycuda"] = (
have_pycuda
and have_pycuda_drv
and have_pycuda_gpuarray
and have_pycuda_tools
and have_pycuda_compiler
)
logging.debug("Got the following features for transient F-stat maps:")
logging.debug(features)
if wantCuda and features["pycuda"]:
logging.debug("CUDA version: " + ".".join(map(str, drv.get_version())))
drv.init()
logging.debug(
"Starting with default pyCUDA context,"
" then checking all available devices..."
)
try:
context0 = pycuda.tools.make_default_context()
except pycuda._driver.LogicError as e:
            if "cuDeviceGet failed: invalid device ordinal" in str(e):
devn = int(os.environ["CUDA_DEVICE"])
raise RuntimeError(
"Requested CUDA device number {} exceeds"
" number of available devices!"
" Please change through environment"
" variable $CUDA_DEVICE.".format(devn)
)
else:
                raise
num_gpus = drv.Device.count()
logging.debug("Found {} CUDA device(s).".format(num_gpus))
devices = []
devnames = np.empty(num_gpus, dtype="S32")
for n in range(num_gpus):
devn = drv.Device(n)
devices.append(devn)
devnames[n] = devn.name().replace(" ", "-").replace("_", "-")
logging.debug(
"device {}: model: {}, RAM: {}MB".format(
n, devnames[n], devn.total_memory() / (2.0 ** 20)
)
)
if "CUDA_DEVICE" in os.environ:
devnum0 = int(os.environ["CUDA_DEVICE"])
else:
devnum0 = 0
matchbit = ""
if cudaDeviceName:
# allow partial matches in device names
devmatches = [
devidx
for devidx, devname in enumerate(devnames)
if cudaDeviceName in devname
]
if len(devmatches) == 0:
context0.detach()
raise RuntimeError(
'Requested CUDA device "{}" not found.'
" Available devices: [{}]".format(
cudaDeviceName, ",".join(devnames)
)
)
else:
devnum = devmatches[0]
if len(devmatches) > 1:
logging.warning(
'Found {} CUDA devices matching name "{}".'
" Choosing first one with index {}.".format(
len(devmatches), cudaDeviceName, devnum
)
)
os.environ["CUDA_DEVICE"] = str(devnum)
matchbit = '(matched to user request "{}")'.format(cudaDeviceName)
elif "CUDA_DEVICE" in os.environ:
devnum = int(os.environ["CUDA_DEVICE"])
else:
devnum = 0
devn = devices[devnum]
logging.info(
"Choosing CUDA device {},"
" of {} devices present: {}{}...".format(
devnum, num_gpus, devn.name(), matchbit
)
)
if devnum == devnum0:
gpu_context = context0
else:
context0.pop()
gpu_context = pycuda.tools.make_default_context()
gpu_context.push()
_print_GPU_memory_MB("Available")
else:
gpu_context = None
return features, gpu_context
def call_compute_transient_fstat_map(
version, features, multiFstatAtoms=None, windowRange=None
):
"""Call a version of the ComputeTransientFstatMap function.
This checks if the requested `version` is available,
and if so, executes the computation of a transient F-statistic map
over the `windowRange`.
Parameters
----------
version: str
Name of the method to call
(currently supported: 'lal' or 'pycuda').
features: dict
Dictionary of available features,
as obtained from `init_transient_fstat_map_features()`.
multiFstatAtoms: lalpulsar.MultiFstatAtomVector or None
The time-dependent F-stat atoms previously computed by `ComputeFstat`.
windowRange: lalpulsar.transientWindowRange_t or None
The structure defining the transient parameters.
Returns
-------
FstatMap: pyTransientFstatMap or lalpulsar.transientFstatMap_t
The output of the called function,
including the evaluated transient F-statistic map
over the windowRange.
timingFstatMap: float
Execution time of the called function.
"""
if version in fstatmap_versions:
if features[version]:
time0 = time()
FstatMap = fstatmap_versions[version](multiFstatAtoms, windowRange)
timingFstatMap = time() - time0
else:
raise Exception(
"Required module(s) for transient F-stat map"
' method "{}" not available!'.format(version)
)
else:
raise Exception(
'Transient F-stat map method "{}"' " not implemented!".format(version)
)
return FstatMap, timingFstatMap
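# Usage sketch (illustrative; `atoms` and `windowRange` are assumed to come from a
# prior lalpulsar ComputeFstat setup, they are not defined in this module):
#
#   features, gpu_context = init_transient_fstat_map_features(wantCuda=False)
#   FstatMap, t_map = call_compute_transient_fstat_map(
#       "lal", features, multiFstatAtoms=atoms, windowRange=windowRange
#   )
#   print(2 * FstatMap.maxF, FstatMap.t0_ML, FstatMap.tau_ML)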
def lalpulsar_compute_transient_fstat_map(multiFstatAtoms, windowRange):
"""Wrapper for the standard lalpulsar function for computing a transient F-statistic map.
See https://lscsoft.docs.ligo.org/lalsuite/lalpulsar/_transient_c_w__utils_8h.html
for the wrapped function.
Parameters
----------
multiFstatAtoms: lalpulsar.MultiFstatAtomVector
The time-dependent F-stat atoms previously computed by `ComputeFstat`.
windowRange: lalpulsar.transientWindowRange_t
The structure defining the transient parameters.
Returns
-------
FstatMap: pyTransientFstatMap
The computed results, see the class definition for details.
"""
FstatMap_lalpulsar = lalpulsar.ComputeTransientFstatMap(
multiFstatAtoms=multiFstatAtoms,
windowRange=windowRange,
useFReg=False,
)
return pyTransientFstatMap(transientFstatMap_t=FstatMap_lalpulsar)
def reshape_FstatAtomsVector(atomsVector):
"""Make a dictionary of ndarrays out of an F-stat atoms 'vector' structure.
Parameters
----------
atomsVector: lalpulsar.FstatAtomVector
The atoms in a 'vector'-like structure:
iterating over timestamps as the higher hierarchical level,
with a set of 'atoms' quantities defined at each timestamp.
Returns
-------
atomsDict: dict
A dictionary with an entry for each quantity,
which then is a 1D ndarray over timestamps for that one quantity.
"""
numAtoms = atomsVector.length
atomsDict = {}
atom_fieldnames = [
"timestamp",
"Fa_alpha",
"Fb_alpha",
"a2_alpha",
"ab_alpha",
"b2_alpha",
]
atom_dtypes = [np.uint32, complex, complex, np.float32, np.float32, np.float32]
for f, field in enumerate(atom_fieldnames):
atomsDict[field] = np.ndarray(numAtoms, dtype=atom_dtypes[f])
for n, atom in enumerate(atomsVector.data):
for field in atom_fieldnames:
atomsDict[field][n] = atom.__getattribute__(field)
atomsDict["Fa_alpha_re"] = np.float32(atomsDict["Fa_alpha"].real)
atomsDict["Fa_alpha_im"] = np.float32(atomsDict["Fa_alpha"].imag)
atomsDict["Fb_alpha_re"] = np.float32(atomsDict["Fb_alpha"].real)
atomsDict["Fb_alpha_im"] = np.float32(atomsDict["Fb_alpha"].imag)
return atomsDict
def _get_absolute_kernel_path(kernel):
pyfstatdir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
kernelfile = kernel + ".cu"
return os.path.join(pyfstatdir, "pyCUDAkernels", kernelfile)
def _print_GPU_memory_MB(key):
    mem_free_MB = drv.mem_get_info()[0] / (2.0 ** 20)
    mem_total_MB = drv.mem_get_info()[1] / (2.0 ** 20)
    logging.debug(
        "{} GPU memory: {:.4f} / {:.4f} MB free".format(key, mem_free_MB, mem_total_MB)
    )
def pycuda_compute_transient_fstat_map(multiFstatAtoms, windowRange):
"""GPU version of computing a transient F-statistic map.
This is based on XLALComputeTransientFstatMap from LALSuite,
(C) 2009 Reinhard Prix, licensed under GPL.
The 'map' consists of F-statistics evaluated over
a range of different `(t0,tau)` pairs
(transient start-times and duration parameters).
This is a high-level wrapper function;
the actual CUDA computations are performed in one of the functions
`pycuda_compute_transient_fstat_map_rect()`
or `pycuda_compute_transient_fstat_map_exp()`,
    depending on the window function defined in `windowRange`.
Parameters
----------
multiFstatAtoms: lalpulsar.MultiFstatAtomVector
The time-dependent F-stat atoms previously computed by `ComputeFstat`.
windowRange: lalpulsar.transientWindowRange_t
The structure defining the transient parameters.
Returns
-------
FstatMap: pyTransientFstatMap
The computed results, see the class definition for details.
"""
if windowRange.type >= lalpulsar.TRANSIENT_LAST:
raise ValueError(
"Unknown window-type ({}) passed as input."
" Allowed are [0,{}].".format(
windowRange.type, lalpulsar.TRANSIENT_LAST - 1
)
)
# internal dict for search/setup parameters
tCWparams = {}
# first combine all multi-atoms
# into a single atoms-vector with *unique* timestamps
tCWparams["TAtom"] = multiFstatAtoms.data[0].TAtom
TAtomHalf = int(tCWparams["TAtom"] / 2) # integer division
atoms = lalpulsar.mergeMultiFstatAtomsBinned(multiFstatAtoms, tCWparams["TAtom"])
# make a combined input matrix of all atoms vectors, for transfer to GPU
tCWparams["numAtoms"] = atoms.length
atomsDict = reshape_FstatAtomsVector(atoms)
atomsInputMatrix = np.column_stack(
(
atomsDict["a2_alpha"],
atomsDict["b2_alpha"],
atomsDict["ab_alpha"],
atomsDict["Fa_alpha_re"],
atomsDict["Fa_alpha_im"],
atomsDict["Fb_alpha_re"],
atomsDict["Fb_alpha_im"],
)
)
# actual data spans [t0_data, t0_data + tCWparams['numAtoms'] * TAtom]
# in steps of TAtom
tCWparams["t0_data"] = int(atoms.data[0].timestamp)
tCWparams["t1_data"] = int(
atoms.data[tCWparams["numAtoms"] - 1].timestamp + tCWparams["TAtom"]
)
logging.debug(
"Transient F-stat map:"
" t0_data={:d}, t1_data={:d}".format(tCWparams["t0_data"], tCWparams["t1_data"])
)
logging.debug(
"Transient F-stat map:"
" numAtoms={:d}, TAtom={:d},"
" TAtomHalf={:d}".format(tCWparams["numAtoms"], tCWparams["TAtom"], TAtomHalf)
)
# special treatment of window_type = none
# ==> replace by rectangular window spanning all the data
if windowRange.type == lalpulsar.TRANSIENT_NONE:
windowRange.type = lalpulsar.TRANSIENT_RECTANGULAR
windowRange.t0 = tCWparams["t0_data"]
windowRange.t0Band = 0
windowRange.dt0 = tCWparams["TAtom"] # irrelevant
windowRange.tau = tCWparams["numAtoms"] * tCWparams["TAtom"]
windowRange.tauBand = 0
windowRange.dtau = tCWparams["TAtom"] # irrelevant
""" NOTE: indices {i,j} enumerate *actual* atoms and their timestamps t_i,
* while the indices {m,n} enumerate the full grid of values
* in [t0_min, t0_max]x[Tcoh_min, Tcoh_max] in steps of deltaT.
* This allows us to deal with gaps in the data in a transparent way.
*
* NOTE2: we operate on the 'binned' atoms returned
* from XLALmergeMultiFstatAtomsBinned(),
* which means we can safely assume all atoms to be lined up
* perfectly on a 'deltaT' binned grid.
*
* The mapping used will therefore be {i,j} -> {m,n}:
* m = offs_i / deltaT
* start-time offset from t0_min measured in deltaT
* n = Tcoh_ij / deltaT
* duration Tcoh_ij measured in deltaT,
*
* where
* offs_i = t_i - t0_min
* Tcoh_ij = t_j - t_i + deltaT
*
"""
# We allocate a matrix {m x n} = t0Range * TcohRange elements
# covering the full transient window-range [t0,t0+t0Band]x[tau,tau+tauBand]
tCWparams["N_t0Range"] = int(
np.floor(1.0 * windowRange.t0Band / windowRange.dt0) + 1
)
tCWparams["N_tauRange"] = int(
np.floor(1.0 * windowRange.tauBand / windowRange.dtau) + 1
)
FstatMap = pyTransientFstatMap(tCWparams["N_t0Range"], tCWparams["N_tauRange"])
logging.debug(
"Transient F-stat map:"
" N_t0Range={:d}, N_tauRange={:d},"
" total grid points: {:d}".format(
tCWparams["N_t0Range"],
tCWparams["N_tauRange"],
tCWparams["N_t0Range"] * tCWparams["N_tauRange"],
)
)
if windowRange.type == lalpulsar.TRANSIENT_RECTANGULAR:
FstatMap.F_mn = pycuda_compute_transient_fstat_map_rect(
atomsInputMatrix, windowRange, tCWparams
)
elif windowRange.type == lalpulsar.TRANSIENT_EXPONENTIAL:
FstatMap.F_mn = pycuda_compute_transient_fstat_map_exp(
atomsInputMatrix, windowRange, tCWparams
)
else:
raise ValueError(
"Invalid transient window type {}"
" not in [{}, {}].".format(
windowRange.type, lalpulsar.TRANSIENT_NONE, lalpulsar.TRANSIENT_LAST - 1
)
)
# out of loop: get max2F and ML estimates over the m x n matrix
FstatMap.maxF = FstatMap.F_mn.max()
maxidx = np.unravel_index(
FstatMap.F_mn.argmax(), (tCWparams["N_t0Range"], tCWparams["N_tauRange"])
)
FstatMap.t0_ML = windowRange.t0 + maxidx[0] * windowRange.dt0
FstatMap.tau_ML = windowRange.tau + maxidx[1] * windowRange.dtau
logging.debug(
"Done computing transient F-stat map."
" maxF={:.4f}, t0_ML={}, tau_ML={}".format(
FstatMap.maxF, FstatMap.t0_ML, FstatMap.tau_ML
)
)
return FstatMap
def pycuda_compute_transient_fstat_map_rect(atomsInputMatrix, windowRange, tCWparams):
"""GPU computation of the transient F-stat map for rectangular windows.
As discussed in Keitel & Ashton (CQG 35, 205003, 2018):
https://arxiv.org/abs/1805.05652
    this version only does GPU parallelization for the outer loop,
keeping the partial sums of the inner loop local to each individual kernel
using the 'memory trick'.
Parameters
----------
atomsInputMatrix: np.ndarray
A 2D array of stacked named columns containing the F-stat atoms.
windowRange: lalpulsar.transientWindowRange_t
The structure defining the transient parameters.
tCWparams: dict
A dictionary of miscellaneous parameters.
Returns
-------
F_mn: np.ndarray
A 2D array of the computed transient F-stat map over the
`[t0,tau]` range.
"""
# gpu data setup and transfer
_print_GPU_memory_MB("Initial")
input_gpu = gpuarray.to_gpu(atomsInputMatrix)
Fmn_gpu = gpuarray.GPUArray(
(tCWparams["N_t0Range"], tCWparams["N_tauRange"]), dtype=np.float32
)
_print_GPU_memory_MB("After input+output allocation:")
# GPU kernel
kernel = "cudaTransientFstatRectWindow"
kernelfile = _get_absolute_kernel_path(kernel)
    with open(kernelfile, "r") as kernelsource:
        partial_Fstat_cuda_code = cudacomp.SourceModule(kernelsource.read())
partial_Fstat_cuda = partial_Fstat_cuda_code.get_function(kernel)
partial_Fstat_cuda.prepare("PIIIIIIIIP")
# GPU grid setup
blockRows = min(1024, tCWparams["N_t0Range"])
blockCols = 1
gridRows = int(np.ceil(1.0 * tCWparams["N_t0Range"] / blockRows))
gridCols = 1
# running the kernel
logging.debug(
"Calling pyCUDA kernel with a grid of {}*{}={} blocks"
" of {}*{}={} threads each: {} total threads...".format(
gridRows,
gridCols,
gridRows * gridCols,
blockRows,
blockCols,
blockRows * blockCols,
gridRows * gridCols * blockRows * blockCols,
)
)
partial_Fstat_cuda.prepared_call(
(gridRows, gridCols),
(blockRows, blockCols, 1),
input_gpu.gpudata,
tCWparams["numAtoms"],
tCWparams["TAtom"],
tCWparams["t0_data"],
windowRange.t0,
windowRange.dt0,
windowRange.tau,
windowRange.dtau,
tCWparams["N_tauRange"],
Fmn_gpu.gpudata,
)
# return results to host
F_mn = Fmn_gpu.get()
_print_GPU_memory_MB("Final")
return F_mn
def pycuda_compute_transient_fstat_map_exp(atomsInputMatrix, windowRange, tCWparams):
"""GPU computation of the transient F-stat map for exponential windows.
As discussed in Keitel & Ashton (CQG 35, 205003, 2018):
https://arxiv.org/abs/1805.05652
    this version does full GPU parallelization
of both the inner and outer loop.
Parameters
----------
atomsInputMatrix: np.ndarray
A 2D array of stacked named columns containing the F-stat atoms.
windowRange: lalpulsar.transientWindowRange_t
The structure defining the transient parameters.
tCWparams: dict
A dictionary of miscellaneous parameters.
Returns
-------
F_mn: np.ndarray
A 2D array of the computed transient F-stat map over the
`[t0,tau]` range.
"""
# gpu data setup and transfer
_print_GPU_memory_MB("Initial")
input_gpu = gpuarray.to_gpu(atomsInputMatrix)
Fmn_gpu = gpuarray.GPUArray(
(tCWparams["N_t0Range"], tCWparams["N_tauRange"]), dtype=np.float32
)
_print_GPU_memory_MB("After input+output allocation:")
# GPU kernel
kernel = "cudaTransientFstatExpWindow"
kernelfile = _get_absolute_kernel_path(kernel)
    with open(kernelfile, "r") as kernelsource:
        partial_Fstat_cuda_code = cudacomp.SourceModule(kernelsource.read())
partial_Fstat_cuda = partial_Fstat_cuda_code.get_function(kernel)
partial_Fstat_cuda.prepare("PIIIIIIIIIP")
# GPU grid setup
blockRows = min(32, tCWparams["N_t0Range"])
blockCols = min(32, tCWparams["N_tauRange"])
gridRows = int(np.ceil(1.0 * tCWparams["N_t0Range"] / blockRows))
gridCols = int(np.ceil(1.0 * tCWparams["N_tauRange"] / blockCols))
# running the kernel
logging.debug(
"Calling kernel with a grid of {}*{}={} blocks"
" of {}*{}={} threads each: {} total threads...".format(
gridRows,
gridCols,
gridRows * gridCols,
blockRows,
blockCols,
blockRows * blockCols,
gridRows * gridCols * blockRows * blockCols,
)
)
partial_Fstat_cuda.prepared_call(
(gridRows, gridCols),
(blockRows, blockCols, 1),
input_gpu.gpudata,
tCWparams["numAtoms"],
tCWparams["TAtom"],
tCWparams["t0_data"],
windowRange.t0,
windowRange.dt0,
windowRange.tau,
windowRange.dtau,
tCWparams["N_t0Range"],
tCWparams["N_tauRange"],
Fmn_gpu.gpudata,
)
# return results to host
F_mn = Fmn_gpu.get()
_print_GPU_memory_MB("Final")
return F_mn
|
import os
from abc import ABC, abstractmethod
from pathlib import Path
BASE_PATH = Path.home().joinpath("re")
if not os.path.isdir(BASE_PATH):
os.mkdir(BASE_PATH)
class DownloadTarget(ABC):
@abstractmethod
def get_source(self):
pass
@abstractmethod
def get_target(self):
pass
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
i = 0
for j in range(1, len(nums)):
if nums[i] != nums[j]:
i += 1
nums[i] = nums[j]
return i + 1
s = Solution()
nums = [1, 1]
length = s.removeDuplicates(nums)
print(length)
print(nums)
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: examples for dxfwrite usage, see also tests for examples
# Created: 09.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
import sys
import os
try:
import dxfwrite
except ImportError:
# if dxfwrite is not 'installed' append parent dir of __file__ to sys.path
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(curdir, os.path.pardir)))
import dxfwrite
from dxfwrite import DXFEngine as dxf
def dxflist_example(x=0, y=0, w=1, h=1):
    # a DXFList can contain any dxf entity that supports __dxf__()
rect = dxfwrite.DXFList()
rect.append(dxf.line(start=(x, y), end=(x+w, y), color=1))
rect.append(dxf.line(start=(x+w, y), end=(x+w, y+h), color=2))
rect.append(dxf.line(start=(x+w, y+h), end=(x, y+h), color=3))
rect.append(dxf.line(start=(x, y+h), end=(x, y), color=4))
return rect
# create a new drawing
name = "simple.dxf"
drawing = dxf.drawing(name)
# add a LAYER table entry called 'dxfwrite'
drawing.add_layer('dxfwrite')
# add a VIEWPORT table entry
drawing.add_vport(
'*ACTIVE',
center_point=(10,10),
height = 30,
)
# add LINE-entity
drawing.add(dxf.line((0,0),( 10,0),
color=dxfwrite.BYLAYER,
layer='dxfwrite'
))
# add a CIRCLE-entity
drawing.add(dxf.circle(center=(5,0), radius=5))
# add an ARC-entity
drawing.add(dxf.arc(center=(5,0), radius=4, startangle=30, endangle=150))
#add a POINT-entity
drawing.add(dxf.point(point=(1,1)))
# add a SOLID-entity with 4 points
drawing.add(dxf.solid([(0,0), (1,0), (1,1), (0,1)], color=2))
# add a SOLID-entity with 3 points
drawing.add(dxf.solid([(0,1), (1,1), (1,2)], color=3))
# add a 3DFACE-entity
drawing.add(dxf.face3d([(5,5), (6,5), (6,6), (5,6)], color=3))
# add a Trace-entity
drawing.add(dxf.trace([(7,5), (8,5), (8,6), (7,6)], color=4))
# add a TEXT-entity
drawing.add(dxf.text("Manfred"))
# add a TEXT-entity with more properties
drawing.add(dxf.text(
text="mozman",
style="ISOCPEUR",
height=0.7,
oblique=15,
color=5,
insert=(0,5),
rotation=30,
))
# create BLOCK-entity
block = dxf.block(name='Rechteck')
# add DXF-entities to the block
block.add(dxflist_example(0, 0, 1, 1))
# create an ATTDEF-entity; it can be used to create new ATTRIBs with the following
# default values, see the attdef.new_attrib() call below
attdef = dxf.attdef(
insert=(.2, .2),
rotation = 30,
height=0.25,
text='test',
prompt='test eingeben:', # only important for interactive CAD systems
tag='TEST'
)
# add attdef to the block definition
block.add(attdef)
# add the block to the BLOCKS section
drawing.blocks.add(block)
# insert the block references
for x in range(1, 10):
block_ref = dxf.insert(
blockname='Rechteck',
insert=(x*2,10),
rotation=x*6,
xscale=x,
yscale=x)
# create an ATTRIB-entity from an ATTDEF-entity
attrib = attdef.new_attrib(
height=0.18,
text='attrib:%d' % x,
)
    # add ATTRIB-entity to the block reference
# relative insert, respects the block rotation
block_ref.add(attrib, relative=True)
# add block reference to drawing
drawing.add(block_ref)
# example for aligned text
drawing.add(dxf.text("aligned Text", insert=(0, -3),
halign=dxfwrite.ALIGNED, alignpoint=(3, -3)))
drawing.add(dxf.line((0,-6), (3,-6)))
# example for fitted text
drawing.add(dxf.text("fitted Text", insert=(0, -6),
halign=dxfwrite.FIT, alignpoint=(3, -6)))
drawing.add(dxf.line((0,-9), (3,-9)))
# example for baseline_middle text
drawing.add(dxf.text("baseline_middle Text", insert=(0, -9),
halign=dxfwrite.BASELINE_MIDDLE, alignpoint=(3, -9)))
# example for Polyline, flags=0, creates a 2D polyline
# default is a 3D polyline
polyline= dxf.polyline(linetype='DOT', flags=0)
polyline.add_vertices( [(0,20), (3,20), (6,23), (9,23)] )
drawing.add(polyline)
# and save the drawing
drawing.save()
print("drawing '%s' created.\n" % name)
|
# Copyright The IETF Trust 2019, All Rights Reserved
# Copyright 2018 Cisco and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is run by a cronjob every day and it
automatically removes unused diff files, yangsuite
users and correlation ids.
"""
__author__ = "Miroslav Kovac"
__copyright__ = "Copyright 2018 Cisco and its affiliates, Copyright The IETF Trust 2019, All Rights Reserved"
__license__ = "Apache License, Version 2.0"
__email__ = "miroslav.kovac@pantheon.tech"
import argparse
import hashlib
import os
import shutil
import time
from datetime import datetime as dt
import utility.log as log
from utility.create_config import create_config
from utility.staticVariables import backup_date_format
from utility.util import job_log, get_list_of_backups
from elasticsearch import Elasticsearch
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def create_register_elk_repo(name, is_compress, elk):
body = {}
body['type'] = 'fs'
body['settings'] = {}
body['settings']['location'] = name
body['settings']['compress'] = is_compress
elk.snapshot.create_repository(name, body)
if __name__ == '__main__':
start_time = int(time.time())
parser = argparse.ArgumentParser()
parser.add_argument('--config-path', type=str, default=os.environ['YANGCATALOG_CONFIG_PATH'],
help='Set path to config file')
parser.add_argument('--compress', action='store_true', default=True,
help='Set whether to compress snapshot files. Default is True')
args, extra_args = parser.parse_known_args()
config_path = args.config_path
config = create_config(config_path)
log_directory = config.get('Directory-Section', 'logs')
temp_dir = config.get('Directory-Section', 'temp')
ys_users = config.get('Directory-Section', 'ys-users')
cache_directory = config.get('Directory-Section', 'cache')
repo_name = config.get('General-Section', 'elk-repo-name')
es_host = config.get('DB-Section', 'es-host')
es_port = config.get('DB-Section', 'es-port')
es_aws = config.get('DB-Section', 'es-aws')
#elk_credentials = config.get('Secrets-Section', 'elk-secret').strip('"').split(' ')
LOGGER = log.get_logger('removeUnused', log_directory + '/jobs/removeUnused.log')
LOGGER.info('Starting Cron job remove unused files')
current_time = time.time()
cutoff = current_time - 86400
try:
        LOGGER.info('Removing old integer-named folders from the tmp directory')
for dir in next(os.walk(temp_dir))[1]:
if represents_int(dir):
creation_time = os.path.getctime(os.path.join(temp_dir, dir))
if creation_time < cutoff:
shutil.rmtree(os.path.join(temp_dir, dir))
LOGGER.info('Removing old ys temporary users')
dirs = os.listdir(ys_users)
for dir in dirs:
            abs_path = os.path.abspath('{}/{}'.format(ys_users, dir))
            if not abs_path.endswith('yangcat') and not abs_path.endswith('yang'):
                try:
                    shutil.rmtree(abs_path)
except:
pass
LOGGER.info('Removing old correlation ids')
# removing correlation ids from file that are older than a day
# Be lenient to missing files
try:
filename = open('{}/correlation_ids'.format(temp_dir), 'r')
lines = filename.readlines()
filename.close()
except IOError:
lines = []
with open('{}/correlation_ids'.format(temp_dir), 'w') as filename:
for line in lines:
line_datetime = line.split(' -')[0]
t = dt.strptime(line_datetime, "%a %b %d %H:%M:%S %Y")
diff = dt.now() - t
if diff.days == 0:
filename.write(line)
LOGGER.info('Removing old yangvalidator cache dirs')
yang_validator_cache = os.path.join(temp_dir, 'yangvalidator')
cutoff = current_time - 2*86400
dirs = os.listdir(yang_validator_cache)
for dir in dirs:
if dir.startswith('yangvalidator-v2-cache-'):
creation_time = os.path.getctime(os.path.join(yang_validator_cache, dir))
if creation_time < cutoff:
shutil.rmtree(os.path.join(yang_validator_cache, dir))
#LOGGER.info('Removing old elasticsearch snapshots')
#if es_aws == 'True':
# es = Elasticsearch([es_host], http_auth=(elk_credentials[0], elk_credentials[1]), scheme='https', port=443)
#else:
# es = Elasticsearch([{'host': '{}'.format(es_host), 'port': es_port}])
#create_register_elk_repo(repo_name, args.compress, es)
#snapshots = es.snapshot.get(repository=repo_name, snapshot='_all')['snapshots']
#sorted_snapshots = sorted(snapshots, key=itemgetter('start_time_in_millis'))
#for snapshot in sorted_snapshots[:-5]:
# es.snapshot.delete(repository=repo_name, snapshot=snapshot['snapshot'])
def hash_file(path: str) -> bytes:
buf_size = 65536 # lets read stuff in 64kB chunks!
sha1 = hashlib.sha1()
with open(path, 'rb') as byte_file:
while True:
data = byte_file.read(buf_size)
if not data:
break
sha1.update(data)
return sha1.digest()
def hash_node(path: str) -> bytes:
if os.path.isfile(path):
return hash_file(path)
elif os.path.isdir(path):
sha1 = hashlib.sha1()
for root, _, filenames in os.walk(path):
for filename in filenames:
file_path = os.path.join(root, filename)
# we only want to compare the contents, not the top directory name
relative_path = file_path[len(path):]
file_signature = relative_path.encode() + hash_file(file_path)
sha1.update(file_signature)
return sha1.digest()
else:
assert False
        # Remove duplicate backups, keeping only the latest copy of identical content. For the last two months,
        # keep all JSON files with distinct content; for the other 4 months (6 in total) keep only the latest and remove the rest.
def remove_old_backups(subdir: str):
backup_directory = os.path.join(cache_directory, subdir)
list_of_backups = get_list_of_backups(backup_directory)
backup_name_latest = os.path.join(backup_directory, list_of_backups[-1])
def diff_month(later_datetime, earlier_datetime):
return (later_datetime.year - earlier_datetime.year) * 12 + later_datetime.month - earlier_datetime.month
to_remove = []
last_six_months = {}
last_two_months = {}
today = dt.now()
for backup in list_of_backups:
backup_dt = dt.strptime(backup[:backup.index('.')], backup_date_format)
month_difference = diff_month(today, backup_dt)
if month_difference > 6:
to_remove.append(backup)
elif month_difference > 2:
month = backup_dt.month
if month in last_six_months:
if last_six_months[month] > backup:
to_remove.append(backup)
else:
to_remove.append(last_six_months[month])
last_six_months[month] = backup
else:
last_six_months[month] = backup
else:
backup_path = os.path.join(backup_directory, backup)
currently_processed_backup_hash = hash_node(backup_path)
if currently_processed_backup_hash in last_two_months:
if last_two_months[currently_processed_backup_hash] > backup:
to_remove.append(backup)
else:
to_remove.append(last_two_months[currently_processed_backup_hash])
last_two_months[currently_processed_backup_hash] = backup
for backup in to_remove:
backup_path = os.path.join(backup_directory, backup)
if backup_path != backup_name_latest:
if os.path.isdir(backup_path):
shutil.rmtree(backup_path)
elif os.path.isfile(backup_path):
os.unlink(backup_path)
LOGGER.info('Removing old cache json files')
remove_old_backups('confd')
except Exception as e:
LOGGER.exception('Exception found while running removeUnused script')
job_log(start_time, temp_dir, error=str(e), status='Fail', filename=os.path.basename(__file__))
raise e
job_log(start_time, temp_dir, status='Success', filename=os.path.basename(__file__))
LOGGER.info('Job finished successfully')
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import threading
import tensorflow as tf
_CURRENT_CONTEXT = threading.local()
def get_current_context():
"""
Returns current active `TFTracingContext`.
:return: Tracing context.
"""
tracing_context = getattr(_CURRENT_CONTEXT, 'tracing_context', None)
if tracing_context is None:
tracing_context = TFTracingContext()
setattr(_CURRENT_CONTEXT, 'tracing_context', tracing_context)
return tracing_context
class TFTracingContextState:
"""
Contains values that describe a state of the `TFTracingContext`.
"""
def __init__(self,
in_call: bool = False,
wrap_ops: bool = False,
model: Optional[tf.keras.Model] = None):
"""
Initializes the `TFTracingContextState` instance.
:param in_call: Whether currently inside the `call()` method of a `tf.keras.Model`.
:param wrap_ops: Whether currently adding the compression pre-hooks and post-hooks
to TensorFlow operations.
:param model: The Keras model whose `call()` method is currently active.
            A value of `None` means that the model is undefined at this moment. This is only
            possible when `in_call` is equal to `False`.
"""
self._in_call = in_call
self._wrap_ops = wrap_ops
if model is None and in_call:
raise ValueError(
                f'Inconsistent values `{in_call}` and `{model}` for the `in_call` and `model` parameters. '
                'A value of `None` means that the model is undefined at this moment. This is only '
                'possible when `in_call` is equal to `False`.'
)
self._model = model
@property
def in_call(self) -> bool:
return self._in_call
@property
def wrap_ops(self) -> bool:
return self._wrap_ops
@property
def model(self) -> tf.keras.Model:
return self._model
class TFTracingContext:
"""
    Contains information about whether the current TensorFlow
    operation should be wrapped or not.
"""
def __init__(self):
"""
Initializes the `TFTracingContext` instance.
"""
self._state = TFTracingContextState()
# Maps a name used in the graph to the next id to use for that name.
self.names_in_use = {}
@property
def model(self) -> Optional[tf.keras.Model]:
return self.state.model
@property
def in_call(self) -> bool:
return self.state.in_call
@property
def wrap_ops(self) -> bool:
return self.state.wrap_ops
def enter(self,
in_call: bool,
wrap_ops: bool,
model: Optional[tf.keras.Model] = None):
"""
Pushes parameters onto the tracing context.
:param in_call: Whether currently inside the `call()` method of a model.
:param wrap_ops: Whether currently adding the compression pre-hooks and post-hooks
to TensorFlow operations.
:param model: The Keras model whose `call()` method is currently active.
"""
model = self.state.model if model is None else model
next_state = TFTracingContextState(in_call, wrap_ops, model)
return TFTracingContextManager(self, next_state)
def unique_name(self, name: str) -> str:
"""
Returns a unique operation name for `name`.
For more details, please, see implementation of
the `tf.Graph.unique_name()` method.
:param name: The name for an operation.
:return: Unique name.
"""
name_key = name.lower()
i = self.names_in_use.get(name_key, 0)
self.names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self.names_in_use:
name_key = f'{base_name_key}_{i}'
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name('name_1').
self.names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = f'{name}_{i - 1}'
return name
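    # Illustrative usage sketch (assumed calls, not part of the original module):
    #   ctx = get_current_context()
    #   ctx.unique_name('relu')    # -> 'relu'
    #   ctx.unique_name('relu')    # -> 'relu_1'
    #   ctx.unique_name('relu')    # -> 'relu_2'
    #   ctx.unique_name('relu_1')  # -> 'relu_1_1' (collision with the auto-suffixed name is avoided)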
@property
def state(self) -> TFTracingContextState:
return self._state
def load_state(self, state: TFTracingContextState) -> None:
self._state = state
class TFTracingContextManager:
"""
Context manager for the tracing context.
"""
def __init__(self,
tracing_context: TFTracingContext,
next_state: TFTracingContextState):
"""
Initializes the tracing context manager.
:param tracing_context: Tracing context.
:param next_state: Next state of the tracing context which
should be applied.
"""
self._tracing_context = tracing_context
self._next_state = next_state
self._prev_state = None
def __enter__(self):
self._prev_state = self._tracing_context.state
self._tracing_context.load_state(self._next_state)
def __exit__(self, *exc):
self._tracing_context.load_state(self._prev_state)
if not self._tracing_context.in_call:
self._tracing_context.names_in_use = {}
|
from transformers import pipeline
classifier = pipeline('sentiment-analysis')
# classifier('We are very happy to show you the 🤗 Transformers library.')
def sentiment_analysis(input_text):
result_sent = classifier(input_text)
return result_sent
# print(classifier('We are very happy to show you the 🤗 Transformers library.'))
#sentiment
from transformers import BertTokenizer
from model import BertForMultiLabelClassification
from multilabel_pipeline import MultiLabelPipeline
import pandas as pd
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import numpy as np
tokenizer = BertTokenizer.from_pretrained("monologg/bert-base-cased-goemotions-original")
model = BertForMultiLabelClassification.from_pretrained("monologg/bert-base-cased-goemotions-original")
goemotions = MultiLabelPipeline(
model=model,
tokenizer=tokenizer,
threshold=0.3
)
def Sentiments_analysis(essay_input):
########## input_text !!!!!!!!
re_text = essay_input.split(".")
    # Data preprocessing
def cleaning(datas):
fin_datas = []
for data in datas:
            # Replace any non-alphabetic characters with spaces
only_english = re.sub('[^a-zA-Z]', ' ', data)
            # Append the cleaned sentence to the list
fin_datas.append(only_english)
return fin_datas
texts = cleaning(re_text)
    # Extract only the detected emotion labels
emo_re = goemotions(texts)
emo_all = []
for list_val in range(0, len(emo_re)):
#print(emo_re[list_val]['labels'],emo_re[list_val]['scores'])
        #mo_all.append((emo_re[list_val]['labels'],emo_re[list_val]['scores'])) # extract only the keys and values and store them in a list
#emo_all.append(emo_re[list_val]['scores'])
emo_all.append((emo_re[list_val]['labels']))
    # Inspect the extraction results
# emo_all
# ['sadness'],
# ['anger'],
# ['admiration', 'realization'],
# ['admiration', 'disappointment'],
# ['love'],
# ['sadness', 'neutral'],
# ['realization', 'neutral'],
# ['neutral'],
# ['optimism'],
# ['neutral'],
# ['excitement'],
# ['neutral'],
# ['neutral'],
# ['caring'],
# ['gratitude'],
# ['admiration', 'approval'], ...
    from pandas.core.common import flatten  # flatten the nested list
flat_list = list(flatten(emo_all))
# ['neutral',
# 'neutral',
# 'sadness',
# 'anger',
# 'admiration',
# 'realization',
# 'admiration',
# 'disappointment',
    # Excluding the neutral emotion, extract every emotion expressed in the input text and see which ones appear
# unique = []
# for r in flat_list:
# if r == 'neutral':
# pass
# else:
# unique.append(r)
    # Remove the neutral emotion and check the unique emotion values
    #unique
    unique_re = set(flat_list)  # remove duplicates
############################################################################
    # Analysis result: how varied are the emotions expressed in the text?
    # print("====================================================================")
    # print("Number of distinct emotions expressed:", len(unique_re))
    # print("====================================================================")
    # Total number of emotions the classifier can detect - the original BERT GoEmotions model extracts 28 emotions
    # total_num_emotion_analyzed = 28
    # Calculate the emotional-variation ratio
    #result_emo_swings =round(len(unique_re)/total_num_emotion_analyzed *100,1) # keep one decimal place
    # print("Ratio of emotions expressed in the text: ", result_emo_swings)
    # print("====================================================================")
    # Interpreting the results
    # result_emo_swings : ratio of emotions expressed across the whole text
    # unique_re : the emotions extracted by the analysis ====> this is the important value
return unique_re
input_text = """i am happy today."""
print(Sentiments_analysis(input_text))
|
#!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base, signed=False):
"""Decode given digits in given base to number in base 10.
digits: str -- string representation of number (in given base)
base: int -- base of given number
return: int -- integer representation of number (in base 10)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# TODO: Decode digits from binary (base 2)
result = 0
useable_digits = string.digits + string.ascii_lowercase
highest_power = len(str(digits).split(".")[0])-1
digits = str(digits).replace(".", "")
    if signed:
if base == 2 and (digits[0] == "1"):
highest_power -= 1
for i in range(1, len(digits), 1):
if digits[i] == "1":
result += (base**(highest_power))
highest_power -= 1
result = 0 - result
return result
for i in range(len(digits)):
result += (useable_digits.index(digits[i])*(base**(highest_power)))
highest_power -= 1
return result
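# Illustrative usage of decode() (examples assumed, not part of the original assignment):
#   decode("ff", 16)      # -> 255
#   decode("101", 2)      # -> 5
#   decode("110.101", 2)  # -> 6.625 (digits after the point get negative powers)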
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
# assert number >= 0, 'number is negative: {}'.format(number)
# TODO: Encode number in binary (base 2)
# TODO: Encode number in hexadecimal (base 16)
# TODO: Encode number in any base (2 up to 36)
useable_digits = string.digits + string.ascii_lowercase
result = ""
if base <= 1:
return number
if number < 0:
digits = -number
else:
digits = number
remainder = digits % base
quotient = digits / base
while quotient > 0:
result = useable_digits[int(remainder)] + result
remainder = int(quotient) % base
quotient = int(quotient) / base
# if type(number) is float:
# result = result + "."
# digits = str(number).split(".")[1]
# for index, value in enumerate(digits):
# character = useable_digits.index(value)
# character *= (base**(-1*(index+1)))
# result += str(character)
if number < 0 and base == 2:
result = "1" + result
return result
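# Illustrative usage of encode() (examples assumed, not part of the original assignment):
#   encode(255, 16)  # -> 'ff'
#   encode(13, 2)    # -> '1101'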
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
# TODO: Convert digits from base 2 to base 16 (and vice versa)
deci = decode(digits, base1)
return encode(deci, base2)
def main():
"""Read command-line arguments and convert given digits between bases."""
# print(decode("235", 8))
# print(decode("235.01", 16))
# print(encode(77, 16))
# print(decode("01001000", 2))
# print(decode("11001000", 2, True))
# print(decode("110.101", 2))
# print(decode("1101.101", 2))
# print(encode(13, 2))
print(encode(13.625, 2))
print(encode(-13.625, 2))
# print(encode(4.47, 2))
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
# Convert given digits between bases
result = convert(digits, base1, base2)
print('{} in base {} is {} in base {}'.format(digits, base1, result, base2))
else:
print('Usage: {} digits base1 base2'.format(sys.argv[0]))
print('Converts digits from base1 to base2')
if __name__ == '__main__':
main()
|
import re
from typing import Dict
non_printing_characters_re = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
digits_re: re.Pattern = re.compile(r"\d")
unicode_punctuation: Dict[str, str] = {
    "，": ",",
    "。": ".",
    "、": ",",
    "„": '"',
    "”": '"',
    "“": '"',
    "«": '"',
    "»": '"',
    "１": '"',
    "」": '"',
    "「": '"',
    "《": '"',
    "》": '"',
    "´": "'",
    "∶": ":",
    "：": ":",
    "？": "?",
    "！": "!",
    "（": "(",
    "）": ")",
    "；": ";",
    "–": "-",
    "—": " - ",
    "．": ". ",
    "～": "~",
    "’": "'",
    "…": "...",
    "━": "-",
    "〈": "<",
    "〉": ">",
    "【": "[",
    "】": "]",
    "％": "%",
    "►": "-",
}
normalization = {
"non_printing_characters_re": non_printing_characters_re,
"digits_re": digits_re,
"unicode_punctuation": unicode_punctuation,
}
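# Minimal usage sketch (an assumption about how these constants are combined;
# the original module only defines them and exports them via `normalization`):
def normalize_text(text: str, digit_placeholder: str = "0") -> str:
    """Strip non-printing characters, unify punctuation and mask digits."""
    text = non_printing_characters_re.sub("", text)
    text = "".join(unicode_punctuation.get(char, char) for char in text)
    return digits_re.sub(digit_placeholder, text)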
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic
from django.contrib.auth.forms import UserCreationForm
from .models import Hall
def home(request):
return render(request, 'halls/home.html')
class SignUp(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('home')
template_name = 'registration/signup.html'
class createHallsView(generic.CreateView):
model = Hall
fields = ['title']
template_name = 'halls/create_hall.html'
success_url = reverse_lazy('home')
|
def first_last(s):
    # Return the first two and last two characters of s; empty string if s is too short.
    if len(s) <= 1:
        ret = ""
    else:
        ret = s[:2] + s[-2:]
    return ret
s1 = "spring"
print(first_last(s1))
s1 = "hello"
print(first_last(s1))
s1 = "a"
print(first_last(s1))
s1 = "abc"
print(first_last(s1))
|
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from utils import *
import os
import sys
import matplotlib.pyplot
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
sys.path.append('../../network')
# Load data
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
#print('Example pic:')
# Example of a picture
#index = 11
#plt.imshow(train_x_orig[index])
#plt.show()
#print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
print('After Standardize and reshape:')
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
#Define model
n_x = 12288 # num_px * num_px * 3
n_h1 = 4
n_h2 = 7
n_h3 = 5
n_y = 1
# Model
import sys
sys.path.append('../../network')
from main_class import *
from layers import *
from loss_functions import *
md=NeuralNetwork(CrossEntropy)
np.random.seed(1)
lr = 0.0075
md.add(Dense(n_h1, input_shape=(n_x,), initializer = 'ng', lr = lr))
md.add(Activation('relu'))
md.add(Dense(n_h2, initializer = 'ng', lr = lr))
md.add(Activation('relu'))
md.add(Dense(n_h3, initializer = 'ng', lr = lr))
md.add(Activation('relu'))
md.add(Dense(n_y, initializer = 'ng', lr = lr))
md.add(Activation('sigmoid'))
#Print_network shape
md.print_network()
# Train
train, val = md.fit(train_x, train_y, n_epochs=1800, batch_size=32)
#Evaluate
pred =md.predict(train_x)
pred=(pred >=0.5)
acc = np.mean((pred == train_y))
print('Training acc: {}'.format(acc))
pred_test =md.predict(test_x)>=0.5
acc = np.mean((pred_test == test_y))
print('Testing acc: {}'.format(acc))
# Print mislabelled
def print_mislabeled_images(classes, X, y, p):
"""
Plots images where predictions and truth were different.
X -- dataset
y -- true labels
p -- predictions
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
plt.rcParams['figure.figsize'] = (80.0, 80.0) # set default size of plots
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
        plt.subplot(2, num_images, i + 1)
plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
plt.axis('off')
plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
plt.show()
#Better only do it on jupyter notebook
#print_mislabeled_images(classes, test_x, test_y, pred_test)
import imageio
my_image = "my_image4.jpg" # change this to the name of your image file
my_label_y = 0 # the true class of your image (1 -> cat, 0 -> non-cat)
fname = "images/" + my_image
image = np.array(imageio.imread(fname))
#image = np.array(imageio.imread(fname))
my_image = np.array(Image.fromarray(image).resize((num_px,num_px))).reshape((num_px*num_px*3,1))
#my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = md.predict(my_image)>=0.5
plt.imshow(image)
plt.show()
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
my_image = "my_image3.jpg" # change this to the name of your image file
my_label_y = 0 # the true class of your image (1 -> cat, 0 -> non-cat)
fname = "images/" + my_image
image = np.array(imageio.imread(fname))
#image = np.array(imageio.imread(fname))
my_image = np.array(Image.fromarray(image).resize((num_px,num_px))).reshape((num_px*num_px*3,1))
#my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = md.predict(my_image)>=0.5
plt.imshow(image)
plt.show()
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
|
# Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TketModel
=========
Module implementing a lambeq model based on a quantum backend, via `tket`.
"""
from __future__ import annotations
from typing import Any, Callable
import numpy as np
from discopy.quantum import Circuit, Id, Measure
from discopy.tensor import Diagram
from lambeq.training.quantum_model import QuantumModel
class TketModel(QuantumModel):
"""A lambeq model for either shot-based simulations of a quantum
pipeline or experiments run on quantum hardware using `tket`."""
def __init__(self, **kwargs) -> None:
"""Initialise TketModel based on the `t|ket>` backend.
Other Parameters
----------------
backend_config : dict
Dictionary containing the backend configuration. Must include the
fields `'backend'`, `'compilation'` and `'shots'`.
Raises
------
KeyError
If `backend_config` is not provided or `backend_config` has missing
fields.
"""
if 'backend_config' not in kwargs:
raise KeyError('Please provide a backend configuration.')
super().__init__()
backend_config = kwargs['backend_config']
fields = ('backend', 'compilation', 'shots')
missing_fields = [f for f in fields if f not in backend_config]
if missing_fields:
            raise KeyError('Missing arguments in backend configuration. '
                           f'Missing arguments: {missing_fields}.')
self.backend_config = backend_config
def _make_lambda(self, diagram: Diagram) -> Callable[[Any], Any]:
"""Measure and lambdify diagrams."""
measured = diagram >> Id().tensor(*[Measure()] * len(diagram.cod))
return measured.lambdify(*self.symbols)
def _randint(self, low=-1 << 63, high=(1 << 63)-1):
return np.random.randint(low, high, dtype=np.int64)
def get_diagram_output(self, diagrams: list[Diagram]) -> np.ndarray:
"""Return the prediction for each diagram using t|ket>.
Parameters
----------
diagrams : list of :py:class:`~discopy.tensor.Diagram`
The :py:class:`Circuits <discopy.quantum.circuit.Circuit>` to be evaluated.
Raises
------
ValueError
If `model.weights` or `model.symbols` are not initialised.
Returns
-------
np.ndarray
Resulting array.
"""
if len(self.weights) == 0 or not self.symbols:
raise ValueError('Weights and/or symbols not initialised. '
'Instantiate through '
'`TketModel.from_diagrams()` first, '
'then call `initialise_weights()`, or load '
'from pre-trained checkpoint.')
lambdified_diagrams = [self._make_lambda(d) for d in diagrams]
tensors = Circuit.eval(
*[diag_f(*self.weights) for diag_f in lambdified_diagrams],
**self.backend_config,
seed=self._randint()
)
self.backend_config['backend'].empty_cache()
# discopy evals a single diagram into a single result
# and not a list of results
if len(diagrams) == 1:
result = self._normalise_vector(tensors.array)
return result.reshape(1, *result.shape)
return np.array([self._normalise_vector(t.array) for t in tensors])
def forward(self, x: list[Diagram]) -> np.ndarray:
"""Perform default forward pass of a lambeq quantum model.
In case of a different datapoint (e.g. list of tuple) or additional
computational steps, please override this method.
Parameters
----------
x : list of :py:class:`~discopy.tensor.Diagram`
The :py:class:`Circuits <discopy.quantum.circuit.Circuit>` to be
evaluated.
Returns
-------
np.ndarray
Array containing model's prediction.
"""
return self.get_diagram_output(x)
|
"""Fixtures for CLI tests"""
import pathlib
import pytest
from scout.demo.resources import demo_files
from scout.server.app import create_app
#############################################################
###################### App fixtures #########################
#############################################################
# use this app object to test CLI commands which use a test database
DATABASE = "testdb"
REAL_DATABASE = "realtestdb"
@pytest.fixture(scope="function", name="demo_files")
def fixture_demo_files():
"""Return a dictionary with paths to the demo files"""
return demo_files
@pytest.fixture(scope="function")
def bam_path():
"""Return the path to a small existing bam file"""
_path = pathlib.Path("tests/fixtures/bams/reduced_mt.bam")
return _path
@pytest.fixture(scope="function")
def empty_mock_app(real_adapter):
"""Return the path to a mocked app object without any data"""
_mock_app = create_app(
config=dict(
TESTING=True,
DEBUG=True,
MONGO_DBNAME=REAL_DATABASE,
DEBUG_TB_ENABLED=False,
LOGIN_DISABLED=True,
)
)
return _mock_app
@pytest.fixture
def mock_app(real_populated_database):
"""Return the path to a mocked app object with data"""
_mock_app = create_app(
config=dict(
TESTING=True,
DEBUG=True,
MONGO_DBNAME=REAL_DATABASE,
DEBUG_TB_ENABLED=False,
LOGIN_DISABLED=True,
)
)
return _mock_app
|
#
# License: BSD
# https://raw.githubusercontent.com/splintered-reality/py_trees/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
Package version number.
"""
##############################################################################
# Version
##############################################################################
# When changing, Also update setup.py and package.xml
# TODO: use pkg_resources to fetch the version from setup.py
__version__ = '2.1.6'
|
# CPU: 0.06 s
from math import hypot
n, width, height = list(map(int, input().split()))
longest = hypot(width, height)
for _ in range(n):
print('DA' if int(input()) <= longest else 'NE')
|
import math
from random import *
from copy import copy
def printField(f):
for i in range(3):
for j in range(3):
for k in range(3):
print f[i*3 + j * 9 + k],
print "|",
print "\n",
if(i==2 or i==5):
print "------------------"
for i in range(3):
for j in range(3):
for k in range(3):
print f[27+i*3 + j * 9 + k],
print "|",
print "\n",
if(i==2 or i==5):
print "------------------"
for i in range(3):
for j in range(3):
for k in range(3):
print f[6*9+i*3 + j * 9 + k],
print "|",
print "\n",
if(i==2 or i==5):
print "------------------"
def matMult(A,B, n):
C = [[0.0 for col in range(n)] for row in range(n)]
su = 0
for c in range(n):
for d in range(n):
for k in range(n):
su = su + A[c][k]*B[k][d]
C[c][d] = su
su = 0
return C
def vecMatMult(A,v,n):
y = [0.0 for col in range(n)]
for i in range(n):
for j in range(n):
y[i] = y[i] + A[i][j]*v[j]
return y
def pagerank(A, n):
x = [1.0/n for col in range(n)]
B = A
for i in range(600):
B = matMult(A,B,n)
eig = vecMatMult(B,x,n)
return eig
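# Note added for clarity: pagerank() is a power-iteration-style sketch. Assuming A is
# column-stochastic (as prepared in findSurvivours() below), raising it to a high power
# and applying it to the uniform vector x approximates its dominant eigenvector,
# which is then used as the ranking score.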
def findSurvivours(winMatrix, n):
#print winMatrix
for i in range(n):
for j in range(n):
winMatrix[i][j] = winMatrix[i][j]*0.95+0.05
for i in range(n):
summa = 0
for j in range(n):
summa = summa + winMatrix[j][i]
for j in range(n):
winMatrix[j][i] = winMatrix[j][i]/float(summa)
"""for i in range(n):
summa = 0
for j in range(n):
summa = summa + winMatrix[j][i]
if(summa != 0):
for j in range(n):
winMatrix[j][i] = winMatrix[j][i]/ float(summa)
else:
for j in range(n):
winMatrix[j][i] = 1.0 / n"""
x = pagerank(winMatrix, n)
print x
survivingAnns = []
for i in range(n/2):
max_val = -1
max_index = 0
for j in range(len(x)):
if(x[j]>=max_val):
max_val = x[j]
max_index = j
x[max_index]=-1.0
#x.pop(max_index)
survivingAnns.append(max_index)
print survivingAnns
return survivingAnns
def playGame(AIs, rules):
rules = copy(rules)
f = rules.getNewBoard()
AIs[0].playerNr = 1
AIs[1].playerNr = 2
AIs[0].resetPlayer()
AIs[1].resetPlayer()
rules.playerNr = 1
while(1):
for p in AIs:
f = p.makeAMove(f)
#printField(f)
if(rules.isOver(f)):
#print f
if(rules.hasWon(f)):
#print "WON!"
return p.playerNr
elif(rules.hasLost(f)):
#print "LOST!"
return p.rules.otherPlayerNr()
elif(rules.isDraw(f)):
#print "draw!"
return 0
else:
return "oopsie"
rules.nextPlayer()
def tournament(AIs, rules):
n = len(AIs)
winMatrix = [[0 for col in range(n)] for row in range(n)]
N = math.floor(1.5*math.log(n))+0
gameCounter = 0
nrGames = N*n
for i in range(n):
ind = range(n)
ind.pop(i)
opponents = []
for j in range(int(N)):
opponents.append(ind.pop(randint(0,n-1-1-j)))
for j in range(len(opponents)):
win = playGame([AIs[i], AIs[opponents[j]]], rules)
gameCounter = gameCounter + 1
print int((gameCounter*100)/nrGames),
print "%,",
#print win
if(win == 2):
winMatrix[opponents[j]][i] = winMatrix[opponents[j]][i] + 1
elif(win == 1):
winMatrix[i][opponents[j]] = winMatrix[i][opponents[j]] + 1
survivours = findSurvivours(winMatrix, n)
return survivours
|
"""models.py
"""
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data as data
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from skimage import io
import numpy as np
import os
from PIL import Image
# same loader used during training
inference_loader = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
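# (Note: the mean/std values above are the standard ImageNet normalisation statistics
#  expected by the torchvision ResNet backbones loaded in get_trunk_model() below.)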
class FilenameDataset(data.Dataset):
"""
Data loader for filenames and their corresponding labels.
"""
def __init__(self, image_filenames, targets, images_path=''):
"""
Args:
image_filenames (list): List of image filenames
targets (list): List of integers that correspond to target class indices
"""
assert (len(image_filenames) == len(targets))
self.image_filenames = image_filenames
self.targets = targets
self.images_path = images_path
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the target class index
"""
image_filename = os.path.join(self.images_path, self.image_filenames[index])
# if not os.path.isfile(image_filename):
# os.system("ln -s {} {}".format(image_filename.replace("/data/vision/torralba/humanitarian/datasets/images_raw/",
# "/data/vision/torralba/humanitarian/dimitris/getGoogleImages2/finalImages/"), image_filename))
if not os.path.isfile(image_filename):
raise ValueError("{} is not a file".format(image_filename))
try:
with open(image_filename, 'rb') as f:
image = Image.open(f).convert('RGB')
image = inference_loader(image)
except:
print(image_filename)
image = Image.new('RGB', (300, 300), 'white')
image = inference_loader(image)
return image, self.targets[index]
def __len__(self):
return len(self.image_filenames)
def get_trunk_model(args):
if args.pretrained_with_places:
print("loading places weights for pretraining")
model = models.__dict__[args.arch](num_classes=365)
dir_path = os.path.dirname(os.path.realpath(__file__))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.arch == "resnet18":
model_file = os.path.join(dir_path, "pretrained_weights/resnet18_places365.pth.tar")
checkpoint = torch.load(model_file, map_location=device)
state_dict = {str.replace(k, 'module.', ''): v for k,
v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
model.fc = nn.Linear(512, 1024)
model = nn.Sequential(model, nn.ReLU())
elif args.arch == "resnet50":
model_file = os.path.join(dir_path, "pretrained_weights/resnet50_places365.pth.tar")
checkpoint = torch.load(model_file, map_location=device)
state_dict = {str.replace(k, 'module.', ''): v for k,
v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
model.fc = nn.Linear(2048, 1024)
model = nn.Sequential(model, nn.ReLU())
return model
else:
print("loading imagenet weights for pretraining")
# otherwise load with imagenet weights
if args.arch == "resnet18":
model = models.resnet18(pretrained=True)
model.fc = nn.Linear(512, 1024)
model = nn.Sequential(model, nn.ReLU())
elif args.arch == "resnet50":
model = models.resnet50(pretrained=True)
model.fc = nn.Linear(2048, 1024)
model = nn.Sequential(model, nn.ReLU())
return model
def get_incident_layer(args):
if args.activation == "softmax":
return nn.Linear(args.fc_dim, args.num_incidents + 1)
elif args.activation == "sigmoid":
return nn.Linear(args.fc_dim, args.num_incidents)
def get_place_layer(args):
if args.activation == "softmax":
return nn.Linear(args.fc_dim, args.num_places + 1)
elif args.activation == "sigmoid":
return nn.Linear(args.fc_dim, args.num_places)
def get_incidents_model(args):
"""
Returns [trunk_model, incident_layer, place_layer]
"""
# the shared feature trunk model
trunk_model = get_trunk_model(args)
# the incident model
incident_layer = get_incident_layer(args)
# the place model
place_layer = get_place_layer(args)
print("Let's use", args.num_gpus, "GPUs!")
trunk_model = torch.nn.DataParallel(trunk_model, device_ids=range(args.num_gpus))
incident_layer = torch.nn.DataParallel(incident_layer, device_ids=range(args.num_gpus))
place_layer = torch.nn.DataParallel(place_layer, device_ids=range(args.num_gpus))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trunk_model.to(device)
incident_layer.to(device)
place_layer.to(device)
return [trunk_model, incident_layer, place_layer]
def update_incidents_model_with_checkpoint(incidents_model, args):
"""
Update incidents model with checkpoints (in args.checkpoint_path)
"""
trunk_model, incident_layer, place_layer = incidents_model
# optionally resume from a checkpoint
# TODO: bring in the original pretrained weights maybe?
# TODO: remove the args.trunk_resume, etc.
# TODO: remove path prefix
config_name = os.path.basename(args.config)
print(config_name)
trunk_resume = os.path.join(
args.checkpoint_path, "{}_trunk.pth.tar".format(config_name))
place_resume = os.path.join(
args.checkpoint_path, "{}_place.pth.tar".format(config_name))
incident_resume = os.path.join(
args.checkpoint_path, "{}_incident.pth.tar".format(config_name))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for (path, net) in [(trunk_resume, trunk_model), (place_resume, place_layer), (incident_resume, incident_layer)]:
if os.path.isfile(path):
checkpoint = torch.load(path, map_location=device)
args.start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'])
print("Loaded checkpoint '{}' (epoch {}).".format(path, checkpoint['epoch']))
else:
print("No checkpoint found at '{}'.".format(path))
def update_incidents_model_to_eval_mode(incidents_model):
print("Switching to eval mode.")
for m in incidents_model:
# switch to evaluation mode
m.eval()
def get_predictions_from_model(args,
incidents_model,
batch_input,
image_paths,
index_to_incident_mapping,
index_to_place_mapping,
inference_dict, topk=1):
"""
Input:
{
"image_paths" = [list of image paths],
}
Returns {
"incidents": [], # list of topk elements
"places": [] # list of topk elements
}
"""
trunk_model, incident_layer, place_layer = incidents_model
# compute output with models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input = batch_input.to(device)
output = trunk_model(input)
incident_output = incident_layer(output)
place_output = place_layer(output)
if args.activation == "softmax":
incident_output = F.softmax(incident_output, dim=1)
place_output = F.softmax(place_output, dim=1)
elif args.activation == "sigmoid":
m = nn.Sigmoid()
incident_output = m(incident_output)
place_output = m(place_output)
incident_probs, incident_idx = incident_output.sort(1, True)
place_probs, place_idx = place_output.sort(1, True)
# batch_input[0] is the batch dimension (the # in the batch)
for batch_idx in range(len(batch_input.numpy())):
incidents = []
for idx in incident_idx[batch_idx].cpu().numpy()[:topk]:
if idx < len(index_to_incident_mapping):
incidents.append(
index_to_incident_mapping[idx]
)
else:
incidents.append("no incident")
places = []
for idx in place_idx[batch_idx].cpu().numpy()[:topk]:
if idx < len(index_to_place_mapping):
places.append(
index_to_place_mapping[idx]
)
else:
places.append("no place")
output = {
"incidents": incidents,
"places": places,
"incident_probs": incident_probs[batch_idx].cpu().detach().numpy()[:topk],
"place_probs": place_probs[batch_idx].cpu().detach().numpy()[:topk]
}
image_path = image_paths[batch_idx]
inference_dict[image_path] = output
# TODO: maybe return the output here
return None
def get_predictions_from_model_all(args, incidents_model, batch_input, image_paths, index_to_incident_mapping,
index_to_place_mapping, inference_dict, softmax=True):
"""
Input:
{
"image_paths" = [list of image paths],
}
Returns {
"incidents": [], # list of topk elements
"places": [] # list of topk elements
}
"""
trunk_model, incident_layer, place_layer = incidents_model
# compute output with models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input = batch_input.to(device)
output = trunk_model(input)
incident_output = incident_layer(output)
place_output = place_layer(output)
if softmax:
incident_output = F.softmax(incident_output, dim=1)
place_output = F.softmax(place_output, dim=1)
else:
m = nn.Sigmoid()
incident_output = m(incident_output)
place_output = m(place_output)
incident_probs, incident_idx = incident_output.sort(1, True)
place_probs, place_idx = place_output.sort(1, True)
# batch_input[0] is the batch dimension (the # in the batch)
for batch_idx in range(len(batch_input.numpy())):
incidents = []
for idx in incident_idx[batch_idx].cpu().numpy():
if idx < len(index_to_incident_mapping):
incidents.append(
index_to_incident_mapping[idx]
)
else:
incidents.append("no incident")
places = []
for idx in place_idx[batch_idx].cpu().numpy():
if idx < len(index_to_place_mapping):
places.append(
index_to_place_mapping[idx]
)
else:
places.append("no place")
output = {
"incidents": incidents,
"places": places,
"incident_probs": incident_probs[batch_idx].cpu().detach().numpy(),
"place_probs": place_probs[batch_idx].cpu().detach().numpy()
}
image_path = image_paths[batch_idx]
inference_dict[image_path] = output
# TODO: maybe return the output here
return None
def get_features_from_model(incidents_model, batch_input, image_paths, inference_dict):
"""
Input:
{
"image_paths" = [list of image paths],
}
Returns trunk_model output.
"""
trunk_model, incident_layer, place_layer = incidents_model
# compute output with models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input = batch_input.to(device)
output = trunk_model(input)
# batch_input[0] is the batch dimension (the # in the batch)
for batch_idx in range(len(batch_input.numpy())):
out = output[batch_idx].cpu().detach().numpy()
# print("here")
# print(out)
# print(out.shape)
# print(type(out))
image_path = image_paths[batch_idx]
inference_dict[image_path] = out
# TODO: maybe return the output here
return None
|
"""Interface for the application
This file can be run, and will properly initialize interface (and rest of the
app) in your terminal.
Battle-tested on Solarized color scheme and under tmux.
"""
from contextlib import contextmanager
from datetime import datetime
import os
import os.path
import random
import sys
import npyscreen
from pyaudio_fix import fix_pyaudio
import audio
import config
import filters
import utils
@contextmanager
def use_xterm():
"""Helper setting proper TERM value
Required for colors to work under 16-color tmux.
"""
old_value = os.environ.get('TERM')
os.environ['TERM'] = 'xterm'
yield
if old_value is not None:
os.environ['TERM'] = old_value
def quit():
"""Close application gracefully"""
audio.stop()
sys.exit(0)
def show_quit_popup(key=None):
"""Display popup asking whether to quit application"""
result = npyscreen.notify_yes_no(
message='Do you really want to quit?',
title='Quit',
editw=1, # select No button by default
)
if result:
quit()
class TracksListWidget(npyscreen.TitleSelectOne):
"""Widget displaying list of tracks
Properly loads the track after selecting.
"""
def get_additional_filenames(self, filename):
"""Loads up to 2 additional tracks
Returns list of filenames.
"""
app = self.parent.parentApp
filenames = [filename]
if random.random() >= config.LOAD_MULTIPLE_THRESHOLD:
return filenames
app.notify('Multiple tracks selected!')
if random.random() < config.LOAD_TRIPLE_THRESHOLD:
count = 2
else:
count = 1
for _ in range(count):
for __ in range(10):
selected = random.choice(self.values)
if selected not in filenames:
filenames.append(selected)
break
return filenames
def load_tracks(self, filenames):
"""Loads files as pydub tracks"""
app = self.parent.parentApp
tracks = []
for filename in filenames:
app.notify('Loading {title}...'.format(title=filename))
track = audio.load(filename)
if not app._already_cut:
track = audio.cut(track, app._track_length * 2)
tracks.append(track)
return tracks
def get_infos(self, filenames):
"""Obtains infos about filenames"""
app = self.parent.parentApp
infos = []
for filename in filenames:
info = audio.get_info(filename)
no = app._filenames_list.index(filename) + 1
infos.append('No. {no}'.format(no=no),)
infos += info
infos.append('\n')
return infos
def when_value_edited(self):
"""Loads the track to parent app after selecting
Also cuts it to proper length, if requested.
"""
if not self.value:
return
app = self.parent.parentApp
filename = self.values[self.value[0]]
filenames = self.get_additional_filenames(filename)
song_info = self.parent.get_widget('song-info')
# Load everything
filenames = [app.filenames.get(v) for v in filenames]
infos = self.get_infos(filenames)
tracks = self.load_tracks(filenames)
self.parent.set_status('Loading')
song_info.values = infos
song_info.display()
# Mix 'em up!
track = filters.multiple_tracks(tracks)
app.current_track = track
app.current_track_nos = filenames
app.notify('Loaded!')
# Also, clear filters
self.parent.h_reset_filters()
self.parent.set_status('Ready to play')
self.parent.calculate_points()
self.value = []
self.display()
class MainForm(npyscreen.FormBaseNew):
"""Main form of the application"""
def update_slider(self, value):
"""Sets value of position slider"""
self.get_widget('position').value = value / 1000
self.get_widget('position').display()
def h_play(self, key):
"""Plays currently selected track
Also applies filters, if any are selected.
"""
app = self.parentApp
if not app.current_track:
app.notify('No track selected')
return
for filename in app.current_track_nos:
track_no = app._filenames_list.index(filename) + 1
try:
self.get_widget('track-list').values.remove(track_no)
except ValueError:
pass
self.get_widget('track-list').value = []
app.notify('Applying filters...')
track = filters.apply(app.current_track, self.parentApp.filters)
track = track[:app._track_length]
self.get_widget('position').entry_widget.out_of = len(track) / 1000
self.get_widget('position').display()
audio.play(track, notifier=self.update_slider)
app.notify('Playing!')
self.set_status('Playing')
def h_stop(self, key):
"""Stops currently played track"""
audio.stop()
self.parentApp.notify('Stopped.')
self.set_status('Ready to play')
def h_select_filters(self, key):
"""Randomly selects filters"""
selected = filters.get_random_filters()
self.parentApp.filters = selected
values = [filters.FILTERS_LIST.index(f) for f in selected]
widget = self.get_widget('filters')
widget.value = values
widget.display()
self.parentApp.notify('Filters randomized.')
self.calculate_points()
def h_reset_filters(self, key=None):
"""Clears filters selection"""
widget = self.get_widget('filters')
widget.value = []
self.parentApp.filters = []
widget.display()
self.parentApp.notify('Filters cleared.')
def h_toggle_filter(self, key):
"""Toggles single filter on the filters list"""
index = int(chr(key)) - 1
widget = self.get_widget('filters')
try:
self.parentApp.filters.remove(filters.FILTERS_LIST[index])
widget.value.remove(index)
except ValueError:
self.parentApp.filters.append(filters.FILTERS_LIST[index])
widget.value.append(index)
widget.display()
def set_status(self, message):
"""Sets value for the status widget
This is kind of useful, because statusbar displays only the last
message, and it's important to know whether song is playing, stopped
or loaded.
"""
song_status = self.get_widget('song-status')
song_status.value = message
song_status.display()
def calculate_points(self):
"""Sets proper amount of points in Points widget"""
widget = self.get_widget('points')
# Filters
points = config.FILTER_POINTS[len(self.parentApp.filters)]
# Multiple songs
points *= config.TRACKS_MULTIPLIER[
len(self.parentApp.current_track_nos)
]
widget.value = int(round(points))
widget.display()
def set_up_handlers(self):
"""Sets up handlers for keypresses
        Bonus: uppercase keys are also supported, meaning you don't need to worry
        about Caps Lock!
"""
super(MainForm, self).set_up_handlers()
keys = {
'^q': show_quit_popup,
'a': self.h_play,
's': self.h_stop,
'f': self.h_select_filters,
'r': self.h_reset_filters,
}
# Make upperkeys available, too!
for key, func in list(keys.items()):
keys[key.upper()] = func
# Add filter toggling
for i, filter_name in enumerate(filters.FILTERS_LIST, start=1):
keys[str(i)] = self.h_toggle_filter
self.handlers.update(keys)
class SettingsForm(npyscreen.Form):
"""Form with settings
Mainly used to get working directory path. You can also customize some
other things here.
Should be displayed before main form.
"""
def afterEditing(self):
"""Sets proper values in the parent app after pressing OK button"""
app = self.parentApp
path = self.get_widget('path').value
status = self.get_widget('status')
track_length = self.get_widget('track_length').value
already_cut = self.get_widget('track_cut').value
seed = self.get_widget('seed').value
if not path:
status.value = 'Enter something'
return
if not os.path.isdir(path):
status.value = 'That is not a directory'
return
app._path = path
app._track_length = int(track_length) * 1000
app._seed = seed
app._already_cut = already_cut
app.load_filenames(path)
app.initialize_filters()
app.setNextForm('MAIN')
class App(npyscreen.NPSAppManaged):
"""Main application class"""
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
self._filenames = {}
self._filenames_list = []
self._path = None
self.current_track = None
self.current_track_nos = []
self._track_length = 35000 # in ms
self._seed = None
self._already_cut = False
self.filters = []
@property
def filenames(self):
return self._filenames
@filenames.setter
def filenames(self, value):
self._filenames = value
self._filenames_list = [v for k, v in sorted(value.items())]
track_number = self.getForm('MAIN').get_widget('track-list')
track_number.values = list(self._filenames.keys())
track_number.display()
def onStart(self):
"""Initializes all forms and populates it with widgets
TODO: maybe put each form code into:
1) separate method in App?
2) init method in each form?
Either of these would increase readability.
"""
# Directory form
directory_form = self.addForm(
'directory',
SettingsForm,
name='Settings',
)
directory_form.add_widget(
npyscreen.TitleText,
name='Path',
w_id='path',
)
directory_form.nextrely += 1
directory_form.add_widget(
npyscreen.FixedText,
w_id='status',
editable=False,
)
directory_form.add_widget(
npyscreen.TitleText,
name='Track length',
value='35',
w_id='track_length',
)
directory_form.add_widget(
npyscreen.Checkbox,
name='Already cut?',
value=True,
w_id='track_cut',
relx=18,
)
directory_form.add_widget(
npyscreen.TitleText,
name='Random seed',
value='this is some random seed',
w_id='seed',
)
# Main form
form = self.addForm('MAIN', MainForm, name='EKOiE')
form.add_widget(
TracksListWidget,
name='Track number',
values=[],
w_id='track-list',
max_height=form.lines-7,
width=int(form.columns/2),
)
form.add_widget(
npyscreen.TitleFixedText,
height=2,
name='Status',
rely=-3,
editable=False,
w_id='status',
)
# Split screen vertically in half
form.nextrely = 2
form.nextrelx = int(form.columns/2) + 2
form.add_widget(
npyscreen.MultiLineEditableTitle,
height=18,
name='Songs info',
editable=False,
values=[],
w_id='song-info',
)
# Song status
form.add_widget(
npyscreen.TitleFixedText,
height=1,
editable=False,
name='Song status',
w_id='song-status',
)
form.nextrely += 2
# Slider
form.add_widget(
npyscreen.TitleSlider,
name='Position',
out_of=35,
lowest=0,
value=0,
label=True,
w_id='position',
)
form.nextrely += 2
form.add_widget(
npyscreen.TitleMultiSelect,
editable=False,
height=8,
name='Filters',
w_id='filters',
values=filters.FILTERS_LIST,
)
form.nextrely += 2
form.add_widget(
npyscreen.TitleFixedText,
height=1,
editable=False,
name='Points',
w_id='points',
)
self.setNextForm('directory')
def notify(self, message):
"""Displays notification in the bottom of the screen"""
status = self.getForm('MAIN').get_widget('status')
status.value = '[{time}] {message}'.format(
time=datetime.now().strftime('%H:%M:%S'),
message=message,
)
status.display()
def load_filenames(self, path):
"""Loads filenames of tracks from working directory"""
self.notify(
'Loading files from {path}...'.format(path=path),
)
self.filenames = utils.shuffle(
utils.get_filenames(path),
seed=self._seed,
)
self.notify('{count} files loaded.'.format(count=len(self.filenames)))
def initialize_filters(self):
self.notify('Initializing panzerfaust filter...')
filters.initialize_panzer_tracks()
self.notify('Initializing overlay filter...')
filters.initialize_overlay_tracks()
self.notify('Filters initialized.')
if __name__ == '__main__':
with use_xterm():
fix_pyaudio()
app = App()
try:
app.run()
except KeyboardInterrupt:
quit()
|
from requests.exceptions import HTTPError
from graph_notebook.system.database_reset import initiate_database_reset, perform_database_reset
from test.integration import IntegrationTest
class TestStatusWithoutIAM(IntegrationTest):
def test_do_database_reset_initiate(self):
result = initiate_database_reset(self.host, self.port, self.ssl)
self.assertNotEqual(result['payload']['token'], '')
def test_do_database_reset_perform_with_wrong_token(self):
with self.assertRaises(HTTPError) as cm:
perform_database_reset('x', self.host, self.port, self.ssl)
expected_message = "System command parameter 'token' : 'x' does not match database reset token"
self.assertEqual(expected_message, str(cm.exception.response.json()['detailedMessage']))
|
from typing import Optional
from pydantic import Field
from pystratis.api import Model
from pystratis.core.types import Address, Money
# noinspection PyUnresolvedReferences
class OfflineWithdrawalFeeEstimationRequest(Model):
"""A request model for the coldstaking/estimate-offline-cold-staking-withdrawal-tx-fee endpoint.
Args:
wallet_name (str): The wallet name.
account_name (str): The account name.
receiving_address (Address): The receiving address.
amount (Money): The amount to withdraw to the receiving address.
subtract_fee_from_amount (bool, optional): If fee should be subtracted from amount. Default=True.
"""
wallet_name: str = Field(alias='walletName')
account_name: str = Field(alias='accountName')
receiving_address: Address = Field(alias='receivingAddress')
amount: Money
subtract_fee_from_amount: Optional[bool] = Field(default=True, alias='subtractFeeFromAmount')
|
class Db(object):
def __init__(self, web3):
self.web3 = web3
def putString(self, *args, **kwargs):
raise DeprecationWarning("This function has been deprecated")
def getString(self, *args, **kwargs):
raise DeprecationWarning("This function has been deprecated")
def putHex(self, *args, **kwargs):
raise DeprecationWarning("This function has been deprecated")
def getHex(self, *args, **kwargs):
raise DeprecationWarning("This function has been deprecated")
|
import subprocess
import os
import time
def soft_reset():
from picar_4wd.pin import Pin
soft_reset_pin = Pin("D16")
# print('soft_reset')
soft_reset_pin.low()
time.sleep(0.001)
soft_reset_pin.high()
time.sleep(0.001)
def mapping(x,min_val,max_val,aim_min,aim_max):
x = aim_min + abs((x - min_val) / (max_val- min_val) * (aim_max-aim_min))
return x
def cpu_temperature(): # cpu_temperature
raw_cpu_temperature = subprocess.getoutput("cat /sys/class/thermal/thermal_zone0/temp")
cpu_temperature = round(float(raw_cpu_temperature)/1000,2) # convert unit
#cpu_temperature = 'Cpu temperature : ' + str(cpu_temperature)
return cpu_temperature
def gpu_temperature(): # gpu_temperature
raw_gpu_temperature = subprocess.getoutput( '/opt/vc/bin/vcgencmd measure_temp' )
gpu_temperature = round(float(raw_gpu_temperature.replace( 'temp=', '' ).replace( '\'C', '' )), 2)
#gpu_temperature = 'Gpu temperature : ' + str(gpu_temperature)
return gpu_temperature
def cpu_usage(): # cpu_usage
# result = str(os.popen("top -n1 | awk '/Cpu\(s\):/ {print($2)}'").readline().strip())
result = os.popen("mpstat").read().strip()
result = result.split('\n')[-1].split(' ')[-1]
result = round(100 - float(result), 2)
result = str(result)
# print(result)
return result
def disk_space(): # disk_space
p = os.popen("df -h /")
i = 0
while 1:
i = i +1
line = p.readline()
if i==2:
return line.split()[1:5]
def ram_info():
p = os.popen('free')
i = 0
while 1:
i = i + 1
line = p.readline()
if i==2:
return list(map(lambda x:round(int(x) / 1000,1), line.split()[1:4]))
def pi_read():
result = {
"cpu_temperature": cpu_temperature(),
"gpu_temperature": gpu_temperature(),
"cpu_usage": cpu_usage(),
"disk": disk_space(),
"ram": ram_info(),
"battery": power_read(),
}
return result
def power_read():
from picar_4wd.adc import ADC
power_read_pin = ADC('A4')
power_val = power_read_pin.read()
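    # Assumption: the ADC returns a 12-bit value (0-4095) referenced to 3.3 V, and
    # the battery is sampled through a 1/3 voltage divider, hence the *3 below.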
power_val = power_val / 4095.0 * 3.3
# print(power_val)
power_val = power_val * 3
power_val = round(power_val, 2)
return power_val
def getIP(ifaces=['wlan0', 'eth0']):
import re
if isinstance(ifaces, str):
ifaces = [ifaces]
for iface in list(ifaces):
search_str = 'ip addr show {}'.format(iface)
result = os.popen(search_str).read()
com = re.compile(r'(?<=inet )(.*)(?=\/)', re.M)
ipv4 = re.search(com, result)
if ipv4:
ipv4 = ipv4.groups()[0]
return ipv4
return False
def main():
import sys
if len(sys.argv) >= 2:
print("Welcome to SunFounder PiCar-4WD.")
command = sys.argv[1]
if command == "soft-reset":
print("soft-reset")
soft_reset()
elif command == "power-read":
print("power-read")
print("Power voltage: {}V".format(power_read()))
elif command == "web-example":
if len(sys.argv) >= 3:
opt = sys.argv[2]
if opt == "enable":
os.system("sudo update-rc.d picar-4wd-web-example defaults")
print("web-example start on boot is enabled")
elif opt == "disable":
os.system("sudo update-rc.d picar-4wd-web-example remove")
print("web-example start on boot is disabled")
else:
usage(command)
else:
print("Run: `picar-4wd web-example enable/disable` to enable/disable start on boot")
os.system("sudo python3 /home/pi/picar-4wd/examples/web/start.py")
elif command == "test":
from picar_4wd import forward,get_distance_at,get_grayscale_list,stop
if len(sys.argv) >= 3:
opt = sys.argv[2]
if opt == "motor":
print("Motor test start!, Ctrl+C to Stop")
forward(10)
try:
while True:
pass
except KeyboardInterrupt:
stop()
elif opt == "servo":
print(get_distance_at(0))
elif opt == "grayscale":
print(get_grayscale_list())
else:
usage(command)
else:
print('Command error, "%s" is not in list' % sys.argv[1])
usage()
else:
usage()
destroy()
def destroy():
quit()
def usage(cmd=None):
general = '''
Usage: picar-4wd [Command] [option]
Commands:
soft-reset
power-read
web-example
test
'''
web_example = '''
Usage: picar-4wd web-example [option]
Options:
enable Enable start on boot
disable Disable start on boot
'''
test = '''
Usage: picar-4wd test [option]
Options:
motor test the motor
servo test the servo
grayscale test the grayscale
'''
if cmd == None:
print(general)
elif cmd == "web-example":
print(web_example)
elif cmd == "test":
print(test)
destroy()
|
# -*- coding: utf-8 -*-
from .record import (
Metadata,
Record,
)
__all__ = ['Parser']
class Parser:
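    """Parses RIR delegation-style data files into Record objects.

    Each data line is assumed (judging from the field handling below) to look
    like ``registry|cc|type|start|value|date|status``; only ipv4/ipv6 records
    are kept and persisted through the store.
    """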
def __init__(self, store):
self.store = store
def parse_record(self, metadata, line):
factors = line.split('|')
if len(factors) < 7:
return
        registry, cc, type_, start, value, date, status = factors[:7]
if type_ not in ('ipv4', 'ipv6'):
return
return Record(metadata, start, type_, value, cc)
def do(self, fp):
metadata = None
for line in fp:
line = line[:-1]
if line.startswith('#') or line.endswith('summary'):
continue
if metadata is None:
version, registry, serial, records,\
startdate, enddate, utcoffset = line.split('|')[:7]
metadata = Metadata(registry, version, serial)
continue
record = self.parse_record(metadata, line)
if record is None:
continue
self.store.persist(record)
|
from django.db import models
from django.db.models import Sum, Avg, Count, Func
from django.db.models.signals import pre_save
from django.utils.text import slugify
from django.dispatch import receiver
def dict_pop(d, to_pop):
return {k: v for k, v in d.items() if k not in to_pop}
class Round(Func):
function = 'ROUND'
arity = 2
arg_joiner = '::numeric, '
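# Renders as ROUND(<expression>::numeric, <places>); the ::numeric cast is what
# PostgreSQL's two-argument ROUND() requires for float-typed aggregates.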
raw_stats = {'average_GPA': (Avg,'average_GPA'),
'As': (Avg,'As'), 'Bs': (Avg,'Bs'),
'Cs': (Avg,'Cs'), 'Ds': (Avg,'Ds'), 'Fs': (Avg,'Fs'),
'students': (Sum,'class_size'),
'withdrawals': (Sum,'withdrawals')}
stats_dict = {key: Round(t[0](t[1]), 2) for key, t in raw_stats.items()}
class Semester(models.Model):
name = models.CharField(max_length=20)
ordering = models.PositiveIntegerField()
def __str__(self):
return str(self.name)
class Meta:
ordering = ['ordering']
class Term(models.Model):
semester = models.ForeignKey(Semester, on_delete=models.CASCADE)
year = models.PositiveIntegerField()
def __str__(self):
return '{} {}'.format(self.semester, self.year)
class Meta:
ordering = ['year', 'semester']
class CourseManager(models.Manager):
def get_queryset(self, **kwargs):
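        # Annotate every Course with grade-distribution averages computed over its
        # related sections; the Sum-based stats (students, withdrawals) are left out.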
base_dict = dict_pop(raw_stats, ['students', 'withdrawals'])
to_annotate = {k: Round(t[0]('sections__'+t[1]), 2) for k, t in base_dict.items()}
return super().get_queryset(**kwargs).annotate(**to_annotate)
class Course(models.Model):
department = models.CharField(max_length=8)
number = models.PositiveIntegerField()
title = models.CharField(max_length=200)
hours = models.PositiveIntegerField()
slug = models.SlugField(unique=True, max_length=200)
objects = CourseManager()
def __str__(self):
return '{} {}: {} ({} hours)'.format(self.department, self.number, self.title, self.hours)
class Meta:
ordering = ['department', 'number', 'title', 'hours']
class Pathway(models.Model):
name = models.CharField(max_length=20)
description = models.CharField(max_length=100)
courses = models.ManyToManyField(Course, related_name='pathways')
def __str__(self):
return self.name
class Meta:
ordering = ('name', 'description',)
class SectionQueryset(models.QuerySet):
def stats(self):
return self.aggregate(**stats_dict)
def group_by_instructor(self):
stats = dict_pop(stats_dict, ['students'])
stats['sections_taught'] = Count('instructor')
return self.values('instructor').annotate(**stats).order_by('instructor')
def group_by_term(self):
semesters = self.prefetch_related('term', 'term__semester').annotate(group_GPA=stats_dict['average_GPA'])
return {'{} {}'.format(s.term.semester.name, s.term.year):\
s.group_GPA for s in semesters}
class Section(models.Model):
term = models.ForeignKey(Term, on_delete=models.CASCADE, related_name='sections')
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='sections')
CRN = models.PositiveIntegerField()
instructor = models.CharField(max_length=30)
average_GPA = models.DecimalField(max_digits=4, decimal_places=2)
As = models.DecimalField(max_digits=4, decimal_places=1)
Bs = models.DecimalField(max_digits=4, decimal_places=1)
Cs = models.DecimalField(max_digits=4, decimal_places=1)
Ds = models.DecimalField(max_digits=4, decimal_places=1)
Fs = models.DecimalField(max_digits=4, decimal_places=1)
withdrawals = models.PositiveIntegerField()
class_size = models.PositiveIntegerField()
slug = models.SlugField(unique=True, max_length=200)
objects = SectionQueryset.as_manager()
def __str__(self):
return '{} | {} | {} | {} {}'.format(self.CRN, self.instructor, self.term, self.course.department, self.course.number)
class Meta:
ordering = ['term', 'course', 'CRN', 'instructor', 'average_GPA']
def create_slug(instance, fields, sender, new_slug=None):
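    # If the slug is already taken, append the id of the newest clashing row and
    # recurse until a unique slug is produced.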
slug = slugify('_'.join(map(str, fields)))
if new_slug is not None:
slug = new_slug
qs = sender.objects.filter(slug=slug).order_by("-id")
if qs.exists():
new_slug = '{}-{}'.format(slug, qs.first().id)
return create_slug(instance, fields, sender, new_slug=new_slug)
return slug
@receiver(pre_save, sender=Course)
def pre_save_course_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
fields = [instance.department, instance.number, instance.title, instance.hours]
instance.slug = create_slug(instance, fields, sender)
@receiver(pre_save, sender=Section)
def pre_save_section_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
fields = [instance.term, instance.course.department, instance.course.number, instance.instructor, instance.CRN]
instance.slug = create_slug(instance, fields, sender)
|
"""
Payout Creation Exception gets raised when request for payout batch creation gets failed.
"""
from .payout_exception import PayoutException
class PayoutBatchCreationException(PayoutException):
"""
PayoutBatchCreationException
"""
__bitpay_message = "Failed to create payout batch"
__bitpay_code = "BITPAY-PAYOUT-BATCH-CREATE"
__api_code = ""
def __init__(self, message, code=202, api_code="000000"):
"""
Construct the PayoutBatchCreationException.
:param message: The Exception message to throw.
:param code: [optional] The Exception code to throw.
:param api_code: [optional] The API Exception code to throw.
"""
message = self.__bitpay_code + ": " + self.__bitpay_message + ":" + message
self.__api_code = api_code
super().__init__(message, code)
|
import math
from keras_frcnn.configurations.FasterRcnnConfiguration import FasterRcnnConfiguration
class OriginalPascalVocConfig(FasterRcnnConfiguration):
"""The original configuration that was provided along the keras_frcnn sample implementation """
def __init__(self):
super().__init__(anchor_box_scales=[128, 256, 512],
anchor_box_ratios=[[1, 1],
[1 / math.sqrt(2), 2 / math.sqrt(2)],
[2 / math.sqrt(2), 1 / math.sqrt(2)]],
resize_smallest_side_of_image_to=600,
use_horizontal_flip_augmentation=False,
use_vertical_flip_augmentation=False,
use_90_degree_rotation_augmentation=False,
verbose=True,
image_channel_mean=[103.939, 116.779, 123.68],
image_scaling_factor=1.0,
number_of_ROIs_at_once=32,
rpn_stride=16,
are_classes_balanced=False,
std_scaling=4.0,
classifier_regr_std=[8.0, 8.0, 4.0, 4.0],
rpn_min_overlap=0.3,
rpn_max_overlap=0.7,
classifier_min_overlap=0.1,
classifier_max_overlap=0.5)
def name(self) -> str:
return "original_pascal_voc"
if __name__ == "__main__":
configuration = OriginalPascalVocConfig()
print(configuration.summary())
|
#!/usr/local/python/bin/python
"""
grab all the actions from 1 day ago, split according to camera
go to last action and get the last image
only make pngs and upload to webserver if needed
"""
import os
import sys
import glob as g
from datetime import datetime
import pymysql
from create_movie import create_movie
# pylint: disable = invalid-name
# pylint: disable = redefined-outer-name
# pylint: disable = superfluous-parens
# globals
topdir = "/ngts"
convert_loc = "/usr/local/bin"
cron_dir = "/usr/local/cron/work"
web_dir = "/ngts/staging/archive/last_images"
thumbsize = 15 # scaling percentage
# empty dictionary for the actions for each camera
cams = {801:None, 802:None, 803:None, 804:None,
805:None, 806:None, 807:None, 808:None,
809:None, 810:None, 811:None, 812:None,
813:None, 899:None}
# cam/das map
das = {801:None, 802:None, 803:None,
804:None, 805:None, 806:None,
807:None, 808:None, 809:None,
810:None, 811:None, 812:None,
813:None, 899:None}
def getDasLoc(das):
for i in das:
if i != 899:
comm = '/usr/local/paladin/bin/ngwhereis %d' % (i)
s = os.popen(comm).readline()
try:
das[i] = s.split()[0]
except IndexError:
das[i] = None
print(s)
return das
def checkDasMachinesAreOnline(das):
cont = 0
for i in das:
if das[i]:
x = os.popen('ping -w 0.2 -c 1 %s' % (das[i])).readlines()
if ' 0% packet loss' in x[-2]:
cont += 0
else:
cont += 1
if cont > 0:
print("MACHINES ARE DOWN - ignoring image generation (NFS issues)")
sys.exit(1)
def getLastActionIds(cams):
for cam in cams:
with pymysql.connect(host='ds', db='ngts_ops') as cur:
qry = """
SELECT
ril.camera_id, ril.action_id, al.action
FROM raw_image_list AS ril
LEFT JOIN action_list AS al
USING (action_id)
WHERE ril.camera_id={}
AND ril.start_time_utc >= now() - INTERVAL 1 HOUR
ORDER BY ril.start_time_utc DESC LIMIT 1
""".format(cam)
print(qry)
t1 = datetime.utcnow()
cur.execute(qry)
t2 = datetime.utcnow()
print('Query took: {:.2f}s'.format((t2-t1).total_seconds()))
# get the action ids for each camera (and dome 899)
qry_result = cur.fetchone()
if qry_result is not None:
cams[int(qry_result[0])] = "action{}_{}".format(qry_result[1],
qry_result[2])
return cams
def makePngImages(cron_dir, web_dir, cam, thumbsize):
# get the last image
t = sorted(g.glob('*.fits'))
if len(t) > 0:
pngfile = "%s.png" % (t[-1])
print("PNG file to make is {}.png".format(t[-1]))
if pngfile not in os.listdir('{}/last_imgs/{}/'.format(cron_dir, cam)):
create_movie([t[-1]],
images_directory='{}/last_imgs/{}'.format(cron_dir, cam),
no_time_series=True,
include_increment=False,
clobber_images_directory=False,
resize_factor=4,
multiprocess=False)
here = os.getcwd()
os.chdir("{}/last_imgs/{}".format(cron_dir, cam))
print("Moving to {}/last_imgs/{}".format(cron_dir, cam))
# make a thumbnail
thumbfile = "{:s}.{:.2f}.png".format(t[-1], (thumbsize/100.))
os.system('/usr/local/bin/convert %s -resize %d%% %s' % (pngfile,
thumbsize,
thumbfile))
print("Making thumbnail {} --> {}".format(pngfile, thumbfile))
# rescale the png to make it smaller
os.system('/usr/local/bin/convert %s -resize 50%% %s' % (pngfile, pngfile))
print("Rescaling larger image %s by 50%%" % (pngfile))
try:
f = open('last_img.log').readline()
except IOError:
f = "XXX"
print("Last image: {}".format(f))
if f != pngfile:
os.system('cp {} {}/cam_{}.png'.format(pngfile, web_dir, cam))
print("Copying {} to {}/cam_{}.png".format(pngfile, web_dir, cam))
os.system('cp {} {}/cam_{}_s.png'.format(thumbfile, web_dir, cam))
print("Copying {} to {}/cam_{}_s.png".format(thumbfile, web_dir, cam))
f3 = open('last_img.log', 'w')
f3.write(pngfile)
f3.close()
print('last_img.log updated with {}'.format(pngfile))
else:
print("Last image already up to date, skipping...")
print('Last image up to date')
os.chdir(here)
print('Moving to {}'.format(here))
else:
print('{} exists already, skipping...'.format(pngfile))
else:
print("No new fits images to convert, skipping {}...".format(das[cam]))
if __name__ == "__main__":
# get the location of each camera/das
das = getDasLoc(das)
# check das machines are all online
checkDasMachinesAreOnline(das)
# get a list of last actions for each camera
cams = getLastActionIds(cams)
# now go into the top level directory
os.chdir(topdir)
# loop over each camera and make the pngs
for cam in cams:
if cams[cam] is not None and cam != 899 and das[cam] is not None:
try:
os.chdir("{}/{}".format(das[cam], cams[cam]))
print("Moving to {}/{}".format(das[cam], cams[cam]))
except OSError:
print('Folder {}/{} does not exist, skipping...'.format(das[cam], cams[cam]))
continue
makePngImages(cron_dir, web_dir, cam, thumbsize)
os.chdir(topdir)
print('Moving to {}'.format(topdir))
|
# pylint: disable=E1101,E1103,W0232
""" manage legacy pickle tests """
from datetime import datetime, timedelta
import operator
import pickle as pkl
import nose
import os
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from pandas import Index
from pandas.sparse.tests import test_sparse
from pandas import compat
from pandas.compat import u
from pandas.util.misc import is_little_endian
import pandas
def _read_pickle(vf, encoding=None, compat=False):
from pandas.compat import pickle_compat as pc
with open(vf,'rb') as fh:
pc.load(fh, encoding=encoding, compat=compat)
class TestPickle(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
from pandas.io.tests.generate_legacy_pickles import create_data
self.data = create_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, typ, result, expected):
if isinstance(expected,Index):
self.assertTrue(expected.equals(result))
return
if typ.startswith('sp_'):
comparator = getattr(test_sparse,"assert_%s_equal" % typ)
comparator(result,expected,exact_indices=False)
else:
comparator = getattr(tm,"assert_%s_equal" % typ)
comparator(result,expected)
def compare(self, vf):
# py3 compat when reading py2 pickle
try:
data = pandas.read_pickle(vf)
except (ValueError) as e:
if 'unsupported pickle protocol:' in str(e):
# trying to read a py3 pickle in py2
return
else:
raise
for typ, dv in data.items():
for dt, result in dv.items():
try:
expected = self.data[typ][dt]
except (KeyError):
continue
self.compare_element(typ, result, expected)
return data
def read_pickles(self, version):
if not is_little_endian():
raise nose.SkipTest("known failure on non-little endian")
pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
for f in os.listdir(pth):
vf = os.path.join(pth,f)
data = self.compare(vf)
if data is None:
continue
if 'series' in data:
if 'ts' in data['series']:
self._validate_timeseries(data['series']['ts'], self.data['series']['ts'])
def test_read_pickles_0_10_1(self):
self.read_pickles('0.10.1')
def test_read_pickles_0_11_0(self):
self.read_pickles('0.11.0')
def test_read_pickles_0_12_0(self):
self.read_pickles('0.12.0')
def test_read_pickles_0_13_0(self):
self.read_pickles('0.13.0')
def test_read_pickles_0_14_0(self):
self.read_pickles('0.14.0')
def test_round_trip_current(self):
for typ, dv in self.data.items():
for dt, expected in dv.items():
with tm.ensure_clean(self.path) as path:
pd.to_pickle(expected,path)
result = pd.read_pickle(path)
self.compare_element(typ, result, expected)
def _validate_timeseries(self, pickled, current):
# GH 7748
tm.assert_series_equal(pickled, current)
self.assertEqual(pickled.index.freq, current.index.freq)
self.assertEqual(pickled.index.freq.normalize, False)
self.assert_numpy_array_equal(pickled > 0, current > 0)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
|
"""
A few utility functions related to client windows. In
particular, getting an accurate geometry of a client window
including the decorations (this can vary with the window
manager). Also, a function to move and/or resize a window
accurately by the top-left corner. (Also can change based on
the currently running window manager.)
This module also contains a function 'listen' that must be used
in order to receive certain events from a window. For example,
if you wanted to run 'func' whenever a property on the root
window changed, you could do something like:
::
import xpybutil
import xpybutil.event as event
import xpybutil.ewmh as ewmh
import xpybutil.util as util
import xpybutil.window as window
def func(e):
if util.get_atom_name(e.atom) == '_NET_ACTIVE_WINDOW':
# Do something whenever the active window changes
active_window_id = ewmh.get_active_window().reply()
window.listen(xpybutil.root, 'PropertyChange')
event.connect('PropertyNotify', xpybutil.root, func)
The idea here is to tell X that you want events that fall under
the 'PropertyChange' category. Then you bind 'func' to the
particular event 'PropertyNotify'.
"""
from xpybutil.compat import xproto
from xpybutil import conn
import xpybutil.ewmh as ewmh
class WindowManagers(object):
"""
A list of window managers that xpybutil is aware of.
These mostly modify how we determine the size of a client. In particular,
KWin's decorations are in the parent of the parent of the client, whereas
in Openbox, they are simply in the parent of the client.
I am not sure whether I have plans to expand this list.
"""
Unknown = 0
Openbox = 1
KWin = 2
def listen(window, *event_mask_names):
"""
Makes X report events for the masks provided.
This function must be called in order to get X send you events about a
particular window. (i.e., it is not simply enough to call 'event.connect'.)
This page is required reading if you are to do any event processing:
http://tronche.com/gui/x/xlib/events/processing-overview.html
:param window: Window identifier.
:type window: int
:param event_mask_names: List of mask names.
:type event_mask_names: List of xcb.xproto.EventMask class variable names
:rtype: void
"""
masks = 0
for mask_name in event_mask_names:
assert hasattr(xproto.EventMask, mask_name)
masks |= getattr(xproto.EventMask, mask_name)
conn.core.ChangeWindowAttributesChecked(window, xproto.CW.EventMask,
[masks]).check()
def get_parent_window(window):
"""
Uses QueryTree() to find the parent of the given window.
:param window: Window identifier.
:return: Parent window identifier of 'window'.
:rtype: int
"""
return conn.core.QueryTree(window).reply().parent
def get_geometry(window, window_manager=None):
"""
Returns the geometry of a window. This function will do its best to get
the real geometry of a window; typically by inspecting the window's
decorations if there are any.
Since decorations are different for each window manager, you'll get the
best possible results by passing a supported window manager in.
:param window: Window identifier.
:param window_manager: A class variable from Window.WindowManagers
:type window_manager: int
:return: Real geometry of a client window starting from the top-left corner.
:rtype: (top_left_x, top_left_y, width, height)
"""
if window_manager is WindowManagers.KWin:
p = get_parent_window(window)
return __get_geometry(get_parent_window(p))
else:
return __get_geometry(get_parent_window(window))
def moveresize(win, x=None, y=None, w=None, h=None, window_manager=None):
"""
This function attempts to properly move/resize a window, accounting for
its decorations.
It doesn't rely upon _NET_FRAME_EXTENTS, but instead, uses the actual
parent window to adjust the width and height. (I've found _NET_FRAME_EXTENTS
to be wildly unreliable.)
:param win: Window identifier.
:param x: Top left x coordinate.
:param y: Top left y coordinate.
:param w: Client width.
:param h: Client height.
:param window_manager: A class variable from Window.WindowManagers
:type window_manager: int
:rtype: void
"""
if window_manager is WindowManagers.KWin:
tomove = get_parent_window(get_parent_window(win))
else:
tomove = get_parent_window(win)
if tomove:
cx, cy, cw, ch = __get_geometry(win)
px, py, pw, ph = __get_geometry(tomove)
w -= pw - cw
h -= ph - ch
ewmh.request_moveresize_window(win, x=x, y=y, width=max(1, w),
height=max(1, h), source=2)
def __get_geometry(win):
"""
Private function that abstracts the low level GetGeometry call.
If you're looking for the size of a window including its decorations,
please use ``window.get_geometry``.
:param win: Window identifier.
:return: X rectangle of the window.
:rtype: (x, y, width, height)
"""
raw = conn.core.GetGeometry(win).reply()
return raw.x, raw.y, raw.width, raw.height
|
from django.contrib.auth import login, get_user_model
from django.shortcuts import render, redirect
from users.forms import CustomUserCreationForm
def signup(request):
if request.method == 'POST':
form = CustomUserCreationForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect('index')
else:
form = CustomUserCreationForm()
context = {
'form': form
}
return render(request, 'users/signup.html', context)
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
shift, i = 0, 0
while i+shift < len(arr):
shift += int(arr[i] == 0)
i += 1
i -= 1
while shift:
if i+shift < len(arr):
arr[i+shift] = arr[i]
if arr[i] == 0:
shift -= 1
arr[i+shift] = arr[i]
i -= 1
|
class RootLogger:
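    # Presumably the terminal class of a cooperative (super()) mixin chain: each
    # method asserts that nothing further up the MRO defines the same hook, giving
    # subclasses a safe no-op fallback.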
def info(self, message: str):
assert not hasattr(super(), 'info')
def verbose(self, message: str, color: str):
assert not hasattr(super(), 'verbose')
def find(self, scanner: str, message: str, color:str):
assert not hasattr(super(), 'find')
def warn(self, message: str):
assert not hasattr(super(), 'warn')
def _is_verbose(self):
assert not hasattr(super(), '_is_verbose')
|
#!/usr/bin/python -S
"""
os_path_test.py: Tests for os_path.py
"""
from __future__ import print_function
import unittest
from pylib import os_path # module under test
class OsPathTest(unittest.TestCase):
def testPathExists(self):
self.assertEqual(True, os_path.exists('/'))
self.assertEqual(False, os_path.exists('/nonexistent__ZZZZ'))
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import tensorflow as tf
from imutils import paths
import cv2
from subprocess import PIPE, Popen
"""
This is the quantization script for converting the float32 TFLite model to an int8 model.
I use the original dataset by Wang Yifan for calibration purposes: https://drive.google.com/drive/folders/1w-zoOuSauw5xBhOrfZpjcglvDH14mlGu.
The calibration dataset is not included in this repo; please see Wang Yifan's issue pages for further information:
https://github.com/wangyifan411/Face-Mask-Type-Detector/issues/1
FYI:
The model used by Wang Yifan is SSD_Mobilenet_V2_COCO, whose input size is 300x300.
You can find more information about the size, batch_size, etc. of the pre-trained model in the TensorFlow 1 object detection model zoo.
This repo uses TensorFlow 1.
"""
# normH, normW = (300, 300)
# BATCH_SIZE = 24
script = [
"tflite_convert",
"--output_file=./TFLite_model/detect_model_quant.tflite",
"--graph_def_file=./TFLite_model/tflite_graph.pb",
"--input_format=TENSORFLOW_GRAPHDEF",
"--output_format=TFLITE",
"--input_shapes=1,300,300,3",
"--input_arrays=normalized_input_image_tensor",
"--output_arrays=TFLite_Detection_PostProcess,TFLite_Detection_PostProcess:1,TFLite_Detection_PostProcess:2,TFLite_Detection_PostProcess:3",
"--inference_type=QUANTIZED_UINT8",
"--std_dev_values=128",
"--mean_values=128",
"--change_concat_input_ranges=false",
"--allow_custom_ops",
]
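# With --inference_type=QUANTIZED_UINT8 the converter expects uint8 input;
# mean_values=128 and std_dev_values=128 map raw pixels [0, 255] to roughly
# [-1, 1] inside the graph (real_value = (quantized - mean) / std_dev).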
output = Popen(script, stdout=PIPE, stderr=PIPE)
stdout, stderr = output.communicate()
print(stdout)
print(stderr)
# def representative_dataset_gen():
# ds = []
# for imgPath in paths.list_images("./dataset_quantize"):
# img = cv2.imread(imgPath)
# img = cv2.resize(img, (normH, normW))
# img = img / 128.0
# img = img.astype(np.float32)
# ds.append(img)
# ds = np.array(ds)
# print(f"[INFO] representative shape {ds.shape}")
# images = tf.data.Dataset.from_tensor_slices(ds).batch(1)
# for image in images.take(BATCH_SIZE):
# yield [image]
# # Convert
# converter = tf.lite.TFLiteConverter.from_frozen_graph("TFLite_model/tflite_graph.pb")
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.representative_dataset = representative_dataset_gen
# converter.target_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# converter.inference_input_type = [tf.uint8]
# converter.inference_output_type = [tf.uint8]
# tflite_quant_model = converter.convert()
# print("[INFO] Successfully convert")
# # Save
# model_dir = "./TFLite_model/detect_model_quant.tflite"
# model_dir.write_bytes(tflite_quant_model)
# print("[INFO] Successfully saved")
|
#
# It is possible to log HART transactions.
# For example log a bursting device or log transactions with other masters
# by connecting a second HART modem.
#
#
# Standard import. Append the path of PyHART. Since this file is in the folder PyHART_tutorial,
# just go back one folder.
#
import sys
sys.path.append('../')
from PyHART.COMMUNICATION.CommCore import *
from PyHART.COMMUNICATION.Utils import *
from PyHART.COMMUNICATION.Common import *
from time import sleep
#
# Procedure to list communication ports
#
count, listOfComPorts = ListCOMPort(True)
comport = None
selection = 0
while (comport == None) and (selection != (count + 1)):
print ('\nSelect the communication port.')
    print('Enter the number of your choice and press Enter.')
try:
selection = int(input())
except:
selection = 0
if (selection == (count + 1)):
print('Leaving application...')
sys.exit()
comport = GetCOMPort(selection, listOfComPorts)
#
# Instantiates and starts the communication object
# Here it is very important to define a log file and ensure that
# 'autoPrintTransactions' is set to 'True' and that 'whereToPrint' is set
# to 'CommCore.WhereToPrint.BOTH' (Terminal and file) or
# 'CommCore.WhereToPrint.FILE' (log only in the log file).
#
hart = HartMaster(comport, \
MASTER_TYPE.PRIMARY, \
num_retry = 2, \
retriesOnPolling = False, \
autoPrintTransactions = True, \
whereToPrint = WhereToPrint.BOTH, \
logFile = 'terminalLog.log', \
rt_os = False, \
manageRtsCts = None)
hart.Start()
while True:
    sleep(5)  # give other threads time to run
    # @TODO
    # To interrupt this infinite loop you would need to implement a 'read keypress'
    # mechanism, e.g. break out of the loop when the user presses Enter.
#
# Kills all threads
#
hart.Stop()
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import intrinsics
from analysis.storegraph.canonicalobjects import FieldSlotName
class IOTreeObj(object):
def __init__(self, path, impl, treetype, parent=None):
self.parent = parent
self.path = path
self.treetype = treetype
self.objMasks = {}
self.fields = {}
self.builtin = False
self.name = None
self.impl = impl
self.link = None
def names(self):
if self.isField():
return self.getFieldSlots()
elif self.impl:
return (self.impl,)
else:
return ()
def isField(self):
return isinstance(self.impl, FieldSlotName)
def getFieldSlots(self):
assert self.parent
slots = []
for obj in self.parent.objMasks.iterkeys():
if self.impl in obj.slots:
slots.append(obj.slots[self.impl])
return slots
def getField(self, field):
if not field in self.fields:
slot = IOTreeObj(self.path + (field,), field, self.treetype, self)
self.fields[field] = slot
else:
slot = self.fields[field]
return slot
def match(self, matcher):
if isinstance(matcher, dict):
for field, child in self.fields.iteritems():
key = field.type, field.name
if key in matcher:
child.match(matcher[key])
else:
self.name = matcher
self.builtin = True
def buildImplementationLUT(self, lut):
if not self.isField() and self.impl:
assert self.impl not in lut, self.impl
lut[self.impl] = self
for child in self.fields.itervalues():
child.buildImplementationLUT(lut)
def makeLinks(self, other, uid):
print "LINK", self.path, other.path
self.link = other
other.link = self
name = "vs2fs_%d" % uid
uid += 1
self.name = name
other.name = name
for field, child in self.fields.iteritems():
if field in other.fields:
uid = child.makeLinks(other.fields[field], uid)
return uid
def unlink(self):
if self.link:
self.link.link = None
self.link = None
def localClone(self, parent):
print "local clone", self.path
result = IOTreeObj(self.path, self.impl, self.treetype, parent)
result.name = self.name
result.builtin = self.builtin
for k, v in self.objMasks.iteritems():
result.objMasks[k] = v
return result
def clone(self, parent):
result = self.localClone(parent)
for field, child in self.fields.iteritems():
result.fields[field] = child.clone(result)
return result
def merge(self, other, parent):
result = self.localClone(parent)
        # Weird: the trees will have entirely different sets of object names!
for k, v in other.objMasks.iteritems():
result.objMasks[k] = v
fields = set()
fields.update(self.fields.iterkeys())
fields.update(other.fields.iterkeys())
for field in fields:
if field in self.fields and field in other.fields:
child = self.fields[field].merge(other.fields[field], result)
elif field in self.fields:
child = self.fields[field].clone(result)
else:
child = other.fields[field].clone(result)
result.fields[field] = child
return result
# Give the tree default names
def nameTree(self, name, uid=0):
if not self.name:
nodename = "%s_%d" % (name, uid)
uid += 1
self.name = nodename
for child in self.fields.itervalues():
uid = child.nameTree(name, uid)
return uid
# Give matching nodes the same name
# Used so the corresponding uniforms of two shaders are named the same.
def harmonize(self, other, name, uid=0):
nodename = "%s_%d" % (name, uid)
uid += 1
self.name = nodename
other.name = nodename
# Recurse into matching children.
for field, child in self.fields.iteritems():
otherchild = other.fields.get(field)
if otherchild is not None:
print "HARMONIZE", field
uid = child.harmonize(otherchild, name, uid)
return uid
def _dump(self):
print self.path
print self.objMasks
for next in self.fields.itervalues():
next._dump()
def dump(self, name):
print
print name
print self.treetype
self._dump()
print
def handleObj(dioa, obj, lut, exist, mask, tobj):
# Does this field actually exist?
if mask is dioa.bool.false: return
# Accumulate the mask
oldmask = tobj.objMasks.get(obj, dioa.bool.false)
objmask = dioa.bool.or_(oldmask, mask)
tobj.objMasks[obj] = dioa.set.simplify(exist, objmask, dioa.set.empty)
# Recurse into each of the object's fields
fieldLUT = obj.slots
for name, field in fieldLUT.iteritems():
        # Don't add intrinsic fields to the tree
        #if intrinsics.isIntrinsicField(name): continue
        # Don't add unused fields to the tree
if field not in lut: continue
# Handle the contents of the field.
ctree = lut[field].annotation.values.correlated
child = tobj.getField(name)
handleCTree(dioa, ctree, lut, exist, mask, child)
def handleCTree(dioa, ctree, lut, exist, mask, tobj):
ctree = dioa.set.simplify(mask, ctree, dioa.set.empty)
flat = dioa.set.flatten(ctree)
for obj in flat:
# For each possible object, produce a correlated mask
objleaf = dioa.set.leaf((obj,))
omask = dioa.bool.in_(objleaf, ctree)
omask = dioa.bool.and_(mask, omask)
# Recurse
handleObj(dioa, obj, lut, exist, omask, tobj)
# Used for getting the context object.
def getSingleObject(dioa, lut, lcl):
node = lut[lcl]
flat = node.annotation.values.flat
assert len(flat) == 1
return tuple(flat)[0]
def evaluateContextObject(dioa, lut, exist, lcl, obj, treetype):
tobj = IOTreeObj(('context',), lcl, treetype)
mask = dioa.bool.true
handleObj(dioa, obj, lut, exist, mask, tobj)
if False: tobj.dump('context')
return tobj
def evaluateLocal(dioa, lut, exist, lcl, treetype):
assert lcl is not None, lcl
# The local may be unused, but still a formal parameter
if lcl.isDoNotCare() or lcl not in lut:
return IOTreeObj((), None, treetype)
node = lut[lcl]
# The correlated tree
ctree = node.annotation.values.correlated
tobj = IOTreeObj((lcl,), lcl, treetype)
handleCTree(dioa, ctree, lut, exist, dioa.bool.true, tobj)
if False: tobj.dump(lcl)
return tobj
|
#!/usr/bin/env python3
# encoding: utf-8
# @Time : 2018/8/2 上午11:48
# @Author : yuchangqian
# @Contact : changqian_yu@163.com
# @File : logger.py
import os
import sys
import logging
# from utils import pyt_utils
# from utils.pyt_utils import ensure_dir
_default_level_name = os.getenv('ENGINE_LOGGING_LEVEL', 'INFO')
_default_level = logging.getLevelName(_default_level_name.upper())
# Class for managing display information formatting
# and save information into log file
class LogFormatter(logging.Formatter):
log_fout = None
date_full = '[%(asctime)s %(lineno)d@%(filename)s:%(name)s] '
date = '%(asctime)s '
msg = '%(message)s'
def format(self, record):
if record.levelno == logging.DEBUG:
mcl, mtxt = self._color_dbg, 'DBG'
elif record.levelno == logging.WARNING:
mcl, mtxt = self._color_warn, 'WRN'
elif record.levelno == logging.ERROR:
mcl, mtxt = self._color_err, 'ERR'
else:
mcl, mtxt = self._color_normal, ''
if mtxt:
mtxt += ' '
if self.log_fout:
self.__set_fmt(self.date_full + mtxt + self.msg)
formatted = super(LogFormatter, self).format(record)
# self.log_fout.write(formatted)
# self.log_fout.write('\n')
# self.log_fout.flush()
return formatted
self.__set_fmt(self._color_date(self.date) + mcl(mtxt + self.msg))
formatted = super(LogFormatter, self).format(record)
return formatted
if sys.version_info.major < 3:
def __set_fmt(self, fmt):
self._fmt = fmt
else:
def __set_fmt(self, fmt):
self._style._fmt = fmt
@staticmethod
def _color_dbg(msg):
return '\x1b[36m{}\x1b[0m'.format(msg)
@staticmethod
def _color_warn(msg):
return '\x1b[1;31m{}\x1b[0m'.format(msg)
@staticmethod
def _color_err(msg):
return '\x1b[1;4;31m{}\x1b[0m'.format(msg)
@staticmethod
def _color_omitted(msg):
return '\x1b[35m{}\x1b[0m'.format(msg)
@staticmethod
def _color_normal(msg):
return msg
@staticmethod
def _color_date(msg):
return '\x1b[32m{}\x1b[0m'.format(msg)
def ensure_dir(path):
if not os.path.isdir(path):
os.makedirs(path)
def get_logger(log_dir=None, log_file=None, formatter=LogFormatter):
logger = logging.getLogger()
logger.setLevel(_default_level)
del logger.handlers[:]
if log_dir and log_file:
ensure_dir(log_dir)
LogFormatter.log_fout = True
file_handler = logging.FileHandler(log_file, mode='a')
file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter())  # instantiate the formatter class before attaching
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter(datefmt='%d %H:%M:%S'))
stream_handler.setLevel(0)
logger.addHandler(stream_handler)
return logger
|
import asyncio
import aiohttp
import time
import logging
from selenium import webdriver
from selenium.webdriver.firefox import options
from selenium.common.exceptions import NoSuchElementException
try:
import uvloop
uvloop.install()
except ImportError:
pass
logger = logging.getLogger("render-engine")
logging.basicConfig(level=logging.INFO)
target_url = "http://auto-render:8000/rendered/{}"
options = options.Options()
options.headless = True
driver = webdriver.Firefox(options=options)
def get_html(render_id):
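    # Drive the shared headless Firefox instance to the render page and return a
    # base64-encoded screenshot of the element with id="render".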
start = time.perf_counter()
driver.get(target_url.format(render_id))
resp = driver.find_element_by_id("render").screenshot_as_base64
stop = time.perf_counter() - start
logger.info(f"render took {stop * 1000}ms to render")
return resp
async def main():
loop = asyncio.get_running_loop()
async with aiohttp.ClientSession() as sess:
ws = await sess.ws_connect("ws://auto-render:8000/worker")
while not ws.closed:
msg = await ws.receive_json()
render_id = msg['id']
try:
bs64 = await loop.run_in_executor(None, get_html, render_id)
except NoSuchElementException:
bs64 = None
data = {
"id": render_id,
"render": bs64
}
await ws.send_json(data)
if __name__ == "__main__":
asyncio.run(main())
|
import re
import json
import subprocess
def run_shell_command(command, cwd=None, env=None, shell_mode=False):
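    """Run a command with subprocess and return (stdout, stderr).

    stdout is decoded and parsed as JSON when possible, otherwise returned as
    plain text; a non-zero exit code raises an Exception containing stderr.
    """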
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell_mode,
cwd=cwd,
env=env,
)
stdout, stderr = proc.communicate()
if proc.returncode == 0:
try:
return json.loads(stdout.decode("utf-8")), stderr.decode("utf-8")
except json.JSONDecodeError:
return stdout.decode("utf-8"), stderr.decode("utf-8")
else:
raise Exception(
f'Failed to run command {" ".join(command)}: {stderr.decode("utf-8")}'
)
def get_configuration_value(config_file):
with open(config_file, "r") as file:
configuration = json.loads(file.read())
return configuration
def generate_heroku_app_name(deployment_name):
    # Heroku app names must start with a letter, end with a letter or digit,
    # can only contain lowercase letters, digits, and dashes, and are limited
    # to 30 characters.
app_name = f"btml-{deployment_name}"[:30]
invalid_chars = re.compile("[^a-zA-Z0-9-]")
app_name = re.sub(invalid_chars, "-", app_name)
return app_name.lower()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from .Contract import *
from .Receivable import *
from .Receipt import *
from .Shop import *
from .Statement import *
from .Application import *
|
'''
Maximize Distance to Closest Person

You are given an array seats where seats[i] = 1 means someone is sitting in seat i
and seats[i] = 0 means seat i is empty (0-indexed).
There is at least one empty seat and at least one occupied seat.
Alex wants to sit in a seat that maximizes the distance to the closest person.
Return that maximum distance to the closest person.

Example 1:
Input: seats = [1,0,0,0,1,0,1]
Output: 2
Explanation:
If Alex sits in the second open seat (seats[2]), the closest person is 2 seats away.
If Alex sits in any other open seat, the closest person is 1 seat away.
So the maximum distance to the closest person is 2.

Example 2:
Input: seats = [1,0,0,0]
Output: 3
Explanation:
If Alex sits in the last seat, the closest person is 3 seats away.
That is the largest possible distance, so the answer is 3.

Example 3:
Input: seats = [0,1]
Output: 1

Constraints:
2 <= seats.length <= 2 * 10^4
seats[i] is 0 or 1
There is at least one empty seat
There is at least one occupied seat
'''
from typing import List
'''
Approach: greedy single pass
Find the largest gap between two occupied seats and return half of that gap.
For a gap at the very beginning or the very end, the full gap counts (no halving).
Time complexity: O(n)
Space complexity: O(1)
'''
class Solution:
def maxDistToClosest(self, seats: List[int]) -> int:
maxRange = 0
preOneIdx = -1
n = len(seats)
for i in range(n):
if seats[i]:
if (i - preOneIdx) // 2 > maxRange:
                    if preOneIdx == -1:  # leading gap (no one to the left), so the distance is not halved
maxRange = i
else:
maxRange = (i - preOneIdx) // 2
preOneIdx = i
if n - 1 - preOneIdx > maxRange:
maxRange = n - 1 - preOneIdx
return maxRange
s = Solution()
print(s.maxDistToClosest(seats=[0, 0, 0, 1]))
print(s.maxDistToClosest(seats=[1, 0, 0, 0, 1, 0, 1]))
print(s.maxDistToClosest(seats=[1, 0, 0, 0]))
print(s.maxDistToClosest(seats=[0, 1]))
|
# Add a new Autobot to the dictionary from exercise 1: "Сентінел Прайм" (Sentinel Prime) - "Пожежна машина" (fire truck).
# Print the new dictionary to the console.
carsdict = {
"Оптімус Прайм" : "Грузовик Peterbilt 379",
"Бамблбі" : "Chevrolet Camaro",
"Джаз" : "Porsche 935 Turbo"
}
carsdict["Сентінел Прайм"] = "Пожежна машина"
print(carsdict)
|
from dependency_injector import containers, providers
from fdap.app.kiwoom.kiwoom_service import KiwoomService
from fdap.app.opendart.opendart_service import OpenDartService
from fdap.app.refine.refine import Refine
from fdap.database.database import db_session
from fdap.app.repositories.post_repository import PostsRepository
from fdap.app.tistory.tistory_client import TistoryClient, LoginInfo
from fdap.app.autopost.autopost import AutoPost
class Container(containers.DeclarativeContainer):
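    # Declarative DI container: each providers.Singleton builds its object lazily on
    # first access and reuses it afterwards, with settings read from the
    # Configuration provider that the application fills in at startup.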
config = providers.Configuration()
# database
database = providers.Singleton(db_session)
# Services
kiwoom_service = providers.Singleton(
KiwoomService,
_id=config.koapy.account.id,
password=config.koapy.account.password
)
opendart_service = providers.Singleton(
OpenDartService,
url=config.opendart.api.url,
api_key=config.opendart.api.api_key
)
refine = providers.Singleton(
Refine
)
post_repository = providers.Singleton(
PostsRepository,
session=database
)
tistory_login_info = providers.Singleton(
LoginInfo,
client_id=config.tistory.api.client_id,
client_secret=config.tistory.api.client_secret,
redirect_uri=config.tistory.api.redirect_uri,
response_type=config.tistory.api.response_type,
login_id=config.tistory.kakao.id,
login_password=config.tistory.password,
state=config.tistory.api.state
)
tistory_client = providers.Singleton(
TistoryClient,
host=config.tistory.api.url,
config=config.tistory
)
auto_post = providers.Singleton(
AutoPost,
kiwoom=kiwoom_service,
opendart=opendart_service,
refine=refine,
tistory=tistory_client,
repo=post_repository
)
|
"""
Split entire data set into train/test in balanced way of labels
"""
import os
import numpy as np
import json
import random
from utils.misc.json import write_json_line_by_line
def count_total(entire_data):
"""
    Count how many lines (records) the given JSON-lines file holds.
"""
num_lines = sum(1 for line in open(entire_data))
return num_lines
def split(entire_data, test_mask):
count = 0
train_dicts = []
test_dicts = []
with open(entire_data) as fin:
for each_line in fin:
each_line = json.loads(each_line)
if count in test_mask:
test_dicts.append(each_line)
else:
train_dicts.append(each_line)
count += 1
return train_dicts, test_dicts
def locate_label_one_and_zero(json_file):
with open(json_file) as fin:
count = 0
label_one = 0
label_one_idx = []
label_zero_idxs = []
label_one_idxs = []
for line_idx, each_line in enumerate(fin):
count += 1
data = json.loads(each_line)
# print(data)
# print(data.keys())
if data['label'] == [1]:
label_one += 1
label_one_idxs.append(line_idx)
else:
label_zero_idxs.append(line_idx)
print("num total test data: {}".format(count))
print("num total label [1]: {}".format(label_one))
return label_one_idxs, label_zero_idxs
def split_random_w_ratio(mylist, ratio):
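    # Randomly split mylist into two lists holding roughly (1 - ratio) and ratio of
    # the items, preserving the original relative order in both halves.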
len_mylist = len(mylist)
mylist = np.array(mylist)
mask = sorted(random.sample(range(len_mylist),
int(len_mylist * ratio)))
not_mask = [i for i in range(len_mylist) if i not in mask]
l1 = mylist[not_mask]
l2 = mylist[mask]
return list(l1), list(l2)
if __name__ == "__main__":
entire_data_path = "../test_data.json"
one_idxs, zero_idxs = locate_label_one_and_zero(entire_data_path)
print(one_idxs)
print(len(one_idxs))
print(len(zero_idxs))
# entire_data_path = "entire_one_label.json"
# one_idxs, zero_idxs = locate_label_one_and_zero(entire_data_path)
# print(one_idxs)
# print(len(one_idxs))
# print(len(zero_idxs))
#
# ratio = 0.2
# mask_ones_train, mask_ones_test = split_random_w_ratio(one_idxs, ratio)
# print(len(mask_ones_train))
# print(len(mask_ones_test))
# mask_zeros_train, mask_zeros_test = split_random_w_ratio(
# zero_idxs, ratio)
# print(len(mask_zeros_train))
# print(len(mask_zeros_test))
#
# mask_train = mask_ones_train + mask_zeros_train
# print(mask_train)
# mask_test = mask_ones_test + mask_zeros_test
#
# print(len(mask_train))
# print(len(mask_test))
#
# assert len(set(mask_train).intersection(set(mask_test))) == 0
#
# train, test = split(entire_data_path, mask_test)
#
# # Delete pre-exist data (because of 'a', this process required)
# write_train_data_path = "../train_data.json"
# write_test_data_path = "../test_data.json"
#
# if os.path.isfile(write_train_data_path):
# os.remove(write_train_data_path)
# if os.path.isfile(write_test_data_path):
# os.remove(write_test_data_path)
#
# write_json_line_by_line(train, "../train_data.json")
# write_json_line_by_line(test, "../test_data.json")
#
# print(count_total("../train_data.json"))
# print(count_total("../test_data.json"))
pass
|
#!/usr/bin/env python2.6
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import argparse
import collections
import sys
from collections import OrderedDict
from awsbatch.common import AWSBatchCliConfig, Boto3ClientFactory, Output, config_logger
from awsbatch.utils import fail, convert_to_date, shell_join, is_job_array, get_job_definition_name_by_arn
from builtins import range
def _get_parser():
    """
    Create and return the ArgumentParser object.
    If the command is executed without the --cluster parameter, the command will use the default cluster_name
    specified in the [main] section of the user's awsbatch-cli.cfg configuration file and will search
    for the [cluster cluster-name] section; if that section doesn't exist, it will query CloudFormation
    for the required information.
    If the --cluster parameter is set, the command will search for the [cluster cluster-name] section
    in the user's awsbatch-cli.cfg configuration file or, if the file doesn't exist, it will query CloudFormation
    for the required information.
    :return: the ArgumentParser object
    """
parser = argparse.ArgumentParser(description='Shows the jobs submitted in the cluster\'s Job Queue.')
parser.add_argument('-c', '--cluster', help='Cluster to use')
    parser.add_argument('-s', '--status', help='Comma-separated list of job statuses to request, defaults to "active" jobs. '
                                               'Accepted values are: SUBMITTED, PENDING, RUNNABLE, STARTING, RUNNING, '
                                               'SUCCEEDED, FAILED, ALL',
                        default='SUBMITTED,PENDING,RUNNABLE,STARTING,RUNNING')
parser.add_argument('-e', '--expand-arrays', help='Expand job arrays', action='store_true')
parser.add_argument('-d', '--details', help='Show jobs details', action='store_true')
parser.add_argument('-ll', '--log-level', help=argparse.SUPPRESS, default='ERROR')
parser.add_argument('job_ids', help='A space separated list of job IDs to show in the output. If the job is a '
'job array, all the children will be displayed. If a single job is requested '
'it will be shown in a detailed version', nargs='*')
return parser
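# Illustrative invocations (a sketch of the interface defined above; the cluster
# name and job id placeholders are hypothetical):
#   awsbstat -c mycluster                      # "active" jobs in the cluster's job queue
#   awsbstat -c mycluster -s SUCCEEDED,FAILED  # finished jobs
#   awsbstat -c mycluster -d <job-id>          # detailed view of a single job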
def _compose_log_stream_url(region, log_stream):
"""
Create logStream url
:param region: the region on which the job has been submitted
:param log_stream: the log stream name
:return: an url
"""
domain = 'amazonaws-us-gov' if region == 'us-gov-west-1' else 'aws'
return "https://console.{0}.amazon.com/cloudwatch/home?" \
"region={1}#logEventViewer:group=/aws/batch/job;stream={2}".format(domain, region, log_stream)
class Job(object):
"""
Generic job object.
"""
def __init__(self, job_id, name, creation_time, start_time, stop_time, status, status_reason, job_definition,
queue, command, reason, exit_code, vcpus, memory, nodes, log_stream, log_stream_url):
self.id = job_id
self.name = name
self.creation_time = creation_time
self.start_time = start_time
self.stop_time = stop_time
self.status = status
self.status_reason = status_reason
self.job_definition = job_definition
self.queue = queue
self.command = command
self.reason = reason
self.exit_code = exit_code
self.vcpus = vcpus
self.memory = memory
self.nodes = nodes
self.log_stream = log_stream
self.log_stream_url = log_stream_url
class AWSBstatCommand(object):
"""
awsbstat command
"""
def __init__(self, log, boto3_factory):
"""
:param log: log
:param boto3_factory: an initialized Boto3ClientFactory object
"""
self.log = log
mapping = collections.OrderedDict([
('jobId', 'id'),
('jobName', 'name'),
('createdAt', 'creation_time'),
('startedAt', 'start_time'),
('stoppedAt', 'stop_time'),
('status', 'status'),
('statusReason', 'status_reason'),
('jobDefinition', 'job_definition'),
('jobQueue', 'queue'),
('command', 'command'),
('exitCode', 'exit_code'),
('reason', 'reason'),
('vcpus', 'vcpus'),
('memory[MB]', 'memory'),
('nodes', 'nodes'),
('logStream', 'log_stream'),
('log', 'log_stream_url')
])
self.output = Output(mapping=mapping)
self.boto3_factory = boto3_factory
self.batch_client = boto3_factory.get_client('batch')
def run(self, job_status, expand_arrays, job_queue=None, job_ids=None, show_details=False):
"""
        Print the list of jobs, filtered by queue or by job ids.
"""
if job_ids:
self.__populate_output_by_job_ids(job_status, job_ids, show_details or len(job_ids) == 1)
# explicitly asking for job details,
# or asking for a single job that is not an array (the output is not a list of jobs)
details_required = show_details or (len(job_ids) == 1 and self.output.length() == 1)
elif job_queue:
self.__populate_output_by_queue(job_queue, job_status, expand_arrays, show_details)
details_required = show_details
else:
fail("Error listing jobs from AWS Batch. job_ids or job_queue must be defined")
if details_required:
self.output.show()
else:
self.output.show_table(['jobId', 'jobName', 'status', 'startedAt', 'stoppedAt', 'exitCode'])
def __populate_output_by_job_ids(self, job_status, job_ids, details):
"""
Add Job item or jobs array children to the output
:param job_status: list of job status to ask
:param job_ids: job ids or ARNs
:param details: ask for job details
"""
try:
if job_ids:
self.log.info("Describing jobs (%s), details (%s)" % (job_ids, details))
single_jobs = []
job_array_ids = []
jobs = self.batch_client.describe_jobs(jobs=job_ids)['jobs']
for job in jobs:
if is_job_array(job):
job_array_ids.append(job['jobId'])
else:
single_jobs.append(job)
# create output items for job array children
self.__populate_output_by_array_ids(job_status, job_array_ids, details)
# add single jobs to the output
self.__add_jobs(single_jobs, details)
except Exception as e:
fail("Error describing jobs from AWS Batch. Failed with exception: %s" % e)
def __populate_output_by_array_ids(self, job_status, job_array_ids, details):
"""
Add jobs array children to the output
:param job_status: list of job status to ask
:param job_array_ids: job array ids to ask
:param details: ask for job details
"""
try:
for job_array_id in job_array_ids:
for status in job_status:
self.log.info("Listing job array children for job (%s) in status (%s)" % (job_array_id, status))
next_token = ''
while next_token is not None:
response = self.batch_client.list_jobs(jobStatus=status, arrayJobId=job_array_id,
nextToken=next_token)
# add single jobs to the output
self.__add_jobs(response['jobSummaryList'], details)
next_token = response.get('nextToken')
except Exception as e:
fail("Error listing job array children for job (%s). Failed with exception: %s" % (job_array_id, e))
def __add_jobs(self, jobs, details):
"""
Get job info from AWS Batch and add to the output
:param jobs: list of jobs items (output of the list_jobs function)
:param details: ask for job details
"""
try:
if jobs:
self.log.debug("Adding jobs to the output (%s)" % jobs)
if details:
self.log.info("Asking for jobs details")
jobs_to_show = []
for index in range(0, len(jobs), 100):
jobs_chunk = jobs[index:index + 100]
job_ids = []
for job in jobs_chunk:
job_ids.append(job['jobId'])
jobs_to_show.extend(self.batch_client.describe_jobs(jobs=job_ids)['jobs'])
else:
jobs_to_show = jobs
for job in jobs_to_show:
nodes = 1
if 'nodeProperties' in job:
# MNP job
container = job['nodeProperties']['nodeRangeProperties'][0]['container']
nodes = job['nodeProperties']['numNodes']
elif 'container' in job:
container = job['container']
else:
container = {}
if is_job_array(job):
# parent job array
job_id = '{0}[{1}]'.format(job['jobId'], job['arrayProperties']['size'])
log_stream = '-'
log_stream_url = '-'
else:
job_id = job['jobId']
if 'logStreamName' in container:
log_stream = container.get('logStreamName')
log_stream_url = _compose_log_stream_url(self.boto3_factory.region, log_stream)
else:
log_stream = '-'
log_stream_url = '-'
command = container.get('command', [])
self.log.debug("Adding job to the output (%s)", job)
job = Job(job_id=job_id,
name=job['jobName'],
creation_time=convert_to_date(job['createdAt']),
start_time=convert_to_date(job['startedAt']) if 'startedAt' in job else '-',
stop_time=convert_to_date(job['stoppedAt']) if 'stoppedAt' in job else '-',
status=job.get('status', 'UNKNOWN'),
status_reason=job.get('statusReason', '-'),
                              job_definition=get_job_definition_name_by_arn(job['jobDefinition'], version=True)
                              if 'jobDefinition' in job else '-',
queue=job['jobQueue'].split('/')[1] if 'jobQueue' in job else '-',
command=shell_join(command) if command else '-',
reason=container.get('reason', '-'),
exit_code=container.get('exitCode', '-'),
vcpus=container.get('vcpus', '-'),
memory=container.get('memory', '-'),
nodes=nodes,
log_stream=log_stream,
log_stream_url=log_stream_url)
self.output.add(job)
except KeyError as e:
fail("Error building Job item. Key (%s) not found." % e)
except Exception as e:
fail("Error adding jobs to the output. Failed with exception: %s" % e)
def __populate_output_by_queue(self, job_queue, job_status, expand_arrays, details):
"""
Add Job items to the output asking for given queue and status
:param job_queue: job queue name or ARN
:param job_status: list of job status to ask
:param expand_arrays: if True, the job array will be expanded by creating a row for each child
:param details: ask for job details
"""
try:
for status in job_status:
next_token = ''
while next_token is not None:
response = self.batch_client.list_jobs(jobStatus=status, jobQueue=job_queue, nextToken=next_token)
single_jobs = []
job_array_ids = []
for job in response['jobSummaryList']:
if is_job_array(job) and expand_arrays is True:
job_array_ids.append(job['jobId'])
else:
single_jobs.append(job)
# create output items for job array children
self.__populate_output_by_job_ids(job_status, job_array_ids, details)
# add single jobs to the output
self.__add_jobs(single_jobs, details)
next_token = response.get('nextToken')
except Exception as e:
fail("Error listing jobs from AWS Batch. Failed with exception: %s" % e)
def main():
aws_batch_job_status = ['SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING', 'RUNNING', 'SUCCEEDED', 'FAILED']
try:
# parse input parameters and config file
args = _get_parser().parse_args()
log = config_logger(args.log_level)
log.info("Input parameters: %s" % args)
config = AWSBatchCliConfig(log=log, cluster=args.cluster)
boto3_factory = Boto3ClientFactory(region=config.region, proxy=config.proxy,
aws_access_key_id=config.aws_access_key_id,
aws_secret_access_key=config.aws_secret_access_key)
job_status_set = OrderedDict((status.strip().upper(), '') for status in args.status.split(','))
if 'ALL' in job_status_set:
# add all the statuses in the list
job_status_set = OrderedDict((status, '') for status in aws_batch_job_status)
job_status = list(job_status_set)
AWSBstatCommand(log, boto3_factory).run(job_status=job_status, expand_arrays=args.expand_arrays,
job_ids=args.job_ids, job_queue=config.job_queue,
show_details=args.details)
except KeyboardInterrupt:
print("Exiting...")
sys.exit(0)
except Exception as e:
fail("Unexpected error. Command failed with exception: %s" % e)
if __name__ == '__main__':
main()
|
import nltk
class EntityExtractor:
def __init__(self):
pass
def get_people(self):
pass
def get_organizations(self):
pass
def get_money(self, taggedText):
#Pattern = $3.2 billion or 50 billion dollars or 50 Dollars
pattern = """
MONEY:
{<\$>?<CD>+<NNP|NNS|NN>?}
"""
MoneyChunker = nltk.RegexpParser(pattern)
result = MoneyChunker.parse(taggedText)
return result
def get_date_time(self):
pass
def get_percentage(self):
pass
def get_places(self):
pass
def main():
text = "I sold my company worth $3.4 Million to some other company for thirty four thousand dollars in yen. I am a rich man now."
from nltk.tokenize import word_tokenize
posTagger = nltk.data.load('taggers/maxent_treebank_pos_tagger/english.pickle')
tokens = word_tokenize(text)
posTags = posTagger.tag(tokens)
ee = EntityExtractor()
    print(ee.get_money(posTags))
# print nltk.chunk.util.ieerstr2tree(text, chunk_types=['LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'])
if __name__ == '__main__':
main()
|
import numpy as np
import typing as tg
import sklearn.cluster as sklc
import scipy.spatial.distance as spsd
import scipy.signal as spsn
import heapq as hq
import matplotlib.pyplot as plt
def SimpleHierachicalCluster(X: np.ndarray, weight: tg.Optional[np.ndarray]=None) -> tg.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """My version of hierarchical clustering: handles a small number of samples and makes the hierarchical tree easy to inspect.
    Running time: O(N^2*D), N is n_samples, D is n_features
    """
n_samples, n_features = X.shape
if weight is None:
weights = np.ones(n_samples, dtype=int)
else:
weights = weight.copy()
hierachical = np.zeros((n_samples - 1, 3), dtype=int) # each row: (index1: int, index2:int, index_new:int)
distances = np.zeros(n_samples - 1)
output_index = 0
nodes = X.copy()
remaining_indexes = set(range(n_samples))
distance_heap = []
calculated = set()
for i in remaining_indexes:
calculated.add(i)
for j in remaining_indexes - calculated:
hq.heappush(distance_heap, (spsd.euclidean(nodes[i], nodes[j]), i, j))
# now go with clustering
while len(remaining_indexes) > 1:
# drop merged ones
min_d, index1, index2 = hq.heappop(distance_heap)
while not (index1 in remaining_indexes and index2 in remaining_indexes):
min_d, index1, index2 = hq.heappop(distance_heap)
centroid = (weights[index1] * nodes[index1] + weights[index2] * nodes[index2]) / (weights[index1] + weights[index2])
# now new centroid comes, drop i and j, and calculate new distances
remaining_indexes.remove(index1)
remaining_indexes.remove(index2)
index_new = nodes.shape[0]
for i in remaining_indexes:
hq.heappush(distance_heap, (spsd.euclidean(nodes[i], centroid), i, index_new))
remaining_indexes.add(index_new)
weights = np.hstack((weights, weights[index1] + weights[index2]))
nodes = np.vstack((nodes, centroid))
# forming output
hierachical[output_index] = (index1, index2, index_new)
distances[output_index] = min_d
output_index += 1
return hierachical, distances, nodes, weights
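# Illustrative usage sketch (X is any (n_samples, n_features) array):
#   X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
#   hierachical, distances, nodes, weights = SimpleHierachicalCluster(X)
#   # hierachical[k] = (index1, index2, index_new): the k-th merge combines the
#   # rows index1 and index2 of `nodes` into a new centroid row `index_new`,
#   # weights accumulate the merged sample counts, and distances[k] is the
#   # centroid distance at that merge.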
def LastLocalMinimumCluster(X: np.ndarray, weight: tg.Optional[np.ndarray]=None) -> tg.Tuple[np.ndarray, np.ndarray]:
    """Hierarchical clustering that picks the last local minimum of the merge distances and cuts the cluster tree into clusters at that point.
    """
n_samples, n_features = X.shape
hierachical, distances, nodes, weights = SimpleHierachicalCluster(X, weight)
# find local minimums
extrema = spsn.argrelmin(distances) # type: np.ndarray
try:
last_local_minimum = extrema[0][len(extrema[0]) - 1]
except IndexError: # no local_minimum, return all clustered nodes
        return nodes[n_samples:], weights[n_samples:]
merged_nodes = set(hierachical[:last_local_minimum + 1, 0:2].flat)
post_cluster_nodes = set(hierachical[last_local_minimum + 1:, 2].flat)
total_nodes = set(range(len(nodes)))
    cluster_centers = total_nodes - post_cluster_nodes - merged_nodes  # nodes that were never merged become the cluster centers
return nodes[list(cluster_centers)], weights[list(cluster_centers)]
def PercentageCluster(X: np.ndarray, weight: tg.Optional[np.ndarray]=None, percentage: float=0.5) -> tg.Tuple[np.ndarray, np.ndarray]:
    """Hierarchical clustering that cuts the cluster tree into clusters once the merge distance exceeds the required percentage of the maximum distance.
    """
n_samples, n_features = X.shape
hierachical, distances, nodes, weights = SimpleHierachicalCluster(X, weight)
# find cutting point
max_d = np.max(distances)
extrema = distances < percentage * max_d
for i, x in enumerate(np.flipud(extrema)):
if x:
break
last_less = len(extrema) - 1 - i
merged_nodes = set(hierachical[:last_less + 1, 0:2].flat)
post_cluster_nodes = set(hierachical[last_less + 1:, 2].flat)
total_nodes = set(range(len(nodes)))
    cluster_centers = total_nodes - post_cluster_nodes - merged_nodes  # nodes that were never merged become the cluster centers
return nodes[list(cluster_centers)], weights[list(cluster_centers)]
def MeanLogWardTree(X: np.ndarray, weight: tg.Optional[np.ndarray]=None) -> np.ndarray:
    """Hierarchical clustering that cuts the ward_tree into several clusters at the mean of the log of the merge distances.
    """
if weight is None:
connect = None
else:
connect = np.outer(weight, weight)
n_samples, n_features = X.shape
children, n_components, n_leaves, parents, distances = sklc.ward_tree(X, connectivity=connect, return_distance=True)
c = children.shape[0]
hierachical = np.hstack([children, np.arange(n_samples, n_samples + c).reshape(c, 1)])
log_distance = np.log(np.sqrt(distances**2 / n_features))
mean_log_distance = np.mean(log_distance)
distance_mask = log_distance < mean_log_distance
post_distance_mask = log_distance >= mean_log_distance
merged_nodes = set(hierachical[distance_mask, 0:2].flat)
post_cluster_nodes = set(hierachical[post_distance_mask, 2].flat)
total_nodes = set(range(n_samples, c + n_samples))
    cluster_centers = total_nodes - post_cluster_nodes - merged_nodes  # nodes that were never merged become the cluster centers
print(merged_nodes)
print(post_cluster_nodes)
print(total_nodes)
return hierachical[np.asarray(list(cluster_centers)) - n_samples]
|
from .completion_cross_entropy import CompletionCrossEntropyCriterion
from .completion_lifelong_kd_cross_entropy_loss import LifeLongKDCrossEntropyCriterion
|
#!/usr/bin/env python
import difflib
import hashlib
import os
import pathlib
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
from urllib.request import urlopen
import zipfile
import click
import git
import pkg_resources
import yaml
from dataclasses import dataclass, field
from typing import List, Optional, Dict
def normalize(name: str) -> str:
# https://www.python.org/dev/peps/pep-0503/
return re.sub(r"[-_.]+", "-", name).lower()
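# Quick illustration of the PEP 503 normalization above (not in the original):
#   normalize("Foo.Bar_baz") -> "foo-bar-baz"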
def get_contents(file_name: pathlib.Path) -> Dict[str, bytes]:
tar = tarfile.open(mode="r|*", fileobj=file_name.open("rb"))
h = hashlib.sha256()
contents = {}
for member in tar:
if not member.isfile():
continue
f = tar.extractfile(member)
contents[member.name] = f.read()
return contents
def is_content_equal(c1: Dict[str, bytes], c2: Dict[str, bytes]) -> bool:
if len(c1.keys()) != len(c2.keys()):
print("number of contents is not same")
print(c1.keys())
print(c2.keys())
return False
for key in c1.keys():
if key not in c2:
print(f"file does not exist: {key}")
return False
if c1[key] != c2[key]:
print(f"file is not equal: {key}")
for diff in difflib.diff_bytes(difflib.unified_diff, c1[key].splitlines(), c2[key].splitlines()):
print(diff)
return False
return True
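# Illustrative sketch (not part of the original build flow): the two helpers
# above combine into a reproducibility check between two sdist tarballs.
def _sdists_identical(tar_a: pathlib.Path, tar_b: pathlib.Path) -> bool:
    return is_content_equal(get_contents(tar_a), get_contents(tar_b))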
def download_from_github(
dest_dir: pathlib.Path, repo: str, ver: str
) -> pathlib.Path:
url = f"https://github.com/{repo}/archive/{ver}.zip"
zip_file = dest_dir / f'{repo.replace("/", "_")}_{ver}.zip'
if not zip_file.exists():
u = urlopen(url)
with open(zip_file, "wb") as outs:
block_sz = 8192
while True:
buf = u.read(block_sz)
if not buf:
break
outs.write(buf)
return zip_file
def unzip(
zip_file: pathlib.Path,
dest_dir: pathlib.Path,
sub_dir: Optional[pathlib.Path] = None,
) -> None:
with open(zip_file, "rb") as f:
zipfp = zipfile.ZipFile(f)
for zip_file_name in zipfp.namelist():
original = pathlib.Path(zip_file_name)
name = pathlib.Path(*original.parts[1:])
if sub_dir:
try:
name.relative_to(sub_dir)
except ValueError:
continue
fname = dest_dir / pathlib.Path(
*name.parts[len(sub_dir.parts) :]
)
else:
fname = dest_dir / name
data = zipfp.read(zip_file_name)
if zip_file_name.endswith("/"):
if not fname.exists():
fname.mkdir(parents=True)
else:
fname.write_bytes(data)
def build_package(
package_dir: pathlib.Path,
dest_dir: pathlib.Path,
only_binary: bool = False,
native_build: Optional[str] = None,
release_version: Optional[str] = None,
requires: List[str] = [],
unrequires: List[str] = [],
compare: bool = True,
) -> None:
setup_code = (package_dir / "setup.py").read_text()
# Patch catkin_pkg.python_setup.generate_distutils_setup
# 1. to replace 'Requires' by 'Requires-Dist'
# 2. modify install_requires and version
# 3. add packages for ROS message
# https://www.python.org/dev/peps/pep-0314/#requires-multiple-use
# https://packaging.python.org/specifications/core-metadata/
import setuptools # NOQA
import catkin_pkg.python_setup
def patched_generate_distutils_setup(**kwargs):
new_kwargs = catkin_pkg.python_setup.original_generate_distutils_setup(
**kwargs
)
if "requires" in new_kwargs:
new_kwargs["install_requires"] = sorted(
list(new_kwargs["requires"])
)
del new_kwargs["requires"]
if len(requires) > 0 or len(unrequires) > 0:
new_kwargs["install_requires"] = sorted(
set(new_kwargs.get("install_requires", [])) - set(unrequires)
| set(requires)
)
if (
"install_requires" in new_kwargs
and "genpy" in new_kwargs["install_requires"]
):
new_kwargs["install_requires"].remove("genpy")
new_kwargs["install_requires"].append("genpy<2000")
new_kwargs["install_requires"].sort()
if "packages" in new_kwargs:
packages = new_kwargs["packages"]
for subtype in ("msg", "srv"):
sub_package = package_dir.name + "." + subtype
if (
package_dir / subtype
).exists() and sub_package not in packages:
packages.append(sub_package)
if release_version is not None:
new_kwargs["version"] = release_version
return new_kwargs
catkin_pkg.python_setup.original_generate_distutils_setup = (
catkin_pkg.python_setup.generate_distutils_setup
)
catkin_pkg.python_setup.generate_distutils_setup = (
patched_generate_distutils_setup
)
try:
cwd = os.getcwd()
original_argv = sys.argv
package_name = normalize(package_dir.name)
dest_package_dir = (dest_dir / package_name).resolve()
dest_package_dir.mkdir(parents=True, exist_ok=True)
os.chdir(package_dir)
# build source package
if not only_binary:
sys.argv = ["", "sdist"]
exec(setup_code, globals())
# check before copy
tar_file = next((package_dir / "dist").glob("*.tar.gz"))
if (dest_package_dir / tar_file.name).exists():
if compare:
contents0 = get_contents(tar_file)
contents1 = get_contents(dest_package_dir / tar_file.name)
if not is_content_equal(contents0, contents1):
print(
"Hash is not same! Remove or change the version."
f"{tar_file.name}"
)
shutil.copy(
tar_file, cwd + "/" + tar_file.name + ".new"
)
shutil.copy(
dest_package_dir / tar_file.name,
cwd + "/" + tar_file.name + ".org",
)
sys.exit(1)
else:
print(f"content is not changed: {tar_file.name}")
return
print("copy")
shutil.copy(tar_file, dest_package_dir)
if (not only_binary and native_build is None) or (
only_binary and native_build is not None
):
# if it's updated, build the binary package
sys.argv = ["", "bdist_wheel", "--universal"]
# If Python(3.6/3.7) is built with --enable-shared,
            # distutils will build extensions with libpython,
# which may cause import error in other Python environments.
# So we overwrite that config
if sys.version_info < (3, 8):
import distutils.sysconfig
# make sure _config_vars is ready
distutils.sysconfig.get_config_vars()
org_config_vars = distutils.sysconfig._config_vars
distutils.sysconfig._config_vars["Py_ENABLE_SHARED"] = 0
exec(setup_code, globals())
distutils.sysconfig._config_vars = org_config_vars
else:
exec(setup_code, globals())
if native_build == "all":
# TODO: find a better way
subprocess.call(["python2", "setup.py", "bdist_wheel"])
for wheel in (package_dir / "dist").glob("*.whl"):
shutil.copy(wheel, dest_package_dir)
finally:
sys.argv = original_argv
os.chdir(cwd)
catkin_pkg.python_setup.generate_distutils_setup = (
catkin_pkg.python_setup.original_generate_distutils_setup
)
def generate_rosmsg_from_action(
dest_msg_dir: pathlib.Path, source_action_dir: pathlib.Path
) -> None:
files = source_action_dir.glob("*.action")
for action in files:
dest_msg_dir.mkdir(parents=True, exist_ok=True)
name = action.name[:-7]
# parse
parts = [[]]
for l in action.read_text().split("\n"):
if l.startswith("---"):
parts.append([])
continue
parts[-1].append(l)
parts = ["\n".join(p) for p in parts]
assert len(parts) == 3
(dest_msg_dir / (name + "Goal.msg")).write_text(parts[0])
(dest_msg_dir / (name + "Result.msg")).write_text(parts[1])
(dest_msg_dir / (name + "Feedback.msg")).write_text(parts[2])
(dest_msg_dir / (name + "Action.msg")).write_text(
f"""{name}ActionGoal action_goal
{name}ActionResult action_result
{name}ActionFeedback action_feedback
"""
)
(dest_msg_dir / (name + "ActionGoal.msg")).write_text(
f"""Header header
actionlib_msgs/GoalID goal_id
{name}Goal goal
"""
)
(dest_msg_dir / (name + "ActionResult.msg")).write_text(
f"""Header header
actionlib_msgs/GoalStatus status
{name}Result result
"""
)
(dest_msg_dir / (name + "ActionFeedback.msg")).write_text(
f"""Header header
actionlib_msgs/GoalStatus status
{name}Feedback feedback
"""
)
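# Illustrative example (hypothetical action definition): a file Foo.action with
#   int32 target
#   ---
#   int32 final
#   ---
#   int32 progress
# is split by the function above into FooGoal.msg, FooResult.msg and
# FooFeedback.msg, plus the aggregate FooAction*.msg wrappers written above.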
def generate_package_from_rosmsg(
package_dir: pathlib.Path,
package: str,
version: Optional[str] = None,
search_root_dir: Optional[pathlib.Path] = None,
src_dir: Optional[pathlib.Path] = None,
release_version: Optional[str] = None,
) -> None:
import genpy.generator
import genpy.genpy_main
# NOTE: genpy uses a global variable to manage variable names
# As such, the variable names may change frequently depending on the
# order in which `genpy.generate_messages` is called.
# In order to avoid unnecessary changes, call `reset_var` every time.
genpy.generator.reset_var()
search_dir = {package: [package_dir / "msg"]}
if search_root_dir is not None:
for msg_dir in search_root_dir.glob("**/msg"):
if "test" in str(msg_dir):
continue
p = msg_dir.parent.name
if p not in search_dir:
search_dir[p] = []
search_dir[p].append(msg_dir)
if src_dir is None:
dest_package_dir = package_dir / package
else:
dest_package_dir = package_dir / src_dir / package
print(dest_package_dir)
for gentype in ("msg", "srv"):
        # NOTE: files needs to be in alphabetical order in order to make
        # the generated files consistent; sorting also materializes the glob
        # generator so the emptiness check below actually works
        files = sorted((package_dir / gentype).glob(f"*.{gentype}"))
        if files:
if gentype == "msg":
generator = genpy.generator.MsgGenerator()
elif gentype == "srv":
generator = genpy.generator.SrvGenerator()
ret = generator.generate_messages(
package, files, dest_package_dir / gentype, search_dir
)
if ret:
raise RuntimeError(
"Failed to generate python files from msg files."
)
genpy.generate_initpy.write_modules(dest_package_dir / gentype)
if not (dest_package_dir / "__init__.py").exists():
genpy.generate_initpy.write_modules(dest_package_dir)
if not (package_dir / "setup.py").exists():
if version is None:
version = "0.0.0"
package_xml = package_dir / "package.xml"
if package_xml.exists():
v = re.search(
"<version>(.*)</version>", package_xml.read_text()
)
if v:
version = v.group(1)
if release_version is not None:
version = release_version
genpy_version = pkg_resources.get_distribution('genpy').version
(package_dir / "setup.py").write_text(
f"""from setuptools import find_packages, setup
setup(name=\'{package}\', version=\'{version}\', packages=find_packages(),
install_requires=[\'genpy>={genpy_version},<2000\'])"""
)
def build_package_from_github_package(
build_dir: pathlib.Path,
dest_dir: pathlib.Path,
repository: str,
version: str,
sub_dir: Optional[pathlib.Path] = None,
src_dir: Optional[pathlib.Path] = None,
release_version: Optional[str] = None,
requires: List[str] = [],
unrequires: List[str] = [],
compare: bool = True,
) -> None:
if sub_dir:
package = sub_dir.name
else:
package = repository.split("/")[1]
package_dir = build_dir / package
zipfile = download_from_github(build_dir, repository, version)
unzip(zipfile, package_dir, sub_dir)
if src_dir is not None and (
(package_dir / "msg").exists() or (package_dir / "srv").exists()
):
generate_package_from_rosmsg(
package_dir,
package,
None,
build_dir,
src_dir,
release_version=release_version,
)
build_package(
package_dir=package_dir,
dest_dir=dest_dir,
release_version=release_version,
requires=requires,
unrequires=unrequires,
compare=compare,
)
def build_package_from_github_msg(
build_dir: pathlib.Path,
dest_dir: pathlib.Path,
repository: str,
version: str,
sub_dir: Optional[pathlib.Path] = None,
release_version: Optional[str] = None,
requires: List[str] = [],
unrequires: List[str] = [],
compare: bool = True,
) -> None:
if sub_dir:
package = sub_dir.name
else:
package = repository.split("/")[1]
package_dir = build_dir / package
zipfile = download_from_github(build_dir, repository, version)
if sub_dir is None:
sub_dir = pathlib.Path()
unzip(zipfile, package_dir / "msg", sub_dir / "msg")
unzip(zipfile, package_dir / "srv", sub_dir / "srv")
unzip(zipfile, package_dir / "action", sub_dir / "action")
generate_rosmsg_from_action(package_dir / "msg", package_dir / "action")
generate_package_from_rosmsg(
package_dir,
package,
version,
search_root_dir=build_dir,
release_version=release_version,
)
build_package(
package_dir=package_dir,
dest_dir=dest_dir,
release_version=release_version,
requires=requires,
unrequires=unrequires,
compare=compare,
)
def build_package_from_local_package(
build_dir: pathlib.Path,
dest_dir: pathlib.Path,
src_dir: pathlib.Path,
only_binary: bool,
native_build: Optional[str] = None,
) -> None:
package = src_dir.name
package_dir = build_dir / package
shutil.rmtree(package_dir, ignore_errors=True)
# shutil.copytree(src_dir, package_dir, copy_function=shutil.copy)
def copytree(src, dst):
# windows git symlink support
os.makedirs(dst)
files = os.listdir(src)
for f in files:
srcfull = os.path.join(src, f)
dstfull = os.path.join(dst, f)
m = os.lstat(srcfull).st_mode
if stat.S_ISLNK(m):
srcfull = os.path.join(src, os.readlink(srcfull))
m = os.lstat(srcfull).st_mode
if stat.S_ISDIR(m):
copytree(srcfull, dstfull)
else:
shutil.copy(srcfull, dstfull)
copytree(src_dir, package_dir)
build_package(
package_dir=package_dir,
dest_dir=dest_dir,
only_binary=only_binary,
native_build=native_build,
)
@dataclass
class PackageInfo:
name: str
path: Optional[str] = None
repository: Optional[str] = None
version: Optional[str] = None
native_build: Optional[str] = None
release_version: Optional[str] = None
type: Optional[str] = None
src: Optional[str] = None
requires: Optional[List[str]] = field(default_factory=list)
unrequires: Optional[List[str]] = field(default_factory=list)
skip_compare: Optional[bool] = False
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx, package_list: str = None) -> None:
if ctx.invoked_subcommand is None:
print(ctx.get_help())
@cli.command(help="build packages")
@click.option(
"-l",
"--list",
"package_list",
type=click.Path(exists=True),
help="path to packages.yaml",
default=os.getcwd() + "/packages.yaml",
)
@click.option(
"-d",
"--dest",
"dest_dir",
type=click.Path(),
help="path where to generate packages",
default=os.getcwd() + "/dest",
)
@click.option(
"--native",
is_flag=True,
help="build only platform specific binaries",
default=False,
)
@click.argument("target", required=False, type=str)
def build(
target: Optional[str], package_list: str, dest_dir: str, native: bool,
) -> None:
dest_dir_path = pathlib.Path(dest_dir)
with open(package_list) as f:
packages_dict = yaml.safe_load(f)
packages: List[PackageInfo] = []
for package in packages_dict:
packages.append(PackageInfo(**package))
if target is not None:
if target not in [p.name for p in packages]:
            print(f"{target} was not found in {package_list}")
sys.exit(1)
tmp = pathlib.Path(tempfile.mkdtemp())
origin = None
try:
repo = git.Repo()
origin = repo.remotes.origin
origin.fetch()
except git.exc.InvalidGitRepositoryError:
print("Not a git directory. Will not include other arch binaries")
try:
for package in packages:
if target is not None and target != package.name:
continue
if native and package.native_build is None:
continue
print(package.name)
if package.repository is None:
build_package_from_local_package(
build_dir=tmp,
dest_dir=dest_dir_path,
src_dir=pathlib.Path(package.path),
only_binary=native,
native_build=package.native_build,
)
elif package.version is not None:
path = (
pathlib.Path(package.path)
if package.path is not None
else None
)
src = (
pathlib.Path(package.src)
if package.src is not None
else None
)
if package.type is None:
build_package_from_github_package(
build_dir=tmp,
dest_dir=dest_dir_path,
repository=package.repository,
version=package.version,
sub_dir=path,
src_dir=src,
release_version=package.release_version,
requires=package.requires,
unrequires=package.unrequires,
compare=not package.skip_compare,
)
else:
build_package_from_github_msg(
build_dir=tmp,
dest_dir=dest_dir_path,
repository=package.repository,
version=package.version,
sub_dir=path,
release_version=package.release_version,
requires=package.requires,
unrequires=package.unrequires,
compare=not package.skip_compare,
)
finally:
shutil.rmtree(tmp)
@cli.command(help="generate message package")
@click.option(
    "-s",
    "--search",
"search_dir",
type=click.Path(exists=True),
help="message search root path",
)
@click.argument("path", type=click.Path(exists=True), required=True)
def genmsg(path: str, search_dir: str) -> None:
package_dir = pathlib.Path(path)
search_dir = pathlib.Path(search_dir) if search_dir is not None else None
generate_rosmsg_from_action(package_dir / "msg", package_dir / "action")
generate_package_from_rosmsg(
package_dir, package_dir.name, None, search_dir
)
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Download data from http://lda.data.parliament.uk"""
from __future__ import division, print_function, absolute_import
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import codecs
import logging
from os import mkdir, path
import requests
import sys
from ddpuk import __version__ # noqa: make available
__author__ = "Florian Rathgeber"
__copyright__ = "Florian Rathgeber 2015"
__license__ = "mit"
log = logging.getLogger(__name__)
BASEURL = 'http://lda.data.parliament.uk'
URL = BASEURL + r'/%(data)s.%(format)s?_pageSize=%(size)d&_page=%(page)d'
MAX_PAGE = 125
SIZE = 500
def download(dataset, fmt='json', size=SIZE, page_from=0, page_to=MAX_PAGE,
datadir='data'):
if not path.exists(datadir):
mkdir(datadir)
for page in range(page_from, page_to):
log.info('downloading page %d/%d', page + 1, page_to + 1)
url = URL % {'data': dataset, 'format': fmt, 'size': size, 'page': page}
log.info(' %s', url)
r = requests.get(url)
fname = path.join(datadir, '%s-%d-%05d.%s' % (dataset, size, page, fmt))
with codecs.open(fname, 'w', 'utf-8') as f:
f.write(r.text)
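# Illustrative example (hypothetical dataset name):
#   download('commonsdivisions', fmt='json', size=500, page_from=0, page_to=2)
# fetches
#   http://lda.data.parliament.uk/commonsdivisions.json?_pageSize=500&_page=0
#   http://lda.data.parliament.uk/commonsdivisions.json?_pageSize=500&_page=1
# and writes data/commonsdivisions-500-00000.json and
# data/commonsdivisions-500-00001.json.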
def parse_args(args=None):
"""
Parse command line parameters
:param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
"""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('dataset', help='dataset to download')
parser.add_argument('--format', dest='fmt', default='json', help='format',
choices=['csv', 'json', 'rdf', 'text', 'ttl', 'xml'])
parser.add_argument('--size', type=int, default=SIZE, help='batch size')
parser.add_argument('--page-from', type=int, default=0, help='first page')
parser.add_argument('--page-to', type=int, default=MAX_PAGE, help='last page')
parser.add_argument('--datadir', default='data', help='target directory')
return parser.parse_args(args)
def main():
download(**vars(parse_args()))
def run():
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
main()
if __name__ == "__main__":
run()
|
from mmstructlib.sas.nsc import DoubleArray, nsc_atom_sas, nsc_total_sas, nsc_total_volume
def run_nsc(atoms, probe, dots, func):
    """
    Pack atom coordinates and probe-expanded radii into NSC arrays, run the
    given NSC routine and return its raw DoubleArray result.
    """
num = len(atoms)
coords = DoubleArray(num*3)
radii = DoubleArray(num)
for i, a in enumerate(atoms):
atom_index = i*3
coords[atom_index] = a.coordinates[0]
coords[atom_index+1] = a.coordinates[1]
coords[atom_index+2] = a.coordinates[2]
radii[i] = a.radius + probe
result = DoubleArray()
if func(coords, radii, dots, result):
raise RuntimeError("Error running nsc")
return result
def add_area(atoms, probe = 1.4, dots = 1000):
    """
    Compute the per-atom solvent-accessible surface area and store it on each
    atom as `atom.area`.
    """
result = run_nsc(atoms, probe, dots, nsc_atom_sas)
for i, a in enumerate(atoms):
a.area = result[i]
def total_area(atoms, probe = 1.4, dots = 1000):
    """
    Return the total solvent-accessible surface area of the given atoms.
    """
return run_nsc(atoms, probe, dots, nsc_total_sas)[0]
def total_volume(atoms, probe = 1.4, dots = 1000):
    """
    Return the total volume computed by NSC for the probe-expanded atoms.
    """
return run_nsc(atoms, probe, dots, nsc_total_volume)[0]
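# Illustrative usage sketch (assumes `atoms` is a sequence of objects exposing
# `.coordinates` (length 3) and `.radius`, as run_nsc above requires):
#   add_area(atoms)               # per-atom SAS stored on each atom as `.area`
#   sas = total_area(atoms)       # total solvent-accessible surface area
#   vol = total_volume(atoms)     # total volume reported by NSC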
|
from __future__ import absolute_import, division, print_function
from cctbx import crystal, sgtbx, xray
from cctbx import geometry_restraints
from cctbx.array_family import flex
import iotbx.cif.restraints
from libtbx.test_utils import show_diff
from six.moves import cStringIO as StringIO
def exercise_geometry_restraints_as_cif():
quartz = xray.structure(
crystal_symmetry=crystal.symmetry(
(5.01,5.01,5.47,90,90,120), "P6222"),
scatterers=flex.xray_scatterer([
xray.scatterer("Si", (1/2.,1/2.,1/3.)),
xray.scatterer("O", (0.197,-0.197,0.83333))]))
bond_proxies = geometry_restraints.shared_bond_simple_proxy((
geometry_restraints.bond_simple_proxy(
i_seqs=[0,1],
rt_mx_ji=sgtbx.rt_mx("x-y,x,z-2/3"),
distance_ideal=1.6,
weight=3.2),
geometry_restraints.bond_simple_proxy(
i_seqs=[0,1],
distance_ideal=1.7,
weight=1.8),
))
dihedral_proxies = geometry_restraints.shared_dihedral_proxy((
geometry_restraints.dihedral_proxy(
i_seqs = [1,0,1,0],
sym_ops = (sgtbx.rt_mx("1+y,1-x+y, z-1/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("x-y,x,z-2/3"),
sgtbx.rt_mx("1-x,y-x,1/3-z")),
angle_ideal=-30,
weight=2),
geometry_restraints.dihedral_proxy(
i_seqs = [1,0,1,0],
sym_ops = (sgtbx.rt_mx("1+y,1-x+y, z-1/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("-y,x-y,z-1/3"),
sgtbx.rt_mx("x-y,x,1/3+z")),
angle_ideal=90,
weight=3),
))
chirality_proxies = geometry_restraints.shared_chirality_proxy((
geometry_restraints.chirality_proxy(
i_seqs = [1,0,1,0],
sym_ops = (sgtbx.rt_mx("1+y,1-x+y, z-1/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("x-y,x,z-2/3"),
sgtbx.rt_mx("1-x,y-x,1/3-z")),
volume_ideal=1.2,
both_signs=False,
weight=2),
geometry_restraints.chirality_proxy(
i_seqs = [1,0,1,0],
sym_ops = (sgtbx.rt_mx("1+y,1-x+y, z-1/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("x-y,x,z-2/3"),
sgtbx.rt_mx("1-x,y-x,1/3-z")),
volume_ideal=1.2,
both_signs=True,
weight=2),
))
angle_proxies = geometry_restraints.shared_angle_proxy((
geometry_restraints.angle_proxy(
i_seqs = [1,0,1],
sym_ops = (sgtbx.rt_mx("x-y,x,z-2/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("-y,x-y,z-1/3")),
angle_ideal=103,
weight=2),
geometry_restraints.angle_proxy(
i_seqs = [1,0,1],
sym_ops = (sgtbx.rt_mx("y+1,-x+y+1,z-1/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("-y,x-y,z-1/3")),
angle_ideal=110,
weight=5),
geometry_restraints.angle_proxy(
i_seqs = [0,1,0],
sym_ops = (sgtbx.rt_mx("y,-x+y,z+2/3"),
sgtbx.rt_mx(),
sgtbx.rt_mx("-x+y,-x,z+1/3")),
angle_ideal=150,
weight=5),
))
bond_similarity_proxies = geometry_restraints.shared_bond_similarity_proxy((
geometry_restraints.bond_similarity_proxy(
i_seqs=[(0,1),(0,1),(0,1)],
sym_ops=(sgtbx.rt_mx("x-y,x,z-2/3"),
sgtbx.rt_mx("-y,x-y,z-1/3"),
sgtbx.rt_mx("y+1,-x+y+1,z-1/3")),
weights=(1,1,1)),
))
cif_block = iotbx.cif.model.block()
iotbx.cif.restraints.add_to_cif_block(
cif_block, quartz,
bond_proxies=bond_proxies,
angle_proxies=angle_proxies,
dihedral_proxies=dihedral_proxies,
chirality_proxies=chirality_proxies,
bond_similarity_proxies=bond_similarity_proxies)
s = StringIO()
cif_block.show(out=s)
assert not show_diff(s.getvalue(), """\
loop_
_restr_distance_atom_site_label_1
_restr_distance_atom_site_label_2
_restr_distance_site_symmetry_2
_restr_distance_target
_restr_distance_target_weight_param
_restr_distance_diff
Si O 2_554 1.6000 0.5590 -0.0160
Si O 1 1.7000 0.7454 -2.3838
loop_
_restr_angle_atom_site_label_1
_restr_angle_atom_site_label_2
_restr_angle_atom_site_label_3
_restr_angle_site_symmetry_1
_restr_angle_site_symmetry_2
_restr_angle_site_symmetry_3
_restr_angle_target
_restr_angle_target_weight_param
_restr_angle_diff
O Si O 2_554 1 4_554 103.0000 0.7071 1.6926
O Si O 3_664 1 4_554 110.0000 0.4472 -1.3127
Si O Si 3 1 5 150.0000 0.4472 3.0700
loop_
_restr_torsion_atom_site_label_1
_restr_torsion_atom_site_label_2
_restr_torsion_atom_site_label_3
_restr_torsion_atom_site_label_4
_restr_torsion_site_symmetry_1
_restr_torsion_site_symmetry_2
_restr_torsion_site_symmetry_3
_restr_torsion_site_symmetry_4
_restr_torsion_angle_target
_restr_torsion_weight_param
_restr_torsion_diff
O Si O Si 3_664 1 2_554 7_655 -30.0000 0.7071 6.9078
O Si O Si 3_664 1 4_554 2 90.0000 0.5774 11.7036
loop_
_restr_chirality_atom_site_label_1
_restr_chirality_atom_site_label_2
_restr_chirality_atom_site_label_3
_restr_chirality_atom_site_label_4
_restr_chirality_site_symmetry_1
_restr_chirality_site_symmetry_2
_restr_chirality_site_symmetry_3
_restr_chirality_site_symmetry_4
_restr_chirality_volume_target
_restr_chirality_weight_param
_restr_chirality_diff
O Si O Si 3_664 1 2_554 7_655 1.2000 0.7071 2.4415
O Si O Si 3_664 1 2_554 7_655 1.2000 0.7071 -0.0415
loop_
_restr_equal_distance_class_class_id
_restr_equal_distance_class_target_weight_param
_restr_equal_distance_class_average
_restr_equal_distance_class_esd
_restr_equal_distance_class_diff_max
1 1.0000 1.6160 0.0000 0.0000
loop_
_restr_equal_distance_atom_site_label_1
_restr_equal_distance_atom_site_label_2
_restr_equal_distance_site_symmetry_2
_restr_equal_distance_class_id
Si O 2_554 1
Si O 4_554 1
Si O 3_664 1
""")
def exercise_adp_restraints_as_cif():
import libtbx.load_env
if not libtbx.env.has_module("smtbx"):
print("Skipping exercise_adp_restraints_as_cif(): smtbx not available")
return
from smtbx.refinement.restraints import adp_restraints as smtbx_adp_restraints
import smtbx.development
xs = smtbx.development.sucrose()
rigid_bond_proxies = smtbx_adp_restraints.rigid_bond_restraints(
xray_structure=xs).proxies[:3]
rigu_proxies = smtbx_adp_restraints.rigu_restraints(
xray_structure=xs).proxies[:3]
adp_similarity_proxies = smtbx_adp_restraints.adp_similarity_restraints(
xray_structure=xs).proxies[:3]
isotropic_adp_proxies = smtbx_adp_restraints.isotropic_adp_restraints(
xray_structure=xs).proxies[:3]
cif_block = iotbx.cif.model.block()
iotbx.cif.restraints.add_to_cif_block(
cif_block, xs,
rigid_bond_proxies=rigid_bond_proxies,
rigu_proxies=rigu_proxies,
adp_similarity_proxies=adp_similarity_proxies,
isotropic_adp_proxies=isotropic_adp_proxies)
s = StringIO()
cif_block.show(out=s)
assert not show_diff(s.getvalue(), """\
loop_
_restr_U_rigid_atom_site_label_1
_restr_U_rigid_atom_site_label_2
_restr_U_rigid_target_weight_param
_restr_U_rigid_U_parallel
_restr_U_rigid_diff
O1 C1 0.0100 0.0176 0.0006
O1 C2 0.0100 0.0194 -0.0053
O1 C3 0.0100 0.0177 0.0013
loop_
_restr_RIGU_atom_site_label_1
_restr_RIGU_atom_site_label_2
_restr_RIGU_target_weight_param
_restr_RIGU_U13_diff
_restr_RIGU_U23_diff
_restr_RIGU_U33_diff
O1 C1 0.004000 -0.002618 -0.001550 0.000599
O1 C2 0.004000 -0.000752 0.002098 -0.005274
O1 C3 0.004000 -0.002608 -0.001448 0.001305
loop_
_restr_U_similar_atom_site_label_1
_restr_U_similar_atom_site_label_2
_restr_U_similar_weight_param
O1 C1 0.0400
O1 C6 0.0400
O2 C2 0.0800
loop_
_restr_U_iso_atom_site_label
_restr_U_iso_weight_param
O1 0.1000
O2 0.2000
O3 0.2000
""")
def run():
exercise_adp_restraints_as_cif()
exercise_geometry_restraints_as_cif()
print("OK")
if __name__ == '__main__':
run()
|
import math
import os
import random
from pathlib import Path
from collections import defaultdict
import torch
import torch.utils.data
import torchaudio
import numpy as np
import librosa
from librosa.util import normalize
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn
def load_wav(path, sr=16000):
"""Load audio from path and resample it
Return: 1d numpy.array of audio data
"""
wav = librosa.load(path, sr=sr, dtype=np.float32, mono=True)[0]
return wav
def get_datafolder_files(datafolder_path, pattern='.wav'):
"""Get all files with specified extension in directory tree
    Return: list of file paths
"""
filelist = []
for root, _, filenames in os.walk(datafolder_path):
for filename in filenames:
if Path(filename).suffix == pattern:
filelist.append(os.path.join(root, filename))
return filelist
def define_train_list(filepathes, clean_suffix='clean', n_utterance_tokens=2):
    """Return dict in the following format:
        { utterance_name : [filepath, filename1, filename2...] },
    where the first element of the list is the ground-truth file's path
    and the remaining entries are the stems of the other files
    for the same utterance
    """
assert clean_suffix in ['cleanraw', 'clean', 'produced']
train_list = defaultdict(list)
for filepath in filepathes:
p = Path(filepath)
tokens = p.stem.split('_')
utterance = '_'.join(tokens[:n_utterance_tokens])
if tokens[-1] == clean_suffix:
train_list[utterance] = [filepath] + train_list[utterance]
else:
train_list[utterance].append(p.stem)
return train_list
def train_test_split(filelist, p=0.85, seed=17):
"""Return train and test set of filenames
This function follows `define_train_list` and uses its output
"""
random.seed(seed)
train_list, test_list = dict(), dict()
for utterance, files in filelist.items():
gt_filepath, filenames = files[0], files[1:]
random.shuffle(filenames)
val_len = int((1 - p) * len(filenames))
train_list[utterance] = [gt_filepath] + filenames[val_len:]
test_list[utterance] = [gt_filepath] + filenames[:val_len]
return train_list, test_list
def save_dataset_filelist(filelist, filelist_path, delim='|'):
with open(filelist_path, 'w', encoding='utf-8') as f:
for utterance, files in filelist.items():
print(utterance + delim + delim.join(files), file=f)
def load_dataset_filelist(filelist_path, delim='|'):
filelist = dict()
with open(filelist_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if len(line) <= 0:
continue
tokens = line.split(delim)
utterance, files = tokens[0], tokens[1:]
filelist[utterance] = files
return filelist
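# Illustrative example (hypothetical names) of one filelist line as written by
# save_dataset_filelist and read back by load_dataset_filelist:
#   utt_001|/data/clean/utt_001_clean.wav|utt_001_babble|utt_001_street
# i.e. the utterance name, then the ground-truth path, then the noisy file stems.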
class PreprocessDataset(torch.utils.data.Dataset):
"""
Torch Dataset class for wav files for their following feature extraction.
Assumes that wav files (all names are different)
can be in some subdirectories of root dir.
"""
def __init__(self, filepathes, data_cfg):
self.data_cfg = data_cfg
self.audio_pathes = filepathes
self.filenames = list(map(lambda p: Path(p).name, filepathes))
def __getitem__(self, index):
file_path = self.audio_pathes[index]
filename = self.filenames[index]
wav = load_wav(file_path, sr=self.data_cfg.sample_rate)
return wav, filename
def __len__(self):
return len(self.audio_pathes)
class FeatureDataset(torch.utils.data.Dataset):
"""
Torch Dataset class for wav files and their features.
Assumes that wav files (all names are different)
can be in some subdirectories of root dir,
but feature files in corresponding feature directories alone.
"""
def __init__(self, dataset_cfg, filelist, data_cfg, preload_gt=True, segmented=True, seed=17):
self.data_cfg = data_cfg
self.dataset_cfg = dataset_cfg
self.filelist = filelist
self.segmented = segmented
self.upsampling_rate = self.data_cfg.hop_length * self.data_cfg.target_sample_rate // self.data_cfg.sample_rate
if self.segmented:
assert self.data_cfg.segment_size % (self.data_cfg.hop_length * self.data_cfg.target_sample_rate // self.data_cfg.sample_rate) == 0
self.n_points = self.data_cfg.segment_size
self.n_frames = self.n_points // self.upsampling_rate
self.noise_to_gt_dict = dict()
for files in filelist.values():
# first is ground truth
self.noise_to_gt_dict[Path(files[0]).stem] = files[0]
for filename in files[1:]:
self.noise_to_gt_dict[filename] = files[0]
self.filenames = list(map(lambda p: Path(p).stem, self.noise_to_gt_dict.keys()))
self.gt_list = set(self.noise_to_gt_dict.values())
self.preload_gt = preload_gt
if preload_gt:
self.gt_data = {
gt_path : load_wav(gt_path, sr=self.data_cfg.target_sample_rate)
for gt_path in self.gt_list
}
self.spk_embs = torch.load(self.dataset_cfg.spk_embs_file)
random.seed(seed)
def __getitem__(self, index):
filename = self.filenames[index]
gt_path = self.noise_to_gt_dict[filename]
if self.preload_gt:
gt_wav = self.gt_data[gt_path]
else:
gt_wav = load_wav(gt_path, sr=self.data_cfg.target_sample_rate)
# features: [ppg, f0, loudness]
features = []
for feature_dir in (
self.dataset_cfg.ppg_dir, self.dataset_cfg.f0_dir, self.dataset_cfg.loudness_dir
):
feat = torch.load(os.path.join(feature_dir, Path(filename).with_suffix('.pt')))
features.append(feat)
features = torch.cat(features, dim=1)
# condition: [embed]
cond = self.spk_embs[filename]
# because center=True for feature extraction
n_pad = self.upsampling_rate // 2
gt_wav = np.pad(gt_wav, (n_pad, n_pad), mode='reflect')[:self.upsampling_rate * features.shape[0]]
if self.segmented:
start_frame = random.randint(0, features.shape[0] - self.n_frames)
start_point = start_frame * self.data_cfg.hop_length
gt_wav = gt_wav[start_point:start_point + self.n_points]
features = features[start_frame:start_frame + self.n_frames]
return gt_wav, features, cond
def __len__(self):
return len(self.filenames)
|
from django.contrib import admin
from .models import (
Question,
Answer,
)
admin.site.register(Question)
admin.site.register(Answer)
|
import os
import numpy as np
import streamlit as st
from sklearn.metrics.pairwise import cosine_similarity
from ..utils import get_word_embeddings
@st.cache(persist=eval(os.getenv("PERSISTENT")))
def compute_similarity_matrix_keywords(
model_path,
keywords=[],
all_model_vectors=False,
return_unk_sim=False,
):
keywords, word_embs = get_word_embeddings(
model_path, keywords, all_model_vectors=all_model_vectors, return_words=True
)
word_embs = np.array(word_embs)
sim_matrix = cosine_similarity(word_embs, word_embs)
if return_unk_sim:
unk_emb = np.mean(
[word_embs[i] for i in range(word_embs.shape[0])], axis=0
).reshape(1, -1)
sim_with_unk = cosine_similarity(unk_emb, word_embs)
return keywords, word_embs, sim_matrix, sim_with_unk, unk_emb.reshape(-1)
else:
return keywords, word_embs, sim_matrix
@st.cache(persist=eval(os.getenv("PERSISTENT")))
def compute_acceleration_matrix(
keywords,
sim_matrix_1,
sim_matrix_2,
top_k_acc=200,
skip_same_word_pairs=True,
skip_duplicates=True,
):
acceleration_matrix = sim_matrix_2 - sim_matrix_1
acc_matrix_sorted = (-acceleration_matrix).argsort(axis=None)
acc_matrix_indices = np.unravel_index(acc_matrix_sorted, acceleration_matrix.shape)
sorted_indices = np.vstack(acc_matrix_indices).T
word_pairs = {}
top_k_count = 0
idx = 0
while top_k_count < top_k_acc and idx < len(sorted_indices):
word_1_index, word_2_index = sorted_indices[idx]
idx += 1
word_1 = keywords[word_1_index]
word_2 = keywords[word_2_index]
if skip_same_word_pairs and word_1 == word_2:
continue
if skip_duplicates and (word_2, word_1) in word_pairs:
continue
word_pairs[(word_1, word_2)] = acceleration_matrix[word_1_index][word_2_index]
top_k_count += 1
return word_pairs
@st.cache(persist=eval(os.getenv("PERSISTENT")))
def compute_acc_between_years(
keywords,
model_path_1,
model_path_2,
all_model_vectors=False,
top_k_acc=200,
skip_same_word_pairs=True,
skip_duplicates=True,
):
kw1, em1, sim_matrix_1 = compute_similarity_matrix_keywords(
model_path_1, all_model_vectors=all_model_vectors, keywords=keywords
)
kw2, em2, sim_matrix_2 = compute_similarity_matrix_keywords(
model_path_2, all_model_vectors=all_model_vectors, keywords=keywords
)
word_pairs = compute_acceleration_matrix(
keywords,
sim_matrix_1=sim_matrix_1,
sim_matrix_2=sim_matrix_2,
top_k_acc=top_k_acc,
skip_same_word_pairs=skip_same_word_pairs,
skip_duplicates=skip_duplicates,
)
return word_pairs, em1, em2
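# Illustrative usage sketch (hypothetical model paths and keywords): rank the
# word pairs whose cosine similarity grew the most between two trained models.
#   pairs, em1, em2 = compute_acc_between_years(
#       ["court", "rights"], "models/1990.model", "models/2000.model", top_k_acc=10
#   )
#   # pairs maps (word_1, word_2) -> sim(model_2) - sim(model_1), largest first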
@st.cache(persist=eval(os.getenv("PERSISTENT")))
def compute_acc_heatmap_between_years(
keywords, model_path_1, model_path_2, all_model_vectors=False
):
kw1, em1, sim_matrix_1 = compute_similarity_matrix_keywords(
model_path_1, all_model_vectors=all_model_vectors, keywords=keywords
)
kw2, em2, sim_matrix_2 = compute_similarity_matrix_keywords(
model_path_2, all_model_vectors=all_model_vectors, keywords=keywords
)
acceleration_matrix = sim_matrix_2 - sim_matrix_1
return acceleration_matrix
|
from setuptools import setup, find_packages
setup(
name='IPBot Telegram',
version='0.1',
long_description=__doc__,
py_modules=('ipbot',),
install_requires=['DepyTG', 'requests'],
dependency_links=[
"https://github.com/Depau/DepyTG/archive/wip.zip"
],
entry_points={
'console_scripts': [
'ipbot = ipbot:main',
]
}
)
|
#!/bin/python3
import sys
n = int(input().strip())
a = list(map(int, input().strip().split(" ")))
# Write Your Code Here
numberOfSwap = 0
for i in range(n):
for j in range(n - i - 1):
if a[j] > a[j + 1]:
a[j], a[j + 1] = a[j + 1], a[j]
numberOfSwap += 1
if numberOfSwap == 0:
break
print(f"Array is sorted in {numberOfSwap} swaps.")
print(f"First Element: {a[0]}")
print(f"Last Element: {a[-1]}")
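# Note: the early exit above checks the cumulative swap count, so it only
# triggers when no swap has happened yet. A per-pass variant (illustrative
# sketch, not part of the original submission) stops as soon as one full pass
# makes no swap:
#   for i in range(n):
#       swapped = False
#       for j in range(n - i - 1):
#           if a[j] > a[j + 1]:
#               a[j], a[j + 1] = a[j + 1], a[j]
#               numberOfSwap += 1
#               swapped = True
#       if not swapped:
#           break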
|
# Copyright 2021 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import numpy as np
import pytest
from aesara import tensor as at
import pymc3 as pm
from pymc3.distributions.shape_utils import (
broadcast_dist_samples_shape,
broadcast_dist_samples_to,
broadcast_distribution_samples,
convert_dims,
convert_shape,
convert_size,
get_broadcastable_dist_samples,
shapes_broadcasting,
to_tuple,
)
from pymc3.exceptions import ShapeWarning
test_shapes = [
(tuple(), (1,), (4,), (5, 4)),
(tuple(), (1,), (7,), (5, 4)),
(tuple(), (1,), (1, 4), (5, 4)),
(tuple(), (1,), (5, 1), (5, 4)),
(tuple(), (1,), (3, 4), (5, 4)),
(tuple(), (1,), (5, 3), (5, 4)),
(tuple(), (1,), (10, 4), (5, 4)),
(tuple(), (1,), (10,), (5, 4)),
(tuple(), (1,), (1, 1, 4), (5, 4)),
(tuple(), (1,), (10, 1, 4), (5, 4)),
(tuple(), (1,), (10, 5, 4), (5, 4)),
]
test_sizes = [
None,
tuple(),
1,
(1,),
10,
(10,),
(1, 1),
(10, 1),
(1, 10),
(5,),
(5, 4),
(1, 1, 1, 1),
]
test_to_shapes = [None, tuple(), (10, 5, 4), (10, 1, 1, 5, 1)]
@pytest.fixture(params=test_sizes, ids=str)
def fixture_sizes(request):
return request.param
@pytest.fixture(params=test_shapes, ids=str)
def fixture_shapes(request):
return request.param
@pytest.fixture(params=[False, True], ids=str)
def fixture_exception_handling(request):
return request.param
@pytest.fixture()
def samples_to_broadcast(fixture_sizes, fixture_shapes):
samples = [np.empty(s) for s in fixture_shapes]
try:
broadcast_shape = broadcast_dist_samples_shape(fixture_shapes, size=fixture_sizes)
except ValueError:
broadcast_shape = None
return fixture_sizes, samples, broadcast_shape
@pytest.fixture(params=test_to_shapes, ids=str)
def samples_to_broadcast_to(request, samples_to_broadcast):
to_shape = request.param
size, samples, broadcast_shape = samples_to_broadcast
if broadcast_shape is not None:
try:
broadcast_shape = broadcast_dist_samples_shape(
[broadcast_shape, to_tuple(to_shape)], size=size
)
except ValueError:
broadcast_shape = None
return to_shape, size, samples, broadcast_shape
@pytest.fixture
def fixture_model():
with pm.Model() as model:
n = 5
dim = 4
with pm.Model():
cov = pm.InverseGamma("cov", alpha=1, beta=1)
x = pm.Normal("x", mu=np.ones((dim,)), sigma=pm.math.sqrt(cov), shape=(n, dim))
eps = pm.HalfNormal("eps", np.ones((n, 1)), shape=(n, dim))
mu = pm.Deterministic("mu", at.sum(x + eps, axis=-1))
y = pm.Normal("y", mu=mu, sigma=1, shape=(n,))
return model, [cov, x, eps, y]
class TestShapesBroadcasting:
@pytest.mark.parametrize(
"bad_input",
[None, [None], "asd", 3.6, {1: 2}, {3}, [8, [8]], "3", ["3"], np.array([[2]])],
ids=str,
)
def test_type_check_raises(self, bad_input):
with pytest.raises(TypeError):
shapes_broadcasting(bad_input, tuple(), raise_exception=True)
with pytest.raises(TypeError):
shapes_broadcasting(bad_input, tuple(), raise_exception=False)
def test_type_check_success(self):
inputs = [3, 3.0, tuple(), [3], (3,), np.array(3), np.array([3])]
out = shapes_broadcasting(*inputs)
assert out == (3,)
def test_broadcasting(self, fixture_shapes, fixture_exception_handling):
shapes = fixture_shapes
raise_exception = fixture_exception_handling
try:
expected_out = np.broadcast(*(np.empty(s) for s in shapes)).shape
except ValueError:
expected_out = None
if expected_out is None:
if raise_exception:
with pytest.raises(ValueError):
shapes_broadcasting(*shapes, raise_exception=raise_exception)
else:
out = shapes_broadcasting(*shapes, raise_exception=raise_exception)
assert out is None
else:
out = shapes_broadcasting(*shapes, raise_exception=raise_exception)
assert out == expected_out
def test_broadcast_dist_samples_shape(self, fixture_sizes, fixture_shapes):
size = fixture_sizes
shapes = fixture_shapes
size_ = to_tuple(size)
shapes_ = [
s if s[: min([len(size_), len(s)])] != size_ else s[len(size_) :] for s in shapes
]
try:
expected_out = np.broadcast(*(np.empty(s) for s in shapes_)).shape
except ValueError:
expected_out = None
if expected_out is not None and any(
s[: min([len(size_), len(s)])] == size_ for s in shapes
):
expected_out = size_ + expected_out
if expected_out is None:
with pytest.raises(ValueError):
broadcast_dist_samples_shape(shapes, size=size)
else:
out = broadcast_dist_samples_shape(shapes, size=size)
assert out == expected_out
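# Illustrative examples of the semantics exercised above (a sketch derived from
# the test logic, not additional test cases):
#   broadcast_dist_samples_shape([(10, 4), (4,)], size=10)    -> (10, 4)
#   broadcast_dist_samples_shape([(5, 4), (1, 4)], size=None) -> (5, 4)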
class TestSamplesBroadcasting:
def test_broadcast_distribution_samples(self, samples_to_broadcast):
size, samples, broadcast_shape = samples_to_broadcast
if broadcast_shape is not None:
outs = broadcast_distribution_samples(samples, size=size)
assert all(o.shape == broadcast_shape for o in outs)
else:
with pytest.raises(ValueError):
broadcast_distribution_samples(samples, size=size)
def test_get_broadcastable_dist_samples(self, samples_to_broadcast):
size, samples, broadcast_shape = samples_to_broadcast
if broadcast_shape is not None:
size_ = to_tuple(size)
outs, out_shape = get_broadcastable_dist_samples(
samples, size=size, return_out_shape=True
)
assert out_shape == broadcast_shape
for i, o in zip(samples, outs):
ishape = i.shape
if ishape[: min([len(size_), len(ishape)])] == size_:
expected_shape = (
size_ + (1,) * (len(broadcast_shape) - len(ishape)) + ishape[len(size_) :]
)
else:
expected_shape = ishape
assert o.shape == expected_shape
assert shapes_broadcasting(*(o.shape for o in outs)) == broadcast_shape
else:
with pytest.raises(ValueError):
get_broadcastable_dist_samples(samples, size=size)
def test_broadcast_dist_samples_to(self, samples_to_broadcast_to):
to_shape, size, samples, broadcast_shape = samples_to_broadcast_to
if broadcast_shape is not None:
outs = broadcast_dist_samples_to(to_shape, samples, size=size)
assert all(o.shape == broadcast_shape for o in outs)
else:
with pytest.raises(ValueError):
broadcast_dist_samples_to(to_shape, samples, size=size)
@pytest.mark.xfail(reason="InverseGamma was not yet refactored")
def test_sample_generate_values(fixture_model, fixture_sizes):
model, RVs = fixture_model
size = to_tuple(fixture_sizes)
with model:
prior = pm.sample_prior_predictive(samples=fixture_sizes)
for rv in RVs:
assert prior[rv.name].shape == size + tuple(rv.distribution.shape)
class TestShapeDimsSize:
@pytest.mark.parametrize("param_shape", [(), (3,)])
@pytest.mark.parametrize("batch_shape", [(), (3,)])
@pytest.mark.parametrize(
"parametrization",
[
"implicit",
"shape",
"shape...",
"dims",
"dims...",
"size",
],
)
def test_param_and_batch_shape_combos(
self, param_shape: tuple, batch_shape: tuple, parametrization: str
):
coords = {}
param_dims = []
batch_dims = []
# Create coordinates corresponding to the parameter shape
for d in param_shape:
dname = f"param_dim_{d}"
coords[dname] = [f"c_{i}" for i in range(d)]
param_dims.append(dname)
assert len(param_dims) == len(param_shape)
# Create coordinates corresponding to the batch shape
for d in batch_shape:
dname = f"batch_dim_{d}"
coords[dname] = [f"c_{i}" for i in range(d)]
batch_dims.append(dname)
assert len(batch_dims) == len(batch_shape)
with pm.Model(coords=coords) as pmodel:
mu = aesara.shared(np.random.normal(size=param_shape))
with pytest.warns(None):
if parametrization == "implicit":
rv = pm.Normal("rv", mu=mu).shape == param_shape
else:
expected_shape = batch_shape + param_shape
if parametrization == "shape":
rv = pm.Normal("rv", mu=mu, shape=batch_shape + param_shape)
assert rv.eval().shape == expected_shape
elif parametrization == "shape...":
rv = pm.Normal("rv", mu=mu, shape=(*batch_shape, ...))
assert rv.eval().shape == batch_shape + param_shape
elif parametrization == "dims":
rv = pm.Normal("rv", mu=mu, dims=batch_dims + param_dims)
assert rv.eval().shape == expected_shape
elif parametrization == "dims...":
rv = pm.Normal("rv", mu=mu, dims=(*batch_dims, ...))
n_size = len(batch_shape)
n_implied = len(param_shape)
ndim = n_size + n_implied
assert len(pmodel.RV_dims["rv"]) == ndim, pmodel.RV_dims
assert len(pmodel.RV_dims["rv"][:n_size]) == len(batch_dims)
assert len(pmodel.RV_dims["rv"][n_size:]) == len(param_dims)
if n_implied > 0:
assert pmodel.RV_dims["rv"][-1] is None
elif parametrization == "size":
rv = pm.Normal("rv", mu=mu, size=batch_shape + param_shape)
assert rv.eval().shape == expected_shape
else:
raise NotImplementedError("Invalid test case parametrization.")
def test_define_dims_on_the_fly(self):
with pm.Model() as pmodel:
agedata = aesara.shared(np.array([10, 20, 30]))
# Associate the "patient" dim with an implied dimension
age = pm.Normal("age", agedata, dims=("patient",))
assert "patient" in pmodel.dim_lengths
assert pmodel.dim_lengths["patient"].eval() == 3
# Use the dim to replicate a new RV
effect = pm.Normal("effect", 0, dims=("patient",))
assert effect.ndim == 1
assert effect.eval().shape == (3,)
# Now change the length of the implied dimension
agedata.set_value([1, 2, 3, 4])
# The change should propagate all the way through
assert effect.eval().shape == (4,)
@pytest.mark.xfail(reason="Simultaneous use of size and dims is not implemented")
def test_data_defined_size_dimension_can_register_dimname(self):
with pm.Model() as pmodel:
x = pm.Data("x", [[1, 2, 3, 4]], dims=("first", "second"))
assert "first" in pmodel.dim_lengths
assert "second" in pmodel.dim_lengths
# two dimensions are implied; a "third" dimension is created
y = pm.Normal("y", mu=x, size=2, dims=("third", "first", "second"))
assert "third" in pmodel.dim_lengths
            assert y.eval().shape == (2, 1, 4)
def test_can_resize_data_defined_size(self):
with pm.Model() as pmodel:
x = pm.Data("x", [[1, 2, 3, 4]], dims=("first", "second"))
y = pm.Normal("y", mu=0, dims=("first", "second"))
z = pm.Normal("z", mu=y, observed=np.ones((1, 4)))
assert x.eval().shape == (1, 4)
assert y.eval().shape == (1, 4)
assert z.eval().shape == (1, 4)
assert "first" in pmodel.dim_lengths
assert "second" in pmodel.dim_lengths
pmodel.set_data("x", [[1, 2], [3, 4], [5, 6]])
assert x.eval().shape == (3, 2)
assert y.eval().shape == (3, 2)
assert z.eval().shape == (3, 2)
@pytest.mark.xfail(reason="https://github.com/pymc-devs/aesara/issues/390")
    def test_size32_doesnt_break_broadcasting(self):
size32 = at.constant([1, 10], dtype="int32")
rv = pm.Normal.dist(0, 1, size=size32)
assert rv.broadcastable == (True, False)
@pytest.mark.xfail(reason="https://github.com/pymc-devs/aesara/issues/390")
def test_observed_with_column_vector(self):
"""This test is related to https://github.com/pymc-devs/aesara/issues/390 which breaks
broadcastability of column-vector RVs. This unexpected change in type can lead to
incompatibilities during graph rewriting for model.logp evaluation.
"""
with pm.Model() as model:
# The `observed` is a broadcastable column vector
obs = at.as_tensor_variable(np.ones((3, 1), dtype=aesara.config.floatX))
assert obs.broadcastable == (False, True)
            # Both shapes describe broadcastable column vectors
size64 = at.constant([3, 1], dtype="int64")
# But the second shape is upcasted from an int32 vector
cast64 = at.cast(at.constant([3, 1], dtype="int32"), dtype="int64")
pm.Normal("size64", mu=0, sd=1, size=size64, observed=obs)
pm.Normal("shape64", mu=0, sd=1, shape=size64, observed=obs)
model.logp()
pm.Normal("size_cast64", mu=0, sd=1, size=cast64, observed=obs)
pm.Normal("shape_cast64", mu=0, sd=1, shape=cast64, observed=obs)
model.logp()
def test_dist_api_works(self):
mu = aesara.shared(np.array([1, 2, 3]))
with pytest.raises(NotImplementedError, match="API is not supported"):
pm.Normal.dist(mu=mu, dims=("town",))
assert pm.Normal.dist(mu=mu, shape=(3,)).eval().shape == (3,)
assert pm.Normal.dist(mu=mu, shape=(5, 3)).eval().shape == (5, 3)
assert pm.Normal.dist(mu=mu, shape=(7, ...)).eval().shape == (7, 3)
assert pm.Normal.dist(mu=mu, size=(3,)).eval().shape == (3,)
assert pm.Normal.dist(mu=mu, size=(4, 3)).eval().shape == (4, 3)
def test_mvnormal_shape_size_difference(self):
# Parameters add one batch dimension (4), shape is what you'd expect.
# Under the hood the shape(4, 3) becomes size=(4,) and the RV is initially
# created as (4, 4, 3). The internal ndim-check then recreates it with size=None.
rv = pm.MvNormal.dist(mu=np.ones((4, 3)), cov=np.eye(3), shape=(4, 3))
assert rv.ndim == 2
assert tuple(rv.shape.eval()) == (4, 3)
# shape adds two dimensions (5, 4)
# Under the hood the shape=(5, 4, 3) becomes size=(5, 4).
# The RV is created as (5, 4, 3) right away.
rv = pm.MvNormal.dist(mu=[1, 2, 3], cov=np.eye(3), shape=(5, 4, 3))
assert rv.ndim == 3
assert tuple(rv.shape.eval()) == (5, 4, 3)
# parameters add 1 batch dimension (4), shape adds another (5)
# Under the hood the shape=(5, 4, 3) becomes size=(5, 4)
# The RV is initially created as (5, 4, 3, 4, 3) and then recreated and resized.
rv = pm.MvNormal.dist(mu=np.ones((4, 3)), cov=np.eye(3), shape=(5, 4, 3))
assert rv.ndim == 3
assert tuple(rv.shape.eval()) == (5, 4, 3)
rv = pm.MvNormal.dist(mu=np.ones((4, 3, 2)), cov=np.eye(2), shape=(6, 5, ...))
assert rv.ndim == 5
assert tuple(rv.shape.eval()) == (6, 5, 4, 3, 2)
with pytest.warns(None):
rv = pm.MvNormal.dist(mu=[1, 2, 3], cov=np.eye(3), size=(5, 4))
assert tuple(rv.shape.eval()) == (5, 4, 3)
# When using `size` the API behaves like Aesara/NumPy
with pytest.warns(
ShapeWarning,
match=r"You may have expected a \(2\+1\)-dimensional RV, but the resulting RV will be 5-dimensional",
):
rv = pm.MvNormal.dist(mu=np.ones((5, 4, 3)), cov=np.eye(3), size=(5, 4))
assert tuple(rv.shape.eval()) == (5, 4, 5, 4, 3)
def test_convert_dims(self):
assert convert_dims(dims="town") == ("town",)
with pytest.raises(ValueError, match="must be a tuple, str or list"):
convert_dims(3)
with pytest.raises(ValueError, match="may only appear in the last position"):
convert_dims(dims=(..., "town"))
def test_convert_shape(self):
assert convert_shape(5) == (5,)
with pytest.raises(ValueError, match="tuple, TensorVariable, int or list"):
convert_shape(shape="notashape")
with pytest.raises(ValueError, match="may only appear in the last position"):
convert_shape(shape=(3, ..., 2))
def test_convert_size(self):
assert convert_size(7) == (7,)
with pytest.raises(ValueError, match="tuple, TensorVariable, int or list"):
convert_size(size="notasize")
with pytest.raises(ValueError, match="cannot contain"):
convert_size(size=(3, ...))
def test_lazy_flavors(self):
assert pm.Uniform.dist(2, [4, 5], size=[3, 2]).eval().shape == (3, 2)
assert pm.Uniform.dist(2, [4, 5], shape=[3, 2]).eval().shape == (3, 2)
with pm.Model(coords=dict(town=["Greifswald", "Madrid"])):
assert pm.Normal("n1", mu=[1, 2], dims="town").eval().shape == (2,)
assert pm.Normal("n2", mu=[1, 2], dims=["town"]).eval().shape == (2,)
def test_invalid_flavors(self):
with pytest.raises(ValueError, match="Passing both"):
pm.Normal.dist(0, 1, shape=(3,), size=(3,))
with pm.Model():
with pytest.raises(ValueError, match="Passing both"):
pm.Normal("n", shape=(2,), dims=("town",))
with pytest.raises(ValueError, match="Passing both"):
pm.Normal("n", dims=("town",), size=(2,))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Blueprint
label_writer_450 = Blueprint(
'label_writer_450', __name__, template_folder='templates', static_folder='static'
)
from . import views
from . import zpl
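# Registration sketch (illustrative only; the application object and url_prefix
# below are assumptions, not part of this package):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(label_writer_450, url_prefix='/label_writer_450')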
|
import dwavebinarycsp
import dimod
csp = dwavebinarycsp.factories.random_2in4sat(8, 4)  # 8 variables, 4 clauses
bqm = dwavebinarycsp.stitch(csp)
resp = dimod.ExactSolver().sample(bqm)
for sample, energy in resp.data(['sample', 'energy']):
print(sample, csp.check(sample), energy)
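# Extension sketch (assumes dimod's SampleSet API): the lowest-energy sample can
# also be read directly from the response instead of iterating over all of them.
#   best = resp.first            # namedtuple with .sample, .energy, .num_occurrences
#   print(best.sample, csp.check(best.sample), best.energy)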
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
{
'includes': [
'../../common.gypi',
],
'targets': [
{
'target_name': 'filters',
'type': '<(component)',
'sources': [
'h264_bit_reader.cc',
'h264_bit_reader.h',
'h264_byte_to_unit_stream_converter.cc',
'h264_byte_to_unit_stream_converter.h',
'h264_parser.cc',
'h264_parser.h',
],
'dependencies': [
'../../base/base.gyp:base',
],
},
{
'target_name': 'filters_unittest',
'type': '<(gtest_target_type)',
'sources': [
'h264_bit_reader_unittest.cc',
'h264_byte_to_unit_stream_converter_unittest.cc',
'h264_parser_unittest.cc',
],
'dependencies': [
'../../media/base/media_base.gyp:base',
'../../testing/gtest.gyp:gtest',
'../test/media_test.gyp:media_test_support',
'filters',
],
},
],
}
|
#!/usr/bin/env python
'''
**********************************************************************
* Filename : L298N.py
* Description : A driver module for L298N
* Author : Cavon
* Brand : SunFounder
* E-mail : service@sunfounder.com
* Website : www.sunfounder.com
* Update : Cavon 2016-09-23 New release
**********************************************************************
'''
import RPi.GPIO as GPIO
class Motor(object):
    ''' Motor driver class
        Set direction_channel_a to the GPIO channel which connects to MA,
        set direction_channel_b to the GPIO channel which connects to MB;
        both GPIO channels use BCM numbering.
        Set the pwm attribute to a callable that takes a single speed value
        (0-100), e.g. a wrapper around an RPi.GPIO or PCA9685 PWM channel.
        Set debug to True to print out debug information.
    '''
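    # Usage sketch (illustrative only; the pin numbers and PWM wrapper are
    # assumptions -- see test() at the bottom of this file for a full example):
    #   motor = Motor(direction_channel_a=17, direction_channel_b=18)
    #   motor.pwm = lambda speed: pwm_channel.ChangeDutyCycle(speed)
    #   motor.forward()
    #   motor.speed = 50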
_DEBUG = False
_DEBUG_INFO = 'DEBUG "L298N.py":'
def __init__(self, direction_channel_a, direction_channel_b, pwm=None, offset=True):
        '''Initialize a motor with the given direction channels and an optional PWM callable.'''
if self._DEBUG:
print self._DEBUG_INFO, "Debug on"
self.direction_channel_a = direction_channel_a
self.direction_channel_b = direction_channel_b
self._offset = offset
self.forward_offset = self._offset
self.backward_offset = not self.forward_offset
self._pwm = pwm
self._speed = 0
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
if self._DEBUG:
print self._DEBUG_INFO, 'setup motor direction channel at', direction_channel_a
print self._DEBUG_INFO, 'setup motor pwm channel as', self._pwm.__name__
GPIO.setup(self.direction_channel_a, GPIO.OUT)
GPIO.setup(self.direction_channel_b, GPIO.OUT)
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, value):
        ''' Set offset to flip the forward/backward direction '''
        if value not in (True, False):
            raise ValueError('offset value must be a bool, not "{0}"'.format(value))
        self._offset = value
        self.forward_offset = value
        self.backward_offset = not self.forward_offset
        if self._DEBUG:
            print self._DEBUG_INFO, 'Set offset to %s' % self._offset
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, speed):
        ''' Set speed to the given value (0 to 100) '''
        if speed not in range(0, 101):
            raise ValueError('speed ranges from 0 to 100, not "{0}"'.format(speed))
if not callable(self._pwm):
            raise ValueError('pwm is not callable, please set Motor.pwm to a PWM control function that takes a single speed argument')
if self._DEBUG:
print self._DEBUG_INFO, 'Set speed to: ', speed
self._speed = speed
self._pwm(self._speed)
def forward(self):
''' Set the motor direction to forward '''
GPIO.output(self.direction_channel_a, not self.forward_offset)
GPIO.output(self.direction_channel_b, self.forward_offset)
self.speed = self._speed
if self._DEBUG:
print self._DEBUG_INFO, 'Motor moving forward (%s)' % str((GPIO.LOW, GPIO.HIGH))
def backward(self):
''' Set the motor direction to backward '''
GPIO.output(self.direction_channel_a, not self.backward_offset)
GPIO.output(self.direction_channel_b, self.backward_offset)
self.speed = self._speed
if self._DEBUG:
print self._DEBUG_INFO, 'Motor moving backward (%s)' % str((GPIO.HIGH, GPIO.LOW))
def stop(self):
''' Stop the motor by giving a 0 speed '''
if self._DEBUG:
print self._DEBUG_INFO, 'Motor stop'
self.speed = 0
GPIO.output(self.direction_channel_a, GPIO.LOW)
GPIO.output(self.direction_channel_b, GPIO.LOW)
@property
    def debug(self):
return self._DEBUG
@debug.setter
def debug(self, debug):
''' Set if debug information shows '''
if debug in (True, False):
self._DEBUG = debug
else:
raise ValueError('debug must be "True" (Set debug on) or "False" (Set debug off), not "{0}"'.format(debug))
if self._DEBUG:
print self._DEBUG_INFO, "Set debug on"
else:
print self._DEBUG_INFO, "Set debug off"
@property
def pwm(self):
return self._pwm
@pwm.setter
def pwm(self, pwm):
if self._DEBUG:
print self._DEBUG_INFO, 'pwm set'
self._pwm = pwm
def test():
import time
print "********************************************"
print "* *"
print "* L298N *"
print "* *"
print "* Connect MA to BCM17 *"
print "* Connect MB to BCM18 *"
print "* Connect PWMA to BCM27 *"
print "* Connect PWMB to BCM12 *"
print "* *"
print "********************************************"
GPIO.setmode(GPIO.BCM)
GPIO.setup((5, 6), GPIO.OUT)
    GPIO.setup((13, 19), GPIO.OUT)
    GPIO.setup((27, 22), GPIO.OUT)  # PWM pins must be set up before GPIO.PWM()
    a = GPIO.PWM(27, 60)
    b = GPIO.PWM(22, 60)
a.start(0)
b.start(0)
def a_speed(value):
a.ChangeDutyCycle(value)
def b_speed(value):
b.ChangeDutyCycle(value)
motorA = Motor(5,6)
motorB = Motor(13,19)
motorA.debug = True
motorB.debug = True
motorA.pwm = a_speed
motorB.pwm = b_speed
delay = 0.05
motorA.forward()
for i in range(0, 101):
motorA.speed = i
time.sleep(delay)
for i in range(100, -1, -1):
motorA.speed = i
time.sleep(delay)
motorA.backward()
for i in range(0, 101):
motorA.speed = i
time.sleep(delay)
for i in range(100, -1, -1):
motorA.speed = i
time.sleep(delay)
motorB.forward()
for i in range(0, 101):
motorB.speed = i
time.sleep(delay)
for i in range(100, -1, -1):
motorB.speed = i
time.sleep(delay)
motorB.backward()
for i in range(0, 101):
motorB.speed = i
time.sleep(delay)
for i in range(100, -1, -1):
motorB.speed = i
time.sleep(delay)
if __name__ == '__main__':
test()
|
import util
import re
import soupsieve
from bs4 import BeautifulSoup
class DiffParser():
def __init__(self, handler):
self.handler = handler
def _handle_new_revision(self, id, desc, body):
username = util.get_regex_match(body, ">([^>]+) created this revision")
if username is not None:
self.handler.on_diff_new(id, desc, username)
def _handle_request_changes(self, id, desc, body):
username = util.get_regex_match(body, ">([^>]+) requested changes to this revision.")
if username is not None:
self.handler.on_diff_request_changes(id, desc, username)
elif 'This revision now requires changes to proceed' in body:
self.handler.on_diff_request_changes(id, desc, None)
def _handle_comments(self, id, desc, body):
username = util.get_regex_match(body, ">([^>]+) added a comment.")
if username is not None:
soup = BeautifulSoup(body, 'html.parser')
paragraphs = soup.select("div > div > p")
comment = None
if len(paragraphs) > 0 and len(paragraphs[0].parent.text) > 0:
comment = paragraphs[0].parent.text
self.handler.on_diff_comment(id, desc, username, comment)
def _handle_inline_comments(self, id, desc, body):
username = util.get_regex_match(body, ">([^>]+) added inline comments")
if username is not None:
soup = BeautifulSoup(body, 'html.parser')
comment_divs = soup.select("div > strong + div > div > div > div")
comments = []
# try to find any actual comments
for div in comment_divs:
# filter out those with color - those are old comments
new_comments = [comment.text for comment in div.select("p") if 'color' not in comment.parent['style']]
comments += new_comments
self.handler.on_diff_inline_comments(id, desc, username, comments)
def _handle_ready_to_land(self, id, desc, body):
if 'This revision is now accepted and ready to land.' in body:
self.handler.on_diff_ready_to_land(id, desc)
def parse(self, id, desc, body):
        self._handle_inline_comments(id, desc, body)
        self._handle_new_revision(id, desc, body)
        self._handle_comments(id, desc, body)
        self._handle_request_changes(id, desc, body)
self._handle_ready_to_land(id, desc, body)
class TaskParser():
def __init__(self, handler):
self.handler = handler
def _handle_comments(self, id, desc, body):
username = util.get_regex_match(body, ">([^>]+) added a comment.")
if username is not None:
soup = BeautifulSoup(body, 'html.parser')
paragraphs = soup.select("div > div > p")
comment = None
if len(paragraphs) > 0 and len(paragraphs[0].parent.text) > 0:
comment = paragraphs[0].parent.text
self.handler.on_task_comment(id, desc, username, comment)
def _handle_task_move(self, id, desc, body):
username = util.get_regex_match(body, ">([^>]+) moved this task")
        movement = util.get_regex_match(body, r"moved this task ([^.]+)")
if username is not None:
self.handler.on_task_move(id, desc, username)
def parse(self, id, desc, body):
self._handle_comments(id, desc, body)
self._handle_task_move(id, desc, body)
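# Sketch of the handler interface these parsers expect, derived from the calls
# above (the class name is hypothetical; only the method signatures matter):
#   class Handler:
#       def on_diff_new(self, id, desc, username): ...
#       def on_diff_request_changes(self, id, desc, username): ...
#       def on_diff_comment(self, id, desc, username, comment): ...
#       def on_diff_inline_comments(self, id, desc, username, comments): ...
#       def on_diff_ready_to_land(self, id, desc): ...
#       def on_task_comment(self, id, desc, username, comment): ...
#       def on_task_move(self, id, desc, username): ...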
|
import numpy as np
from .decision_templates_base import make_decision_profiles, make_decision_templates
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.cluster import OPTICS
class OpticsTemplatesClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, estimators, use_weights=False):
self.estimators = estimators
self.use_weights = use_weights
def fit(self, X, y):
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
self.classifiers_pool_ = self.estimators
dp = make_decision_profiles(X, self.classifiers_pool_)
self.decision_templates_ = []
self.decision_templates_classes_ = []
self.decision_templates_weights_ = []
for label in np.unique(y):
cl = OPTICS(min_samples=3).fit(dp[y == label])
dt, _ = make_decision_templates(dp[y == label], cl.labels_)
self.decision_templates_.append(dt)
self.decision_templates_classes_.append(np.repeat(label, len(dt)))
if self.use_weights:
labels, counts = np.unique(cl.labels_, return_counts=True)
self.decision_templates_weights_.append(counts / counts.sum())
# TODO: Consider variance as alternative
else:
self.decision_templates_weights_.append(np.ones(len(dt)))
self.decision_templates_ = np.concatenate(self.decision_templates_)
self.decision_templates_classes_ = np.concatenate(self.decision_templates_classes_)
self.decision_templates_weights_ = np.concatenate(self.decision_templates_weights_)
return self
def predict(self, X):
check_is_fitted(self, ['classes_', 'classifiers_pool_', 'decision_templates_', 'decision_templates_classes_', 'decision_templates_weights_'])
X = check_array(X)
dp = make_decision_profiles(X, self.classifiers_pool_)
distances = np.array([np.linalg.norm(x - dp, axis=1) / w for x, w in zip(self.decision_templates_, self.decision_templates_weights_)])
return self.decision_templates_classes_.take(np.argmin(distances, axis=0))
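# Usage sketch (assumes `estimators` is a list of already-fitted classifiers that
# make_decision_profiles understands, e.g. probabilistic scikit-learn models):
#   clf = OpticsTemplatesClassifier(estimators=[clf_a, clf_b, clf_c], use_weights=True)
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)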
|
#coding=utf-8
from django.db import models
from aops.settings import INT_CHOICES
from cmdb import signals
from cmdb.models.idc import Idc
from cmdb.models.contract import Contract
from django.db import IntegrityError
class IdcContract(models.Model):
idc = models.ForeignKey(Idc)
contract = models.ForeignKey(Contract)
is_dynamic = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
is_deleted = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
create_time = models.DateTimeField(blank=True, null=True, auto_now_add=True)
update_time = models.DateTimeField(blank=True, null=True, auto_now=True)
class Meta:
db_table = 'idc_contract'
ordering = ['-id']
app_label = 'cmdb'
unique_together = (("idc","contract"),)
def __unicode__(self):
return u"%s - %s" % (self.idc ,self.contract)
    # Expose an items() method so the fields can be iterated in template tags
def items(self):
return [(field, field.value_to_string(self)) for field in IdcContract._meta.fields]
def delete(self, *args, **kwargs):
super(IdcContract, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
        if self.id is not None:
r_idc_contract = IdcContract.objects.get(pk=self.id)
else:
print 'Alter'
super(IdcContract, self).save(*args, **kwargs)
|
commands = []
while True:
try: line = input()
    except EOFError: break
if not line: break
commands.append(line.split())
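# Brute-force search: try a few initial values of register 'a' and report the
# first one whose program emits an alternating 0/1 signal for 100 outputs.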
for starting_a in range(155, 160):
d = {'a': starting_a, 'b': 0, 'c': 0, 'd': 0}
i = 0
out = []
while len(out) < 100:
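        # Peephole optimization: an inc/dec/jnz(-2) loop just adds one register
        # into another, so collapse it into a single addition instead of
        # simulating every iteration.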
if commands[i][0] == 'inc' and commands[i+1][0] == 'dec' and commands[i+2][0] == 'jnz' and commands[i+1][1] == commands[i+2][1] and commands[i+2][2] == '-2':
d[commands[i][1]] += d[commands[i+1][1]]
d[commands[i+1][1]] = 0
i += 3
continue
elif commands[i][0] == 'cpy':
a, b = commands[i][1:]
if a.isalpha():
d[b] = d[a]
elif b.isalpha():
d[b] = int(a)
elif commands[i][0] == 'inc':
d[commands[i][1]] += 1
elif commands[i][0] == 'dec':
d[commands[i][1]] -= 1
elif commands[i][0] == 'jnz':
a, b = commands[i][1:]
if a.isalpha():
if d[a] != 0:
i += (d[b] if b.isalpha() else int(b)) - 1
else:
if int(a) != 0:
i += (d[b] if b.isalpha() else int(b)) - 1
elif commands[i][0] == 'tgl':
a = commands[i][1]
if a.isalpha(): a = d[a]
else: a = int(a)
if i + a >= 0 and i + a < len(commands):
command = commands[i + a][0]
if command == 'inc': command = 'dec'
elif command == 'dec': command = 'inc'
elif command == 'jnz': command = 'cpy'
elif command == 'cpy': command = 'jnz'
elif command == 'tgl': command = 'inc'
commands[i + a][0] = command
elif commands[i][0] == 'out':
out.append(d[commands[i][1]])
i += 1
if out == [i % 2 for i in range(100)]:
print(starting_a)
|