hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c97448796dfb926f6770fff4a800e18b1e4d6c32 | 12,733 | py | Python | mongoengine/dereference.py | svanburen/mongoengine | b250abdd12d9b0d6fba5c40c7d57c4dfbd4581de | [
"MIT"
] | null | null | null | mongoengine/dereference.py | svanburen/mongoengine | b250abdd12d9b0d6fba5c40c7d57c4dfbd4581de | [
"MIT"
] | null | null | null | mongoengine/dereference.py | svanburen/mongoengine | b250abdd12d9b0d6fba5c40c7d57c4dfbd4581de | [
"MIT"
] | null | null | null | from bson import SON, DBRef
from mongoengine.base import (
BaseDict,
BaseList,
EmbeddedDocumentList,
TopLevelDocumentMetaclass,
get_document,
)
from mongoengine.base.datastructures import LazyReference
from mongoengine.connection import get_db
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import DictField, ListField, MapField, ReferenceField
from mongoengine.queryset import QuerySet
class DeReference:
    """
    Callable helper that replaces :class:`~bson.DBRef`/``_ref`` entries found
    in a (possibly nested) structure with the corresponding fetched documents,
    up to a configurable depth.
    """

    def __call__(self, items, max_depth=1, instance=None, name=None):
        """
        Cheaply dereferences the items to a set depth.
        Also handles the conversion of complex data types.

        :param items: The iterable (dict, list, queryset) to be dereferenced.
        :param max_depth: The maximum depth to recurse to
        :param instance: The owning instance used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param name: The name of the field, used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        """
        if items is None or isinstance(items, str):
            return items

        # cheapest way to convert a queryset to a list
        # list(queryset) uses a count() query to determine length
        if isinstance(items, QuerySet):
            items = [i for i in items]

        self.max_depth = max_depth
        doc_type = None

        if instance and isinstance(
            instance, (Document, EmbeddedDocument, TopLevelDocumentMetaclass)
        ):
            doc_type = instance._fields.get(name)
            # Unwrap container fields (ListField/MapField/...) down to the
            # innermost field to find out what is actually referenced.
            while hasattr(doc_type, "field"):
                doc_type = doc_type.field

            if isinstance(doc_type, ReferenceField):
                field = doc_type
                doc_type = doc_type.document_type
                is_list = not hasattr(items, "items")

                # Already fully dereferenced? Then there is nothing to do.
                if is_list and all([i.__class__ == doc_type for i in items]):
                    return items
                elif not is_list and all(
                    [i.__class__ == doc_type for i in items.values()]
                ):
                    return items
                elif not field.dbref:
                    # We must turn the ObjectIds into DBRefs

                    # Recursively dig into the sub items of a list/dict
                    # to turn the ObjectIds into DBRefs
                    def _get_items_from_list(items):
                        # Convert raw ObjectIds in a list to DBRefs, recursing
                        # into nested lists/dicts.
                        new_items = []
                        for v in items:
                            value = v
                            if isinstance(v, dict):
                                value = _get_items_from_dict(v)
                            elif isinstance(v, list):
                                value = _get_items_from_list(v)
                            elif not isinstance(v, (DBRef, Document)):
                                value = field.to_python(v)
                            new_items.append(value)
                        return new_items

                    def _get_items_from_dict(items):
                        # Convert raw ObjectIds in a dict's values to DBRefs,
                        # recursing into nested lists/dicts.
                        new_items = {}
                        for k, v in items.items():
                            value = v
                            if isinstance(v, list):
                                value = _get_items_from_list(v)
                            elif isinstance(v, dict):
                                value = _get_items_from_dict(v)
                            elif not isinstance(v, (DBRef, Document)):
                                value = field.to_python(v)
                            new_items[k] = value
                        return new_items

                    if not hasattr(items, "items"):
                        items = _get_items_from_list(items)
                    else:
                        items = _get_items_from_dict(items)

        # 1. collect all references, 2. bulk-fetch them, 3. splice the fetched
        # documents back into the original structure.
        self.reference_map = self._find_references(items)
        self.object_map = self._fetch_objects(doc_type=doc_type)
        return self._attach_objects(items, 0, instance, name)

    def _find_references(self, items, depth=0):
        """
        Recursively finds all db references to be dereferenced

        :param items: The iterable (dict, list, queryset)
        :param depth: The current depth of recursion
        :return: a dict mapping a document class (or collection name) to the
            set of ids that must be fetched from it
        """
        reference_map = {}
        if not items or depth >= self.max_depth:
            return reference_map

        # Determine the iterator to use
        if isinstance(items, dict):
            iterator = list(items.values())
        else:
            iterator = items

        # Recursively find dbreferences
        depth += 1
        for item in iterator:
            if isinstance(item, (Document, EmbeddedDocument)):
                # Inspect each field of an (embedded) document for references.
                for field_name, field in item._fields.items():
                    v = item._data.get(field_name, None)
                    if isinstance(v, LazyReference):
                        # LazyReference inherits DBRef but should not be dereferenced here !
                        continue
                    elif isinstance(v, DBRef):
                        reference_map.setdefault(field.document_type, set()).add(v.id)
                    elif isinstance(v, (dict, SON)) and "_ref" in v:
                        # Generic reference stored as {"_cls": ..., "_ref": DBRef}
                        reference_map.setdefault(get_document(v["_cls"]), set()).add(
                            v["_ref"].id
                        )
                    elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                        field_cls = getattr(
                            getattr(field, "field", None), "document_type", None
                        )
                        references = self._find_references(v, depth)
                        for key, refs in references.items():
                            if isinstance(
                                field_cls, (Document, TopLevelDocumentMetaclass)
                            ):
                                key = field_cls
                            reference_map.setdefault(key, set()).update(refs)
            elif isinstance(item, LazyReference):
                # LazyReference inherits DBRef but should not be dereferenced here !
                continue
            elif isinstance(item, DBRef):
                reference_map.setdefault(item.collection, set()).add(item.id)
            elif isinstance(item, (dict, SON)) and "_ref" in item:
                reference_map.setdefault(get_document(item["_cls"]), set()).add(
                    item["_ref"].id
                )
            elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
                references = self._find_references(item, depth - 1)
                for key, refs in references.items():
                    reference_map.setdefault(key, set()).update(refs)

        return reference_map

    def _fetch_objects(self, doc_type=None):
        """Fetch all references and convert to their document objects

        :return: a dict mapping ``(collection name, id)`` to the fetched
            document
        """
        object_map = {}
        for collection, dbrefs in self.reference_map.items():

            # we use getattr instead of hasattr because hasattr swallows any exception under python2
            # so it could hide nasty things without raising exceptions (cfr bug #1688))
            ref_document_cls_exists = getattr(collection, "objects", None) is not None

            if ref_document_cls_exists:
                col_name = collection._get_collection_name()
                refs = [
                    dbref for dbref in dbrefs if (col_name, dbref) not in object_map
                ]
                references = collection.objects.in_bulk(refs)
                for key, doc in references.items():
                    object_map[(col_name, key)] = doc
            else:  # Generic reference: use the refs data to convert to document
                if isinstance(doc_type, (ListField, DictField, MapField)):
                    continue

                refs = [
                    dbref for dbref in dbrefs if (collection, dbref) not in object_map
                ]

                if doc_type:
                    references = doc_type._get_db()[collection].find(
                        {"_id": {"$in": refs}}
                    )
                    for ref in references:
                        doc = doc_type._from_son(ref)
                        object_map[(collection, doc.id)] = doc
                else:
                    references = get_db()[collection].find({"_id": {"$in": refs}})
                    for ref in references:
                        if "_cls" in ref:
                            doc = get_document(ref["_cls"])._from_son(ref)
                        elif doc_type is None:
                            # Guess the document class from the collection
                            # name (snake_case -> CamelCase).
                            doc = get_document(
                                "".join(x.capitalize() for x in collection.split("_"))
                            )._from_son(ref)
                        else:
                            doc = doc_type._from_son(ref)
                        object_map[(collection, doc.id)] = doc
        return object_map

    def _attach_objects(self, items, depth=0, instance=None, name=None):
        """
        Recursively finds all db references to be dereferenced

        :param items: The iterable (dict, list, queryset)
        :param depth: The current depth of recursion
        :param instance: The owning instance used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param name: The name of the field, used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        """
        if not items:
            if isinstance(items, (BaseDict, BaseList)):
                return items

            if instance:
                # Wrap empty containers so that change tracking still works.
                if isinstance(items, dict):
                    return BaseDict(items, instance, name)
                else:
                    return BaseList(items, instance, name)

        if isinstance(items, (dict, SON)):
            if "_ref" in items:
                # Single generic reference: swap it for the fetched document.
                return self.object_map.get(
                    (items["_ref"].collection, items["_ref"].id), items
                )
            elif "_cls" in items:
                doc = get_document(items["_cls"])._from_son(items)
                # "_cls" must not be present while recursing into the data,
                # so pop it and restore it afterwards.
                _cls = doc._data.pop("_cls", None)
                del items["_cls"]
                doc._data = self._attach_objects(doc._data, depth, doc, None)
                if _cls is not None:
                    doc._data["_cls"] = _cls
                return doc

        if not hasattr(items, "items"):
            is_list = True
            list_type = BaseList
            if isinstance(items, EmbeddedDocumentList):
                list_type = EmbeddedDocumentList
            as_tuple = isinstance(items, tuple)
            iterator = enumerate(items)
            data = []
        else:
            is_list = False
            iterator = list(items.items())
            data = {}

        depth += 1
        for k, v in iterator:
            if is_list:
                data.append(v)
            else:
                data[k] = v

            if k in self.object_map and not is_list:
                data[k] = self.object_map[k]
            elif isinstance(v, (Document, EmbeddedDocument)):
                # Replace DBRefs stored directly on the document's fields.
                for field_name in v._fields:
                    v = data[k]._data.get(field_name, None)
                    if isinstance(v, DBRef):
                        data[k]._data[field_name] = self.object_map.get(
                            (v.collection, v.id), v
                        )
                    elif isinstance(v, (dict, SON)) and "_ref" in v:
                        data[k]._data[field_name] = self.object_map.get(
                            (v["_ref"].collection, v["_ref"].id), v
                        )
                    elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                        item_name = "{}.{}.{}".format(name, k, field_name)
                        data[k]._data[field_name] = self._attach_objects(
                            v, depth, instance=instance, name=item_name
                        )
            elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                item_name = f"{name}.{k}" if name else name
                data[k] = self._attach_objects(
                    v, depth - 1, instance=instance, name=item_name
                )
            elif isinstance(v, DBRef) and hasattr(v, "id"):
                data[k] = self.object_map.get((v.collection, v.id), v)

        if instance and name:
            if is_list:
                return tuple(data) if as_tuple else list_type(data, instance, name)
            return BaseDict(data, instance, name)
        depth += 1  # NOTE(review): local variable, no effect past this point
        return data
| 43.457338 | 100 | 0.512369 |
16b924c8d1cc2ab55305da31cf49bf4c896718de | 1,216 | py | Python | gamelayer/graphics/image_loader.py | Windspar/Gamelayer | 65e1cf11548bc02bc49348eb265c209172c14844 | [
"MIT"
] | null | null | null | gamelayer/graphics/image_loader.py | Windspar/Gamelayer | 65e1cf11548bc02bc49348eb265c209172c14844 | [
"MIT"
] | null | null | null | gamelayer/graphics/image_loader.py | Windspar/Gamelayer | 65e1cf11548bc02bc49348eb265c209172c14844 | [
"MIT"
] | null | null | null | from pathlib import Path
from os import listdir, path
from pygame.image import load as image_load
from pygame.transform import scale as image_scale
DEFAULT_IMAGE_TYPES = "bmp", "png", "jpg", "gif"
class ImageLoader:
    """Load and cache pygame surfaces from a directory, keyed by file stem."""

    def __init__(self, location, alpha=False, image_types=DEFAULT_IMAGE_TYPES, scale=None):
        # Mapping of image name (filename without extension) -> surface.
        self.images = {}
        self.load(location, alpha, image_types, scale)

    def __getitem__(self, key):
        # Allow ``loader["name"]`` access to a cached surface.
        return self.images[key]

    def clear(self):
        """Drop every cached surface."""
        self.images = {}

    def load(self, location, alpha=False, image_types=DEFAULT_IMAGE_TYPES, scale=None):
        """Load every matching image file under *location* into the cache."""
        directory = Path(location)
        if not directory.exists():
            return
        for entry in listdir(directory):
            entry = Path(entry)
            # Only pick up files whose extension is a recognized image type.
            if entry.suffix[1:] not in image_types:
                continue
            surface = image_load(path.join(directory, entry))
            if scale:
                surface = image_scale(surface, scale)
            # convert_alpha keeps per-pixel transparency; convert is the
            # plain display-format conversion otherwise.
            surface = surface.convert_alpha() if alpha else surface.convert()
            self.images[entry.stem] = surface
| 31.179487 | 91 | 0.564145 |
0961185fe8ceada895971efeb866a97f14646e4f | 20,026 | py | Python | pytato/utils.py | inducer/pytato | 8cf2575cd6cdb36891f2c57469dfc671e573450a | [
"MIT"
] | 2 | 2020-10-13T15:56:52.000Z | 2021-07-28T20:50:35.000Z | pytato/utils.py | inducer/pytato | 8cf2575cd6cdb36891f2c57469dfc671e573450a | [
"MIT"
] | 215 | 2020-10-15T19:49:59.000Z | 2022-03-31T15:37:51.000Z | pytato/utils.py | inducer/pytato | 8cf2575cd6cdb36891f2c57469dfc671e573450a | [
"MIT"
] | 6 | 2020-10-14T23:31:40.000Z | 2022-02-21T20:32:57.000Z | from __future__ import annotations
__copyright__ = "Copyright (C) 2021 Kaushik Kulkarni"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import islpy as isl
import pymbolic.primitives as prim
from typing import (Tuple, List, Union, Callable, Any, Sequence, Dict,
Optional, Iterable, TypeVar)
from pytato.array import (Array, ShapeType, IndexLambda, SizeParam, ShapeComponent,
DtypeOrScalar, ArrayOrScalar, BasicIndex,
AdvancedIndexInContiguousAxes,
AdvancedIndexInNoncontiguousAxes,
ConvertibleToIndexExpr, IndexExpr, NormalizedSlice)
from pytato.scalar_expr import (ScalarExpression, IntegralScalarExpression,
SCALAR_CLASSES, INT_CLASSES, BoolT)
from pytools import UniqueNameGenerator
from pytato.transform import Mapper
__doc__ = """
Helper routines
---------------
.. autofunction:: are_shape_components_equal
.. autofunction:: are_shapes_equal
.. autofunction:: get_shape_after_broadcasting
.. autofunction:: dim_to_index_lambda_components
"""
# {{{ partition
Tpart = TypeVar("Tpart")


def partition(pred: Callable[[Tpart], bool],
              iterable: Iterable[Tpart]) -> Tuple[List[Tpart],
                                                  List[Tpart]]:
    """
    Use a predicate to partition entries into false entries and true
    entries.

    Unlike the itertools recipe (``tee`` + ``filterfalse`` + ``filter``),
    this makes a single pass over *iterable* and calls *pred* exactly once
    per element.

    :param pred: predicate deciding which bucket each entry belongs to
    :param iterable: the entries to partition
    :return: a ``(false_entries, true_entries)`` pair of lists
    """
    # partition(is_odd, range(10)) --> [0, 2, 4, 6, 8] and [1, 3, 5, 7, 9]
    falses: List[Tpart] = []
    trues: List[Tpart] = []
    for entry in iterable:
        (trues if pred(entry) else falses).append(entry)
    return falses, trues
# }}}
def get_shape_after_broadcasting(
        exprs: Sequence[Union[Array, ScalarExpression]]) -> ShapeType:
    """
    Returns the shape after broadcasting *exprs* in an operation.

    :raises pytato.diagnostic.CannotBroadcastError: if the shapes cannot be
        broadcast together under numpy-style broadcasting rules.
    """
    from pytato.diagnostic import CannotBroadcastError

    # scalars broadcast as 0-d arrays
    shapes = [expr.shape if isinstance(expr, Array) else () for expr in exprs]

    result_dim = max((len(s) for s in shapes), default=0)

    # append leading dimensions of all the shapes with 1's to match result_dim.
    augmented_shapes = [((1,)*(result_dim-len(s)) + s) for s in shapes]

    def _get_result_axis_length(axis_lengths: List[IntegralScalarExpression]
                                ) -> IntegralScalarExpression:
        # An axis of length 1 yields to any other axis length; any other
        # mismatch is a broadcasting error.
        result_axis_len = axis_lengths[0]
        for axis_len in axis_lengths[1:]:
            if are_shape_components_equal(axis_len, result_axis_len):
                pass
            elif are_shape_components_equal(axis_len, 1):
                pass
            elif are_shape_components_equal(result_axis_len, 1):
                result_axis_len = axis_len
            else:
                raise CannotBroadcastError("operands could not be broadcasted "
                                           "together with shapes "
                                           f"{' '.join(str(s) for s in shapes)}.")
        return result_axis_len

    return tuple(_get_result_axis_length([s[i] for s in augmented_shapes])
                 for i in range(result_dim))
def get_indexing_expression(shape: ShapeType,
                            result_shape: ShapeType) -> Tuple[ScalarExpression, ...]:
    """
    Returns the indices while broadcasting an array of shape *shape* into one of
    shape *result_shape*.
    """
    assert len(shape) <= len(result_shape)
    # broadcasting aligns shapes at the trailing axes
    i_start = len(result_shape) - len(shape)
    indices = []
    for i, (dim1, dim2) in enumerate(zip(shape, result_shape[i_start:])):
        if not are_shape_components_equal(dim1, dim2):
            # broadcast axis: a length-1 axis is always indexed at 0
            assert are_shape_components_equal(dim1, 1)
            indices.append(0)
        else:
            # non-broadcast axis: index with the result's index variable
            indices.append(prim.Variable(f"_{i+i_start}"))

    return tuple(indices)
def with_indices_for_broadcasted_shape(val: prim.Variable, shape: ShapeType,
                                       result_shape: ShapeType) -> prim.Expression:
    """
    Returns *val* subscripted so that an array of shape *shape* can be used
    inside an index lambda of shape *result_shape*; scalars are returned
    unindexed.
    """
    if not shape:
        # a 0-d (scalar) expression needs no subscript
        return val
    return val[get_indexing_expression(shape, result_shape)]
def extract_dtypes_or_scalars(
        exprs: Sequence[ArrayOrScalar]) -> List[DtypeOrScalar]:
    """
    Returns the dtype of each array in *exprs*; scalar entries are passed
    through unchanged (for use in result-type computation).
    """
    dtypes: List[DtypeOrScalar] = []
    for expr in exprs:
        if isinstance(expr, Array):
            dtypes.append(expr.dtype)
        else:
            # anything that is not an Array must be a recognized scalar
            assert isinstance(expr, SCALAR_CLASSES)
            dtypes.append(expr)

    return dtypes
def update_bindings_and_get_broadcasted_expr(arr: ArrayOrScalar,
                                             bnd_name: str,
                                             bindings: Dict[str, Array],
                                             result_shape: ShapeType
                                             ) -> ScalarExpression:
    """
    Returns an instance of :class:`~pytato.scalar_expr.ScalarExpression` to address
    *arr* in a :class:`pytato.array.IndexLambda` of shape *result_shape*.

    .. note::

        If *arr* is an :class:`~pytato.array.Array`, *bindings* is mutated:
        *arr* is recorded in it under the key *bnd_name*. Scalars are
        returned as-is and do not touch *bindings*.
    """

    if isinstance(arr, SCALAR_CLASSES):
        return arr

    assert isinstance(arr, Array)
    bindings[bnd_name] = arr
    return with_indices_for_broadcasted_shape(prim.Variable(bnd_name),
                                              arr.shape,
                                              result_shape)
def broadcast_binary_op(a1: ArrayOrScalar, a2: ArrayOrScalar,
                        op: Callable[[ScalarExpression, ScalarExpression], ScalarExpression],  # noqa:E501
                        get_result_type: Callable[[DtypeOrScalar, DtypeOrScalar], np.dtype[Any]],  # noqa:E501
                        ) -> ArrayOrScalar:
    """
    Returns the result of the broadcasted binary operation *op* applied to
    *a1* and *a2* as an :class:`~pytato.array.IndexLambda`, or as an
    evaluated scalar if both operands are scalars.

    :param op: builds the scalar expression from the two operand expressions
    :param get_result_type: computes the result dtype from the two operands'
        dtypes/scalars
    """
    from pytato.array import _get_default_axes

    if np.isscalar(a1) and np.isscalar(a2):
        from pytato.scalar_expr import evaluate
        # both operands are scalars: fold the operation right away
        return evaluate(op(a1, a2))  # type: ignore

    result_shape = get_shape_after_broadcasting([a1, a2])

    dtypes = extract_dtypes_or_scalars([a1, a2])
    result_dtype = get_result_type(*dtypes)

    bindings: Dict[str, Array] = {}

    expr1 = update_bindings_and_get_broadcasted_expr(a1, "_in0", bindings,
                                                     result_shape)
    expr2 = update_bindings_and_get_broadcasted_expr(a2, "_in1", bindings,
                                                     result_shape)

    return IndexLambda(op(expr1, expr2),
                       shape=result_shape,
                       dtype=result_dtype,
                       bindings=bindings,
                       axes=_get_default_axes(len(result_shape)))
# {{{ dim_to_index_lambda_components
class ShapeExpressionMapper(Mapper):
    """
    Mapper that takes a shape component and returns it as a scalar expression.

    Every :class:`~pytato.array.SizeParam` encountered is replaced by a fresh
    variable and recorded in :attr:`bindings`.
    """
    def __init__(self, var_name_gen: UniqueNameGenerator):
        # memoize results per sub-expression
        self.cache: Dict[Array, ScalarExpression] = {}
        self.var_name_gen = var_name_gen
        # variable name -> SizeParam, for every variable introduced below
        self.bindings: Dict[str, SizeParam] = {}

    def rec(self, expr: Array) -> ScalarExpression:  # type: ignore
        if expr in self.cache:
            return self.cache[expr]
        result: ScalarExpression = super().rec(expr)
        self.cache[expr] = result
        return result

    def map_index_lambda(self, expr: IndexLambda) -> ScalarExpression:
        # inline the index lambda's bindings into its expression
        from pytato.scalar_expr import substitute
        return substitute(expr.expr, {name: self.rec(val)
                                      for name, val in expr.bindings.items()})

    def map_size_param(self, expr: SizeParam) -> ScalarExpression:
        name = self.var_name_gen("_in")
        self.bindings[name] = expr
        return prim.Variable(name)
def dim_to_index_lambda_components(expr: ShapeComponent,
                                   vng: Optional[UniqueNameGenerator] = None,
                                   ) -> Tuple[ScalarExpression,
                                              Dict[str, SizeParam]]:
    """
    Returns the scalar expressions and bindings to use the shape
    component within an index lambda.

    :param expr: the shape component (an integer or a 0-d array expression)
    :param vng: name generator for the introduced binding names; a fresh
        one is created if not provided

    .. testsetup::

        >>> import pytato as pt
        >>> from pytato.utils import dim_to_index_lambda_components
        >>> from pytools import UniqueNameGenerator

    .. doctest::

        >>> n = pt.make_size_param("n")
        >>> expr, bnds = dim_to_index_lambda_components(3*n+8, UniqueNameGenerator())
        >>> print(expr)
        3*_in + 8
        >>> bnds
        {'_in': SizeParam(name='n')}
    """
    if isinstance(expr, INT_CLASSES):
        # a constant needs no bindings
        return expr, {}

    if vng is None:
        vng = UniqueNameGenerator()

    assert isinstance(vng, UniqueNameGenerator)
    assert isinstance(expr, Array)
    mapper = ShapeExpressionMapper(vng)
    result = mapper(expr)
    return result, mapper.bindings
# }}}
def are_shape_components_equal(dim1: ShapeComponent, dim2: ShapeComponent) -> bool:
    """
    Returns *True* iff *dim1* and *dim2* have equal
    :class:`~pytato.array.SizeParam` coefficients in their expressions.
    """
    from pytato.scalar_expr import substitute, distribute

    def to_expr(dim: ShapeComponent) -> ScalarExpression:
        # Express the shape component purely in terms of its SizeParam names.
        expr, bnds = dim_to_index_lambda_components(dim,
                                                    UniqueNameGenerator())

        return substitute(expr, {name: prim.Variable(bnd.name)
                                 for name, bnd in bnds.items()})

    dim1_expr = to_expr(dim1)
    dim2_expr = to_expr(dim2)
    # Equal iff the (distributed) difference is identically zero.
    # ScalarExpression.__eq__ returns Any
    return (distribute(dim1_expr-dim2_expr) == 0)  # type: ignore
def are_shapes_equal(shape1: ShapeType, shape2: ShapeType) -> bool:
    """
    Returns *True* iff *shape1* and *shape2* have the same dimensionality and the
    corresponding components are equal as defined by
    :func:`~pytato.utils.are_shape_components_equal`.
    """
    return ((len(shape1) == len(shape2))
            and all(are_shape_components_equal(dim1, dim2)
                    for dim1, dim2 in zip(shape1, shape2)))
# {{{ ShapeToISLExpressionMapper
class ShapeToISLExpressionMapper(Mapper):
    """
    Mapper that takes a shape component and returns it as :class:`isl.Aff`
    defined on *space*.
    """
    def __init__(self, space: isl.Space):
        # memoize results per sub-expression
        self.cache: Dict[Array, isl.Aff] = {}
        self.space = space

    # type-ignore reason: incompatible return type with super class
    def rec(self, expr: Array) -> isl.Aff:  # type: ignore[override]
        if expr in self.cache:
            return self.cache[expr]
        result: isl.Aff = super().rec(expr)
        self.cache[expr] = result
        return result

    def map_index_lambda(self, expr: IndexLambda) -> isl.Aff:
        # evaluate the index lambda's expression with its bindings mapped
        # to their isl.Aff counterparts
        from pytato.scalar_expr import evaluate
        return evaluate(expr.expr, {name: self.rec(val)
                                    for name, val in expr.bindings.items()})

    def map_size_param(self, expr: SizeParam) -> isl.Aff:
        dt, pos = self.space.get_var_dict()[expr.name]
        return isl.Aff.var_on_domain(self.space, dt, pos)
# }}}
def _create_size_param_space(names: Iterable[str]) -> isl.Space:
    """Returns an ISL parameter space with one parameter per name in *names*."""
    return isl.Space.create_from_names(isl.DEFAULT_CONTEXT,
                                       set=[],
                                       params=sorted(names)).params()
def _get_size_params_assumptions_bset(space: isl.Space) -> isl.BasicSet:
    """Returns the set in *space* under the assumption that every size
    parameter is non-negative."""
    bset = isl.BasicSet.universe(space)

    for name in bset.get_var_dict():
        # constrain each size parameter: name >= 0
        bset = bset.add_constraint(isl.Constraint.ineq_from_names(space, {name: 1}))

    return bset
def _is_non_negative(expr: ShapeComponent) -> BoolT:
    """
    Returns *True* iff it can be proven that ``expr >= 0``.

    Symbolic expressions are checked with ISL under the assumption that all
    size parameters are non-negative.
    """
    if isinstance(expr, INT_CLASSES):
        return expr >= 0

    assert isinstance(expr, Array) and expr.shape == ()
    from pytato.transform import InputGatherer
    # type-ignore reason: passed Set[Optional[str]]; function expects Set[str]
    space = _create_size_param_space({expr.name  # type: ignore
                                      for expr in InputGatherer()(expr)})
    aff = ShapeToISLExpressionMapper(space)(expr)

    # expr >= 0 iff the set where aff >= 0 contains all assumed size params
    # type-ignore reason: mypy doesn't know comparing isl.Sets returns bool
    return (aff.ge_set(aff * 0)  # type: ignore[no-any-return]
            <= _get_size_params_assumptions_bset(space))
def _is_non_positive(expr: ShapeComponent) -> BoolT:
    """
    Returns *True* iff it can be proven that ``expr <= 0``.
    """
    # expr <= 0  <=>  -expr >= 0
    return _is_non_negative(-expr)
# {{{ _index_into
# {{{ normalized slice
def _clamp_slice_bound(bound: IntegralScalarExpression,
                       axis_len: ShapeComponent,
                       step: int) -> ShapeComponent:
    """Resolve a user-provided slice start/stop against *axis_len*: wrap
    negative values, clamp out-of-range values (following
    :meth:`slice.indices` semantics)."""
    if not isinstance(axis_len, INT_CLASSES):
        # clamping against a symbolic axis length is not supported (yet)
        raise NotImplementedError

    if -axis_len <= bound < axis_len:
        # in range (possibly negative): wrap into [0, axis_len)
        return bound % axis_len
    elif bound >= axis_len:
        # past the end: clamp to axis_len (forward) or axis_len-1 (backward)
        return axis_len if step > 0 else axis_len - 1
    else:
        # before the beginning: clamp to 0 (forward) or -1 (backward)
        return 0 if step > 0 else -1


def _normalize_slice(slice_: slice,
                     axis_len: ShapeComponent) -> NormalizedSlice:
    """
    Returns the normalized form of *slice_* for an axis of length *axis_len*:
    a :class:`~pytato.array.NormalizedSlice` whose start/stop have been
    defaulted, wrapped and clamped.

    :raises ValueError: if the slice step is not an integer or is zero.
    """
    start, stop, step = slice_.start, slice_.stop, slice_.step
    if step is None:
        step = 1
    if not isinstance(step, INT_CLASSES):
        raise ValueError(f"slice step must be an int or 'None' (got a {type(step)})")
    if step == 0:
        raise ValueError("slice step cannot be zero")

    # defaults depend on the traversal direction
    if step > 0:
        default_start: ShapeComponent = 0
        default_stop: ShapeComponent = axis_len
    else:
        default_start = axis_len - 1
        default_stop = -1

    if start is None:
        start = default_start
    else:
        start = _clamp_slice_bound(start, axis_len, step)

    if stop is None:
        stop = default_stop
    else:
        stop = _clamp_slice_bound(stop, axis_len, step)

    return NormalizedSlice(start, stop, step)
def _normalized_slice_len(slice_: NormalizedSlice) -> ShapeComponent:
    """
    Returns the number of elements addressed by the (already normalized)
    *slice_*, possibly as a symbolic expression.

    :raises NotImplementedError: if ISL cannot establish the sign of
        ``stop - start`` (resp. ``start - stop``).
    """
    start, stop, step = slice_.start, slice_.stop, slice_.step

    if step > 0:
        if _is_non_negative(stop - start):
            # ceil((stop - start) / step)
            return (stop - start + step - 1) // step
        elif _is_non_positive(stop - start):
            # empty slice
            return 0
        else:
            # ISL could not ascertain the expression's sign
            raise NotImplementedError("could not ascertain the sign of "
                                      f"{stop-start} while computing the axis"
                                      " length.")
    else:
        if _is_non_negative(start - stop):
            # ceil((start - stop) / -step)
            return (start - stop - step - 1) // (-step)
        elif _is_non_positive(start - stop):
            # empty slice
            return 0
        else:
            # ISL could not ascertain the expression's sign
            raise NotImplementedError("could not ascertain the sign of "
                                      f"{start-stop} while computing the axis"
                                      " length.")
# }}}
def _index_into(ary: Array, indices: Tuple[ConvertibleToIndexExpr, ...]) -> Array:
    """
    Returns the result of indexing *ary* with *indices*: a
    :class:`~pytato.array.BasicIndex` for pure basic indexing, or an
    advanced-indexing node when any index is an array.

    :raises IndexError: on malformed or out-of-bounds indices.
    """
    from pytato.diagnostic import CannotBroadcastError
    from pytato.array import _get_default_axes

    # {{{ handle ellipsis

    if indices.count(...) > 1:
        raise IndexError("an index can only have a single ellipsis ('...')")

    if indices.count(...):
        # expand "..." into as many full slices as needed
        ellipsis_pos = indices.index(...)
        indices = (indices[:ellipsis_pos]
                   + (slice(None, None, None),) * (ary.ndim - len(indices) + 1)
                   + indices[ellipsis_pos+1:])

    # }}}

    # {{{ "pad" index with complete slices to match ary's ndim

    if len(indices) < ary.ndim:
        indices = indices + (slice(None, None, None),) * (ary.ndim - len(indices))

    # }}}

    if len(indices) != ary.ndim:
        raise IndexError(f"Too many indices (expected {ary.ndim}"
                         f", got {len(indices)})")

    if any(idx is None for idx in indices):
        raise NotImplementedError("newaxis is not supported")

    # {{{ validate broadcastability of the array indices

    try:
        array_idx_shape = get_shape_after_broadcasting(
            [idx for idx in indices if isinstance(idx, Array)])
    except CannotBroadcastError as e:
        raise IndexError(str(e))

    # }}}

    # {{{ validate index

    for i, idx in enumerate(indices):
        if isinstance(idx, slice):
            pass
        elif isinstance(idx, INT_CLASSES):
            # bounds check: -shape[i] <= idx <= shape[i] - 1 (possibly symbolic)
            if not (_is_non_negative(idx + ary.shape[i])
                    and _is_non_negative(ary.shape[i] - 1 - idx)):
                raise IndexError(f"{idx} is out of bounds for axis {i}")
        elif isinstance(idx, Array):
            if idx.dtype.kind != "i":
                raise IndexError("only integer arrays are valid array indices")
        else:
            raise IndexError("only integers, slices, ellipsis and integer arrays"
                             " are valid indices")

    # }}}

    # {{{ normalize slices

    normalized_indices: List[IndexExpr] = [_normalize_slice(idx, axis_len)
                                           if isinstance(idx, slice)
                                           else idx
                                           for idx, axis_len in zip(indices,
                                                                    ary.shape)]

    del indices

    # }}}

    if any(isinstance(idx, Array) for idx in normalized_indices):
        # advanced indexing expression
        i_adv_indices, i_basic_indices = partition(
                                            lambda idx: isinstance(
                                                            normalized_indices[idx],
                                                            NormalizedSlice),
                                            range(len(normalized_indices)))
        # Noncontiguous iff some basic index sits between the first and last
        # advanced index (numpy places the result axes differently then).
        if any(i_adv_indices[0] < i_basic_idx < i_adv_indices[-1]
               for i_basic_idx in i_basic_indices):
            # non contiguous advanced indices
            return AdvancedIndexInNoncontiguousAxes(
                ary,
                tuple(normalized_indices),
                axes=_get_default_axes(len(array_idx_shape)
                                       + len(i_basic_indices)))
        else:
            return AdvancedIndexInContiguousAxes(
                ary,
                tuple(normalized_indices),
                axes=_get_default_axes(len(array_idx_shape)
                                       + len(i_basic_indices)))
    else:
        # basic indexing expression
        return BasicIndex(ary,
                          tuple(normalized_indices),
                          axes=_get_default_axes(
                              len([idx
                                   for idx in normalized_indices
                                   if isinstance(idx, NormalizedSlice)])))
# }}}
| 36.082883 | 110 | 0.586737 |
f412bb181a020668ba33c36eae771fdd47500468 | 7,977 | py | Python | satella/instrumentation/memory/memthread.py | piotrmaslanka/satella | bf4ba7a21ad2ac93a366442a2b4574dc5568b87e | [
"MIT"
] | 12 | 2019-12-13T10:17:38.000Z | 2022-01-05T09:01:36.000Z | satella/instrumentation/memory/memthread.py | piotrmaslanka/satella | bf4ba7a21ad2ac93a366442a2b4574dc5568b87e | [
"MIT"
] | 26 | 2016-04-01T11:55:26.000Z | 2021-12-30T17:03:59.000Z | satella/instrumentation/memory/memthread.py | piotrmaslanka/satella | bf4ba7a21ad2ac93a366442a2b4574dc5568b87e | [
"MIT"
] | 1 | 2021-05-31T08:45:22.000Z | 2021-05-31T08:45:22.000Z | import logging
import os
import time
import typing as tp
import psutil
from satella.coding.concurrent import CallableGroup, CallNoOftenThan, CancellableCallback, \
IntervalTerminableThread
from satella.coding.structures import Singleton
from satella.time import measure
from .conditions import BaseCondition, ZerothSeverity
logger = logging.getLogger(__name__)
__all__ = ['MemoryPressureManager']
@Singleton
class MemoryPressureManager(IntervalTerminableThread):
"""
Manager of the memory pressure.
The program is in some severity state. The baseline state is 0, meaning everything's OK.
Please note that it is sufficient to instantiate this class for the thread to run.
Eg.
>>> mt = MemoryPressureManager(maximum_available=4*GB, severity_levels=[GlobalRelativeValue(20),
>>> GlobalRelativeValue(10)])
>>> @mt.register_on_severity(1)
>>> def trigger_a():
>>> print('80% consumption of memory exceeded')
>>> @mt.register_on_severity(2)
>>> def trigger_b():
>>> print('90% consumption of memory exceeded')
As well, this object is a singleton.
:param maximum_available: maximum amount of memory that this program can use
:param severity_levels: this defines the levels of severity. A level is reached when program's
consumption is other this many percent of it's maximum_available amount of memory.
:param check_interval: amount of seconds of pause between consecutive checks, or
a time string
:param log_transitions: whether to log to logger when a transition takes place
:ivar severity_level: current severity level (int)
0 means memory is OK, 1 and more means memory is progressively more limited
"""
    def __init__(self, maximum_available: tp.Optional[int] = None,
                 severity_levels: tp.List[BaseCondition] = None,
                 check_interval: tp.Union[str, int] = 10,
                 log_transitions: bool = True):
        # Runs as a daemon thread named 'memory pressure manager',
        # checking every check_interval seconds.
        super().__init__(check_interval, name='memory pressure manager', daemon=True)
        self.log_transitions = log_transitions  # type: bool
        # handle on our own process, used to read memory consumption
        self.process = psutil.Process(os.getpid())  # type: psutil.Process
        self.maximum_available = maximum_available  # type: int
        # level 0 is always the implicit "everything OK" baseline
        self.severity_levels = [ZerothSeverity()] + (
                severity_levels or [])  # type: tp.List[BaseCondition]

        # one callback group per severity level, indexed by level number
        self.callbacks_on_entered = [CallableGroup(gather=False) for _ in
                                     range(len(
                                         self.severity_levels))]  # type: tp.List[CallableGroup]
        self.callbacks_on_remains = [CallableGroup(gather=False) for _ in
                                     range(len(
                                         self.severity_levels))]  # type: tp.List[CallableGroup]
        self.callbacks_on_left = [CallableGroup(gather=False) for _ in
                                  range(len(
                                      self.severity_levels))]  # type: tp.List[CallableGroup]
        self.callbacks_on_memory_normal = CallableGroup(gather=False)
        self.severity_level = 0  # type: int
        self.stopped = False  # type: bool
        # note: the monitoring thread starts as soon as this is constructed
        self.start()
    def advance_to_severity_level(self, target_level: int):
        """
        Move :attr:`severity_level` one step at a time towards *target_level*,
        firing the entered/left callbacks for every level crossed.
        """
        while self.severity_level != target_level:
            # normalize the difference to a single +1/-1 step
            delta = target_level - self.severity_level
            delta = int(delta / abs(delta))
            if delta > 0:
                # Means we are ENTERING a severity level
                self.severity_level += delta
                self.callbacks_on_entered[self.severity_level]()
                if self.log_transitions:
                    logger.warning('Entered severity level %s' % (self.severity_level,))
            elif delta < 0:
                # Means we are LEAVING a severity level
                self.callbacks_on_left[self.severity_level]()
                if self.log_transitions:
                    logger.warning('Left severity level %s' % (self.severity_level,))
                self.severity_level += delta
                if self.severity_level == 0:
                    # back to baseline: notify the memory-normal subscribers
                    self.callbacks_on_memory_normal()
    def stop(self) -> None:
        """Stop this thread from operating"""
        self.stopped = True
    def resume(self) -> None:
        """Resume the operation of this thread"""
        self.stopped = False
def loop(self) -> None:
    """
    One monitoring pass: prune cancelled callbacks, recompute the current
    severity level and either transition towards it or fire the
    "remains at this level" callbacks. No-op while stopped.
    """
    if self.stopped:
        return
    # Purge cancelled callbacks from every registered group.
    self.callbacks_on_memory_normal.remove_cancelled()
    for group_list in (self.callbacks_on_entered,
                       self.callbacks_on_left,
                       self.callbacks_on_remains):
        for group in group_list:
            group.remove_cancelled()
    current_level = self.calculate_severity_level()
    if current_level == self.severity_level:
        self.callbacks_on_remains[current_level]()
    else:
        self.advance_to_severity_level(current_level)
def calculate_severity_level(self) -> int:
    """
    Return the current severity level; 0 is the baseline ("memory OK").

    Conditions are checked from the most severe level downwards, so the
    highest level whose condition fires wins.
    """
    memory_info = self.process.memory_info()
    for level, condition in reversed(list(enumerate(self.severity_levels))):
        if condition.can_fire(memory_info, self.maximum_available):
            return level
    # Defensive fallback: level 0 (ZerothSeverity) is expected to always
    # fire, but never let this fall off the end returning None to callers
    # that compare the result against an int.
    return 0
@staticmethod
def register_on_memory_normal(fun: tp.Callable) -> CancellableCallback:
    """
    Register *fun* to be invoked once, when memory pressure returns to
    severity level 0.

    :param fun: callable to invoke when memory state normalizes
    :return: the CancellableCallback under which *fun* was registered
    """
    handle = CancellableCallback(fun)
    MemoryPressureManager().callbacks_on_memory_normal.add(handle)
    return handle
@staticmethod
def register_on_entered_severity(severity: int):
    """
    Decorator factory: register the wrapped callable to fire upon entering
    the given severity level (i.e. the memory situation got worse).

    Use like this:

    >>> @MemoryPressureManager.register_on_entered_severity(1)
    >>> def entered_severity_one():
    >>>     print('Entered memory severity level 1')

    :param severity: severity level to react to
    """
    def decorator(fun):
        handle = CancellableCallback(fun)
        MemoryPressureManager().callbacks_on_entered[severity].add(handle)
        return handle
    return decorator
@staticmethod
def register_on_left_severity(severity: int):
    """
    Decorator factory: register the wrapped callable to fire when the given
    severity level is left, i.e. the manager dropped to a lower level.

    >>> @MemoryPressureManager.register_on_left_severity(1)
    >>> def left_severity_one():
    >>>     print('Memory consumption no longer at level 1')

    :param severity: severity level whose departure triggers the callback
    """
    def decorator(fun):
        handle = CancellableCallback(fun)
        MemoryPressureManager().callbacks_on_left[severity].add(handle)
        return handle
    return decorator
@staticmethod
def register_on_remaining_in_severity(severity: int, call_no_more_often_than: int = 0):
    """
    Decorator factory: register the wrapped callable to fire repeatedly
    while the manager remains at the given severity level, rate-limited.

    >>> @MemoryPressureManager.register_on_remaining_in_severity(0, 30)
    >>> def memory_ok():
    >>>     print('Memory consumption OK; called at most every 30 seconds')

    :param severity: severity level to watch
    :param call_no_more_often_than: minimum number of seconds between calls
    """
    def decorator(fun):
        throttled = CallNoOftenThan(call_no_more_often_than, fun)
        handle = CancellableCallback(throttled)
        MemoryPressureManager().callbacks_on_remains[severity].add(handle)
        return handle
    return decorator
| 37.450704 | 100 | 0.636204 |
59d6599b7cebdb62289cbeb8ce30377d6cfe81cf | 5,511 | py | Python | main.py | plon-io/pandas | bbbdb821d14adcd623b3d531db66cdd3717ee038 | [
"MIT"
] | 1 | 2020-05-26T07:23:10.000Z | 2020-05-26T07:23:10.000Z | main.py | plon-io/pandas | bbbdb821d14adcd623b3d531db66cdd3717ee038 | [
"MIT"
] | null | null | null | main.py | plon-io/pandas | bbbdb821d14adcd623b3d531db66cdd3717ee038 | [
"MIT"
] | 2 | 2019-11-02T19:09:54.000Z | 2020-01-19T11:53:07.000Z | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Import San Francisco Bay Area Weather data from CSV file
data = pd.read_csv('weather.csv')
# Make variables some friendlier names for users.
# NOTE: the leading spaces in several old names are intentional -- they match
# the raw CSV headers exactly.
old_names = ['Max TemperatureF', 'Min TemperatureF', 'Mean TemperatureF', 'Max Dew PointF', 'MeanDew PointF',
             'Min DewpointF', 'Max Humidity',
             ' Mean Humidity', ' Min Humidity', ' Max Sea Level PressureIn', ' Mean Sea Level PressureIn',
             ' Min Sea Level PressureIn', ' Max VisibilityMiles', ' Mean VisibilityMiles',
             ' Min VisibilityMiles', ' Max Wind SpeedMPH', ' Mean Wind SpeedMPH', ' Max Gust SpeedMPH', 'PrecipitationIn',
             ' CloudCover', ' WindDirDegrees', ' Events']
# BUGFIX: the pressure names were ordered maxPress/minPress/meanPress while the
# corresponding old names are ordered Max/Mean/Min, so 'minPress' silently held
# the MEAN pressure and vice versa. The order below matches old_names
# positionally (max, mean, min).
new_names = ['maxTemp', 'minTemp', 'meanTemp', 'maxDew', 'meanDew', 'minDew', 'maxHum', 'meanHum', 'minHum', 'maxPress',
             'meanPress', 'minPress', 'maxVis', 'meanVis',
             'minVis', 'maxWind', 'meanWind', 'maxGust', 'preIn', 'cloud', 'WindDir', 'events']
data.rename(columns=dict(zip(old_names, new_names)), inplace=True)
# Delete unused column in CSV File
del data['preIn']
# Remove the bad samples in temperature (sensor glitches outside a plausible range)
data = data[(data['maxTemp'] <= 110) & (data['minTemp'] >= 25)]
# List unique values on example column using drop_duplicates (we could also use unique())
df2 = pd.DataFrame(data, columns=['ZIP'])
u = df2.drop_duplicates(['ZIP'])
# Get data for cities
# 94107 -> San Francisco
# 94063 -> San Mateo
# 94301 -> Santa Clara
# 94041 -> Mountain View
# 95113 -> San Jose
zipcodes = [94107, 94063, 94301, 94041, 95113]
# Cumulative day-of-month tick positions: start September, end August
x = [30, 61, 91, 122, 153, 182, 213, 243, 274, 304, 335, 366]
labels = ['September','October','November','December','January','February','March','April','May','June','July','August']
# Plots of Mean temperature in Fahrenheit scale
plt.figure()
for zcode in zipcodes:
    local = data.loc[data['ZIP'] == zcode]
    df1 = pd.DataFrame(local, columns=['meanTemp'])
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is
    # the documented replacement.
    plt.plot(df1.to_numpy(), '-', label=str(zcode))
plt.xticks(x,labels,rotation='vertical',fontsize=12)
plt.grid(True)
plt.xlabel('Month')
plt.ylabel('Temperature in Fahrenheit scale', fontsize=15)
plt.title('Fahrenheit Mean Temperature on Bay Area Cities',fontsize=20)
plt.legend(["San Francisco", "San Mateo","Santa Clara", "Mountain View","San Jose"])
plt.show()
# Plot compare Mean Wind and Max Gust
plt.figure()
for zcode in zipcodes:
    mw = data.loc[data['ZIP'] == zcode]
    # BUGFIX: the column was renamed to 'meanWind' above; 'meanWin' did not
    # exist and produced an all-NaN column.
    df3 = pd.DataFrame(mw, columns=['meanWind', 'maxGust'])
    plt.plot(df3.to_numpy(),'-', label=str(zcode))
plt.xticks(x,labels,rotation='vertical', fontsize=12)
plt.grid(True)
plt.xlabel('Month')
plt.ylabel('MPH', fontsize=15)
plt.title('Mean Wind and Max Gust', fontsize=20)
plt.legend(["Mean Wind","Max Gust"])
plt.show()
# Plot mean temperature with mean humidity for San Francisco
sf = data.loc[data['ZIP'] == 94107]
plt.figure()
df4 = pd.DataFrame(sf, columns=['meanTemp','meanHum'])
plt.plot(df4.to_numpy(), '-')
plt.grid(True)
plt.autoscale()
plt.xlabel('Month')
plt.ylabel('', fontsize=15)
plt.title('Mean Temperature and Mean Humidity for San Francisco',fontsize=18)
plt.xticks(x,labels,rotation='vertical', fontsize=12)
plt.legend(["Mean Temperature[F]", "Mean Humidity[%]"], fontsize=15)
plt.show()
# Replace blank values with zero before plotting aggregates
data.fillna(0, inplace=True)
# Histogram of Mean Temperature in all cities in the Bay Area
plt.hist(data['meanTemp'], bins=10)
plt.xlabel('Temperature [F]', fontsize=15)
plt.ylabel('Amount', fontsize=15)
plt.title('Mean Temperature on San Francisco Bay Area', fontsize=20)
plt.show()
# Plot Area compare Cloud Level and Event such as rain, rain-thunderstorm, fog or fog-rain example for San Francisco
data['events'].replace(['Rain','Rain-Thunderstorm','Fog','Fog-Rain'],[1,1,0,1],inplace=True)
sf = data.loc[data['ZIP'] == 94107]
df7 = pd.DataFrame(sf, columns=['cloud','events'])
df7.plot.area(stacked=False)
plt.xlabel('Month')
plt.ylabel('Cloud Level', fontsize=18)
plt.title('Cloud Level with Events: Rain, Storm, Fog etc.',fontsize=20)
plt.xticks(x,labels,rotation='vertical', fontsize=12)
plt.legend(["Cloud","Rain: 1-yes, 0-no"])
plt.show()
# Plot of min, max and mean pressure for San Francisco
sf = data.loc[data['ZIP'] == 94107]
plt.figure()
df8 = pd.DataFrame(sf, columns=['minPress','meanPress','maxPress'])
plt.plot(df8.to_numpy(), '-')
plt.grid(True)
plt.autoscale()
plt.xlabel('Month')
plt.ylabel('inHg', fontsize=18)
plt.title('Min, Mean and Max Pressure for San Francisco',fontsize=20)
plt.xticks(x,labels,rotation='vertical', fontsize=12)
plt.legend(["Minimum Pressure", "Mean Pressure","Maximum Pressure"])
plt.show()
"""
# Plot of Rain and humidity
plt.figure()
df9 = pd.DataFrame(sf, columns=['events','meanHum'])
plt.plot(df9, '-')
plt.xlabel('Month')
plt.ylabel('')
plt.title('Plot compare Events such as Rain, Fog etc. with mean Humidity', fontsize=20)
plt.xticks(x,labels,rotation='vertical', fontsize=12)
plt.legend(["Rain?", "Mean Humidity"])
plt.show()
# Area Plot compare Cloud Lever and Visibility
df10 = pd.DataFrame(sf, columns=['cloud','minVis'])
df10.plot.area(stacked=False)
plt.xlabel('Month')
plt.ylabel('')
plt.title('Plot compare Cloud Level and Mean Visibility', fontsize=20)
plt.xticks(x,labels,rotation='vertical', fontsize=12)
plt.legend(["Cloud Lever","Mean Visibility"])
plt.show()
"""
# Print all data from CSV file
print(data)
| 34.018519 | 122 | 0.697877 |
b2d8d2f2b12565c685acd56339bfecde7b9fcff2 | 525 | py | Python | plyplus/__init__.py | erezsh/plyplus | bd2e7d37e4a3a80812a13b84584fd885d5a24f4c | [
"MIT"
] | 169 | 2015-01-16T12:48:23.000Z | 2021-12-09T16:00:13.000Z | plyplus/__init__.py | erezsh/plyplus | bd2e7d37e4a3a80812a13b84584fd885d5a24f4c | [
"MIT"
] | 26 | 2015-01-23T16:30:28.000Z | 2018-07-07T09:14:18.000Z | plyplus/__init__.py | erezsh/plyplus | bd2e7d37e4a3a80812a13b84584fd885d5a24f4c | [
"MIT"
] | 53 | 2015-01-22T20:20:10.000Z | 2021-12-05T13:39:57.000Z | """A friendly yet powerful LR-parser written in Python."""
from __future__ import absolute_import
from getpass import getuser
import os
from tempfile import gettempdir
__version__ = "0.7.5"
PLYPLUS_DIR = os.path.join(gettempdir(), 'plyplus-' + getuser())
try:
os.mkdir(PLYPLUS_DIR)
except OSError:
pass
from .strees import SVisitor, STransformer, is_stree
from .common import PlyplusException, GrammarException, TokenizeError, ParseError
from .plyplus import Grammar
from . import selector
selector.install()
| 21 | 81 | 0.775238 |
acced69c2980140181cf1d2947eb68ec0026f658 | 6,803 | py | Python | extras/metric-framework/py/event_qa.py | guy4261/fight-churn | f3820edd6d4af5e0bd625434d3ad4236aa781ef4 | [
"MIT"
] | 151 | 2019-04-26T19:05:14.000Z | 2022-03-28T10:11:53.000Z | extras/metric-framework/py/event_qa.py | guy4261/fight-churn | f3820edd6d4af5e0bd625434d3ad4236aa781ef4 | [
"MIT"
] | 15 | 2019-08-05T06:35:00.000Z | 2022-03-31T02:58:30.000Z | extras/metric-framework/py/event_qa.py | guy4261/fight-churn | f3820edd6d4af5e0bd625434d3ad4236aa781ef4 | [
"MIT"
] | 71 | 2019-06-07T17:50:04.000Z | 2022-03-27T02:49:24.000Z | import sqlalchemy
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import json
import pandas
import os
import sys
import argparse
# Command-line interface for the event QA script (consumed in __main__ below
# via parse_known_args).
parser = argparse.ArgumentParser()
# Run control arguments
parser.add_argument("--schema", type=str, help="The name of the schema", default='socialnet7')
parser.add_argument("--events", type=str,nargs='*', help="List of events to run (default to all)")
parser.add_argument("--hideax", action="store_true", default=False,help="Hide axis labels")
parser.add_argument("--format", type=str, help="Format to save in", default='png')
class EventChecker:
    """Runs a per-event QA query against the schema's event tables and saves
    one diagnostic plot per event type."""

    def __init__(self, args):
        '''
        EventChecker holds the parameters and logic to run the QA query on events.
        Loads the metric configuration JSON from the adjacent conf directory,
        which has the date range for the schema. Makes a postgres sqlalchemy
        connection from environment variables (CHURN_DB, CHURN_DB_USER,
        CHURN_DB_PASS). Selects all of the events for this schema into a pandas
        dataframe by querying the event_type table. If event properties are
        configured, the SQL term to run QA on them is also created (see
        make_one_event_sql for how that is used).
        :param args: parsed command-line namespace (schema, events, hideax, format)
        '''
        self.args = args
        self.schema = args.schema
        self.monthFormat = mdates.DateFormatter('%b')
        # Load the metric configuration dictionary
        with open('../conf/%s_metrics.json' % self.schema, 'r') as myfile:
            self.metric_dict = json.loads(myfile.read())
        # Get the start/end date from the metric configuration
        self.from_date = self.metric_dict['date_range']['from_date']
        self.to_date = self.metric_dict['date_range']['to_date']
        # Make the output path (if necessary)
        self.save_path = '../../../fight-churn-output/' + self.schema + '/'
        os.makedirs(self.save_path, exist_ok=True)
        # Make a sql connection with sqlalchemy, credentials from environment
        self.URI = f"postgresql://localhost/{os.environ['CHURN_DB']}?user={os.environ['CHURN_DB_USER']}&password={os.environ['CHURN_DB_PASS']}"
        print('Saving results to %s' % self.save_path)
        engine = sqlalchemy.create_engine(self.URI)
        self.conn = engine.connect()
        # Read the event types from the database
        self.events = pandas.read_sql_query("select * from %s.event_type" % self.schema, self.conn)
        # Load the sql template used to make the queries
        with open('../sql/qa_event.sql', 'r') as myfile:
            self.qa_sql = myfile.read().replace('\n', ' ')
        # Extra setup, if there are event properties: a ", sum(p) as p, ..."
        # term spliced into the SQL template.
        if len(self.metric_dict['event_properties']) > 0:
            self.property_term = ','.join(['sum(%s) as %s' % (p, p) for p in self.metric_dict['event_properties']])
            self.property_term = ', ' + self.property_term
        else:
            self.property_term = ''

    def make_one_event_sql(self, event):
        '''
        Fill in the SQL template to make one query string for a named event.
        :param event: one row of the event_type dataframe
            (needs 'event_type_id' and 'event_type_name')
        :return: the query string with all %placeholders substituted
        '''
        print('Checking event %s' % event['event_type_name'])
        aSql = self.qa_sql.replace('%event_type_id', str(event['event_type_id']))
        aSql = aSql.replace('%schema', self.schema)
        aSql = aSql.replace('%from_date', self.from_date)
        aSql = aSql.replace('%to_date', self.to_date)
        aSql = aSql.replace('%property_term', self.property_term)
        return aSql

    def plot_event_without_properties(self, res, cleanedName):
        '''
        Plot the query result for an event that has only a simple count
        (no properties): a single line of n_event over time.
        :param res: query result dataframe with 'event_date' and 'n_event'
        :param cleanedName: event name stripped to alphanumerics, for the title
        :return: None (draws onto the current matplotlib figure)
        '''
        res.plot(kind='line', linestyle="-", marker=".", x='event_date', y='n_event', color='black',
                 title='%s n_event' % cleanedName, legend=False, ylim=(0, round(1.1 * res['n_event'].max())))
        if self.args.hideax:
            plt.gca().get_yaxis().set_visible(False)
            plt.gca().get_xaxis().set_major_formatter(self.monthFormat)
        else:
            plt.gcf().autofmt_xdate()

    def plot_event_with_properties(self, res, cleaned_name, valid_properties):
        '''
        Plot the result for an event with properties: one subplot for the
        count plus one subplot per property that has non-null data.
        :param res: data frame with the result of the query
        :param cleaned_name: event name stripped to alphanumerics, for the title
        :param valid_properties: list of bools, one per configured property,
            True where the property column has any non-null data
        :return: None (draws onto a new matplotlib figure)
        '''
        n_valid_property = sum([int(v) for v in valid_properties])
        plt.figure(figsize=(5, 8))
        plt.subplot(n_valid_property + 1, 1, 1)
        plt.plot('event_date', 'n_event', data=res, marker='.', color='black', linewidth=1, label="count")
        plt.legend()
        plt.title('%s' % cleaned_name)
        if self.args.hideax:
            plt.gca().get_yaxis().set_visible(False)
            plt.gca().get_xaxis().set_major_formatter(self.monthFormat)
        # NOTE(review): this ranges over n_valid_property, not
        # len(valid_properties); if an early property is invalid and a later
        # one is valid, the later one appears to be skipped -- verify.
        for p in range(0, n_valid_property):
            if not valid_properties[p]: continue
            # Subplot index = 1 + number of valid properties up to and including p.
            count = sum([int(v) for v in valid_properties[0:p + 1]])
            plt.subplot(n_valid_property + 1, 1, 1 + count)
            plt.plot('event_date', self.metric_dict['event_properties'][p], data=res, marker='.', color='black', linewidth=1,
                     label="sum(%s)" % self.metric_dict['event_properties'][p])
            plt.legend()
            if self.args.hideax:
                plt.gca().get_yaxis().set_visible(False)
                plt.gca().get_xaxis().set_major_formatter(self.monthFormat)

    def check_one_event_qa(self, event, hideAx=False):
        '''
        Run the query and make the plot to check quality of one event. First
        the query is formed with make_one_event_sql, the result is retrieved
        into a pandas dataframe, and the appropriate plot is saved to
        save_path in the configured format.
        :param event: one row of the event_type dataframe
        :param hideAx: NOTE(review): unused; axis hiding is driven by
            self.args.hideax instead
        :return: None (writes a plot file, or prints a notice if no events)
        '''
        aSql = self.make_one_event_sql(event)
        res = pandas.read_sql_query(aSql, self.conn)
        cleaned_name = ''.join(e for e in event['event_type_name'] if e.isalnum())
        # res.to_csv(self.save_path+cleaned_name+'_event_qa.csv',index=False)
        if not any(res['n_event'].notnull()):
            print('\t *** No events for %s' % cleaned_name)
            return
        valid_properties = [any(res[p].notnull()) for p in self.metric_dict['event_properties']]
        n_valid_property = sum([int(v) for v in valid_properties])
        if n_valid_property > 0:
            self.plot_event_with_properties(res, cleaned_name, valid_properties)
        else:
            self.plot_event_without_properties(res, cleaned_name)
        plt.savefig(self.save_path + 'event_qa_' + cleaned_name + '.' + self.args.format)
        plt.close()

    def check_events(self):
        '''
        Check all of the events in a loop, calling check_one_event_qa for
        each. If a list of events was provided on the command line
        (self.args.events), only those events are checked.
        :return: None
        '''
        for idx, event in self.events.iterrows():
            if self.args.events is not None and event['event_type_name'] not in self.args.events:
                continue
            self.check_one_event_qa(event)
'''
####################################################################################################
The main script for quality assurance checks on events
'''
if __name__ == "__main__":
    # parse_known_args tolerates extra CLI arguments passed by wrappers.
    args, _ = parser.parse_known_args()
    event_check = EventChecker(args)
    event_check.check_events()
| 36.772973 | 137 | 0.706894 |
a60fc72215ee9250f9c9e6c437499474dee954b4 | 4,560 | py | Python | python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_base.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_base.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_base.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.linear_model.coordinated_linear_model.logistic_regression.base_logistic_regression import \
BaseLogisticRegression
from federatedml.optim.gradient.hetero_sqn_gradient import sqn_factory
from federatedml.param.logistic_regression_param import HeteroLogisticParam
from federatedml.protobuf.generated import lr_model_meta_pb2
from federatedml.secureprotol import PaillierEncrypt
from federatedml.transfer_variable.transfer_class.hetero_lr_transfer_variable import HeteroLRTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroLRBase(BaseLogisticRegression):
    """Shared base for the heterogeneous (vertically partitioned) logistic
    regression roles; concrete guest/host/arbiter classes fill in the
    cipher, batch generator, gradient operator and convergence procedure."""

    def __init__(self):
        super().__init__()
        self.model_name = 'HeteroLogisticRegression'
        self.model_param_name = 'HeteroLogisticRegressionParam'
        self.model_meta_name = 'HeteroLogisticRegressionMeta'
        self.mode = consts.HETERO
        # The following collaborators are None here; subclasses are expected
        # to assign them before _init_model runs (which calls through them).
        self.aggregator = None
        self.cipher = None
        self.batch_generator = None
        self.gradient_loss_operator = None
        self.converge_procedure = None
        self.model_param = HeteroLogisticParam()
        # Transfer variables mediate communication between federation parties.
        self.transfer_variable = HeteroLRTransferVariable()

    def _init_model(self, params):
        """Wire the Paillier cipher and the federated sub-procedures to the
        transfer variables, using the run-time parameters.

        :param params: a HeteroLogisticParam instance
        """
        super()._init_model(params)
        self.encrypted_mode_calculator_param = params.encrypted_mode_calculator_param
        self.cipher_operator = PaillierEncrypt()
        # Register each sub-procedure against the shared transfer variables.
        self.cipher.register_paillier_cipher(self.transfer_variable)
        self.converge_procedure.register_convergence(self.transfer_variable)
        self.batch_generator.register_batch_generator(self.transfer_variable)
        self.gradient_loss_operator.register_gradient_procedure(self.transfer_variable)
        # if len(self.component_properties.host_party_idlist) == 1:
        #     LOGGER.debug(f"set_use_async")
        #     self.gradient_loss_operator.set_use_async()
        self.gradient_loss_operator.set_fixed_float_precision(self.model_param.floating_point_precision)

    def _get_meta(self):
        """Build and return the protobuf meta object describing this model's
        training configuration (for model export/serialization)."""
        meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(penalty=self.model_param.penalty,
                                                          tol=self.model_param.tol,
                                                          alpha=self.alpha,
                                                          optimizer=self.model_param.optimizer,
                                                          batch_size=self.batch_size,
                                                          learning_rate=self.model_param.learning_rate,
                                                          max_iter=self.max_iter,
                                                          early_stop=self.model_param.early_stop,
                                                          fit_intercept=self.fit_intercept,
                                                          need_one_vs_rest=self.need_one_vs_rest)
        return meta_protobuf_obj

    def get_model_summary(self):
        """Return a dict summary of the fitted model (coefficients, intercept,
        convergence info and optional validation metrics); empty dict when no
        header is available (i.e. the model has not been fit)."""
        header = self.header
        if header is None:
            return {}
        weight_dict, intercept_ = self.get_weight_intercept_dict(header)
        # best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
        summary = {"coef": weight_dict,
                   "intercept": intercept_,
                   "is_converged": self.is_converged,
                   "one_vs_rest": self.need_one_vs_rest,
                   "best_iteration": self.callback_variables.best_iteration}
        if self.callback_variables.validation_summary is not None:
            summary["validation_metrics"] = self.callback_variables.validation_summary
        # if self.validation_strategy:
        #     validation_summary = self.validation_strategy.summary()
        #     if validation_summary:
        #         summary["validation_metrics"] = validation_summary
        return summary
| 50.10989 | 110 | 0.660965 |
971e2f17bf6b4c6757d11c76acf0afc35bc6ab5a | 418 | py | Python | sphinx/python-intro/source/code/oneoffcoder/clazz/methodoverriding.py | oneoffcoder/books | 84619477294a3e37e0d7538adf819113c9e8dcb8 | [
"CC-BY-4.0"
] | 26 | 2020-05-05T08:07:43.000Z | 2022-02-12T03:28:15.000Z | sphinx/python-intro/source/code/oneoffcoder/clazz/methodoverriding.py | oneoffcoder/books | 84619477294a3e37e0d7538adf819113c9e8dcb8 | [
"CC-BY-4.0"
] | 19 | 2021-03-10T00:33:51.000Z | 2022-03-02T13:04:32.000Z | sphinx/python-intro/source/code/oneoffcoder/clazz/methodoverriding.py | oneoffcoder/books | 84619477294a3e37e0d7538adf819113c9e8dcb8 | [
"CC-BY-4.0"
] | 2 | 2022-01-09T16:48:21.000Z | 2022-02-19T17:06:50.000Z | class Car(object):
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
def __repr__(self):
return f"{{'make': {self.make}, 'model': {self.model}, 'year': {self.year}}}"
def __str__(self):
return f'Car(make={self.make}, model={self.model}, year={self.year})'
car = Car('Honda', 'Accord', 2019)
print(car)
print(repr(car))
| 23.222222 | 85 | 0.58134 |
6e5f52ddb64dedffb5be0506634dedc91e4acaf3 | 6,703 | py | Python | scripts/compare_tracking.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | scripts/compare_tracking.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | scripts/compare_tracking.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | # pythonpath modification to make hytra available
# for import without requiring it to be installed
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# standard imports
import numpy as np
import cPickle
import collections
import multiprocessing
import os
import os.path as path
import optparse
import sys
from empryonic import io
from empryonic.learning import match as m
from empryonic.learning import quantification as quant
import h5py
def match(fn_pair):
    """
    Match the objects of two corresponding frame files and return the
    association produced by empryonic's matcher.

    NOTE(review): relies on the module-global 'options' namespace populated by
    optparse in __main__ (threshold / ignore_z / swap_xy) -- presumably only
    safe to call after option parsing; several of those options are currently
    commented out in the parser setup, so confirm before reusing this helper.

    :param fn_pair: (base filename, contestant filename) tuple
    :return: the association object from m.match_files
    """
    assoc = m.match_files(fn_pair[0], fn_pair[1], options.threshold, options.ignore_z, options.swap_xy, verbose=False)
    print("-> matched: " + path.basename(fn_pair[0]) + " <-> " + path.basename(fn_pair[1]))
    return assoc
def getIdsAndValidity(h5file):
    """
    Return (ids, valid) arrays for the objects stored in an HDF5 file.

    ids are returned sorted; valid is the parallel array of validity flags.
    If the 'objects/meta' datasets are missing, both are reconstructed from
    the segmentation label image, treating every non-background label as a
    valid object.

    :param h5file: an open, readable h5py.File (or mapping with .value datasets)
    :return: tuple (sorted array of ids, array of validity flags)
    """
    try:
        ids = np.sort(h5file['objects/meta/id'].value)
        valid = h5file['objects/meta/valid'].value
    except KeyError:
        # Narrowed from a bare 'except': a missing dataset surfaces as
        # KeyError; any other error (I/O, corruption) should propagate.
        print("Warning: could not load ids and validity from hdf5 file. Reconstructing from segmentation...")
        labelImage = h5file['segmentation/labels'].value
        ids = np.unique(labelImage)
        ids = ids[ids > 0]  # drop the background label 0
        valid = np.ones(ids.shape)
    return ids, valid
def construct_associations(base_fns, cont_fns, timesteps, verbose=False):
    """
    Build per-timestep identity associations between base and contestant files.

    Both file lists are assumed to contain the same objects under the same
    ids (this script's precondition); this is verified with asserts before
    the identity mapping is built.

    :param base_fns: per-timestep base HDF5 filenames (frame id in the name)
    :param cont_fns: per-timestep contestant HDF5 filenames (same frame ids)
    :param timesteps: number of leading timesteps to process
    :param verbose: print a progress line per timestep
    :return: list of {'lhs': {base_id: cont_id}, 'rhs': {cont_id: base_id}}
    """
    assocs = []
    for t in range(timesteps):
        base_fn = base_fns[t]
        cont_fn = cont_fns[t]
        # The frame numbers encoded in both filenames must agree.
        assert(int(os.path.splitext(os.path.basename(base_fn))[0]) == int(os.path.splitext(os.path.basename(cont_fn))[0]))
        with h5py.File(base_fn, 'r') as f:
            base_ids, base_valid = getIdsAndValidity(f)
            # base_detection = f['objects/meta/detection'].value
        with h5py.File(cont_fn, 'r') as f:
            cont_ids, cont_valid = getIdsAndValidity(f)
            # cont_detection = f['objects/meta/detection'].value
        if verbose:
            print("sanity checking %d" % t)
        assert(np.all(base_ids == cont_ids))
        assert(np.all(base_valid == 1))
        assert(np.all(cont_valid == 1))
        # BUGFIX: these were map() objects; under Python 3 the first
        # dict(zip(...)) exhausted both iterators, leaving 'rhs' empty.
        # Materializing them as lists is correct on Python 2 and 3 alike.
        base_ids = [int(i) for i in base_ids]
        cont_ids = [int(i) for i in cont_ids]
        assoc = {'lhs': dict(zip(base_ids, cont_ids)), 'rhs': dict(zip(cont_ids, base_ids))}
        assocs.append(assoc)
    return assocs
def get_all_frame_files_from_folder(folder):
    """
    Map timestep -> absolute path for every HDF5 frame file in *folder*.

    Only files named '<integer>.h5' are considered; everything else is
    skipped silently (non-recursive scan).

    :param folder: directory to scan
    :return: dict {timestep (int): absolute file path (str)}
    """
    fns = {}
    for fn in os.listdir(folder):
        name, ext = os.path.splitext(os.path.basename(fn))
        if ext != '.h5':
            continue
        try:
            frame_id = int(name)
        except ValueError:
            # Narrowed from a bare 'except': a non-numeric stem (e.g.
            # 'notes.h5') is simply not a frame file; anything else propagates.
            continue
        fns[frame_id] = path.abspath(path.join(folder, fn))
    return fns
def get_tracking_filenames(base_dir, cont_dir):
    """
    Return sorted, paired lists of the frame files present in both the base
    and the contestant directory.

    Only timesteps found in both directories are kept, and those shared
    timesteps must form a contiguous range.

    :param base_dir: directory with the base (ground-truth) frame files
    :param cont_dir: directory with the contestant frame files
    :return: (sorted base filenames, sorted contestant filenames)
    """
    base_by_id = get_all_frame_files_from_folder(base_dir)
    cont_by_id = get_all_frame_files_from_folder(cont_dir)
    # Keep only timesteps present on both sides.
    shared_ids = set(base_by_id.keys()) & set(cont_by_id.keys())
    # The shared timesteps must be gap-free.
    assert(set(range(min(shared_ids), max(shared_ids) + 1)) == shared_ids)
    base_fns = sorted(base_by_id[fid] for fid in shared_ids)
    cont_fns = sorted(cont_by_id[fid] for fid in shared_ids)
    return base_fns, cont_fns
if __name__=="__main__":
usage = """%prog [options] BASE_DIR CONTESTANT_DIR
Compare two tracking results, based only on the association information in the tracking group.
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--quietly', action='store_true', dest='quiet', help='non-verbose')
parser.add_option('--max-ts', dest='max_ts', type=int, default=-1, help='max. timestep (exclusive) [default=%default]')
#parser.add_option('--no-detailed-stats', action='store_true', dest='no_detailed_stats', help="don't write detailed statistics into an output file")
#parser.add_option('-o', type='str', dest='output_fn', default='batch_performance.txt', help='output file for detailed stats; no effect if "--no-detailed-stats" is set [default: %default]')
#parser.add_option('-t', '--threshold', type='float', dest='threshold', default=25, help='distance threshold for the matching (matching only below the threshold) [default: %default]')
#parser.add_option('--swap-xy', action='store_true', dest='swap_xy', help='switches x and y coordinates of the traxels in FILE1')
#parser.add_option('--ignore-z', action='store_true', dest='ignore_z', help='only match in the x-y subspace')
#parser.add_option('--precomputed-match', action='store_true', dest='precomputed_match', help='match files will be loaded from ./matched/ [invalidates all match related options]')
options, args = parser.parse_args()
verbose = not bool(options.quiet)
numArgs = len(args)
if numArgs == 2:
base_dir = args[0]
cont_dir = args[1]
base_fns, cont_fns = get_tracking_filenames(base_dir, cont_dir)
else:
parser.print_help()
sys.exit(1)
if options.max_ts != -1:
base_fns = base_fns[:options.max_ts]
if len(base_fns) < 2:
print("Abort: at least two base files needed.")
sys.exit(1)
if len(cont_fns) < 2:
print("Abort: at least two contestant files needed.")
sys.exit(1)
# if len(base_fns) != len(cont_fns):
# print "Warning: number of base files has to match number of contestant files."
timesteps = min((len(base_fns), len(cont_fns)))
first_timestep = int(os.path.splitext(os.path.basename(base_fns[0]))[0])
##
## construct id assocs; assumed to be identically mapped in this script
## (i.e. the ids don't differ for the same object in base and contestant)
##
assocs = construct_associations(base_fns, cont_fns, timesteps, verbose)
##
## generate taxonomy
##
fn_pairs = zip(base_fns[0:timesteps], cont_fns[0:timesteps])
assert(timesteps == len(assocs))
ts = []
for i,v in enumerate(fn_pairs[1:]):
if verbose:
print(path.basename(v[0]), path.basename(v[1]))
t = quant.compute_taxonomy(assocs[i], assocs[i+1], v[0], v[1], i + first_timestep + 1)
ts.append(t)
#sys.stdout.write('%d ' % i)
sys.stdout.flush()
overall = reduce( quant.Taxonomy.union, ts )
def total_elements( taxonomy ):
return len(taxonomy.base_basic) + len(taxonomy.cont_basic)
assert(sum((total_elements(t) for t in ts)) == total_elements(overall))
##
## report results
##
if verbose:
print("Measuring performance...")
print("-> Precision: %.3f" % overall.precision())
print("-> Recall: %.3f" % overall.recall())
print("-> F-measure %.3f: " % overall.f_measure())
print("Check", 2.*overall.precision() * overall.recall() / (overall.precision() + overall.recall()))
print(overall)
else:
print(overall.to_line())
| 38.522989 | 193 | 0.652991 |
69f94a183851c160059b4fc7372b7e22d2e19190 | 715 | py | Python | indicators/Indicator.py | vd1371/gram | 065c33c854beea633a833e64a7b80ff727868f2d | [
"Apache-2.0"
] | null | null | null | indicators/Indicator.py | vd1371/gram | 065c33c854beea633a833e64a7b80ff727868f2d | [
"Apache-2.0"
] | 21 | 2020-02-12T07:25:55.000Z | 2020-03-18T21:10:00.000Z | indicators/Indicator.py | vd1371/gram | 065c33c854beea633a833e64a7b80ff727868f2d | [
"Apache-2.0"
] | 1 | 2021-12-04T13:09:46.000Z | 2021-12-04T13:09:46.000Z | # Adding parent directory to the PYTHONPATH
import sys
sys.path.insert(0,'..')
from utils.GlobalVariables import *
class Indicator(object):
    """Abstract base class for technical indicators; subclasses must
    implement calculate()."""

    def __init__(self):
        super(Indicator, self).__init__()

    def calculate(self, df=None):
        '''
        Compute the indicator values for a price dataframe.

        The 'calculate' method receives df (a pandas dataframe), computes the
        indicator values and returns a pandas Series (or dataframe) holding
        ONLY the calculated values. The returned object must carry the same
        index as df so it can be appended/joined when being used.

        :param df: input pandas dataframe
        :param: indicator attributes (defined by the concrete subclass)
        :return: pandas Series/dataframe with the calculated indicator values
        '''
        raise NotImplementedError("'calculate' method is not implemented yet")
1e2935dc750a25dc5cd1704489254c416e36ad70 | 58 | py | Python | telegram-bot-0.0.2/config.py | Yarik9008/nebosckop | 729cc77ccc7d3f09c01d17d1cebea1d5ea9206ab | [
"MIT"
] | null | null | null | telegram-bot-0.0.2/config.py | Yarik9008/nebosckop | 729cc77ccc7d3f09c01d17d1cebea1d5ea9206ab | [
"MIT"
] | null | null | null | telegram-bot-0.0.2/config.py | Yarik9008/nebosckop | 729cc77ccc7d3f09c01d17d1cebea1d5ea9206ab | [
"MIT"
] | null | null | null | TOKEN = '5058985853:AAHbLTN9sai_eocdTjzSnnbhne7XaBWdRVQ'
| 19.333333 | 56 | 0.862069 |
b66189639f45a3341174cdde6b72450a6a50a8c8 | 5,009 | py | Python | environments/recommenders/restaurant_toy_recsim_test.py | JaniAnttonenp/ml-fairness | d76786f5bfb00239a8a68d6de69b9889cf7cf61e | [
"Apache-2.0"
] | 268 | 2019-09-10T14:02:07.000Z | 2022-03-31T02:50:57.000Z | environments/recommenders/restaurant_toy_recsim_test.py | JaniAnttonenp/ml-fairness | d76786f5bfb00239a8a68d6de69b9889cf7cf61e | [
"Apache-2.0"
] | 17 | 2019-08-29T14:08:42.000Z | 2022-02-05T20:26:26.000Z | environments/recommenders/restaurant_toy_recsim_test.py | JaniAnttonenp/ml-fairness | d76786f5bfb00239a8a68d6de69b9889cf7cf61e | [
"Apache-2.0"
] | 68 | 2019-08-15T16:57:54.000Z | 2022-02-25T19:15:15.000Z | # coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for restaurant_toy_recsim."""
import itertools
from absl import flags
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from environments.recommenders import restaurant_toy_recsim
import numpy as np
from recsim.simulator import environment
from recsim.simulator import recsim_gym
FLAGS = flags.FLAGS
def _always_moving_transition_matrix(num_states, num_actions):
  """Returns a transition matrix that moves deterministically at every step.

  Whatever action is taken, state s transitions to (s + 1) % num_states with
  probability 1.
  """
  matrix = restaurant_toy_recsim.TransitionMatrix(num_states, num_actions)
  for src in range(num_states):
    for action in range(num_actions):
      row = np.zeros(num_states, dtype=float)
      row[(src + 1) % num_states] = 1.0
      matrix.add_row(state=src, action=action, row=row)
  return matrix
def _build_components(deterministic_transitions=False):
  """Returns recsim components.

  Args:
    deterministic_transitions: If True, use the always-moving deterministic
      transition matrix; otherwise sample a random stochastic matrix.

  Returns:
    A tuple (config, user_sampler, user_model, restaurants, document_sampler,
    recsim gym environment).
  """
  rec_types = [
      restaurant_toy_recsim.RestaurantType.JUNK,
      restaurant_toy_recsim.RestaurantType.HEALTHY
  ]
  user_states = ['Neutral', 'UnhealthySt', 'HealthySt']
  num_states = len(user_states)
  num_actions = len(rec_types)
  transition_matrix_constructor = (
      _always_moving_transition_matrix if deterministic_transitions else
      restaurant_toy_recsim.TransitionMatrix.RandomMatrix)
  user_config = restaurant_toy_recsim.UserConfig(
      user_states_names=user_states,
      state_transition_matrix=transition_matrix_constructor(
          num_states, num_actions),
      # Rewards are arbitrary for the test: uniform per (state, action) pair.
      reward_matrix=np.random.rand(num_states, num_actions))
  seeds = restaurant_toy_recsim.SimulationSeeds(2, 5)
  config = restaurant_toy_recsim.EnvConfig(user_config, rec_types, seeds)
  user_sampler, user_model = restaurant_toy_recsim.build_user_components(config)
  restaurants, document_sampler = restaurant_toy_recsim.build_document_components(
      config)
  env = environment.Environment(
      user_model,
      document_sampler,
      num_candidates=num_actions,
      slate_size=1,
      # Keep the same documents across resets so slate indices stay stable.
      resample_documents=False)
  recsim_env = recsim_gym.RecSimGymEnv(env, restaurant_toy_recsim.rating_reward)
  return (config, user_sampler, user_model, restaurants, document_sampler,
          recsim_env)
class RestaurantToyExampleTest(absltest.TestCase):
  """End-to-end checks for the restaurant toy recsim environment."""

  def setUp(self):
    super(RestaurantToyExampleTest, self).setUp()
    # Default fixture: stochastic (randomly sampled) transition dynamics.
    (self.config, self.usersampler, self.user_model, self.restaurants,
     self.document_sampler,
     self.env) = _build_components(deterministic_transitions=False)

  def set_up_deterministic(self):
    # Rebuild the fixture with the always-moving deterministic matrix.
    (self.config, self.usersampler, self.user_model, self.restaurants,
     self.document_sampler,
     self.env) = _build_components(deterministic_transitions=True)

  def test_document_observation_space_matches(self):
    # Each restaurant's observation must live in its own observation space.
    for doc in self.restaurants:
      self.assertIn(doc.create_observation(), doc.observation_space())

  def test_user_observation_space_matches(self):
    user = self.usersampler.sample_user()
    self.assertIn(user.create_observation(), user.observation_space())

  def test_environment_can_advance_by_steps(self):
    self.env.reset()
    for slate in [[0]]*10:
      self.env.step(slate)
    # Tests that the env.step() completes successfully

  def test_different_users_are_sampled_as_steps_progress(self):
    # Each reset should sample a fresh user, so 5 resets yield 5 user ids.
    unique_users = set()
    for _ in range(5):
      self.env.reset()
      for slate in [[0], [0], [0], [0]]:
        observation, _, _, _ = self.env.step(slate)
        unique_users.add(observation['user']['user_id'])
    self.assertLen(unique_users, 5)

  def test_update_state_changes_state(self):
    """Tests if the user transitions state when step is called.

    The test uses an off diagonal deterministic transition matrix to make sure
    the user moves for sure.
    """
    self.set_up_deterministic()
    self.env.reset()
    state = self.env._environment.user_model._user_state.curr_state
    for slate in [[0]]*10:
      self.env.step(slate)
      next_state = self.env._environment.user_model._user_state.curr_state
      self.assertNotEqual(state, next_state)
      state = next_state

  def test_ml_fairness_gym_environment_can_run(self):
    self.set_up_deterministic()
    ml_fairness_env = recsim_wrapper.wrap(self.env)
    test_util.run_test_simulation(env=ml_fairness_env, stackelberg=True)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 35.524823 | 82 | 0.758235 |
fc357d8ed65f9f8159c9672ae19436a5b666ae83 | 2,302 | py | Python | main/concat.py | fchamma/isolateBlocks | 76de0ec72e091224e7a8d43a044a3cd3c0d1afc6 | [
"MIT"
] | null | null | null | main/concat.py | fchamma/isolateBlocks | 76de0ec72e091224e7a8d43a044a3cd3c0d1afc6 | [
"MIT"
] | null | null | null | main/concat.py | fchamma/isolateBlocks | 76de0ec72e091224e7a8d43a044a3cd3c0d1afc6 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse, os, json, sys, time
from parse_args_concatenate import *
from concatenate_dependencies import *
from base import *
# Retrieving help and error messages, stored as dictionaries in txt files
# (labels.txt lives next to this script; it maps message keys to user-facing
# strings so all output text is centralized).
script_dir = os.path.dirname(__file__)
labelsPath = os.path.join(script_dir, 'labels.txt')
with open(labelsPath) as f:
    labels = json.loads(f.read())
f.close()
# Parsing command line arguments
parser = argparse.ArgumentParser(labels['helpDescription'])
parser.add_argument('inputFolder', help = labels['helpMsgInputFolder'])
parser.add_argument('outfile', help = labels['helpMsgOutfile'])
parser.add_argument('header', nargs='*', default = 'header', help = labels['helpMsgHeader'])
parser.add_argument('-e', '--extension', default = 'csv', help = labels['helpMsgExtension'], metavar='str')
parser.add_argument('-fd', '--fileDelimiter', default = ',', help = labels['helpMsgFileDelim'], metavar='str')
parser.add_argument('-idp', '--idPosition', default = 2, help = labels['helpMsgIdPos'], type = int, metavar='int')
parser.add_argument('-ow', '--overwrite', action='store_true', help = labels['helpMsgOverwrite'])
# parser.add_argument("-v", "--verbose", action="store_true", help = labels['helpMsgVerb'])
args = parser.parse_args()
# Function to group necessary functions for command line run
def main():
    """Run the full concatenation pipeline using the parsed CLI arguments.

    Parses the input blocks, sorts them into write order, builds the output
    and writes it to ``args.outfile``. On any write failure, prints the
    failure label and exits.
    """
    print(labels['statusConcatenationStarted'])
    blocks = parse_inputBlocks(args.inputFolder, args.fileDelimiter, args.idPosition)
    blocks = sort_block_write_order(blocks, args.header)
    output = populate_output(blocks)
    # Pick the writer matching the interpreter once instead of duplicating
    # the try/except in both branches.
    write_output = write_output_py2 if sys.version_info[0] < 3 else write_output_py3
    try:
        res = write_output(output, args.outfile, labels, args.overwrite)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any write error aborts with the failure message.
        print(labels['statusConcatenationFailed'])
        sys.exit()
    if res == 'Success':
        print(labels['statusConcatenationSuccess'])
print(labels['statusConcatenationSuccess'])
if __name__ == '__main__':
    # Time the whole run and report elapsed seconds via the labels table.
    start_time = time.time()
    # validate_args(args, labels)
    main()
    print(labels['elapsedTime'] % round(time.time() - start_time, 2))
| 44.269231 | 115 | 0.684622 |
43b95905aa829ccab452e63f8654bf889a50d976 | 3,535 | py | Python | train.py | Feng1909/soft_robot_group | 8d800ea00412c5b4e4a68e7388c2052ccdd4a2c9 | [
"MIT"
] | null | null | null | train.py | Feng1909/soft_robot_group | 8d800ea00412c5b4e4a68e7388c2052ccdd4a2c9 | [
"MIT"
] | null | null | null | train.py | Feng1909/soft_robot_group | 8d800ea00412c5b4e4a68e7388c2052ccdd4a2c9 | [
"MIT"
] | null | null | null | import argparse
import random
import numpy as np
import torch
import gym
import realant_sim
import model
from td3 import TD3
# from sac import SAC
def rollout(agent, env, train=False, random=False):
    """Run one episode and return its cumulative reward.

    Args:
        agent: policy object exposing act(), replay_buffer, _timestep and
            update_parameters().
        env: environment exposing reset_model(), step() and
            action_space_sample().
        train: if True, store transitions and update the agent afterwards.
        random: if True, sample random actions instead of querying the agent.

    Returns:
        The episode return (sum of rewards).
    """
    # state = env.reset()
    state = env.reset_model()
    episode_step, episode_return = 0, 0
    done = False
    print("begin rollout")
    while not done:
        print("    rollout: " + str(episode_step) + " episode step")
        if random:
            action = env.action_space_sample()
        else:
            action = agent.act(state, train=train)
        next_state, reward, info = env.step(action)
        episode_return += reward
        if train:
            # not_done = 1.0 if (episode_step+1) == env._max_episode_steps else float(not done)
            # NOTE(review): with the 10-step cap below, episode_step never
            # reaches 10 inside this loop, so not_done is always 1.0 here —
            # presumably intentional bootstrapping at the time limit; confirm.
            if episode_step < 10:
                not_done = 1.0
            else:
                not_done = 0.0
            agent.replay_buffer.append([state, action, [reward], next_state, [not_done]])
            agent._timestep += 1
        state = next_state
        episode_step += 1
        # Episode is truncated when episode_step reaches 9 (9+1 == 10).
        # NOTE(review): this runs 9 steps, not 10 — verify the off-by-one is
        # intended.
        if (episode_step+1 == 10):
            done = True
    if train and not random:
        # One gradient update per environment step taken this episode.
        for _ in range(episode_step):
            agent.update_parameters()
    return episode_return
def evaluate(agent, env, n_episodes=10):
    """Average the return of ``n_episodes`` greedy (non-training) rollouts."""
    episode_returns = [
        rollout(agent, env, train=False, random=False)
        for _ in range(n_episodes)
    ]
    return np.mean(episode_returns)
def train(agent, env, n_episodes=1000, n_random_episodes=10):
    """Train the agent: random exploration for the first ``n_random_episodes``,
    then policy-driven episodes, with an evaluation every 10 episodes."""
    for episode in range(n_episodes):
        print("episode: " + str(episode))
        use_random_actions = episode < n_random_episodes
        episode_return = rollout(agent, env, train=True, random=use_random_actions)
        print(f'Episode {episode}. Return {episode_return}')
        if (episode + 1) % 10 != 0:
            continue
        eval_return = evaluate(agent, env)
        print(f'Eval Reward {eval_return}')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--agent", default="td3")  # td3 or sac
    parser.add_argument("--env", default="mujoco")  # mujoco or pybullet
    parser.add_argument("--task", default="walk")  # sleep or turn or walk
    parser.add_argument("--seed", default=1, type=int)
    parser.add_argument("--latency", default=2, type=int)
    # BUG FIX: these are fractional noise magnitudes (defaults 0.01); parsing
    # them with type=int would truncate any command-line value to 0.
    parser.add_argument("--xyz_noise_std", default=0.01, type=float)
    parser.add_argument("--rpy_noise_std", default=0.01, type=float)
    parser.add_argument("--min_obs_stack", default=4, type=int)
    args = parser.parse_args()
    # NOTE: gym-based environment construction (RealAntMujoco-v0 /
    # RealAntBullet-v0) is currently bypassed in favor of the local
    # model.Model() below; the hard-coded sizes match RealAnt's
    # observation/action spaces.
    obs_size = 17
    act_size = 4
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    env = model.Model()
    print("env create successfully")
    # Seed all RNG sources for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    agent = TD3(device, obs_size, act_size)
    print("agent create successfully")
    train(agent, env, n_episodes=1000, n_random_episodes=10)
torch.save(agent, '/home/feng1909/test.pth') | 31.283186 | 95 | 0.624328 |
f91c5546a367ca83d3c95e7f940e27f8c4b6f3f7 | 12,915 | py | Python | python/ray/workflow/api.py | johnpjust/ray | cdba1d6a971bc1f3c009753eb46bedb7b28eb3bd | [
"Apache-2.0"
] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | python/ray/workflow/api.py | oscarknagg/ray | 20d47873c9e8f5bbb80fe36e5d16256c337c4db3 | [
"Apache-2.0"
] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | python/ray/workflow/api.py | cc13ny/ray | 48ecb1f88a89c3894e2a92d66d89d3965c179ecd | [
"Apache-2.0"
] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z | import logging
import os
import types
from typing import Dict, Set, List, Tuple, Union, Optional, Any, TYPE_CHECKING
import ray
from ray.workflow import execution
from ray.workflow.step_function import WorkflowStepFunction
# avoid collision with arguments & APIs
from ray.workflow import virtual_actor_class
from ray.workflow import storage as storage_base
from ray.workflow.common import (WorkflowStatus, ensure_ray_initialized,
WorkflowRunningError, WorkflowNotFoundError)
from ray.workflow import serialization
from ray.workflow.storage import Storage
from ray.workflow import workflow_access
from ray.workflow.workflow_storage import get_workflow_storage
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.workflow.virtual_actor_class import (VirtualActorClass,
VirtualActor)
logger = logging.getLogger(__name__)
@PublicAPI(stability="beta")
def init(storage: "Optional[Union[str, Storage]]" = None) -> None:
    """Initialize workflow.

    Sets the process-global workflow storage, starts the workflow management
    actor and the serialization manager. Calling it again with the same
    storage URL only logs a warning; calling it with a different one raises.

    Args:
        storage: The external storage URL or a custom storage class. If not
            specified, ``/tmp/ray/workflow_data`` will be used.

    Raises:
        TypeError: If ``storage`` is not None, str, or a Storage instance.
        RuntimeError: If called again with a different storage than before.
    """
    if storage is None:
        # Fall back to the environment variable before the hard-coded default.
        storage = os.environ.get("RAY_WORKFLOW_STORAGE")
    if storage is None:
        # We should use get_temp_dir_path, but for ray client, we don't
        # have this one. We need a flag to tell whether it's a client
        # or a driver to use the right dir.
        # For now, just use /tmp/ray/workflow_data
        storage = "file:///tmp/ray/workflow_data"
    if isinstance(storage, str):
        logger.info(f"Using storage: {storage}")
        storage = storage_base.create_storage(storage)
    elif not isinstance(storage, Storage):
        raise TypeError("'storage' should be None, str, or Storage type.")
    try:
        _storage = storage_base.get_global_storage()
    except RuntimeError:
        # No global storage configured yet; first-time init proceeds below.
        pass
    else:
        # we have to use the 'else' branch because we would raise a
        # runtime error, but we do not want to be captured by 'except'
        if _storage.storage_url == storage.storage_url:
            logger.warning("Calling 'workflow.init()' again with the same "
                           "storage.")
        else:
            raise RuntimeError("Calling 'workflow.init()' again with a "
                               "different storage")
    storage_base.set_global_storage(storage)
    workflow_access.init_management_actor()
    serialization.init_manager()
def make_step_decorator(step_options: Dict[str, Any]):
    """Return a decorator wrapping a function into a ``WorkflowStepFunction``
    configured with ``step_options``."""
    return lambda func: WorkflowStepFunction(func, **step_options)
@PublicAPI(stability="beta")
def step(*args, **kwargs):
    """A decorator used for creating workflow steps.

    Examples:
        >>> @workflow.step
        ... def book_flight(origin: str, dest: str) -> Flight:
        ...    return Flight(...)

        >>> @workflow.step(max_retries=3, catch_exceptions=True)
        ... def book_hotel(dest: str) -> Hotel:
        ...    return Hotel(...)

    """
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # Bare usage: `@workflow.step` applied directly to a function.
        return make_step_decorator({})(args[0])
    if len(args) != 0:
        raise ValueError(f"Invalid arguments for step decorator {args}")
    step_options = {}
    # Lift the recognized options out of kwargs one by one; whatever remains
    # is forwarded to Ray as remote-function options.
    for option_name in ("max_retries", "catch_exceptions", "name", "metadata"):
        option_value = kwargs.pop(option_name, None)
        if option_value is not None:
            step_options[option_name] = option_value
    if kwargs:
        step_options["ray_options"] = kwargs
    return make_step_decorator(step_options)
@PublicAPI(stability="beta")
class _VirtualActorDecorator:
    """A decorator used for creating a virtual actor based on a class.

    The class that is based on must have the "__getstate__" and
    "__setstate__" method.

    Examples:
        >>> @workflow.virtual_actor
        ... class Counter:
        ... def __init__(self, x: int):
        ...     self.x = x
        ...
        ... # Mark a method as a readonly method. It would not modify the
        ... # state of the virtual actor.
        ... @workflow.virtual_actor.readonly
        ... def get(self):
        ...     return self.x
        ...
        ... def incr(self):
        ...     self.x += 1
        ...     return self.x
        ...
        ... def __getstate__(self):
        ...     return self.x
        ...
        ... def __setstate__(self, state):
        ...     self.x = state
        ...
        ... # Create and run a virtual actor.
        ... counter = Counter.get_or_create(actor_id="Counter", x=1)
        ... assert ray.get(counter.run(incr)) == 2
    """

    @classmethod
    def __call__(cls, _cls: type) -> "VirtualActorClass":
        # Delegate the actual class transformation to virtual_actor_class.
        return virtual_actor_class.decorate_actor(_cls)

    @classmethod
    def readonly(cls, method: types.FunctionType) -> types.FunctionType:
        # Tag the method so the actor machinery knows it does not mutate
        # state (and so can skip checkpointing after the call).
        if not isinstance(method, types.FunctionType):
            raise TypeError("The @workflow.virtual_actor.readonly "
                            "decorator can only wrap a method.")
        method.__virtual_actor_readonly__ = True
        return method

# Singleton instance used as the `workflow.virtual_actor` decorator.
virtual_actor = _VirtualActorDecorator()
@PublicAPI(stability="beta")
def get_actor(actor_id: str) -> "VirtualActor":
    """Get a virtual actor.

    Args:
        actor_id: The ID of the actor.

    Returns:
        A virtual actor.
    """
    ensure_ray_initialized()
    # Resolve the actor against the currently configured global storage.
    return virtual_actor_class.get_actor(actor_id,
                                         storage_base.get_global_storage())
@PublicAPI(stability="beta")
def resume(workflow_id: str) -> ray.ObjectRef:
    """Resume a workflow.

    Resume a workflow and retrieve its output. If the workflow was incomplete,
    it will be re-executed from its checkpointed outputs. If the workflow was
    complete, returns the result immediately.

    Examples:
        >>> trip = start_trip.step()
        >>> res1 = trip.run_async(workflow_id="trip1")
        >>> res2 = workflow.resume("trip1")
        >>> assert ray.get(res1) == ray.get(res2)

    Args:
        workflow_id: The id of the workflow to resume.

    Returns:
        An object reference that can be used to retrieve the workflow result.
    """
    ensure_ray_initialized()
    return execution.resume(workflow_id)
@PublicAPI(stability="beta")
def get_output(workflow_id: str, *,
               name: Optional[str] = None) -> ray.ObjectRef:
    """Get the output of a running workflow.

    Args:
        workflow_id: The workflow to get the output of.
        name: If set, fetch the specific step instead of the output of the
            workflow.

    Examples:
        >>> trip = start_trip.options(name="trip").step()
        >>> res1 = trip.run_async(workflow_id="trip1")
        >>> # you could "get_output()" in another machine
        >>> res2 = workflow.get_output("trip1")
        >>> assert ray.get(res1) == ray.get(res2)
        >>> step_output = workflow.get_output("trip1", "trip")
        >>> assert ray.get(step_output) == ray.get(res1)

    Returns:
        An object reference that can be used to retrieve the workflow result.
    """
    ensure_ray_initialized()
    return execution.get_output(workflow_id, name)
@PublicAPI(stability="beta")
def list_all(status_filter: Optional[Union[Union[WorkflowStatus, str], Set[
        Union[WorkflowStatus, str]]]] = None
             ) -> List[Tuple[str, WorkflowStatus]]:
    """List all workflows matching a given status filter.

    Args:
        status: If given, only returns workflow with that status. This can
            be a single status or set of statuses. The string form of the
            status is also acceptable, i.e.,
            "RUNNING"/"FAILED"/"SUCCESSFUL"/"CANCELED"/"RESUMABLE".

    Examples:
        >>> workflow_step = long_running_job.step()
        >>> wf = workflow_step.run_async(workflow_id="long_running_job")
        >>> jobs = workflow.list_all()
        >>> assert jobs == [ ("long_running_job", workflow.RUNNING) ]
        >>> ray.get(wf)
        >>> jobs = workflow.list_all({workflow.RUNNING})
        >>> assert jobs == []
        >>> jobs = workflow.list_all(workflow.SUCCESSFUL)
        >>> assert jobs == [ ("long_running_job", workflow.SUCCESSFUL) ]

    Returns:
        A list of tuple with workflow id and workflow status

    Raises:
        TypeError: If ``status_filter`` is not a status, a status name, or a
            homogeneous set of either.
    """
    ensure_ray_initialized()
    # Normalize every accepted input form into a set of WorkflowStatus
    # members before passing it downstream.
    if isinstance(status_filter, str):
        status_filter = {WorkflowStatus(status_filter)}
    elif isinstance(status_filter, WorkflowStatus):
        status_filter = {status_filter}
    elif isinstance(status_filter, set):
        if all(isinstance(s, str) for s in status_filter):
            status_filter = {WorkflowStatus(s) for s in status_filter}
        elif not all(isinstance(s, WorkflowStatus) for s in status_filter):
            raise TypeError("status_filter contains element which is not"
                            " a type of `WorkflowStatus or str`."
                            f" {status_filter}")
    elif status_filter is None:
        # BUG FIX: previously this used `__members__.keys()`, producing a set
        # of status *names* (strings) while every other branch produces
        # WorkflowStatus members; use the members so the filter is uniform.
        status_filter = set(WorkflowStatus.__members__.values())
    else:
        raise TypeError(
            "status_filter must be WorkflowStatus or a set of WorkflowStatus.")
    return execution.list_all(status_filter)
@PublicAPI(stability="beta")
def resume_all(include_failed: bool = False) -> Dict[str, ray.ObjectRef]:
    """Resume all resumable workflow jobs.

    This can be used after cluster restart to resume all tasks.

    Args:
        include_failed: Whether to resume FAILED workflows.

    Examples:
        >>> workflow_step = failed_job.step()
        >>> output = workflow_step.run_async(workflow_id="failed_job")
        >>> try:
        >>>     ray.get(output)
        >>> except Exception:
        >>>     print("JobFailed")
        >>> jobs = workflow.list_all()
        >>> assert jobs == [("failed_job", workflow.FAILED)]
        >>> assert workflow.resume_all(
        >>>   include_failed=True).get("failed_job") is not None

    Returns:
        A list of (workflow_id, returned_obj_ref) resumed.
    """
    ensure_ray_initialized()
    return execution.resume_all(include_failed)
@PublicAPI(stability="beta")
def get_status(workflow_id: str) -> WorkflowStatus:
    """Get the status for a given workflow.

    Args:
        workflow_id: The workflow to query.

    Examples:
        >>> workflow_step = trip.step()
        >>> output = workflow_step.run(workflow_id="trip")
        >>> assert workflow.SUCCESSFUL == workflow.get_status("trip")

    Returns:
        The status of that workflow

    Raises:
        TypeError: If ``workflow_id`` is not a string.
    """
    ensure_ray_initialized()
    if not isinstance(workflow_id, str):
        raise TypeError("workflow_id has to be a string type.")
    return execution.get_status(workflow_id)
@PublicAPI(stability="beta")
def cancel(workflow_id: str) -> None:
    """Cancel a workflow. Workflow checkpoints will still be saved in storage. To
    clean up saved checkpoints, see `workflow.delete()`.

    Args:
        workflow_id: The workflow to cancel.

    Examples:
        >>> workflow_step = some_job.step()
        >>> output = workflow_step.run_async(workflow_id="some_job")
        >>> workflow.cancel(workflow_id="some_job")
        >>> assert [("some_job", workflow.CANCELED)] == workflow.list_all()

    Returns:
        None

    Raises:
        TypeError: If ``workflow_id`` is not a string.
    """
    ensure_ray_initialized()
    if not isinstance(workflow_id, str):
        raise TypeError("workflow_id has to be a string type.")
    return execution.cancel(workflow_id)
@PublicAPI(stability="beta")
def delete(workflow_id: str) -> None:
    """Delete a workflow, its checkpoints, and other information it may have
    persisted to storage. To stop a running workflow, see
    `workflow.cancel()`.

    NOTE: The caller should ensure that the workflow is not currently
    running before deleting it.

    Args:
        workflow_id: The workflow to delete.

    Examples:
        >>> workflow_step = some_job.step()
        >>> output = workflow_step.run_async(workflow_id="some_job")
        >>> workflow.delete(workflow_id="some_job")
        >>> assert [] == workflow.list_all()

    Returns:
        None

    Raises:
        WorkflowRunningError: If the workflow is still running.
        WorkflowNotFoundError: If no workflow with that id exists.
    """
    try:
        status = get_status(workflow_id)
        if status == WorkflowStatus.RUNNING:
            raise WorkflowRunningError("DELETE", workflow_id)
    except ValueError:
        # get_status raises ValueError for unknown ids; surface it as the
        # workflow-specific not-found error.
        raise WorkflowNotFoundError(workflow_id)
    wf_storage = get_workflow_storage(workflow_id)
    wf_storage.delete_workflow()
# Public API surface. BUG FIX: previously omitted the @PublicAPI-decorated
# `init`, `list_all` and `delete`, so `from ray.workflow import *` (and API
# tooling keyed off __all__) missed them.
__all__ = ("init", "step", "virtual_actor", "resume", "get_output",
           "get_actor", "resume_all", "get_status", "list_all", "cancel",
           "delete")
| 33.808901 | 81 | 0.63794 |
99de5a3c59b8d0fb6035d9827afda3f272346146 | 10,653 | py | Python | gubernator/main.py | apelisse/test-infra | 6927845d67554021b2617b886c5080854e46f3a8 | [
"Apache-2.0"
] | null | null | null | gubernator/main.py | apelisse/test-infra | 6927845d67554021b2617b886c5080854e46f3a8 | [
"Apache-2.0"
] | null | null | null | gubernator/main.py | apelisse/test-infra | 6927845d67554021b2617b886c5080854e46f3a8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import logging
import re
import os
import webapp2
import jinja2
import yaml
from google.appengine.api import memcache, urlfetch
import defusedxml.ElementTree as ET
import cloudstorage as gcs
import gcs_async
import filters
import log_parser
import pull_request
# Buckets (from buckets.yaml) that handlers are allowed to serve results from.
# NOTE(review): yaml.load without an explicit Loader is the pre-PyYAML-5 API;
# acceptable here only because buckets.yaml is a trusted local file.
BUCKET_WHITELIST = {
    re.match(r'gs://([^/]+)', path).group(1)
    for path in yaml.load(open("buckets.yaml"))
}
# Jobs shown on the landing page, grouped by their GCS log prefix.
DEFAULT_JOBS = {
    'kubernetes-jenkins/logs/': {
        'kubelet-gce-e2e-ci',
        'kubernetes-build',
        'kubernetes-e2e-gce',
        'kubernetes-e2e-gce-scalability',
        'kubernetes-e2e-gce-slow',
        'kubernetes-e2e-gke',
        'kubernetes-e2e-gke-slow',
        'kubernetes-kubemark-5-gce',
        'kubernetes-kubemark-500-gce',
        'kubernetes-test-go',
    }
}
# GCS prefix under which PR-triggered job results are stored.
PR_PREFIX = 'kubernetes-jenkins/pr-logs/pull'
# Jinja environment: templates live next to this file; '%' enables
# line-statement syntax, and custom filters are registered below.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__) + '/templates'),
    extensions=['jinja2.ext.autoescape'],
    trim_blocks=True,
    autoescape=True)
JINJA_ENVIRONMENT.line_statement_prefix = '%'
filters.register(JINJA_ENVIRONMENT.filters)
def pad_numbers(s):
    """Zero-pad every digit run in *s* to width 16 so that lexicographic
    ordering of the result matches natural (numeric) ordering."""
    return re.sub(r'\d+', lambda match: match.group(0).zfill(16), s)
def memcache_memoize(prefix, expires=60 * 60, neg_expires=60):
    """Decorate a function to memoize its results using memcache.

    The function must take a single string as input, and return a pickleable
    type.

    Args:
        prefix: A prefix for memcache keys to use for memoization.
        expires: How long to memoized values, in seconds.
        neg_expires: How long to memoize falsey values, in seconds

    Returns:
        A decorator closure to wrap the function.
    """
    # setting the namespace based on the current version prevents different
    # versions from sharing cache values -- meaning there's no need to worry
    # about incompatible old key/value pairs
    namespace = os.environ['CURRENT_VERSION_ID']
    def wrapper(func):
        @functools.wraps(func)
        def wrapped(arg):
            key = prefix + arg
            data = memcache.get(key, namespace=namespace)
            if data is not None:
                return data
            else:
                data = func(arg)
                # Falsey results (missing data) are cached for a shorter
                # period so they can be retried sooner.
                if data:
                    memcache.add(key, data, expires, namespace=namespace)
                else:
                    memcache.add(key, data, neg_expires, namespace=namespace)
                return data
        return wrapped
    return wrapper
@memcache_memoize('gs-ls://', expires=60)
def gcs_ls(path):
    """Enumerate files in a GCS directory. Returns a list of FileStats."""
    # listbucket requires a trailing slash to list a "directory" level.
    if path[-1] != '/':
        path += '/'
    return list(gcs.listbucket(path, delimiter='/'))
def parse_junit(xml):
    """Generate failed tests as a series of (name, duration, text) tuples."""
    root = ET.fromstring(xml)
    if root.tag == 'testsuite':
        # Flat file: every child is a testcase.
        for case in root:
            case_name = case.attrib['name']
            duration = float(case.attrib['time'])
            for failure in case.findall('failure'):
                yield case_name, duration, failure.text
    elif root.tag == 'testsuites':
        # Nested file: prefix each case with its suite name.
        for suite in root:
            suite_name = suite.attrib['name']
            for case in suite.findall('testcase'):
                full_name = '%s %s' % (suite_name, case.attrib['name'])
                duration = float(case.attrib['time'])
                for failure in case.findall('failure'):
                    yield full_name, duration, failure.text
    else:
        logging.error('unable to find failures, unexpected tag %s', root.tag)
@memcache_memoize('build-details://', expires=60 * 60 * 4)
def build_details(build_dir):
    """
    Collect information from a build directory.

    Args:
        build_dir: GCS path containing a build's results.

    Returns:
        started: value from started.json {'version': ..., 'timestamp': ...}
        finished: value from finished.json {'timestamp': ..., 'result': ...}
        failures: list of (name, duration, text) tuples
        build_log: a hilighted portion of errors in the build log. May be None.
    """
    # Kick off both reads before blocking so they run concurrently.
    started_fut = gcs_async.read(build_dir + '/started.json')
    finished = gcs_async.read(build_dir + '/finished.json').get_result()
    started = started_fut.get_result()
    # Tolerate exactly one of the two files being missing by treating it as
    # JSON null; if both are missing, there is no build here -- return None.
    if finished and not started:
        started = 'null'
    if started and not finished:
        finished = 'null'
    elif not (started and finished):
        return
    started = json.loads(started)
    finished = json.loads(finished)
    failures = []
    junit_paths = [f.filename for f in gcs_ls('%s/artifacts' % build_dir)
                   if re.match(r'junit_.*\.xml', os.path.basename(f.filename))]
    junit_futures = [gcs_async.read(f) for f in junit_paths]
    for future in junit_futures:
        junit = future.get_result()
        if junit is None:
            continue
        failures.extend(parse_junit(junit))
    build_log = None
    # Fall back to scraping the raw build log only when the build failed but
    # produced no junit failures to explain why.
    if finished and finished.get('result') != 'SUCCESS' and len(failures) == 0:
        build_log = gcs_async.read(build_dir + '/build-log.txt').get_result()
        if build_log:
            build_log = log_parser.digest(build_log.decode('utf8', 'replace'))
            logging.info('fallback log parser emitted %d lines',
                         build_log.count('\n'))
    return started, finished, failures, build_log
@memcache_memoize('pr-details://', expires=60 * 3)
def pr_builds(pr):
    """
    Get information for all builds run by a PR.

    Args:
        pr: the PR number
    Returns:
        A dictionary of {job: [(build_number, started_json, finished.json)]}
    """
    jobs_dirs_fut = gcs_async.listdirs('%s/%s' % (PR_PREFIX, pr))
    def base(path):
        # Last path component of a directory path (strips the trailing '/').
        return os.path.basename(os.path.dirname(path))
    jobs_futures = [(job, gcs_async.listdirs(job)) for job in jobs_dirs_fut.get_result()]
    futures = []
    for job, builds_fut in jobs_futures:
        for build in builds_fut.get_result():
            # Start both reads now; results are collected in the loop below.
            sta_fut = gcs_async.read('/%sstarted.json' % build)
            fin_fut = gcs_async.read('/%sfinished.json' % build)
            futures.append([base(job), base(build), sta_fut, fin_fut])
    # Newest builds first, comparing build numbers naturally via pad_numbers.
    # NOTE: tuple-parameter lambda is Python 2-only syntax (this is a GAE
    # Python 2 app).
    futures.sort(key=lambda (job, build, s, f): (job, pad_numbers(build)), reverse=True)
    jobs = {}
    for job, build, started_fut, finished_fut in futures:
        started = started_fut.get_result()
        finished = finished_fut.get_result()
        if started is not None:
            started = json.loads(started)
        if finished is not None:
            finished = json.loads(finished)
        jobs.setdefault(job, []).append((build, started, finished))
    return jobs
class RenderingHandler(webapp2.RequestHandler):
    """Base class for Handlers that render Jinja templates."""
    def __init__(self, *args, **kwargs):
        super(RenderingHandler, self).__init__(*args, **kwargs)
        # The default deadline of 5 seconds is too aggressive of a target for GCS
        # directory listing operations.
        urlfetch.set_default_fetch_deadline(60)
    def render(self, template, context):
        """Render a context dictionary using a given template."""
        template = JINJA_ENVIRONMENT.get_template(template)
        self.response.write(template.render(context))
    def check_bucket(self, prefix):
        """Abort with 404 unless the path prefix is in BUCKET_WHITELIST."""
        # Accept either the full prefix or just its leading bucket component.
        if prefix in BUCKET_WHITELIST:
            return
        if prefix[:prefix.find('/')] not in BUCKET_WHITELIST:
            self.abort(404)
class IndexHandler(RenderingHandler):
    """Render the index."""
    def get(self):
        # Landing page: the static list of well-known jobs.
        self.render("index.html", {'jobs': DEFAULT_JOBS})
class BuildHandler(RenderingHandler):
    """Show information about a Build and its failing tests."""
    def get(self, prefix, job, build):
        self.check_bucket(prefix)
        job_dir = '/%s/%s/' % (prefix, job)
        build_dir = job_dir + build
        details = build_details(build_dir)
        if not details:
            # Neither started.json nor finished.json was found.
            logging.warning('unable to load %s', build_dir)
            self.render('build_404.html', {"build_dir": build_dir})
            self.response.set_status(404)
            return
        started, finished, failures, build_log = details
        if started:
            # started.json 'version' looks like "<semver>+<commit>".
            commit = started['version'].split('+')[-1]
        else:
            commit = None
        pr = None
        # For PR jobs, link back to the PR summary page.
        if prefix.startswith(PR_PREFIX):
            pr = os.path.basename(prefix)
        self.render('build.html', dict(
            job_dir=job_dir, build_dir=build_dir, job=job, build=build,
            commit=commit, started=started, finished=finished,
            failures=failures, build_log=build_log, pr=pr))
class BuildListHandler(RenderingHandler):
    """Show a list of Builds for a Job."""
    def get(self, prefix, job):
        self.check_bucket(prefix)
        job_dir = '/%s/%s/' % (prefix, job)
        fstats = gcs_ls(job_dir)
        # Newest build numbers first (natural numeric order via pad_numbers).
        fstats.sort(key=lambda f: pad_numbers(f.filename), reverse=True)
        self.render('build_list.html',
                    dict(job=job, job_dir=job_dir, fstats=fstats))
class JobListHandler(RenderingHandler):
    """Show a list of Jobs in a directory."""
    def get(self, prefix):
        self.check_bucket(prefix)
        jobs_dir = '/%s' % prefix
        fstats = gcs_ls(jobs_dir)
        fstats.sort()
        self.render('job_list.html', dict(jobs_dir=jobs_dir, fstats=fstats))
class PRHandler(RenderingHandler):
    """Show a list of test runs for a PR."""
    def get(self, pr):
        builds = pr_builds(pr)
        # Lay the per-job build history out as a table for the template.
        max_builds, headings, rows = pull_request.builds_to_table(builds)
        self.render('pr.html', dict(pr=pr, prefix=PR_PREFIX,
                                    max_builds=max_builds, header=headings, rows=rows))
# WSGI route table: URL pattern groups map to handler get() arguments.
app = webapp2.WSGIApplication([
    (r'/', IndexHandler),
    (r'/jobs/(.*)$', JobListHandler),
    (r'/builds/(.*)/([^/]+)/?', BuildListHandler),
    (r'/build/(.*)/([^/]+)/(\d+)/?', BuildHandler),
    (r'/pr/(\d+)', PRHandler),
], debug=True)
| 34.587662 | 89 | 0.63353 |
11c77afe218bd1d5428fc168e217686df2b517ef | 13,874 | py | Python | mypy/test/testfinegrained.py | nvuillam/mypy | beba94c509211947201ece020e8fe513695ecea0 | [
"PSF-2.0"
] | 1 | 2021-02-16T10:01:33.000Z | 2021-02-16T10:01:33.000Z | mypy/test/testfinegrained.py | nvuillam/mypy | beba94c509211947201ece020e8fe513695ecea0 | [
"PSF-2.0"
] | null | null | null | mypy/test/testfinegrained.py | nvuillam/mypy | beba94c509211947201ece020e8fe513695ecea0 | [
"PSF-2.0"
] | 1 | 2022-01-31T13:24:43.000Z | 2022-01-31T13:24:43.000Z | """Test cases for fine-grained incremental checking.
Each test cases runs a batch build followed by one or more fine-grained
incremental steps. We verify that each step produces the expected output.
See the comment at the top of test-data/unit/fine-grained.test for more
information.
N.B.: Unlike most of the other test suites, testfinegrained does not
rely on an alt_lib_path for finding source files. This means that they
can test interactions with the lib_path that is built implicitly based
on specified sources.
"""
import os
import re
import shutil
from typing import List, Dict, Any, Tuple, Union, cast
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.errors import CompileError
from mypy.options import Options
from mypy.test.config import test_temp_dir
from mypy.test.data import (
DataDrivenTestCase, DataSuite, UpdateFile, DeleteFile
)
from mypy.test.helpers import (
assert_string_arrays_equal, parse_options, copy_and_fudge_mtime, assert_module_equivalence,
assert_target_equivalence
)
from mypy.server.mergecheck import check_consistency
from mypy.dmypy_util import DEFAULT_STATUS_FILE
from mypy.dmypy_server import Server
from mypy.config_parser import parse_config_file
from mypy.find_sources import create_source_list
import pytest
# Set to True to perform (somewhat expensive) checks for duplicate AST nodes after merge.
# Kept off by default to keep the suite fast; flip on when debugging merge issues.
CHECK_CONSISTENCY = False
class FineGrainedSuite(DataSuite):
    """Data-driven suite: run a batch build, then apply each step's file
    updates/deletions and compare the fine-grained incremental output
    (and, optionally, the fired triggers) against the expected output."""
    files = [
        'fine-grained.test',
        'fine-grained-cycles.test',
        'fine-grained-blockers.test',
        'fine-grained-modules.test',
        'fine-grained-follow-imports.test',
        'fine-grained-suggest.test',
    ]
    # Whether to use the fine-grained cache in the testing. This is overridden
    # by a trivial subclass to produce a suite that uses the cache.
    use_cache = False
    def should_skip(self, testcase: DataDrivenTestCase) -> bool:
        # Decide whether to skip the test. This could have been structured
        # as a filter() classmethod also, but we want the tests reported
        # as skipped, not just elided.
        if self.use_cache:
            if testcase.only_when == '-only_when_nocache':
                return True
            # TODO: In caching mode we currently don't well support
            # starting from cached states with errors in them.
            if testcase.output and testcase.output[0] != '==':
                return True
        else:
            if testcase.only_when == '-only_when_cache':
                return True
        return False
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Run one data-driven test case end to end and check its output."""
        if self.should_skip(testcase):
            pytest.skip()
            return
        main_src = '\n'.join(testcase.input)
        main_path = os.path.join(test_temp_dir, 'main')
        with open(main_path, 'w', encoding='utf8') as f:
            f.write(main_src)
        options = self.get_options(main_src, testcase, build_cache=False)
        build_options = self.get_options(main_src, testcase, build_cache=True)
        server = Server(options, DEFAULT_STATUS_FILE)
        num_regular_incremental_steps = self.get_build_steps(main_src)
        step = 1
        sources = self.parse_sources(main_src, step, options)
        if step <= num_regular_incremental_steps:
            messages = self.build(build_options, sources)
        else:
            messages = self.run_check(server, sources)
        a = []
        if messages:
            a.extend(normalize_messages(messages))
        assert testcase.tmpdir
        a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
        if server.fine_grained_manager:
            if CHECK_CONSISTENCY:
                check_consistency(server.fine_grained_manager)
        steps = testcase.find_steps()
        all_triggered = []
        for operations in steps:
            step += 1
            output, triggered = self.perform_step(
                operations,
                server,
                options,
                build_options,
                testcase,
                main_src,
                step,
                num_regular_incremental_steps,
            )
            a.append('==')
            a.extend(output)
            all_triggered.extend(triggered)
        # Normalize paths in test output (for Windows).
        a = [line.replace('\\', '/') for line in a]
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(
                testcase.file, testcase.line))
        if testcase.triggered:
            assert_string_arrays_equal(
                testcase.triggered,
                self.format_triggered(all_triggered),
                'Invalid active triggers ({}, line {})'.format(testcase.file,
                                                               testcase.line))
    def get_options(self,
                    source: str,
                    testcase: DataDrivenTestCase,
                    build_cache: bool,) -> Options:
        """Build the Options for either the regular batch build
        (build_cache=True) or the fine-grained run (build_cache=False)."""
        # This handles things like '# flags: --foo'.
        options = parse_options(source, testcase, incremental_step=1)
        options.incremental = True
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.error_summary = False
        options.fine_grained_incremental = not build_cache
        options.use_fine_grained_cache = self.use_cache and not build_cache
        options.cache_fine_grained = self.use_cache
        options.local_partial_types = True
        if re.search('flags:.*--follow-imports', source) is None:
            # Override the default for follow_imports
            options.follow_imports = 'error'
        for name, _ in testcase.files:
            if 'mypy.ini' in name or 'pyproject.toml' in name:
                parse_config_file(options, lambda: None, name)
                break
        return options
    def run_check(self, server: Server, sources: List[BuildSource]) -> List[str]:
        """Run a fine-grained check through the dmypy server; return output lines."""
        response = server.check(sources, is_tty=False, terminal_width=-1)
        out = cast(str, response['out'] or response['err'])
        return out.splitlines()
    def build(self,
              options: Options,
              sources: List[BuildSource]) -> List[str]:
        """Run a regular (non-fine-grained) build and return its messages."""
        try:
            result = build.build(sources=sources,
                                 options=options)
        except CompileError as e:
            return e.messages
        return result.errors
    def format_triggered(self, triggered: List[List[str]]) -> List[str]:
        """Render per-step triggers as '<step>: t1, t2, ...' lines,
        dropping internal '__>'-suffixed triggers."""
        result = []
        for n, triggers in enumerate(triggered):
            filtered = [trigger for trigger in triggers
                        if not trigger.endswith('__>')]
            filtered = sorted(filtered)
            result.append(('%d: %s' % (n + 2, ', '.join(filtered))).strip())
        return result
    def get_build_steps(self, program_text: str) -> int:
        """Get the number of regular incremental steps to run, from the test source"""
        if not self.use_cache:
            return 0
        m = re.search('# num_build_steps: ([0-9]+)$', program_text, flags=re.MULTILINE)
        if m is not None:
            return int(m.group(1))
        return 1
    def perform_step(self,
                     operations: List[Union[UpdateFile, DeleteFile]],
                     server: Server,
                     options: Options,
                     build_options: Options,
                     testcase: DataDrivenTestCase,
                     main_src: str,
                     step: int,
                     num_regular_incremental_steps: int) -> Tuple[List[str], List[List[str]]]:
        """Perform one fine-grained incremental build step (after some file updates/deletions).
        Return (mypy output, triggered targets).
        """
        for op in operations:
            if isinstance(op, UpdateFile):
                # Modify/create file
                copy_and_fudge_mtime(op.source_path, op.target_path)
            else:
                # Delete file/directory
                if os.path.isdir(op.path):
                    # Sanity check to avoid unexpected deletions
                    assert op.path.startswith('tmp')
                    shutil.rmtree(op.path)
                else:
                    os.remove(op.path)
        sources = self.parse_sources(main_src, step, options)
        if step <= num_regular_incremental_steps:
            new_messages = self.build(build_options, sources)
        else:
            new_messages = self.run_check(server, sources)
        updated: List[str] = []
        changed: List[str] = []
        targets: List[str] = []
        triggered = []
        if server.fine_grained_manager:
            if CHECK_CONSISTENCY:
                check_consistency(server.fine_grained_manager)
            triggered.append(server.fine_grained_manager.triggered)
            updated = server.fine_grained_manager.updated_modules
            changed = [mod for mod, file in server.fine_grained_manager.changed_modules]
            targets = server.fine_grained_manager.processed_targets
        expected_stale = testcase.expected_stale_modules.get(step - 1)
        if expected_stale is not None:
            assert_module_equivalence(
                'stale' + str(step - 1),
                expected_stale, changed)
        expected_rechecked = testcase.expected_rechecked_modules.get(step - 1)
        if expected_rechecked is not None:
            assert_module_equivalence(
                'rechecked' + str(step - 1),
                expected_rechecked, updated)
        expected = testcase.expected_fine_grained_targets.get(step)
        if expected:
            assert_target_equivalence(
                'targets' + str(step),
                expected, targets)
        new_messages = normalize_messages(new_messages)
        a = new_messages
        assert testcase.tmpdir
        a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
        return a, triggered
    def parse_sources(self, program_text: str,
                      incremental_step: int,
                      options: Options) -> List[BuildSource]:
        """Return target BuildSources for a test case.
        Normally, the unit tests will check all files included in the test
        case. This differs from how testcheck works by default, as dmypy
        doesn't currently support following imports.
        You can override this behavior and instruct the tests to check
        multiple modules by using a comment like this in the test case
        input:
        # cmd: main a.py
        You can also use `# cmdN:` to have a different cmd for incremental
        step N (2, 3, ...).
        """
        m = re.search('# cmd: mypy ([a-zA-Z0-9_./ ]+)$', program_text, flags=re.MULTILINE)
        regex = '# cmd{}: mypy ([a-zA-Z0-9_./ ]+)$'.format(incremental_step)
        alt_m = re.search(regex, program_text, flags=re.MULTILINE)
        if alt_m is not None:
            # Optionally return a different command if in a later step
            # of incremental mode, otherwise default to reusing the
            # original cmd.
            m = alt_m
        if m:
            # The test case wants to use a non-default set of files.
            paths = [os.path.join(test_temp_dir, path) for path in m.group(1).strip().split()]
            return create_source_list(paths, options)
        else:
            base = BuildSource(os.path.join(test_temp_dir, 'main'), '__main__', None)
            # Use expand_dir instead of create_source_list to avoid complaints
            # when there aren't any .py files in an increment
            return [base] + create_source_list([test_temp_dir], options,
                                               allow_empty_dir=True)
    def maybe_suggest(self, step: int, server: Server, src: str, tmp_dir: str) -> List[str]:
        """Run any '# suggestN:' commands for this step; return their output lines."""
        output: List[str] = []
        targets = self.get_suggest(src, step)
        for flags, target in targets:
            json = '--json' in flags
            callsites = '--callsites' in flags
            no_any = '--no-any' in flags
            no_errors = '--no-errors' in flags
            try_text = '--try-text' in flags
            m = re.match('--flex-any=([0-9.]+)', flags)
            flex_any = float(m.group(1)) if m else None
            m = re.match(r'--use-fixme=(\w+)', flags)
            use_fixme = m.group(1) if m else None
            m = re.match('--max-guesses=([0-9]+)', flags)
            max_guesses = int(m.group(1)) if m else None
            res = cast(Dict[str, Any],
                       server.cmd_suggest(
                           target.strip(), json=json, no_any=no_any, no_errors=no_errors,
                           try_text=try_text, flex_any=flex_any, use_fixme=use_fixme,
                           callsites=callsites, max_guesses=max_guesses))
            val = res['error'] if 'error' in res else res['out'] + res['err']
            if json:
                # JSON contains already escaped \ on Windows, so requires a bit of care.
                val = val.replace('\\\\', '\\')
                val = val.replace(os.path.realpath(tmp_dir) + os.path.sep, '')
            output.extend(val.strip().split('\n'))
        return normalize_messages(output)
    def get_suggest(self, program_text: str,
                    incremental_step: int) -> List[Tuple[str, str]]:
        """Extract (flags, target) pairs from '# suggestN:' comments in the source."""
        step_bit = '1?' if incremental_step == 1 else str(incremental_step)
        regex = '# suggest{}: (--[a-zA-Z0-9_\\-./=?^ ]+ )*([a-zA-Z0-9_.:/?^ ]+)$'.format(step_bit)
        m = re.findall(regex, program_text, flags=re.MULTILINE)
        return m
def normalize_messages(messages: List[str]) -> List[str]:
    """Strip a leading 'tmp<sep>' path component from each message line."""
    prefix = 'tmp' + os.sep
    normalized = []
    for message in messages:
        if message.startswith(prefix):
            message = message[len(prefix):]
        normalized.append(message)
    return normalized
| 39.64 | 98 | 0.599034 |
3c5fedd240c5be7abdfd259bff02c4afc8b9f423 | 8,836 | py | Python | model.py | theodoriss/STSGCN | 8788e40a9d62f1e5664fb1e6b34f43faeb6f9cd7 | [
"MIT"
] | 25 | 2021-11-01T08:51:33.000Z | 2022-03-29T06:12:55.000Z | model.py | theodoriss/STSGCN | 8788e40a9d62f1e5664fb1e6b34f43faeb6f9cd7 | [
"MIT"
] | 3 | 2022-01-04T02:32:17.000Z | 2022-03-23T14:06:38.000Z | model.py | theodoriss/STSGCN | 8788e40a9d62f1e5664fb1e6b34f43faeb6f9cd7 | [
"MIT"
] | 10 | 2021-10-17T01:41:44.000Z | 2022-03-04T14:25:48.000Z | #!/usr/bin/env python
# coding: utf-8
import torch
import torch.nn as nn
import math
class ConvTemporalGraphical(nn.Module):
    #Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py
    r"""Joint/time graph convolution with learnable adjacency tensors.

    Unlike the original ST-GCN graph convolution, this variant does not use
    a fixed skeleton adjacency: it learns a ``(V, T, T)`` tensor ``T`` that
    mixes features across time steps (separately per joint) and a
    ``(T, V, V)`` tensor ``A`` that mixes features across joints
    (separately per frame), both applied via Einstein summation.

    Args:
        time_dim (int): number of input time frames :math:`T`.
        joints_dim (int): number of graph nodes (joints) :math:`V`.

    Shape:
        - Input: :math:`(N, C, T, V)`
        - Output: :math:`(N, C, T, V)` (shape preserved)
        where :math:`N` is the batch size and :math:`C` the number of
        feature channels.
    """
    def __init__(self,
                 time_dim,
                 joints_dim
                 ):
        super(ConvTemporalGraphical,self).__init__()
        self.A=nn.Parameter(torch.FloatTensor(time_dim, joints_dim,joints_dim)) #learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix)
        stdv = 1. / math.sqrt(self.A.size(1))
        self.A.data.uniform_(-stdv,stdv)
        # Learnable temporal mixing tensor, initialized the same way as A.
        self.T=nn.Parameter(torch.FloatTensor(joints_dim , time_dim, time_dim))
        stdv = 1. / math.sqrt(self.T.size(1))
        self.T.data.uniform_(-stdv,stdv)
        '''
        self.prelu = nn.PReLU()
        self.Z=nn.Parameter(torch.FloatTensor(joints_dim, joints_dim, time_dim, time_dim))
        stdv = 1. / math.sqrt(self.Z.size(2))
        self.Z.data.uniform_(-stdv,stdv)
        '''
    def forward(self, x):
        # First mix across the time dimension, then across the joint dimension.
        x = torch.einsum('nctv,vtq->ncqv', (x, self.T))
        ## x=self.prelu(x)
        x = torch.einsum('nctv,tvw->nctw', (x, self.A))
        ## x = torch.einsum('nctv,wvtq->ncqw', (x, self.Z))
        return x.contiguous()
class ST_GCNN_layer(nn.Module):
    """
    Space-Time graph-convolutional layer: the learnable graph convolution
    (ConvTemporalGraphical) followed by a 2-D convolution with batch-norm
    and dropout, plus a residual connection and a PReLU activation.

    Shape:
        - Input: :math:`(N, in_channels, T, V)`
        - Output: :math:`(N, out_channels, T, V)`
        where
            :math:`N` is a batch size,
            :math:`T` is the number of input frames,
            :math:`V` is the number of graph nodes.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 time_dim,
                 joints_dim,
                 dropout,
                 bias=True):
        # NOTE(review): `bias` is accepted but never forwarded to any
        # sub-module — confirm whether it should reach nn.Conv2d.
        super(ST_GCNN_layer,self).__init__()
        self.kernel_size = kernel_size
        # Odd kernel sizes guarantee 'same' padding below preserves T and V.
        assert self.kernel_size[0] % 2 == 1
        assert self.kernel_size[1] % 2 == 1
        padding = ((self.kernel_size[0] - 1) // 2,(self.kernel_size[1] - 1) // 2)
        self.gcn=ConvTemporalGraphical(time_dim,joints_dim) # the graph convolution layer
        self.tcn = nn.Sequential(
            nn.Conv2d(
                in_channels,
                out_channels,
                (self.kernel_size[0], self.kernel_size[1]),
                (stride, stride),
                padding,
            ),
            nn.BatchNorm2d(out_channels),
            nn.Dropout(dropout, inplace=True),
        )
        # 1x1 projection on the skip path when the shape changes; identity otherwise.
        if stride != 1 or in_channels != out_channels:
            self.residual=nn.Sequential(nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=(1, 1)),
                nn.BatchNorm2d(out_channels),
                )
        else:
            self.residual=nn.Identity()
        self.prelu = nn.PReLU()
    def forward(self, x):
        # (legacy st-gcn check removed: A is no longer a forward() argument)
        res=self.residual(x)
        x=self.gcn(x)
        x=self.tcn(x)
        x=x+res
        x=self.prelu(x)
        return x
class CNN_layer(nn.Module): # This is the simple CNN layer, that performs a 2-D convolution while maintaining the dimensions of the input (except for the features dimension)
    """Conv2d + BatchNorm2d + Dropout with 'same' padding, so the spatial
    dimensions of the input are preserved and only the channel count changes."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 dropout,
                 bias=True):
        # NOTE(review): `bias` is accepted but never passed to nn.Conv2d — confirm intent.
        super(CNN_layer,self).__init__()
        self.kernel_size = kernel_size
        padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) # padding so that both dimensions are maintained
        # Odd kernel sizes are required for exact 'same' padding.
        assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
        self.block= [nn.Conv2d(in_channels,out_channels,kernel_size=kernel_size,padding=padding)
                     ,nn.BatchNorm2d(out_channels),nn.Dropout(dropout, inplace=True)]
        self.block=nn.Sequential(*self.block)
    def forward(self, x):
        output= self.block(x)
        return output
# In[11]:
class Model(nn.Module):
    """
    STS-GCN pose-forecasting model: a stack of four ST-GCNN layers followed
    by a CNN "time extrapolator" that maps T_in input frames to T_out
    predicted frames.

    Shape:
        - Input: :math:`(N, in_channels, T_{in}, V)`
        - Output: :math:`(N, T_{out}, in_channels, V)`
        where
            :math:`N` is a batch size,
            :math:`T_{in}/T_{out}` is the length of the input/output sequence,
            :math:`V` is the number of graph nodes,
            and ``in_channels`` is the number of coordinate channels (typically 3).
    """
    def __init__(self,
                 input_channels,
                 input_time_frame,
                 output_time_frame,
                 st_gcnn_dropout,
                 joints_to_consider,
                 n_txcnn_layers,
                 txc_kernel_size,
                 txc_dropout,
                 bias=True):
        super(Model,self).__init__()
        self.input_time_frame=input_time_frame
        self.output_time_frame=output_time_frame
        self.joints_to_consider=joints_to_consider
        self.st_gcnns=nn.ModuleList()
        self.n_txcnn_layers=n_txcnn_layers
        self.txcnns=nn.ModuleList()
        # Bottleneck channel schedule: C -> 64 -> 32 -> 64 -> C.
        self.st_gcnns.append(ST_GCNN_layer(input_channels,64,[1,1],1,input_time_frame,
                                           joints_to_consider,st_gcnn_dropout))
        self.st_gcnns.append(ST_GCNN_layer(64,32,[1,1],1,input_time_frame,
                                           joints_to_consider,st_gcnn_dropout))
        self.st_gcnns.append(ST_GCNN_layer(32,64,[1,1],1,input_time_frame,
                                           joints_to_consider,st_gcnn_dropout))
        self.st_gcnns.append(ST_GCNN_layer(64,input_channels,[1,1],1,input_time_frame,
                                           joints_to_consider,st_gcnn_dropout))
        # at this point, we must permute the dimensions of the gcn output, from (N,C,T,V) into (N,T,C,V)
        self.txcnns.append(CNN_layer(input_time_frame,output_time_frame,txc_kernel_size,txc_dropout)) # with kernel_size [3,3] the dimensions of C,V will be maintained
        for i in range(1,n_txcnn_layers):
            self.txcnns.append(CNN_layer(output_time_frame,output_time_frame,txc_kernel_size,txc_dropout))
        self.prelus = nn.ModuleList()
        for j in range(n_txcnn_layers):
            self.prelus.append(nn.PReLU())
    def forward(self, x):
        for gcn in (self.st_gcnns):
            x = gcn(x)
        x= x.permute(0,2,1,3) # prepare the input for the Time-Extrapolator-CNN (NCTV->NTCV)
        x=self.prelus[0](self.txcnns[0](x))
        for i in range(1,self.n_txcnn_layers):
            x = self.prelus[i](self.txcnns[i](x)) +x # residual connection
        return x
| 34.11583 | 174 | 0.545496 |
678e31e71e55a45017b1f76a57f62e35556decdc | 4,284 | py | Python | kubernetes/client/models/v1_host_path_volume_source.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | 1 | 2019-07-12T05:38:06.000Z | 2019-07-12T05:38:06.000Z | kubernetes/client/models/v1_host_path_volume_source.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_host_path_volume_source.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | 1 | 2021-05-18T12:25:56.000Z | 2021-05-18T12:25:56.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1HostPathVolumeSource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'path': 'str',
        'type': 'str'
    }
    attribute_map = {
        'path': 'path',
        'type': 'type'
    }
    def __init__(self, path=None, type=None):
        """
        V1HostPathVolumeSource - a model defined in Swagger
        """
        self._path = None
        self._type = None
        self.discriminator = None
        # 'path' is required: assigning through the property triggers the
        # not-None validation in the setter. 'type' is optional.
        self.path = path
        if type is not None:
            self.type = type
    @property
    def path(self):
        """
        Gets the path of this V1HostPathVolumeSource.
        Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
        :return: The path of this V1HostPathVolumeSource.
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """
        Sets the path of this V1HostPathVolumeSource.
        Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
        :param path: The path of this V1HostPathVolumeSource.
        :type: str
        """
        if path is None:
            raise ValueError("Invalid value for `path`, must not be `None`")
        self._path = path
    @property
    def type(self):
        """
        Gets the type of this V1HostPathVolumeSource.
        Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
        :return: The type of this V1HostPathVolumeSource.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """
        Sets the type of this V1HostPathVolumeSource.
        Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
        :param type: The type of this V1HostPathVolumeSource.
        :type: str
        """
        self._type = type
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively convert nested swagger models, lists and dicts into
        # plain Python data structures.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1HostPathVolumeSource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
954d6cc18aadd69994842aafdf63804f892aa17c | 259 | py | Python | 2015/06/fc_2015_06_05.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | null | null | null | 2015/06/fc_2015_06_05.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | 1 | 2015-04-27T01:43:45.000Z | 2015-04-27T01:43:45.000Z | 2015/06/fc_2015_06_05.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# imports go here
from collections import Counter
#
# Free Coding session for 2015-06-05
# Written by Matt Warren
#
def digit_counts(number):
    """Return a Counter mapping each decimal digit (as a one-character
    string) of *number*'s textual representation to its number of
    occurrences.

    Non-digit characters (e.g. the minus sign of a negative number or the
    decimal point of a float) are ignored, so ``digit_counts(-101)``
    correctly yields ``{'1': 2, '0': 1}``.  The previous implementation
    counted every character of ``str(number)``, tallying '-' and '.' as
    if they were digits.
    """
    return Counter(ch for ch in str(number) if ch.isdigit())
if __name__ == '__main__':
    # Demo: print the digit-frequency Counter of a sample number.
    print(digit_counts(1234534))
| 18.5 | 36 | 0.72973 |
b12fe4033c2adef2fbf72e540c13b1f7cde06778 | 7,170 | py | Python | docs/sphinx/conf.py | EOX-A/ngeo-b | b55315c7955b4c2b68cbd7f8276ac890ee19106e | [
"MIT"
] | 4 | 2016-08-05T17:33:41.000Z | 2020-07-10T21:30:13.000Z | docs/sphinx/conf.py | EOX-A/ngeo-b | b55315c7955b4c2b68cbd7f8276ac890ee19106e | [
"MIT"
] | 23 | 2015-10-29T17:52:06.000Z | 2021-07-20T09:52:18.000Z | docs/sphinx/conf.py | EOX-A/ngeo-b | b55315c7955b4c2b68cbd7f8276ac890ee19106e | [
"MIT"
] | 5 | 2015-07-21T09:33:57.000Z | 2019-11-28T22:55:03.000Z | # -*- coding: utf-8 -*-
#
# ngEOBrowseServer documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 18 17:54:45 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ngEO Browse Server'
copyright = u'2011, 2012, 2013, 2014, 2015, 2016 European Space Agency'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ".".join(get_version().split(".")[:2])
version = "0.1" # TODO: make this dependant on an actual version
# The full version, including alpha/beta/rc tags.
#release = get_version()
release = "0.1.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# TODO: ?
html_style = "styles.css"
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# TODO: ?
#html_title = ""ngEO Browse Server documentation " + release + " documentation"
html_title = "ngEO Browse Server documentation (version %s)" % release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
html_last_updated_fmt = '%Y-%m-%dT%H:%M:%SZ'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
# '**':'indexsidebar.html',
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ngEOBrowseServerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('en/index', 'ngEOBrowseServer.tex', u'ngEOBrowseServer Documentation',
u'Stephan Meissl \\and Fabian Schindler',
'manual', True),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'logo/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
latex_domain_indices = False
latex_use_modindex = False
latex_show_pagerefs = True
latex_show_urls = 'footnote'
# Links to external projects.
intersphinx_mapping = {
'django': ('https://docs.djangoproject.com/en/dev/', 'http://docs.djangoproject.com/en/dev/_objects/'),
'python': ('http://docs.python.org/2.7', None),
}
| 33.194444 | 107 | 0.72106 |
546a8814fd33ff524fe862b9278e966a3dce9b97 | 23,686 | py | Python | example_problems/tutorial/magic_index/services/magic_indexes_lib.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 4 | 2021-06-27T13:27:24.000Z | 2022-03-24T10:46:28.000Z | example_problems/tutorial/magic_index/services/magic_indexes_lib.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 1 | 2021-01-23T06:50:31.000Z | 2021-03-17T15:35:18.000Z | example_problems/tutorial/magic_index/services/magic_indexes_lib.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 5 | 2021-04-01T15:21:57.000Z | 2022-01-29T15:07:38.000Z | #!/usr/bin/env python3
import random, math
from collections import Counter
import numpy as np
def check_input_vector(vec, TAc, LANG):
    """Validate that `vec` is strictly increasing.

    On the first violation, print a localized error message through the
    TAL channel `TAc` and terminate the service (exit code 0, as is the
    convention for graceful rejection in these services).

    Fixes: the user-facing messages had typos ("incrisingly", a missing
    space in "=vec").
    """
    for i in range(1, len(vec)):
        if vec[i] == vec[i-1]:
            TAc.print(LANG.render_feedback("equal-values", f'No. Your vector contains entries with the same value vec[{i-1}] = {vec[i-1]} = vec[{i}].'), "red", ["bold"])
            exit(0)
        if vec[i] < vec[i-1]:
            TAc.print(LANG.render_feedback("decrease", f'No. Your vector is not increasingly sorted: vec[{i-1}] = {vec[i-1]} > {vec[i]} = vec[{i}].'), "red", ["bold"])
            exit(0)
def spot_magic_index(vec):
    """Return, in increasing order, every index i with vec[i] == i."""
    return [i for i, value in enumerate(vec) if value == i]
# The optimal strategy always probes the middle position. The worst case unfolds as follows: at the first probe nature answers "yes, it is a magic index" (otherwise half of the positions would already be ruled out). The problem then splits into two subproblems, left and right, which however have a different nature (since we now know that the magic positions form a prefix or a suffix).
# On each residual subproblem, whenever we cannot split exactly in half, nature leaves us the bigger half.
def num_questions_worst_case(n):
    """Worst-case number of probes an optimal player needs on a strictly
    increasing vector of n entries, with no prior information."""
    if n == 0:
        return 0
    # First probe hits the middle; the two residual one-sided subproblems
    # have sizes (n-1)//2 and (n-1)//2 + (n-1)%2.
    left = (n - 1) // 2
    right = (n - 1) - left
    return 1 + num_questions_worst_case_support(left) + num_questions_worst_case_support(right)


def num_questions_worst_case_support(n):
    """Worst-case number of probes on a one-sided residual subproblem of size n."""
    questions = 0
    remaining = n
    while remaining > 0:
        questions += 1
        # Nature always leaves us the bigger half.
        remaining = (remaining - 1) // 2 + (remaining - 1) % 2
    return questions
def print_vector(vec, TAc, LANG):
    """Draw `vec` as a one-row ASCII box through the TAL channel `TAc`."""
    w = len(vec)*2
    h = 3
    print()
    for i in range(h):
        if not i or i == h-1:
            # Top and bottom border of the box.
            #print(' -'*w, end ='')
            TAc.print(LANG.render_feedback("draw box", f' -'*w), "white", ["bold"], end="")
            print()
        else:
            # Middle row: opening bar, then one ' value |' cell per entry.
            #print('|', end="")
            TAc.print(LANG.render_feedback("draw box", '|'), "white", ["bold"], end="")
            # NOTE(review): the inner loop reuses the name `i`, shadowing the
            # row counter; harmless here because the outer for reassigns it,
            # but worth renaming.
            for i in vec:
                #print(f' {i} |', end="")
                TAc.print(LANG.render_feedback("draw box", f' {i} |'), "white", ["bold"], end="")
            print()
    print()
def print_game_representation(discovered_vec, TAc, LANG):
    """Prints the comma-separated view of the partially discovered vector."""
    rendered = ','.join(str(entry) for entry in discovered_vec)
    TAc.print(LANG.render_feedback("game representation", f'The current game representation is: {rendered}\n'), "white", ["bold"], end="")
# Shared memoization state for the worst-case analysis done by f() and g():
# table_f[u] / table_g[u] cache f(u) / g(u) for a range of u unknown cells
# (fixed capacity of 1000 cells).
table_g = [None] * 1000
table_f = [None] * 1000
# Per-probe record of which answer ('-1', '0' or '1') realizes the worst case.
worst_f, worst_g = {}, {}
# Base cases: zero unknowns need no questions, one unknown needs exactly one.
table_f[0],table_g[0] = 0, 0
table_f[1], table_g[1] = 1, 1
def g(u, case):
    """Worst-case question count on a one-sided residual range of u unknowns.

    `case` is 'left' when the unknown cells lie left of a confirmed magic
    index (so the remaining answers are '-1' or '0') and anything else for
    the mirror situation (answers '1' or '0'). Results are memoized in
    table_g; worst_g records, for the probed position, which answer
    realizes the worst case.
    """
    global table_g, worst_g
    # Probe the middle cell; ceil keeps 0- vs 1-based numbering consistent.
    pos = math.ceil(u/2)
    if u == 0:
        return table_g[u]
    if u == 1:
        return table_g[u]
    else:
        # case1 / case0 are the residual costs after each possible answer.
        if case == 'left':
            if table_g[u-pos] == None:
                table_g[u-pos] = g(u-pos, case) #-1
            case1 = table_g[u-pos]
            if table_g[pos-1] == None:
                table_g[pos-1] = g(pos-1, case) #0
            case0 = table_g[pos-1]
        else:
            if table_g[pos-1] == None:
                table_g[pos-1] = g(pos-1, case) #1
            case1 = table_g[pos-1]
            if table_g[u-pos] == None:
                table_g[u-pos] = g(u-pos, case) #0
            case0 = table_g[u-pos]
        # If both answers are equally bad, break the tie at random.
        if case0 == case1:
            if case == 'left':
                w = random.randint(-1,0)
            else:
                w = random.randint(0,1)
        else:
            w = np.argmax([case1, case0])
        # w == 0 means the worst case is obtained placing a 1 (or -1) at the index.
        if w == 0:
            if case == 'left':
                worst_g[pos-1] = '-1'
            else:
                worst_g[pos-1] = '1'
        else:
            worst_g[pos-1] = '0'
        return 1 + max(case1, case0)
# Given the optimal probe (the middle split), f returns how many questions are
# needed in the worst case over the three possible answers at that index:
# -1 (value smaller than the index), 0 (value equal to the index, i.e. a
# magic index - MI), and 1 (value larger than the index).
def f(u):
    """Worst-case question count on a fresh range of u unknown cells."""
    global table_f, table_g, worst_f
    # Round up so 0- and 1-based index numbering cannot get confused.
    pos = math.ceil(u/2)
    if u == 0:
        return table_f[u]
    if u == 1:
        return table_f[u]
    else:
        if table_f[u-pos] == None:
            table_f[u-pos] = f(u-pos) #-1
        case1 = table_f[u-pos]
        if table_f[pos - 1] == None:
            table_f[pos - 1] = f(pos-1)
        case2 = table_f[pos - 1] #1
        if table_g[pos-1] == None:
            table_g[pos-1] = g(pos-1, 'left')
        if table_g[u-pos] == None:
            table_g[u-pos] = g(u-pos, 'right')
        # A '0' answer splits the game into a left and a right one-sided part.
        case0 = table_g[pos-1] + table_g[u-pos] #0
        w = np.argmax([case1,case2,case0])
        # Record which answer realizes the worst case at this probe position.
        if w == 0:
            worst_f[pos-1] = '-1'
        elif w == 1:
            worst_f[pos-1] = '1'
        else:
            worst_f[pos-1] = '0'
        return 1 + max(case1, case2, case0)
def get_positions_f(vec):
    """Returns (unknown_count, optimal_probe) before any '0' is discovered.

    `vec` holds per-cell answers ('-1', '1' or unknown). The unknown region
    lies between the last '-1' and the first '1'; the optimal probe is its
    middle cell, or None when nothing is left to ask.

    Implementation notes:
    - index() returns the FIRST occurrence, so the last occurrence of '-1'
      is found by scanning the reversed vector and subtracting from the
      last position.
    - Floor division computes the probe: with 0-based enumeration the
      middle of [lower, upper] is (lower + upper) // 2, which matches
      ceil(unknown / 2) under 1-based enumeration.
    """
    if '-1' in vec and '1' in vec:
        posLess = (len(vec)-1) - vec[::-1].index('-1')
        posGreater = vec.index('1')
        # Adjacent '-1' and '1': no unknown cells in between, nothing to probe.
        if posGreater == posLess + 1:
            return len(vec[posLess+1:posGreater]), None
        return len(vec[posLess+1:posGreater]), ((posLess+1) + (posGreater-1))//2
    elif '-1' in vec:
        posLess = (len(vec)-1) - vec[::-1].index('-1')
        if posLess == len(vec)-1:
            return 0,None
        else:
            return len(vec[posLess+1:]), (((posLess+1)+ (len(vec)-1))//2)
    elif '1' in vec:
        posGreater = vec.index('1')
        if posGreater == 0:
            return 0,None
        else:
            return len(vec[:posGreater]), ((posGreater-1)//2)
    else:
        # Nothing known yet: the whole vector is unknown.
        return len(vec), ((len(vec)-1)//2)
def get_positions_g(vec):
    """Unknown counts and optimal probes once a magic index ('0') is known.

    Returns ([unknown_left, unknown_right], [probe_left, probe_right]):
    the unknown cells split into the span between the last '-1' and the
    first '0', and the span between the last '0' and the first '1'. Each
    probe is the middle of its span, or None when that side is fully known.
    """
    first_occ_0 = vec.index('0')
    last_occ_0 = (len(vec)-1) - vec[::-1].index('0')
    last_occ_less = 0
    first_occ_greater = len(vec) # initialized as if the first occurrence were just past the vector
    sum_factor = 0 # correction applied only when a '-1' exists in the vector
    sub_factor = 0 # correction applied only when a '1' exists in the vector
    s = 0
    # If at least one '-1' exists, take its LAST occurrence.
    if '-1' in vec:
        last_occ_less = (len(vec)-1) - vec[::-1].index('-1')
        sum_factor += 1
    # If at least one '1' exists, take its FIRST occurrence.
    if '1' in vec:
        first_occ_greater = vec.index('1')
        sub_factor += 1
    # No '1' in vec: first_occ_greater stayed at len(vec), so subtract 1 in
    # the probe computation below or it would point one cell too far.
    if first_occ_greater == len(vec):
        s += 1
    # Optimal probes: middle cell between the last '-1' and the first MI
    # (both excluded), and between the last MI and the first '1' (excluded).
    optimal_pos = [((last_occ_less + sum_factor) + (first_occ_0 - 1))//2 , ((last_occ_0 + 1) + (first_occ_greater - s - sub_factor))//2]
    unknowns = [len(vec[last_occ_less + sum_factor: first_occ_0]), len(vec[last_occ_0 + 1: first_occ_greater])]
    # A side with no unknown cells gets no probe.
    if unknowns[0] == 0:
        optimal_pos[0] = None
    if unknowns[1] == 0:
        optimal_pos[1] = None
    return unknowns, optimal_pos
def getWorst_f():
    """Returns the worst-answer map recorded by f()."""
    return worst_f
def getWorst_g():
    """Returns the worst-answer map recorded by g()."""
    return worst_g
def cleanWorst_f():
    """Resets the worst-answer map used by f() and returns the empty map."""
    global worst_f
    worst_f = {}
    return worst_f
def cleanWorst_g():
    """Resets the worst-answer map used by g() and returns the empty map."""
    global worst_g
    worst_g = {}
    return worst_g
def get_first_previous(discovered_vec, chosen_index):
    """Finds the nearest already-revealed entry left of chosen_index.

    Returns (value, gap) where gap counts the unknown (None) cells skipped
    before reaching the value, or None when everything to the left is
    still unknown.
    """
    for gap, value in enumerate(discovered_vec[chosen_index-1::-1]):
        if value is not None:
            return value, gap
def get_first_following(discovered_vec, chosen_index):
    """Finds the nearest already-revealed entry right of chosen_index.

    Returns (value, gap) where gap counts the unknown (None) cells skipped
    before reaching the value, or None when everything to the right is
    still unknown.
    """
    for gap, value in enumerate(discovered_vec[chosen_index+1:]):
        if value is not None:
            return value, gap
def generate_value_for_vector(server_vector, discovered_vec, chosen_index):
    """Invents a concrete value to reveal at chosen_index.

    server_vector holds the structural answers ('-1'/'0'/'1'/None) while
    discovered_vec holds values already shown to the player. A '0' answer
    forces the value to equal its index; for '-1'/'1' a value is sampled
    from the interval left open between the nearest already-revealed
    neighbours of the same kind, keeping room for the unknown cells in
    between so the final vector can stay strictly increasing.
    """
    if server_vector[chosen_index] == '0':
        return chosen_index
    elif server_vector[chosen_index] == '-1':
        # Value smaller than its index: bound the sample by revealed
        # neighbours on both sides when they exist.
        if '-1' in server_vector[:chosen_index] and '-1' in server_vector[chosen_index+1:]:
            first_previous_smaller, free_space_left = get_first_previous(discovered_vec, chosen_index)
            first_following_smaller, free_space_rigth = get_first_following(discovered_vec, chosen_index)
            upper_bound = (first_following_smaller - 1) - free_space_rigth
            lower_bound = (first_previous_smaller + 1) + free_space_left
            return random.randint(lower_bound, upper_bound)
        elif '-1' in server_vector[:chosen_index]:
            first_previous_smaller, free_space_left = get_first_previous(discovered_vec, chosen_index)
            upper_bound = chosen_index - 1
            lower_bound = (first_previous_smaller + 1) + free_space_left
            return random.randint(lower_bound, upper_bound)
        elif '-1' in server_vector[chosen_index+1:]:
            first_following_smaller, free_space_rigth = get_first_following(discovered_vec, chosen_index)
            upper_bound = (first_following_smaller - 1) - free_space_rigth
            # No left neighbour: pick an arbitrary range below the index.
            lower_bound = chosen_index - random.randint(10,100)
            return random.randint(lower_bound, upper_bound)
        else:
            upper_bound = chosen_index - 1
            lower_bound = chosen_index - random.randint(10,100)
            return random.randint(lower_bound, upper_bound)
    elif server_vector[chosen_index] == '1':
        # Value larger than its index: mirror of the '-1' case above.
        if '1' in server_vector[:chosen_index] and '1' in server_vector[chosen_index+1:]:
            first_previous_greater, free_space_left = get_first_previous(discovered_vec, chosen_index)
            first_following_greater, free_space_rigth = get_first_following(discovered_vec, chosen_index)
            # NOTE(review): upper/lower look swapped compared with the '-1'
            # branch (upper comes from the PREVIOUS neighbour here);
            # random.randint(lower, upper) raises when lower > upper --
            # verify the intended bounds.
            upper_bound = (first_previous_greater + 1) + free_space_left
            lower_bound = (first_following_greater - 1) - free_space_rigth
            return random.randint(lower_bound, upper_bound)
        elif '1' in server_vector[:chosen_index]:
            first_previous_greater, free_space_left = get_first_previous(discovered_vec, chosen_index)
            upper_bound = chosen_index + random.randint(10,100)
            lower_bound = (first_previous_greater + 1) + free_space_left
            return random.randint(lower_bound, upper_bound)
        elif '1' in server_vector[chosen_index+1:]:
            first_following_greater, free_space_rigth = get_first_following(discovered_vec, chosen_index)
            upper_bound = (first_following_greater - 1) - free_space_rigth
            lower_bound = chosen_index + 1
            return random.randint(lower_bound, upper_bound)
        else:
            lower_bound = chosen_index + 1
            upper_bound = chosen_index + random.randint(10,100)
            return random.randint(lower_bound, upper_bound)
def generate_random_optimal_value_f(optimal_pos, chosen_index, vector_len):
    """Picks the structural symbol for chosen_index by vector quarter.

    Indexes in the first quarter answer '-1', indexes in the last quarter
    answer '1', everything in between answers '0'. optimal_pos is kept for
    interface compatibility but is not used.
    """
    quarter = vector_len // 4
    if chosen_index <= quarter:
        return '-1'
    if chosen_index >= vector_len - quarter:
        return '1'
    return '0'
def check_goal(opponent, goal, feedback, magic_indexes, user_solution, wasted_dollars, min_questions_worst_case, TAc, LANG):
    """Prints the final verdict and always terminates with exit(0).

    The answer is first checked for correctness (the sentinel ['e'] means
    "no magic index exists"); the verdict on the money spent then depends
    on `goal` ('correct' | 'at_most_twice_the_opt' | 'opt_plus_one' |
    'optimal') and on the `feedback` style ('yes_no_goal' | 'how_far').
    """
    # we give feedback based on the chosen optimality level
    if user_solution == ['e'] and magic_indexes==[]:
        isCorrect = True
    else:
        isCorrect = magic_indexes == user_solution
    if isCorrect:
        # Beating the worst case against an optimal opponent is impossible,
        # so fewer questions than the bound means cheating.
        if wasted_dollars < min_questions_worst_case and opponent=='optimal':
            TAc.print(LANG.render_feedback("correct solution!", f'Correct! But it is impossible to solve the problem asking {wasted_dollars} questions (in the worst case)! You are cheating...'), "yellow", ["bold"])
            exit(0)
        #correct
        if goal == 'correct':
            TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
            exit(0)
        #at_most_twice_the_opt - at most twice the optimal solution
        elif goal == 'at_most_twice_the_opt':
            isAtMostTwice = wasted_dollars <= 2*min_questions_worst_case # double included; true if it is at most twice the opt
            if isAtMostTwice:
                TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
                exit(0)
            else:
                if feedback == 'yes_no_goal':
                    TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You inserted the right magic indexes, but you wasted too many dollars: you didn\'t reach your goal'), "yellow", ["bold"])
                    exit(0)
                elif feedback == 'how_far':
                    TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You inserted the right magic indexes, but you wasted {wasted_dollars-2*min_questions_worst_case} more dollars than the optimal solution: you didn\'t reach your goal'), "yellow", ["bold"])
                    exit(0)
        #opt_plus_one - exactly the optimal solution + 1
        elif goal == 'opt_plus_one':
            isOptPlusOne = wasted_dollars == min_questions_worst_case + 1 #true if it is exactly opt_plus_one
            if isOptPlusOne:
                TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
                exit(0)
            else:
                if feedback == 'yes_no_goal':
                    TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You inserted the right magic indexes, but you wasted too many dollars: you didn\'t reach your goal'), "yellow", ["bold"])
                    exit(0)
                elif feedback == 'how_far':
                    TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You inserted the right magic indexes, but you wasted {wasted_dollars-(min_questions_worst_case + 1)} more dollars than the optimal solution: you didn\'t reach your goal'), "yellow", ["bold"])
                    exit(0)
        elif goal == 'optimal':
            isOptimal = wasted_dollars == min_questions_worst_case
            if isOptimal:
                TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
                exit(0)
            else:
                if feedback == 'yes_no_goal':
                    TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You inserted the right magic indexes, but you wasted too many dollars: you didn\'t reach your goal'), "yellow", ["bold"])
                    exit(0)
                elif feedback == 'how_far':
                    TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You inserted the right magic indexes, but you wasted {wasted_dollars-(min_questions_worst_case)} more dollars than the optimal solution: you didn\'t reach your goal'), "yellow", ["bold"])
                    exit(0)
    else:
        TAc.print(LANG.render_feedback("wrong solution!", f'Wrong answer! You didn\'t reach your goal'), "red", ["bold"])
        exit(0)
def simple_strucural_rep(initial_representation):
    """Rewrites each revealed entry as '<', '=' or '>' relative to its index.

    The input is a comma-separated string; unknown cells ('?') are kept,
    while every numeric token is compared with its own position: smaller
    becomes '<', equal '=' and larger '>'.
    """
    tokens = initial_representation.split(',')
    for position, token in enumerate(tokens):
        if token == '?':
            continue
        value = int(token)
        if value == position:
            tokens[position] = '='
        elif value < position:
            tokens[position] = '<'
        else:
            tokens[position] = '>'
    return ','.join(tokens)
def reinforced_strucural_rep(initial_representation, compact):
    """Propagates the structural knowledge produced by simple_strucural_rep.

    Every cell up to the last known '<' must itself be '<', the span
    between the first and last '=' is all '=', and everything from the
    first '>' to the end is '>'. With compact=True the result is collapsed
    into "N<,N?,N=,N?,N>" counted form (unknown cells before and after the
    '=' block are counted separately).
    """
    simple_rep = simple_strucural_rep(initial_representation)
    simple_rep = simple_rep.split(',')
    if '<' in simple_rep:
        first_occ_less = simple_rep.index('<')
        last_occ_less = len(simple_rep) - 1 - simple_rep[::-1].index('<')
        # Any '<' past position 0 implies the whole prefix is '<'.
        if last_occ_less != 0:
            first_occ_less = 0
        for i in range(first_occ_less,last_occ_less):
            simple_rep[i] = '<'
    if '=' in simple_rep:
        first_occ_eq = simple_rep.index('=')
        last_occ_eq = len(simple_rep) - 1 - simple_rep[::-1].index('=')
        for i in range(first_occ_eq,last_occ_eq):
            simple_rep[i] = '='
    if '>' in simple_rep:
        first_occ_great = simple_rep.index('>')
        last_occ_great = len(simple_rep) - 1 - simple_rep[::-1].index('>')
        # A '>' anywhere implies the whole suffix is '>'.
        if last_occ_great != len(simple_rep) - 1:
            last_occ_great = len(simple_rep) - 1
        for i in range(first_occ_great,last_occ_great+1):
            simple_rep[i] = '>'
    if compact == True:
        # Count each region; the eq flag separates '?' cells seen before
        # the '=' block from those seen after it.
        counter = Counter(simple_rep)
        count_less = counter['<']
        count_unknown = 0
        count_equal = counter['=']
        count_unknown2 = 0
        count_great = counter['>']
        eq = False
        for c in simple_rep:
            if c == '?' and eq == False:
                count_unknown += 1
            elif c == '?' and eq == True:
                count_unknown2 += 1
            elif c == '=':
                eq = True
        simple_rep = [f'{count_less}<' , f'{count_unknown}?',f'{count_equal}=',f'{count_unknown2}?',f'{count_great}>']
    reinforced_strucural_rep = ','.join([str(x) for x in simple_rep])
    return reinforced_strucural_rep
def get_simple_conf(vector_configuration):
    """Expands a counted configuration into a per-cell symbol list.

    The input is a whitespace-separated count/symbol token stream, e.g.
    "1 < 2 ? 3 = 0 ? 1 >": the five counts (tokens 0, 2, 4, 6, 8) tell how
    many '<', '?', '=', '?' and '>' cells the expanded vector contains.
    """
    tokens = ''.join(vector_configuration).split()
    region_symbols = ('<', '?', '=', '?', '>')
    region_counts = (int(tokens[i]) for i in (0, 2, 4, 6, 8))
    simple_conf = []
    for count, symbol in zip(region_counts, region_symbols):
        simple_conf.extend([symbol] * count)
    return simple_conf
def get_server_vec_representation(vector_configuration):
    """Builds the server-side vector ('-1'/'0'/'1'/None, one entry per cell).

    The (non-compact) reinforced structural representation is computed
    first; its comma separators are dropped and each structural symbol is
    mapped to the value the server stores: '<' -> '-1', '=' -> '0',
    '>' -> '1'. Unknown cells ('?') stay None.
    """
    # Replaces the previous remove-in-a-try/except loop: filtering the
    # separators in one pass is equivalent and avoids repeated O(n) removals.
    symbols = [c for c in reinforced_strucural_rep(vector_configuration, False) if c != ',']
    mapping = {'<': '-1', '=': '0', '>': '1'}
    # dict.get() yields None for '?' (and any other symbol), matching the
    # previous behavior of leaving those cells unset.
    return [mapping.get(symbol) for symbol in symbols]
def update_server_vec(optimal_pos, ans, server_vec):
    """Records the answer about position optimal_pos into server_vec.

    '0' marks a magic index (ans == optimal_pos), '-1' a value smaller
    than the index, '1' a larger one. The mutated vector is returned.
    """
    if ans == optimal_pos:
        outcome = '0'
    elif ans < optimal_pos:
        outcome = '-1'
    else:
        outcome = '1'
    server_vec[optimal_pos] = outcome
    return server_vec
def check_ans(server_vector, optimal_pos):
    """Checks the recorded answers stay mutually consistent.

    A '1' (value above its index) forbids any '-1' or '0' later in the
    vector; a '-1' forbids any '1' or '0' earlier. Returns False on a
    contradiction at optimal_pos, True otherwise.
    """
    answer = server_vector[optimal_pos]
    if answer == '1':
        tail = server_vector[optimal_pos+1:]
        return not ('-1' in tail or '0' in tail)
    if answer == '-1':
        head = server_vector[:optimal_pos]
        return not ('1' in head or '0' in head)
    return True
def check_goal_eval(goal, feedback, wasted_dollars, min_questions_worst_case, TAc, LANG):
    """Judges the money spent against the chosen goal and exits with exit(0).

    Mirror of check_goal() for the evaluation flow: correctness of the
    indexes is already established, only the question count is judged.
    `goal` is 'at_most_twice_the_opt' | 'opt_plus_one' | 'optimal';
    `feedback` is 'yes_no_goal' | 'how_far'.
    """
    # Beating the worst case against optimal play is impossible: cheating.
    if wasted_dollars < min_questions_worst_case and goal=='optimal':
        TAc.print(LANG.render_feedback("correct solution!", f'Correct! But it is impossible to solve the problem asking {wasted_dollars} questions (in the worst case)! You are cheating...'), "yellow", ["bold"])
        exit(0)
    #at_most_twice_the_opt - at most twice the optimal solution
    if goal == 'at_most_twice_the_opt':
        isAtMostTwice = wasted_dollars <= 2*min_questions_worst_case # double included; true if it is at most twice the opt
        if isAtMostTwice:
            TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
            exit(0)
        else:
            if feedback == 'yes_no_goal':
                TAc.print(LANG.render_feedback(" wasted dollars", f'You wasted too many dollars: you didn\'t reach your goal'), "yellow", ["bold"])
                exit(0)
            elif feedback == 'how_far':
                TAc.print(LANG.render_feedback(" difference", f'You wasted {wasted_dollars-2*min_questions_worst_case} more dollars than the optimal solution: you didn\'t reach your goal'), "yellow", ["bold"])
                exit(0)
    #opt_plus_one - exactly the optimal solution + 1
    elif goal == 'opt_plus_one':
        isOptPlusOne = wasted_dollars == min_questions_worst_case + 1 #true if it is exactly opt_plus_one
        if isOptPlusOne:
            TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
            exit(0)
        else:
            if feedback == 'yes_no_goal':
                TAc.print(LANG.render_feedback(" wasted dollars", f'You wasted too many dollars: you didn\'t reach your goal'), "yellow", ["bold"])
                exit(0)
            elif feedback == 'how_far':
                TAc.print(LANG.render_feedback(" difference", f'You wasted {wasted_dollars-(min_questions_worst_case + 1)} more dollars than the optimal solution: you didn\'t reach your goal'), "yellow", ["bold"])
                exit(0)
    elif goal == 'optimal':
        isOptimal = wasted_dollars == min_questions_worst_case
        if isOptimal:
            TAc.print(LANG.render_feedback(" correct solution!", f'Correct! You reached your goal'), "green", ["bold"])
            exit(0)
        else:
            if feedback == 'yes_no_goal':
                TAc.print(LANG.render_feedback(" wasted dollars", f'You wasted too many dollars: you didn\'t reach your goal'), "yellow", ["bold"])
                exit(0)
            elif feedback == 'how_far':
                TAc.print(LANG.render_feedback(" difference", f'You wasted {wasted_dollars-(min_questions_worst_case)} more dollars than the optimal solution: you didn\'t reach your goal'), "yellow", ["bold"])
                exit(0)
| 40.697595 | 555 | 0.606983 |
537913f3fa720280994d754016a39ac792993f13 | 9,168 | py | Python | src/engine/SCons/Tool/javac.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 1 | 2020-03-21T05:24:47.000Z | 2020-03-21T05:24:47.000Z | src/engine/SCons/Tool/javac.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 3 | 2019-01-15T20:40:02.000Z | 2021-02-13T03:16:34.000Z | src/engine/SCons/Tool/javac.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 1 | 2021-08-04T12:32:39.000Z | 2021-08-04T12:32:39.000Z | """SCons.Tool.javac
Tool-specific initialization for javac.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import os.path
from collections import OrderedDict
import SCons.Action
import SCons.Builder
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file, get_java_install_dirs, get_java_include_paths
import SCons.Util
def classname(path):
    """Turn a string (path name) into a Java class name."""
    normalized = os.path.normpath(path)
    return '.'.join(normalized.split(os.sep))
def emit_java_classes(target, source, env):
    """Create and return lists of source java files
    and their corresponding target class files.

    Emitter for the Java class builders: target[0] must be the class
    directory node; sources may be File or Dir nodes (directories are
    scanned recursively for *.java). Each resulting .class target node is
    annotated with java_classdir / java_sourcedir / java_classname
    attributes, which the command lines in generate() below consume.
    """
    java_suffix = env.get('JAVASUFFIX', '.java')
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    target[0].must_be_same(SCons.Node.FS.Dir)
    classdir = target[0]
    s = source[0].rentry().disambiguate()
    if isinstance(s, SCons.Node.FS.File):
        sourcedir = s.dir.rdir()
    elif isinstance(s, SCons.Node.FS.Dir):
        sourcedir = s.rdir()
    else:
        raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__)
    # Collect the flat list of .java source File nodes.
    slist = []
    js = _my_normcase(java_suffix)
    for entry in source:
        entry = entry.rentry().disambiguate()
        if isinstance(entry, SCons.Node.FS.File):
            slist.append(entry)
        elif isinstance(entry, SCons.Node.FS.Dir):
            # OrderedDict used as an ordered set: keys are the found files.
            result = OrderedDict()
            dirnode = entry.rdir()
            def find_java_files(arg, dirpath, filenames):
                java_files = sorted([n for n in filenames
                                     if _my_normcase(n).endswith(js)])
                mydir = dirnode.Dir(dirpath)
                java_paths = [mydir.File(f) for f in java_files]
                for jp in java_paths:
                    arg[jp] = True
            # NOTE(review): the directory is scanned twice -- once on disk
            # via os.walk and once via the in-memory node tree (entry.walk);
            # the OrderedDict de-duplicates, but verify both passes are
            # really needed.
            for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()):
                find_java_files(result, dirpath, filenames)
            entry.walk(find_java_files, result)
            slist.extend(list(result.keys()))
        else:
            raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__)
    version = env.get('JAVAVERSION', '1.4')
    full_tlist = []
    for f in slist:
        tlist = []
        source_file_based = True
        pkg_dir = None
        if not f.is_derived():
            # Parse the .java file for its package and declared classes so
            # one source can map to several .class targets (inner classes).
            pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version)
            if classes:
                source_file_based = False
                if pkg_dir:
                    d = target[0].Dir(pkg_dir)
                    p = pkg_dir + os.sep
                else:
                    d = target[0]
                    p = ''
                for c in classes:
                    t = d.File(c + class_suffix)
                    t.attributes.java_classdir = classdir
                    t.attributes.java_sourcedir = sourcedir
                    t.attributes.java_classname = classname(p + c)
                    tlist.append(t)
        if source_file_based:
            # Derived or unparsable sources: assume one class per file,
            # named after the file itself.
            base = f.name[:-len(java_suffix)]
            if pkg_dir:
                t = target[0].Dir(pkg_dir).File(base + class_suffix)
            else:
                t = target[0].File(base + class_suffix)
            t.attributes.java_classdir = classdir
            t.attributes.java_sourcedir = f.dir
            t.attributes.java_classname = classname(base)
            tlist.append(t)
        for t in tlist:
            t.set_specific_source([f])
        full_tlist.extend(tlist)
    return full_tlist, slist
# Action that invokes javac; command line and display string come from the
# construction variables $JAVACCOM / $JAVACCOMSTR (set up in generate()).
JavaAction = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
# Builder wiring the action to the emitter above; Entry factories let the
# targets/sources be resolved to File or Dir nodes later.
JavaBuilder = SCons.Builder.Builder(action = JavaAction,
                    emitter = emit_java_classes,
                    target_factory = SCons.Node.FS.Entry,
                    source_factory = SCons.Node.FS.Entry)
class pathopt(object):
    """
    Callable generator for javac-style path options (-classpath,
    -sourcepath, ...) built from a construction variable, with an optional
    second variable appended as a fallback suffix.
    """
    def __init__(self, opt, var, default=None):
        self.opt = opt          # option flag, e.g. '-classpath'
        self.var = var          # construction variable holding the paths
        self.default = default  # optional variable appended after var
    def __call__(self, target, source, env, for_signature):
        entries = env[self.var]
        if entries and not SCons.Util.is_List(entries):
            entries = [entries]
        if self.default:
            fallback = env[self.default]
            if fallback:
                if not SCons.Util.is_List(fallback):
                    fallback = [fallback]
                entries = entries + fallback
        if not entries:
            return []
        # One option flag followed by an os.pathsep-joined path string.
        return [self.opt, os.pathsep.join(map(str, entries))]
def Java(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the separate JavaClass{File,Dir}
    Builders.

    For each (target, source) pair the right builder is chosen: File nodes
    and *.java names go through env.JavaClassFile, directories through
    env.JavaClassDir. Returns the concatenated builder results.
    """
    if not SCons.Util.is_List(target):
        target = [target]
    if not SCons.Util.is_List(source):
        source = [source]
    # Pad the target list with repetitions of the last element in the
    # list so we have a target for every source element.
    target = target + ([target[-1]] * (len(source) - len(target)))
    java_suffix = env.subst('$JAVASUFFIX')
    result = []
    for t, s in zip(target, source):
        if isinstance(s, SCons.Node.FS.Base):
            # Already a node: dispatch on its node type.
            if isinstance(s, SCons.Node.FS.File):
                b = env.JavaClassFile
            else:
                b = env.JavaClassDir
        else:
            # Plain string: inspect the filesystem, then fall back to the
            # suffix to decide between file and directory builders.
            if os.path.isfile(s):
                b = env.JavaClassFile
            elif os.path.isdir(s):
                b = env.JavaClassDir
            elif s[-len(java_suffix):] == java_suffix:
                b = env.JavaClassFile
            else:
                b = env.JavaClassDir
        result.extend(b(t, s, *args, **kw))
    return result
def generate(env):
    """Add Builders and construction variables for javac to an Environment."""
    # Register the java builders and route them through our emitter.
    java_file = SCons.Tool.CreateJavaFileBuilder(env)
    java_class = SCons.Tool.CreateJavaClassFileBuilder(env)
    java_class_dir = SCons.Tool.CreateJavaClassDirBuilder(env)
    java_class.add_emitter(None, emit_java_classes)
    java_class.add_emitter(env.subst('$JAVASUFFIX'), emit_java_classes)
    java_class_dir.emitter = emit_java_classes
    env.AddMethod(Java)
    version = env.get('JAVAVERSION', None)
    if env['PLATFORM'] == 'win32':
        # Ensure that we have a proper path for javac
        paths = get_java_install_dirs('win32', version=version)
        javac = SCons.Tool.find_program_path(env, 'javac', default_paths=paths)
        if javac:
            javac_bin_dir = os.path.dirname(javac)
            env.AppendENVPath('PATH', javac_bin_dir)
    else:
        javac = SCons.Tool.find_program_path(env, 'javac')
    env['JAVAINCLUDES'] = get_java_include_paths(env, javac, version)
    # Default command-line building blocks; the _java* helpers expand the
    # path variables into javac options via pathopt above.
    env['JAVAC'] = 'javac'
    env['JAVACFLAGS'] = SCons.Util.CLVar('')
    env['JAVABOOTCLASSPATH'] = []
    env['JAVACLASSPATH'] = []
    env['JAVASOURCEPATH'] = []
    env['_javapathopt'] = pathopt
    env['_JAVABOOTCLASSPATH'] = '${_javapathopt("-bootclasspath", "JAVABOOTCLASSPATH")} '
    env['_JAVACLASSPATH'] = '${_javapathopt("-classpath", "JAVACLASSPATH")} '
    env['_JAVASOURCEPATH'] = '${_javapathopt("-sourcepath", "JAVASOURCEPATH", "_JAVASOURCEPATHDEFAULT")} '
    env['_JAVASOURCEPATHDEFAULT'] = '${TARGET.attributes.java_sourcedir}'
    env['_JAVACCOM'] = '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET.attributes.java_classdir} $_JAVASOURCEPATH $SOURCES'
    # TEMPFILE keeps long command lines under the OS limit.
    env['JAVACCOM'] = "${TEMPFILE('$_JAVACCOM','$JAVACCOMSTR')}"
    env['JAVACLASSSUFFIX'] = '.class'
    env['JAVASUFFIX'] = '.java'
def exists(env):
    """Reports tool availability; the actual javac lookup happens in generate()."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.819277 | 158 | 0.614856 |
8d773eba0188d95f5194c9d6e5f02e616d452932 | 1,087 | py | Python | morpfw/crud/typeregistry.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 8 | 2018-12-08T01:41:58.000Z | 2020-12-21T15:30:12.000Z | morpfw/crud/typeregistry.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 17 | 2019-02-05T15:01:32.000Z | 2020-04-28T16:17:42.000Z | morpfw/crud/typeregistry.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 2 | 2018-12-08T05:03:37.000Z | 2019-03-20T07:15:21.000Z |
import reg
class TypeRegistry(object):
    """Registry mapping type names and schemas to per-request type info."""
    def __init__(self):
        # Registered type names, in registration order.
        self.types = []
        # Maps a schema object back to its registered type name.
        self.schema_name = {}
    def register_type(self, name, schema):
        """Registers a type name and remembers which schema it uses."""
        if name not in self.types:
            self.types.append(name)
        self.schema_name[schema] = name
    def get_typeinfo(self, name, request):
        """Resolves the type-info dict for a registered name.

        Raises KeyError when the application has no factory for the name.
        """
        try:
            factory = request.app.get_typeinfo_factory(name)
        except NotImplementedError:
            factory = None
        if factory is None:
            raise KeyError('No type info registered for %s' % name)
        info = factory(request)
        info['name'] = name
        return info
    def get_typeinfos(self, request):
        """Returns {name: typeinfo} for every registered type."""
        return {name: self.get_typeinfo(name, request) for name in self.types}
    def get_typeinfo_by_schema(self, schema, request):
        """Looks up the type name for a schema and resolves its type info."""
        name = self.schema_name.get(schema, None)
        if name is None:
            raise KeyError('No type info registered for %s' % schema)
        return self.get_typeinfo(name, request)
740d6a001b0c2a409c4794373abd653122064186 | 12,276 | py | Python | mozc-nazoru/src/nazoru/lib.py | ikeji/mozc-devices | 59a3805b539ba02eaf68eac1f5664fac0912420d | [
"Apache-2.0"
] | 1,002 | 2016-03-31T15:49:47.000Z | 2022-03-31T14:53:54.000Z | mozc-nazoru/src/nazoru/lib.py | ikeji/mozc-devices | 59a3805b539ba02eaf68eac1f5664fac0912420d | [
"Apache-2.0"
] | 15 | 2016-03-31T23:24:08.000Z | 2021-12-23T04:52:42.000Z | mozc-nazoru/src/nazoru/lib.py | ikeji/mozc-devices | 59a3805b539ba02eaf68eac1f5664fac0912420d | [
"Apache-2.0"
] | 122 | 2016-03-31T16:44:32.000Z | 2022-03-26T10:05:25.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nazoru input library.
This is a collection of methods to preprocess input stroke data before any
training starts.
"""
import time
import random
import cairocffi as cairo
import numpy as np
from PIL import Image
from io import BytesIO
from enum import Enum
# Model variable scope and the graph node names used to feed inputs and
# fetch predictions (OUTPUT_NODE_NAME is derived from SCOPE).
SCOPE = 'Nazorunet'
INPUT_NODE_NAME = 'inputs'
OUTPUT_NODE_NAME = SCOPE + '/Predictions/Reshape_1'
# Recognizable characters; aligned index-by-index with KEYS below.
KANAS = (u'あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほ'
         u'まみむめもやゆよらりるれろわゐんゑを'
         u'abcdefghijklmnopqrstuvwxyz1234567890'
         u'♡ーずぐ')
# Key label for each KANAS entry (romaji for kana, the key itself for
# alphanumerics, plus a few special labels at the end).
KEYS = ('a', 'i', 'u', 'e', 'o',
        'ka', 'ki', 'ku', 'ke', 'ko',
        'sa', 'si', 'su', 'se', 'so',
        'ta', 'ti', 'tu', 'te', 'to',
        'na', 'ni', 'nu', 'ne', 'no',
        'ha', 'hi', 'hu', 'he', 'ho',
        'ma', 'mi', 'mu', 'me', 'mo',
        'ya', 'yu', 'yo',
        'ra', 'ri', 'ru', 're', 'ro',
        'wa', 'wi', 'nn', 'we', 'wo',
        'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
        'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
        '1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
        'ha-to', '-', 'zu', 'gu')
class KeyboardArrangement(Enum):
  """Known keyboard layouts; each value lists the key rows top to bottom."""
  qwerty_jis = [
      u'1234567890-^¥',
      u'qwertyuiop@[',
      u'asdfghjkl;:]',
      u'zxcvbnm,./_',
  ]

def key2pos(key, arrangement=KeyboardArrangement.qwerty_jis.value, offset=0.5):
  """Returns the key position.

  Args:
    key (string): Key to get position.
    arrangement (list): Keyboard arrangement (rows of keys).
    offset (number): How much each successive row shifts to the right.

  Returns:
    position (tuple(number, number)): Position (x, y), or None when the key
    does not appear in the arrangement.
  """
  for row_number, row in enumerate(arrangement):
    if key in row:
      return (row.index(key) + row_number * offset, row_number)
  return None
def keydowns2points(keydowns):
  """Translates keydowns to points.

  Args:
    keydowns: [(key, t), ...] List of keydowns.

  Returns:
    points: [(x, y, t), ...] List of points; keydowns whose key has no
    known keyboard position are silently dropped.
  """
  located = ((key2pos(event[0]), event[1]) for event in keydowns)
  return [(pos[0], pos[1], stamp) for pos, stamp in located if pos]
def normalize_x(x):
  """Normalizes position.

  Args:
    x (list): [[x, y, t], ...] List of points to normalize the position
      (x, y) into 0-1 range. Timestamps are left untouched.

  Returns:
    x (numpy.ndarray): [[x', y', t], ...] Float array with the normalized
    position (x', y').
  """
  # Force a float copy: with the previous int dtype (e.g. integer key
  # positions and integer timestamps) the in-place assignment below
  # truncated every normalized coordinate to 0 or 1.
  x = np.array(x, dtype=np.float64)
  max_ = np.max(x[:, :2], axis=0)
  min_ = np.min(x[:, :2], axis=0)
  x[:, :2] = (x[:, :2] - min_) / (max_ - min_)
  return x
def pendown_encode(x_diff, sigma=1.6):
  """Encodes time difference into pendown state.

  A diff whose dt exceeds mean(dt) + sigma * std(dt) is treated as a
  pen-up gap (0); every other diff is pendown (1).

  Args:
    x_diff (numpy.ndarray): [[dx, dy, dt], ...] List of diffs to encode.
    sigma (float): Standard deviations above the mean dt at which a gap
      counts as pen-up.

  Returns:
    x_diff_encoded: [[dx, dy, dt, pendown], ...] Encoded list of diffs.
  """
  dts = x_diff[:, 2]
  threshold = np.mean(dts) + sigma * np.std(dts)
  pendown_column = [[0] if dt > threshold else [1] for dt in dts]
  return np.concatenate((x_diff, pendown_column), axis=1)
def surface_to_array(surface):
  """Returns image array from cairo surface.

  Args:
    surface: Cairo surface to translate.

  Returns:
    numpy.ndarray: 2-D grayscale image decoded from the surface's PNG
    rendering.
  """
  buf = BytesIO()
  surface.write_to_png(buf)
  png_string = buf.getvalue()
  # Round-trip through PNG + PIL to obtain a grayscale ('L') view without
  # depending on the surface's raw pixel format.
  im = Image.open(BytesIO(png_string))
  imdata = np.asarray(im.convert('L'))
  return imdata
def get_direction(diff):
  """Returns directions and weights for 8-directional features.

  Decomposes one stroke diff into its dominant axis direction (Right (0),
  Down (2), Left (4), Up (6)) and its diagonal direction (Bottom right (1),
  Bottom left (3), Up left (5), Up right (7)). Weights measure how closely
  the diff matches each direction and are halved for pen-up states.
  For more detail, see
  - Bai, Zhen-Long, and Qiang Huo. "A study on the use of 8-directional
    features for online handwritten Chinese character recognition."
  - Liu, Cheng-Lin, and Xiang-Dong Zhou. "Online Japanese character
    recognition using trajectory-based normalization and direction feature
    extraction."

  Args:
    diff (sequence): Encoded diff vector (dx, dy, dt, pendown).

  Returns:
    (direction1, weight1, direction2, weight2) tuple.
  """
  dx, dy, pendown = diff[0], diff[1], diff[3]
  if np.abs(dx) >= np.abs(dy):
    direction1 = 0 if dx >= 0 else 4
  else:
    direction1 = 2 if dy >= 0 else 6
  if dx >= 0:
    direction2 = 1 if dy >= 0 else 7
  else:
    direction2 = 3 if dy >= 0 else 5
  length = np.linalg.norm(diff[:2])
  # Degenerate zero-length diff: no direction, zero weights.
  if length == 0:
    return 0, 0, 1, 0
  weight1 = np.abs(np.abs(dx) - np.abs(dy)) / length
  weight2 = np.sqrt(2) * min(np.abs(dx), np.abs(dy)) / length
  if pendown == 0:
    weight1 /= 2
    weight2 /= 2
  return direction1, weight1, direction2, weight2
def generate_images(x_norm, x_diff_encoded, directional_feature,
                    temporal_feature, scale, stroke_width):
    """Generates image array from strokes.

    Args:
      x_norm: [(x', y', t), ...] Normalized points.
      x_diff_encoded: [(dx, dy, dt, pendown), ...] Normalized diffs.
      directional_feature (boolean): True when using directional feature.
      temporal_feature (boolean): True when using temporal feature.
      scale (int): Scale of the image.
      stroke_width (int): Brush thickness to draw.
    Returns:
      images (numpy.array): Stack of (scale, scale) channels. Eight direction
        channels when |directional_feature| is True, otherwise one plain
        channel; two extra temporal channels (forward, then inversed) when
        |temporal_feature| is True. E.g. shape (scale, scale, 10) when both
        options are True.
    """
    if directional_feature:
        images = generate_image_direct_decomp(
            x_norm, x_diff_encoded, scale, stroke_width)
    else:
        images = generate_image_plain(x_norm, x_diff_encoded, scale, stroke_width)
    if temporal_feature:
        # Append the forward temporal channel, then the inversed one.
        for inversed in (False, True):
            temporal = generate_image_temporal(
                x_norm, x_diff_encoded, scale, stroke_width, inversed=inversed)
            images = np.concatenate((images, temporal), axis=-1)
    return images
def generate_image_direct_decomp(x_norm, x_diff_encoded, scale, stroke_width):
    """Generates image array from strokes using direction feature.

    Each stroke segment is drawn onto two of eight alpha-only surfaces: one
    for its dominant axis direction and one for its diagonal direction, with
    alpha equal to the direction weight from get_direction().

    Args:
      x_norm: [(x', y', t), ...] Normalized points.
      x_diff_encoded: [(dx, dy, dt, pendown), ...] Normalized diffs.
      scale (int): scale of the image.
      stroke_width (int): Brush thickness to draw.
    Returns:
      image (numpy.array): Image array with a shape of (scale, scale, 8).
    """
    # One 8-bit alpha surface per direction bin.
    surfaces = [cairo.ImageSurface(cairo.FORMAT_A8, scale, scale)
                for _ in range(8)]
    # Start the pen at the first normalized point.
    curr_x = x_norm[0][0]
    curr_y = x_norm[0][1]
    for i, diff in enumerate(x_diff_encoded):
        direction1, weight1, direction2, weight2 = get_direction(diff)
        # Draw the segment on the axis-direction surface.
        ctx = cairo.Context(surfaces[direction1])
        ctx.move_to(curr_x * scale, curr_y * scale)
        ctx.set_line_width(stroke_width)
        ctx.set_source_rgba(1, 1, 1, weight1)
        ctx.line_to((curr_x + diff[0]) * scale, (curr_y + diff[1]) * scale)
        ctx.stroke()
        # Draw the same segment on the diagonal-direction surface.
        ctx = cairo.Context(surfaces[direction2])
        ctx.move_to(curr_x * scale, curr_y * scale)
        ctx.set_line_width(stroke_width)
        ctx.set_source_rgba(1, 1, 1, weight2)
        ctx.line_to((curr_x + diff[0]) * scale, (curr_y + diff[1]) * scale)
        ctx.stroke()
        # Advance the pen by this diff.
        curr_x += diff[0]
        curr_y += diff[1]
    # Stack the 8 rendered surfaces as the trailing channel axis.
    return np.array([
        surface_to_array(surface) for surface in surfaces]).transpose(1, 2, 0)
def generate_image_plain(x_norm, x_diff_encoded, scale, stroke_width):
    """Generates image array from strokes without direction feature.

    All segments are drawn on a single alpha surface; pen-up segments are
    drawn at half intensity.

    Args:
      x_norm: [(x', y', t), ...] Normalized points.
      x_diff_encoded: [(dx, dy, dt, pendown), ...] Normalized diffs.
      scale (int): scale of the image.
      stroke_width (int): Brush thickness to draw.
    Returns:
      image (numpy.array): Image array with a shape of (scale, scale, 1).
    """
    surface = cairo.ImageSurface(cairo.FORMAT_A8, scale, scale)
    # Start the pen at the first normalized point.
    curr_x = x_norm[0][0]
    curr_y = x_norm[0][1]
    for i, diff in enumerate(x_diff_encoded):
        ctx = cairo.Context(surface)
        ctx.move_to(curr_x * scale, curr_y * scale)
        ctx.set_line_width(stroke_width)
        if diff[3] == 1:
            # Pen-down: full intensity.
            ctx.set_source_rgba(1, 1, 1, 1)
        else:
            # Pen-up: half intensity.
            ctx.set_source_rgba(1, 1, 1, 0.5)
        ctx.line_to((curr_x + diff[0]) * scale, (curr_y + diff[1]) * scale)
        ctx.stroke()
        curr_x += diff[0]
        curr_y += diff[1]
    return surface_to_array(surface).reshape(scale, scale, 1)
def generate_image_temporal(x_norm, x_diff_encoded, scale, stroke_width,
                            steepness=2, inversed=False):
    """Generates an image whose stroke intensity encodes drawing time.

    Segment alpha decays with elapsed time relative to the final timestamp
    (or grows, when |inversed| is True), shaped by |steepness|; pen-up
    segments are drawn at half intensity.

    Args:
      x_norm: [(x', y', t), ...] Normalized points (t of the last point is
        used as the total duration).
      x_diff_encoded: [(dx, dy, dt, pendown), ...] Normalized diffs.
      scale (int): Scale of the image.
      stroke_width (int): Brush thickness to draw.
      steepness (number): Exponent applied to the temporal weight.
      inversed (boolean): When True, later segments are brighter instead.
    Returns:
      image (numpy.array): Image array with a shape of (scale, scale, 1).
    """
    surface = cairo.ImageSurface(cairo.FORMAT_A8, scale, scale)
    curr_x = x_norm[0][0]
    curr_y = x_norm[0][1]
    # Accumulated time since the first point.
    spent_t = 0
    for i, diff in enumerate(x_diff_encoded):
        ctx = cairo.Context(surface)
        ctx.move_to(curr_x * scale, curr_y * scale)
        ctx.set_line_width(stroke_width)
        # Linear decay over total duration, then shaped by |steepness|.
        weight = 1 - spent_t / x_norm[-1][2]
        if inversed: weight = 1 - weight
        weight = max(weight, 0) ** steepness
        if diff[3] == 0: weight /= 2
        ctx.set_source_rgba(1, 1, 1, weight)
        ctx.line_to((curr_x + diff[0]) * scale, (curr_y + diff[1]) * scale)
        ctx.stroke()
        curr_x += diff[0]
        curr_y += diff[1]
        spent_t += diff[2]
    return surface_to_array(surface).reshape(scale, scale, 1)
def split_data(x, t, val_rate, test_rate):
    """Splits data into training, validation, and testing data.

    Args:
      x: Data to split.
      t: Label to split.
      val_rate: What percentage of data to use as a validation set.
      test_rate: What percentage of data to use as a testing set.
    Returns:
      train_x, train_t, val_x, val_t, test_x, test_t: The three
      (inputs, labels) pairs, in that order.
    """
    n = x.shape[0]
    # Boundaries of the three contiguous slices.
    train_end = int(n * (1 - val_rate - test_rate))
    val_end = int(n * (1 - test_rate))
    return (x[:train_end], t[:train_end],
            x[train_end:val_end], t[train_end:val_end],
            x[val_end:], t[val_end:])
def keydowns2image(keydowns, directional_feature, temporal_feature, scale=16,
                   stroke_width=2):
    """Converts a list of keydowns into image.

    Args:
      keydowns: [(key, t), ...] Training data as a list of keydowns.
      directional_feature (boolean): True when using directional feature.
      temporal_feature (boolean): True when using temporal feature.
      scale (int): Scale of the image.
      stroke_width (int): Brush thickness to draw.
    Returns:
      X_im: Image dataset in numpy array format, scaled into [0, 1]. The
        shape differs by used features.
        (directional=True,  temporal=True)  => (scale, scale, 10)
        (directional=True,  temporal=False) => (scale, scale, 8)
        (directional=False, temporal=True)  => (scale, scale, 3)
        (directional=False, temporal=False) => (scale, scale, 1)
    """
    # Pipeline: keys -> 2D points -> 0-1 normalized points -> diffs ->
    # pendown-encoded diffs -> rendered feature images.
    points = keydowns2points(keydowns)
    points_norm = normalize_x(points)
    diffs = np.diff(points_norm, axis=0)
    diffs_encoded = pendown_encode(diffs)
    images = generate_images(points_norm, diffs_encoded, directional_feature,
                             temporal_feature, scale, stroke_width)
    # Rescale 8-bit pixel values into [0, 1].
    return images / 255.
| 29.868613 | 80 | 0.634164 |
ee6da983f744413f42cde4cda62e1bae0779cd4b | 6,810 | py | Python | galvasr2/align/utils.py | keithachorn-intel/peoples-speech | b7623488dff36d343f8f5a6ead0a5a3a82f723bd | [
"Apache-2.0"
] | 3 | 2020-12-18T05:17:48.000Z | 2021-05-31T06:22:53.000Z | galvasr2/align/utils.py | keithachorn-intel/peoples-speech | b7623488dff36d343f8f5a6ead0a5a3a82f723bd | [
"Apache-2.0"
] | 12 | 2020-07-18T15:36:57.000Z | 2021-01-16T18:14:26.000Z | galvasr2/align/utils.py | keithachorn-intel/peoples-speech | b7623488dff36d343f8f5a6ead0a5a3a82f723bd | [
"Apache-2.0"
] | 1 | 2020-09-15T21:59:31.000Z | 2020-09-15T21:59:31.000Z |
import os
import sys
import time
import heapq
from multiprocessing.dummy import Pool as ThreadPool
KILO = 1024
KILOBYTE = 1 * KILO
MEGABYTE = KILO * KILOBYTE
GIGABYTE = KILO * MEGABYTE
TERABYTE = KILO * GIGABYTE
SIZE_PREFIX_LOOKUP = {'k': KILOBYTE, 'm': MEGABYTE, 'g': GIGABYTE, 't': TERABYTE}
def parse_file_size(file_size):
file_size = file_size.lower().strip()
if len(file_size) == 0:
return 0
n = int(keep_only_digits(file_size))
if file_size[-1] == 'b':
file_size = file_size[:-1]
e = file_size[-1]
return SIZE_PREFIX_LOOKUP[e] * n if e in SIZE_PREFIX_LOOKUP else n
def keep_only_digits(txt):
    """Return *txt* with every non-digit character removed."""
    return ''.join(c for c in txt if c.isdigit())
def secs_to_hours(secs):
    """Format a duration in seconds as a zero-padded 'HH:MM:SS' string."""
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
def log_progress(it, total=None, interval=60.0, step=None, entity='it', file=sys.stderr):
    """Generator wrapper that periodically prints progress/speed (and ETA,
    when the total is known) while yielding items from *it*.

    Args:
        it: Iterable to wrap.
        total: Total item count; inferred via len(it) when available.
        interval: Seconds between progress lines (used when step is None).
        step: If given, print every *step* items instead of on a timer.
        entity: Unit name used in the speed display (e.g. 'it', 'files').
        file: Stream the progress lines are written to.
    """
    if total is None and hasattr(it, '__len__'):
        total = len(it)
    if total is None:
        line_format = ' {:8d} (elapsed: {}, speed: {:.2f} {}/{})'
    else:
        line_format = ' {:' + str(len(str(total))) + 'd} of {} : {:6.2f}% (elapsed: {}, speed: {:.2f} {}/{}, ETA: {})'
    overall_start = time.time()
    interval_start = overall_start
    interval_steps = 0
    # NOTE: this closure reads `global_step` from the enclosing for-loop
    # below, so it must only be called after iteration has started.
    def print_interval(steps, time_now):
        elapsed = time_now - overall_start
        elapsed_str = secs_to_hours(elapsed)
        speed_unit = 's'
        interval_duration = time_now - interval_start
        # Guard against a zero-length interval to avoid division by zero.
        print_speed = speed = interval_steps / (0.001 if interval_duration == 0.0 else interval_duration)
        # Pick a readable unit: items per second, minute, hour, or millisecond.
        if print_speed < 0.1:
            print_speed = print_speed * 60
            speed_unit = 'm'
            if print_speed < 1:
                print_speed = print_speed * 60
                speed_unit = 'h'
        elif print_speed > 1000:
            print_speed = print_speed / 1000.0
            speed_unit = 'ms'
        if total is None:
            line = line_format.format(global_step, elapsed_str, print_speed, entity, speed_unit)
        else:
            percent = global_step * 100.0 / total
            eta = secs_to_hours(((total - global_step) / speed) if speed > 0 else 0)
            line = line_format.format(global_step, total, percent, elapsed_str, print_speed, entity, speed_unit, eta)
        print(line, file=file, flush=True)
    for global_step, obj in enumerate(it, 1):
        interval_steps += 1
        yield obj
        t = time.time()
        # Print either on the wall-clock timer or every `step` items.
        if (step is None and t - interval_start > interval) or (step is not None and interval_steps >= step):
            print_interval(interval_steps, t)
            interval_steps = 0
            interval_start = t
    # Flush a final progress line for any remaining partial interval.
    if interval_steps > 0:
        print_interval(interval_steps, time.time())
def circulate(items, center=None):
    """Yield (index, item) pairs starting at *center* and fanning outwards,
    alternating right/left until the whole sequence is covered.

    *center* defaults to the middle element and is clamped into range.
    """
    count = len(list(items))
    if count == 0:
        return
    if center is None:
        center = count // 2
    center = min(max(center, 0), count - 1)
    yield center, items[center]
    for offset in range(1, count):
        right = center + offset
        if right < count:
            yield right, items[right]
        left = center - offset
        if left >= 0:
            yield left, items[left]
def by_len(items):
    """Return (original_index, item) pairs sorted by item length, longest
    first (stable for equal lengths)."""
    return sorted(enumerate(items), key=lambda pair: len(pair[1]), reverse=True)
def enweight(items, direction=0):
    """
    Enumerates all entries together with a positional weight value.
    The positional weight progresses quadratically.

    :param items: Items to enumerate
    :param direction: Order of assigning positional weights to N-grams:
        direction < 0: Weight of first N-gram is 1.0 and of last one 0.0
        direction > 0: Weight of first N-gram is 0.0 and of last one 1.0
        direction == 0: Weight of center N-gram(s) near or equal 0, weight of first and last N-gram 1.0
    :return: Produces (object, float) tuples representing the enumerated item
        along with its assigned positional weight value
    """
    items = list(items)
    # Clamp direction to {-1, 0, 1}.
    if direction < 0:
        direction = -1
    elif direction > 0:
        direction = 1
    n = len(items) - 1
    if n < 1:
        if n == 0:
            yield items[0], 1
        # Plain return (not StopIteration) ends the generator -- PEP 479.
        return
    scale = 4 - abs(direction) * 3
    for i, item in enumerate(items):
        c = (i + n * (direction - 1) / 2) / n
        yield item, c * c * scale
def greedy_minimum_search(a, b, compute, result_a=None, result_b=None):
    """Binary-style descent over the integer range [a, b] towards the result
    whose first element (the score) is smallest.

    *compute(i)* must return an indexable whose [0] entry is the score;
    assumes the score is unimodal over the range. Already-computed endpoint
    results can be passed via result_a/result_b to avoid recomputation.
    """
    if a > b:
        # Normalize so a <= b, carrying the cached results along.
        a, b, result_a, result_b = b, a, result_b, result_a
    if a == b:
        return result_a or result_b or compute(a)
    result_a = result_a or compute(a)
    result_b = result_b or compute(b)
    if b == a + 1:
        # Adjacent endpoints: pick the better of the two.
        return result_a if result_a[0] < result_b[0] else result_b
    mid = (a + b) // 2
    # Recurse into the half adjacent to the better endpoint.
    if result_a[0] < result_b[0]:
        return greedy_minimum_search(a, mid, compute, result_a=result_a)
    return greedy_minimum_search(mid, b, compute, result_b=result_b)
class Interleaved:
    """Collection that lazily combines sorted collections in an interleaving
    fashion.

    Iteration always yields the next smallest element across all inputs.
    The collections must support iter() and len() and already be sorted by
    *key*.
    """

    def __init__(self, *iterables, key=lambda obj: obj):
        self.iterables = iterables
        self.key = key
        # Total length is fixed up front so len() needs no iteration.
        self.len = sum(len(iterable) for iterable in iterables)

    def __iter__(self):
        return heapq.merge(*self.iterables, key=self.key)

    def __len__(self):
        return self.len
class LimitingPool:
    """Limits unbound ahead-processing of multiprocessing.Pool's imap method
    before items get consumed by the iteration caller.
    This prevents OOM issues in situations where items represent larger memory allocations."""

    def __init__(self, processes=None, limit_factor=2, sleeping_for=0.1):
        # Resolve the worker count once so pool size and look-ahead limit
        # are computed from the same number.
        self.processes = os.cpu_count() if processes is None else processes
        # Fix: previously the raw (possibly None) `processes` argument was
        # passed to ThreadPool instead of the resolved `self.processes`.
        self.pool = ThreadPool(processes=self.processes)
        self.sleeping_for = sleeping_for
        # At most limit_factor items per worker may be in flight.
        self.max_ahead = self.processes * limit_factor
        self.processed = 0

    def __enter__(self):
        return self

    def limit(self, it):
        """Yield items from *it*, stalling while too many are in flight."""
        for obj in it:
            while self.processed >= self.max_ahead:
                time.sleep(self.sleeping_for)
            self.processed += 1
            yield obj

    def map(self, fun, it):
        """Ordered, lazily-limited equivalent of Pool.imap(fun, it)."""
        for obj in self.pool.imap(fun, self.limit(it)):
            # One result consumed: allow one more item to be fed in.
            self.processed -= 1
            yield obj

    def __exit__(self, exc_type, exc_value, traceback):
        self.pool.close()
815cd84d217e35f428efb6015e9fd7bc973c2d91 | 16,173 | py | Python | detectron2/data/build.py | liuy-61/detectron2_origin_liuy | bd75971b94f055fded6125c1d136b1ea188b75f0 | [
"Apache-2.0"
] | 2 | 2020-01-02T09:10:09.000Z | 2020-05-11T07:30:44.000Z | detectron2/data/build.py | liuy-61/detectron2_origin_liuy | bd75971b94f055fded6125c1d136b1ea188b75f0 | [
"Apache-2.0"
] | null | null | null | detectron2/data/build.py | liuy-61/detectron2_origin_liuy | bd75971b94f055fded6125c1d136b1ea188b75f0 | [
"Apache-2.0"
] | 1 | 2021-02-14T01:26:26.000Z | 2021-02-14T01:26:26.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import bisect
import copy
import itertools
import logging
import numpy as np
import pickle
import torch.utils.data
from fvcore.common.file_io import PathManager
from tabulate import tabulate
from termcolor import colored
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.logger import log_first_n
from . import samplers
from .catalog import DatasetCatalog, MetadataCatalog
from .common import DatasetFromList, MapDataset
from .dataset_mapper import DatasetMapper
from .detection_utils import check_metadata_consistency
"""
This file contains the default logic to build a dataloader for training or testing.
"""
__all__ = [
"build_detection_train_loader",
"build_detection_test_loader",
"get_detection_dataset_dicts",
"load_proposals_into_dataset",
"print_instances_class_histogram",
]
def filter_images_with_only_crowd_annotations(dataset_dicts):
    """
    Filter out images with none annotations or only crowd annotations
    (i.e., images without non-crowd annotations).
    A common training-time preprocessing on COCO dataset.

    Args:
        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.

    Returns:
        list[dict]: the same format, but filtered.
    """
    num_before = len(dataset_dicts)

    def has_usable(anns):
        # An image is usable if at least one annotation is not marked crowd.
        return any(ann.get("iscrowd", 0) == 0 for ann in anns)

    dataset_dicts = [d for d in dataset_dicts if has_usable(d["annotations"])]
    num_after = len(dataset_dicts)
    logger = logging.getLogger(__name__)
    logger.info(
        "Removed {} images with no usable annotations. {} images left.".format(
            num_before - num_after, num_after
        )
    )
    return dataset_dicts
def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
    """
    Filter out images with too few number of keypoints.

    Args:
        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
        min_keypoints_per_image (int): minimum number of visible keypoints.

    Returns:
        list[dict]: the same format as dataset_dicts, but filtered.
    """
    num_before = len(dataset_dicts)

    def num_visible(dic):
        # Keypoints are stored flat as [x1, y1, v1, x2, y2, v2, ...];
        # a keypoint counts as visible when its v flag is positive.
        return sum(
            (np.array(ann["keypoints"][2::3]) > 0).sum()
            for ann in dic["annotations"]
            if "keypoints" in ann
        )

    dataset_dicts = [
        d for d in dataset_dicts if num_visible(d) >= min_keypoints_per_image
    ]
    num_after = len(dataset_dicts)
    logger = logging.getLogger(__name__)
    logger.info(
        "Removed {} images with fewer than {} keypoints.".format(
            num_before - num_after, min_keypoints_per_image
        )
    )
    return dataset_dicts
def load_proposals_into_dataset(dataset_dicts, proposal_file):
    """
    Load precomputed object proposals into the dataset.

    The proposal file should be a pickled dict with the following keys:

    - "ids": list[int] or list[str], the image ids
    - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
    - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
      corresponding to the boxes.
    - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.

    Args:
        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
        proposal_file (str): file path of pre-computed proposals, in pkl format.

    Returns:
        list[dict]: the same format as dataset_dicts, but added proposal field.
        Note the input dicts are mutated in place.
    """
    logger = logging.getLogger(__name__)
    logger.info("Loading proposals from: {}".format(proposal_file))
    with PathManager.open(proposal_file, "rb") as f:
        # latin1 keeps raw byte values intact when unpickling py2-era files.
        proposals = pickle.load(f, encoding="latin1")
    # Rename the key names in D1 proposal files
    rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
    for key in rename_keys:
        if key in proposals:
            proposals[rename_keys[key]] = proposals.pop(key)
    # Fetch the indexes of all proposals that are in the dataset
    # Convert image_id to str since they could be int.
    img_ids = set({str(record["image_id"]) for record in dataset_dicts})
    id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
    # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
    bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
    for record in dataset_dicts:
        # Get the index of the proposal
        i = id_to_index[str(record["image_id"])]
        boxes = proposals["boxes"][i]
        objectness_logits = proposals["objectness_logits"][i]
        # Sort the proposals in descending order of the scores
        inds = objectness_logits.argsort()[::-1]
        record["proposal_boxes"] = boxes[inds]
        record["proposal_objectness_logits"] = objectness_logits[inds]
        record["proposal_bbox_mode"] = bbox_mode
    return dataset_dicts
def _quantize(x, bin_edges):
bin_edges = copy.copy(bin_edges)
bin_edges = sorted(bin_edges)
quantized = list(map(lambda y: bisect.bisect_right(bin_edges, y), x))
return quantized
def print_instances_class_histogram(dataset_dicts, class_names):
    """
    Log a per-category instance-count table for the dataset.

    Args:
        dataset_dicts (list[dict]): list of dataset dicts.
        class_names (list[str]): list of class names (zero-indexed).
    """
    num_classes = len(class_names)
    hist_bins = np.arange(num_classes + 1)
    # Fix: np.int was a deprecated alias of the builtin int (removed in
    # NumPy 1.24); use the builtin type directly.
    histogram = np.zeros((num_classes,), dtype=int)
    for entry in dataset_dicts:
        annos = entry["annotations"]
        # Crowd annotations are excluded from the counts.
        classes = [x["category_id"] for x in annos if not x.get("iscrowd", 0)]
        histogram += np.histogram(classes, bins=hist_bins)[0]

    N_COLS = min(6, len(class_names) * 2)

    def short_name(x):
        # make long class names shorter. useful for lvis
        if len(x) > 13:
            return x[:11] + ".."
        return x

    # Flatten into alternating [name, count, name, count, ...] cells.
    data = list(
        itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
    )
    total_num_instances = sum(data[1::2])
    # Pad so the cells fill whole table rows.
    data.extend([None] * (N_COLS - (len(data) % N_COLS)))
    if num_classes > 1:
        data.extend(["total", total_num_instances])
    data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
    table = tabulate(
        data,
        headers=["category", "#instances"] * (N_COLS // 2),
        tablefmt="pipe",
        numalign="left",
        stralign="center",
    )
    # key="message" de-duplicates: log each distinct table only once.
    log_first_n(
        logging.INFO,
        "Distribution of training instances among all {} categories:\n".format(num_classes)
        + colored(table, "cyan"),
        key="message",
    )
def build_batch_data_sampler(
    sampler, images_per_batch, group_bin_edges=None, grouping_features=None
):
    """
    Return a dataset index sampler that batches dataset indices possibly with
    grouping to improve training efficiency.

    Args:
        sampler (torch.utils.data.sampler.Sampler): any subclass of
            :class:`torch.utils.data.sampler.Sampler`.
        images_per_batch (int): the batch size. Batches may contain between 1
            and images_per_batch (inclusive) elements because the underlying
            index set (and grouping partitions, if used) may not be divisible
            by images_per_batch.
        group_bin_edges (None, list[number], tuple[number]): If falsy, grouping
            is disabled. Otherwise the values are bin edges defining
            len(group_bin_edges) + 1 groups; batches only mix elements from
            the same group.
        grouping_features (None, list[number], tuple[number]): If falsy,
            grouping is disabled. Otherwise one value per dataset index, used
            to place that index into a grouping bin.

    Returns:
        A BatchSampler or subclass of BatchSampler.
    """
    use_grouping = bool(group_bin_edges) and bool(grouping_features)
    if use_grouping:
        assert isinstance(group_bin_edges, (list, tuple))
        assert isinstance(grouping_features, (list, tuple))
        group_ids = _quantize(grouping_features, group_bin_edges)
        return samplers.GroupedBatchSampler(sampler, group_ids, images_per_batch)
    # drop_last so the batch always have the same size.
    # NOTE when we add batch inference support, make sure not to use this.
    return torch.utils.data.sampler.BatchSampler(
        sampler, images_per_batch, drop_last=True
    )
def get_detection_dataset_dicts(
    dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
):
    """
    Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.

    Args:
        dataset_names (list[str]): a list of dataset names
        filter_empty (bool): whether to filter out images without instance annotations
        min_keypoints (int): filter out images with fewer keypoints than
            `min_keypoints`. Set to 0 to do nothing.
        proposal_files (list[str]): if given, a list of object proposal files
            that match each dataset in `dataset_names`.

    Returns:
        list[dict]: the concatenated (and optionally filtered) dataset dicts.
    """
    # Fix: removed leftover dead `debug = 1` assignments scattered through
    # this function; they had no effect.
    assert len(dataset_names)
    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
    for dataset_name, dicts in zip(dataset_names, dataset_dicts):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)

    if proposal_files is not None:
        assert len(dataset_names) == len(proposal_files)
        # load precomputed proposals from proposal files
        dataset_dicts = [
            load_proposals_into_dataset(dataset_i_dicts, proposal_file)
            for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
        ]

    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))

    has_instances = "annotations" in dataset_dicts[0]
    # Keep images without instance-level GT if the dataset has semantic labels.
    if filter_empty and has_instances and "sem_seg_file_name" not in dataset_dicts[0]:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)

    if min_keypoints > 0 and has_instances:
        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)

    if has_instances:
        try:
            class_names = MetadataCatalog.get(dataset_names[0]).thing_classes
            check_metadata_consistency("thing_classes", dataset_names)
            print_instances_class_histogram(dataset_dicts, class_names)
        except AttributeError:  # class names are not available for this dataset
            pass
    return dataset_dicts
def build_detection_train_loader(cfg, mapper=None):
    """
    A data loader is created by the following steps:

    1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
    2. Start workers to work on the dicts. Each worker will:
        * Map each metadata dict into another format to be consumed by the model.
        * Batch them by simply putting dicts into a list.
    The batched ``list[mapped_dict]`` is what this dataloader will return.

    Args:
        cfg (CfgNode): the config
        mapper (callable): a callable which takes a sample (dict) from dataset and
            returns the format to be consumed by the model.
            By default it will be `DatasetMapper(cfg, True)`.

    Returns:
        a torch DataLoader object
    """
    # Fix: removed leftover dead `debug = 1` assignments; they had no effect.
    num_workers = get_world_size()
    # NOTE(review): the batch size is hard-coded to 1 here instead of reading
    # cfg.SOLVER.IMS_PER_BATCH -- presumably deliberate in this fork; confirm
    # before changing (the assertion messages still mention IMS_PER_BATCH).
    images_per_batch = 1
    assert (
        images_per_batch % num_workers == 0
    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert (
        images_per_batch >= num_workers
    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    images_per_worker = images_per_batch // num_workers

    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)

    # Bin edges for batching images with similar aspect ratios. If ASPECT_RATIO_GROUPING
    # is enabled, we define two bins with an edge at height / width = 1.
    group_bin_edges = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
    aspect_ratios = [float(img["height"]) / float(img["width"]) for img in dataset]

    if mapper is None:
        mapper = DatasetMapper(cfg, True)
    dataset = MapDataset(dataset, mapper)

    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        sampler = samplers.TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        sampler = samplers.RepeatFactorTrainingSampler(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    batch_sampler = build_batch_data_sampler(
        sampler, images_per_worker, group_bin_edges, aspect_ratios
    )

    data_loader = torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
        worker_init_fn=worker_init_reset_seed,
    )
    return data_loader
def build_detection_test_loader(cfg, dataset_name, mapper=None):
    """
    Similar to `build_detection_train_loader`.
    But this function uses the given `dataset_name` argument (instead of the names in cfg),
    and uses batch size 1.

    Args:
        cfg: a detectron2 CfgNode
        dataset_name (str): a name of the dataset that's available in the DatasetCatalog
        mapper (callable): a callable which takes a sample (dict) from dataset
            and returns the format to be consumed by the model.
            By default it will be `DatasetMapper(cfg, False)`.

    Returns:
        DataLoader: a torch DataLoader, that loads the given detection
        dataset, with test-time transformation and batching.
    """
    proposal_files = None
    if cfg.MODEL.LOAD_PROPOSALS:
        # Pick the proposal file matching this dataset's position in TEST.
        idx = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[idx]]

    dataset_dicts = get_detection_dataset_dicts(
        [dataset_name],
        filter_empty=False,
        proposal_files=proposal_files,
    )

    dataset = DatasetFromList(dataset_dicts)
    if mapper is None:
        mapper = DatasetMapper(cfg, False)
    dataset = MapDataset(dataset, mapper)

    sampler = samplers.InferenceSampler(len(dataset))
    # Always use 1 image per worker during inference since this is the
    # standard when reporting inference time in papers.
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)

    return torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
def trivial_batch_collator(batch):
    """
    A batch collator that does nothing.

    Returns the list of mapped dataset dicts as-is, so the model receives
    ``list[dict]`` rather than a collated tensor batch.
    """
    return batch
def worker_init_reset_seed(worker_id):
    # Give every dataloader worker a distinct randomized seed, so randomness
    # is not cloned from the parent process state across workers.
    seed_all_rng(np.random.randint(2 ** 31) + worker_id)
| 37.787383 | 103 | 0.686391 |
caf51a47b8bdb2c8a31723207ebc6b0958cf6cc4 | 4,445 | py | Python | packages/pyre/config/native/Importer.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/config/native/Importer.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/config/native/Importer.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
# externals
import sys
# support
from ... import primitives, tracking
# superclass
from ..Loader import Loader
# declaration
class Importer(Loader):
    """
    This component codec recognizes uris of the form

        import:package.subpackage.factory#name

    The uri is interpreted as if

        from package.subpackage import factory
        factory(name=name)

    had been issued to the interpreter. {factory} is expected to be either a component class or
    a function that returns a component class. This class is then instantiated using {name} as
    the sole argument to the constructor. If {name} is not present, the component class is
    returned.
    """

    # types
    from .Shelf import Shelf as shelf

    # public data
    # the uri schemes this loader responds to
    schemes = ('import',)

    # interface
    @classmethod
    def load(cls, uri, **kwds):
        """
        Interpret {uri} as a module to be loaded

        Returns a {shelf} wrapping the imported module; raises
        {cls.LoadingError} when the module cannot be imported.
        """
        # get the module name
        source = str(uri.address)
        # build a simple locator
        locator = tracking.simple(source=uri.uri)
        # show me
        # print(f"    importing: '{source}'")
        # attempt to
        try:
            # import the module
            module = __import__(source)
            # show me
            # print(f"    uri: {uri} -> module: {module}")
        # the address portion of {uri} is not importable
        except (ImportError, TypeError) as error:
            # show me
            # print(f"    error: {error}")
            # complain
            raise cls.LoadingError(
                codec=cls, uri=uri, locator=locator, description=str(error)) from error
        # all other exceptions are probably caused by the contents of the module; let them
        # propagate to the user; on success, look up {module} in the global list of modules and
        # return it dressed up as a shelf
        # NB: __import__ returns the top-level package, so the dotted target
        # is retrieved from sys.modules instead
        return cls.shelf(module=sys.modules[source], uri=uri, locator=locator)

    @classmethod
    def locateShelves(cls, protocol, scheme, context, **kwds):
        """
        Locate candidate shelves for the given {uri}

        Yields 'import:' uris built from the candidates produced by the
        superclass.
        """
        # sign in
        # print("{.__name__}.locateShelves:".format(cls))
        # chain up for the rest
        for candidate in super().locateShelves(
                protocol=protocol, scheme=scheme, context=context, **kwds):
            # make a uri
            uri = cls.uri(scheme='import', address=candidate)
            # and send it off
            yield uri
        # all done
        return

    # context handling
    @classmethod
    def interpret(cls, request):
        """
        Attempt to extract to extract a resolution context and a symbol from the {request}
        """
        # i deal with python package specifications
        context = request.address.split('.')
        # the symbol is just the last entry
        symbol = '' if not context else context[-1]
        # return the pair
        return context, symbol

    @classmethod
    def assemble(cls, context):
        """
        Assemble the sequence of directories in to a form suitable for the address part of a uri
        """
        # i make module paths
        return '.'.join(context)

    # initialization
    @classmethod
    def prime(cls, linker):
        """
        Build my initial set of shelves

        Registers the running script's __main__ module with the {linker}
        as a shelf keyed by its resolved 'file:' uri.
        """
        # attempt to
        try:
            # get the main module
            import __main__
        # if this failed
        except ImportError:
            # no worries
            return
        # otherwise, attempt to
        try:
            # get the name of the script we are executing
            filename = __main__.__file__
        # if it doesn't have one, e.g. in interactive sessions
        except AttributeError:
            # no worries
            return
        # resolve the file name
        filename = str(primitives.path(filename).resolve())
        # make a uri
        uri = cls.uri(scheme='file', address=filename)
        # and a locator
        here = tracking.simple('while priming the {.__name__} loader'.format(cls))
        # make a shelf
        shelf = cls.shelf(module=__main__, uri=uri, locator=here)
        # attach it to the linker
        linker.shelves[uri.uri] = shelf
        # show me
        # print("registered '__main__' as {.uri!r}".format(uri))
        # nothing else to do
        return
# end of file
| 27.78125 | 96 | 0.581102 |
91cb21964588589f8b2e1d57b923eb9f8fac3fc1 | 80 | py | Python | src/test/devrepo_test/config_test.py | random-python/devrepo | 4917c2d55e16d4942c6d8f4b2c57138822e3cfcc | [
"Apache-2.0"
] | 1 | 2020-06-20T15:18:17.000Z | 2020-06-20T15:18:17.000Z | src/test/devrepo_test/config_test.py | random-python/devrepo | 4917c2d55e16d4942c6d8f4b2c57138822e3cfcc | [
"Apache-2.0"
] | null | null | null | src/test/devrepo_test/config_test.py | random-python/devrepo | 4917c2d55e16d4942c6d8f4b2c57138822e3cfcc | [
"Apache-2.0"
] | null | null | null | from devrepo.config import *
def test_config():
    """Smoke test: the star import of devrepo.config must expose a printable CONFIG."""
    print()
    # CONFIG is provided by `from devrepo.config import *` at module top
    print(CONFIG)
| 11.428571 | 28 | 0.6625 |
011042bd0afc768d33bbe66ef1d1faecb4d73d19 | 1,656 | py | Python | python/tvm/relay/op/__init__.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 4 | 2019-05-08T04:46:07.000Z | 2019-11-11T19:43:04.000Z | python/tvm/relay/op/__init__.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2020-09-14T09:18:25.000Z | 2020-09-24T03:28:18.000Z | python/tvm/relay/op/__init__.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2019-08-08T01:48:03.000Z | 2019-09-27T06:49:16.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=wildcard-import, redefined-builtin
"""Relay core operators."""
# operator defs
from .op import get, register_compute, register_gradient, \
register_pattern, register_alter_op_layout, register_legalize, \
OpPattern, OpStrategy, debug, register_external_compiler
from . import strategy
# Operators
from .reduce import *
from .tensor import *
from .transform import *
from .algorithm import *
from . import vm
from . import nn
from . import annotation
from . import memory
from . import image
from . import vision
from . import op_attrs
# operator registry
from . import _tensor
from . import _tensor_grad
from . import _transform
from . import _reduce
from . import _algorithm
def _register_op_make():
    """Wire the FFI-backed constructor module into relay's expr module."""
    # pylint: disable=import-outside-toplevel
    # deferred imports: expr imports this package, so importing at module
    # scope would create a cycle
    from .. import expr
    from . import _make
    expr._op_make = _make


# perform the registration as a module-import side effect
_register_op_make()
| 30.666667 | 68 | 0.766908 |
39895b7962a006ec03f50cb3b3e65ef970746549 | 652 | py | Python | Experiment-2/EXAMPLES/Exp-2_ClassTask-13.py | aaryarajoju/cu-py | 2292ab06197405d379f063dd03861936c9912103 | [
"Unlicense"
] | 2 | 2021-01-08T08:29:27.000Z | 2021-01-14T13:47:27.000Z | Experiment-2/EXAMPLES/Exp-2_ClassTask-13.py | aaryarajoju/cu-py | 2292ab06197405d379f063dd03861936c9912103 | [
"Unlicense"
] | 1 | 2020-11-02T20:20:34.000Z | 2020-11-02T20:57:31.000Z | Experiment-2/EXAMPLES/Exp-2_ClassTask-13.py | aaryarajoju/cu-py | 2292ab06197405d379f063dd03861936c9912103 | [
"Unlicense"
] | null | null | null | # Birthday Wishes
# Demonstrates keyword arguments and default parameter values
# positional parameters
def birthday1(name, age):
    """Print a birthday greeting; arguments are bound positionally."""
    # assemble the message exactly as print() would with its default sep=' '
    pieces = ["Happy birthday,", str(name), "!", " I hear you're", str(age), "today.\n"]
    print(" ".join(pieces))
# parameters with default values
def birthday2(name="Jackson", age=1):
    """Print a birthday greeting; both parameters carry default values."""
    # assemble the message exactly as print() would with its default sep=' '
    pieces = ["Happy birthday,", str(name), "!", " I hear you're", str(age), "today.\n"]
    print(" ".join(pieces))
# Demo calls: positional arguments bind left to right.
birthday1("Jackson", 1)
# Swapped positional arguments produce a nonsensical greeting (on purpose).
birthday1(1, "Jackson")
# Keyword arguments may appear in any order.
birthday1(name="Jackson", age=1)
birthday1(age=1, name="Jackson")
# birthday2 falls back on its default parameter values when arguments are omitted.
birthday2()
birthday2(name="Katherine")
birthday2(age=12)
birthday2(name="Katherine", age=12)
birthday2("Katherine", 12)
# keep the console window open until the user presses enter
input("\n\nPress the enter key to exit.")
| 27.166667 | 74 | 0.682515 |
16ad951ddd259594d53020a341789b09a93edfb9 | 1,188 | py | Python | crowddynamics-qtgui/qtgui/cli.py | antonvs88/multiobj-guided-evac | 84d78ac29419011d7af45391f230f50e8cbe30f4 | [
"MIT"
] | 1 | 2019-12-16T14:34:35.000Z | 2019-12-16T14:34:35.000Z | crowddynamics-qtgui/qtgui/cli.py | antonvs88/multiobj-guided-evac | 84d78ac29419011d7af45391f230f50e8cbe30f4 | [
"MIT"
] | 1 | 2017-04-24T05:26:55.000Z | 2017-04-24T05:26:55.000Z | crowddynamics-qtgui/qtgui/cli.py | antonvs88/optimal-guided-evacuation | ece2e12204bd41596173af5aacc0933acfd6b7c1 | [
"MIT"
] | null | null | null | """Extends crowddynamics commandline client with gui related commands"""
import logging
import sys
import click
from PyQt4 import QtGui, QtCore
from crowddynamics.logging import setup_logging
from qtgui.main import MainWindow
def run_gui(simulation_cfg=None):
    r"""Launches the graphical user interface for visualizing simulation.

    :param simulation_cfg: optional simulation configuration handed to the
        main window via ``set_simulations`` before it is shown.
    """
    setup_logging()
    logger = logging.getLogger(__name__)
    logger.info('Starting GUI')
    app = QtGui.QApplication(sys.argv)
    win = MainWindow()
    if simulation_cfg:
        win.set_simulations(simulation_cfg)
    win.show()

    # Start Qt event loop unless running in interactive mode or using pyside.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        # BUGFIX: PyQt4's QApplication exposes the event loop only as
        # exec_() (``exec`` was a reserved word in Python 2); the bare
        # app.exec() alias exists only in PyQt5+, so it raised
        # AttributeError here.
        app.exec_()
    else:
        logger.warning("Interactive mode and pyside are not supported.")
    logging.info('Exiting GUI')
    logging.shutdown()
    # tear down in window -> application -> interpreter order
    win.close()
    app.exit()
    sys.exit()
# Root command group; subcommands (e.g. ``run`` below) attach to this.
@click.group()
def main():
    # Intentionally no docstring: click would surface it as CLI help text.
    pass
@main.command()
# Optional path to a simulation file; defaults to None (no simulation preloaded).
@click.option('--simulation_file', type=str, default=None)
def run(simulation_file):
    """Launch gui for crowddynamics"""
    # forwarded positionally as run_gui's ``simulation_cfg`` parameter
    run_gui(simulation_file)
# Support direct execution (``python cli.py``) in addition to entry-point use.
if __name__ == "__main__":
    main()
| 22.846154 | 77 | 0.701178 |
4def7459940a0d54e32d88d0e56582fa8168dcdb | 1,448 | py | Python | packages/OpenCV/nodes/OpenCV___BilateralFilter0/OpenCV___BilateralFilter0___METACODE.py | Shirazbello/Pyscriptining | 0f2c80a9bb10477d65966faeccc7783f20385c1b | [
"MIT"
] | null | null | null | packages/OpenCV/nodes/OpenCV___BilateralFilter0/OpenCV___BilateralFilter0___METACODE.py | Shirazbello/Pyscriptining | 0f2c80a9bb10477d65966faeccc7783f20385c1b | [
"MIT"
] | null | null | null | packages/OpenCV/nodes/OpenCV___BilateralFilter0/OpenCV___BilateralFilter0___METACODE.py | Shirazbello/Pyscriptining | 0f2c80a9bb10477d65966faeccc7783f20385c1b | [
"MIT"
] | null | null | null | from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
import cv2
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# Code-generator template: %NODE_TITLE% is substituted before this runs.
# The node applies OpenCV's bilateral filter to the image on input 0.
class %NODE_TITLE%_NodeInstance(NodeInstance):
    def __init__(self, parent_node: Node, flow, configuration=None):
        super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)

        # self.special_actions['action name'] = self.actionmethod ...
        # cached copies of the last input frame and its filtered result
        self.img_unfiltered = None
        self.img_filtered = None

        self.initialized()

    def update_event(self, input_called=-1):
        # Inputs: 0 = image, 1 = d (neighbourhood diameter),
        # 2 = sigmaColor, 3 = sigmaSpace -- all coerced to int for cv2.
        self.img_unfiltered = self.input(0)
        d_val = self.input(1)
        d_val = int(d_val)
        sigmaColor_val=self.input(2)
        sigmaColor_val=int(sigmaColor_val)
        sigmaSpace_val=self.input(3)
        sigmaSpace_val=int(sigmaSpace_val)
        self.img_filtered = cv2.bilateralFilter( self.img_unfiltered, d_val, sigmaColor_val,sigmaSpace_val)
        # preview in the node widget and publish on output 0
        self.main_widget.show_image(self.img_filtered)
        self.outputs[0].set_val(self.img_filtered)

    def get_data(self):
        # no per-instance state to serialize yet
        data = {}
        # ...
        return data

    def set_data(self, data):
        # no per-instance state to restore yet
        pass
        # ...

    # optional - important for threading - stop everything here
    def removing(self):
        pass
| 27.846154 | 107 | 0.647099 |
cf8fe73d112bf3ceedac6e15199ccb259cb318da | 3,248 | py | Python | lib/trellis/plugins/callback/output.py | mAAdhaTTah/trellis | 2e18ca4e20eb991cf4c6a1894c4d56406ec9614f | [
"MIT"
] | 2,291 | 2015-07-03T15:43:21.000Z | 2022-03-28T22:14:11.000Z | lib/trellis/plugins/callback/output.py | mAAdhaTTah/trellis | 2e18ca4e20eb991cf4c6a1894c4d56406ec9614f | [
"MIT"
] | 928 | 2015-07-03T03:17:52.000Z | 2022-03-29T16:46:30.000Z | lib/trellis/plugins/callback/output.py | mAAdhaTTah/trellis | 2e18ca4e20eb991cf4c6a1894c4d56406ec9614f | [
"MIT"
] | 888 | 2015-07-03T18:31:37.000Z | 2022-03-30T12:33:56.000Z | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import sys
DOCUMENTATION = '''
callback: output
type: stdout
short_description: Custom output for Trellis
extends_documentation_fragment:
- default_callback
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
# Import the trellis output helpers; when running outside an installed
# environment, fall back to the ``lib`` directory next to ansible.cfg.
try:
    from trellis.utils import output as output
except ImportError:
    ansible_config_path = os.getenv('ANSIBLE_CONFIG')
    ansible_path = os.path.dirname(ansible_config_path) if ansible_config_path else os.getcwd()
    lib_path = os.path.join(ansible_path, 'lib')
    # BUGFIX: the original tested ``sys.path.append(...) in sys.path`` (which
    # compares None and appends as a side effect) and then appended the None
    # returned by a second ``sys.path.append`` onto sys.path. Intent: if the
    # lib dir is already on sys.path the import cannot be repaired -- re-raise;
    # otherwise add it and retry.
    if lib_path in sys.path:
        raise
    sys.path.append(lib_path)
    from trellis.utils import output as output
class CallbackModule(CallbackModule_default):

    ''' Customizes the default Ansible output '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'output'

    def __init__(self):
        super(CallbackModule, self).__init__()
        # seed per-task bookkeeping (e.g. the task_failed flag used below)
        output.reset_task_info(self)
        # populated from --extra-vars in v2_playbook_on_play_start
        self.vagrant_version = None

    # Each runner hook renders the host result through the trellis output
    # helpers first, then defers to the stock "default" callback.

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.task_failed = True
        output.display_host(self, result)
        super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)

    def v2_runner_on_ok(self, result):
        output.display_host(self, result)
        super(CallbackModule, self).v2_runner_on_ok(result)

    def v2_runner_on_skipped(self, result):
        output.display_host(self, result)
        super(CallbackModule, self).v2_runner_on_skipped(result)

    def v2_runner_on_unreachable(self, result):
        # treat unreachable hosts as task failures for our bookkeeping
        self.task_failed = True
        output.display_host(self, result)
        super(CallbackModule, self).v2_runner_on_unreachable(result)

    def v2_playbook_on_task_start(self, task, is_conditional):
        # reset per-task state at each task boundary
        output.reset_task_info(self, task)
        super(CallbackModule, self).v2_playbook_on_task_start(task, is_conditional)

    def v2_playbook_on_handler_task_start(self, task):
        output.reset_task_info(self, task)
        super(CallbackModule, self).v2_playbook_on_handler_task_start(task)

    def v2_playbook_on_play_start(self, play):
        super(CallbackModule, self).v2_playbook_on_play_start(play)

        # Check for relevant settings or overrides passed via cli --extra-vars
        extra_vars = play.get_variable_manager().extra_vars

        if 'vagrant_version' in extra_vars:
            self.vagrant_version = extra_vars['vagrant_version']

    # Per-item hooks: display the item, then swap the raw item for its key
    # so subsequent output stays readable.

    def v2_runner_item_on_ok(self, result):
        output.display_item(self, result)
        output.replace_item_with_key(self, result)
        super(CallbackModule, self).v2_runner_item_on_ok(result)

    def v2_runner_item_on_failed(self, result):
        self.task_failed = True
        output.display_item(self, result)
        output.replace_item_with_key(self, result)
        super(CallbackModule, self).v2_runner_item_on_failed(result)

    def v2_runner_item_on_skipped(self, result):
        output.display_item(self, result)
        output.replace_item_with_key(self, result)
        super(CallbackModule, self).v2_runner_item_on_skipped(result)
| 36.494382 | 95 | 0.732451 |
9eaa6396de57010c415b1e049a3256b1234209b7 | 1,054 | py | Python | Models/biocathub_model_pydantic.py | willfinnigan/bch_RetroBioHub | 3cc4dd486b61d20ace5ddda4a4cdcd6e1795f5fb | [
"MIT"
] | null | null | null | Models/biocathub_model_pydantic.py | willfinnigan/bch_RetroBioHub | 3cc4dd486b61d20ace5ddda4a4cdcd6e1795f5fb | [
"MIT"
] | 16 | 2021-12-09T16:10:17.000Z | 2022-02-11T18:26:28.000Z | Models/biocathub_model_pydantic.py | willfinnigan/bch_RetroBioHub | 3cc4dd486b61d20ace5ddda4a4cdcd6e1795f5fb | [
"MIT"
] | 3 | 2022-01-28T12:29:45.000Z | 2022-02-04T09:19:41.000Z | from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
# Defining a reactant
# pydantic model for a single reaction participant
class Reactant(BaseModel):
    role:str     # e.g. which side/function the molecule plays in the reaction
    smiles:str   # SMILES encoding of the molecule
    name:str     # human-readable name
    class Config:
        # allow construction from ORM objects via attribute access
        orm_mode = True
class Reactantcls:
    """Plain (non-pydantic) container mirroring the Reactant fields."""

    def __init__(self, role: str, smiles: str, name: str):
        # store the three descriptors as simple attributes
        self.name = name
        self.smiles = smiles
        self.role = role
# Defining the Reaction ****************************************
# pydantic model for a named reaction and its participants
class Reaction(BaseModel):
    name:str
    educts:List[Reactant]    # input-side participants
    products:List[Reactant]  # output-side participants
    class Config:
        # allow construction from ORM objects via attribute access
        orm_mode= True
class Reactioncls:
    """Plain (non-pydantic) container mirroring the Reaction fields."""

    def __init__(self, name, educts, products):
        # keep the participant lists exactly as supplied
        self.products = products
        self.educts = educts
        self.name = name
# Defining the Enzyme**********************************************
# pydantic model pairing an enzyme name with the reaction it catalyses
class Enzyme(BaseModel):
    name: str
    reaction:Reaction
    class Config:
        # allow construction from ORM objects via attribute access
        orm_mode=True
class Enzymecls:
    """Plain (non-pydantic) container mirroring the Enzyme fields."""

    def __init__(self, name, reaction):
        self.reaction = reaction
        self.name = name
| 17.566667 | 67 | 0.591082 |
bf369c9e85c9acf9c4a53597d96af791faf9faa2 | 1,484 | py | Python | adjacency_list_graph.py | candaceyw/graphs | 59ed6bd79980f6f51f112279db4ad6c25c8d1edf | [
"MIT"
] | null | null | null | adjacency_list_graph.py | candaceyw/graphs | 59ed6bd79980f6f51f112279db4ad6c25c8d1edf | [
"MIT"
] | null | null | null | adjacency_list_graph.py | candaceyw/graphs | 59ed6bd79980f6f51f112279db4ad6c25c8d1edf | [
"MIT"
] | null | null | null | class Vertex:
def __init__(self, key):
self.id = key
self.connectedTo = {}
def addNeighbor(self, nbr, weight=0):
self.connectedTo[nbr] = weight
def __str__(self):
return str(self.id) + ' connectedTo: ' + str([x.id for x in self.connectedTo])
def getConnections(self):
return self.connectedTo.keys()
def getId(self):
return self.id
def getWeight(self, nbr):
return self.connectedTo[nbr]
class Graph:
    """Directed graph stored as an adjacency list keyed by vertex id."""

    def __init__(self):
        self.vertList = {}
        self.numVertices = 0

    def addVertex(self, key):
        """Create, register and return a new Vertex for *key*."""
        self.numVertices += 1
        vertex = Vertex(key)
        self.vertList[key] = vertex
        return vertex

    def getVertex(self, n):
        """Return the Vertex for *n*, or None when it is unknown."""
        return self.vertList.get(n)

    def __contains__(self, n):
        return n in self.vertList

    def addEdge(self, f, t, cost=0):
        """Add edge f -> t with the given cost, creating endpoints on demand."""
        for key in (f, t):
            if key not in self.vertList:
                self.addVertex(key)
        self.vertList[f].addNeighbor(self.vertList[t], cost)

    def getVertices(self):
        """Return a view of the known vertex keys."""
        return self.vertList.keys()

    def __iter__(self):
        return iter(self.vertList.values())
# Demo: build a graph with six vertices, connect 0 -> 1 with weight 2, print.
g = Graph()
for i in range(6):
    g.addVertex(i)
# bare attribute access -- a REPL leftover with no effect as a statement
g.vertList
g.addEdge(0, 1, 2)
for vertex in g:
    print(vertex)
    print(vertex.getConnections())
    print('\n')
| 21.823529 | 86 | 0.588275 |
f406d1f0c86a0998a4e8fe8632467b8e70537a8a | 5,371 | py | Python | scipy/__init__.py | alazarchuk/scipy | 7124fc982ea9b0ea961c65db550c0703abcb9bfd | [
"BSD-3-Clause"
] | 1 | 2020-12-25T08:49:10.000Z | 2020-12-25T08:49:10.000Z | scipy/__init__.py | alazarchuk/scipy | 7124fc982ea9b0ea961c65db550c0703abcb9bfd | [
"BSD-3-Clause"
] | 5 | 2020-09-01T01:19:07.000Z | 2021-10-11T01:06:05.000Z | scipy/__init__.py | alazarchuk/scipy | 7124fc982ea9b0ea961c65db550c0703abcb9bfd | [
"BSD-3-Clause"
] | 1 | 2019-07-19T12:49:26.000Z | 2019-07-19T12:49:26.000Z | """
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at https://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
fft --- Discrete Fourier transforms
fftpack --- Legacy discrete Fourier transforms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
linalg.blas --- Wrappers to BLAS library
linalg.lapack --- Wrappers to LAPACK library
misc --- Various utilities that don't have
another home.
ndimage --- N-D image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
signal.windows --- Window functions
sparse --- Sparse Matrices
sparse.linalg --- Sparse Linear Algebra
sparse.linalg.dsolve --- Linear Solvers
sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library:
Conjugate Gradient Method (LOBPCG)
sparse.linalg.eigen --- Sparse Eigenvalue Solvers
sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
Conjugate Gradient Method (LOBPCG)
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Utility tools
-------------
::
test --- Run scipy unittests
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- SciPy version string
__numpy_version__ --- Numpy version string
"""
__all__ = ['test']

from numpy import show_config as show_numpy_config
# A missing __config__ means NumPy is being imported from its source tree.
if show_numpy_config is None:
    raise ImportError(
        "Cannot import SciPy when running from NumPy source directory.")
from numpy import __version__ as __numpy_version__

# Import numpy symbols to scipy name space (DEPRECATED)
from ._lib.deprecation import _deprecated
import numpy as _num
# pre-bind the name so the `del linalg` cleanup below always has a target
linalg = None
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
        'use numpy.{0} instead')

# deprecate callable objects, skipping classes
for _key in _num.__all__:
    _fun = getattr(_num, _key)
    if callable(_fun) and not isinstance(_fun, type):
        # wrap callables so use through the scipy namespace warns
        _fun = _deprecated(_msg.format(_key))(_fun)
    globals()[_key] = _fun

from numpy.random import rand, randn
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
        'use numpy.random.{0} instead')
rand = _deprecated(_msg.format('rand'))(rand)
randn = _deprecated(_msg.format('randn'))(randn)

# fft is especially problematic, so was removed in SciPy 1.6.0
from numpy.fft import ifft
ifft = _deprecated('scipy.ifft is deprecated and will be removed in SciPy '
                   '2.0.0, use scipy.fft.ifft instead')(ifft)

import numpy.lib.scimath as _sci
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
        'use numpy.lib.scimath.{0} instead')
for _key in _sci.__all__:
    _fun = getattr(_sci, _key)
    if callable(_fun):
        _fun = _deprecated(_msg.format(_key))(_fun)
    globals()[_key] = _fun

# re-export everything pulled in from numpy, plus the extras above
__all__ += _num.__all__
__all__ += ['randn', 'rand', 'ifft']
del _num

# Remove the linalg imported from NumPy so that the scipy.linalg package can be
# imported.
del linalg
__all__.remove('linalg')
# We first need to detect if we're being called as part of the SciPy
# setup procedure itself in a reliable manner.
try:
    __SCIPY_SETUP__
except NameError:
    # not defined by setup.py, so this is a regular import
    __SCIPY_SETUP__ = False


if __SCIPY_SETUP__:
    import sys as _sys
    _sys.stderr.write('Running from SciPy source directory.\n')
    del _sys
else:
    try:
        from scipy.__config__ import show as show_config
    except ImportError as e:
        msg = """Error importing SciPy: you cannot import SciPy while
        being in scipy source directory; please exit the SciPy source
        tree first and relaunch your Python interpreter."""
        raise ImportError(msg) from e

    from scipy.version import version as __version__

    # Allow distributors to run custom init code
    from . import _distributor_init

    from scipy._lib import _pep440
    # soft check only: warn (don't fail) on an unsupported NumPy
    if _pep440.parse(__numpy_version__) < _pep440.Version('1.14.5'):
        import warnings
        warnings.warn("NumPy 1.14.5 or above is required for this version of "
                      "SciPy (detected version %s)" % __numpy_version__,
                      UserWarning)
    del _pep440

    from scipy._lib._ccallback import LowLevelCallable

    from scipy._lib._testutils import PytestTester
    test = PytestTester(__name__)
    del PytestTester

    # This makes "from scipy import fft" return scipy.fft, not np.fft
    del fft
5df23ec04d613c6bcc3e1a49369b1db98fa9a83d | 6,072 | py | Python | samples/server/petstore/flaskConnexion/swagger_server/models/all_of_sub_category_pets_items.py | Cadcorp/swagger-codegen | 23b64dd5e5266a7d0d7fb7a5c800d618c12696de | [
"Apache-2.0"
] | 1 | 2020-09-06T18:36:28.000Z | 2020-09-06T18:36:28.000Z | samples/server/petstore/flaskConnexion/swagger_server/models/all_of_sub_category_pets_items.py | Cadcorp/swagger-codegen | 23b64dd5e5266a7d0d7fb7a5c800d618c12696de | [
"Apache-2.0"
] | null | null | null | samples/server/petstore/flaskConnexion/swagger_server/models/all_of_sub_category_pets_items.py | Cadcorp/swagger-codegen | 23b64dd5e5266a7d0d7fb7a5c800d618c12696de | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.category import Category # noqa: F401,E501
from swagger_server.models.pet import Pet # noqa: F401,E501
from swagger_server.models.tag import Tag # noqa: F401,E501
from swagger_server import util
# Auto-generated model: regenerate from the OpenAPI spec instead of hand-editing.
class AllOfSubCategoryPetsItems(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, id: int=None, category: Category=None, name: str=None, photo_urls: List[str]=None, tags: List[Tag]=None, status: str=None):  # noqa: E501
        """AllOfSubCategoryPetsItems - a model defined in Swagger

        :param id: The id of this AllOfSubCategoryPetsItems.  # noqa: E501
        :type id: int
        :param category: The category of this AllOfSubCategoryPetsItems.  # noqa: E501
        :type category: Category
        :param name: The name of this AllOfSubCategoryPetsItems.  # noqa: E501
        :type name: str
        :param photo_urls: The photo_urls of this AllOfSubCategoryPetsItems.  # noqa: E501
        :type photo_urls: List[str]
        :param tags: The tags of this AllOfSubCategoryPetsItems.  # noqa: E501
        :type tags: List[Tag]
        :param status: The status of this AllOfSubCategoryPetsItems.  # noqa: E501
        :type status: str
        """
        # attribute name -> declared type, used by the base Model for (de)serialization
        self.swagger_types = {
            'id': int,
            'category': Category,
            'name': str,
            'photo_urls': List[str],
            'tags': List[Tag],
            'status': str
        }

        # python attribute name -> wire (JSON) field name
        self.attribute_map = {
            'id': 'id',
            'category': 'category',
            'name': 'name',
            'photo_urls': 'photoUrls',
            'tags': 'tags',
            'status': 'status'
        }
        # NOTE: assigning the private slots directly bypasses the property
        # setters, so no validation runs during construction.
        self._id = id
        self._category = category
        self._name = name
        self._photo_urls = photo_urls
        self._tags = tags
        self._status = status

    @classmethod
    def from_dict(cls, dikt) -> 'AllOfSubCategoryPetsItems':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The AllOfSubCategoryPetsItems of this AllOfSubCategoryPetsItems.  # noqa: E501
        :rtype: AllOfSubCategoryPetsItems
        """
        return util.deserialize_model(dikt, cls)

    @property
    def id(self) -> int:
        """Gets the id of this AllOfSubCategoryPetsItems.


        :return: The id of this AllOfSubCategoryPetsItems.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id: int):
        """Sets the id of this AllOfSubCategoryPetsItems.


        :param id: The id of this AllOfSubCategoryPetsItems.
        :type id: int
        """

        self._id = id

    @property
    def category(self) -> Category:
        """Gets the category of this AllOfSubCategoryPetsItems.


        :return: The category of this AllOfSubCategoryPetsItems.
        :rtype: Category
        """
        return self._category

    @category.setter
    def category(self, category: Category):
        """Sets the category of this AllOfSubCategoryPetsItems.


        :param category: The category of this AllOfSubCategoryPetsItems.
        :type category: Category
        """

        self._category = category

    @property
    def name(self) -> str:
        """Gets the name of this AllOfSubCategoryPetsItems.


        :return: The name of this AllOfSubCategoryPetsItems.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name: str):
        """Sets the name of this AllOfSubCategoryPetsItems.


        :param name: The name of this AllOfSubCategoryPetsItems.
        :type name: str
        """
        # required field in the spec: the setter rejects None
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    @property
    def photo_urls(self) -> List[str]:
        """Gets the photo_urls of this AllOfSubCategoryPetsItems.


        :return: The photo_urls of this AllOfSubCategoryPetsItems.
        :rtype: List[str]
        """
        return self._photo_urls

    @photo_urls.setter
    def photo_urls(self, photo_urls: List[str]):
        """Sets the photo_urls of this AllOfSubCategoryPetsItems.


        :param photo_urls: The photo_urls of this AllOfSubCategoryPetsItems.
        :type photo_urls: List[str]
        """
        # required field in the spec: the setter rejects None
        if photo_urls is None:
            raise ValueError("Invalid value for `photo_urls`, must not be `None`")  # noqa: E501

        self._photo_urls = photo_urls

    @property
    def tags(self) -> List[Tag]:
        """Gets the tags of this AllOfSubCategoryPetsItems.


        :return: The tags of this AllOfSubCategoryPetsItems.
        :rtype: List[Tag]
        """
        return self._tags

    @tags.setter
    def tags(self, tags: List[Tag]):
        """Sets the tags of this AllOfSubCategoryPetsItems.


        :param tags: The tags of this AllOfSubCategoryPetsItems.
        :type tags: List[Tag]
        """

        self._tags = tags

    @property
    def status(self) -> str:
        """Gets the status of this AllOfSubCategoryPetsItems.

        pet status in the store  # noqa: E501

        :return: The status of this AllOfSubCategoryPetsItems.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status: str):
        """Sets the status of this AllOfSubCategoryPetsItems.

        pet status in the store  # noqa: E501

        :param status: The status of this AllOfSubCategoryPetsItems.
        :type status: str
        """
        # enum in the spec: only these values pass validation (None also fails)
        allowed_values = ["available", "pending", "sold"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"
                .format(status, allowed_values)
            )

        self._status = status
| 29.192308 | 160 | 0.612484 |
09ee2e6670fcaa5f44263f24cb8e86f4a9c18d13 | 2,852 | py | Python | BBRL/src/process.py | dg10mcdos/mario-bmstew | 5b1806fc59dc88fd326a4e1de9c02284ba35f9f9 | [
"BSD-3-Clause"
] | null | null | null | BBRL/src/process.py | dg10mcdos/mario-bmstew | 5b1806fc59dc88fd326a4e1de9c02284ba35f9f9 | [
"BSD-3-Clause"
] | null | null | null | BBRL/src/process.py | dg10mcdos/mario-bmstew | 5b1806fc59dc88fd326a4e1de9c02284ba35f9f9 | [
"BSD-3-Clause"
] | null | null | null | """
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch
from src.env import create_train_env
from src.model import PPO
import torch.nn.functional as F
from collections import deque
import numpy as np
from src.helpers import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT, RIGHT_ONLY, flag_get
import csv, os
import time
def evaluate(opt, global_model, num_states, num_actions):
    """Evaluate the shared PPO model in an endless greedy-rollout loop.

    Per-episode statistics are appended to ``<saved_path>/PPO_test.csv`` and a
    checkpoint is written whenever the flag is reached.

    :param opt: parsed options (action_type, saved_path, max_actions,
        num_global_steps, ...)
    :param global_model: shared PPO model; its weights are copied into a local
        evaluation model at the start of every episode
    :param num_states: observation-channel count for the local PPO copy
    :param num_actions: size of the action space
    """
    torch.manual_seed(123)
    if opt.action_type == "right":
        actions = RIGHT_ONLY
    elif opt.action_type == "simple":
        actions = SIMPLE_MOVEMENT
    else:
        actions = COMPLEX_MOVEMENT
    savefile = opt.saved_path + '/PPO_test.csv'
    print(savefile)
    title = ['Steps', 'Time', 'TotalReward', "Flag"]
    with open(savefile, 'w', newline='') as sfile:
        writer = csv.writer(sfile)
        writer.writerow(title)
    env = create_train_env(actions, mp_wrapper=False)
    local_model = PPO(num_states, num_actions)
    if torch.cuda.is_available():
        local_model.cuda()
    local_model.eval()
    state = torch.from_numpy(env.reset())
    if torch.cuda.is_available():
        state = state.cuda()
    done = True
    curr_step = 0
    tot_step = 0
    # fix: renamed from `actions`, which shadowed the movement list above
    recent_actions = deque(maxlen=opt.max_actions)
    tot_reward = 0
    got_flag = 0
    # fix: start the timer once per episode instead of once per step, so the
    # logged 'Time' column is the episode duration rather than the duration
    # of the final step only
    start_time = time.time()
    while True:
        curr_step += 1
        tot_step += 1
        if done:
            # refresh the local copy from the shared model at episode start
            local_model.load_state_dict(global_model.state_dict())
        logits, value = local_model(state)
        policy = F.softmax(logits, dim=1)
        action = torch.argmax(policy).item()  # greedy action selection
        state, reward, done, info = env.step(action)
        tot_reward += reward
        # Checkpoint whenever the level is completed.
        if flag_get(info):
            got_flag = 1
            done = True
            torch.save(local_model.state_dict(),
                       "{}/ppo_super_mario_bros_{}".format(opt.saved_path, curr_step))
        env.render()
        recent_actions.append(action)
        # abort if the episode runs too long or the agent is stuck repeating
        # one action for the whole window
        if curr_step > opt.num_global_steps or recent_actions.count(recent_actions[0]) == recent_actions.maxlen:
            # print("Evaluate: Time's up!")
            done = True
        if done:
            # print("Evaluate: Done!")
            ep_time = time.time() - start_time
            data = [tot_step, "{:.4f}".format(ep_time), "{:.2f}".format(tot_reward), got_flag]
            with open(savefile, 'a', newline='') as sfile:
                writer = csv.writer(sfile)
                writer.writerows([data])
            curr_step = 0
            got_flag = 0
            tot_reward = 0
            recent_actions.clear()
            # time.sleep(10) # Sleep for 10 secs
            state = env.reset()
            state = torch.from_numpy(state)
            if torch.cuda.is_available():
                state = state.cuda()
            # restart the episode timer for the next episode
            start_time = time.time()
| 31 | 94 | 0.601683 |
2206d58edaabdaecb4485c9263704dd0b492da81 | 1,300 | py | Python | keg/armadillo.py | 0xf4b1/keg | 3c8b63420c2f91381f06ecb744122b16c5fb65a0 | [
"MIT"
] | 16 | 2018-08-18T13:33:07.000Z | 2022-03-08T10:11:08.000Z | keg/armadillo.py | MrMoonKr/keg-doc | d17aaba70fd7e8fd56960212b82323b40bfc580b | [
"MIT"
] | 9 | 2018-08-22T17:09:23.000Z | 2018-08-30T00:02:58.000Z | keg/armadillo.py | MrMoonKr/keg-doc | d17aaba70fd7e8fd56960212b82323b40bfc580b | [
"MIT"
] | 4 | 2016-04-28T03:20:08.000Z | 2022-02-08T20:47:14.000Z | from base64 import b32encode
from binascii import hexlify, unhexlify
from hashlib import md5
from Crypto.Cipher import Salsa20 # type: ignore
from .exceptions import IntegrityVerificationError
# An Armadillo key blob is a 16-byte Salsa20 key followed by a 4-byte digest.
ARMADILLO_KEY_SIZE = 16
ARMADILLO_DIGEST_SIZE = 4
def verify_armadillo_key(data: bytes) -> bool:
    """
    Verifies an Armadillo Key against itself.

    The expected data is a 16 byte key followed by a 4 byte digest.
    The digest is the first 4 bytes of the md5 of the key.
    (Doc fix: the previous docstring claimed an 8 byte digest, but the code
    enforces ARMADILLO_DIGEST_SIZE == 4.)

    Returns True on success; raises ValueError for a malformed blob and
    IntegrityVerificationError when the digest does not match.
    """
    if len(data) != ARMADILLO_KEY_SIZE + ARMADILLO_DIGEST_SIZE:
        # fix: plain string literal (was an f-string with no placeholders)
        raise ValueError("Invalid Armadillo Key size.")

    actual_data = data[:ARMADILLO_KEY_SIZE]
    expected_digest = hexlify(data[ARMADILLO_KEY_SIZE:]).decode()
    actual_digest = hexlify(md5(actual_data).digest()[:ARMADILLO_DIGEST_SIZE]).decode()

    # fix: dropped the redundant `digest = actual_digest` alias
    if actual_digest != expected_digest:
        raise IntegrityVerificationError("armadillo key", actual_digest, expected_digest)

    return True
class ArmadilloKey:
    # Wraps a decoded Armadillo key blob and decrypts objects with it.
    def __init__(self, data: bytes) -> None:
        # full blob (key + digest trailer)
        self.data = data
        # first 16 bytes are the Salsa20 key
        self.key = data[:ARMADILLO_KEY_SIZE]

    def __repr__(self):
        return f"<{self.__class__.__name__}: {b32encode(self.data)}>"

    def decrypt_object(self, key: str, data: bytes) -> bytes:
        # Salsa20 nonce is the last 8 bytes of the hex-decoded object key
        # (assumes `key` is a hex string of at least 8 bytes -- TODO confirm)
        nonce = unhexlify(key)[-8:]
        cipher = Salsa20.new(key=self.key, nonce=nonce)
        return cipher.decrypt(data)
| 27.659574 | 84 | 0.757692 |
1135dbf43a8a48da4d11308ed6e8406d83f1f466 | 215 | py | Python | todos/models.py | charleskbenin/todo-app-api | c0a01c3d5d2da0725483f9962b8ea9f4abc3166d | [
"MIT"
] | 1 | 2021-05-20T21:32:59.000Z | 2021-05-20T21:32:59.000Z | todos/models.py | charleskbenin/todo-app-api | c0a01c3d5d2da0725483f9962b8ea9f4abc3166d | [
"MIT"
] | null | null | null | todos/models.py | charleskbenin/todo-app-api | c0a01c3d5d2da0725483f9962b8ea9f4abc3166d | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Todo(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.title
| 21.5 | 44 | 0.706977 |
c8969163fe09a8f899549cb6313966220120f151 | 2,025 | py | Python | raidfinder/frames/frame_data.py | cottonmalone/raid-finder-bot | e39cd94bdd416ea9b4cd344ed4cebc9751b14149 | [
"MIT"
] | null | null | null | raidfinder/frames/frame_data.py | cottonmalone/raid-finder-bot | e39cd94bdd416ea9b4cd344ed4cebc9751b14149 | [
"MIT"
] | null | null | null | raidfinder/frames/frame_data.py | cottonmalone/raid-finder-bot | e39cd94bdd416ea9b4cd344ed4cebc9751b14149 | [
"MIT"
] | null | null | null | import collections
from enum import Enum, auto
from .xoro_shiro import XoroShiro
# bound passed to the RNG for full-width 32-bit draws
FRAME_MAX_INT = 0xFFFFFFFF

# per-frame result: `type` is a ShinyType, `ivs` a list of six ints,
# `ability` the 1-based ability slot
FrameData = collections.namedtuple("FrameData", ["type", "ivs", "ability"])
class ShinyType(Enum):
    """Shiny classification of a generated frame."""
    NONE = auto()    # not shiny
    STAR = auto()    # shiny (star variant)
    SQUARE = auto()  # shiny (square variant)
def get_shiny_xor(val):
    """Fold a 32-bit value into 16 bits by XOR-ing its upper and lower halves."""
    high = val >> 16
    low = val & 0xFFFF
    return high ^ low
def get_shiny_type(pid, sidtid):
    """Classify shininess from the PID and the combined SID/TID value."""
    pid_fold = get_shiny_xor(pid)
    id_fold = get_shiny_xor(sidtid)
    # equal folds -> square shiny; folds differing only in the low 4 bits -> star
    if pid_fold == id_fold:
        return ShinyType.SQUARE
    return ShinyType.STAR if (pid_fold ^ id_fold) < 0x10 else ShinyType.NONE
def get_ivs_for_frame(rng, n_best_ivs):
    """Roll the six IVs for a frame.

    The first *n_best_ivs* distinct stats are pinned to 31; every other stat
    is drawn from the RNG afterwards.
    """
    ivs = [-1] * 6
    assigned = 0
    offset = -n_best_ivs
    # keep drawing stat indices until enough distinct ones are pinned at 31
    while assigned < n_best_ivs:
        stat, offset = rng.next_int(7, 6, offset)
        if ivs[stat] == -1:
            ivs[stat] = 31
            assigned += 1
    # roll the remaining stats in order
    return [iv if iv == 31 else rng.next_int(31) for iv in ivs]
def get_ability(rng, n_best_ivs):
    """Roll the 1-based ability slot; a wider draw applies above three best IVs."""
    if n_best_ivs > 3:
        return 1 + rng.next_int(3, 3)
    return 1 + rng.next_int(1)
def get_frame_data(rng: XoroShiro, n_best_ivs):
    """Consume RNG state for one frame and build its FrameData."""
    # the first full-width draw is consumed but unused here
    _ = rng.next_int(FRAME_MAX_INT, FRAME_MAX_INT)
    sidtid = rng.next_int(FRAME_MAX_INT, FRAME_MAX_INT)
    pid = rng.next_int(FRAME_MAX_INT, FRAME_MAX_INT)
    # shiny classification first, then IVs, then ability (draw order matters)
    return FrameData(
        type=get_shiny_type(pid, sidtid),
        ivs=get_ivs_for_frame(rng, n_best_ivs),
        ability=get_ability(rng, n_best_ivs),
    )
def get_shiny_frames(seed, max_count, n_best_ivs):
    """Scan frames 1..max_count from *seed*; return [(frame_number, FrameData)] for shinies."""
    shiny = []
    rng = XoroShiro(seed)
    for frame_number in range(1, max_count + 1):
        # clone so the scan position is unaffected by per-frame consumption
        frame = get_frame_data(rng.clone(), n_best_ivs)
        if frame.type != ShinyType.NONE:
            shiny.append((frame_number, frame))
            # NOTE(review): progress/debug print kept to preserve behavior
            print((frame_number, frame))
        rng.next_frame()
    return shiny
def get_data_for_n_frame(seed, n_frame, n_best_ivs):
    """Return the FrameData for the *n_frame*-th frame (1-indexed) of *seed*."""
    rng = XoroShiro(seed)
    # Advance to the requested frame; a plain loop avoids building a
    # throwaway list just for the side effect.
    for _ in range(n_frame - 1):
        rng.next_frame()
    return get_frame_data(rng.clone(), n_best_ivs)
| 20.663265 | 75 | 0.626667 |
a4e8a7c61e87f018fe3823656cab53ce793ca8b8 | 3,626 | py | Python | heat/engine/resources/subnet.py | CiscoSystems/heat | 1b609f3c0621c44e4988a166a38f36c2b57eb4c6 | [
"Apache-2.0"
] | 1 | 2020-08-15T14:29:15.000Z | 2020-08-15T14:29:15.000Z | heat/engine/resources/subnet.py | CiscoSystems/heat | 1b609f3c0621c44e4988a166a38f36c2b57eb4c6 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/subnet.py | CiscoSystems/heat | 1b609f3c0621c44e4988a166a38f36c2b57eb4c6 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import clients
from heat.common import exception
from heat.openstack.common import log as logging
from heat.engine import resource
logger = logging.getLogger(__name__)
class Subnet(resource.Resource):
    """Heat resource implementing AWS::EC2::Subnet on top of Quantum.

    handle_create builds a Quantum subnet inside the VPC's network and
    plugs it into the VPC's router; handle_delete reverses both steps.
    """

    # Schema for one tag entry: {'Key': ..., 'Value': ...}.
    tags_schema = {'Key': {'Type': 'String',
                           'Required': True},
                   'Value': {'Type': 'String',
                             'Required': True}}

    properties_schema = {
        'AvailabilityZone': {'Type': 'String'},
        'CidrBlock': {
            'Type': 'String',
            'Required': True},
        'VpcId': {
            'Type': 'String',
            'Required': True},
        'Tags': {'Type': 'List', 'Schema': {
            'Type': 'Map',
            'Implemented': False,
            'Schema': tags_schema}}
    }

    def __init__(self, name, json_snippet, stack):
        super(Subnet, self).__init__(name, json_snippet, stack)

    def handle_create(self):
        """Create the Quantum subnet and attach it to the VPC's router."""
        client = self.quantum()
        # TODO sbaker Verify that this CidrBlock is within the vpc CidrBlock
        network_id = self.properties.get('VpcId')
        vpc = self.stack.resource_by_refid(network_id)
        # The router id was stashed in the VPC resource's metadata at create time.
        router_id = vpc.metadata['router_id']
        props = {
            'network_id': network_id,
            'cidr': self.properties.get('CidrBlock'),
            'name': self.physical_resource_name(),
            'ip_version': 4
        }
        subnet = client.create_subnet({'subnet': props})['subnet']
        # TODO sbaker check for a non-default router for this network
        # and use that instead if it exists
        client.add_interface_router(
            router_id,
            {'subnet_id': subnet['id']})
        # Remember the router so handle_delete can detach from it later.
        md = {
            'router_id': router_id,
            'default_router_id': router_id
        }
        self.metadata = md
        self.resource_id_set(subnet['id'])

    def handle_delete(self):
        """Detach the subnet from its router and delete it, tolerating 404s."""
        from quantumclient.common.exceptions import QuantumClientException
        client = self.quantum()
        router_id = self.metadata['router_id']
        subnet_id = self.resource_id
        # TODO sbaker check for a non-default router for this network
        # and remove that instead if it exists
        try:
            client.remove_interface_router(
                router_id,
                {'subnet_id': subnet_id})
        except QuantumClientException as ex:
            # 404 means the interface is already gone; anything else is fatal.
            if ex.status_code != 404:
                raise ex
        try:
            client.delete_subnet(subnet_id)
        except QuantumClientException as ex:
            if ex.status_code != 404:
                raise ex

    def handle_update(self, json_snippet):
        """Any template change replaces the subnet rather than updating in place."""
        return self.UPDATE_REPLACE

    def FnGetAtt(self, key):
        """Resolve Fn::GetAtt; only AvailabilityZone is supported."""
        if key == 'AvailabilityZone':
            return self.properties.get(key, '')
        raise exception.InvalidTemplateAttribute(resource=self.name, key=key)
def resource_mapping():
    """Expose the Subnet resource only when the quantum client is importable."""
    quantum_available = clients.quantumclient is not None
    return {'AWS::EC2::Subnet': Subnet} if quantum_available else {}
| 32.088496 | 78 | 0.599283 |
b3420bff39daecf725b0a03f7546447292707368 | 1,623 | py | Python | wxbot_project_py2.7/wx_handler/bot.py | awesome-archive/WeixinBot | 39982d45022d84718a29cf52899a075edce25af3 | [
"Apache-2.0"
] | 7,748 | 2016-02-02T07:34:57.000Z | 2022-03-30T00:57:17.000Z | wxbot_project_py2.7/wx_handler/bot.py | 156013468/WeixinBot | d9edcd2c9203fe7dd203b22b71bbc48a31e9492b | [
"Apache-2.0"
] | 251 | 2016-02-04T05:10:03.000Z | 2022-03-23T04:28:52.000Z | wxbot_project_py2.7/wx_handler/bot.py | 156013468/WeixinBot | d9edcd2c9203fe7dd203b22b71bbc48a31e9492b | [
"Apache-2.0"
] | 2,298 | 2016-02-02T15:22:39.000Z | 2022-03-29T13:04:57.000Z | #!/usr/bin/env python
# coding: utf-8
#===================================================
from wechat.utils import *
from config import Constant
#---------------------------------------------------
import random, time, json
#===================================================
class Bot(object):
    """Chatterbot helper: hourly Zhihu Daily headlines plus Tuling API replies."""

    def __init__(self):
        # Emoticon pool used for occasional emoji-only replies.
        self.emoticons = Constant.EMOTICON
        self.gifs = []
        # Epoch seconds of the last scheduled headline; initialised to "now",
        # so the first headline is fetched an hour after construction.
        self.last_time = time.time()

    def time_schedule(self):
        """Return a random Zhihu Daily headline at most once per hour.

        Returns a UTF-8 encoded "title\\nurl" string, or an empty byte
        string when called within an hour of the previous headline.
        """
        r = ''
        now = time.time()
        if int(now - self.last_time) > 3600:
            self.last_time = now
            url_latest = Constant.BOT_ZHIHU_URL_LATEST
            url_daily = Constant.BOT_ZHIHU_URL_DAILY
            data = get(url_latest)
            j = json.loads(data)
            # Pick one story at random from the latest batch.
            story = j['stories'][random.randint(0, len(j['stories'])-1)]
            r = story['title'] + '\n' + url_daily + str(story['id'])
        return r.encode('utf-8')

    def reply(self, text):
        """Ask the Tuling API for a reply to *text*.

        Roughly 70% of the time the API text is returned verbatim, ~10% of
        the time a random repeated emoticon is returned instead, and the
        rest of the time an empty string suppresses any reply.
        """
        APIKEY = Constant.BOT_TULING_API_KEY
        api_url = Constant.BOT_TULING_API_URL % (APIKEY, text, '12345678')
        r = json.loads(get(api_url))
        # 100000 is Tuling's "plain text answer" code; skip the canned
        # fallback reply the API returns when it has no real answer.
        if r.get('code') == 100000 and r.get('text') != Constant.BOT_TULING_BOT_REPLY:
            p = random.randint(1, 10)
            if p > 3:
                return r['text']
            elif p > 1:
                # send emoji
                if random.randint(1, 10) > 5:
                    n = random.randint(0, len(self.emoticons)-1)
                    m = random.randint(1, 3)
                    reply = self.emoticons[n].encode('utf-8') * m
                    return reply
        return ''
| 33.8125 | 86 | 0.478743 |
eb4b688b6b6e3bb5f6e1736bd8912a885fcc594c | 172 | py | Python | directory.py | lourranio/pentest-com-python | 2d0090da00b90c2f5bf6c666d02b9091a8a5661f | [
"CNRI-Python"
] | null | null | null | directory.py | lourranio/pentest-com-python | 2d0090da00b90c2f5bf6c666d02b9091a8a5661f | [
"CNRI-Python"
] | null | null | null | directory.py | lourranio/pentest-com-python | 2d0090da00b90c2f5bf6c666d02b9091a8a5661f | [
"CNRI-Python"
] | null | null | null | import os
# Walk the current directory tree and print the absolute (resolved) path
# of every file found.
pasta = './'
for current_dir, _subdirs, filenames in os.walk(pasta):
    resolved_dir = os.path.realpath(current_dir)
    for filename in filenames:
        print(os.path.join(resolved_dir, filename))
| 28.666667 | 65 | 0.697674 |
e0a0f0f12fa244f834a67d79ff8f67e7474708ba | 3,388 | py | Python | OLDauregistrationbot.py | patrickcoyle427/AU-Registration-Bot | 4fcfb2e1330b69e1a0d21e000fb06cffd0caa008 | [
"MIT"
] | 1 | 2021-07-25T03:46:12.000Z | 2021-07-25T03:46:12.000Z | OLDauregistrationbot.py | patrickcoyle427/AU-Registration-Bot | 4fcfb2e1330b69e1a0d21e000fb06cffd0caa008 | [
"MIT"
] | null | null | null | OLDauregistrationbot.py | patrickcoyle427/AU-Registration-Bot | 4fcfb2e1330b69e1a0d21e000fb06cffd0caa008 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
AURegistrationBot.py
Messages users information on how to register for a tournament. Used in our Remote Duel YCS,
an online Yugioh tournament played over webcam, run for Konami.
-Written by Patrick Coyle
'''
import os, asyncio, discord
from dotenv import load_dotenv
from discord.ext import commands
from datetime import datetime, timezone
'''
os - reads .env for discord token
asyncio - used for the timer for making auto registration annoucements
discord - platform that the bot runs on, includes all commands to make the bot actually work
dotenv import load_dotenv - Loads the .env
discord.ext import commands - imports bot commands
datetime import datetime, timezone - Used for setting event start times and making sure the bot
doesn't advertise events that have already begun.
'''
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# Load's discord token from .env file
bot = commands.Bot(command_prefix='!', case_insensitive=True)
# Calls the bot class
global target_channels
# Global allows all functions to use this list of channels if needed
target_channels = (855907986060083220, 856229554585337916)
# Specifies all channels that the bot will be active in
# the number is the ID of the target channel
global giant_card_start
giant_card_start = datetime(2021, 6, 20, 13, 30, tzinfo=timezone.utc)
# Sets a start time for the attack of the giant card event
async def tournament_announcement():
    """Periodically post registration instructions in every target channel.

    Skips the Attack of the Giant Card channel once that event has begun.
    Runs until the bot client closes.
    """
    giant_card_channel = target_channels[1]
    every_x_seconds = 180
    # Edit this to change announcement frequency
    await bot.wait_until_ready()
    while not bot.is_closed():
        # Re-check the clock every pass: the original captured the time once
        # at startup, so the "event already started" cutoff never triggered.
        current_time = datetime.now(timezone.utc)
        for channel_id in target_channels:
            channel = bot.get_channel(id=channel_id)
            if channel is None:
                # Channel not found/cached; skip rather than crash the task.
                continue
            if channel_id == giant_card_channel and current_time > giant_card_start:
                # Skip advertising registration once the event has started.
                continue
            await channel.send('If you would like to register for an event, type **!register** and this bot '
                               'will DM you with the details on how to get started!')
        await asyncio.sleep(every_x_seconds)
        # Loop waits x seconds before sending the message again
@bot.event
async def on_ready():
    """Log the connection and start the recurring announcement task."""
    print(bot.user.name, 'has connected to Discord!')
    # Kick off the registration-announcement loop once we are connected.
    bot.loop.create_task(tournament_announcement())
@bot.command()
async def register(ctx):
    """DM the caller with registration details, honouring event cut-offs."""
    giant_card_channel = target_channels[1]
    now = datetime.now(timezone.utc)
    if ctx.message.channel.id not in target_channels:
        # Ignore the command outside of the designated channels.
        return
    event_started = (ctx.message.channel.id == giant_card_channel
                     and now > giant_card_start)
    if event_started:
        await ctx.message.author.send('Sorry this event has already started!')
    else:
        await ctx.message.author.send('Hey: https://www.google.com')
bot.run(TOKEN)
| 33.215686 | 114 | 0.688902 |
a5a8fa87ca9097b8a0db167bd67a83b8dca60d23 | 5,717 | py | Python | bot/commands/dev_misc.py | Trimatix-indie/SuperDeckBreaker | 6c5f0a6593df5e7f6807b1e2b09aff65dcf8a6fc | [
"MIT"
] | null | null | null | bot/commands/dev_misc.py | Trimatix-indie/SuperDeckBreaker | 6c5f0a6593df5e7f6807b1e2b09aff65dcf8a6fc | [
"MIT"
] | 34 | 2021-03-20T22:42:16.000Z | 2021-09-29T15:50:31.000Z | bot/commands/dev_misc.py | Trimatix-indie/SuperDeckBreaker | 6c5f0a6593df5e7f6807b1e2b09aff65dcf8a6fc | [
"MIT"
] | null | null | null | import discord
import traceback
from datetime import datetime
from . import commandsDB as botCommands
from .. import botState, lib
from ..users import basedGuild
from ..game import sdbGame
from ..cfg import cfg
from . import util_help
async def dev_cmd_dev_help(message: discord.Message, args: str, isDM: bool):
    """dev command printing help strings for dev commands

    :param discord.Message message: the discord message calling the command
    :param str args: ignored
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    # The final argument selects help level 3 — presumably the developer
    # access level, matching the registration level below; verify.
    await util_help.util_autohelp(message, args, isDM, 3)
botCommands.register("dev-help", dev_cmd_dev_help, 3, signatureStr="**dev-help** *[page number, section or command]*",
shortHelp="Display information about developer-only commands.\nGive a specific command for " +
"detailed info about it, or give a page number or give a section name for brief info.",
longHelp="Display information about developer-only commands.\nGive a specific command for " +
"detailed info about it, or give a page number or give a section name for brief info " +
"about a set of commands. These are the currently valid section names:\n- Miscellaneous")
async def dev_cmd_sleep(message: discord.Message, args: str, isDM: bool):
    """developer command saving all data to JSON and then shutting down the bot

    :param discord.Message message: the discord message calling the command
    :param str args: ignored
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    botState.shutdown = botState.ShutDownState.shutdown
    # Acknowledge before shutting down — the connection is gone afterwards.
    await message.channel.send("shutting down.")
    await botState.client.shutdown()
botCommands.register("bot-sleep", dev_cmd_sleep, 3, allowDM=True, useDoc=True)
async def dev_cmd_save(message: discord.Message, args: str, isDM: bool):
    """developer command saving all databases to JSON

    :param discord.Message message: the discord message calling the command
    :param str args: ignored
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    try:
        botState.client.saveAllDBs()
    except Exception as saveError:
        # Report the failure in the console and to the caller; keep the bot alive.
        print("SAVING ERROR", type(saveError).__name__)
        print(traceback.format_exc())
        await message.channel.send("failed!")
    else:
        print(datetime.now().strftime("%H:%M:%S: Data saved manually!"))
        await message.channel.send("saved!")
botCommands.register("save", dev_cmd_save, 3, allowDM=True, useDoc=True)
async def dev_cmd_say(message: discord.Message, args: str, isDM: bool):
    """developer command sending a message to the same channel as the command is called in

    :param discord.Message message: the discord message calling the command
    :param str args: string containing the message to broadcast
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    if not args:
        await message.channel.send("provide a message!")
        return
    await message.channel.send(**lib.discordUtil.messageArgsFromStr(args))
botCommands.register("say", dev_cmd_say, 3, forceKeepArgsCasing=True, allowDM=True, useDoc=True)
async def dev_cmd_reset_has_poll(message : discord.Message, args : str, isDM : bool):
    """developer command resetting the poll ownership of the calling user,
    or the specified user if one is given.

    :param discord.Message message: the discord message calling the command
    :param str args: string, can be empty or contain a user mention
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    # No mention given -> reset the caller; otherwise parse the mention.
    # [!] no validation is done on the mention.
    if args == "":
        target_id = message.author.id
    else:
        target_id = int(args.lstrip("<@!").rstrip(">"))
    botState.usersDB.getUser(target_id).pollOwned = False
    await message.channel.send("Done!")
botCommands.register("reset-has-poll", dev_cmd_reset_has_poll, 2, allowDM=True, useDoc=True)
async def dev_cmd_force_kill_game(message : discord.Message, args : str, isDM : bool):
    """developer command forcibly killing the game running in the calling channel.
    Will probably cause memory leaks.

    :param discord.Message message: the discord message calling the command
    :param str args: ignored
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    bGuild: basedGuild.BasedGuild = botState.guildsDB.getGuild(message.guild.id)
    runningGame = bGuild.runningGames.get(message.channel)
    if runningGame is None:
        await message.reply("There is no game running in this channel.")
        return
    await runningGame.forceKill()
    await message.reply("Done!")
botCommands.register("force-kill-game", dev_cmd_force_kill_game, 2, allowDM=False, useDoc=True)
async def dev_cmd_toggle_card_urls(message : discord.Message, args : str, isDM : bool):
    """developer command enabling/disabling the showing of card urls in plain text
    alongside embedded images

    :param discord.Message message: the discord message calling the command
    :param str args: ignored
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    cfg.debugCards = not cfg.debugCards
    newState = 'enabled' if cfg.debugCards else 'disabled'
    await message.reply(f"✅ Card URL debugging {newState}.")
botCommands.register("toggle-card-urls", dev_cmd_toggle_card_urls, 2, allowDM=True, useDoc=True)
| 45.015748 | 125 | 0.712087 |
515b2f11541adbdbab88c04dd2f9cb5fb5861056 | 29,753 | py | Python | test/test_ops.py | MeepoAII/vision | 6e10e3f88158f12b7a304d3c2f803d2bbdde0823 | [
"BSD-3-Clause"
] | null | null | null | test/test_ops.py | MeepoAII/vision | 6e10e3f88158f12b7a304d3c2f803d2bbdde0823 | [
"BSD-3-Clause"
] | null | null | null | test/test_ops.py | MeepoAII/vision | 6e10e3f88158f12b7a304d3c2f803d2bbdde0823 | [
"BSD-3-Clause"
] | null | null | null | import math
import unittest
import numpy as np
import torch
from torch import Tensor
from torch.autograd import gradcheck
from torch.jit.annotations import Tuple
from torch.nn.modules.utils import _pair
from torchvision import ops
class OpTester(object):
    """Mixin driving forward/backward checks over the CPU/CUDA and
    contiguous/non-contiguous matrix. Subclasses provide _test_forward
    and _test_backward."""
    @classmethod
    def setUpClass(cls):
        # float64 keeps gradcheck's finite-difference comparisons stable.
        cls.dtype = torch.float64

    def test_forward_cpu_contiguous(self):
        self._test_forward(device=torch.device('cpu'), contiguous=True)

    def test_forward_cpu_non_contiguous(self):
        self._test_forward(device=torch.device('cpu'), contiguous=False)

    def test_backward_cpu_contiguous(self):
        self._test_backward(device=torch.device('cpu'), contiguous=True)

    def test_backward_cpu_non_contiguous(self):
        self._test_backward(device=torch.device('cpu'), contiguous=False)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_forward_cuda_contiguous(self):
        self._test_forward(device=torch.device('cuda'), contiguous=True)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_forward_cuda_non_contiguous(self):
        self._test_forward(device=torch.device('cuda'), contiguous=False)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_backward_cuda_contiguous(self):
        self._test_backward(device=torch.device('cuda'), contiguous=True)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_backward_cuda_non_contiguous(self):
        self._test_backward(device=torch.device('cuda'), contiguous=False)

    def _test_forward(self, device, contiguous):
        # Overridden by subclasses.
        pass

    def _test_backward(self, device, contiguous):
        # Overridden by subclasses.
        pass
class RoIOpTester(OpTester):
    """Shared forward/backward/shape checks for the RoI family of ops.

    Subclasses provide `fn` (the op under test), `get_script_fn` (its
    TorchScript wrapper) and `expected_fn` (a pure-Python reference)."""
    def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None):
        """Compare the op against the Python reference on a small input."""
        x_dtype = self.dtype if x_dtype is None else x_dtype
        rois_dtype = self.dtype if rois_dtype is None else rois_dtype
        pool_size = 5
        # n_channels % (pool_size ** 2) == 0 required for PS operations.
        n_channels = 2 * (pool_size ** 2)
        x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device)
        if not contiguous:
            x = x.permute(0, 1, 3, 2)
        rois = torch.tensor([[0, 0, 0, 9, 9],  # format is (xyxy)
                             [0, 0, 5, 4, 9],
                             [0, 5, 5, 9, 9],
                             [1, 0, 0, 9, 9]],
                            dtype=rois_dtype, device=device)
        pool_h, pool_w = pool_size, pool_size
        y = self.fn(x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1)
        # the following should be true whether we're running an autocast test or not.
        self.assertTrue(y.dtype == x.dtype)
        gt_y = self.expected_fn(x, rois, pool_h, pool_w, spatial_scale=1,
                                sampling_ratio=-1, device=device, dtype=self.dtype)
        # Looser tolerance when half precision is involved anywhere.
        tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5
        self.assertTrue(torch.allclose(gt_y.to(y.dtype), y, rtol=tol, atol=tol))

    def _test_backward(self, device, contiguous):
        """gradcheck the op, both eager and scripted."""
        pool_size = 2
        x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
        if not contiguous:
            x = x.permute(0, 1, 3, 2)
        rois = torch.tensor([[0, 0, 0, 4, 4],  # format is (xyxy)
                             [0, 0, 2, 3, 4],
                             [0, 2, 2, 4, 4]],
                            dtype=self.dtype, device=device)

        def func(z):
            return self.fn(z, rois, pool_size, pool_size, spatial_scale=1, sampling_ratio=1)

        script_func = self.get_script_fn(rois, pool_size)
        self.assertTrue(gradcheck(func, (x,)))
        self.assertTrue(gradcheck(script_func, (x,)))

    def test_boxes_shape(self):
        self._test_boxes_shape()

    def _helper_boxes_shape(self, func):
        """Malformed box tensors must be rejected with an AssertionError."""
        # test boxes as Tensor[N, 5]
        with self.assertRaises(AssertionError):
            a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
            boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype)
            func(a, boxes, output_size=(2, 2))

        # test boxes as List[Tensor[N, 4]]
        with self.assertRaises(AssertionError):
            a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
            boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype)
            ops.roi_pool(a, [boxes], output_size=(2, 2))

    # Placeholders; concrete testers override these with real implementations.
    def fn(*args, **kwargs):
        pass

    def get_script_fn(*args, **kwargs):
        pass

    def expected_fn(*args, **kwargs):
        pass
class RoIPoolTester(RoIOpTester, unittest.TestCase):
    """Tests ops.RoIPool / ops.roi_pool against a max-pooling reference."""
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
        return ops.RoIPool((pool_h, pool_w), spatial_scale)(x, rois)

    def get_script_fn(self, rois, pool_size):
        @torch.jit.script
        def script_fn(input, rois, pool_size):
            # type: (Tensor, Tensor, int) -> Tensor
            return ops.roi_pool(input, rois, pool_size, 1.0)[0]
        return lambda x: script_fn(x, rois, pool_size)

    def expected_fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1,
                    device=None, dtype=torch.float64):
        """Pure-Python RoI max-pooling reference."""
        if device is None:
            device = torch.device("cpu")
        n_channels = x.size(1)
        y = torch.zeros(rois.size(0), n_channels, pool_h, pool_w, dtype=dtype, device=device)

        def get_slice(k, block):
            # The k-th of pool bins over a span of `block` pixels (fractional
            # bins round outwards, matching the op's quantisation).
            return slice(int(np.floor(k * block)), int(np.ceil((k + 1) * block)))

        for roi_idx, roi in enumerate(rois):
            batch_idx = int(roi[0])
            # Note: `x` inside this generator shadows the input tensor.
            j_begin, i_begin, j_end, i_end = (int(round(x.item() * spatial_scale)) for x in roi[1:])
            roi_x = x[batch_idx, :, i_begin:i_end + 1, j_begin:j_end + 1]

            roi_h, roi_w = roi_x.shape[-2:]
            bin_h = roi_h / pool_h
            bin_w = roi_w / pool_w

            for i in range(0, pool_h):
                for j in range(0, pool_w):
                    bin_x = roi_x[:, get_slice(i, bin_h), get_slice(j, bin_w)]
                    if bin_x.numel() > 0:
                        y[roi_idx, :, i, j] = bin_x.reshape(n_channels, -1).max(dim=1)[0]
        return y

    def _test_boxes_shape(self):
        self._helper_boxes_shape(ops.roi_pool)
class PSRoIPoolTester(RoIOpTester, unittest.TestCase):
    """Tests ops.PSRoIPool / ops.ps_roi_pool against a position-sensitive
    average-pooling reference."""
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
        return ops.PSRoIPool((pool_h, pool_w), 1)(x, rois)

    def get_script_fn(self, rois, pool_size):
        @torch.jit.script
        def script_fn(input, rois, pool_size):
            # type: (Tensor, Tensor, int) -> Tensor
            return ops.ps_roi_pool(input, rois, pool_size, 1.0)[0]
        return lambda x: script_fn(x, rois, pool_size)

    def expected_fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1,
                    device=None, dtype=torch.float64):
        """Pure-Python PS RoI pooling reference: each output channel averages
        one dedicated input channel per spatial bin."""
        if device is None:
            device = torch.device("cpu")
        n_input_channels = x.size(1)
        self.assertEqual(n_input_channels % (pool_h * pool_w), 0, "input channels must be divisible by ph * pw")
        n_output_channels = int(n_input_channels / (pool_h * pool_w))
        y = torch.zeros(rois.size(0), n_output_channels, pool_h, pool_w, dtype=dtype, device=device)

        def get_slice(k, block):
            return slice(int(np.floor(k * block)), int(np.ceil((k + 1) * block)))

        for roi_idx, roi in enumerate(rois):
            batch_idx = int(roi[0])
            j_begin, i_begin, j_end, i_end = (int(round(x.item() * spatial_scale)) for x in roi[1:])
            roi_x = x[batch_idx, :, i_begin:i_end + 1, j_begin:j_end + 1]

            roi_height = max(i_end - i_begin, 1)
            roi_width = max(j_end - j_begin, 1)
            bin_h, bin_w = roi_height / float(pool_h), roi_width / float(pool_w)

            for i in range(0, pool_h):
                for j in range(0, pool_w):
                    bin_x = roi_x[:, get_slice(i, bin_h), get_slice(j, bin_w)]
                    if bin_x.numel() > 0:
                        area = bin_x.size(-2) * bin_x.size(-1)
                        for c_out in range(0, n_output_channels):
                            # Position-sensitive channel mapping.
                            c_in = c_out * (pool_h * pool_w) + pool_w * i + j
                            t = torch.sum(bin_x[c_in, :, :])
                            y[roi_idx, c_out, i, j] = t / area
        return y

    def _test_boxes_shape(self):
        self._helper_boxes_shape(ops.ps_roi_pool)
def bilinear_interpolate(data, y, x, snap_border=False):
    """Reference bilinear interpolation of a 2D array at fractional (y, x).

    Corners that fall outside the array contribute nothing. With
    snap_border=True, coordinates within one pixel outside the array are
    first clamped to the nearest border row/column.
    """
    height, width = data.shape

    if snap_border:
        if -1 < y <= 0:
            y = 0
        elif height - 1 <= y < height:
            y = height - 1
        if -1 < x <= 0:
            x = 0
        elif width - 1 <= x < width:
            x = width - 1

    row0 = int(math.floor(y))
    col0 = int(math.floor(x))
    frac_y = y - row0
    frac_x = x - col0

    total = 0
    for col, weight_x in ((col0, 1 - frac_x), (col0 + 1, frac_x)):
        if not 0 <= col < width:
            continue
        for row, weight_y in ((row0, 1 - frac_y), (row0 + 1, frac_y)):
            if 0 <= row < height:
                total += weight_x * weight_y * data[row, col]
    return total
class RoIAlignTester(RoIOpTester, unittest.TestCase):
    """Tests ops.RoIAlign / ops.roi_align against a bilinear-sampling
    average reference, including the `aligned` half-pixel offset and a
    CUDA autocast pass."""
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False, **kwargs):
        return ops.RoIAlign((pool_h, pool_w), spatial_scale=spatial_scale,
                            sampling_ratio=sampling_ratio, aligned=aligned)(x, rois)

    def get_script_fn(self, rois, pool_size):
        @torch.jit.script
        def script_fn(input, rois, pool_size):
            # type: (Tensor, Tensor, int) -> Tensor
            return ops.roi_align(input, rois, pool_size, 1.0)[0]
        return lambda x: script_fn(x, rois, pool_size)

    def expected_fn(self, in_data, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False,
                    device=None, dtype=torch.float64):
        """Pure-Python RoI align reference: average of bilinear samples per bin."""
        if device is None:
            device = torch.device("cpu")
        n_channels = in_data.size(1)
        out_data = torch.zeros(rois.size(0), n_channels, pool_h, pool_w, dtype=dtype, device=device)

        # `aligned` shifts coordinates by half a pixel.
        offset = 0.5 if aligned else 0.

        for r, roi in enumerate(rois):
            batch_idx = int(roi[0])
            j_begin, i_begin, j_end, i_end = (x.item() * spatial_scale - offset for x in roi[1:])

            roi_h = i_end - i_begin
            roi_w = j_end - j_begin
            bin_h = roi_h / pool_h
            bin_w = roi_w / pool_w

            for i in range(0, pool_h):
                start_h = i_begin + i * bin_h
                # sampling_ratio <= 0 means an adaptive grid per bin.
                grid_h = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_h))
                for j in range(0, pool_w):
                    start_w = j_begin + j * bin_w
                    grid_w = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_w))

                    for channel in range(0, n_channels):

                        val = 0
                        for iy in range(0, grid_h):
                            y = start_h + (iy + 0.5) * bin_h / grid_h
                            for ix in range(0, grid_w):
                                x = start_w + (ix + 0.5) * bin_w / grid_w
                                val += bilinear_interpolate(in_data[batch_idx, channel, :, :], y, x, snap_border=True)
                        val /= grid_h * grid_w

                        out_data[r, channel, i, j] = val
        return out_data

    def _test_boxes_shape(self):
        self._helper_boxes_shape(ops.roi_align)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_roi_align_autocast(self):
        """Forward must keep the input dtype under CUDA autocast."""
        for x_dtype in (torch.float, torch.half):
            for rois_dtype in (torch.float, torch.half):
                with torch.cuda.amp.autocast():
                    self._test_forward(torch.device("cuda"), contiguous=False, x_dtype=x_dtype, rois_dtype=rois_dtype)
class PSRoIAlignTester(RoIOpTester, unittest.TestCase):
    """Tests ops.PSRoIAlign / ops.ps_roi_align against a position-sensitive
    bilinear-sampling reference."""
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
        return ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale,
                              sampling_ratio=sampling_ratio)(x, rois)

    def get_script_fn(self, rois, pool_size):
        @torch.jit.script
        def script_fn(input, rois, pool_size):
            # type: (Tensor, Tensor, int) -> Tensor
            return ops.ps_roi_align(input, rois, pool_size, 1.0)[0]
        return lambda x: script_fn(x, rois, pool_size)

    def expected_fn(self, in_data, rois, pool_h, pool_w, device, spatial_scale=1,
                    sampling_ratio=-1, dtype=torch.float64):
        """Pure-Python PS RoI align reference (always aligned: -0.5 offset)."""
        if device is None:
            device = torch.device("cpu")
        n_input_channels = in_data.size(1)
        self.assertEqual(n_input_channels % (pool_h * pool_w), 0, "input channels must be divisible by ph * pw")
        n_output_channels = int(n_input_channels / (pool_h * pool_w))
        out_data = torch.zeros(rois.size(0), n_output_channels, pool_h, pool_w, dtype=dtype, device=device)
        for r, roi in enumerate(rois):
            batch_idx = int(roi[0])
            j_begin, i_begin, j_end, i_end = (x.item() * spatial_scale - 0.5 for x in roi[1:])

            roi_h = i_end - i_begin
            roi_w = j_end - j_begin
            bin_h = roi_h / pool_h
            bin_w = roi_w / pool_w

            for i in range(0, pool_h):
                start_h = i_begin + i * bin_h
                grid_h = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_h))
                for j in range(0, pool_w):
                    start_w = j_begin + j * bin_w
                    grid_w = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_w))
                    for c_out in range(0, n_output_channels):
                        # Each output channel reads one dedicated input channel per bin.
                        c_in = c_out * (pool_h * pool_w) + pool_w * i + j
                        val = 0
                        for iy in range(0, grid_h):
                            y = start_h + (iy + 0.5) * bin_h / grid_h
                            for ix in range(0, grid_w):
                                x = start_w + (ix + 0.5) * bin_w / grid_w
                                val += bilinear_interpolate(in_data[batch_idx, c_in, :, :], y, x, snap_border=True)
                        val /= grid_h * grid_w

                        out_data[r, c_out, i, j] = val
        return out_data

    def _test_boxes_shape(self):
        self._helper_boxes_shape(ops.ps_roi_align)
class NMSTester(unittest.TestCase):
    """Tests ops.nms against a pure-Python reference and against CUDA."""
    def reference_nms(self, boxes, scores, iou_threshold):
        """
        Args:
            box_scores (N, 5): boxes in corner-form and probabilities.
            iou_threshold: intersection over union threshold.
        Returns:
             picked: a list of indexes of the kept boxes
        """
        picked = []
        _, indexes = scores.sort(descending=True)
        while len(indexes) > 0:
            # Keep the highest-scoring remaining box, then drop everything
            # overlapping it above the threshold.
            current = indexes[0]
            picked.append(current.item())
            if len(indexes) == 1:
                break
            current_box = boxes[current, :]
            indexes = indexes[1:]
            rest_boxes = boxes[indexes, :]
            iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1)
            indexes = indexes[iou <= iou_threshold]
        return torch.as_tensor(picked)

    def _create_tensors_with_iou(self, N, iou_thresh):
        """Build N random boxes whose last box barely exceeds iou_thresh with the first."""
        # force last box to have a pre-defined iou with the first box
        # let b0 be [x0, y0, x1, y1], and b1 be [x0, y0, x1 + d, y1],
        # then, in order to satisfy ops.iou(b0, b1) == iou_thresh,
        # we need to have d = (x1 - x0) * (1 - iou_thresh) / iou_thresh
        # Adjust the threshold upward a bit with the intent of creating
        # at least one box that exceeds (barely) the threshold and so
        # should be suppressed.
        boxes = torch.rand(N, 4) * 100
        boxes[:, 2:] += boxes[:, :2]
        boxes[-1, :] = boxes[0, :]
        x0, y0, x1, y1 = boxes[-1].tolist()
        iou_thresh += 1e-5
        boxes[-1, 2] += (x1 - x0) * (1 - iou_thresh) / iou_thresh
        scores = torch.rand(N)
        return boxes, scores

    def test_nms(self):
        """CPU nms must agree with the reference, and reject malformed inputs."""
        err_msg = 'NMS incompatible between CPU and reference implementation for IoU={}'
        for iou in [0.2, 0.5, 0.8]:
            boxes, scores = self._create_tensors_with_iou(1000, iou)
            keep_ref = self.reference_nms(boxes, scores, iou)
            keep = ops.nms(boxes, scores, iou)
            self.assertTrue(torch.allclose(keep, keep_ref), err_msg.format(iou))

        self.assertRaises(RuntimeError, ops.nms, torch.rand(4), torch.rand(3), 0.5)
        self.assertRaises(RuntimeError, ops.nms, torch.rand(3, 5), torch.rand(3), 0.5)
        self.assertRaises(RuntimeError, ops.nms, torch.rand(3, 4), torch.rand(3, 2), 0.5)
        self.assertRaises(RuntimeError, ops.nms, torch.rand(3, 4), torch.rand(4), 0.5)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_nms_cuda(self):
        """CUDA nms must keep the same boxes as CPU (allowing ties by score)."""
        err_msg = 'NMS incompatible between CPU and CUDA for IoU={}'
        for iou in [0.2, 0.5, 0.8]:
            boxes, scores = self._create_tensors_with_iou(1000, iou)
            r_cpu = ops.nms(boxes, scores, iou)
            r_cuda = ops.nms(boxes.cuda(), scores.cuda(), iou)
            is_eq = torch.allclose(r_cpu, r_cuda.cpu())
            if not is_eq:
                # if the indices are not the same, ensure that it's because the scores
                # are duplicate
                is_eq = torch.allclose(scores[r_cpu], scores[r_cuda.cpu()])
            self.assertTrue(is_eq, err_msg.format(iou))
class NewEmptyTensorTester(unittest.TestCase):
    """Checks for torchvision's _new_empty_tensor_op custom op."""
    def test_new_empty_tensor(self):
        # The op should produce a tensor of the requested shape while
        # preserving the source tensor's dtype.
        source = torch.tensor([2., 2.], requires_grad=True)
        requested_shape = [3, 3]
        result = torch.ops.torchvision._new_empty_tensor_op(source, requested_shape)
        assert result.size() == torch.Size(requested_shape)
        assert result.dtype == source.dtype
class DeformConvTester(OpTester, unittest.TestCase):
    def expected_fn(self, x, weight, offset, bias, stride=1, padding=0, dilation=1):
        """Reference (pure-Python) deformable convolution used as ground truth.

        Mirrors ops.deform_conv2d: each kernel tap is displaced by the learned
        per-position offset and the input is sampled bilinearly at the shifted
        location.  Intentionally naive (7 nested loops) for clarity.
        """
        stride_h, stride_w = _pair(stride)
        pad_h, pad_w = _pair(padding)
        dil_h, dil_w = _pair(dilation)
        weight_h, weight_w = weight.shape[-2:]
        n_batches, n_in_channels, in_h, in_w = x.shape
        n_out_channels = weight.shape[0]
        # Standard conv output-size formula.
        out_h = (in_h + 2 * pad_h - (dil_h * (weight_h - 1) + 1)) // stride_h + 1
        out_w = (in_w + 2 * pad_w - (dil_w * (weight_w - 1) + 1)) // stride_w + 1
        # Offset tensor carries (dy, dx) pairs per kernel tap per offset group.
        n_offset_grps = offset.shape[1] // (2 * weight_h * weight_w)
        in_c_per_offset_grp = n_in_channels // n_offset_grps
        n_weight_grps = n_in_channels // weight.shape[1]
        in_c_per_weight_grp = weight.shape[1]
        out_c_per_weight_grp = n_out_channels // n_weight_grps
        out = torch.zeros(n_batches, n_out_channels, out_h, out_w, device=x.device, dtype=x.dtype)
        for b in range(n_batches):
            for c_out in range(n_out_channels):
                for i in range(out_h):
                    for j in range(out_w):
                        for di in range(weight_h):
                            for dj in range(weight_w):
                                for c in range(in_c_per_weight_grp):
                                    # Map the output channel to its weight/offset group.
                                    weight_grp = c_out // out_c_per_weight_grp
                                    c_in = weight_grp * in_c_per_weight_grp + c
                                    offset_grp = c_in // in_c_per_offset_grp
                                    offset_idx = 2 * (offset_grp * (weight_h * weight_w) + di * weight_w + dj)
                                    # Fractional sampling coordinates = grid position + offset.
                                    pi = stride_h * i - pad_h + dil_h * di + offset[b, offset_idx, i, j]
                                    pj = stride_w * j - pad_w + dil_w * dj + offset[b, offset_idx + 1, i, j]
                                    out[b, c_out, i, j] += (weight[c_out, c, di, dj] *
                                                            bilinear_interpolate(x[b, c_in, :, :], pi, pj))
        out += bias.view(1, n_out_channels, 1, 1)
        return out
    def get_fn_args(self, device, contiguous):
        """Build a deterministic-shape random test case (inputs, weights, offsets, bias)."""
        batch_sz = 33
        n_in_channels = 6
        n_out_channels = 2
        n_weight_grps = 2
        n_offset_grps = 3
        stride = (2, 1)
        pad = (1, 0)
        dilation = (2, 1)
        stride_h, stride_w = stride
        pad_h, pad_w = pad
        dil_h, dil_w = dilation
        weight_h, weight_w = (3, 2)
        in_h, in_w = (5, 4)
        out_h = (in_h + 2 * pad_h - (dil_h * (weight_h - 1) + 1)) // stride_h + 1
        out_w = (in_w + 2 * pad_w - (dil_w * (weight_w - 1) + 1)) // stride_w + 1
        x = torch.rand(batch_sz, n_in_channels, in_h, in_w, device=device, dtype=self.dtype, requires_grad=True)
        offset = torch.randn(batch_sz, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w,
                             device=device, dtype=self.dtype, requires_grad=True)
        weight = torch.randn(n_out_channels, n_in_channels // n_weight_grps, weight_h, weight_w,
                             device=device, dtype=self.dtype, requires_grad=True)
        bias = torch.randn(n_out_channels, device=device, dtype=self.dtype, requires_grad=True)
        if not contiguous:
            # Permute-then-permute-back yields tensors with the same values/shape
            # but non-contiguous memory layout, to exercise that code path.
            x = x.permute(0, 1, 3, 2).contiguous().permute(0, 1, 3, 2)
            offset = offset.permute(1, 3, 0, 2).contiguous().permute(2, 0, 3, 1)
            weight = weight.permute(3, 2, 0, 1).contiguous().permute(2, 3, 1, 0)
        return x, weight, offset, bias, stride, pad, dilation
    def _test_forward(self, device, contiguous):
        """Compare ops.DeformConv2d output against the reference implementation."""
        x, _, offset, _, stride, padding, dilation = self.get_fn_args(device, contiguous)
        in_channels = 6
        out_channels = 2
        kernel_size = (3, 2)
        groups = 2
        layer = ops.DeformConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                 dilation=dilation, groups=groups).to(device=x.device, dtype=x.dtype)
        res = layer(x, offset)
        weight = layer.weight.data
        bias = layer.bias.data
        expected = self.expected_fn(x, weight, offset, bias, stride=stride, padding=padding, dilation=dilation)
        self.assertTrue(torch.allclose(res, expected), '\nres:\n{}\nexpected:\n{}'.format(res, expected))
        # test for wrong sizes
        with self.assertRaises(RuntimeError):
            wrong_offset = torch.rand_like(offset[:, :2])
            res = layer(x, wrong_offset)
    def _test_backward(self, device, contiguous):
        """Gradcheck the functional and scripted forms; cross-check CUDA grads vs CPU."""
        x, weight, offset, bias, stride, padding, dilation = self.get_fn_args(device, contiguous)
        def func(x_, offset_, weight_, bias_):
            return ops.deform_conv2d(x_, offset_, weight_, bias_, stride=stride, padding=padding, dilation=dilation)
        gradcheck(func, (x, offset, weight, bias), nondet_tol=1e-5)
        @torch.jit.script
        def script_func(x_, offset_, weight_, bias_, stride_, pad_, dilation_):
            # type: (Tensor, Tensor, Tensor, Tensor, Tuple[int, int], Tuple[int, int], Tuple[int, int]) -> Tensor
            return ops.deform_conv2d(x_, offset_, weight_, bias_, stride=stride_, padding=pad_, dilation=dilation_)
        gradcheck(lambda z, off, wei, bi: script_func(z, off, wei, bi, stride, padding, dilation),
                  (x, offset, weight, bias), nondet_tol=1e-5)
        # Test from https://github.com/pytorch/vision/issues/2598
        # Run on CUDA only
        if "cuda" in device.type:
            # compare grads computed on CUDA with grads computed on CPU
            true_cpu_grads = None
            init_weight = torch.randn(9, 9, 3, 3, requires_grad=True)
            img = torch.randn(8, 9, 1000, 110)
            offset = torch.rand(8, 2 * 3 * 3, 1000, 110)
            if not contiguous:
                img = img.permute(0, 1, 3, 2).contiguous().permute(0, 1, 3, 2)
                offset = offset.permute(1, 3, 0, 2).contiguous().permute(2, 0, 3, 1)
                weight = init_weight.permute(3, 2, 0, 1).contiguous().permute(2, 3, 1, 0)
            else:
                weight = init_weight
            for d in ["cpu", "cuda"]:
                out = ops.deform_conv2d(img.to(d), offset.to(d), weight.to(d), padding=1)
                out.mean().backward()
                if true_cpu_grads is None:
                    # First (CPU) pass records the reference gradient.
                    true_cpu_grads = init_weight.grad
                    self.assertTrue(true_cpu_grads is not None)
                else:
                    self.assertTrue(init_weight.grad is not None)
                    res_grads = init_weight.grad.to("cpu")
                    self.assertTrue(true_cpu_grads.allclose(res_grads))
class FrozenBNTester(unittest.TestCase):
    """Tests for ops.misc.FrozenBatchNorm2d (batch-norm with frozen statistics)."""
    def test_frozenbatchnorm2d_repr(self):
        num_features = 32
        t = ops.misc.FrozenBatchNorm2d(num_features)
        # Check integrity of object __repr__ attribute
        expected_string = f"FrozenBatchNorm2d({num_features})"
        self.assertEqual(t.__repr__(), expected_string)
    def test_frozenbatchnorm2d_eps(self):
        """FrozenBatchNorm2d must match eval-mode BatchNorm2d for eps=0 and eps>0."""
        sample_size = (4, 32, 28, 28)
        x = torch.rand(sample_size)
        state_dict = dict(weight=torch.rand(sample_size[1]),
                          bias=torch.rand(sample_size[1]),
                          running_mean=torch.rand(sample_size[1]),
                          running_var=torch.rand(sample_size[1]),
                          num_batches_tracked=torch.tensor(100))
        # Check that default eps is zero for backward-compatibility
        fbn = ops.misc.FrozenBatchNorm2d(sample_size[1])
        fbn.load_state_dict(state_dict, strict=False)
        bn = torch.nn.BatchNorm2d(sample_size[1], eps=0).eval()
        bn.load_state_dict(state_dict)
        # Difference is expected to fall in an acceptable range
        self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6))
        # Check computation for eps > 0
        fbn = ops.misc.FrozenBatchNorm2d(sample_size[1], eps=1e-5)
        fbn.load_state_dict(state_dict, strict=False)
        bn = torch.nn.BatchNorm2d(sample_size[1], eps=1e-5).eval()
        bn.load_state_dict(state_dict)
        self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6))
    def test_frozenbatchnorm2d_n_arg(self):
        """Ensure a warning is thrown when passing `n` kwarg
        (remove this when support of `n` is dropped)"""
        self.assertWarns(DeprecationWarning, ops.misc.FrozenBatchNorm2d, 32, eps=1e-5, n=32)
class BoxConversionTester(unittest.TestCase):
    """Tests the box-argument helpers shared by the region-pooling ops."""
    @staticmethod
    def _get_box_sequences():
        # Define here the argument type of `boxes` supported by region pooling operations
        # (a single [K, 5] tensor with batch index in column 0, or a list/tuple of
        # per-image [K, 4] tensors).
        box_tensor = torch.tensor([[0, 0, 0, 100, 100], [1, 0, 0, 100, 100]], dtype=torch.float)
        box_list = [torch.tensor([[0, 0, 100, 100]], dtype=torch.float),
                    torch.tensor([[0, 0, 100, 100]], dtype=torch.float)]
        box_tuple = tuple(box_list)
        return box_tensor, box_list, box_tuple
    def test_check_roi_boxes_shape(self):
        # Ensure common sequences of tensors are supported
        for box_sequence in self._get_box_sequences():
            self.assertIsNone(ops._utils.check_roi_boxes_shape(box_sequence))
    def test_convert_boxes_to_roi_format(self):
        # Ensure common sequences of tensors yield the same result
        ref_tensor = None
        for box_sequence in self._get_box_sequences():
            if ref_tensor is None:
                # The first sequence is already in ROI format; use it as reference.
                ref_tensor = box_sequence
            else:
                self.assertTrue(torch.equal(ref_tensor, ops._utils.convert_boxes_to_roi_format(box_sequence)))
class BoxAreaTester(unittest.TestCase):
    def test_box_area(self):
        """ops.box_area returns (x2-x1)*(y2-y1) per box; a degenerate box has area 0."""
        # A bounding box of area 10000 and a degenerate case
        box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0]], dtype=torch.float)
        expected = torch.tensor([10000, 0], dtype=torch.float)
        calc_area = ops.box_area(box_tensor)
        # Use unittest assertions (better failure messages than bare `assert`),
        # and torch.equal instead of the `... .item() is True` identity check.
        self.assertEqual(calc_area.size(), torch.Size([2]))
        self.assertEqual(calc_area.dtype, box_tensor.dtype)
        self.assertTrue(torch.equal(calc_area, expected))
class BoxIouTester(unittest.TestCase):
    def test_iou(self):
        """ops.box_iou gives IoU 1 on identical boxes, 0.25 for a quarter overlap, 0 when disjoint."""
        # Boxes to test Iou
        boxes1 = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=torch.float)
        boxes2 = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=torch.float)
        # Expected IoU matrix for these boxes
        expected = torch.tensor([[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]])
        out = ops.box_iou(boxes1, boxes2)
        # Check if all elements of tensor are as expected.
        # Replaces the fragile `(... < tol).item() is True` identity comparison.
        self.assertEqual(out.size(), torch.Size([3, 3]))
        tolerance = 1e-4
        self.assertLess((out - expected).abs().max().item(), tolerance)
class GenBoxIouTester(unittest.TestCase):
    def test_gen_iou(self):
        """ops.generalized_box_iou: matches IoU for overlapping boxes, negative for disjoint ones."""
        # Test Generalized IoU
        boxes1 = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=torch.float)
        boxes2 = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=torch.float)
        # Expected gIoU matrix for these boxes
        expected = torch.tensor([[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611],
                                 [-0.7778, -0.8611, 1.0]])
        out = ops.generalized_box_iou(boxes1, boxes2)
        # Check if all elements of tensor are as expected.
        # Replaces the fragile `(... < tol).item() is True` identity comparison.
        self.assertEqual(out.size(), torch.Size([3, 3]))
        tolerance = 1e-4
        self.assertLess((out - expected).abs().max().item(), tolerance)
# Allow running this test module directly (e.g. `python test_ops.py`).
if __name__ == '__main__':
    unittest.main()
| 42.626074 | 118 | 0.582059 |
907670b9ed70a8e297f24807d9ca5655fd24752f | 3,324 | py | Python | mysite/mysite/settings.py | gabiastudillo/SubirArchivosAlServidor | a879594a9d7643c46e3f148614f83910a9993ebc | [
"MIT"
] | null | null | null | mysite/mysite/settings.py | gabiastudillo/SubirArchivosAlServidor | a879594a9d7643c46e3f148614f83910a9993ebc | [
"MIT"
] | null | null | null | mysite/mysite/settings.py | gabiastudillo/SubirArchivosAlServidor | a879594a9d7643c46e3f148614f83910a9993ebc | [
"MIT"
] | null | null | null | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here — move it to an
# environment variable before any production deployment.
SECRET_KEY = 'uq%*6n(h8^&_qpn5(vt5f%jzwd7+f#)9l4talh+jm(w+0k2*1q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'crispy_forms',
    'core',
]
# Tell django-crispy-forms to render with Bootstrap 4 templates.
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates; app templates are
        # discovered automatically via APP_DIRS.
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR,'mysite/static')
]
# User-uploaded files are stored under <repo>/media.
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/' | 25.181818 | 91 | 0.695848 |
1917b0d6133acd3d7c7bf6a1f66ffcf18fd6c9d1 | 140 | py | Python | src/gevt/apps.py | Nurul-GC/gevt | d70ff13eea8504e9150b9416484eae498893ee76 | [
"AFL-3.0"
] | 1 | 2022-03-15T18:45:28.000Z | 2022-03-15T18:45:28.000Z | src/gevt/apps.py | Nurul-GC/gevt | d70ff13eea8504e9150b9416484eae498893ee76 | [
"AFL-3.0"
] | null | null | null | src/gevt/apps.py | Nurul-GC/gevt | d70ff13eea8504e9150b9416484eae498893ee76 | [
"AFL-3.0"
] | null | null | null | from django.apps import AppConfig
class GevtConfig(AppConfig):
    """Django application configuration for the `gevt` app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'gevt'
| 20 | 56 | 0.75 |
bfb5f3d9bee5ff3a49e0dd65ebcb771b9d680412 | 2,758 | py | Python | tools.py | nebblu/HyPk | e4b7f261c3156d6138a851295ae8b7259f95176f | [
"MIT"
] | null | null | null | tools.py | nebblu/HyPk | e4b7f261c3156d6138a851295ae8b7259f95176f | [
"MIT"
] | null | null | null | tools.py | nebblu/HyPk | e4b7f261c3156d6138a851295ae8b7259f95176f | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import sys
import copy
from scipy import interpolate
import os
import errno
# tools module
def takeRatio(d0,d1,logit=False):
    """Return the pointwise ratio d1/d0 of two (x, y) data sets.

    Both inputs are 2-column arrays ``[[x, y], ...]``.  The shorter-range set
    is kept as the evaluation grid and the other is interpolated onto it with
    a monotonic PCHIP interpolant; in all branches the returned y-column is
    d1 divided by d0.  With ``logit=True`` both axes are log10-transformed
    before interpolating.

    Returns a 2-column array ``[[x, d1/d0], ...]`` restricted to the overlap
    of the two x-ranges.
    """
    print ('d1'," ",len(d1),'\n')
    print ('d0',' ',len(d0),'\n')
    dif = len(d0)-len(d1)
    d0x=d0[:,0]
    d0y=d0[:,1]
    d1x=d1[:,0]
    d1y=d1[:,1]
    if logit==True:
        d0x=np.log10(d0x)
        d0y=np.log10(d0y)
        d1x=np.log10(d1x)
        # BUGFIX: previously transformed d1x a second time (d1y=np.log10(d1x)),
        # leaving d1's y-values as log-of-x and corrupting every logit ratio.
        d1y=np.log10(d1y)
    if dif > 0:
        ratioDat=np.zeros((len(d1),2))
        fInterp = interpolate.pchip(d0x,d0y)    #!!!Important pchip = monotonic interpolation
        condition=d1x<=max(d0x)
        ratio=d1y[condition]/fInterp(d1x[condition])
        ratioDat=ratioDat[:len(ratio),:]
        ratioDat=np.transpose([d1x[condition],ratio])
        print (str(dif)+' elements deleted')
    elif dif < 0:
        ratioDat=np.zeros((len(d0),2))
        fInterp = interpolate.pchip(d1x,d1y)
        condition=d0x<=max(d1x)
        print (d0x[condition],'\n',d1x)
        ratio=fInterp(d0x[condition])/d0y[condition]
        ratioDat=ratioDat[:len(ratio),:]
        ratioDat=np.transpose([d0x[condition],ratio])
        print (str(dif)+' elements deleted')
    else:
        ratioDat=np.zeros((len(d0),2))
        fInterp = interpolate.pchip(d1x,d1y)
        condition=d0x<=max(d1x)
        ratio=fInterp(d0x[condition])/d0y[condition]  #in all cases data d1 is divided through data d0
        ratioDat=ratioDat[:len(ratio),:]
        ratioDat=np.transpose([d0x[condition],ratio])
        print ('equal size')
    return ratioDat
def derivatives(x,y,order=1):
    """Midpoint finite-difference derivative(s) of y with respect to x.

    order=1 returns (x_mid, dy/dx); order=2 additionally returns the second
    derivative evaluated on the midpoints of the midpoints.  Any other order
    prints an error and returns None.
    """
    slope = np.diff(y) / np.diff(x)
    midpoints = (x[1:] + x[:-1]) / 2
    if order == 2:
        mid2, slope2 = derivatives(midpoints, slope, 1)
        return midpoints, slope, mid2, slope2
    if order == 1:
        return midpoints, slope
    print ("error, function does not return "+str(order)+"order derivatives")
    return None
def calcMaxLim(x,y,xmin,maxArra,minArra):
    """Append the max and min of y over the region x >= xmin to the two
    accumulator lists (mutated in place) and return them."""
    pairs = np.transpose([x, y])
    region = pairs[pairs[:, 0] >= xmin]
    maxArra.append(max(region[:, 1]))
    minArra.append(min(region[:, 1]))
    return maxArra, minArra
def mkdirp(dirname):
    """Create `dirname`, silently succeeding if it already exists
    (like `mkdir -p` for a single path component)."""
    try:
        os.mkdir(dirname)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        raise
def fileexists(filename):
    """Atomically test for `filename`, creating it when absent.

    Returns True if the file already existed, False otherwise.  Note the
    side effect: when the file does not exist it is created (O_CREAT|O_EXCL
    makes the check-and-create race-free); callers rely on this behaviour.
    """
    flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    try:
        file_handle = os.open(filename, flags)
    except OSError as e:
        if e.errno == errno.EEXIST:  # Failed as the file already exists.
            return True
        else:  # Something unexpected went wrong so reraise the exception.
            raise
    else:
        # BUGFIX: close the descriptor — it was previously leaked on every
        # call that created the file.
        os.close(file_handle)
        return False
| 29.031579 | 101 | 0.602611 |
1d73d775d1c421b7ed9faffcd05c04610b68913e | 2,581 | py | Python | jcasts/podcasts/tests/test_emails.py | danjac/radiofeed | 06e2b10396345f0afcf7c1dfee67c41fbff153e1 | [
"MIT"
] | 5 | 2020-12-23T20:42:26.000Z | 2021-02-20T01:37:47.000Z | jcasts/podcasts/tests/test_emails.py | danjac/radiofeed | 06e2b10396345f0afcf7c1dfee67c41fbff153e1 | [
"MIT"
] | null | null | null | jcasts/podcasts/tests/test_emails.py | danjac/radiofeed | 06e2b10396345f0afcf7c1dfee67c41fbff153e1 | [
"MIT"
] | 3 | 2020-12-25T12:31:58.000Z | 2021-02-13T11:49:49.000Z | from jcasts.episodes.factories import (
AudioLogFactory,
EpisodeFactory,
FavoriteFactory,
QueueItemFactory,
)
from jcasts.podcasts.emails import send_recommendations_email
from jcasts.podcasts.factories import FollowFactory, RecommendationFactory
class TestSendRecommendationEmail:
    """pytest tests for send_recommendations_email.

    Relies on the project's `user` and `mailoutbox` fixtures and factory_boy
    factories; the exact order of factory calls builds the DB state each test
    asserts against, so it must not be reordered.
    """
    def test_send_if_no_recommendations_or_episodes(self, user, mailoutbox):
        """If no recommendations, don't send."""
        send_recommendations_email(user)
        assert len(mailoutbox) == 0
    def test_send_if_sufficient_episodes(self, user, mailoutbox):
        # Three followed podcasts, each with user interaction (listened /
        # queued / favorited) and fresh episodes -> email should be sent.
        first = FollowFactory(user=user).podcast
        second = FollowFactory(user=user).podcast
        third = FollowFactory(user=user).podcast
        # Unrelated episode on a podcast the user does not follow.
        EpisodeFactory()
        first_episode = EpisodeFactory(podcast=first)
        second_episode = EpisodeFactory(podcast=second)
        third_episode = EpisodeFactory(podcast=third)
        EpisodeFactory(podcast=first)
        AudioLogFactory(user=user, episode=first_episode)
        QueueItemFactory(user=user, episode=second_episode)
        FavoriteFactory(user=user, episode=third_episode)
        EpisodeFactory(podcast=first)
        EpisodeFactory(podcast=second)
        EpisodeFactory(podcast=third)
        send_recommendations_email(user)
        assert len(mailoutbox) == 1
        assert mailoutbox[0].to == [user.email]
    def test_send_if_sufficient_recommendations(self, user, mailoutbox):
        # Three followed podcasts each carrying a recommendation -> email sent
        # and the recommended podcasts recorded against the user.
        first = FollowFactory(user=user).podcast
        second = FollowFactory(user=user).podcast
        third = FollowFactory(user=user).podcast
        RecommendationFactory(podcast=first)
        RecommendationFactory(podcast=second)
        RecommendationFactory(podcast=third)
        send_recommendations_email(user)
        assert len(mailoutbox) == 1
        assert mailoutbox[0].to == [user.email]
        assert user.recommended_podcasts.count() == 3
    def test_send_if_sufficient_recommendations_and_episodes(self, user, mailoutbox):
        # Mixed case: recommendations plus new episodes -> still exactly one
        # email, with three recommended podcasts recorded.
        first = FollowFactory(user=user).podcast
        second = FollowFactory(user=user).podcast
        third = FollowFactory(user=user).podcast
        RecommendationFactory(podcast=first)
        RecommendationFactory(podcast=second)
        RecommendationFactory(podcast=third)
        EpisodeFactory(podcast=first)
        EpisodeFactory(podcast=second)
        EpisodeFactory(podcast=third)
        send_recommendations_email(user)
        assert len(mailoutbox) == 1
        assert mailoutbox[0].to == [user.email]
        assert user.recommended_podcasts.count() == 3
| 32.2625 | 85 | 0.70864 |
09abebeeeb083abfdb72bb94df5a2279ef4076e1 | 11,367 | py | Python | hearthbreaker/cards/minions/druid.py | jirenz/CS229_Project | 78059b2ab813ba347824f3e65b3859ad5dd33cfe | [
"MIT"
] | null | null | null | hearthbreaker/cards/minions/druid.py | jirenz/CS229_Project | 78059b2ab813ba347824f3e65b3859ad5dd33cfe | [
"MIT"
] | null | null | null | hearthbreaker/cards/minions/druid.py | jirenz/CS229_Project | 78059b2ab813ba347824f3e65b3859ad5dd33cfe | [
"MIT"
] | null | null | null | from hearthbreaker.cards.base import MinionCard, ChoiceCard
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import Give, Damage, Silence, Transform, Draw, Heal, \
Summon, AddCard, GiveManaCrystal, Remove, Kill
from hearthbreaker.tags.base import Choice, Buff, Effect, Battlecry, Deathrattle, ActionTag
from hearthbreaker.tags.card_source import CardList, ObjectSource
from hearthbreaker.tags.condition import IsType, GreaterThan
from hearthbreaker.tags.event import Damaged, TurnEnded
from hearthbreaker.tags.selector import CharacterSelector, MinionSelector, SelfSelector, UserPicker, BothPlayer, \
PlayerSelector, HeroSelector, Count, DeadMinionSelector
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.tags.status import ChangeAttack, ChangeHealth, Taunt, ManaChange
from hearthbreaker.cards.spells.neutral import spare_part_list
# Choice marker for Keeper of the Grove's damage option.
class Moonfire(ChoiceCard):
    def __init__(self):
        super().__init__("Moonfire", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="moonfire_keeper")
# Choice marker for Keeper of the Grove's silence option.
class Dispel(ChoiceCard):
    def __init__(self):
        super().__init__("Dispel", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# 2/4 for 4: choose to deal 2 damage to any character or silence a minion.
class KeeperOfTheGrove(MinionCard):
    def __init__(self):
        super().__init__("Keeper of the Grove", 4, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
            Choice(Moonfire(), Damage(2), CharacterSelector(players=BothPlayer(), picker=UserPicker())),
            Choice(Dispel(), Silence(), MinionSelector(players=BothPlayer(), picker=UserPicker()))
        ])
    def create_minion(self, player):
        return Minion(2, 4)
# Transformed form of Druid of the Claw: 4/4 beast with charge.
class CatDruid(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Claw (cat)")
    def create_minion(self, p):
        return Minion(4, 4, charge=True)
# Transformed form of Druid of the Claw: 4/6 beast with taunt.
class BearDruid(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Claw (bear)")
    def create_minion(self, p):
        return Minion(4, 6, taunt=True)
# Choice markers for Druid of the Claw's two transform options.
class CatForm(ChoiceCard):
    def __init__(self):
        super().__init__("Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class BearForm(ChoiceCard):
    def __init__(self):
        super().__init__("Bear Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# 4/4 for 5 that transforms itself into the cat (charge) or bear (taunt) form.
class DruidOfTheClaw(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
            Choice(CatForm(), Transform(CatDruid()), SelfSelector()),
            Choice(BearForm(), Transform(BearDruid()), SelfSelector())
        ])
    def create_minion(self, player):
        return Minion(4, 4)
# Choice markers for Ancient of Lore: heal 5 or draw 3.
class AncientSecrets(ChoiceCard):
    def __init__(self):
        super().__init__("Ancient Secrets", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientTeachings(ChoiceCard):
    def __init__(self):
        super().__init__("Ancient Teachings", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# 5/5 for 7: choose to restore 5 health to your hero or draw 3 cards.
class AncientOfLore(MinionCard):
    def __init__(self):
        super().__init__("Ancient of Lore", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
            Choice(AncientSecrets(), Heal(5), HeroSelector()),
            Choice(AncientTeachings(), Draw(3), PlayerSelector())
        ])
    def create_minion(self, player):
        return Minion(5, 5)
# Choice markers for Ancient of War: +5 health & taunt, or +5 attack.
class Health(ChoiceCard):
    def __init__(self):
        super().__init__("Rooted", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class Attack(ChoiceCard):
    def __init__(self):
        super().__init__("Uproot", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# 5/5 for 7 that buffs itself according to the chosen option.
class AncientOfWar(MinionCard):
    def __init__(self):
        super().__init__("Ancient of War", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
            Choice(Health(), Give([Buff(ChangeHealth(5)), Buff(Taunt())]), SelfSelector()),
            Choice(Attack(), Give([Buff(ChangeAttack(5))]), SelfSelector()),
        ])
    def create_minion(self, player):
        return Minion(5, 5)
# Vanilla 8/8 taunt for 8.
class IronbarkProtector(MinionCard):
    def __init__(self):
        super().__init__("Ironbark Protector", 8, CHARACTER_CLASS.DRUID,
                         CARD_RARITY.COMMON)
    def create_minion(self, player):
        return Minion(8, 8, taunt=True)
# Uncollectible Treant variants summoned/produced by other druid cards;
# ref_name disambiguates the identically-named tokens.
class TauntTreant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="Treant (taunt)")
    def create_minion(self, p):
        return Minion(2, 2, taunt=True)
class Treant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
    def create_minion(self, _):
        return Minion(2, 2)
# Charge Treant (Force of Nature): dies at end of turn.
class ChargeTreant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, ref_name="Treant (charge)")
    def create_minion(self, player):
        return Minion(2, 2, charge=True, effects=[Effect(TurnEnded(), ActionTag(Kill(), SelfSelector()))])
class PoisonSeedsTreant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False,
                         ref_name="Treant (poison seeds)")
    def create_minion(self, player):
        return Minion(2, 2)
# 3/2 beast token (summoned by Power of the Wild).
class Panther(MinionCard):
    def __init__(self):
        super().__init__("Panther", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST)
    def create_minion(self, _):
        return Minion(3, 2, MINION_TYPE.BEAST)
# Choice markers for Cenarius' two options.
class IncreaseStats(ChoiceCard):
    def __init__(self):
        super().__init__("Give your other minions +2/+2 and taunt", 0,
                         CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
class SummonTreants(ChoiceCard):
    def __init__(self):
        super().__init__("Summon two 2/2 Treants with taunt", 0,
                         CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
# 5/8 legendary for 9: buff other friendly minions or summon two taunt Treants.
class Cenarius(MinionCard):
    def __init__(self):
        super().__init__("Cenarius", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, choices=[
            Choice(IncreaseStats(), Give([Buff(ChangeAttack(2)),
                                          Buff(ChangeHealth(2)),
                                          Buff(Taunt())]), MinionSelector()),
            Choice(SummonTreants(), Summon(TauntTreant(), 2), PlayerSelector())
        ])
    def create_minion(self, player):
        return Minion(5, 8)
# Choice markers for Anodized Robo Cub: +1 attack or +1 health.
class AttackMode(ChoiceCard):
    def __init__(self):
        super().__init__("Attack Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class TankMode(ChoiceCard):
    def __init__(self):
        super().__init__("Tank Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# 2/2 mech taunt for 2 that buffs itself per the chosen mode.
class AnodizedRoboCub(MinionCard):
    def __init__(self):
        super().__init__("Anodized Robo Cub", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
                         minion_type=MINION_TYPE.MECH,
                         choices=[Choice(AttackMode(), Give([Buff(ChangeAttack(1))]), SelfSelector()),
                                  Choice(TankMode(), Give([Buff(ChangeHealth(1))]), SelfSelector())])
    def create_minion(self, player):
        return Minion(2, 2, taunt=True)
# 7/6 mech: adds a random Spare Part card to hand whenever it takes damage.
class MechBearCat(MinionCard):
    def __init__(self):
        super().__init__("Mech-Bear-Cat", 6, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)
    def create_minion(self, player):
        return Minion(7, 6, effects=[Effect(Damaged(),
                                            ActionTag(AddCard(CardList(spare_part_list)), PlayerSelector()))])
# Transformed 7/7 form of Druid of the Fang.
class CobraForm(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Fang (cobra)")
    def create_minion(self, player):
        return Minion(7, 7)
# 4/4 for 5; becomes a 7/7 if you already control a beast when played.
class DruidOfTheFang(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
                         battlecry=Battlecry(Transform(CobraForm()), SelfSelector(),
                                             GreaterThan(Count(MinionSelector(IsType(MINION_TYPE.BEAST))), value=0)))
    def create_minion(self, player):
        return Minion(4, 4)
# 9/7 legendary beast: deathrattle shuffles itself back into the deck
# (Remove keeps the dying copy out of the graveyard/board).
class Malorne(MinionCard):
    def __init__(self):
        super().__init__("Malorne", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, minion_type=MINION_TYPE.BEAST)
    def create_minion(self, player):
        return Minion(9, 7, deathrattle=[Deathrattle(AddCard(ObjectSource(SelfSelector()),
                                                             add_to_deck=True), PlayerSelector()),
                                         Deathrattle(Remove(), SelfSelector())])
# Choice markers for Grove Tender: both players gain a mana crystal or draw.
class GiftOfMana(ChoiceCard):
    def __init__(self):
        super().__init__("Gift of Mana", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
class GiftOfCards(ChoiceCard):
    def __init__(self):
        super().__init__("Gift of Cards", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
# 2/4 for 3: symmetric effect applied to both players.
class GroveTender(MinionCard):
    def __init__(self):
        super().__init__("Grove Tender", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
            Choice(GiftOfMana(), GiveManaCrystal(), PlayerSelector(players=BothPlayer())),
            Choice(GiftOfCards(), Draw(), PlayerSelector(players=BothPlayer()))
        ])
    def create_minion(self, player):
        return Minion(2, 4)
# Transformed forms of Druid of the Flame: 5/2 cat or 2/5 bird.
class FlameCat(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Flame (cat)")
    def create_minion(self, p):
        return Minion(5, 2)
class FlameBird(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Flame (bird)")
    def create_minion(self, p):
        return Minion(2, 5)
# Choice markers for the two transform options.
class FlameCatForm(ChoiceCard):
    def __init__(self):
        super().__init__("Flame Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class FlameBirdForm(ChoiceCard):
    def __init__(self):
        super().__init__("Flame Bird Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# 2/2 for 3 that transforms itself into the cat or bird form.
class DruidOfTheFlame(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
            Choice(FlameCatForm(), Transform(FlameCat()), SelfSelector()),
            Choice(FlameBirdForm(), Transform(FlameBird()), SelfSelector())
        ])
    def create_minion(self, player):
        return Minion(2, 2)
# 7/8 taunt for 9; costs (1) less for each minion that died this turn.
class VolcanicLumberer(MinionCard):
    def __init__(self):
        super().__init__("Volcanic Lumberer", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE,
                         buffs=[Buff(ManaChange(Count(DeadMinionSelector(players=BothPlayer())), -1))])
    def create_minion(self, player):
        return Minion(7, 8, taunt=True)
| 35.411215 | 118 | 0.661652 |
b3cf946f7881c2fcc2ed6b1130f026b9a3c6edb8 | 323 | py | Python | course_planner/asgi.py | dragonbone81/bobcat-courses-backend | d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c | [
"MIT"
] | 3 | 2018-10-25T12:41:33.000Z | 2019-09-19T19:47:39.000Z | course_planner/asgi.py | dragonbone81/bobcat-courses-backend | d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c | [
"MIT"
] | 22 | 2018-04-01T02:43:01.000Z | 2022-03-11T23:15:55.000Z | course_planner/asgi.py | dragonbone81/cse120 | d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c | [
"MIT"
] | 1 | 2019-09-19T19:48:59.000Z | 2019-09-19T19:48:59.000Z | """
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import django
from channels.routing import get_default_application
# Point Django at the project settings before setup; setdefault lets an
# externally-set DJANGO_SETTINGS_MODULE win.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "course_planner.settings")
django.setup()
# ASGI callable served by the channels/ASGI server.
application = get_default_application()
| 24.846154 | 74 | 0.823529 |
9a3b5c161e95df8471ec5f8cd5350b20cd09b45e | 1,963 | py | Python | old_research/OpticalFilmOptimal.py | PeterJaq/optical_film_toolbox | 0e2d2bfa5f1f93d405a2f25ee50e51771be777a5 | [
"Apache-2.0"
] | 4 | 2020-07-05T12:35:45.000Z | 2022-03-17T18:43:04.000Z | old_research/OpticalFilmOptimal.py | PeterJaq/optical_film_toolbox | 0e2d2bfa5f1f93d405a2f25ee50e51771be777a5 | [
"Apache-2.0"
] | null | null | null | old_research/OpticalFilmOptimal.py | PeterJaq/optical_film_toolbox | 0e2d2bfa5f1f93d405a2f25ee50e51771be777a5 | [
"Apache-2.0"
] | null | null | null | import optical_model_env
import deepqnetwork
import csv
def run_maze():
    """Train the DQN agent on the optical-film environment for 500 episodes.

    Relies on the module-level globals ``env`` (the simulation environment)
    and ``RL`` (the DeepQNetwork agent) created in the ``__main__`` block.
    Per-step mean absorption values are collected per episode and written to
    ``answer_DQN.csv`` when training finishes.
    """
    step = 0
    write_temp = []
    write = []
    max_abs = 0
    for episode in range(500):
        # initial observation
        observation, mean_abs = env.init_Device()
        print("The init Device is: %s abs:%f" % (observation, mean_abs))
        while True:
            # RL choose action based on observation
            action = RL.choose_action(observation)
            # RL take action and get next observation and reward
            observation_, reward, done, mean_abs = env.run_simulate(action)
            RL.store_transition(observation, action, reward, observation_)
            # Start learning only after the replay memory has warmed up.
            if (step > 200) and (step % 5 == 0):
                RL.learn()
            # swap observation
            observation = observation_
            # break while loop when end of this episode
            if done:
                break
            step += 1
            write_temp.append(mean_abs)
            print('%d step, the final observation:%s, abs:%f, reward:%f' % (step, observation, mean_abs, reward))
            if mean_abs > max_abs:
                max_abs = mean_abs
        write.append(write_temp)
        write_temp = []
        print("The best result is : %f" % max_abs)
    # end of game
    # BUGFIX: the CSV file was opened but never closed; use a context manager
    # so it is flushed and closed (newline='' is the documented csv usage).
    with open('answer_DQN.csv', 'w', newline='') as fileobj:
        writer = csv.writer(fileobj)
        for row in write:
            writer.writerow(row)
    #env.destroy()
if __name__ == "__main__":
# maze game
env = optical_model_env.optical_film_env()
RL = deepqnetwork.DeepQNetwork(env.n_actions, env.n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=200,
memory_size=2000,
output_graph=True
)
run_maze()
RL.plot_cost() | 28.449275 | 114 | 0.53082 |
01ab1c86d5d0b8bcdf1938e8e04ac16923851388 | 25 | py | Python | lib/python3.7/site-packages/gatco/views.py | teomoney1999/ACT_gatco_project | a804a6348efeab90f3114606cfbc73aaebab63e1 | [
"MIT"
] | 1 | 2019-08-04T13:12:09.000Z | 2019-08-04T13:12:09.000Z | lib/python3.7/site-packages/gatco/views.py | teomoney1999/ACT_gatco_project | a804a6348efeab90f3114606cfbc73aaebab63e1 | [
"MIT"
] | 2 | 2019-04-03T02:59:28.000Z | 2019-04-03T03:00:43.000Z | lib/python3.7/site-packages/gatco/views.py | teomoney1999/ACT_gatco_project | a804a6348efeab90f3114606cfbc73aaebab63e1 | [
"MIT"
] | 4 | 2019-04-01T23:58:19.000Z | 2021-07-12T03:10:09.000Z | from sanic.views import * | 25 | 25 | 0.8 |
eec83aba72e594f5a6d0a92bfd1456ba5535d5e4 | 344 | py | Python | lex2/lexer/__init__.py | DeltaRazero/liblexer2-python3 | 43a1a63ee005da8a6936665cb27f4ced8158ed6c | [
"Zlib"
] | 1 | 2020-10-31T13:14:45.000Z | 2020-10-31T13:14:45.000Z | lex2/lexer/__init__.py | DeltaRazero/liblexer2-python3 | 43a1a63ee005da8a6936665cb27f4ced8158ed6c | [
"Zlib"
] | 4 | 2020-10-28T16:21:22.000Z | 2020-11-09T21:02:33.000Z | lex2/lexer/__init__.py | DeltaRazero/liblexer2-python3 | 43a1a63ee005da8a6936665cb27f4ced8158ed6c | [
"Zlib"
] | null | null | null | """Components of lexer implementations."""
'''
zlib License
(C) 2020-2022 DeltaRazero
All rights reserved.
'''
# ***************************************************************************************
# Core
from ._base_lexer import BaseLexer
from ._profiler import ProfilerLexer
# Implementations
from ._generic_lexer import GenericLexer
| 19.111111 | 89 | 0.572674 |
bcb651f45bf1dfa64c78fe130f108cd64720b6a9 | 9,818 | py | Python | tests/wallet/test_wallet_store.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | 1 | 2022-03-02T12:36:42.000Z | 2022-03-02T12:36:42.000Z | tests/wallet/test_wallet_store.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | null | null | null | tests/wallet/test_wallet_store.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | null | null | null | # TODO: write tests for other stores
# import asyncio
# from pathlib import Path
# from secrets import token_bytes
# import aiosqlite
# import pytest
# from staidelta.util.ints import uint32, uint64, uint128
# from staidelta.wallet.wallet_coin_record import WalletCoinRecord
# from staidelta.wallet.util.wallet_types import WalletType
# from staidelta.types.coin import Coin
#
#
# @pytest.fixture(scope="module")
# def event_loop():
# loop = asyncio.get_event_loop()
# yield loop
#
#
# class TestWalletStore:
# @pytest.mark.asyncio
# async def test_store(self):
# db_filename = Path("blockchain_wallet_store_test.db")
#
# if db_filename.exists():
# db_filename.unlink()
#
# db_connection = await aiosqlite.connect(db_filename)
# store = await WalletStore.create(db_connection)
# try:
# coin_1 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_2 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_3 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_4 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# record_replaced = WalletCoinRecord(coin_1, uint32(8), uint32(0),
# False, True, WalletType.STANDARD_WALLET, 0)
# record_1 = WalletCoinRecord(coin_1, uint32(4), uint32(0), False,
# True, WalletType.STANDARD_WALLET, 0)
# record_2 = WalletCoinRecord(coin_2, uint32(5), uint32(0),
# False, True, WalletType.STANDARD_WALLET, 0)
# record_3 = WalletCoinRecord(
# coin_3,
# uint32(5),
# uint32(10),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 0,
# )
# record_4 = WalletCoinRecord(
# coin_4,
# uint32(5),
# uint32(15),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 0,
# )
#
# # Test add (replace) and get
# assert await store.get_coin_record(coin_1.name()) is None
# await store.add_coin_record(record_replaced)
# await store.add_coin_record(record_1)
# await store.add_coin_record(record_2)
# await store.add_coin_record(record_3)
# await store.add_coin_record(record_4)
# assert await store.get_coin_record(coin_1.name()) == record_1
#
#             # Test persistence
# await db_connection.close()
# db_connection = await aiosqlite.connect(db_filename)
# store = await WalletStore.create(db_connection)
# assert await store.get_coin_record(coin_1.name()) == record_1
#
# # Test set spent
# await store.set_spent(coin_1.name(), uint32(12))
# assert (await store.get_coin_record(coin_1.name())).spent
# assert (await store.get_coin_record(coin_1.name())).spent_block_index == 12
#
# # No coins at height 3
# assert len(await store.get_unspent_coins_at_height(3)) == 0
# assert len(await store.get_unspent_coins_at_height(4)) == 1
# assert len(await store.get_unspent_coins_at_height(5)) == 4
# assert len(await store.get_unspent_coins_at_height(11)) == 3
# assert len(await store.get_unspent_coins_at_height(12)) == 2
# assert len(await store.get_unspent_coins_at_height(15)) == 1
# assert len(await store.get_unspent_coins_at_height(16)) == 1
# assert len(await store.get_unspent_coins_at_height()) == 1
#
# assert len(await store.get_unspent_coins_for_wallet(0)) == 1
# assert len(await store.get_unspent_coins_for_wallet(1)) == 0
#
# coin_5 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# record_5 = WalletCoinRecord(
# coin_5,
# uint32(5),
# uint32(15),
# False,
# False,
# WalletType.STANDARD_WALLET,
# 1,
# )
# await store.add_coin_record(record_5)
# assert len(await store.get_unspent_coins_for_wallet(1)) == 1
#
# assert len(await store.get_spendable_for_index(100, 1)) == 1
# assert len(await store.get_spendable_for_index(100, 0)) == 1
# assert len(await store.get_spendable_for_index(0, 0)) == 0
#
# coin_6 = Coin(token_bytes(32), coin_4.puzzle_hash, uint64(12312))
# await store.add_coin_record(record_5)
# record_6 = WalletCoinRecord(
# coin_6,
# uint32(5),
# uint32(15),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 2,
# )
# await store.add_coin_record(record_6)
# assert len(await store.get_coin_records_by_puzzle_hash(record_6.coin.puzzle_hash)) == 2 # 4 and 6
# assert len(await store.get_coin_records_by_puzzle_hash(token_bytes(32))) == 0
#
# assert await store.get_coin_record_by_coin_id(coin_6.name()) == record_6
# assert await store.get_coin_record_by_coin_id(token_bytes(32)) is None
#
# # BLOCKS
# assert len(await store.get_lca_path()) == 0
#
# # NOT lca block
# br_1 = BlockRecord(
# token_bytes(32),
# token_bytes(32),
# uint32(0),
# uint128(100),
# None,
# None,
# None,
# None,
# uint64(0),
# )
# assert await store.get_block_record(br_1.header_hash) is None
# await store.add_block_record(br_1, False)
# assert len(await store.get_lca_path()) == 0
# assert await store.get_block_record(br_1.header_hash) == br_1
#
# # LCA genesis
# await store.add_block_record(br_1, True)
# assert await store.get_block_record(br_1.header_hash) == br_1
# assert len(await store.get_lca_path()) == 1
# assert (await store.get_lca_path())[br_1.header_hash] == br_1
#
# br_2 = BlockRecord(
# token_bytes(32),
# token_bytes(32),
# uint32(1),
# uint128(100),
# None,
# None,
# None,
# None,
# uint64(0),
# )
# await store.add_block_record(br_2, False)
# assert len(await store.get_lca_path()) == 1
# await store.add_block_to_path(br_2.header_hash)
# assert len(await store.get_lca_path()) == 2
# assert (await store.get_lca_path())[br_2.header_hash] == br_2
#
# br_3 = BlockRecord(
# token_bytes(32),
# token_bytes(32),
# uint32(2),
# uint128(100),
# None,
# None,
# None,
# None,
# uint64(0),
# )
# await store.add_block_record(br_3, True)
# assert len(await store.get_lca_path()) == 3
# await store.remove_block_records_from_path(1)
# assert len(await store.get_lca_path()) == 2
#
# await store.rollback_lca_to_block(0)
# assert len(await store.get_unspent_coins_at_height()) == 0
#
# coin_7 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_8 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_9 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_10 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# record_7 = WalletCoinRecord(coin_7, uint32(0), uint32(1), True, False, WalletType.STANDARD_WALLET, 1)
# record_8 = WalletCoinRecord(coin_8, uint32(1), uint32(2), True, False, WalletType.STANDARD_WALLET, 1)
# record_9 = WalletCoinRecord(coin_9, uint32(2), uint32(3), True, False, WalletType.STANDARD_WALLET, 1)
# record_10 = WalletCoinRecord(
# coin_10,
# uint32(3),
# uint32(4),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 1,
# )
#
# await store.add_coin_record(record_7)
# await store.add_coin_record(record_8)
# await store.add_coin_record(record_9)
# await store.add_coin_record(record_10)
# assert len(await store.get_unspent_coins_at_height(0)) == 1
# assert len(await store.get_unspent_coins_at_height(1)) == 1
# assert len(await store.get_unspent_coins_at_height(2)) == 1
# assert len(await store.get_unspent_coins_at_height(3)) == 1
# assert len(await store.get_unspent_coins_at_height(4)) == 0
#
# await store.add_block_record(br_2, True)
# await store.add_block_record(br_3, True)
#
# await store.rollback_lca_to_block(1)
#
# assert len(await store.get_unspent_coins_at_height(0)) == 1
# assert len(await store.get_unspent_coins_at_height(1)) == 1
# assert len(await store.get_unspent_coins_at_height(2)) == 1
# assert len(await store.get_unspent_coins_at_height(3)) == 1
# assert len(await store.get_unspent_coins_at_height(4)) == 1
#
# except AssertionError:
# await db_connection.close()
# raise
# await db_connection.close()
| 42.502165 | 115 | 0.555714 |
522eff6a8d32b02a0b8d9c5ec7fc22ea19419fc2 | 2,870 | py | Python | ansible/lib/ansible/modules/extras/windows/win_environment.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/windows/win_environment.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/windows/win_environment.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_environment
version_added: "2.0"
short_description: Modifies environment variables on windows hosts.
description:
- Uses .net Environment to set or remove environment variables and can set at User, Machine or Process level.
- User level environment variables will be set, but not available until the user has logged off and on again.
options:
state:
description:
- present to ensure environment variable is set, or absent to ensure it is removed
required: false
default: present
choices:
- present
- absent
name:
description:
- The name of the environment variable
required: true
default: no default
value:
description:
- The value to store in the environment variable. Can be omitted for state=absent
required: false
default: no default
level:
description:
- The level at which to set the environment variable.
- Use 'machine' to set for all users.
- Use 'user' to set for the current user that ansible is connected as.
- Use 'process' to set for the current process. Probably not that useful.
required: true
default: no default
choices:
- machine
- process
- user
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- This module does not broadcast change events.
This means that the minority of windows applications which can have
their environment changed without restarting will not be notified and
therefore will need restarting to pick up new environment settings.
User level environment variables will require the user to log out
and in again before they become available.
'''
EXAMPLES = '''
# Set an environment variable for all users
win_environment:
state: present
name: TestVariable
value: "Test value"
level: machine
# Remove an environment variable for the current users
win_environment:
state: absent
name: TestVariable
level: user
'''
| 32.988506 | 115 | 0.712544 |
5fcfe7c4aed853e19a0fc1a305e9da380a351c2a | 1,912 | py | Python | autoesk_main/test.py | SilasPDJ/autoesk_project_v2 | 249730307ad350a1aaacfd5abe08b0781253854e | [
"MIT"
] | 1 | 2021-03-12T00:40:13.000Z | 2021-03-12T00:40:13.000Z | autoesk_main/test.py | SilasPDJ/autoesk_project_v2 | 249730307ad350a1aaacfd5abe08b0781253854e | [
"MIT"
] | 1 | 2021-04-02T04:40:38.000Z | 2021-04-02T04:42:20.000Z | autoesk_main/test.py | SilasPDJ/autoesk_project_v2 | 249730307ad350a1aaacfd5abe08b0781253854e | [
"MIT"
] | null | null | null | import os
# Configure Django settings before importing any ORM models below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "infternal.settings")
import django
django.setup()
from django.conf import settings
import dateutil.parser
import re
import requests
import json
from O365 import *
from sites.models import SiteData
from sites.models import Circuits, CircuitMaintenance
from home.models import MailTemplate, MailImages
from jinja2 import Template, Environment
from django.db.models import Q
from datetime import datetime, timedelta
# NOTE(review): duplicate import -- `settings` was already imported above.
from django.conf import settings
# NOTE(review): `StringIO` and the `email.MIME*` import paths below are
# Python 2 only; this script will not run unmodified on Python 3.
from StringIO import StringIO
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
# Render the stored mail template (pk=1) with hard-coded sample values.
env = Environment(autoescape=False, optimized=False)
template_data = MailTemplate.objects.get(pk=1)
mail_template = env.from_string(template_data.template)
template_file = StringIO()
mail_template.stream(
    StartDate = '17/02/17 Midnight',
    EndDate = '17/02/17 6 AM',
    Details = 'Circuit maintenace on the cirasodasd asdas da a dskdka aks ada',
).dump(template_file)
content = template_file.getvalue()
# Define these once; use them twice!
strFrom = 'helpdesk@xxxxx.com'
strTo = 'alex@xxxx.com'
# Create the root message and fill in the from, to, and subject headers
msgRoot = MIMEMultipart('related')
msgRoot['Subject'] = 'test message'
msgRoot['From'] = strFrom
msgRoot['To'] = strTo
msgRoot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
msgText = MIMEText('This is the alternative plain text message.')
msgAlternative.attach(msgText)
# We reference the image in the IMG SRC attribute by the ID we give it below
msgText = MIMEText(content, 'html')
msgAlternative.attach(msgText)
| 32.40678 | 83 | 0.787134 |
3ebbb6567e2e9d147f93ba81310c2f3ce85ce4ff | 836 | py | Python | tests/test_tightbinding_honeycomb.py | sharkdp/bandstructure | b74b688afc2b15b20ec1a8ebcf72ba8699b6bf96 | [
"MIT"
] | 7 | 2016-12-23T11:19:35.000Z | 2021-06-08T07:43:16.000Z | tests/test_tightbinding_honeycomb.py | sharkdp/bandstructure | b74b688afc2b15b20ec1a8ebcf72ba8699b6bf96 | [
"MIT"
] | null | null | null | tests/test_tightbinding_honeycomb.py | sharkdp/bandstructure | b74b688afc2b15b20ec1a8ebcf72ba8699b6bf96 | [
"MIT"
] | 12 | 2017-09-28T04:14:03.000Z | 2022-02-16T06:55:41.000Z | import numpy as np
from bandstructure import Parameters
from bandstructure.system import TightBindingSystem
from bandstructure.lattice import HoneycombLattice
def test_tightbinding_honeycomb():
    """Solve a honeycomb tight-binding model along a short k-path and
    spot-check selected band energies against known values."""
    honeycomb = HoneycombLattice()
    system = TightBindingSystem(Parameters({'lattice': honeycomb, 't': 1}))

    kpath = honeycomb.getKvectorsPath(resolution=4, pointlabels=['A', 'G', 'X'])
    assert len(kpath.points) == 6

    bands = system.solve(kpath)
    # (k-point index) -> expected energy of the first band at that point.
    expected = {0: -1, 1: 0, 2: -2, 3: -3, 5: -1}
    for point_index, energy in expected.items():
        np.testing.assert_almost_equal(bands.energies[point_index][0], energy)
| 28.827586 | 77 | 0.723684 |
1b0003bc2ed761468f73a06a580ec8228a3c3355 | 166 | py | Python | bookmanager/book/apps.py | shanzhaizhou/szz02 | 97b7c596aac18c780049ebda9c940d4fceeea9e6 | [
"MIT"
] | null | null | null | bookmanager/book/apps.py | shanzhaizhou/szz02 | 97b7c596aac18c780049ebda9c940d4fceeea9e6 | [
"MIT"
] | null | null | null | bookmanager/book/apps.py | shanzhaizhou/szz02 | 97b7c596aac18c780049ebda9c940d4fceeea9e6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BookConfig(AppConfig):
    """Django application configuration for the ``book`` app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'book'
    # Human-readable app label shown in the Django admin (Chinese: "book management").
    verbose_name = '书籍管理'
| 20.75 | 56 | 0.728916 |
5c7bcb5b2e3d8aefea3fcd28c4623d96ded3e65a | 1,480 | py | Python | src/modules/RpiHatSenseModule/AzureIoTLogger.py | elbruno/AzureIoTRpiSenseHat | 2cf136b294601f5d44551560346f20acd2ab36a8 | [
"MIT"
] | null | null | null | src/modules/RpiHatSenseModule/AzureIoTLogger.py | elbruno/AzureIoTRpiSenseHat | 2cf136b294601f5d44551560346f20acd2ab36a8 | [
"MIT"
] | null | null | null | src/modules/RpiHatSenseModule/AzureIoTLogger.py | elbruno/AzureIoTRpiSenseHat | 2cf136b294601f5d44551560346f20acd2ab36a8 | [
"MIT"
] | null | null | null | # Copyright (c) 2022
# Author : Bruno Capuano
# Create Time : 2022 March
# Change Log :
# – Log messages to Azure IoT Hub
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import datetime
class AzureIoTLogger:
    """Minimal console logger for the Rpi Sense HAT IoT Edge module.

    Messages are written to stdout prefixed with a local timestamp.
    """

    @staticmethod
    def Log(message):
        """Print *message* prefixed with the current local time.

        :param message: any object; it is formatted via the f-string.
        """
        current_time = datetime.datetime.now()
        # An f-string already yields a str, so the original's redundant
        # str(...) wrapper was dropped; output is unchanged.
        print(f"{current_time} | {message}")
1716828928562a8aec696a762ba9618e6d5fee43 | 1,289 | py | Python | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/migrations/0011_auto_20200912_2150.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | [
"MIT"
] | 176 | 2019-07-03T00:20:15.000Z | 2022-03-14T07:51:22.000Z | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/migrations/0011_auto_20200912_2150.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | [
"MIT"
] | 121 | 2019-06-24T20:47:27.000Z | 2022-03-28T02:16:18.000Z | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/migrations/0011_auto_20200912_2150.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | [
"MIT"
] | 144 | 2019-06-18T18:48:43.000Z | 2022-03-31T12:14:46.000Z | # Generated by Django 3.0.8 on 2020-09-12 21:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename ``PartDetection.maxImage`` to ``maxImages`` and constrain the
    ``inference_mode`` char fields on PartDetection and PDScenario to a
    fixed choice set with default ``"PD"``.
    """

    dependencies = [("azure_part_detections", "0010_partdetection_send_video_to_cloud")]

    operations = [
        # maxImage -> maxImages.
        migrations.RenameField(
            model_name="partdetection", old_name="maxImage", new_name="maxImages"
        ),
        # Limit PartDetection.inference_mode to the four listed modes.
        migrations.AlterField(
            model_name="partdetection",
            name="inference_mode",
            field=models.CharField(
                choices=[
                    ("PD", "part_detection"),
                    ("PC", "part_counting"),
                    ("ES", "employee_safety"),
                    ("DD", "defect_detection"),
                ],
                default="PD",
                max_length=40,
            ),
        ),
        # Apply the same constraint to PDScenario.inference_mode.
        migrations.AlterField(
            model_name="pdscenario",
            name="inference_mode",
            field=models.CharField(
                choices=[
                    ("PD", "part_detection"),
                    ("PC", "part_counting"),
                    ("ES", "employee_safety"),
                    ("DD", "defect_detection"),
                ],
                default="PD",
                max_length=40,
            ),
        ),
    ]
| 29.976744 | 88 | 0.470132 |
4d29e945c878ba999289a08da990044805211af4 | 2,345 | py | Python | pymicmac/workflow/distributed_tapioca/create_all_image_pairs_file.py | pymicmac/pymicmac | 75e8dfbc90aeff4301b144ba76fc627a780ec8ac | [
"Apache-2.0"
] | 27 | 2016-10-18T15:44:06.000Z | 2019-09-11T09:34:47.000Z | pymicmac/workflow/distributed_tapioca/create_all_image_pairs_file.py | pymicmac/pymicmac | 75e8dfbc90aeff4301b144ba76fc627a780ec8ac | [
"Apache-2.0"
] | 28 | 2017-11-13T09:01:16.000Z | 2018-02-07T14:16:47.000Z | pymicmac/workflow/distributed_tapioca/create_all_image_pairs_file.py | pymicmac/pymicmac | 75e8dfbc90aeff4301b144ba76fc627a780ec8ac | [
"Apache-2.0"
] | 12 | 2016-09-26T15:35:04.000Z | 2021-11-12T18:57:41.000Z | #!/usr/bin/python
import argparse
import os
from pymicmac import utils_execution
def run(inputFolder, imageFormat, outputFile):
    """Write a Tapioca image-pairs XML file listing every possible pair.

    Only files in *inputFolder* whose name ends with *imageFormat* are
    considered; for every unordered pair {a, b} both ``<Cple>a b</Cple>``
    and ``<Cple>b a</Cple>`` entries are written.

    :param inputFolder: folder containing the images
    :param imageFormat: file-name suffix used to select images
    :param outputFile: XML file to create (must not already exist)
    :raises Exception: if *inputFolder* is not a folder or *outputFile* exists
    """
    # Check user parameters
    if not os.path.isdir(inputFolder):
        raise Exception(inputFolder + " does not exist! (or is not a folder)")
    # Images with the requested suffix, in deterministic (sorted) order.
    imagesFormat = [image for image in sorted(os.listdir(inputFolder))
                    if image.endswith(imageFormat)]
    if os.path.isfile(outputFile):
        raise Exception(outputFile + ' already exists!')
    # 'with' guarantees the handle is closed even if a write raises
    # (the original leaked the open handle on error).
    with open(outputFile, 'w') as ofile:
        ofile.write('<?xml version="1.0" ?>\n')
        ofile.write('<SauvegardeNamedRel>\n')
        # Each unordered pair exactly once, emitted in both orders.
        for i, first in enumerate(imagesFormat):
            for second in imagesFormat[i + 1:]:
                ofile.write('    <Cple>' + first + ' ' + second + '</Cple>\n')
                ofile.write('    <Cple>' + second + ' ' + first + '</Cple>\n')
        ofile.write('</SauvegardeNamedRel>\n')
def argument_parser():
    """Build the command-line parser for this tool (three required options)."""
    parser = argparse.ArgumentParser(
        description="Creates a valid image pairs file suitable for Tapioca "
                    "(to run with option File). Every possible image pair is added")
    # (short flag, long flag, help text) for each required option.
    option_specs = (
        ('-i', '--input', 'Input folder with the images'),
        ('-f', '--format',
         'File format of the images (only files with this format are considered for the pairs)'),
        ('-o', '--output', 'Output valid image pairs file'),
    )
    for short_flag, long_flag, help_text in option_specs:
        parser.add_argument(short_flag,
                            long_flag,
                            default='',
                            help=help_text,
                            type=str,
                            required=True)
    return parser
def main():
    """Command-line entry point: parse arguments, run, and print any error."""
    try:
        parsed_args = utils_execution.apply_argument_parser(argument_parser())
        run(parsed_args.input, parsed_args.format, parsed_args.output)
    except Exception as error:
        # Report the problem instead of crashing with a traceback.
        print(error)


if __name__ == "__main__":
    main()
| 28.950617 | 135 | 0.556077 |
bbcf6c16d6c8c6c412c95fd66eb37db59fc59051 | 31 | py | Python | series_alignment/__init__.py | brian-murphy/series_alignment | bcb64047607e9f2080d8f04107160345241aba10 | [
"MIT"
] | null | null | null | series_alignment/__init__.py | brian-murphy/series_alignment | bcb64047607e9f2080d8f04107160345241aba10 | [
"MIT"
] | null | null | null | series_alignment/__init__.py | brian-murphy/series_alignment | bcb64047607e9f2080d8f04107160345241aba10 | [
"MIT"
] | null | null | null | __all__ = ["series_alignment"]
| 15.5 | 30 | 0.741935 |
f951882c145fe383ef2c885f1bcf6bd100943b9f | 1,315 | py | Python | toontown/ai/DistributedHydrantZeroMgr.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 99 | 2019-11-02T22:25:00.000Z | 2022-02-03T03:48:00.000Z | toontown/ai/DistributedHydrantZeroMgr.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 42 | 2019-11-03T05:31:08.000Z | 2022-03-16T22:50:32.000Z | toontown/ai/DistributedHydrantZeroMgr.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 57 | 2019-11-03T07:47:37.000Z | 2022-03-22T00:41:49.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from toontown.ai import DistributedPhaseEventMgr
class DistributedHydrantZeroMgr(DistributedPhaseEventMgr.DistributedPhaseEventMgr):
    """Client-side distributed object that mirrors hydrant-zero event state
    to local listeners by rebroadcasting updates as messenger events.

    NOTE(review): ``messenger`` is not defined in this file; it is assumed
    to be the globally injected Panda3D event messenger -- confirm.
    """
    # Keep this object alive even when its zone would otherwise disable it.
    neverDisable = 1
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedHydrantZeroMgr')

    def __init__(self, cr):
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.__init__(self, cr)
        # Register this instance on the client repository for global access.
        cr.hydrantZeroMgr = self

    def announceGenerate(self):
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.announceGenerate(self)
        # Broadcast the initial running state once generation completes.
        messenger.send('hydrantZeroIsRunning', [self.isRunning])

    def delete(self):
        self.notify.debug('deleting hydrantzeromgr')
        # Broadcast "stopped" before tearing down so listeners can reset.
        messenger.send('hydrantZeroIsRunning', [False])
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.delete(self)
        if hasattr(self.cr, 'hydrantZeroMgr'):
            del self.cr.hydrantZeroMgr

    def setCurPhase(self, newPhase):
        # Forward the phase change to the base class, then notify listeners.
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.setCurPhase(self, newPhase)
        messenger.send('hydrantZeroPhase', [newPhase])

    def setIsRunning(self, isRunning):
        # Forward the running flag to the base class, then notify listeners.
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.setIsRunning(self, isRunning)
        messenger.send('hydrantZeroIsRunning', [isRunning])
| 42.419355 | 87 | 0.76654 |
93fc1919e02a94864614338dcf567b14d9b2a0d6 | 4,961 | py | Python | data_steward/cdr_cleaner/cleaning_rules/create_person_ext_table.py | hwang2739/curation | c9ee1e2cbd1289283023858c7792d72f6c13bb54 | [
"MIT"
] | null | null | null | data_steward/cdr_cleaner/cleaning_rules/create_person_ext_table.py | hwang2739/curation | c9ee1e2cbd1289283023858c7792d72f6c13bb54 | [
"MIT"
] | null | null | null | data_steward/cdr_cleaner/cleaning_rules/create_person_ext_table.py | hwang2739/curation | c9ee1e2cbd1289283023858c7792d72f6c13bb54 | [
"MIT"
] | null | null | null | """
Original Issues: DC-1012
Background
In order to avoid further changes to the standard OMOP person table, two non-standard fields will be housed in a
person_ext table.
Cleaning rule script to run AFTER deid.
This cleaning rule will populate the person_ext table
The following fields will need to be copied from the observation table:
src_id (from observation_ext, should all be “PPI/PM”)
state_of_residence_concept_id: the value_source_concept_id field in the OBSERVATION table row where
observation_source_concept_id = 1585249 (StreetAddress_PIIState)
state_of_residence_source_value: the concept_name from the concept table for the state_of_residence_concept_id
person_id (as research_id) can be pulled from the person table
"""
import logging
# Project imports
import constants.cdr_cleaner.clean_cdr as cdr_consts
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.bq_utils import WRITE_TRUNCATE
from common import JINJA_ENV
LOGGER = logging.getLogger(__name__)
# Query to create person_ext: one row per person, left-joined to the
# StreetAddress_PIIState observation (observation_source_concept_id 1585249)
# for the state-of-residence columns and to observation_ext for src_id.
PERSON_EXT_TABLE_QUERY = JINJA_ENV.from_string("""
SELECT p.person_id, e.src_id,
o.value_source_concept_id AS state_of_residence_concept_id,
c.concept_name AS state_of_residence_source_value
FROM `{{project}}.{{dataset}}.person` p
LEFT JOIN `{{project}}.{{dataset}}.observation` o
ON p.person_id = o.person_id AND o.observation_source_concept_id = 1585249
LEFT JOIN `{{project}}.{{dataset}}.concept` c
ON o.value_source_concept_id = c.concept_id AND o.observation_source_concept_id = 1585249
LEFT JOIN `{{project}}.{{dataset}}.observation_ext` e
ON o.observation_id = e.observation_id AND o.observation_source_concept_id = 1585249
""")

# Destination table(s) written by this cleaning rule.
tables = ['person_ext']
class CreatePersonExtTable(BaseCleaningRule):
    """Post-deid cleaning rule that builds the ``person_ext`` table with the
    non-standard state_of_residence_concept_id and
    state_of_residence_source_value columns.
    """

    def __init__(self, project_id, dataset_id, sandbox_dataset_id):
        """Record issue numbers, description and affected datasets/tables.

        As other tickets come to affect this SQL, append them to the issue
        list.  DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
        """
        super().__init__(issue_numbers=['DC1012'],
                         description='Create person_ext table',
                         affected_datasets=[cdr_consts.DEID_BASE],
                         affected_tables=tables,
                         project_id=project_id,
                         dataset_id=dataset_id,
                         sandbox_dataset_id=sandbox_dataset_id)

    def get_query_specs(self):
        """Return the list of query-specification dicts for this rule.

        Each dict carries the rendered query plus its execution settings
        (destination table/dataset and write disposition).
        """
        return [{
            cdr_consts.QUERY:
                PERSON_EXT_TABLE_QUERY.render(project=self.project_id,
                                              dataset=self.dataset_id),
            cdr_consts.DESTINATION_TABLE:
                destination_table,
            cdr_consts.DESTINATION_DATASET:
                self.dataset_id,
            cdr_consts.DISPOSITION:
                WRITE_TRUNCATE,
        } for destination_table in tables]

    def setup_rule(self, client):
        """No data upload or other setup is needed before the query runs."""
        pass

    def setup_validation(self, client):
        """Run required steps for validation setup."""
        raise NotImplementedError("Please fix me.")

    def validate_rule(self, client):
        """Validate the cleaning rule's effect on the affected tables."""
        raise NotImplementedError("Please fix me.")

    def get_sandbox_tablenames(self):
        """This rule sandboxes no rows, so there are no sandbox tables."""
        return []
if __name__ == '__main__':
    import cdr_cleaner.args_parser as parser
    import cdr_cleaner.clean_cdr_engine as clean_engine

    ARGS = parser.parse_args()

    if ARGS.list_queries:
        # Dry run: log the generated queries without touching any dataset.
        clean_engine.add_console_logging()
        for query in clean_engine.get_query_list(ARGS.project_id,
                                                 ARGS.dataset_id,
                                                 ARGS.sandbox_dataset_id,
                                                 [(CreatePersonExtTable,)]):
            LOGGER.info(query)
    else:
        clean_engine.add_console_logging(ARGS.console_log)
        clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
                                   ARGS.sandbox_dataset_id,
                                   [(CreatePersonExtTable,)])
| 37.022388 | 112 | 0.656924 |
d97aa6cac0235007f52b2ae3b946d959f04c0cf2 | 6,157 | py | Python | users/views.py | peterkurishev/django-users2 | 85214b44adb17295d8933178ffc344d1f8d1ab93 | [
"BSD-3-Clause"
] | null | null | null | users/views.py | peterkurishev/django-users2 | 85214b44adb17295d8933178ffc344d1f8d1ab93 | [
"BSD-3-Clause"
] | null | null | null | users/views.py | peterkurishev/django-users2 | 85214b44adb17295d8933178ffc344d1f8d1ab93 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import messages
from django.contrib.auth import get_user_model, login
from django.urls import reverse
from django.shortcuts import redirect, resolve_url
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from .compat import urlsafe_base64_decode
from .conf import settings
from .signals import user_activated, user_registered
from .utils import EmailActivationTokenGenerator, send_activation_email
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError: # pragma: no cover
from django.contrib.sites.models import get_current_site
if settings.USERS_SPAM_PROTECTION: # pragma: no cover
from .forms import RegistrationFormHoneypot as RegistrationForm
else:
from .forms import RegistrationForm
@csrf_protect
@never_cache
def register(request,
             template_name='users/registration_form.html',
             activation_email_template_name='users/activation_email.html',
             activation_email_subject_template_name='users/activation_email_subject.html',
             activation_email_html_template_name=None,
             registration_form=RegistrationForm,
             registered_user_redirect_to=None,
             post_registration_redirect=None,
             activation_from_email=None,
             current_app=None,
             extra_context=None):
    """Display and process the user registration form.

    Authenticated users are redirected to *registered_user_redirect_to*
    (default ``settings.LOGIN_REDIRECT_URL``); if ``USERS_REGISTRATION_OPEN``
    is false the request goes to the ``users_registration_closed`` view.
    On a valid POST the user is created, optionally auto-logged-in or sent
    an activation email, the ``user_registered`` signal is fired, and the
    request is redirected to *post_registration_redirect* (default:
    ``users_registration_complete``).

    :param registration_form: form class used to validate/create the user
    :param activation_from_email: From address for the activation email
    :param current_app: accepted but unused in this view
    :param extra_context: extra template context for the form page
    """
    if registered_user_redirect_to is None:
        registered_user_redirect_to = getattr(settings, 'LOGIN_REDIRECT_URL')
    if request.user.is_authenticated:
        return redirect(registered_user_redirect_to)
    if not settings.USERS_REGISTRATION_OPEN:
        return redirect(reverse('users_registration_closed'))
    if post_registration_redirect is None:
        post_registration_redirect = reverse('users_registration_complete')
    if request.method == 'POST':
        form = registration_form(request.POST)
        if form.is_valid():
            user = form.save()
            if settings.USERS_AUTO_LOGIN_AFTER_REGISTRATION:
                # Assign a backend directly so login() accepts the user
                # without an authenticate() round-trip.
                user.backend = 'django.contrib.auth.backends.ModelBackend'
                login(request, user)
            elif not user.is_active and settings.USERS_VERIFY_EMAIL:
                # Inactive account: email an activation link instead.
                opts = {
                    'user': user,
                    'request': request,
                    'from_email': activation_from_email,
                    'email_template': activation_email_template_name,
                    'subject_template': activation_email_subject_template_name,
                    'html_email_template': activation_email_html_template_name,
                }
                send_activation_email(**opts)
            user_registered.send(sender=user.__class__, request=request, user=user)
            return redirect(post_registration_redirect)
        # An invalid POST falls through and re-renders the bound form below.
    else:
        form = registration_form()
    current_site = get_current_site(request)
    context = {
        'form': form,
        'site': current_site,
        'site_name': current_site.name,
        'title': _('Register'),
    }
    if extra_context is not None:  # pragma: no cover
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
def registration_closed(request,
                        template_name='users/registration_closed.html',
                        current_app=None,
                        extra_context=None):
    """Render a static page explaining that registration is disabled."""
    context = {'title': _('Registration closed')}
    if extra_context is not None:  # pragma: no cover
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
def registration_complete(request,
                          template_name='users/registration_complete.html',
                          current_app=None,
                          extra_context=None):
    """Render the post-registration page, including a link to log in."""
    context = {
        'title': _('Registration complete'),
        'login_url': resolve_url(settings.LOGIN_URL),
    }
    if extra_context is not None:  # pragma: no cover
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@never_cache
def activate(request,
             uidb64=None,
             token=None,
             template_name='users/activate.html',
             post_activation_redirect=None,
             current_app=None,
             extra_context=None):
    """Confirm an account from the uid/token pair in an activation email.

    On success the user is activated (and optionally logged in) and
    redirected; on an invalid or expired link an error page is rendered.
    """
    context = {
        'title': _('Account activation '),
    }
    if post_activation_redirect is None:
        post_activation_redirect = reverse('users_activation_complete')
    UserModel = get_user_model()
    assert uidb64 is not None and token is not None
    token_generator = EmailActivationTokenGenerator()
    # Any decode/lookup failure is treated the same as an unknown user.
    try:
        user = UserModel._default_manager.get(pk=urlsafe_base64_decode(uidb64))
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        user = None

    if user is None or not token_generator.check_token(user, token):
        # Bad link: fall through to the error template.
        context = {
            'title': _('Email confirmation unsuccessful'),
        }
        if extra_context is not None:  # pragma: no cover
            context.update(extra_context)
        return TemplateResponse(request, template_name, context)

    user.activate()
    user_activated.send(sender=user.__class__, request=request, user=user)
    if settings.USERS_AUTO_LOGIN_ON_ACTIVATION:
        user.backend = 'django.contrib.auth.backends.ModelBackend'  # todo - remove this hack
        login(request, user)
    messages.info(request, 'Thanks for registering. You are now logged in.')
    return redirect(post_activation_redirect)
def activation_complete(request,
                        template_name='users/activation_complete.html',
                        current_app=None,
                        extra_context=None):
    """Render the page shown after a successful account activation."""
    context = {'title': _('Activation complete')}
    if extra_context is not None:  # pragma: no cover
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
| 36.217647 | 97 | 0.667046 |
4f4e9c80a2f687352cc3c6fab27c5cf2e808bd65 | 8,501 | py | Python | generatejson.py | jprichards/installapplications | cb1e1afb418c830288213d00c2e5b34ecbbc7ef5 | [
"Apache-2.0"
] | 1 | 2019-01-11T15:51:22.000Z | 2019-01-11T15:51:22.000Z | generatejson.py | jprichards/installapplications | cb1e1afb418c830288213d00c2e5b34ecbbc7ef5 | [
"Apache-2.0"
] | null | null | null | generatejson.py | jprichards/installapplications | cb1e1afb418c830288213d00c2e5b34ecbbc7ef5 | [
"Apache-2.0"
] | 1 | 2020-08-24T14:47:56.000Z | 2020-08-24T14:47:56.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Generate Json file for installapplications
# Usage: python generatejson.py --item \
# item-name='A name' \
# item-path='A path' \
# item-stage='A stage' \
# item-type='A type' \
# item-url='A url' \
# script-do-not-wait='A boolean' \
# --base-url URL \
# --output PATH
#
# --item can be used unlimited times
# Future plan for this tool is to add AWS S3 integration for auto-upload
import hashlib
import json
import argparse
import os
import subprocess
import tempfile
from xml.dom import minidom
def gethash(filename):
    """Return the SHA-256 hex digest of a file.

    Args:
        filename: Path to the file to hash.

    Returns:
        The hex digest string, or the literal string 'NOT A FILE' when
        `filename` is not a regular file (kept for backwards compatibility
        with existing callers that embed this value in the manifest).
    """
    if not os.path.isfile(filename):
        return 'NOT A FILE'
    hash_function = hashlib.sha256()
    # Read in fixed-size chunks so large packages do not have to fit in
    # memory; the context manager closes the handle even if reading fails
    # (the previous implementation leaked the handle on error).
    with open(filename, 'rb') as fileref:
        for chunk in iter(lambda: fileref.read(2**16), b''):
            hash_function.update(chunk)
    return hash_function.hexdigest()
def getpkginfopath(filename):
    '''Locate the PackageInfo member inside a flat package's BOM.

    Lists the archive members with `xar -tf` and returns the path of the
    first `PackageInfo` entry - either top-level or nested inside a
    `.pkg/` directory of a distribution-style package. Returns None when
    xar fails or no such entry exists.'''
    cmd = ['/usr/bin/xar', '-tf', filename]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (bom, err) = proc.communicate()
    bom = bom.strip().split('\n')
    if proc.returncode == 0:
        for entry in bom:
            if entry.startswith('PackageInfo'):
                return entry
            elif entry.endswith('.pkg/PackageInfo'):
                return entry
    else:
        # Parenthesized print keeps this line valid on both Python 2 and 3;
        # the original bare print statement is a syntax error under Python 3.
        print("Error: %s while extracting BOM for %s" % (err, filename))
def extractpkginfo(filename):
    '''Takes input of a file path and returns a file path to the
    extracted PackageInfo file.'''
    original_dir = os.getcwd()
    if not os.path.isfile(filename):
        return
    work_dir = tempfile.mkdtemp()
    # xar extracts relative to the working directory, so hop into the
    # scratch folder for the duration of the extraction.
    os.chdir(work_dir)
    # The BOM tells us where PackageInfo lives inside the archive.
    member = getpkginfopath(filename)
    extract_cmd = ['/usr/bin/xar', '-xf', filename, member]
    proc = subprocess.Popen(extract_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    os.chdir(original_dir)
    return os.path.join(work_dir, member)
def getpkginfo(filename):
    '''Takes input of a file path and returns strings of the
    package identifier and version from PackageInfo.'''
    if not os.path.isfile(filename):
        return "", ""
    # Pull PackageInfo out of the archive and read its pkg-info element(s);
    # when several are present the last one wins, as before.
    info_path = extractpkginfo(filename)
    document = minidom.parse(info_path)
    for node in document.getElementsByTagName('pkg-info'):
        pkg_id = node.attributes['identifier'].value.encode('UTF-8')
        pkg_version = node.attributes['version'].value.encode('UTF-8')
    return pkg_id, pkg_version
def main():
    """Build a bootstrap.json manifest for InstallApplications.

    Parses one or more --item specifications, validates each one, computes
    SHA-256 hashes (and package id/version for .pkg items), then writes the
    resulting stage map as JSON into the --output directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-url', default=None, action='store',
                        help='Required: Base URL to where root dir is hosted')
    parser.add_argument('--output', default=None, action='store',
                        help='Required: Output directory to save json')
    parser.add_argument('--item', default=None, action='append', nargs=6,
                        metavar=(
                            'item-name', 'item-path', 'item-stage',
                            'item-type', 'item-url', 'script-do-not-wait'),
                        help='Required: Options for item. All items are \
                        required. Scripts default to rootscript and stage \
                        defaults to userland')
    args = parser.parse_args()

    # Bail if we don't have one item, the base url and the output dir.
    if not args.item or not args.base_url or not args.output:
        parser.print_help()
        exit(1)

    # Convert each item's "key=value" option strings into a dictionary.
    itemsToProcess = []
    for item in args.item:
        processedItem = {}
        for itemOption in item:
            values = itemOption.split('=')
            processedItem[values[0]] = values[1]
        itemsToProcess.append(processedItem)

    # Create our stages now so InstallApplications won't blow up.
    stages = {
        'preflight': [],
        'setupassistant': [],
        'userland': []
    }

    # Process each item in the order they were passed in.
    for item in itemsToProcess:
        itemJson = {}
        fileExt = os.path.splitext(item['item-path'])[1]
        fileName = os.path.basename(item['item-path'])
        filePath = item['item-path']
        # Determine the type of item to process - scripts default to
        # rootscript.
        if fileExt in ('.py', '.sh', '.rb', '.php'):
            if item['item-type']:
                itemJson['type'] = itemType = item['item-type']
            else:
                itemJson['type'] = itemType = 'rootscript'
        elif fileExt == '.pkg':
            itemJson['type'] = itemType = 'package'
        else:
            print('Could not determine package type for item or unsupported: \
                %s' % str(item))
            exit(1)
        if itemType not in ('package', 'rootscript', 'userscript'):
            print('item-type malformed: %s' % str(item['item-type']))
            exit(1)
        # Determine the stage of the item to process - default to userland.
        if item['item-stage']:
            if item['item-stage'] in ('preflight', 'setupassistant',
                                      'userland'):
                itemStage = item['item-stage']
            else:
                print('item-stage malformed: %s' % str(item['item-stage']))
                exit(1)
        else:
            itemStage = 'userland'
        # Determine the url of the item to process - defaults to
        # baseurl/stage/filename.
        if not item['item-url']:
            itemJson['url'] = '%s/%s/%s' % (args.base_url, itemStage, fileName)
        else:
            itemJson['url'] = item['item-url']
        # Determine the name of the item to process - defaults to filename.
        if not item['item-name']:
            itemJson['name'] = fileName
        else:
            itemJson['name'] = item['item-name']
        # SHA-256 hash InstallApplications uses to validate its download.
        itemJson['hash'] = gethash(filePath)
        # Add information for scripts and packages.
        if itemType in ('rootscript', 'userscript'):
            if itemType == 'userscript':
                # Userscripts live in a dedicated folder so they can be run
                # as the logged-in user.
                itemJson['file'] = '/Library/Application Support/'\
                    'installapplications/userscripts/%s' % fileName
            else:
                itemJson['file'] = '/Library/Application Support/'\
                    'installapplications/%s' % fileName
            # Validate the boolean-ish string before interpreting it.
            if item['script-do-not-wait'] in ('true', 'True', '1',
                                              'false', 'False', '0'):
                # Only emit the key when the value is truthy.
                if item['script-do-not-wait'] in ('true', 'True', '1'):
                    itemJson['donotwait'] = True
            else:
                print('script-do-not-wait malformed: %s ' % str(
                    item['script-do-not-wait']))
                exit(1)
        # If packages, we need the version and packageid.
        elif itemType == 'package':
            (pkgId, pkgVersion) = getpkginfo(filePath)
            itemJson['file'] = '/Library/Application Support/'\
                'installapplications/%s' % fileName
            itemJson['packageid'] = pkgId
            itemJson['version'] = pkgVersion
        # Append the info to the appropriate stage.
        stages[itemStage].append(itemJson)

    # Save the json file to the output directory path. --output is
    # guaranteed non-empty by the argument check above; the previous
    # fallback branch referenced an undefined `rootdir` and would have
    # raised NameError if ever reached.
    savePath = os.path.join(args.output, 'bootstrap.json')
    # Sort the primary keys, but not the sub keys, so things are in the
    # correct order.
    try:
        with open(savePath, 'w') as outFile:
            json.dump(stages, outFile, sort_keys=True, indent=2)
    except IOError:
        print('[Error] Not a valid directory: %s' % savePath)
        exit(1)
    print('Json saved to %s' % savePath)
if __name__ == '__main__':
main()
| 35.128099 | 79 | 0.564875 |
e0fdb67df899dc910082fa3df99153a7b4ae0919 | 5,434 | py | Python | models/backbone.py | Ming-er/detr | 807a603acea768b8ed820dbf897cb003ad67f4a0 | [
"Apache-2.0"
] | null | null | null | models/backbone.py | Ming-er/detr | 807a603acea768b8ed820dbf897cb003ad67f4a0 | [
"Apache-2.0"
] | null | null | null | models/backbone.py | Ming-er/detr | 807a603acea768b8ed820dbf897cb003ad67f4a0 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # All four tensors are registered as buffers (not Parameters): they
        # are saved/loaded with the state dict but never touched by the
        # optimizer.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Checkpoints written by a regular BatchNorm2d carry a
        # num_batches_tracked entry that this frozen variant does not have;
        # drop it so loading does not flag an unexpected key.
        tracked_key = prefix + 'num_batches_tracked'
        if tracked_key in state_dict:
            del state_dict[tracked_key]
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Reshape the per-channel buffers to (1, C, 1, 1) up front so the
        # whole computation remains fuser-friendly.
        gamma = self.weight.reshape(1, -1, 1, 1)
        beta = self.bias.reshape(1, -1, 1, 1)
        var = self.running_var.reshape(1, -1, 1, 1)
        mean = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        scale = gamma * (var + eps).rsqrt()
        shift = beta - mean * scale
        return x * scale + shift
class BackboneBase(nn.Module):
    """Wraps a torchvision backbone: freezes parameters as requested and
    returns selected feature maps as NestedTensors with resized masks."""

    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        for name, parameter in backbone.named_parameters():
            # When training the backbone only layer2/3/4 stay trainable;
            # otherwise every backbone parameter is frozen.
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
            # Whether to expose intermediate layers as well.
        if return_interm_layers:
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        '''
        IntermediateLayerGetter Examples:
            >>> m = torchvision.models.resnet18(pretrained=True)
            >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
            >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m, {'layer1': 'feat1', 'layer3': 'feat2'})
            >>> out = new_m(torch.rand(1, 3, 224, 224))
            >>> print([(k, v.shape) for k, v in out.items()])
            >>> [('feat1', torch.Size([1, 64, 56, 56])),
            >>> ('feat2', torch.Size([1, 256, 14, 14]))]
        '''
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # Interpolate the padding mask down to the feature map's size.
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        # out has the shape out={'0': f1, '1': f2, '2': f3, '3': f4}, where
        # f1..f4 are NestedTensors (one entry per returned layer).
        return out
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""
    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        # getattr fetches the torchvision constructor by model name.
        # FrozenBatchNorm2d keeps the running statistics (mean/variance)
        # and the learnable affine parameters fixed.
        backbone = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
        # ResNet-18/34 end with 512 channels; deeper variants with 2048.
        num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
# Combines the backbone with the position-encoding module.
class Joiner(nn.Sequential):
    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        # self[0] is the backbone, self[1] the position-encoding module.
        xs = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for name, x in xs.items():
            out.append(x)
            # position encoding
            pos.append(self[1](x).to(x.tensors.dtype))
        # Every backbone output level gets a positional encoding; return the
        # backbone features together with the matching encodings.
        return out, pos
def build_backbone(args):
    '''
    Build the backbone and the position embedding separately, then merge
    them into a single Joiner module.
    '''
    position_embedding = build_position_encoding(args)
    # Train the backbone only when it has its own positive learning rate;
    # intermediate layers are returned when masks (segmentation) are needed.
    train_backbone = args.lr_backbone > 0
    return_interm_layers = args.masks
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model
| 38.814286 | 113 | 0.642621 |
dbd35ccc0bf365cf368e32f5f8a485bc7e944c6e | 95,251 | py | Python | tensorflow/tools/compatibility/tf_upgrade_v2_test.py | autoih/tensorflow | 4a1ae31d56c3c7f40232aace615945c29dcf9c38 | [
"Apache-2.0"
] | 1 | 2020-01-18T17:54:05.000Z | 2020-01-18T17:54:05.000Z | tensorflow/tools/compatibility/tf_upgrade_v2_test.py | autoih/tensorflow | 4a1ae31d56c3c7f40232aace615945c29dcf9c38 | [
"Apache-2.0"
] | 3 | 2019-07-25T16:55:56.000Z | 2019-08-01T23:44:31.000Z | tensorflow/tools/compatibility/tf_upgrade_v2_test.py | autoih/tensorflow | 4a1ae31d56c3c7f40232aace615945c29dcf9c38 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import tempfile
from absl.testing import parameterized
import six
import tensorflow as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
  """Resolve dotted API path `name` (e.g. "tf.foo.bar") starting at `root`."""
  path_parts = six.ensure_str(name).split(".")
  symbol = root
  # Skip the leading "tf" component - `root` already is that namespace.
  for attr_name in path_parts[1:]:
    symbol = getattr(symbol, attr_name)
  return symbol
def get_args(symbol):
  """Return `symbol`'s positional-or-keyword argument names.

  *args and **kwargs are deliberately ignored.
  """
  if not hasattr(inspect, "signature"):
    # Python 2 fallback: plain argspec names.
    return tf_inspect.getargspec(symbol)[0]
  sig = inspect.signature(symbol)
  return [param.name for param in sig.parameters.values()
          if param.kind == param.POSITIONAL_OR_KEYWORD]
def get_func_and_args_from_str(call_str):
  """Parse call string to get function and argument names.

  Args:
    call_str: Call string must be in the form:
      `tf.foo(arg1=val1, arg2=val2, ...)`.

  Returns:
    (function_name, list of arg names) tuple.
  """
  lparen = six.ensure_str(call_str).find("(")
  rparen = call_str.rfind(")")
  function_name = call_str[:lparen]
  raw_args = six.ensure_str(call_str[lparen + 1:rparen]).split(",")
  arg_names = []
  for raw_arg in raw_args:
    arg_name = six.ensure_str(raw_arg).split("=")[0].strip()
    if arg_name:  # drop empty strings, e.g. from a zero-argument call
      arg_names.append(arg_name)
  return function_name, arg_names
class TestUpgrade(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
  @classmethod
  def setUpClass(cls):
    """Collect every public v1 and v2 API symbol once for all tests.

    Populates `cls.v1_symbols` / `cls.v2_symbols`, mapping "tf.<name>" to
    the underlying attribute, by traversing the public API of tf.compat.v1
    and tf.compat.v2 (when present in this build).
    """
    super(TestUpgrade, cls).setUpClass()
    cls.v2_symbols = {}
    cls.v1_symbols = {}
    if hasattr(tf.compat, "v2"):

      def symbol_collector(unused_path, unused_parent, children):
        for child in children:
          _, attr = tf_decorator.unwrap(child[1])
          api_names_v2 = tf_export.get_v2_names(attr)
          for name in api_names_v2:
            cls.v2_symbols["tf." + six.ensure_str(name)] = attr

      visitor = public_api.PublicAPIVisitor(symbol_collector)
      # Avoid recursing back into the v1 namespace from inside v2.
      visitor.private_map["tf.compat"] = ["v1"]
      traverse.traverse(tf.compat.v2, visitor)

    if hasattr(tf.compat, "v1"):

      def symbol_collector_v1(unused_path, unused_parent, children):
        for child in children:
          _, attr = tf_decorator.unwrap(child[1])
          api_names_v1 = tf_export.get_v1_names(attr)
          for name in api_names_v1:
            cls.v1_symbols["tf." + six.ensure_str(name)] = attr

      visitor = public_api.PublicAPIVisitor(symbol_collector_v1)
      traverse.traverse(tf.compat.v1, visitor)
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def _upgrade_multiple(self, old_file_texts):
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
results = []
for old_file_text in old_file_texts:
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
results.append([count, report, errors, out_file.getvalue()])
return results
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertNotEqual(six.ensure_str(report).find("Failed to parse"), -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(
six.ensure_str(report).find("Renamed function `tf.angle` to "
"`tf.math.angle`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
  def testAllAPI(self):
    """Every converted v1 symbol must exist in the v2 API."""
    if not hasattr(tf.compat, "v2"):
      return

    # Converts all symbols in the v1 namespace to the v2 namespace, raising
    # an error if the target of the conversion is not in the v2 namespace.
    # Please regenerate the renames file or edit any manual renames if this
    # test fails.
    def conversion_visitor(unused_path, unused_parent, children):
      for child in children:
        _, attr = tf_decorator.unwrap(child[1])
        api_names = tf_export.get_v1_names(attr)
        for name in api_names:
          _, _, _, text = self._upgrade("tf." + six.ensure_str(name))
          if (text and
              not text.startswith("tf.compat.v1") and
              not text.startswith("tf.compat.v2") and
              text not in self.v2_symbols and
              # Builds currently install old version of estimator that doesn't
              # have some 2.0 symbols.
              not text.startswith("tf.estimator")):
            self.assertFalse(
                True, "Symbol %s generated from %s not in v2 API" % (
                    text, name))

    visitor = public_api.PublicAPIVisitor(conversion_visitor)
    visitor.do_not_descend_map["tf"].append("contrib")
    visitor.private_map["tf.compat"] = ["v1", "v2"]
    traverse.traverse(tf.compat.v1, visitor)
  def testAllAPIV1(self):
    """Every converted v1 symbol must still exist somewhere in the v1 API.

    Two traversal passes share one visitor: with `collect` True the visitor
    gathers every v1 symbol name; with it False the visitor converts each
    name and verifies the result against the gathered set.
    """
    collect = True
    v1_symbols = set([])

    # Converts all symbols in the v1 namespace to the v2 namespace, raising
    # an error if the target of the conversion is not in the v1 namespace.
    def conversion_visitor(unused_path, unused_parent, children):
      for child in children:
        _, attr = tf_decorator.unwrap(child[1])
        api_names = tf_export.get_v1_names(attr)
        for name in api_names:
          if collect:
            v1_symbols.add("tf." + six.ensure_str(name))
          else:
            _, _, _, text = self._upgrade("tf." + six.ensure_str(name))
            if (text and
                not text.startswith("tf.compat.v1") and
                not text.startswith("tf.compat.v2") and
                not text.startswith("tf.estimator") and
                text not in v1_symbols):
              self.assertFalse(
                  True, "Symbol %s generated from %s not in v1 API" % (
                      text, name))

    visitor = public_api.PublicAPIVisitor(conversion_visitor)
    visitor.do_not_descend_map["tf"].append("contrib")
    visitor.private_map["tf.compat"] = ["v1", "v2"]
    traverse.traverse(tf.compat.v1, visitor)
    # `collect` is a closure variable: flipping it switches the visitor from
    # collecting names to verifying conversions on the second traversal.
    collect = False
    traverse.traverse(tf.compat.v1, visitor)
  def testV1KeywordArgNames(self):
    """Keyword-rename maps may only mention real v1 argument names."""
    all_keyword_renames = (
        tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)

    # Visitor that verifies V1 argument names.
    def arg_test_visitor(unused_path, unused_parent, children):
      for child in children:
        _, attr = tf_decorator.unwrap(child[1])
        names_v1 = tf_export.get_v1_names(attr)

        for name in names_v1:
          name = "tf.%s" % name
          if name not in all_keyword_renames:
            continue
          arg_names_v1 = tf_inspect.getargspec(attr)[0]
          keyword_renames = all_keyword_renames[name]
          self.assertEqual(type(keyword_renames), dict)

          # Assert that v1 function has valid v1 argument names.
          for from_name, _ in keyword_renames.items():
            self.assertIn(
                from_name, arg_names_v1,
                "%s not found in %s arguments: %s" %
                (from_name, name, str(arg_names_v1)))

    visitor = public_api.PublicAPIVisitor(arg_test_visitor)
    visitor.do_not_descend_map["tf"].append("contrib")
    visitor.private_map["tf.compat"] = ["v1", "v2"]
    traverse.traverse(tf.compat.v1, visitor)
  def testV2KeywordArgNames(self):
    """Converted calls must only use argument names valid in the v2 API."""
    # This test converts a call of the form:
    # tf.foo(arg1=0, arg2=1, ...)
    # to 2.0. Then, checks that converted function has valid argument names.
    if not hasattr(tf.compat, "v2"):
      return
    v2_arg_exceptions = {
        "verify_shape_is_now_always_true",
        # These arguments should not be used, they just specify
        # that a function takes named arguments.
        "keyword_required",
        "_sentinel",
    }
    v1_name_exceptions = {
        "tf.print",  # requires print_function import
    }
    function_warnings = (
        tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
    function_transformers = (
        tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
    keyword_renames = (
        tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)

    # Visitor that converts to V2 and checks V2 argument names.
    def conversion_visitor(unused_path, unused_parent, children):
      for child in children:
        _, attr = tf_decorator.unwrap(child[1])
        if not tf_inspect.isfunction(attr):
          continue
        names_v1 = tf_export.get_v1_names(attr)
        arg_names_v1 = get_args(attr)

        for name in names_v1:
          tf_name = "tf.%s" % name
          if tf_name in function_warnings or tf_name in function_transformers:
            continue  # These require manual change
          if tf_name in v1_name_exceptions:
            continue
          # Assert that arg names after converting to v2 are present in
          # v2 function.
          # 1. First, create an input of the form:
          #    tf.foo(arg1=val1, arg2=val2, ...)
          args = ",".join(
              ["%s=%d" % (from_name, from_index)
               for from_index, from_name in enumerate(arg_names_v1)])
          text_input = "%s(%s)" % (tf_name, args)
          # 2. Convert the input to V2.
          _, _, _, text = self._upgrade(text_input)
          new_function_name, new_args = get_func_and_args_from_str(text)
          if new_function_name == "tf.compat.v1.%s" % name:
            if tf_name in keyword_renames:
              # If we rename arguments, new function must be available in 2.0.
              # We should not be using compat.v1 in this case.
              self.fail(
                  "Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
                  (new_function_name, text_input, text))
            continue
          if new_function_name.startswith("tf.compat.v2"):
            self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),
                          self.v2_symbols)
            continue
          # 3. Verify V2 function and arguments.
          args_v2 = get_args(self.v2_symbols[new_function_name])
          args_v2.extend(v2_arg_exceptions)
          for new_arg in new_args:
            self.assertIn(
                new_arg, args_v2,
                "Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
                "Supported arguments: %s" % (
                    new_arg, text_input, text, str(args_v2)))
          # 4. Verify that the argument exists in v1 as well.
          if new_function_name in set(["tf.nn.ctc_loss",
                                       "tf.saved_model.save"]):
            continue
          args_v1 = get_args(self.v1_symbols[new_function_name])
          args_v1.extend(v2_arg_exceptions)
          for new_arg in new_args:
            self.assertIn(
                new_arg, args_v1,
                "Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"
                "Supported arguments: %s" % (
                    new_arg, text_input, text, str(args_v1)))

    visitor = public_api.PublicAPIVisitor(conversion_visitor)
    visitor.do_not_descend_map["tf"].append("contrib")
    visitor.private_map["tf.compat"] = ["v1", "v2"]
    traverse.traverse(tf.compat.v1, visitor)
  def testPositionsMatchArgGiven(self):
    """Arg positions recorded in function_arg_warnings must match the real
    function signatures."""
    full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings
    method_names = list(full_dict.keys())
    for method_name in method_names:
      args = list(full_dict[method_name].keys())
      if "contrib" in method_name:
        # Skip descending and fetching contrib methods during test. These are
        # not available in the repo anymore.
        continue
      elif six.ensure_str(method_name).startswith("*."):
        # special case for optimizer methods
        method = six.ensure_str(method_name).replace("*", "tf.train.Optimizer")
      else:
        method = method_name
      method = get_symbol_for_name(tf, method)
      arg_spec = tf_inspect.getfullargspec(method)
      for (arg, pos) in args:
        # to deal with the self argument on methods on objects
        if six.ensure_str(method_name).startswith("*."):
          pos += 1
        self.assertEqual(arg_spec[0][pos], arg)
  def testReorderFileNeedsUpdate(self):
    """Checks the generated reorders map is in sync with the spec's
    reordered_function_names (including their v1 aliases)."""
    reordered_function_names = (
        tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
    function_reorders = (
        tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
    manual_function_reorders = (
        tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)

    added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
    removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
    self.assertTrue(
        reordered_function_names.issubset(function_reorders),
        added_names_message)
    # function_reorders should contain reordered_function_names
    # and their TensorFlow V1 aliases.
    for name in function_reorders:
      if name in manual_function_reorders:
        continue
      # get other names for this function
      attr = get_symbol_for_name(tf.compat.v1, name)
      _, attr = tf_decorator.unwrap(attr)
      v1_names = tf_export.get_v1_names(attr)
      self.assertTrue(v1_names)
      v1_names = ["tf.%s" % n for n in v1_names]
      # check if any other name is in
      self.assertTrue(
          any(n in reordered_function_names for n in v1_names),
          removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay",
"tf.train.piecewise_constant_decay",
]:
text = "%s(a, b)\n" % decay
_, report, unused_errors, _ = self._upgrade(text)
self.assertIn("switch to the schedules in "
"`tf.keras.optimizers.schedules`", report)
def verify_compat_v1_rename_correctness(self, values, ns_prefix=""):
if ns_prefix:
ns_prefix += "."
for v in values:
text = "tf." + ns_prefix + v + "(a, b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
  def testIntializers(self):
    """All initializer aliases must be rewritten to their tf.compat.v1 form.

    Covers three alias families: tf.initializers.*, the top-level
    *_initializer names, and tf.keras.initializers.*. (The method name keeps
    its historical typo so the test id stays stable.)
    """
    initializers = [
        "zeros",
        "ones",
        "constant",
        "random_uniform",
        "random_normal",
        "truncated_normal",
        "variance_scaling",
        "orthogonal",
        "glorot_uniform",
        "glorot_normal",
        "identity",
        "lecun_normal",
        "lecun_uniform",
        "he_normal",
        "he_uniform",
    ]
    self.verify_compat_v1_rename_correctness(
        initializers, ns_prefix="initializers")

    initializers = [
        "zeros_initializer",
        "ones_initializer",
        "constant_initializer",
        "random_uniform_initializer",
        "random_normal_initializer",
        "truncated_normal_initializer",
        "variance_scaling_initializer",
        "orthogonal_initializer",
        "glorot_uniform_initializer",
        "glorot_normal_initializer",
    ]
    self.verify_compat_v1_rename_correctness(initializers)

    initializers = [
        "zeros",
        "ones",
        "Ones",
        "Zeros",
        "constant",
        "Constant",
        "VarianceScaling",
        "Orthogonal",
        "orthogonal",
        "Identity",
        "identity",
        "glorot_uniform",
        "glorot_normal",
        "lecun_normal",
        "lecun_uniform",
        "he_normal",
        "he_uniform",
        "TruncatedNormal",
        "truncated_normal",
        "RandomUniform",
        "uniform",
        "random_uniform",
        "RandomNormal",
        "normal",
        "random_normal",
    ]
    self.verify_compat_v1_rename_correctness(
        initializers, ns_prefix="keras.initializers")
def testContribXavierInitializer(self):
  """xavier_initializer variants become keras VarianceScaling calls.

  Covers the positional `uniform` flag, the keyword `uniform` flag, the
  _conv2d alias, an extra seed argument, and an unrecognized keyword
  (dtypes) that is passed through unchanged.
  """
  text = "tf.contrib.layers.xavier_initializer()\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
      "mode=\"fan_avg\", "
      "distribution=\"uniform\")\n",
  )
  # A non-literal uniform argument is preserved inside a conditional
  # expression that selects the distribution string.
  text = "slim.xavier_initializer(True or False)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
      "mode=\"fan_avg\", "
      "distribution=(\"uniform\" if True or False else "
      "\"truncated_normal\"))\n",
  )
  text = "slim.xavier_initializer(uniform=(True or False))\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
      "mode=\"fan_avg\", "
      "distribution=(\"uniform\" if True or False else "
      "\"truncated_normal\"))\n",
  )
  text = "tf.contrib.layers.xavier_initializer_conv2d(False, 12)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
      "mode=\"fan_avg\", "
      "distribution=(\"uniform\" if False else \"truncated_normal\"), "
      "seed=12)\n",
  )
  text = ("tf.contrib.layers.xavier_initializer_conv2d("
          "False, 12, tf.float32)\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
      "mode=\"fan_avg\", "
      "distribution=(\"uniform\" if False else \"truncated_normal\"), "
      "seed=12, "
      "dtype=tf.float32)\n",
  )
  # An unknown keyword (dtypes) is forwarded verbatim.
  text = ("tf.contrib.layers.xavier_initializer("
          "False, 12, dtypes=tf.float32)\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
      "mode=\"fan_avg\", "
      "distribution=(\"uniform\" if False else \"truncated_normal\"), "
      "seed=12, "
      "dtypes=tf.float32)\n",
  )
def testVarianceScalingInitializer(self):
  """variance_scaling_initializer maps to keras VarianceScaling.

  The old `mode` string gets .lower() appended, `factor` becomes `scale`
  (defaulting to 2.0 when omitted), and a `uniform` flag turns into a
  distribution-selecting conditional expression.
  """
  text = ("tf.contrib.layers.variance_scaling_initializer("
          "mode=(\"FAN\" + \"_AVG\"))\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
      "mode=(\"FAN\" + \"_AVG\").lower())\n",
  )
  text = ("slim.variance_scaling_initializer("
          "uniform=(True or False), mode=(\"FAN\" + \"_AVG\"))\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
      "distribution=(\"uniform\" if True or False else \"truncated_normal\"),"
      " mode=(\"FAN\" + \"_AVG\").lower())\n",
  )
  text = "tf.contrib.layers.variance_scaling_initializer(factor=1.0)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0)\n",
  )
  # Positional arguments keep their positions; the unknown dtypes keyword
  # is forwarded unchanged.
  text = ("tf.contrib.layers.variance_scaling_initializer("
          "12.0, \"FAN_AVG\", True, dtypes=tf.float32)\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.compat.v1.keras.initializers.VarianceScaling(12.0, "
      "(\"FAN_AVG\").lower(), "
      "(\"uniform\" if True else \"truncated_normal\"), "
      "dtypes=tf.float32)\n",
  )
def testMetrics(self):
  """tf.metrics.* moves to tf.compat.v1.metrics.* with a report note."""
  metric_names = (
      "accuracy", "auc", "average_precision_at_k", "false_negatives",
      "false_negatives_at_thresholds", "false_positives",
      "false_positives_at_thresholds", "mean", "mean_absolute_error",
      "mean_cosine_distance", "mean_iou", "mean_per_class_accuracy",
      "mean_relative_error", "mean_squared_error", "mean_tensor",
      "percentage_below", "precision", "precision_at_k",
      "precision_at_thresholds", "precision_at_top_k", "recall",
      "recall_at_k", "recall_at_thresholds", "recall_at_top_k",
      "root_mean_squared_error", "sensitivity_at_specificity",
      "sparse_average_precision_at_k", "sparse_precision_at_k",
      "specificity_at_sensitivity", "true_negatives",
      "true_negatives_at_thresholds", "true_positives",
      "true_positives_at_thresholds",
  )
  for metric in metric_names:
    _, report, unused_errors, new_text = self._upgrade(
        "tf.metrics." + metric + "(a, b)")
    self.assertEqual("tf.compat.v1.metrics." + metric + "(a, b)", new_text)
    self.assertIn(
        "tf.metrics have been replaced with object oriented versions",
        report)
def testLosses(self):
  """tf.losses.* moves to tf.compat.v1.losses.* with a report note."""
  loss_names = (
      "absolute_difference", "add_loss", "compute_weighted_loss",
      "cosine_distance", "get_losses", "get_regularization_loss",
      "get_regularization_losses", "get_total_loss", "hinge_loss",
      "huber_loss", "log_loss", "mean_pairwise_squared_error",
      "mean_squared_error", "sigmoid_cross_entropy",
      "softmax_cross_entropy", "sparse_softmax_cross_entropy",
  )
  for loss in loss_names:
    _, report, unused_errors, new_text = self._upgrade(
        "tf.losses." + loss + "(a, b)")
    self.assertEqual("tf.compat.v1.losses." + loss + "(a, b)", new_text)
    self.assertIn(
        "tf.losses have been replaced with object oriented versions",
        report)
def testEstimatorLossReductionChange(self):
  """Canned estimators get an explicit SUM loss_reduction added.

  Constructors with no loss_reduction gain
  `loss_reduction=tf.keras.losses.Reduction.SUM`; an existing value is
  left alone; fully-positional calls move to tf.compat.v1 with keywords.
  """
  classes = [
      "LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
      "DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
      "BaselineClassifier", "BaselineRegressor"
  ]
  for c in classes:
    ns = "tf.estimator." + c
    text = ns + "()"
    expected_text = ns + "(loss_reduction=tf.keras.losses.Reduction.SUM)"
    _, report, errors, new_text = self._upgrade(text)
    self.assertEqual(expected_text, new_text)
    # A caller-supplied loss_reduction must be preserved untouched.
    text = ns + "(loss_reduction=TEST)"
    expected_text = ns + "(loss_reduction=TEST)"
    _, report, errors, new_text = self._upgrade(text)
    self.assertEqual(text, new_text)
  # NOTE(review): `c` appears twice below (n_classes and config);
  # presumably just placeholder test args — confirm this is intentional.
  text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"
  expected_text = (
      "tf.compat.v1.estimator.BaselineClassifier("
      "model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "
      "optimizer=o, config=c, loss_reduction=lr)")
  _, report, errors, new_text = self._upgrade(text)
  self.assertEqual(expected_text, new_text)
  text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"
  expected_text = ("tf.estimator.BaselineClassifier(" +
                   "model_dir=model_dir, "
                   "loss_reduction=tf.keras.losses.Reduction.SUM)")
  _, report, errors, new_text = self._upgrade(text)
  self.assertEqual(expected_text, new_text)
def testBaseEstimatorPartitioner(self):
  """Base estimators using input_layer_partitioner move to tf.compat.v1."""
  args = "(input_layer_partitioner=TEST)"
  for est in ("LinearEstimator", "DNNLinearCombinedEstimator",
              "DNNEstimator"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + args)
    self.assertEqual(new_text, "tf.compat.v1.estimator." + est + args)
def testCannedEstimatorPartitioner(self):
  """Canned estimators with a partitioner also gain loss_reduction."""
  in_args = "(input_layer_partitioner=TEST)"
  out_args = ("(input_layer_partitioner=TEST, "
              "loss_reduction=tf.keras.losses.Reduction.SUM)")
  for est in ("LinearClassifier", "LinearRegressor",
              "DNNLinearCombinedClassifier", "DNNLinearCombinedRegressor",
              "DNNRegressor", "DNNClassifier"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + in_args)
    self.assertEqual(new_text,
                     "tf.compat.v1.estimator." + est + out_args)
def testBaseEstimatorOptimizer(self):
  """Base estimators passing an optimizer move under tf.compat.v1."""
  args = "(optimizer=TEST)"
  for est in ("BaselineEstimator", "LinearEstimator", "DNNEstimator"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + args)
    self.assertEqual(new_text, "tf.compat.v1.estimator." + est + args)
def testDNNLinearCombinedEstimatorOptimizer(self):
  """DNNLinearCombinedEstimator with optimizers moves under tf.compat.v1."""
  args = "(dnn_optimizer=TEST, linear_optimizer=Test)"
  for est in ("DNNLinearCombinedEstimator",):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + args)
    self.assertEqual(new_text, "tf.compat.v1.estimator." + est + args)
def testCannedEstimatorOptimizer(self):
  """Canned estimators with an optimizer also gain loss_reduction."""
  in_args = "(optimizer=TEST)"
  out_args = ("(optimizer=TEST, "
              "loss_reduction=tf.keras.losses.Reduction.SUM)")
  for est in ("BaselineClassifier", "BaselineRegressor", "LinearClassifier",
              "LinearRegressor", "DNNRegressor", "DNNClassifier"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + in_args)
    self.assertEqual(new_text,
                     "tf.compat.v1.estimator." + est + out_args)
def testDNNLinearCombinedOptimizer(self):
  """DNNLinearCombined classifier/regressor with optimizers gain reduction."""
  in_args = "(dnn_optimizer=TEST, linear_optimizer=Test)"
  out_args = ("(dnn_optimizer=TEST, linear_optimizer=Test, "
              "loss_reduction=tf.keras.losses.Reduction.SUM)")
  for est in ("DNNLinearCombinedClassifier", "DNNLinearCombinedRegressor"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + in_args)
    self.assertEqual(new_text,
                     "tf.compat.v1.estimator." + est + out_args)
def testBaseEstimatorPartitionerAndOptimizer(self):
  """Partitioner plus optimizer on base estimators moves to tf.compat.v1."""
  args = "(input_layer_partitioner=TEST, optimizer=TEST)"
  for est in ("LinearEstimator", "DNNEstimator"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + args)
    self.assertEqual(new_text, "tf.compat.v1.estimator." + est + args)
def testDNNLinearCombinedEstimatorPartitionerAndOptimizer(self):
  """Partitioner plus both optimizers moves the call under tf.compat.v1."""
  args = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
          "linear_optimizer=TEST)")
  for est in ("DNNLinearCombinedEstimator",):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + args)
    self.assertEqual(new_text, "tf.compat.v1.estimator." + est + args)
def testCannedEstimatorPartitionerAndOptimizer(self):
  """Canned estimators with partitioner+optimizer also gain loss_reduction."""
  in_args = "(input_layer_partitioner=TEST, optimizer=TEST)"
  out_args = ("(input_layer_partitioner=TEST, optimizer=TEST, "
              "loss_reduction=tf.keras.losses.Reduction.SUM)")
  for est in ("LinearClassifier", "LinearRegressor", "DNNRegressor",
              "DNNClassifier"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + in_args)
    self.assertEqual(new_text,
                     "tf.compat.v1.estimator." + est + out_args)
def testDNNLinearCombinedPartitionerAndOptimizer(self):
  """DNNLinearCombined with partitioner+optimizers gains loss_reduction."""
  in_args = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
             "linear_optimizer=TEST)")
  out_args = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
              "linear_optimizer=TEST, "
              "loss_reduction=tf.keras.losses.Reduction.SUM)")
  for est in ("DNNLinearCombinedClassifier", "DNNLinearCombinedRegressor"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator." + est + in_args)
    self.assertEqual(new_text,
                     "tf.compat.v1.estimator." + est + out_args)
def testExtractGlimpse(self):
  """uniform_noise on extract_glimpse is rewritten to a noise= expression.

  The boolean `uniform_noise` becomes
  `noise='uniform' if (<old value>) else 'gaussian'`, preserving comments
  and multi-line layout; a call without the argument is left unchanged.
  """
  text = ("tf.image.extract_glimpse(x, size, off, False, "
          "False, False, name=\"foo\")\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.image.extract_glimpse(x, size, off, False, "
      "False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",
  )
  # A non-literal uniform_noise value is kept inside the conditional.
  text = ("tf.image.extract_glimpse(x, size, off, centered=False, "
          "normalized=False, uniform_noise=True if uniform_noise else "
          "False, name=\"foo\")\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.image.extract_glimpse(x, size, off, centered=False, "
      "normalized=False, noise='uniform' if (True if uniform_noise else "
      "False) else 'gaussian', name=\"foo\")\n",
  )
  # Multi-line calls keep their layout and surrounding comments.
  text = ("tf.image.extract_glimpse(x,\n"
          " size,\n"
          " off,\n"
          " centered=True,\n"
          " normalized=True, # Stuff before\n"
          " uniform_noise=False,\n"
          " name=\"foo\")# Stuff after\n")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text, "tf.image.extract_glimpse(x,\n"
      " size,\n"
      " off,\n"
      " centered=True,\n"
      " normalized=True, # Stuff before\n"
      " noise='uniform' if (False) else 'gaussian',\n"
      " name=\"foo\")# Stuff after\n")
  # A call with only the tensor argument needs no rewriting.
  text = "tf.image.extract_glimpse(x)\n"
  _, unused_report, errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, text)
  self.assertEqual(errors, [])
def testDropout(self):
  """keep_prob on tf.nn.dropout is rewritten to rate=1 - (keep_prob)."""
  text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.nn.dropout(x, 1 - (keep_prob), name=\"foo\")\n",
  )
  text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
  )
  # Comments and multi-line layout around the argument are preserved.
  text = (
      "tf.nn.dropout(x, # Stuff before\n"
      " keep_prob=.4, # Stuff after\n"
      " name=\"foo\")\n"
  )
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.nn.dropout(x, # Stuff before\n"
      " rate=1 - (.4), # Stuff after\n"
      " name=\"foo\")\n",
  )
  # A call with no dropout rate cannot be fixed and reports an error.
  text = "tf.nn.dropout(x)\n"
  _, unused_report, errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, text)
  self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
  """A keep_prob expression is wrapped whole: rate becomes 1 - (expr)."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n")
  self.assertEqual(
      new_text,
      "tf.nn.dropout(x, 1 - (1 - func(3 + 4.)), name=\"foo\")\n",
  )
def testContribL1(self):
  """l1_regularizer converts to tf.keras.regularizers.l1, dropping scope."""
  text = "tf.contrib.layers.l1_regularizer(scale)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l1(scale)\n",
  )
  # No scope argument, so no "Dropping scope" note in the report.
  self.assertNotIn("Dropping scope", unused_report)
  text = "tf.contrib.layers.l1_regularizer(scale, scope)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l1(scale)\n",
  )
  self.assertIn("Dropping scope", unused_report)
  # The slim alias is handled too; keyword scale= becomes l=.
  text = (
      "slim.l1_regularizer( # Stuff before\n"
      " scale=.4,"
      " scope=\"foo\")\n"
  )
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l1( # Stuff before\n"
      " l=.4)\n",
  )
  self.assertIn("Dropping scope", unused_report)
def testContribL2(self):
  """l2_regularizer maps to keras l2 with the scale halved (0.5 * scale)."""
  text = "tf.contrib.layers.l2_regularizer(scale)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l2(0.5 * (scale))\n",
  )
  # No scope argument, so no "Dropping scope" note in the report.
  self.assertNotIn("Dropping scope", unused_report)
  text = "tf.contrib.layers.l2_regularizer(scale, scope)\n"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l2(0.5 * (scale))\n",
  )
  self.assertIn("Dropping scope", unused_report)
  # The slim alias is handled too; keyword scale= becomes l=.
  text = (
      "slim.l2_regularizer( # Stuff before\n"
      " scale=.4,"
      " scope=\"foo\")\n"
  )
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l2( # Stuff before\n"
      " l=0.5 * (.4))\n",
  )
  self.assertIn("Dropping scope", unused_report)
def testContribL2Expr(self):
  """An expression scale is halved whole and the scope argument dropped."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.contrib.layers.l2_regularizer(1 - func(3 + 4.), scope=\"foo\")\n")
  self.assertEqual(
      new_text,
      "tf.keras.regularizers.l2(0.5 * (1 - func(3 + 4.)))\n",
  )
def testMathCountNonZeroChanges(self):
  """Deprecated keyword args of tf.math.count_nonzero are renamed."""
  old_call = (
      "tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
      "reduction_indices=axis, keep_dims=keepdims)\n")
  converted = (
      "tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
      "axis=axis, keepdims=keepdims)\n")
  _, unused_report, unused_errors, new_text = self._upgrade(old_call)
  self.assertEqual(new_text, converted)
def testCountNonZeroChanges(self):
  """tf.count_nonzero moves to tf.math.count_nonzero with renamed kwargs."""
  old_call = (
      "tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
      "reduction_indices=axis, keep_dims=keepdims)\n")
  converted = (
      "tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
      "axis=axis, keepdims=keepdims)\n")
  _, unused_report, unused_errors, new_text = self._upgrade(old_call)
  self.assertEqual(new_text, converted)
def testRandomMultinomialToRandomCategorical(self):
  """Both multinomial spellings convert to tf.random.categorical."""
  expected_text = (
      "tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
      "name=name, dtype=output_dtype)\n")
  for text in (
      "tf.random.multinomial(logits, samples, seed, name, output_dtype)\n",
      "tf.multinomial(logits, samples, seed, name, output_dtype)\n"):
    _, unused_report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
  """Both random_poisson spellings are rewritten with keyword arguments."""
  expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
  for text in ("tf.random_poisson(lam, shape, dtype)",
               "tf.random.poisson(lam, shape, dtype)"):
    _, unused_report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, expected_text)
def testConvolutionOpUpdate(self):
  """Positional args of tf.nn.convolution become renamed keyword args."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
      "name, data_format)")
  self.assertEqual(
      new_text,
      "tf.nn.convolution(input=input, filters=filter, padding=padding, "
      "strides=strides, dilations=dilation_rate, name=name, "
      "data_format=data_format)")
def test_substr(self):
  """tf.substr is renamed to tf.strings.substr with keyword arguments."""
  _, unused_report, errors, new_text = self._upgrade(
      "tf.substr(input, pos, len, name, unit)\n")
  self.assertEqual(
      "tf.strings.substr(input=input, pos=pos, len=len, "
      "name=name, unit=unit)\n", new_text)
  self.assertEqual(errors, [])
def testColocateGradientsWithOps(self):
  """tf.gradients drops colocate_gradients_with_ops and reports it."""
  # Unknown keywords are left alone and produce no errors.
  text = "tf.gradients(yx=a, foo=False)\n"
  _, unused_report, errors, new_text = self._upgrade(text)
  self.assertEqual(text, new_text)
  self.assertEqual(errors, [])
  text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"
  _, report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual("tf.gradients(yx=a)\n", new_text)
  self.assertIn("tf.gradients no longer takes", report)
  # Positional args get keywords; the colocate slot is removed entirely.
  text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"
  expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "
              "gate_gradients=gate)\n")
  _, unused_report, errors, new_text = self._upgrade(text)
  self.assertEqual(expected, new_text)
def testColocateGradientsWithOpsMinimize(self):
  """Optimizer.minimize drops colocate_gradients_with_ops with a note."""
  unchanged = "optimizer.minimize(a, foo=False)\n"
  _, unused_report, errors, new_text = self._upgrade(unchanged)
  self.assertEqual(unchanged, new_text)
  self.assertEqual(errors, [])
  _, report, unused_errors, new_text = self._upgrade(
      "optimizer.minimize(a, colocate_gradients_with_ops=False)\n")
  self.assertEqual("optimizer.minimize(a)\n", new_text)
  self.assertIn("Optimizer.minimize no longer takes", report)
def testColocateGradientsWithOpsComputeGradients(self):
  """Optimizer.compute_gradients drops colocate_gradients_with_ops."""
  unchanged = "optimizer.compute_gradients(a, foo=False)\n"
  _, unused_report, errors, new_text = self._upgrade(unchanged)
  self.assertEqual(unchanged, new_text)
  self.assertEqual(errors, [])
  _, report, unused_errors, new_text = self._upgrade(
      "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n")
  self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
  self.assertIn("Optimizer.compute_gradients no longer takes", report)
def testExportSavedModelRename(self):
  """export_savedmodel() calls produce a rename suggestion in the report."""
  _, report, unused_errors, unused_new_text = self._upgrade(
      "self.est.export_savedmodel(path)")
  self.assertIn(
      "rename the method export_savedmodel() to export_saved_model()",
      report)
def testArgmin(self):
  """tf.argmin keyword renames, plus the tf.arg_min alias rewrite."""
  cases = (
      ("tf.argmin(input, name=n, dimension=1, output_type=type)",
       "tf.argmin(input=input, name=n, axis=1, output_type=type)"),
      ("tf.argmin(input, 0)",
       "tf.argmin(input=input, axis=0)"),
      # The arg_min alias is renamed without re-keying its arguments.
      ("tf.arg_min(input, 0)",
       "tf.argmin(input, 0)"),
  )
  for text, expected_text in cases:
    _, unused_report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, expected_text)
def testArgmax(self):
  """tf.argmax keyword renames, plus the tf.arg_max alias rewrite."""
  cases = (
      ("tf.argmax(input, name=n, dimension=1, output_type=type)",
       "tf.argmax(input=input, name=n, axis=1, output_type=type)"),
      ("tf.argmax(input, 0)",
       "tf.argmax(input=input, axis=0)"),
      # The arg_max alias is renamed without re-keying its arguments.
      ("tf.arg_max(input, 0)",
       "tf.argmax(input, 0)"),
  )
  for text, expected_text in cases:
    _, unused_report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, expected_text)
def testAutograph(self):
  """Removed autograph keyword arguments are dropped during conversion."""
  cases = (
      ("tf.autograph.to_graph(f, True, arg_values=None, arg_types=None)",
       "tf.autograph.to_graph(f, True)"),
      ("tf.autograph.to_code"
       "(f, False, arg_values=None, arg_types=None, indentation=' ')",
       "tf.autograph.to_code(f, False)"),
  )
  for text, expected_text in cases:
    _, unused_report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
  """tf.estimator.inputs.* input functions move under tf.compat.v1."""
  for input_fn in ("numpy_input_fn", "pandas_input_fn"):
    _, unused_report, unused_errors, new_text = self._upgrade(
        "tf.estimator.inputs." + input_fn + "(0)")
    self.assertEqual(
        new_text, "tf.compat.v1.estimator.inputs." + input_fn + "(0)")
def testBatchToSpace(self):
  """batch_to_space_nd aliases collapse into tf.batch_to_space."""
  text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
  expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
  # The old batch_to_space signature is re-keyed (block_size -> block_shape).
  text = "tf.batch_to_space(input, crops, block_size, name)"
  expected_text = (
      "tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
      "name=name)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
  text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
  expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
  """tf.extract_image_patches renames to tf.image.extract_patches."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
      "rates=rates, padding=padding, name=name)")
  self.assertEqual(
      new_text,
      "tf.image.extract_patches(images, sizes=ksizes, strides=strides,"
      "rates=rates, padding=padding, name=name)")
def testKerasSavedModel(self):
  """contrib saved_model Keras helpers map to tf.keras.experimental."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"
      "tf.contrib.saved_model.load_keras_model(saved_model_path)\n")
  self.assertEqual(
      new_text,
      "tf.keras.experimental.export_saved_model(model, './saved_models')\n"
      "tf.keras.experimental.load_from_saved_model(saved_model_path)\n")
def testStatelessMultinomial(self):
  """stateless_multinomial becomes stateless_categorical (dtype kwarg)."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.random.stateless_multinomial(logits, num_samples, seed, "
      "output_dtype=dtype, name=name)")
  self.assertEqual(
      new_text,
      "tf.random.stateless_categorical(logits, num_samples, seed, "
      "dtype=dtype, name=name)")
def testSoftMaxCrossEntropyWithLogitsV2(self):
  """The _v2 suffix is dropped and dim becomes axis, with no errors."""
  _, unused_report, errors, new_text = self._upgrade(
      "tf.nn.softmax_cross_entropy_with_logits_v2("
      "labels=labels, logits=logits, dim=2)")
  self.assertEqual(
      new_text,
      "tf.nn.softmax_cross_entropy_with_logits("
      "labels=labels, logits=logits, axis=2)")
  self.assertFalse(errors)
def testSoftMaxCrossEntropyWithLogits(self):
  """labels are wrapped in tf.stop_gradient and dim becomes axis."""
  text = ("tf.nn.softmax_cross_entropy_with_logits("
          "labels=labels, logits=logits, dim=2)")
  expected_text = (
      "tf.nn.softmax_cross_entropy_with_logits("
      "labels=tf.stop_gradient(labels), logits=logits, axis=2)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
  # Arbitrary label expressions are wrapped as well.
  text = ("tf.nn.softmax_cross_entropy_with_logits("
          "labels=foo(bar))")
  expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
                   "labels=tf.stop_gradient(foo(bar)))")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(expected_text, new_text)
def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):
  """Labels already wrapped in tf.stop_gradient are not wrapped again."""
  text = ("tf.nn.softmax_cross_entropy_with_logits("
          "labels=tf.stop_gradient(labels), logits=logits, dim=2)")
  expected_text = (
      "tf.nn.softmax_cross_entropy_with_logits("
      "labels=tf.stop_gradient(labels), logits=logits, axis=2)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
  text = ("tf.nn.softmax_cross_entropy_with_logits("
          "labels=tf.stop_gradient(foo(bar)))")
  expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
                   "labels=tf.stop_gradient(foo(bar)))")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(expected_text, new_text)
  # Unwrapped call expressions do still get wrapped.
  text = ("tf.nn.softmax_cross_entropy_with_logits("
          "labels=foo())")
  expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
                   "labels=tf.stop_gradient(foo()))")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(expected_text, new_text)
  text = ("tf.nn.softmax_cross_entropy_with_logits("
          "labels=foo().zz())")
  expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
                   "labels=tf.stop_gradient(foo().zz()))")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
  """tf.sparse_matmul positional args map onto tf.linalg.matmul keywords."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.sparse_matmul(a, b, c, d, e, f, g)\n")
  self.assertEqual(
      new_text,
      "tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
      "a_is_sparse=e, b_is_sparse=f, name=g)\n")
def testWeightedMoments(self):
  """Positional args of weighted_moments are re-keyed (kd -> keepdims)."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.weighted_moments(x, axes, freq, name, kd)")
  self.assertEqual(
      new_text,
      "tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
      "name=name, keepdims=kd)")
def testSparseAdd(self):
  """Positional args of tf.sparse.add become keyword arguments."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.sparse.add(a, b, t)")
  self.assertEqual(new_text, "tf.sparse.add(a=a, b=b, threshold=t)")
def testSparseConcat(self):
  """Positional args of tf.sparse.concat are converted to keywords.

  NOTE(review): the expected output contains `axis` twice (axis=ax and
  axis=concat); this mirrors the converter's literal output for the old
  trailing positional argument — confirm this is intentional upstream.
  """
  text = "tf.sparse.concat(ax, inp, name, exp, concat)"
  expected_text = (
      "tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
      "expand_nonconcat_dims=exp, axis=concat)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
  """Positional args of separable_conv2d are re-keyed (rate -> dilations)."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)")
  self.assertEqual(
      new_text,
      "tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
      "pointwise_filter=pt, strides=strides, padding=pad, "
      "dilations=rate, name=name, data_format=fmt)")
def testConv2D(self):
  """tf.nn.conv2d drops use_cudnn_on_gpu and renames filter to filters."""
  text = (
      "tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "
      "data_format)")
  expected_text = (
      "tf.nn.conv2d(input=input, filters=filter, strides=strides, "
      "padding=padding, data_format=data_format)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
  # The keyword form of use_cudnn_on_gpu is dropped as well.
  text = (
      "tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "
      "use_cudnn_on_gpu=use_cudnn_on_gpu)")
  expected_text = ("tf.nn.conv2d(input=input, filters=filter, "
                   "strides=strides, padding=padding)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
def testConv2DBackpropFilter(self):
  """conv2d_backprop_filter is kept verbatim under tf.compat.v1."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "
      "strides, padding, use_cudnn_on_gpu, data_format)")
  self.assertEqual(
      new_text,
      "tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "
      "out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")
def testConv2DBackpropInput(self):
  """conv2d_backprop_input is rewritten as tf.nn.conv2d_transpose."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "
      "strides, padding, use_cudnn_on_gpu, data_format)")
  self.assertEqual(
      new_text,
      "tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "
      "input=out_backprop, strides=strides, padding=padding, "
      "data_format=data_format)")
def testSpacetoBatch(self):
  """space_to_batch_nd and nn.space_to_batch unify on tf.space_to_batch."""
  text = "tf.space_to_batch_nd(input, shape, paddings, name)"
  expected_text = "tf.space_to_batch(input, shape, paddings, name)"
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
  # The nn variant is re-keyed (block_size -> block_shape).
  text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
  expected_text = (
      "tf.space_to_batch(input=input, paddings=paddings, "
      "block_shape=block_size, name=name)")
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, expected_text)
def testInTopK(self):
  """Positional args of tf.math.in_top_k get explicit keywords."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.math.in_top_k(a, b, c, n)")
  self.assertEqual(
      new_text, "tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
def testDepthToSpace(self):
  """Positional args of tf.nn.depth_to_space get explicit keywords."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.depth_to_space(input, block_size, name, data_format)")
  self.assertEqual(
      new_text,
      "tf.nn.depth_to_space(input=input, block_size=block_size, "
      "name=name, data_format=data_format)")
def testEmbeddingLookup(self):
  """embedding_lookup args become keywords; validate_indices is dropped."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
      "validate_indices, max_norm)")
  self.assertEqual(
      new_text,
      "tf.nn.embedding_lookup(params=params, ids=ids, "
      "partition_strategy=partition_strategy, name=name, "
      "max_norm=max_norm)")
def testEmbeddingLookupSparse(self):
  """Positional args of embedding_lookup_sparse become keywords."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
      "partition_strategy, name, combiner, max_norm)")
  self.assertEqual(
      new_text,
      "tf.nn.embedding_lookup_sparse(params=params, "
      "sp_ids=sp_ids, sp_weights=sp_weights, "
      "partition_strategy=partition_strategy, name=name, "
      "combiner=combiner, max_norm=max_norm)")
def testNnInTopK(self):
  """Positional args of tf.nn.in_top_k become keyword arguments."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.in_top_k(predictions, targets, k, name)")
  self.assertEqual(
      new_text,
      "tf.nn.in_top_k(predictions=predictions, "
      "targets=targets, k=k, name=name)")
def testSpaceToDepth(self):
  """Positional args of tf.nn.space_to_depth become keyword arguments."""
  _, unused_report, unused_errors, new_text = self._upgrade(
      "tf.nn.space_to_depth(input, block_size, name, data_format)")
  self.assertEqual(
      new_text,
      "tf.nn.space_to_depth(input=input, block_size=block_size, "
      "name=name, data_format=data_format)")
def testPrint(self):
  """tf.print() calls are left untouched by the converter."""
  # tf.print() cannot be parsed unless we import print_function
  text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
  _, unused_report, unused_errors, new_text = self._upgrade(text)
  self.assertEqual(new_text, text)  # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testIterators(self):
for (text, expected) in [
("(expr + yielding(data)).make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),
("dataset.make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("dataset.make_one_shot_iterator(shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("dataset.make_one_shot_iterator(x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("dataset.make_initializable_iterator()",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("ds.make_initializable_iterator(shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("dataset.make_initializable_iterator(x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testStructure(self):
for (text, expected) in [
("tf.data.experimental.DatasetStructure", "tf.data.DatasetSpec"),
("tf.data.experimental.OptionalStructure", "tf.OptionalSpec"),
("tf.data.experimental.RaggedTensorStructure", "tf.RaggedTensorSpec"),
("tf.data.experimental.SparseTensorStructure", "tf.SparseTensorSpec"),
("tf.data.experimental.Structure", "tf.TypeSpec"),
("tf.data.experimental.TensorArrayStructure", "tf.TensorArraySpec"),
("tf.data.experimental.TensorStructure", "tf.TensorSpec"),
]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
# map_and_batch_with_legacy_function moves under the tf.compat.v1 prefix.
def testMapAndBatch(self):
suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"
text = "tf" + suffix
expected = "tf.compat.v1" + suffix
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
# tf.to_<dtype>(x) helpers are rewritten to tf.cast(x, dtype=tf.<dtype>);
# keyword `name` is preserved and the dtype becomes an explicit kwarg.
def testCast(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# Same as testCast but with `name` given positionally — the rewriter must
# still keywordize it before appending the dtype kwarg.
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# tf.image.resize_<method> collapses into tf.image.resize with an
# explicit method=tf.image.ResizeMethod.<METHOD> argument.
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# With extra positional args the exact arg order of the rewrite is not
# pinned down, so we only assert that each expected fragment appears.
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = [
"tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()
]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
# tf.cond: positional args keywordized, the removed `strict` positional
# is dropped, and a manual-check error is reported.
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
# Keywordization must attach `input_tensor=` to a parenthesized,
# multi-line first argument without disturbing its layout.
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testAssertStatements(self):
  """Checks tf.assert_* / tf.debugging.assert_* are moved under compat.v1.

  For each symbol the upgrade script must rewrite both the bare tf.<name>
  form and the tf.debugging.<name> alias to the corresponding
  tf.compat.v1 path, and emit a "<name> has been" entry in the report.
  """
  # Each symbol is listed exactly once. (The original list repeated
  # "assert_less" and "assert_greater", which only re-ran identical
  # iterations without adding coverage.)
  for name in ["assert_greater", "assert_equal", "assert_none_equal",
               "assert_less", "assert_negative", "assert_positive",
               "assert_non_negative", "assert_non_positive", "assert_near",
               "assert_less_equal", "assert_greater_equal", "assert_integer",
               "assert_type", "assert_scalar"]:
    # Bare tf.<name> form.
    text = "tf.%s(a)" % name
    expected_text = "tf.compat.v1.%s(a)" % name
    _, report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(expected_text, new_text)
    self.assertIn("%s has been" % name, report)

    # tf.debugging.<name> alias gets the same treatment.
    text = "tf.debugging.%s(a)" % name
    expected_text = "tf.compat.v1.debugging.%s(a)" % name
    _, report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(expected_text, new_text)
    self.assertIn("%s has been" % name, report)

def testAssertRankStatements(self):
  """Checks tf.assert_rank* symbols are rewritten under tf.compat.v1."""
  for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
    text = "tf.%s(a)" % name
    expected_text = "tf.compat.v1.%s(a)" % name
    _, report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(expected_text, new_text)
    self.assertIn("%s has been" % name, report)

    text = "tf.debugging.%s(a)" % name
    expected_text = "tf.compat.v1.debugging.%s(a)" % name
    _, report, unused_errors, new_text = self._upgrade(text)
    self.assertEqual(expected_text, new_text)
    self.assertIn("%s has been" % name, report)
# assert_equal_graph_def: removed kwargs are dropped, positional args
# are keywordized to actual/expected.
def test_assert_equal_graph_def(self):
text = ("tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x, "
"hash_table_shared_name=y)")
expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# tf.contrib.framework.is_tensor moved to core as tf.is_tensor.
def test_is_tensor_upgrade(self):
text = "tf.contrib.framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# tf.contrib.framework.CriticalSection moved to core as tf.CriticalSection.
def test_CriticalSection_upgrade(self):
text = "tf.contrib.framework.CriticalSection(shared_name='blah')"
expected = "tf.CriticalSection(shared_name='blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# sample_distorted_bounding_box: positional args keywordized; the removed
# 4th positional (seed2, `d` here) is dropped from the rewritten call.
def test_sample_distorted_bounding_box(self):
# pylint: disable=line-too-long
text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"
expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# tf.contrib.summary.initialize moved under tf.compat.v1.summary.
def test_contrib_initialize(self):
text = "tf.contrib.summary.initialize"
expected = "tf.compat.v1.summary.initialize"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# tf.contrib.framework.argsort moved to core as tf.argsort.
def test_contrib_framework_argsort(self):
text = "tf.contrib.framework.argsort"
expected = "tf.argsort"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# tf.flags / tf.app.flags were removed entirely; both the bare module
# reference and an attribute access must produce a removal error.
def test_flags_bare(self):
_, _, errors, _ = self._upgrade("tf.flags")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_flags_flags(self):
_, _, errors, _ = self._upgrade("tf.flags.FLAGS")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
# Every tf.contrib.estimator.*_head symbol gets a deprecation report entry.
def test_contrib_estimator_head_deprecation(self):
api_symbols = ["binary_classification_head", "logistic_regression_head",
"multi_class_head", "multi_head", "multi_label_head",
"poisson_regression_head", "regression_head"]
for symbol in api_symbols:
text = "tf.contrib.estimator." + symbol
_, report, _, _ = self._upgrade(text)
self.assertIn("`tf.contrib.estimator.*_head` has been deprecated", report)
# Deprecation report entries for contrib layer_norm / rnn / cudnn_rnn.
def test_contrib_layers_layer_norm_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.layers.layer_norm")
self.assertIn("`tf.contrib.layers.layer_norm` has been deprecated", report)
def test_contrib_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.rnn")
self.assertIn("tf.contrib.rnn.* has been deprecated", report)
def test_contrib_cudnn_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.cudnn_rnn")
self.assertIn("tf.contrib.cudnn_rnn.* has been deprecated", report)
# tf.nn.max_pool -> tf.nn.max_pool2d, with `value` renamed to `input`.
def test_max_pool_2d(self):
text = "tf.nn.max_pool(value=4)"
expected_text = "tf.nn.max_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# Early-stopping hooks move from contrib.estimator to
# tf.estimator.experimental.
def test_contrib_estimator_early_stopping(self):
api_symbols = [
"make_early_stopping_hook", "stop_if_higher_hook", "stop_if_lower_hook",
"stop_if_no_decrease_hook", "stop_if_no_increase_hook"
]
for symbol in api_symbols:
text = "tf.contrib.estimator." + symbol
expected_text = "tf.estimator.experimental." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# RNN cell classes move from contrib.rnn to tf.compat.v1.nn.rnn_cell.
def test_contrib_rnn_cell(self):
api_symbols = ["RNNCell", "BasicLSTMCell", "BasicRNNCell", "GRUCell",
"LSTMCell", "MultiRNNCell"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn.rnn_cell." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# static RNN helper functions move from contrib.rnn to tf.compat.v1.nn.
def test_contrib_rnn_function(self):
api_symbols = ["static_rnn", "static_state_saving_rnn",
"static_bidirectional_rnn"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# The tf.contrib.summary.* rewrites below all target tf.compat.v2.summary:
# positional args are keywordized, the unsupported `family` (and, for
# generic, `name`) arguments are dropped with errors, and a generic
# "check tf.compat.v2.summary.*" error is always appended.
def test_contrib_summary_generic(self):
text = "tf.contrib.summary.generic('foo', myval, meta, 'fam', 42)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"metadata=meta, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# Arg errors come in alphabetical order of arguments, not appearance order.
self.assertIn("'family' argument", errors[0])
self.assertIn("'name' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio(self):
text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram(self):
text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
# image additionally drops the unsupported `bad_color` argument.
def test_contrib_summary_image(self):
text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'bad_color' argument", errors[0])
self.assertIn("'family' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_scalar(self):
text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
# The *_nostep variants check that when no step is supplied, the rewriter
# injects step=tf.compat.v1.train.get_or_create_global_step() and reports
# a 'step' argument error.
def test_contrib_summary_generic_nostep(self):
text = "tf.contrib.summary.generic('foo', myval)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("'step' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio_nostep(self):
text = "tf.contrib.summary.audio('foo', myval, 44100)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram_nostep(self):
text = "tf.contrib.summary.histogram('foo', myval)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image_nostep(self):
text = "tf.contrib.summary.image('foo', myval)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_scalar_nostep(self):
text = "tf.contrib.summary.scalar('foo', myval)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
# contrib summary symbols with no direct v2 replacement only produce
# errors pointing at the suggested v2 API.
def test_contrib_summary_graph(self):
text = "tf.contrib.summary.graph(my_graph)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.trace"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_import_event(self):
text = "tf.contrib.summary.import_event(my_event)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.experimental.write_raw_pb"
self.assertIn(expected_error, errors[0])
# flush maps directly onto tf.compat.v2.summary.flush.
def test_contrib_summary_flush(self):
text = "tf.contrib.summary.flush(writer=foo)"
expected = "tf.compat.v2.summary.flush(writer=foo)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# create_file_writer: positional args keywordized, the removed `name`
# argument is dropped, and a behavior-change warning is emitted.
def test_contrib_summary_create_file_writer(self):
text = ("tf.contrib.summary.create_file_writer('my_logdir', 0, 1000, "
"'.foo', 'shared-name')")
expected = ("tf.compat.v2.summary.create_file_writer(logdir='my_logdir', "
"max_queue=0, flush_millis=1000, filename_suffix='.foo')")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("no longer re-uses existing event files", errors[1])
# always/never_record_summaries become record_if(True/False).
def test_contrib_summary_always_record_summaries(self):
text = "tf.contrib.summary.always_record_summaries()"
expected = "tf.compat.v2.summary.record_if(True)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_never_record_summaries(self):
text = "tf.contrib.summary.never_record_summaries()"
expected = "tf.compat.v2.summary.record_if(False)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# record_summaries_every_n_global_steps has no mechanical rewrite; it
# only produces a manual-migration error.
def test_contrib_summary_record_summaries_every_n_global_steps(self):
text = "tf.contrib.summary.record_summaries_every_n_global_steps(10)"
_, _, errors, _ = self._upgrade(text)
expected_error = "replaced by a call to tf.compat.v2.summary.record_if()"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_all_summary_ops(self):
text = "tf.contrib.summary.all_summary_ops()"
expected = "tf.compat.v1.summary.all_v2_summary_ops()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# End-to-end check: a realistic multi-line contrib.summary program is
# rewritten as a whole, combining several of the rules tested above.
def test_contrib_summary_full_example(self):
deindent = lambda n, s: "\n".join(line[n:] for line in s.split("\n"))
text = deindent(4, """
    import tensorflow as tf
    tf.enable_eager_execution()
    writer = tf.contrib.summary.create_file_writer(
        "/tmp/migration_test", flush_millis=1000)
    with writer.as_default(), tf.contrib.summary.always_record_summaries():
      tf.contrib.summary.scalar("loss", 0.42)
      tf.contrib.summary.histogram("weights", [1.0, 2.0], step=7)
      tf.contrib.summary.flush()
    """)
expected = deindent(4, """
    import tensorflow as tf
    tf.compat.v1.enable_eager_execution()
    writer = tf.compat.v2.summary.create_file_writer(
        logdir="/tmp/migration_test", flush_millis=1000)
    with writer.as_default(), tf.compat.v2.summary.record_if(True):
      tf.compat.v2.summary.scalar(name="loss", data=0.42, step=tf.compat.v1.train.get_or_create_global_step())
      tf.compat.v2.summary.histogram(name="weights", data=[1.0, 2.0], step=7)
      tf.compat.v2.summary.flush()
    """)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# TF 1.x tf.summary.* usage is not rewritten, only reported.
def test_summary_api_warning(self):
text = "tf.summary.scalar('foo', 42)"
_, report, _, _ = self._upgrade(text)
expected_info = "TF 1.x summary API cannot be automatically migrated"
self.assertIn(expected_info, report)
# tf.nn.avg_pool -> tf.nn.avg_pool2d, with `value` renamed to `input`.
def test_avg_pool_2d(self):
text = "tf.nn.avg_pool(value=4)"
expected_text = "tf.nn.avg_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# tf.saved_model.load keeps its v1 semantics under tf.compat.v1, while
# the explicit load_v2 alias becomes tf.compat.v2.saved_model.load.
def test_saved_model_load(self):
text = "tf.saved_model.load(sess, ['foo_graph'])"
expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_saved_model_load_v2(self):
text = "tf.saved_model.load_v2('/tmp/blah')"
expected = "tf.compat.v2.saved_model.load('/tmp/blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# tf.app.flags moves under tf.compat.v1.
def test_app_flags(self):
text = "flags = tf.app.flags"
expected = "flags = tf.compat.v1.app.flags"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# uniform_unit_scaling_initializer (both spellings) is rewritten to the
# equivalent VarianceScaling initializer with a uniform distribution.
def test_uniform_unit_scaling_initializer(self):
text = "tf.uniform_unit_scaling_initializer(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.initializers.uniform_unit_scaling(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# tf.name_scope: default_name becomes name when name is None/absent;
# values is dropped; if both name and default_name are given the call is
# kept on tf.compat.v1 with a report entry; name=None with no
# default_name is an error.
def test_name_scope(self):
text = "tf.name_scope(None, default_name, [some, values])"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(default_name=default_name, values=stuff)"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(name=n, default_name=d, values=s)"
expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("`name` passed to `name_scope`", report)
text = "tf.name_scope(name=None, values=stuff)"
_, _, errors, _ = self._upgrade(text)
self.assertIn("name_scope call with neither name nor default_name",
errors[0])
# Disabled (b/129398290): intended rewrites for tf.string_split. Cases
# where the new tf.strings.split semantics differ (skip_empty=True,
# sep=None, non-literal sep) fall back to tf.compat.v1.string_split.
@parameterized.parameters(
# Rename parameter: delimiter -> sep and add .to_sparse()
["tf.string_split('test', delimiter=' ')",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Rename parameter: source -> input
["tf.strings.split(source='test1')",
"tf.strings.split(input='test1').to_sparse()"],
# Use compat.v1 for skip_empty parameter.
["tf.string_split('test', ' ', True)",
"tf.compat.v1.string_split(source='test', sep=' ', skip_empty=True)"],
["tf.string_split('test', ' ', skip_empty=False)",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Split behavior for sep=None changed. (In particular, it now splits on
# all whitespace, not just the space character)
["tf.string_split(x)",
"tf.compat.v1.string_split(source=x)"],
# Split behavior for sep='' changed:
["tf.string_split(x, '')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, sep='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, delimiter='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, '', result_type='RaggedTensor')",
"tf.strings.bytes_split(input=x)"],
# If sep is a variable, we can't tell if it's empty:
["tf.string_split(x, sep)",
"tf.compat.v1.string_split(source=x, sep=sep)"],
# If sep is a non-empty string literal, then we don't need compat.v1.
["tf.string_split(x, 'non-empty-sep')",
"tf.strings.split(input=x, sep='non-empty-sep').to_sparse()"],
# Add to_sparse unless result_type is RaggedTensor:
["tf.string_split(x, ' ')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='SparseTensor')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='RaggedTensor')",
"tf.strings.split(input=x, sep=' ')"],
["tf.string_split(x, ' ', result_type=x)",
"tf.compat.v1.string_split(source=x, sep=' ', result_type=x)"],
) # pyformat: disable
# TODO(b/129398290)
def DISABLED_test_string_split(self, text, expected_text):
"""Tests for transforming from tf.string_split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# tf.strings.split: .to_sparse() is appended unless result_type says
# RaggedTensor; a non-literal result_type falls back to compat.v1.
@parameterized.parameters(
# Add to_sparse unless result_type is RaggedTensor:
["tf.strings.split(x, sep)",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='SparseTensor')",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='RaggedTensor')",
"tf.strings.split(x, sep)"],
["tf.strings.split(x, sep, result_type=x)",
"tf.compat.v1.strings.split(x, sep, result_type=x)"],
) # pyformat: disable
def test_strings_split(self, text, expected_text):
"""Tests for transforming from tf.strings.split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# Removed tf.train.sdca_* endpoints are rewritten onto the corresponding
# tf.raw_ops kernels with fully keywordized arguments.
def test_sdca_to_raw_ops(self):
text = "tf.train.sdca_fprint(input_tensor)"
expected_text = "tf.raw_ops.SdcaFprint(input=input_tensor)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_fprint(input, name=n)"
expected_text = "tf.raw_ops.SdcaFprint(input=input, name=n)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_shrink_l1(w, l, ll)"
expected_text = "tf.raw_ops.SdcaShrinkL1(weights=w, l1=l, l2=ll)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = (
"tf.train.sdca_optimizer(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)")
expected_text = (
"tf.raw_ops.SdcaOptimizer(sparse_example_indices=a, "
"sparse_feature_indices=b, sparse_feature_values=c, dense_features=d, "
"example_weights=e, example_labels=f, sparse_indices=g, "
"sparse_weights=h, dense_weights=i, example_state_data=j, loss_type=k, "
"l1=l, l2=m, num_loss_partitions=n, num_inner_iterations=o)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# contrib symbols that moved to TF Addons are not rewritten; the report
# must mention the tfa.* replacement.
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
# tf.xla.experimental symbols pass through unchanged.
def testXlaExperimental(self):
text = "tf.xla.experimental.jit_scope(0)"
expected_text = "tf.xla.experimental.jit_scope(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.xla.experimental.compile(0)"
expected_text = "tf.xla.experimental.compile(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
# erosion2d/dilation2d gain an explicit data_format='NHWC' argument to
# preserve the old default.
def testNnErosion2d(self):
text = "tf.nn.erosion2d(v, k, s, r, p)"
expected_text = "tf.nn.erosion2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnDilation2d(self):
text = "tf.nn.dilation2d(v, k, s, r, p)"
expected_text = "tf.nn.dilation2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
# tf.pywrap_tensorflow usage is left alone but produces an error.
def testPywrapTensorflowWarning(self):
text = "tf.pywrap_tensorflow.foo()"
expected = "tf.pywrap_tensorflow.foo()"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("`tf.pywrap_tensorflow` will not be distributed", errors[0])
# tf.keras.models.save_model gains save_format='h5' to preserve the old
# default; calls through model.save only get a report entry.
def testKerasSaveModelFormat(self):
text = "tf.keras.models.save_model(model, path)"
expected_text = "tf.keras.models.save_model(model, path, save_format='h5')"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertNotIn(
"saves to the Tensorflow SavedModel format by default", report)
_, report, _, _ = self._upgrade("model.save(path)")
self.assertIn(
"saves to the Tensorflow SavedModel format by default", report)
# contrib.distribute: CrossDeviceOps is rewritten; MirroredStrategy and
# TPUStrategy are left in place with migration errors; the already-core
# tf.distribute.MirroredStrategy only triggers an API-change report; any
# other contrib.distribute symbol gets a generic migration report.
def test_distribute_strategy(self):
text = "tf.contrib.distribute.CrossDeviceOps()"
expected = "tf.distribute.CrossDeviceOps()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
text = "tf.contrib.distribute.MirroredStrategy"
expected = "tf.contrib.distribute.MirroredStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.MirroredStrategy", errors[0])
text = "tf.distribute.MirroredStrategy"
expected = "tf.distribute.MirroredStrategy"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.distribute.MirroredStrategy API has changed", report)
self.assertIn("make_dataset_iterator->experimental_distribute_dataset",
report)
text = "tf.contrib.distribute.TPUStrategy"
expected = "tf.contrib.distribute.TPUStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.experimental.TPUStrategy",
errors[0])
text = "tf.contrib.distribute.foo"
expected = "tf.contrib.distribute.foo"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.contrib.distribute.* have been migrated", report)
# tf.io.decode_raw: keyword `bytes` renamed to `input_bytes`.
def test_decode_raw(self):
text = "tf.io.decode_raw(bytes=[1,2,3], output_dtype=tf.int32)"
expected_text = (
"tf.io.decode_raw(input_bytes=[1,2,3], output_dtype=tf.int32)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# tf.contrib.layers.recompute_grad moved to core as tf.recompute_grad.
def testRecomputeGrad(self):
text = "tf.contrib.layers.recompute_grad()"
expected = "tf.recompute_grad()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# load_variable moves to tf.train, with checkpoint_dir renamed to
# ckpt_dir_or_file when passed by keyword.
def test_load_variable(self):
text = "tf.contrib.framework.load_variable('a')"
expected_text = (
"tf.train.load_variable('a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.contrib.framework.load_variable(checkpoint_dir='a')"
expected_text = (
"tf.train.load_variable(ckpt_dir_or_file='a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
# Import analysis decides whether a file is upgraded at all: only files
# importing the base `tensorflow` module aliased as tf are rewritten;
# explicit compat.v1/v2 imports suppress rewriting (with a report entry),
# and an unaliased `import tensorflow` produces an error.
def test_import_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
# We upgrade the base un-versioned tensorflow aliased as tf
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
# We don't handle unaliased tensorflow imports currently,
# So the upgrade script show log errors
import_header = "import tensorflow\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("unaliased `import tensorflow`", "\n".join(errors))
# Upgrading explicitly-versioned tf code is unsafe, but we don't
# need to throw errors when we detect explicitly-versioned tf.
import_header = "import tensorflow.compat.v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf, v2 as tf2\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "import tensorflow.compat.v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf1, v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
# The import-analysis decision from one file must not leak into the
# next: file A (compat.v2 import) is untouched while file B (bare
# symbol) is still rewritten in the same run.
def test_api_spec_reset_between_files(self):
for old_symbol, new_symbol in [
("tf.conj(a)", "tf.math.conj(a)"),
("tf.to_int32(x)", "tf.cast(x, dtype=tf.int32)")]:
## Test that the api spec is reset in between files:
import_header = "import tensorflow.compat.v2 as tf\n"
text_a = import_header + old_symbol
expected_text_a = import_header + old_symbol
text_b = old_symbol
expected_text_b = new_symbol
results = self._upgrade_multiple([text_a, text_b])
result_a, result_b = results[0], results[1]
self.assertEqual(result_a[3], expected_text_a)
self.assertEqual(result_b[3], expected_text_b)
# model_to_estimator only triggers a checkpoint-format report entry.
def test_model_to_estimator_checkpoint_warning(self):
text = "tf.keras.estimator.model_to_estimator(model)"
_, report, _, _ = self._upgrade(text)
expected_info = "will save object-based checkpoints"
self.assertIn(expected_info, report)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
  """Tests for upgrading files on disk via ASTCodeUpgrader."""

  def testInplace(self):
    """Check to make sure we don't have a file system race.

    Upgrading a file in place (source path == destination path) must not
    truncate the input before it has been fully read.
    """
    temp_file = tempfile.NamedTemporaryFile("w", delete=False)
    original = "tf.conj(a)\n"
    upgraded = "tf.math.conj(a)\n"
    temp_file.write(original)
    temp_file.close()
    try:
      upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
      upgrader.process_file(temp_file.name, temp_file.name)
      # Read through a context manager so the handle is closed promptly;
      # the previous bare open() left the file object to be GC'd.
      with open(temp_file.name) as result_file:
        self.assertAllEqual(result_file.read(), upgraded)
    finally:
      # Clean up the temp file even if the upgrade or assertion fails.
      os.unlink(temp_file.name)
# Script entry point: defer to the TensorFlow test runner.
if __name__ == "__main__":
  test_lib.main()
| 41.74014 | 211 | 0.659353 |
a681c87b059892bb2a6cd702bf12ea413eb32f68 | 76 | py | Python | molpal/models/chemprop/models/__init__.py | mchaker/lab-molpal | f4db7ee2ca51515b4246604867a93a3aac08107d | [
"MIT"
] | 81 | 2020-12-15T14:28:57.000Z | 2022-03-14T12:26:00.000Z | molpal/models/chemprop/models/__init__.py | mchaker/lab-molpal | f4db7ee2ca51515b4246604867a93a3aac08107d | [
"MIT"
] | 13 | 2021-01-07T17:09:00.000Z | 2022-03-01T12:45:22.000Z | molpal/models/chemprop/models/__init__.py | mchaker/lab-molpal | f4db7ee2ca51515b4246604867a93a3aac08107d | [
"MIT"
] | 23 | 2020-12-15T16:09:48.000Z | 2022-03-13T11:29:40.000Z | from .mpn import MPN, MPNEncoder
# Public API of this subpackage.
__all__ = ['MPN', 'MPNEncoder']
| 10.857143 | 32 | 0.605263 |
a90d67bd0a0125dfdfb98b8147609454c026eb2c | 163 | py | Python | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_Seasonal_Hour_ARX.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_Seasonal_Hour_ARX.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_Seasonal_Hour_ARX.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['ConstantTrend'] , ['Seasonal_Hour'] , ['ARX'] ); | 40.75 | 90 | 0.760736 |
a4f2a2054794caba81daa6a2dfc5ed650a465883 | 1,052 | py | Python | experiments/tf_trainer/tf_hub_classifier/run.py | CyberFlameGO/conversationai-models | f82f66398b221d9fe3bcfd7641610af454b3db46 | [
"Apache-2.0"
] | 139 | 2018-03-05T16:34:41.000Z | 2022-03-09T01:36:13.000Z | experiments/tf_trainer/tf_hub_classifier/run.py | CyberFlameGO/conversationai-models | f82f66398b221d9fe3bcfd7641610af454b3db46 | [
"Apache-2.0"
] | 125 | 2018-03-05T21:19:31.000Z | 2020-11-13T17:50:12.000Z | experiments/tf_trainer/tf_hub_classifier/run.py | CyberFlameGO/conversationai-models | f82f66398b221d9fe3bcfd7641610af454b3db46 | [
"Apache-2.0"
] | 47 | 2018-03-21T19:37:14.000Z | 2022-03-09T01:36:21.000Z | """Experiments with Toxicity Dataset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tf_trainer.common import base_model
from tf_trainer.common import model_trainer
from tf_trainer.common import serving_input
from tf_trainer.common import tfrecord_input
from tf_trainer.tf_hub_classifier import model as tf_hub_classifier
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def main(argv):
  """Train, evaluate, and export a TF-Hub text classifier."""
  del argv  # unused
  input_data = tfrecord_input.TFRecordInput()
  classifier = tf_hub_classifier.TFHubClassifierModel(input_data.labels())
  trainer = model_trainer.ModelTrainer(input_data, classifier)
  trainer.train_with_eval()
  # Export with a text serving signature keyed on the example key; the
  # exported model is selected by the AUC of the first configured label.
  input_fn = serving_input.create_text_serving_input_fn(
      text_feature_name=base_model.TEXT_FEATURE_KEY,
      example_key_name=base_model.EXAMPLE_KEY)
  first_label = FLAGS.labels.split(',')[0]
  trainer.export(input_fn, base_model.EXAMPLE_KEY,
                 metrics_key="auc/%s" % first_label)
metrics_key="auc/%s" % FLAGS.labels.split(',')[0])
# Script entry point: enable INFO-level logging and run main() via tf.app.
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(main)
| 28.432432 | 67 | 0.803232 |
cc295b5e7b46aa89659d61347d28628799eb4bd1 | 18,497 | py | Python | COT/commands/tests/test_deploy_esxi.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | [
"MIT"
] | 81 | 2015-01-18T22:31:42.000Z | 2022-03-14T12:34:33.000Z | COT/commands/tests/test_deploy_esxi.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | [
"MIT"
] | 67 | 2015-01-05T15:24:39.000Z | 2021-08-16T12:44:58.000Z | COT/commands/tests/test_deploy_esxi.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | [
"MIT"
] | 20 | 2015-07-09T14:20:25.000Z | 2021-09-18T17:59:57.000Z | #!/usr/bin/env python
#
# test_deploy_esxi.py - test cases for the COTDeployESXi class and helpers
#
# August 2015, Glenn F. Matthews
# Copyright (c) 2013-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for the COT.deploy.COTDeployESXi class and helpers."""
import errno
import getpass
import logging
import re
import socket
import ssl
from distutils.version import StrictVersion
import mock
import requests
from pyVmomi import vim
try:
import unittest2 as unittest
except ImportError:
import unittest
from COT.tests import COTTestCase
from COT.commands.tests.command_testcase import CommandTestCase
import COT.commands.deploy_esxi
from COT.commands.deploy_esxi import COTDeployESXi, SmarterConnection
from COT.data_validation import InvalidInputError
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# pylint: disable=missing-param-doc,missing-type-doc
# These class-level patches apply to every test method in the class: each
# method receives the mocks as extra positional arguments (hence the
# `mock_check_call, *_` signatures below).
@mock.patch('COT.ui.UI.get_password', return_value='passwd')
@mock.patch('subprocess.check_call')
class TestCOTDeployESXi(CommandTestCase):
    """Test cases for COTDeployESXi class."""
    # Some WARNING logger messages we may expect at various points:
    SERIAL_PORT_NOT_FIXED = {
        'levelname': 'WARNING',
        'msg': 'serial port.*will not be created or configured',
    }
    VSPHERE_ENV_WARNING = {
        'levelname': 'WARNING',
        'msg': "deploying.*vSphere.*power-on.*environment properties.*ignored",
    }
    OVFTOOL_VER_TOO_LOW = {
        'levelname': 'WARNING',
        'msg': "ovftool version is too low.*environment properties.*ignored",
    }
    BAD_CERTIFICATE = {
        'levelname': 'WARNING',
        'msg': "certificate verify failed",
    }
    SESSION_FAILED = {
        'levelname': 'ERROR',
        'msg': "Session failed",
    }
    command_class = COTDeployESXi
    # pylint thinks self.command is a Command instead of a COTDeployESXi,
    # so tell it to be quiet about members specific to COTDeployESXi:
    # pylint: disable=no-member
    def setUp(self):
        """Test case setup function called automatically prior to each test."""
        super(TestCOTDeployESXi, self).setUp()
        self.command.package = self.input_ovf
        self.command.hypervisor = 'esxi'
        # Stub out all ovftool dependencies
        # pylint: disable=protected-access
        self._ovftool_path = self.command.ovftool._path
        self._ovftool_version = self.command.ovftool._version
        self.command.ovftool._path = "/fake/ovftool"
        self.command.ovftool._version = StrictVersion("4.0.0")
    def tearDown(self):
        """Test case cleanup function called automatically."""
        # Remove our stub
        # pylint: disable=protected-access
        self.command.ovftool._path = self._ovftool_path
        self.command.ovftool._version = self._ovftool_version
        super(TestCOTDeployESXi, self).tearDown()
    def test_not_ready_with_no_args(self, *_):
        """Verify ready_to_run() is False without all mandatory args."""
        ready, reason = self.command.ready_to_run()
        self.assertEqual(ready, False)
        self.assertTrue(re.search("LOCATOR.*mandatory", reason))
        self.assertRaises(InvalidInputError, self.command.run)
        self.command.locator = "localhost"
        self.command.package = None
        ready, reason = self.command.ready_to_run()
        self.assertEqual(ready, False)
        self.assertTrue(re.search("PACKAGE.*mandatory", reason))
        self.assertRaises(InvalidInputError, self.command.run)
    def test_invalid_args(self, *_):
        """Negative tests for various arguments."""
        with self.assertRaises(InvalidInputError):
            self.command.configuration = ""
        with self.assertRaises(InvalidInputError):
            self.command.configuration = "X"
        with self.assertRaises(InvalidInputError):
            self.command.power_on = "frobozz"
    def test_ovftool_args_basic(self, mock_check_call, *_):
        """Test that ovftool is called with the basic arguments."""
        self.command.locator = "localhost"
        self.command.run()
        mock_check_call.assert_called_once_with([
            'ovftool',
            '--deploymentOption=4CPU-4GB-3NIC', # default configuration
            '--name=input',
            self.input_ovf,
            'vi://{user}:passwd@localhost'.format(user=getpass.getuser())
        ])
        self.assertLogged(**self.VSPHERE_ENV_WARNING)
        self.assertLogged(**self.SERIAL_PORT_NOT_FIXED)
    def test_ovftool_args_advanced(self, mock_check_call, *_):
        """Test that ovftool is called with more involved arguments."""
        self.command.locator = "localhost/host/foo"
        self.command.datastore = "datastore1"
        self.command.configuration = "2CPU-2GB-1NIC"
        self.command.vm_name = "myVM"
        self.command.power_on = True
        self.command.ovftool_args = "--overwrite --vService:'A B=C D'"
        self.command.username = "u"
        self.command.password = "p"
        self.command.network_map = ["VM Network=VM Network"]
        self.command.run()
        mock_check_call.assert_called_once_with([
            'ovftool',
            '--overwrite',
            '--vService:A B=C D',
            '--deploymentOption=2CPU-2GB-1NIC',
            '--net:VM Network=VM Network',
            '--name=myVM',
            '--powerOn',
            '--datastore=datastore1',
            self.input_ovf,
            'vi://u:p@localhost/host/foo',
        ])
        self.assertLogged(**self.SERIAL_PORT_NOT_FIXED)
    def test_ovftool_vsphere_env_fixup(self, mock_check_call, *_):
        """Test fixup of environment when deploying directly to vSphere."""
        # With 4.0.0 (our default) and no power_on, there's no fixup.
        # This is tested by test_ovftool_args_basic() above.
        # With 4.0.0 and power_on, we fixup when deploying to vSphere:
        self.command.locator = "vsphere"
        self.command.power_on = True
        self.command.run()
        mock_check_call.assert_called_once_with([
            'ovftool',
            '--X:injectOvfEnv',
            '--deploymentOption=4CPU-4GB-3NIC', # default configuration
            '--name=input',
            '--powerOn',
            self.input_ovf,
            'vi://{user}:passwd@vsphere'.format(user=getpass.getuser()),
        ])
        self.assertLogged(**self.SERIAL_PORT_NOT_FIXED)
        # Make sure we DON'T see the ENV_WARNING message
        self.logging_handler.assertNoLogsOver(logging.INFO)
        # With 4.0.0, we don't (need to) fixup when deploying to vCenter.
        # This is tested by test_ovftool_args_advanced() above.
        # With <4.0.0, we don't (can't) fixup, regardless.
        # Discard cached information and update the info that will be returned
        # pylint: disable=protected-access
        mock_check_call.reset_mock()
        self.command.ovftool._version = StrictVersion("3.5.0")
        self.command.run()
        mock_check_call.assert_called_once_with([
            'ovftool',
            # Nope! #'--X:injectOvfEnv',
            '--deploymentOption=4CPU-4GB-3NIC', # default configuration
            '--name=input',
            '--powerOn',
            self.input_ovf,
            'vi://{user}:passwd@vsphere'.format(user=getpass.getuser()),
        ])
        self.assertLogged(**self.OVFTOOL_VER_TOO_LOW)
        self.assertLogged(**self.SERIAL_PORT_NOT_FIXED)
    def test_serial_fixup_invalid_host(self, *_):
        """Failure in fixup_serial_ports() connecting to an invalid host."""
        self.command.locator = "localhost"
        self.command.serial_connection = ['tcp::2222', 'tcp::2223']
        # pyvmomi 6.0.0.2016 and earlier raises ConnectionError,
        # pyvmomi 6.0.0.2016.4 and later raises socket.error
        with self.assertRaises((requests.exceptions.ConnectionError,
                                socket.error)) as catcher:
            self.command.run()
        # In requests 2.7 and earlier, we get the errno,
        # while in requests 2.8+, it's munged into a string only
        if catcher.exception.errno is not None:
            self.assertEqual(catcher.exception.errno, errno.ECONNREFUSED)
        self.assertRegex(
            catcher.exception.strerror,
            "(Error connecting to localhost:443: )?.*Connection refused")
        self.assertLogged(**self.VSPHERE_ENV_WARNING)
    # Shared class-level mocks modeling the pyVmomi object hierarchy
    # (ServiceInstance -> content -> viewManager -> container view); the
    # stubbed tests below install VMs into mock_cv.view.
    mock_si = mock.create_autospec(
        COT.commands.deploy_esxi.vim.ServiceInstance)
    mock_sic = mock.create_autospec(
        COT.commands.deploy_esxi.vim.ServiceInstanceContent)
    mock_si.RetrieveContent.return_value = mock_sic
    mock_sic.rootFolder = 'vim.Folder:group-d1'
    mock_v = mock.create_autospec(COT.commands.deploy_esxi.vim.ViewManager)
    mock_sic.viewManager = mock_v
    mock_cv = mock.create_autospec(
        COT.commands.deploy_esxi.vim.view.ContainerView)
    mock_v.CreateContainerView.return_value = mock_cv
    @mock.patch('pyVim.connect.__FindSupportedVersion', return_value=['vim25'])
    @mock.patch('pyVim.connect.__Login', return_value=(mock_si, None))
    def test_serial_fixup_stubbed(self, *_):
        """Test fixup_serial_ports by mocking pyVmomi library."""
        self.command.locator = "localhost"
        self.command.vm_name = "mockery"
        mock_vm0 = mock.create_autospec(
            COT.commands.deploy_esxi.vim.VirtualMachine)
        mock_vm0.name = "wrong_vm"
        mock_vm = mock.create_autospec(
            COT.commands.deploy_esxi.vim.VirtualMachine)
        mock_vm.name = self.command.vm_name
        self.mock_cv.view = [mock_vm0, mock_vm]
        self.command.serial_connection = ['tcp:localhost:2222',
                                          'tcp::2223,server',
                                          '/dev/ttyS0']
        self.command.run()
        self.assertLogged(**self.VSPHERE_ENV_WARNING)
        self.assertTrue(mock_vm.ReconfigVM_Task.called)
        # TODO: any other validation of args or kwargs?
        _args, kwargs = mock_vm.ReconfigVM_Task.call_args
        spec = kwargs['spec']
        self.assertEqual(3, len(spec.deviceChange))
        change1, change2, change3 = spec.deviceChange
        self.assertEqual('add', change1.operation)
        self.assertEqual('add', change2.operation)
        self.assertEqual('add', change3.operation)
        self.assertEqual('tcp://localhost:2222',
                         change1.device.backing.serviceURI)
        self.assertEqual('client', change1.device.backing.direction)
        self.assertEqual('tcp://:2223', change2.device.backing.serviceURI)
        self.assertEqual('server', change2.device.backing.direction)
        self.assertEqual('/dev/ttyS0', change3.device.backing.deviceName)
        # 'file:' backings are not implemented; expect NotImplementedError.
        self.command.serial_connection = [
            'file:/tmp/foo.txt,datastore=datastore1'
        ]
        self.assertRaises(NotImplementedError,
                          self.command.run)
        self.assertLogged(**self.VSPHERE_ENV_WARNING)
        self.assertLogged(**self.SERIAL_PORT_NOT_FIXED)
        self.assertLogged(**self.SESSION_FAILED)
    @mock.patch('pyVim.connect.__FindSupportedVersion', return_value=['vim25'])
    @mock.patch('pyVim.connect.__Login', return_value=(mock_si, None))
    @mock.patch('COT.ui.UI.confirm_or_die', return_value=True)
    def test_serial_fixup_stubbed_create(self, mock_cod, *_):
        """Test fixup_serial_ports creation of serial ports not in the OVF."""
        self.command.package = self.minimal_ovf
        self.command.locator = "localhost"
        self.command.vm_name = "mockery"
        mock_vm = mock.create_autospec(
            COT.commands.deploy_esxi.vim.VirtualMachine)
        mock_vm.name = self.command.vm_name
        self.mock_cv.view = [mock_vm]
        self.command.serial_connection = ['tcp:localhost:2222']
        self.command.run()
        self.assertTrue(mock_vm.ReconfigVM_Task.called)
        self.assertTrue(mock_cod.called)
        # TODO: any other validation of args or kwargs?
        _args, kwargs = mock_vm.ReconfigVM_Task.call_args
        spec = kwargs['spec']
        self.assertEqual(1, len(spec.deviceChange))
        change1 = spec.deviceChange[0]
        self.assertEqual('add', change1.operation)
        self.assertEqual('tcp://localhost:2222',
                         change1.device.backing.serviceURI)
        self.assertEqual('client', change1.device.backing.direction)
    @mock.patch('pyVim.connect.__FindSupportedVersion', return_value=['vim25'])
    @mock.patch('pyVim.connect.__Login', return_value=(mock_si, None))
    def test_serial_fixup_stubbed_vm_not_found(self, *_):
        """Test fixup_serial_ports error case where the VM isn't found."""
        self.command.locator = "localhost"
        self.command.vm_name = "mockery"
        mock_vm0 = mock.create_autospec(
            COT.commands.deploy_esxi.vim.VirtualMachine)
        mock_vm0.name = "wrong_vm"
        mock_vm1 = mock.create_autospec(
            COT.commands.deploy_esxi.vim.VirtualMachine)
        mock_vm1.name = "also_wrong"
        self.mock_cv.view = [mock_vm0, mock_vm1]
        self.command.serial_connection = ['tcp:localhost:2222',
                                          'tcp::2223,server',
                                          '/dev/ttyS0']
        self.assertRaises(LookupError, self.command.run)
        self.assertLogged(**self.VSPHERE_ENV_WARNING)
        self.assertLogged(**self.SESSION_FAILED)
    @mock.patch('COT.commands.deploy_esxi.SmartConnection.__enter__')
    @unittest.skipUnless(hasattr(ssl, '_create_unverified_context'),
                         "Only applicable to Python 2.7+ and 3.4+")
    def test_serial_fixup_ssl_failure(self, mock_parent, *_):
        """Test SSL failure in pyVmomi."""
        mock_parent.side_effect = vim.fault.HostConnectFault(
            msg="certificate verify failed")
        self.command.locator = "localhost"
        self.command.serial_connection = ['tcp://localhost:2222']
        # Try twice - first time with default behavior encounters certificate
        # failure, second time (with self-signed certificates accepted)
        # encounters the same error again and raises it
        self.assertRaises(vim.fault.HostConnectFault,
                          self.command.fixup_serial_ports)
        self.assertEqual(mock_parent.call_count, 2)
        self.assertLogged(**self.BAD_CERTIFICATE)
    @mock.patch('COT.commands.deploy_esxi.SmartConnection.__enter__')
    def test_serial_fixup_other_hostconnectfault(self, mock_parent, *_):
        """Test HostConnectFault other than SSL failure."""
        mock_parent.side_effect = vim.fault.HostConnectFault(
            msg="Malformed response while querying for local ticket: foo")
        self.command.locator = "localhost"
        self.command.serial_connection = ['tcp://localhost:2222']
        # Try once and fail immediately
        self.assertRaises(vim.fault.HostConnectFault,
                          self.command.fixup_serial_ports)
        self.assertEqual(mock_parent.call_count, 1)
    @mock.patch('COT.commands.deploy_esxi.SmartConnection.__enter__')
    def test_serial_fixup_connectionerror(self, mock_parent, *_):
        """Test generic ConnectionError handling."""
        mock_parent.side_effect = requests.exceptions.ConnectionError
        self.command.locator = "localhost"
        self.command.serial_connection = ['tcp://localhost:2222']
        with self.assertRaises(requests.exceptions.ConnectionError) as catcher:
            self.command.fixup_serial_ports()
        self.assertEqual(catcher.exception.errno, None)
        self.assertEqual(catcher.exception.strerror,
                         "Error connecting to localhost:443: None")
class TestSmarterConnection(COTTestCase):
    """Test cases for SmarterConnection class methods."""

    def test_unwrap_connection_error_27(self):
        """Unwrap an error like a ConnectionError raised by requests 2.7."""
        # requests 2.7 nests the underlying socket error two levels deep.
        errnum, inner_message = SmarterConnection.unwrap_connection_error(
            IOError(
                Exception(
                    'Connection aborted.',
                    IOError(61, 'Connection refused')
                )
            )
        )
        self.assertEqual(errnum, 61)
        self.assertEqual(inner_message, "Connection refused")

    class MaxRetryError28(Exception):
        """Mock of requests 2.8 MaxRetryError exception class."""

        def __init__(self, pool, url, reason):
            """Create fake exception."""
            self.pool = pool
            self.url = url
            self.reason = reason
            self.message = ("Max retries exceeded with url: %s (Caused by %r)"
                            % (url, reason))
            # Name the class explicitly rather than super(self.__class__, ...)
            # -- the latter recurses infinitely if this class is subclassed.
            super(TestSmarterConnection.MaxRetryError28, self).__init__(
                "%s: %s" % (pool, self.message))

    class NewConnectionError28(Exception):
        """Mock of requests 2.8 NewConnectionError exception class."""

        def __init__(self, pool, message):
            """Create fake exception."""
            self.pool = pool
            self.message = message
            # Explicit class reference avoids the super(self.__class__, ...)
            # infinite-recursion pitfall under subclassing.
            super(TestSmarterConnection.NewConnectionError28, self).__init__(
                "%s: %s" % (pool, message))

    def test_unwrap_connection_error_28(self):
        """Unwrap an error like a ConnectionError raised by requests 2.8."""
        errnum, inner_message = SmarterConnection.unwrap_connection_error(
            self.MaxRetryError28(
                pool="HTTPSConnectionPool(host='localhost', port=443)",
                url="//sdk/vimServiceVersions.xml",
                reason=self.NewConnectionError28(
                    pool="VerifiedHTTPSConnection",
                    message="Failed to establish a new connection: "
                            "[Errno 61] Connection refused")
            )
        )
        self.assertEqual(errnum, None)
        self.assertEqual(inner_message,
                         "Failed to establish a new connection: "
                         "[Errno 61] Connection refused")
| 42.81713 | 79 | 0.647727 |
728278e740e3f9cdbb508d27c4bcacd4c4309c99 | 1,165 | py | Python | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DescribeMetricRuleTargetsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DescribeMetricRuleTargetsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DescribeMetricRuleTargetsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeMetricRuleTargetsRequest(RpcRequest):
    """RPC request for the CMS ``DescribeMetricRuleTargets`` API (2019-01-01)."""

    def __init__(self):
        # Product 'Cms', API version, action name, endpoint type 'cms'.
        RpcRequest.__init__(
            self, 'Cms', '2019-01-01', 'DescribeMetricRuleTargets', 'cms')

    def get_RuleId(self):
        """Return the RuleId query parameter, or None if unset."""
        return self.get_query_params().get('RuleId')

    def set_RuleId(self, RuleId):
        """Set the RuleId query parameter."""
        self.add_query_param('RuleId', RuleId)
92e089dedb45b52802162e9ba31c5cbca369f8fb | 26,552 | py | Python | billforward/apis/cybersourcetokens_api.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/apis/cybersourcetokens_api.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/apis/cybersourcetokens_api.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CybersourcetokensApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_cybersource_token(self, cybersource_token, **kwargs):
"""
Create a cybersource-token.
{\"nickname\":\"Create a cybersource-token\",\"request\":\"createCybersourceTokenRequest.html\",\"response\":\"createCybersourceTokenResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_cybersource_token(cybersource_token, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param MutableBillingEntity cybersource_token: The cybersource-token object to be created. (required)
:return: CybersourceTokenPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_cybersource_token_with_http_info(cybersource_token, **kwargs)
else:
(data) = self.create_cybersource_token_with_http_info(cybersource_token, **kwargs)
return data
def create_cybersource_token_with_http_info(self, cybersource_token, **kwargs):
"""
Create a cybersource-token.
{\"nickname\":\"Create a cybersource-token\",\"request\":\"createCybersourceTokenRequest.html\",\"response\":\"createCybersourceTokenResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_cybersource_token_with_http_info(cybersource_token, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param MutableBillingEntity cybersource_token: The cybersource-token object to be created. (required)
:return: CybersourceTokenPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cybersource_token']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cybersource_token" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cybersource_token' is set
if ('cybersource_token' not in params) or (params['cybersource_token'] is None):
raise ValueError("Missing the required parameter `cybersource_token` when calling `create_cybersource_token`")
resource_path = '/cybersource-tokens'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cybersource_token' in params:
body_params = params['cybersource_token']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CybersourceTokenPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_cybersource_token_by_id(self, token_id, **kwargs):
"""
Returns a single cybersource-token, specified by the token-ID parameter.
{\"nickname\":\"NICKNAME\",\"response\":\"getCybersourceTokenByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_cybersource_token_by_id(token_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str token_id: The unique sting ID of the cybersource token. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CybersourceTokenPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_cybersource_token_by_id_with_http_info(token_id, **kwargs)
else:
(data) = self.get_cybersource_token_by_id_with_http_info(token_id, **kwargs)
return data
def get_cybersource_token_by_id_with_http_info(self, token_id, **kwargs):
"""
Returns a single cybersource-token, specified by the token-ID parameter.
{\"nickname\":\"NICKNAME\",\"response\":\"getCybersourceTokenByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_cybersource_token_by_id_with_http_info(token_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str token_id: The unique sting ID of the cybersource token. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CybersourceTokenPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cybersource_token_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token_id' is set
if ('token_id' not in params) or (params['token_id'] is None):
raise ValueError("Missing the required parameter `token_id` when calling `get_cybersource_token_by_id`")
resource_path = '/cybersource-tokens/{token-ID}'.replace('{format}', 'json')
path_params = {}
if 'token_id' in params:
path_params['token-ID'] = params['token_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CybersourceTokenPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_cybersource_token_by_recurring_subscription_id(self, recurring_subscription_id, **kwargs):
    """Fetch the cybersource-token attached to a recurring subscription.

    Synchronous by default; pass a ``callback`` keyword argument to run the
    request on a background thread instead, in which case the worker thread
    is returned rather than the payload.

    :param str recurring_subscription_id: The
        recurring-subscription-info-subscription-ID of the cybersource
        token. (required)
    :param list[str] organizations: Organization-IDs restricting the scope
        of the call. (optional)
    :param callback function: Invoked with the response when running
        asynchronously. (optional)
    :return: CybersourceTokenPagedMetadata, or the request thread when
        called asynchronously.
    """
    # Callers of this convenience wrapper only ever want the deserialized
    # payload, never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether synchronous (delegate returns the payload) or asynchronous
    # (delegate returns the worker thread), the delegate's return value is
    # exactly what the caller needs.
    return self.get_cybersource_token_by_recurring_subscription_id_with_http_info(
        recurring_subscription_id, **kwargs)
def get_cybersource_token_by_recurring_subscription_id_with_http_info(self, recurring_subscription_id, **kwargs):
    """
    Returns a single cybersource-token, specified by the recurring-subscription-ID parameter.
    {\"nickname\":\"NICKNAME\",\"response\":\"getCybersourceTokenByID.html\"}
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_cybersource_token_by_recurring_subscription_id_with_http_info(recurring_subscription_id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str recurring_subscription_id: The recurring-subscription-info-subscription-ID of the cybersource token. (required)
    :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
    :return: CybersourceTokenPagedMetadata
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    all_params = ['recurring_subscription_id', 'organizations']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # NOTE: locals() snapshots every local defined so far (self, the
    # positional argument, kwargs, all_params). Only specific keys are read
    # below, so the extra entries are harmless -- but statement order matters.
    params = locals()
    # Reject unknown keyword arguments, then flatten known ones into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_cybersource_token_by_recurring_subscription_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'recurring_subscription_id' is set
    if ('recurring_subscription_id' not in params) or (params['recurring_subscription_id'] is None):
        raise ValueError("Missing the required parameter `recurring_subscription_id` when calling `get_cybersource_token_by_recurring_subscription_id`")

    resource_path = '/cybersource-tokens/recurring-subscription-info/{recurring-subscription-ID}'.replace('{format}', 'json')
    # Path and query parameters for the request.
    path_params = {}
    if 'recurring_subscription_id' in params:
        path_params['recurring-subscription-ID'] = params['recurring_subscription_id']
    query_params = {}
    if 'organizations' in params:
        query_params['organizations'] = params['organizations']
    header_params = {}
    form_params = []
    local_var_files = {}
    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['text/plain'])
    # Authentication setting
    auth_settings = []
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CybersourceTokenPagedMetadata',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def retire_cybersource_token(self, token_id, organizations, **kwargs):
    """Retire (delete) the cybersource token identified by ``token_id``.

    Synchronous by default; pass a ``callback`` keyword argument to run the
    request on a background thread instead, in which case the worker thread
    is returned rather than the payload.

    :param str token_id: ID of the cybersource-token. (required)
    :param list[str] organizations: Organization-IDs restricting the scope
        of the call. (required)
    :param callback function: Invoked with the response when running
        asynchronously. (optional)
    :return: CybersourceTokenPagedMetadata, or the request thread when
        called asynchronously.
    """
    # Callers of this convenience wrapper only ever want the deserialized
    # payload, never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether synchronous (delegate returns the payload) or asynchronous
    # (delegate returns the worker thread), the delegate's return value is
    # exactly what the caller needs.
    return self.retire_cybersource_token_with_http_info(
        token_id, organizations, **kwargs)
def retire_cybersource_token_with_http_info(self, token_id, organizations, **kwargs):
    """
    Retires the cybersource token specified by the token-ID parameter.
    {\"nickname\":\"NICKNAME\",\"response\":\"deleteCybersourceToken.html\"}
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.retire_cybersource_token_with_http_info(token_id, organizations, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str token_id: ID of the cybersource-token. (required)
    :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. (required)
    :return: CybersourceTokenPagedMetadata
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    all_params = ['token_id', 'organizations']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # NOTE: locals() snapshots every local defined so far (self, the
    # positional arguments, kwargs, all_params). Only specific keys are read
    # below, so the extra entries are harmless -- but statement order matters.
    params = locals()
    # Reject unknown keyword arguments, then flatten known ones into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method retire_cybersource_token" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'token_id' is set
    if ('token_id' not in params) or (params['token_id'] is None):
        raise ValueError("Missing the required parameter `token_id` when calling `retire_cybersource_token`")
    # verify the required parameter 'organizations' is set
    if ('organizations' not in params) or (params['organizations'] is None):
        raise ValueError("Missing the required parameter `organizations` when calling `retire_cybersource_token`")

    resource_path = '/cybersource-tokens/{token-ID}'.replace('{format}', 'json')
    # Path and query parameters for the request.
    path_params = {}
    if 'token_id' in params:
        path_params['token-ID'] = params['token_id']
    query_params = {}
    if 'organizations' in params:
        query_params['organizations'] = params['organizations']
    header_params = {}
    form_params = []
    local_var_files = {}
    # DELETE request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['text/plain'])
    # Authentication setting
    auth_settings = []
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CybersourceTokenPagedMetadata',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def update_cybersource_token(self, cybersource_token, **kwargs):
    """Update an existing cybersource-token.

    Synchronous by default; pass a ``callback`` keyword argument to run the
    request on a background thread instead, in which case the worker thread
    is returned rather than the payload.

    :param MutableBillingEntity cybersource_token: The cybersource-token
        object to be updated. (required)
    :param callback function: Invoked with the response when running
        asynchronously. (optional)
    :return: CybersourceTokenPagedMetadata, or the request thread when
        called asynchronously.
    """
    # Callers of this convenience wrapper only ever want the deserialized
    # payload, never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether synchronous (delegate returns the payload) or asynchronous
    # (delegate returns the worker thread), the delegate's return value is
    # exactly what the caller needs.
    return self.update_cybersource_token_with_http_info(
        cybersource_token, **kwargs)
def update_cybersource_token_with_http_info(self, cybersource_token, **kwargs):
    """
    Update a cybersource-token.
    {\"nickname\":\"Update a cybersource-token\",\"request\":\"updateCybersourceTokenRequest.html\",\"response\":\"updateCybersourceTokenResponse.html\"}
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_cybersource_token_with_http_info(cybersource_token, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param MutableBillingEntity cybersource_token: The cybersource-token object to be updated. (required)
    :return: CybersourceTokenPagedMetadata
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    all_params = ['cybersource_token']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # NOTE: locals() snapshots every local defined so far (self, the
    # positional argument, kwargs, all_params). Only specific keys are read
    # below, so the extra entries are harmless -- but statement order matters.
    params = locals()
    # Reject unknown keyword arguments, then flatten known ones into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_cybersource_token" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'cybersource_token' is set
    if ('cybersource_token' not in params) or (params['cybersource_token'] is None):
        raise ValueError("Missing the required parameter `cybersource_token` when calling `update_cybersource_token`")

    resource_path = '/cybersource-tokens'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    # The token object itself is the PUT request body.
    body_params = None
    if 'cybersource_token' in params:
        body_params = params['cybersource_token']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['text/xml', 'application/xml', 'application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = []
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CybersourceTokenPagedMetadata',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
| 45.23339 | 157 | 0.60809 |
c2d7626ee5fd366f333e36f9d3c333865bf1cac9 | 2,192 | py | Python | control/initial_data_loader.py | yusenjeng/invintdex | ea68477d248ca8bdb4d62fd09ee4008e01cb831c | [
"MIT"
] | null | null | null | control/initial_data_loader.py | yusenjeng/invintdex | ea68477d248ca8bdb4d62fd09ee4008e01cb831c | [
"MIT"
] | null | null | null | control/initial_data_loader.py | yusenjeng/invintdex | ea68477d248ca8bdb4d62fd09ee4008e01cb831c | [
"MIT"
] | null | null | null | import csv
import pandas
import pandas_datareader
import pandas_datareader.data as web
from control.models import Company, ETF, SP500, Quote, ETFQuote, IS, BS, CF
def loadETF():
    """Upsert ETF rows from data/TICKER_ETF.csv (columns: symbol, name, tag)."""
    print('[listETF]', 'loading ETF')
    with open('data/TICKER_ETF.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            # The header row starts with the literal string 'Symbol'.
            if row[0] == 'Symbol':
                continue
            etf, _ = ETF.objects.get_or_create(ticker=row[0])
            etf.name = row[1]
            etf.tag = row[2]
            etf.save()
            print(etf)
def loadNASDAQ_NYSE():
    """Upsert Company rows from the NASDAQ and NYSE ticker CSVs.

    The two exchange files share a layout except for the column positions of
    the non-name fields, so a single helper is parameterized with the indices.
    """
    def _load(path, label, exchange, cols):
        # cols = (name, sector, industry, ipoyear, summary) column indices.
        print('[listNASDAQ_NYSE]', label)
        with open(path) as f:
            for row in csv.reader(f, delimiter=','):
                # The header row starts with the literal string 'Symbol'.
                if row[0] == 'Symbol':
                    continue
                company, _ = Company.objects.get_or_create(ticker=row[0])
                company.name = row[cols[0]]
                company.sector = row[cols[1]]
                company.industry = row[cols[2]]
                company.ipoyear = row[cols[3]]
                company.exchange = exchange
                company.summary = row[cols[4]]
                company.save()

    _load('data/TICKER_NASDAQ.csv', 'loading NASDAQ', 'NASDAQ', (1, 6, 7, 5, 8))
    _load('data/TICKER_NYSE.csv', 'loading NYSE', 'NYSE', (1, 5, 6, 4, 7))
def loadSP500():
    # Load the SPY holdings spreadsheet and upsert each constituent as a
    # Company row plus an SP500 membership row.
    df = pandas.read_excel('data/SPY_All_Holdings.xls')
    print(df.columns)
    for i in df.index:
        ticker = df['Identifier'][i]
        name = df['Name'][i]
        sector = df['Sector'][i]
        # pandas reads blank cells as NaN; comparing the string form skips them.
        if str(ticker) != 'nan':
            print(ticker, sector, name)
            doc, created = Company.objects.get_or_create(ticker=ticker)
            doc.name = name
            doc.sector = sector
            doc.save()
            # Record SP500 membership keyed by the same ticker.
            doc, created = SP500.objects.get_or_create(ticker_id=ticker)
            doc.save()
pass | 30.027397 | 75 | 0.534672 |
f00c0763a9b52ac1161335e6fa0da7bb9be12248 | 6,628 | py | Python | allennlp/data/token_indexers/token_indexer.py | YerevaNN/allennlp | 374acec5e62d6d74586b18a3d5bb2f9b5a169da4 | [
"Apache-2.0"
] | 1 | 2021-01-28T09:42:02.000Z | 2021-01-28T09:42:02.000Z | allennlp/data/token_indexers/token_indexer.py | YerevaNN/allennlp | 374acec5e62d6d74586b18a3d5bb2f9b5a169da4 | [
"Apache-2.0"
] | null | null | null | allennlp/data/token_indexers/token_indexer.py | YerevaNN/allennlp | 374acec5e62d6d74586b18a3d5bb2f9b5a169da4 | [
"Apache-2.0"
] | 2 | 2021-01-19T10:58:28.000Z | 2022-02-23T19:09:36.000Z | from typing import Any, Dict, List
import math
import torch
from allennlp.common import Registrable
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
# An indexed token list represents the arguments that will be passed to a TokenEmbedder
# corresponding to this TokenIndexer. Each argument that the TokenEmbedder needs will have one
# entry in the IndexedTokenList dictionary, and that argument will typically be a list of integers
# (for single ID word embeddings) or a nested list of integers (for character ID word embeddings),
# though it could also be a mask, or any other data that you want to pass.
# This is a type alias only, purely for readability of the signatures below.
IndexedTokenList = Dict[str, List[Any]]
class TokenIndexer(Registrable):
    """
    A `TokenIndexer` determines how string tokens get represented as arrays of
    indices in a model. Implementations use a
    :class:`~allennlp.data.vocabulary.Vocabulary` to map strings to numerical
    values and then build the actual arrays. A token might be represented as a
    single id (e.g. "cat" becomes 34), as a list of character ids (e.g. "cat"
    becomes [23, 10, 18]), or in any other way you come up with (e.g. a special
    encoding of structured input in your data arrays).

    # Parameters

    token_min_padding_length : `int`, optional (default=`0`)
        The smallest padded length this indexer will ever report. For example,
        the minimum padding length of :class:`SingleIdTokenIndexer` is the
        largest size of filter when using :class:`CnnEncoder`.
        If you set this for one TokenIndexer, you likely have to set it for
        every :class:`TokenIndexer` on the same field, otherwise you'll get
        mismatched tensor sizes.
    """

    default_implementation = "single_id"
    has_warned_for_as_padded_tensor = False

    def __init__(self, token_min_padding_length: int = 0,
                 pad_to_multiple_of: int = 1) -> None:
        # Floor applied to every length reported by `get_padding_lengths`.
        self._token_min_padding_length: int = token_min_padding_length
        # Reported lengths are rounded up to a multiple of this value.
        self._pad_to_multiple_of = pad_to_multiple_of

    def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
        """
        Increment `counter` for every vocabulary item present in `token`.
        The :class:`Vocabulary` assigns indices to whatever strings appear in
        the training data (possibly with frequency filtering and an OOV token),
        and it gathers those strings through this method. A single-id
        representation counts the token itself; a token-characters
        representation counts each character of the token.
        """
        raise NotImplementedError

    def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary) -> IndexedTokenList:
        """
        Convert a list of tokens into an `IndexedTokenList`. That could be one
        vocabulary id per token, one id per character, or -- e.g. with byte-pair
        encoding, where tokens and indices don't map cleanly -- some more
        complex data structure.
        """
        raise NotImplementedError

    def indices_to_tokens(
        self, indexed_tokens: IndexedTokenList, vocabulary: Vocabulary
    ) -> List[Token]:
        """
        Inverse of `tokens_to_indices`: rebuild the list of tokens from an
        `IndexedTokenList`.
        """
        raise NotImplementedError

    def get_empty_token_list(self) -> IndexedTokenList:
        """
        Return an `already indexed` version of an empty token list -- typically
        just an empty list under each key the indexer uses.
        """
        raise NotImplementedError

    def get_padding_lengths(self, indexed_tokens: IndexedTokenList) -> Dict[str, int]:
        """
        Return a padding dictionary for `indexed_tokens` giving every length
        that needs padding. For plain lists of single-id tokens this default
        implementation (list length, floored by `token_min_padding_length` and
        rounded up to `pad_to_multiple_of`) is all you need; richer
        representations, such as per-token character ids, must override it.
        """
        def _padded_length(raw_length: int) -> int:
            length = max(raw_length, self._token_min_padding_length)
            # Round up to the closest multiple above.
            return math.ceil(length / self._pad_to_multiple_of) * self._pad_to_multiple_of

        return {key: _padded_length(len(token_list))
                for key, token_list in indexed_tokens.items()}

    def as_padded_tensor_dict(
        self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
    ) -> Dict[str, torch.Tensor]:
        """
        Pad (possibly truncating, depending on `padding_lengths`) each list in
        `tokens` and return the result as a `Dict[str, torch.Tensor]`, one
        entry per argument of the matching `TokenEmbedder.forward()` (the
        argument name there is the key here). The base implementation just
        builds one padded tensor per list -- boolean lists become `BoolTensor`s,
        everything else a `LongTensor`; override it if your `TokenIndexer`
        needs more complex logic than that.
        """
        tensor_dict = {}
        for key, values in tokens.items():
            desired_length = padding_lengths[key]
            if values and isinstance(values[0], bool):
                padded = pad_sequence_to_length(
                    values, desired_length, default_value=lambda: False
                )
                tensor_dict[key] = torch.BoolTensor(padded)
            else:
                tensor_dict[key] = torch.LongTensor(
                    pad_sequence_to_length(values, desired_length)
                )
        return tensor_dict

    def __eq__(self, other) -> bool:
        # Indexers compare equal when they are the same class with the same
        # configuration; for unrelated classes, defer via NotImplemented.
        if not isinstance(self, other.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__
| 49.462687 | 100 | 0.694176 |
a64b9588b6c9bdc8a146bd66740d040adfd87d8a | 1,143 | py | Python | fabfile.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | fabfile.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | fabfile.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import with_statement
from fabric.api import run, sudo, cd, env, prefix, local
from fabric.contrib.console import confirm
env.hosts = ['localshot.org.il']
env.user = 'oshot'
def loadflatb():
    """Load the flatblocks fixture on the remote host inside its virtualenv."""
    with cd('~oshot/src/oshot'), prefix('. ENV/bin/activate'):
        run('honcho run python manage.py loaddata fixtures/flatblocks.json')
def dumpflatb():
    """Dump the local flatblocks data into the fixture file."""
    local('python manage.py dumpdata -n > fixtures/flatblocks.json')
def refresh(branch='master'):
    """Push *branch*, pull it on the server, and restart the service."""
    local('git push origin %s' % branch)
    with cd('~oshot/src/oshot'):
        run('git pull origin %s' % branch)
    sudo('restart oshot')
def deploy(branch='master'):
    """Full deploy of *branch*: push, pull, install, test, migrate, restart.

    NOTE(review): the original indentation was lost in this copy; the nesting
    below mirrors `refresh` (sudo outside the `cd` context) -- confirm against
    the original fabfile.
    """
    local('git push origin %s' % branch)
    with cd('~oshot/src/oshot'):
        run('git pull origin %s' % branch)
        with prefix('. ENV/bin/activate'):
            run('pip install -r requirements.txt')
            run('python manage.py test')
            run('honcho run python manage.py syncdb --no-initial-data')
            run('honcho run python manage.py migrate --no-initial-data')
            run('honcho run python manage.py collectstatic --noinput')
    sudo('restart oshot')
| 34.636364 | 80 | 0.64392 |
b6e0b67c016a3a8dcbed3e7a1782c900968f19f8 | 287 | py | Python | catnip/middleware.py | ObjectifLibre/catnip | 5d89c92de0396b1e912bb498af88687dd046718d | [
"Apache-2.0"
] | 2 | 2020-03-13T12:45:10.000Z | 2020-04-01T12:04:49.000Z | catnip/middleware.py | ObjectifLibre/catnip | 5d89c92de0396b1e912bb498af88687dd046718d | [
"Apache-2.0"
] | 1 | 2020-07-24T21:54:08.000Z | 2020-07-24T21:54:08.000Z | catnip/middleware.py | ObjectifLibre/catnip | 5d89c92de0396b1e912bb498af88687dd046718d | [
"Apache-2.0"
] | 1 | 2020-05-11T19:19:12.000Z | 2020-05-11T19:19:12.000Z | from catnip import backend
# Run catnip's monkey-patch at import time -- presumably replacing how the
# auth middleware resolves request.user with catnip's backend implementation
# (confirm in catnip.backend).
backend.patch_middleware_get_user()
class AuthPatchMiddleware(object):
    """Pass-through Django middleware.

    It does no processing of its own; every request is forwarded unchanged to
    the next handler in the chain. Its presence simply ensures this module is
    imported, which triggers the ``patch_middleware_get_user()`` call above.
    """

    def __init__(self, get_response):
        # Next middleware/view callable in the chain.
        self.get_response = get_response

    def __call__(self, request):
        # Delegate straight through -- intentionally a no-op middleware.
        return self.get_response(request)
| 20.5 | 41 | 0.724739 |
7a6d6a858bdc8cf64b4b33e8e76c7e54ea3f63db | 1,629 | py | Python | example/java/python_script/Color_recognition.py | JiekangHuang/VMX_Pi_Example | aefafe85824df21458d3433ffe7ab1743ac8bbd7 | [
"MIT"
] | null | null | null | example/java/python_script/Color_recognition.py | JiekangHuang/VMX_Pi_Example | aefafe85824df21458d3433ffe7ab1743ac8bbd7 | [
"MIT"
] | null | null | null | example/java/python_script/Color_recognition.py | JiekangHuang/VMX_Pi_Example | aefafe85824df21458d3433ffe7ab1743ac8bbd7 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
ColorData = 'color not Found'
ColorPixelPercentage = '0'
lower_blue = np.array([100,43,46])
upper_blue = np.array([124,255,255])
lower_green = np.array([35,43,46])
upper_green = np.array([77,255,255])
lower_red = np.array([0, 43, 46])
upper_red = np.array([10, 255, 255])
lower_yellow = np.array([26, 43, 46])
upper_yellow = np.array([34, 255, 255])
lower_black = np.array([0, 0, 0])
upper_black = np.array([180, 255, 46])
boundaries = [
['red', (lower_red, upper_red)],
['green', (lower_green, upper_green)],
['blue', (lower_blue, upper_blue)],
['yellow', (lower_yellow, upper_yellow)],
['black', (lower_black, upper_black)],
]
cap = cv2.VideoCapture(0)
for i in range(20):
_, img = cap.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# cv2.imwrite("test.png", img)
color_P = []
for [color, (lower, upper)] in boundaries:
# creates numpy array from boundaries
lower = np.array(lower)
upper = np.array(upper)
# finds colors in boundaries a applies a mask
mask = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(img, img, mask=mask)
tot_pixel = output.size
color_pixel = np.count_nonzero(output)
percentage = round(color_pixel * 100 / tot_pixel, 2)
color_P.append(percentage)
ColorData = boundaries[color_P.index(max(color_P))][0]
ColorPixelPercentage = max(color_P)
# print("Answer: {}".format(ColorData))
# print("Pixel percentage: {} %".format(ColorPixelPercentage))
file = open('./color.txt', 'w')
file.write(ColorData)
file.write('\n')
file.write(str(ColorPixelPercentage))
file.close()
| 25.857143 | 62 | 0.677103 |
3056f7a96a38fcf553ec8bd433694b7af23a68e6 | 2,925 | py | Python | saleor/dashboard/staff/forms.py | acabezasg/urpi-master | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | [
"BSD-3-Clause"
] | 1 | 2019-05-02T17:24:05.000Z | 2019-05-02T17:24:05.000Z | saleor/dashboard/staff/forms.py | acabezasg/urpi-master | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | [
"BSD-3-Clause"
] | 5 | 2021-03-09T16:22:37.000Z | 2022-02-10T19:10:03.000Z | saleor/dashboard/staff/forms.py | acabezasg/urpi-master | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | [
"BSD-3-Clause"
] | 1 | 2020-12-26T10:25:37.000Z | 2020-12-26T10:25:37.000Z | from django import forms
from django.utils.translation import pgettext_lazy
from ...account.models import User
from ...core.permissions import get_permissions
from ..customer.forms import get_name_placeholder
from ..forms import PermissionMultipleChoiceField
class StaffForm(forms.ModelForm):
    # Checkbox list of all assignable permissions; optional so a staff member
    # can be saved with none selected.
    user_permissions = PermissionMultipleChoiceField(
        queryset=get_permissions(),
        widget=forms.CheckboxSelectMultiple, required=False,
        label=pgettext_lazy(
            'Label above the permissions choicefield', 'Permissions'))

    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email',
                  'user_permissions', 'is_active', 'is_staff']
        labels = {
            'first_name': pgettext_lazy(
                'Customer form: Given name field', 'Given name'),
            'last_name': pgettext_lazy(
                'Customer form: Family name field', 'Family name'),
            'email': pgettext_lazy(
                'Email', 'Email'),
            'is_active': pgettext_lazy(
                'User active toggle', 'User is active'),
            'is_staff': pgettext_lazy(
                'User staff toggle', 'User is staff')}

    def __init__(self, *args, **kwargs):
        # The user argument is required: it is the *editor*, whose privileges
        # decide which fields of the edited instance are locked below. The
        # checks are cumulative and order-dependent -- the "editing yourself"
        # branch intentionally overrides the earlier ones.
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
        # Non-superusers shouldn't be able to edit a superuser's profile
        if self.instance.is_superuser and not self.user.is_superuser:
            self.fields['email'].disabled = True
            self.fields['user_permissions'].disabled = True
            self.fields['is_active'].disabled = True
            self.fields['is_staff'].disabled = True
        # Disable editing other staff's email for non-superuser staff
        if self.instance.is_staff and not self.user.is_superuser:
            self.fields['email'].disabled = True
        # Disable users editing their own following fields except for email
        if self.user == self.instance:
            self.fields['email'].disabled = False
            self.fields['user_permissions'].disabled = True
            self.fields['is_active'].disabled = True
            self.fields['is_staff'].disabled = True
        # Use the default billing address, when present, to seed the name
        # placeholders shown in the empty form fields.
        address = self.instance.default_billing_address
        if not address:
            return
        if address.first_name:
            placeholder = get_name_placeholder(address.first_name)
            self.fields['first_name'].widget.attrs['placeholder'] = placeholder
        if address.last_name:
            placeholder = get_name_placeholder(address.last_name)
            self.fields['last_name'].widget.attrs['placeholder'] = placeholder

    def clean(self):
        cleaned_data = super().clean()
        # Remove all permissions if user is not staff
        if not cleaned_data['is_staff']:
            cleaned_data['user_permissions'] = []
        return cleaned_data
| 39.527027 | 79 | 0.631111 |
a6db3a33c340f1cb25dd5bfdb07bfa899bde4e59 | 2,952 | py | Python | improver/metadata/constants/mo_attributes.py | thbom001/improver | 6f6e334a1e8e44a151125cf123ecdda2c56dbec4 | [
"BSD-3-Clause"
] | null | null | null | improver/metadata/constants/mo_attributes.py | thbom001/improver | 6f6e334a1e8e44a151125cf123ecdda2c56dbec4 | [
"BSD-3-Clause"
] | null | null | null | improver/metadata/constants/mo_attributes.py | thbom001/improver | 6f6e334a1e8e44a151125cf123ecdda2c56dbec4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module defining Met Office specific attributes"""
# Values shared by every StaGE grid definition below.
GRID_TYPE = "standard"
STAGE_VERSION = "1.3.0"

# The attribute names that together identify a StaGE grid.
MOSG_GRID_ATTRIBUTES = {"mosg__grid_type", "mosg__grid_version", "mosg__grid_domain"}

# Define current StaGE and MONOW grid metadata, keyed by model configuration.
MOSG_GRID_DEFINITION = {
    "uk_ens": {
        "mosg__grid_type": GRID_TYPE,
        "mosg__model_configuration": "uk_ens",
        "mosg__grid_domain": "uk_extended",
        "mosg__grid_version": STAGE_VERSION,
    },
    "gl_ens": {
        "mosg__grid_type": GRID_TYPE,
        "mosg__model_configuration": "gl_ens",
        "mosg__grid_domain": "global",
        "mosg__grid_version": STAGE_VERSION,
    },
    "uk_det": {
        "mosg__grid_type": GRID_TYPE,
        "mosg__model_configuration": "uk_det",
        "mosg__grid_domain": "uk_extended",
        "mosg__grid_version": STAGE_VERSION,
    },
    "gl_det": {
        "mosg__grid_type": GRID_TYPE,
        "mosg__model_configuration": "gl_det",
        "mosg__grid_domain": "global",
        "mosg__grid_version": STAGE_VERSION,
    },
    # nc_det carries no grid attributes, only the model configuration.
    "nc_det": {"mosg__model_configuration": "nc_det"},
}

# Map correct metadata from StaGE v1.1.0: legacy grid_id -> model configuration.
GRID_ID_LOOKUP = {
    "enukx_standard_v1": "uk_ens",
    "engl_standard_v1": "gl_ens",
    "ukvx_standard_v1": "uk_det",
    "glm_standard_v1": "gl_det",
}
| 39.891892 | 85 | 0.70122 |
ecb3503b8f16ae0b9bcbf8e48d2a180be51e1b5c | 1,757 | py | Python | env/Lib/site-packages/OpenGL/GL/EXT/pixel_transform.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | [
"BSD-3-Clause"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | env/Lib/site-packages/OpenGL/GL/EXT/pixel_transform.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | [
"BSD-3-Clause"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | env/Lib/site-packages/OpenGL/GL/EXT/pixel_transform.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | [
"BSD-3-Clause"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''OpenGL extension EXT.pixel_transform
This module customises the behaviour of the
OpenGL.raw.GL.EXT.pixel_transform to provide a more
Python-friendly API
Overview (from the spec)
This extension provides support for scaling, rotation, translation and
shearing of two-dimensional pixel rectangles in the pixel rasterizer.
The transformation is defined via a 4x4 matrix, where only those entries
which apply as a 2D affine transformation will be accepted and used.
These matrices can be manipulated using the same functions as the other
OpenGL matrix stacks.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/pixel_transform.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.pixel_transform import *
from OpenGL.raw.GL.EXT.pixel_transform import _EXTENSION_NAME
def glInitPixelTransformEXT():
    """Report whether the EXT_pixel_transform extension is available."""
    # Imported lazily so merely importing this module never touches the GL
    # extension registry.
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
# The array-input entry points must declare the size of ``params`` so that
# PyOpenGL can marshal Python sequences; both take a single-element array.
glPixelTransformParameterivEXT=wrapper.wrapper(glPixelTransformParameterivEXT).setInputArraySize(
    'params', 1
)
glPixelTransformParameterfvEXT=wrapper.wrapper(glPixelTransformParameterfvEXT).setInputArraySize(
    'params', 1
)
# The query entry points write into ``params``; the output buffer size is
# looked up from ``pname`` and the caller may optionally pass the buffer in.
glGetPixelTransformParameterivEXT=wrapper.wrapper(glGetPixelTransformParameterivEXT).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetPixelTransformParameterfvEXT=wrapper.wrapper(glGetPixelTransformParameterfvEXT).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION | 39.931818 | 97 | 0.821286 |
ab395a42e8f312446e5087de9cf66c1ac9e414a1 | 73 | py | Python | loghandler/modules/__init__.py | math280h/loghandler | 7e752493df91e49b3b205fda2829e6e7b8cc050c | [
"MIT"
] | 2 | 2021-11-05T17:56:33.000Z | 2021-11-07T01:58:01.000Z | loghandler/modules/__init__.py | math280h/loghandler | 7e752493df91e49b3b205fda2829e6e7b8cc050c | [
"MIT"
] | 33 | 2021-11-05T02:09:29.000Z | 2022-03-23T11:09:45.000Z | loghandler/modules/__init__.py | math280h/loghandler | 7e752493df91e49b3b205fda2829e6e7b8cc050c | [
"MIT"
] | null | null | null | import loghandler.modules.elasticsearch
import loghandler.modules.stdout
| 24.333333 | 39 | 0.890411 |
f44013e51db0c1e0bbde278fca76b9ed2a7cff22 | 139 | py | Python | python/1409.A.py | arechesk/cf | 8d2209398f0fc4a73c139f4101634a8ed8c62ff6 | [
"BSD-3-Clause"
] | null | null | null | python/1409.A.py | arechesk/cf | 8d2209398f0fc4a73c139f4101634a8ed8c62ff6 | [
"BSD-3-Clause"
] | null | null | null | python/1409.A.py | arechesk/cf | 8d2209398f0fc4a73c139f4101634a8ed8c62ff6 | [
"BSD-3-Clause"
] | null | null | null | t=int(input())
# Codeforces 1409A: one move changes a by any k in 1..10, so the minimum
# number of moves to turn a into b is ceil(|a - b| / 10).
for _ in range(t):
    a, b = map(int, input().split())
    # Exact integer ceiling division replaces the original
    # int(abs(a - b) / 10) + int((a - b) % 10 != 0): float division can lose
    # precision for very large differences, integer arithmetic cannot.
    print((abs(a - b) + 9) // 10)
| 19.857143 | 46 | 0.482014 |
652f76251a2e73a257277bca3470608d4bcde9b0 | 160 | py | Python | tests/test_A000041.py | TyomaMtl/oeis | 2feeb44c00d9fd81e39c8db85e330cbcbdca3a39 | [
"MIT"
] | null | null | null | tests/test_A000041.py | TyomaMtl/oeis | 2feeb44c00d9fd81e39c8db85e330cbcbdca3a39 | [
"MIT"
] | null | null | null | tests/test_A000041.py | TyomaMtl/oeis | 2feeb44c00d9fd81e39c8db85e330cbcbdca3a39 | [
"MIT"
] | null | null | null | from oeis import A000041
def test_partitions():
    """Spot-check A000041 (number of integer partitions) at known points."""
    for n, expected in ((5, 7), (3, 3), (4, 5), (10, 42)):
        assert A000041(n) == expected
| 17.777778 | 28 | 0.63125 |
879710014cf2ea54017c30095fe07fa5495e03d1 | 13,435 | py | Python | igdectk/rest/restmiddleware.py | coll-gate/igdectk | 2b658652b01e2998549aee0b2a0ca6b71724f222 | [
"MIT"
] | 2 | 2017-04-27T20:08:17.000Z | 2017-07-04T16:19:16.000Z | igdectk/rest/restmiddleware.py | coll-gate/igdectk | 2b658652b01e2998549aee0b2a0ca6b71724f222 | [
"MIT"
] | null | null | null | igdectk/rest/restmiddleware.py | coll-gate/igdectk | 2b658652b01e2998549aee0b2a0ca6b71724f222 | [
"MIT"
] | null | null | null | # -*- coding: utf-8; -*-
#
# @file restmiddleware.py
# @brief REST django middleware.
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2015-04-13
# @copyright Copyright (c) 2015 INRA
# @license MIT (see LICENSE file)
# @details Middleware thats manage common view errors.
# The middleware decorate the request with a format (HTML by default), and by a list of URL parameters.
# When a view is decorated by def_request or def_auth_request,
# this modify the data attached to the request and the format.
import json
import logging
import threading
from django import http
from django.core.exceptions import *
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib import messages
from django.urls import resolve
from django.apps import apps
from django.utils.translation.trans_real import parse_accept_lang_header
from validictory import FieldValidationError
import igdectk.xmlio
from igdectk.rest.response import ComplexEncoder
from . import Format
logger = logging.getLogger(__name__)
class ViewExceptionRest(Exception):
    """
    Exception carrying a human-readable message and an HTTP status code.

    :param str message: Cause of the exception.
    :param int code: HTTP error code (e.g. 404).
    """

    def __init__(self, message, code):
        # The original called ``super(Exception, self).__init__``, which
        # skips ``Exception`` in the MRO and dispatches to ``BaseException``.
        # The zero-argument form initialises ``args`` identically and is the
        # idiomatic spelling.
        super().__init__(message, code)
        self.code = code
class HttpResponseUnauthorized(http.HttpResponse):
    # Django ships no 401 response class of its own; this one is used by
    # RestMiddleware.TYPES for authentication failures.
    status_code = 401
def parse_content_type(content_type):
    """
    Split a Content-Type header into ``(media_type, ((key, value), ...))``.

    :param str content_type: Raw Content-Type header value.
    :return: The media type followed by a tuple of parameter pairs.
    :rtype: tuple
    """
    media_type, *param_segments = content_type.strip().split(";")

    pairs = []
    for segment in param_segments:
        key, value = segment.lstrip().split("=", 1)
        pairs.append((key, value))

    return (media_type, tuple(pairs))
def parse_accept_header(accept):
    """
    Parse the Accept header.

    :param str accept: Accept string from an HTTP header.
    :return: A list of ``(media_type, media_params, q_value)`` triples,
        ordered by descending q value.
    :rtype: list(tuple)
    """
    parsed = []

    for media_range in accept.split(","):
        media_type, *segments = media_range.strip().split(";")

        quality = 1.0
        params = []
        for segment in segments:
            key, value = segment.lstrip().split("=", 1)
            if key == "q":
                quality = float(value)
            else:
                params.append((key, value))

        parsed.append((media_type, tuple(params), quality))

    # Stable descending sort on the q value (equivalent to key=lambda x: -x[2]).
    parsed.sort(key=lambda triple: triple[2], reverse=True)
    return parsed
class HttpHeader(object):
    """
    Lazy, caching parser for the HTTP headers of a Django request.

    Accept, Accept-Language and Content-Type are each parsed at most once;
    the parsed values are cached on the instance.
    """

    def __init__(self, request):
        self.request = request

        # HTTP_ACCEPT cache
        self._accept = None
        self._accepted_types = None

        # HTTP_ACCEPT_LANGUAGE cache
        self._accept_language = None
        self._accepted_language_codes = None

        # CONTENT_TYPE cache
        self._content_type = None
        self._content_format = None

    def _cache_http_accept(self):
        # Parse once and keep both the full triples and the bare media types.
        accept = parse_accept_header(self.request.META.get("HTTP_ACCEPT", ""))
        self._accept = accept
        self._accepted_types = [t[0] for t in accept]

    @property
    def accept(self):
        """
        Return the HTTP_ACCEPT list of (media_type, media_params, q_value)
        triples, ordered by descending q value. Cached after the first call.
        """
        if not self._accept:
            self._cache_http_accept()
        return self._accept

    @property
    def accepted_types(self):
        """
        Same as :ref:`accept` but returns only the media types.
        Cached after the first call.
        """
        if not self._accepted_types:
            self._cache_http_accept()
        return self._accepted_types

    @property
    def preferred_type(self):
        """
        Return the preferred media type as a :class:`Format` enum value.

        Falls back to HTML when the client sent no Accept header, and to
        TEXT for media types the Format enum does not know.
        """
        if not self._accepted_types:
            self._cache_http_accept()

        if not self._accepted_types:
            return Format.HTML

        if self._accepted_types[0] == Format.JSON.content_type:
            return Format.JSON
        elif self._accepted_types[0] == Format.XML.content_type:
            return Format.XML
        elif self._accepted_types[0] == Format.HTML.content_type:
            return Format.HTML
        elif self._accepted_types[0] == Format.MULTIPART.content_type:
            return Format.MULTIPART
        elif self._accepted_types[0] == Format.TEXT.content_type:
            return Format.TEXT
        else:
            return Format.TEXT

    def _cache_http_accept_language(self):
        accept_language = parse_accept_lang_header(self.request.META.get("HTTP_ACCEPT_LANGUAGE", ""))
        self._accept_language = accept_language
        self._accepted_language_codes = [t[0] for t in accept_language]

    @property
    def accept_language(self):
        """
        Return HTTP_ACCEPT_LANGUAGE as a list of (code, q_value) pairs.
        Cached after the first call.
        """
        if not self._accept_language:
            self._cache_http_accept_language()
        return self._accept_language

    @property
    def accepted_language_codes(self):
        """
        Same as :ref:`accept_language` but returns only the language codes.
        Cached after the first call.
        """
        # BUG FIX: the original tested and returned
        # ``self._accepted_laguage_codes`` (typo, missing "n"), an attribute
        # that never exists, so this property raised AttributeError on every
        # access.
        if not self._accepted_language_codes:
            self._cache_http_accept_language()
        return self._accepted_language_codes

    @property
    def preferred_language_code(self):
        """Return the client's first language code, or 'en_US' by default."""
        if not self._accepted_language_codes:
            self._cache_http_accept_language()

        if len(self._accepted_language_codes) > 0:
            return self._accepted_language_codes[0]
        else:
            return 'en_US'

    def _cache_content_type(self):
        self._content_type = parse_content_type(self.request.META.get("CONTENT_TYPE", ""))

        if self._content_type[0] == Format.JSON.content_type:
            self._content_format = Format.JSON
        elif self._content_type[0] == Format.XML.content_type:
            self._content_format = Format.XML
        elif self._content_type[0] == Format.HTML.content_type:
            self._content_format = Format.HTML
        elif self._content_type[0] == Format.MULTIPART.content_type:
            self._content_format = Format.MULTIPART
        elif self._content_type[0] == Format.TEXT.content_type:
            self._content_format = Format.TEXT
        else:
            # Unknown media types are accepted as "any" rather than rejected.
            self._content_format = Format.ANY

    @property
    def content_type(self):
        """
        Return a pair of (media type, tuple of content settings).
        Cached after the first call.
        """
        if not self._content_type:
            self._cache_content_type()
        return self._content_type

    @property
    def content_format(self):
        """
        Return a :class:`Format` for the request's content type.
        Cached after the first call.
        """
        if not self._content_format:
            self._cache_content_type()
        return self._content_format
class RestMiddleware(object):
    """
    Middleware that manages the request format and catches view exceptions.
    It also manages the customized view errors (HTML page if HTML else JSON).

    The middleware decorates the request with a format (HTML by default)
    and with a list of URL parameters.

    When a view is decorated by :meth:`igdectk.rest.handler.RestHandler.def_request`,
    :meth:`igdectk.rest.handler.RestHandler.def_auth_request` or by
    :meth:`igdectk.rest.handler.RestHandler.def_admin_request`,
    the decorator can attach a data dict to the request object.
    """

    # Maps an HTTP error code to the Django response class to instantiate;
    # any other code falls back to a plain HttpResponse.
    TYPES = {
        400: http.HttpResponseBadRequest,
        401: HttpResponseUnauthorized,
        403: http.HttpResponseForbidden,
        404: http.HttpResponseNotFound,
        500: http.HttpResponseServerError,
    }

    # Per-thread storage for the current user and remote address; read back
    # via current_user() / current_remote_addr().
    thread_local = threading.local()

    def __init__(self, get_response=None):
        self.get_response = get_response

    def __call__(self, request):
        # Standard Django middleware protocol: prepare the request, run the
        # rest of the chain, and convert any raised exception into a
        # formatted HTTP error response.
        self.process_request(request)

        try:
            response = self.get_response(request)
        except BaseException as e:
            response = self.process_exception(request, e)

        return response

    @staticmethod
    def format_response(request, message, code, error=""):
        """
        Helper to format a response related to the format and parameters
        defined into the request, a message, and an HTTP code.

        :param RequestContext request: Django request object.
        :param str message: Message constant string.
        :param int code: HTTP code.
        :param str error: Short machine-readable error identifier.

        :return: An HTTP response object.
        :rtype: HttpResponse
        """
        response_type = RestMiddleware.TYPES.get(code, http.HttpResponse)

        result = {
            "result": "failed",
            "cause": message,
            "code": code,
            "error": error
        }

        # JSON format
        if request.format == Format.JSON:
            data = json.dumps(result, cls=ComplexEncoder)

        # HTML format
        elif request.format == Format.HTML:
            # append a Bootstrap message error
            messages.error(request, 'Http %i: %s' % (code, message))

            # render a default error page if it excepts
            try:
                # get HTTP_TEMPLATE_STRING from the app of the request
                app_name = resolve(request.path).app_name
                current_app = apps.get_app_config(app_name)
                http_template_string = current_app.http_template_string

                data = render_to_string(
                    http_template_string % (code,),
                    result,
                    request=request)
            except Exception:
                # NOTE(review): RequestContext is passed where HttpResponse
                # expects a content_type argument — looks unintended; confirm.
                return response_type('Http %i: %s' % (code, message), RequestContext(request))

            return response_type(data)

        # XML format
        elif request.format == Format.XML:
            data = igdectk.xmlio.dumps(result)

        # TEXT format
        elif request.format == Format.TEXT:
            data = "result: %(result)s\ncause: %(cause)s\ncode: %(code)i" % result

        # ANY others formats
        else:
            data = "result: %(result)s\ncause: %(cause)s\ncode: %(code)i" % result

        return response_type(data, content_type=request.format.content_type)

    def process_request(self, request):
        # default request data format to HTML
        request.format = Format.HTML
        # an empty list of url parameters
        request.parameters = ()
        # lazy header parser (see HttpHeader above)
        request.header = HttpHeader(request)

        # initialize thread local current request information
        RestMiddleware.thread_local.current_user = request.user
        RestMiddleware.thread_local.current_remote_addr = request.META.get('REMOTE_ADDR', '')

    def process_exception(self, request, exception):
        # Translate known exception types into (cause, HTTP code, error id).
        # For most types a second args element, when present, overrides the
        # default error identifier. Anything unrecognised becomes a logged 500.
        if isinstance(exception, ViewExceptionRest):
            cause, code = exception.args
            error = "view_exception"
        elif isinstance(exception, ValueError):
            cause = exception.args[0]
            code = 400
            error = "value_error" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, SuspiciousOperation):
            cause = exception.args[0]
            code = 400
            error = "suspicious_operation" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, PermissionDenied):
            cause = exception.args[0]
            code = 403
            error = "permission_denied" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, http.Http404):
            cause = exception.args[0]
            code = 404
            error = "http404" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, ObjectDoesNotExist):
            cause = exception.args[0]
            code = 404
            error = "object_does_not_exists" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, MultipleObjectsReturned):
            cause = exception.args[0]
            code = 404
            error = "multiple_objects_returned" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, FieldValidationError):
            cause = exception.args[0]
            code = 400
            error = "field_validation_error" if len(exception.args) < 2 else exception.args[1]
        elif isinstance(exception, ValidationError):
            # Django ValidationError exposes the human-readable list via
            # ``messages`` rather than ``args[0]``.
            cause = exception.messages
            code = 400
            error = "field_validation_error" if len(exception.args) < 2 else exception.args[1]
        else:
            cause = repr(exception)
            code = 500
            error = "internal_error" if len(exception.args) < 2 else exception.args[1]

            import traceback
            # write the traceback to the logger (should be redirected to console)
            logger.error(traceback.format_exc())

        return RestMiddleware.format_response(request, cause, code, error)

    @staticmethod
    def current_user():
        # User captured by process_request for the current thread, or None
        # outside of a request cycle.
        tl = RestMiddleware.thread_local
        if hasattr(tl, 'current_user'):
            return tl.current_user
        else:
            return None

    @staticmethod
    def current_remote_addr():
        # Remote address captured by process_request for the current thread,
        # or the empty string outside of a request cycle.
        tl = RestMiddleware.thread_local
        if hasattr(tl, 'current_remote_addr'):
            return tl.current_remote_addr
        else:
            return ''
| 32.848411 | 103 | 0.630443 |
cc1334fcfc6f7f4114b2972f3b71eb31a0f908cf | 643 | py | Python | qcodes/dataset/database.py | RobertHenry6bev/Qcodes | 16655ea70228fed77db170c19e326f180ad8a49d | [
"MIT"
] | null | null | null | qcodes/dataset/database.py | RobertHenry6bev/Qcodes | 16655ea70228fed77db170c19e326f180ad8a49d | [
"MIT"
] | null | null | null | qcodes/dataset/database.py | RobertHenry6bev/Qcodes | 16655ea70228fed77db170c19e326f180ad8a49d | [
"MIT"
] | null | null | null | """
Code of this module has been moved to `.sqlite.database`. This module now
only re-imports the functions which it used to contain, for backwards
compatibility. Do not import functions from this module because it will be
removed soon.
"""
import warnings
from .sqlite.database import get_DB_debug, get_DB_location, \
initialise_database, initialise_or_create_database_at, path_to_dbfile
warnings.warn('The module `qcodes.dataset.database` is deprecated.\n'
'Public features are available at the import of `qcodes`.\n'
'Private features are available in `qcodes.dataset.sqlite.*` '
'modules.')
| 40.1875 | 76 | 0.735614 |
d6435706a8bef7c7b1d92b53cd0300443ad02af3 | 21,014 | py | Python | delphin_6_automation/backend/backend.py | ribuild/delphin_6_automation | 12024381fc1042b46314c55d88b6349229ea33b7 | [
"MIT"
] | 2 | 2017-11-08T18:37:36.000Z | 2018-01-09T12:10:58.000Z | delphin_6_automation/backend/backend.py | ribuild/delphin_6_automation | 12024381fc1042b46314c55d88b6349229ea33b7 | [
"MIT"
] | 111 | 2018-02-26T08:25:44.000Z | 2021-03-31T19:17:19.000Z | delphin_6_automation/backend/backend.py | thp44/delphin_6_automation | 12024381fc1042b46314c55d88b6349229ea33b7 | [
"MIT"
] | 3 | 2017-11-06T10:01:25.000Z | 2018-02-14T09:45:28.000Z | __author__ = "Thomas Perkov"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
import logging
import sys
# RiBuild Modules:
import delphin_6_automation.database_interactions.mongo_setup as mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions import general_interactions
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import weather_interactions
from delphin_6_automation.database_interactions.db_templates import delphin_entry as delphin_db
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.file_parsing import delphin_parser
from delphin_6_automation.database_interactions import user_interactions
# -------------------------------------------------------------------------------------------------------------------- #
# DELPHIN PERMUTATION FUNCTIONS
"""
backend user interface:
- Add new simulation(s)
- Monitor the simulation process
- Queue and watch finished simulations
"""
def main():
    # Program entry point: banner, database connection, login, then the
    # interactive menu loop (which terminates the process on exit).
    print_header()
    config_mongo()
    account = login()
    main_menu(account)
def print_header():
print('---------------------------------------------------')
print('| |')
print('| RiBuild EU Research Project |')
print('| for Hygrothermal Simulations |')
print('| |')
print('| WORK IN PROGRESS |')
print('| Test Environment |')
print('| |')
print('---------------------------------------------------')
def config_mongo():
    # Open the MongoDB connection (via SSH tunnel) using stored credentials.
    mongo_setup.global_init(auth_dict)
def close_connections():
    # Tear down the SSH tunnel / database connection opened by config_mongo().
    mongo_setup.global_end_ssh(auth_dict)
def login():
    """Prompt for an email and return the matching account.

    Offers to create a new account when none exists; returns ``None`` when
    the user declines to register.
    """
    print('')
    print('------------------- LOGIN -------------------------')
    email = input('What is your email? >').strip().lower()
    account = user_interactions.find_account_by_email(email)
    if not account:
        print(f'Could not find account with email {email}.')
        create = input('Do you which to create a new account? [y/n] >').strip().lower()
        if create == 'y':
            # BUG FIX: the original discarded the return value of
            # create_account(), so the caller received None (and "Logged in
            # successfully." was printed) even after a successful registration.
            account = create_account(email)
        else:
            return
    print('Logged in successfully.')
    return account
def create_account(email: str):
print('')
print('------------------- REGISTER ----------------------')
name = input('What is your name? >')
old_account = user_interactions.find_account_by_email(email)
if old_account:
print(f"ERROR: Account with email {email} already exists.")
return
return user_interactions.create_account(name, email)
def main_menu(account):
while True:
print('')
print('------------------- MAIN MENU ---------------------')
print('')
print("Available actions:")
print("[a] Add new simulation to queue")
print("[b] Add new simulation with permutations to queue")
print("[c] List simulations")
print("[d] List materials")
print("[w] List weather")
print("[t] Test Connection")
print("[x] Exit")
print()
choice = input("> ").strip().lower()
if choice == 'a':
[sim_id, *_] = add_to_queue()
save_ids(sim_id, account)
elif choice == 'b':
id_list = add_permutations_to_queue()
save_ids(id_list, account)
elif choice == 'c':
view_simulations(account)
elif choice == 'd':
view_material_data()
elif choice == 'w':
view_weather_data()
elif not choice or choice == 'x':
close_connections()
print("see ya!")
sys.exit()
def view_simulations(account):
while True:
print('')
print('------------------ SIMULATIONS --------------------')
print('')
print("Available actions:")
print("[l] List simulations")
print("[f] Find simulation")
print("[d] Download simulations")
print("[a] Add new simulation to queue")
print("[b] Add new simulation with permutations to queue")
print("[x] Return to main menu")
print('')
choice = input("> ").strip().lower()
if choice == 'l':
user_interactions.list_user_simulations(account)
elif choice == 'f':
find_simulations()
elif choice == 'd':
download_simulation_result()
elif choice == 'a':
[sim_id, *_] = add_to_queue()
save_ids(sim_id, account)
elif choice == 'b':
id_list = add_permutations_to_queue()
save_ids(id_list, account)
elif choice == 'x':
return None
def get_simulation_status(id_):
delphin_document = delphin_db.Delphin.objects(id=id_).first()
if delphin_document.simulating:
status = "Is currently being simulated."
elif delphin_document.simulated:
status = f"Was simulated on {delphin_document.simulated}"
else:
status = 'Is waiting to be simulated'
print('')
print(f'Simulation with ID: {id_}\n'
f'\tAdded: {delphin_document.added_date}\n'
f'\t{status}')
if status == f"Was simulated on {delphin_document.simulated}":
print('')
download = input("Do you wish to download the results? y/n >")
if download == 'y':
print(f'Simulation result will be saved on the Desktop as in the folder: {id_}')
user_desktop = os.path.join(os.environ["HOMEPATH"], "Desktop")
general_interactions.download_raw_result(delphin_document.results_raw.id, user_desktop + f'/{id_}')
def find_simulations():
print('')
print("The simulations will be identified by their database ID")
database_ids = input("What is the database ID?\n"
"If more than 1 simulation is wished, then the IDs have to be separated with a comma. >")
database_ids = [id_.strip()
for id_ in database_ids.split(',')]
for id_ in database_ids:
get_simulation_status(id_)
def view_material_data():
while True:
print('')
print('------------------- MATERIALS ---------------------')
print('')
print("Available actions:")
print("[l] List materials")
print("[m] Add Delphin material to the database")
print("[d] Download material")
print("[x] Return to main menu")
print('')
choice = input("> ").strip().lower()
if choice == 'l':
print('Looking up the weather stations may take some time. Please wait.')
print('The RIBuild Database currently contains the following materials:\n')
materials = general_interactions.list_materials()
general_interactions.print_material_dict(materials)
elif choice == 'm':
add_delphin_material_to_db()
elif choice == 'd':
download_delphin_material()
elif choice == 'x':
return None
def test_connection():
print('if materials are printing the sh#t is running:\n')
materials = general_interactions.list_materials()
general_interactions.print_material_dict(materials)
def view_weather_data():
while True:
print('')
print('------------------ WEATHER DATA -------------------')
print('')
print("[l] List weather stations")
print("[x] Return to main menu")
print('')
choice = input("> ").strip().lower()
if choice == 'l':
print('Looking up the weather stations may take some time. Please wait.')
print('The RIBuild Database currently contains the following weather stations:\n')
weather_stations = general_interactions.list_weather_stations()
general_interactions.print_weather_stations_dict(weather_stations)
elif choice == 'x':
return None
def add_to_queue():
delphin_file = ' '
while not os.path.isfile(delphin_file):
delphin_file = str(input("File path for the Delphin file >"))
if not os.path.isfile(delphin_file):
print('Could not find file. Please try again')
priority = str(input("Simulation Priority - high, medium or low >"))
climate_class = str(input('What climate class should be assigned? A or B can be chosen. >'))
if check_delphin_file(delphin_file):
sim_id = general_interactions.add_to_simulation_queue(delphin_file, priority)
weather_interactions.assign_indoor_climate_to_project(sim_id, climate_class)
location_name, years = add_weather_to_simulation(sim_id)
change_year = input('Do you wish to change the simulation length to match the weather input? [Y/n] >')
if change_year != 'n':
delphin_interactions.change_entry_simulation_length(sim_id, len(years), 'a')
print(f'Simulation length changed to {len(years)} a')
print('Simulation ID:', sim_id,
'\nTo retrieve the results of a simulation the simulation ID is needed.')
return sim_id, general_interactions.queue_priorities(priority), location_name, years, climate_class
else:
return None
def add_permutations_to_queue():
print('First upload the original file. Afterwards permutations can be chosen.')
id_list = []
original_id, priority, location_name, years, climate_class = add_to_queue()
id_list.append(original_id)
modified_ids, choice = list_permutation_options(original_id, priority)
if choice != 'c':
for id_ in modified_ids:
weather_interactions.assign_weather_by_name_and_years(id_, location_name, years)
weather_interactions.assign_indoor_climate_to_project(id_, climate_class)
id_list.extend(modified_ids)
return id_list
def save_ids(simulation_id, account):
if not simulation_id:
return
else:
if isinstance(simulation_id, list):
for id_ in simulation_id:
user_interactions.add_simulation_to_user(account, delphin_db.Delphin.objects(id=id_).first())
else:
user_interactions.add_simulation_to_user(account, delphin_db.Delphin.objects(id=simulation_id).first())
save = str(input('Save Simulation ID to text file? (y/n)'))
if save == 'y':
print('Simulation will be saved on the Desktop as simulation_id.txt ')
user_desktop = os.path.join(os.environ["HOMEPATH"], "Desktop")
id_file = open(user_desktop + '/simulation_id.txt', 'w')
if not isinstance(simulation_id, list):
id_file.write(str(simulation_id))
else:
for id_ in simulation_id:
id_file.write(str(id_) + '\n')
id_file.close()
else:
print('Simulation ID was not saved.')
return
def check_delphin_file(delphin_file):
delphin_dict = delphin_parser.dp6_to_dict(delphin_file)
if delphin_interactions.check_delphin_file(delphin_dict):
delphin_logger = logging.getLogger("delphin_6_automation.database_interactions.delphin_interactions")
log_file = delphin_logger.handlers[0].baseFilename
print('\n------------------- ERROR -------------------------')
print('Uploaded Delphin Project does not comply with the guidelines for the simulation system.')
print(f'An error log has been created and can be found here:\n{log_file}\n')
return False
else:
return True
def add_weather_to_simulation(simulation_id):
location_name = str(input("What weather station should be used? >"))
years = input("Which years should be used?.\n"
"If more than 1 year is wished, then the values have to be separated with a comma. >")
years = [int(year.strip())
for year in years.split(',')]
weather_interactions.assign_weather_by_name_and_years(simulation_id, location_name, years)
return location_name, years
def list_permutation_options(original_id, priority):
print('-------------- PERMUTATION OPTIONS ----------------')
print('')
print("Available options:")
print("[a] Change layer width")
print("[b] Change layer material")
print("[c] Change weather")
print("[d] Change wall orientation")
print("[e] Change boundary coefficient")
print("[f] Change simulation length")
print("[x] Exit")
print()
choice = input("> ").strip().lower()
if choice == 'a':
ids = layer_width_permutation(original_id, priority)
elif choice == 'b':
ids = layer_material_permutation(original_id, priority)
elif choice == 'c':
ids = weather_permutation(original_id, priority)
elif choice == 'd':
ids = wall_permutation(original_id, priority)
elif choice == 'e':
ids = boundary_permutation(original_id, priority)
elif choice == 'f':
ids = simulation_length_permutation(original_id, priority)
else:
ids = ''
return ids, choice
def layer_width_permutation(simulation_id, priority):
print('')
print("The layer will be identified by the name of the material in the layer.")
layer_material = input("What is the name of the material? >")
widths = input("Input wished layer widths in meter.\n"
"If more than 1 width is wished, then the values have to be separated with a comma. >")
widths = [float(width.strip())
for width in widths.split(',')]
print('')
print(f'Following values given: {widths}')
print('')
ids = delphin_interactions.permutate_entry_layer_width(simulation_id, layer_material, widths, priority)
return ids
def layer_material_permutation(original_id, priority):
print('')
print("The layer will be identified by the name of the material in the layer.")
layer_material = input("What is the name of the original material you want to change? >")
material_list = input("Input wished layer materials.\n"
"If more than 1 material is wished, then the values have to be separated with a comma. >")
materials = []
for material in material_list.split(','):
try:
materials.append(int(material.strip()))
print('Material identified by ID')
except ValueError:
materials.append(material.strip())
print('Material identified by Material Name')
print('')
print(f'Following values given: {materials}')
print('')
ids = delphin_interactions.permutate_entry_layer_material(original_id, layer_material, materials, priority)
return ids
def weather_permutation(original_id, priority):
print('')
weather_stations = {'years': [], 'stations': []}
stations = input("Input wished weather stations.\n"
"If more than 1 weather station with the same years is wished, "
"then the weather station have to be separated with a comma. >")
for station in stations.split(','):
weather_stations['stations'].append(station.strip())
year_list = input(f"Input wished years for the following weather stations: {stations}.\n"
f"If more than 1 year is wished, then the years have to be separated with a comma. >")
year_list = [[int(year.strip())
for year in years.strip().split(' ')]
for years in year_list.split(',')]
weather_stations['years'] = year_list
print('')
print(f'Following values given: {weather_stations}')
print('')
return delphin_interactions.permutate_entry_weather(original_id, weather_stations, priority)
def wall_permutation(original_id, priority):
print('')
orientation_list = input("Input wished orientations.\n"
"If more than 1 orientation is wished, "
"then the values have to be separated with a comma. >")
orientation_list = [int(orientation.strip())
for orientation in orientation_list.split(',')]
print('')
print(f'Following values given: {orientation_list}')
print('')
return delphin_interactions.permutate_entry_orientation(original_id, orientation_list, priority)
def boundary_permutation(original_id, priority):
print('')
boundary_condition = input("Input wished boundary condition to change. >")
coefficient_name = input("Input wished climate coefficient to change. >")
coefficient_list = input("Input wished boundary coefficients.\n"
"If more than 1 coefficient is wished, "
"then the values have to be separated with a comma. >")
coefficient_list = [float(coefficient.strip())
for coefficient in coefficient_list.split(',')]
print('')
print(f'Following values given: {coefficient_list}')
print('')
return delphin_interactions.permutate_entry_boundary_coefficient(original_id, boundary_condition, coefficient_name,
coefficient_list, priority)
def list_latest_added_simulations():
documents = delphin_db.Delphin.objects.order_by("added_date")
for document in documents:
print(f"ID: {document.id} - Added: {document.added_date} - With priority: {document.queue_priority}")
def add_delphin_material_to_db():
user_input = input("Please type the path a .m6 file or a folder with multiple files: ")
id_ = material_interactions.upload_material_file(user_input)
print(f'\nMaterial was upload with ID: {id_}')
def download_delphin_material():
# TODO - download_delphin_material
print('Not implemented')
return
def download_simulation_result():
print('')
choice = input('Do you wish to download a [s]ingle result or [m]ultiple? >')
if choice == 's':
download_single_result()
elif choice == 'm':
download_result_from_file()
else:
return
def download_result_from_file():
print('')
file_path = str(input('Path to text file with simulation IDs >'))
download_path = str(input('The path to which the results should be downloaded? >'))
file = open(file_path, 'r')
lines = file.readlines()
file.close()
for line in lines:
sim_id = line.strip()
if not general_interactions.does_simulation_exists(sim_id):
print(f'Simulation ID: {sim_id} can not be found in database. Skipping to next ID.')
pass
elif general_interactions.is_simulation_finished(sim_id):
print(f'Downloading: {sim_id}')
delphin_document = delphin_db.Delphin.objects(id=sim_id).first()
result_id = delphin_document.results_raw.id
general_interactions.download_raw_result(result_id, download_path)
delphin_interactions.download_delphin_entry(delphin_document, f'{download_path}/{result_id}')
else:
print(f'Simulation with ID: {sim_id} is not done yet. Skipping to next ID.')
pass
print(f'Wanted files are now downloaded to: {download_path}')
return
def download_single_result():
    """Prompt for one simulation ID and download its results if the simulation is finished."""
    print('')
    sim_id = str(input('Simulation ID to retrieve? >'))
    # Guard clause: bail out early when the simulation is still running.
    if not general_interactions.is_simulation_finished(sim_id):
        print('Simulation is not done yet. Please return later')
        return
    print('Simulation is ready to download.')
    download_path = str(input('Download Path? >'))
    entry = delphin_db.Delphin.objects(id=sim_id).first()
    result_id = entry.results_raw.id
    general_interactions.download_raw_result(result_id, download_path)
    delphin_interactions.download_delphin_entry(entry, download_path)
    return
def simulation_length_permutation(original_id, priority):
    """Ask for simulation lengths and units, then create permuted Delphin entries for each."""
    print('')
    raw_lengths = input("Input wished simulation lengths.\n"
                        "If more than 1 length is wished, then the values have to be separated with a comma. >")
    raw_units = input("Input wished simulation unit.\n"
                      "If more than 1 unit is wished, then the values have to be separated with a comma. >")
    length_list = [int(part.strip()) for part in raw_lengths.split(',')]
    unit_list = [part.strip() for part in raw_units.split(',')]
    print('')
    print(f'Following simulation values given:\nLengths: {length_list}\nUnits: {unit_list}')
    print('')
    return delphin_interactions.permutate_entry_simulation_length(original_id, length_list, unit_list, priority)
| 34.336601 | 120 | 0.620015 |
64a5ff8f7f91efb81b9351e90d5d2e02ff32dced | 6,861 | py | Python | dextre/settings/base.py | UWCS/uwcs-dextre | 99f93ad0be6d54d67df5b39a080405c145131121 | [
"MIT"
] | null | null | null | dextre/settings/base.py | UWCS/uwcs-dextre | 99f93ad0be6d54d67df5b39a080405c145131121 | [
"MIT"
] | 19 | 2021-05-07T12:21:06.000Z | 2022-02-11T13:37:36.000Z | dextre/settings/base.py | UWCS/uwcs-dextre | 99f93ad0be6d54d67df5b39a080405c145131121 | [
"MIT"
] | 2 | 2021-04-12T23:40:41.000Z | 2021-06-29T14:55:52.000Z | """
Django settings for dextre project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# PROJECT_DIR: the Django project package; BASE_DIR: the repository root.
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
    'lib',
    'blog',
    'accounts',
    'events',
    'api',
    'report',
    'wagtail.contrib.forms',
    'wagtail.contrib.redirects',
    'wagtail.contrib.table_block',
    'wagtail.embeds',
    'wagtail.sites',
    'wagtail.users',
    'wagtail.snippets',
    'wagtail.documents',
    'wagtail.images',
    'wagtail.search',
    'wagtail.admin',
    'wagtail.core',
    'modelcluster',
    'taggit',
    'taggit_templatetags2',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'oauth2_provider',
    'corsheaders',
    'djangobower',
    'compressor',
    'anymail',
    'markdownx',
    'widget_tweaks',
]
# Django REST framework: OAuth2 or token auth; responses rendered as JSON only.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
        'rest_framework.authentication.TokenAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        # 'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    )
}
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    # 'wagtail.core.middleware.SiteMiddleware',
    'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'dextre.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(PROJECT_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'dextre.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# NOTE(review): SQLite default — presumably overridden by a production settings module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'djangobower.finders.BowerFinder',
    'compressor.finders.CompressorFinder',
]
STATICFILES_DIRS = [
    os.path.join(PROJECT_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Django Compressor
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
BOWER_COMPONENTS_ROOT = os.path.abspath(os.path.join(PROJECT_PATH, "../components"))
# SCSS is compiled via the sass CLI with bower-managed include paths.
COMPRESS_PRECOMPILERS = (
    ('text/x-scss',
     'sass --style compressed'
     ' -I "%s/bower_components/foundation-sites/scss"'
     ' -I "%s/bower_components/bulma"'
     ' -I "%s/bower_components/motion-ui"'
     ' {infile} "{outfile}"' % (BOWER_COMPONENTS_ROOT, BOWER_COMPONENTS_ROOT, BOWER_COMPONENTS_ROOT)),
)
# Per Django convention, the first hasher is used for new passwords; the rest
# remain so existing hashes can still be verified.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
]
# Anymail
DEFAULT_FROM_EMAIL = "noreply@uwcs.co.uk"
# Django-bower
BOWER_INSTALLED_APPS = [
    'bulma~0.9.0'
]
# OAuth2 groups
OAUTH2_PROVIDER = {
    'SCOPES': {
        'read': 'Read scope',
        'write': 'Write scope',
        'seating': 'Pick, move, and unpick seats for a UWCS LAN event',
        'event': 'Access to sign up to and deregister from UWCS events',
        'lan': 'Access to your nickname and seat location at UWCS LANs',
        'lanapp': 'Access to your name, nickname, and university ID for LAN applications',
        'roles': 'Access to your nickname and whether or not you are or have been a member of the exec committee',
        'profile': 'Access to your name, nickname, university ID'
    },
    'DEFAULT_SCOPES': {
        'event'
    }
}
REQUEST_APPROVAL_PROMPT = 'auto'
# WarwickSU Membership API key
# NOTE(review): placeholder value — must be replaced by deployment configuration.
UNION_API_KEY = 'insert-api-key'
# Wagtail settings
WAGTAIL_SITE_NAME = 'UWCS (Dextre)'
WAGTAIL_FRONTEND_LOGIN_URL = '/accounts/login/'
# Cross-origin Requests
CORS_ORIGIN_ALLOW_ALL = True
# X-frame Requests
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'uwcs.co.uk'
# Apache template conf
APACHE_SSL_CIPHER_SUITE = ''
APACHE_SSL_CERT_FILE = ''
APACHE_SSL_KEY_FILE = ''
APACHE_SSL_CHAIN_FILE = ''
APACHE_SITES_AVAILABLE = ''
APACHE_SITES_ENABLED = ''
APACHE_WEBSITE_DIR = ''
# Celery
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'GMT'
01d0b6c71e58629d86117096ca804fc115aa44ab | 1,879 | py | Python | examples/dfp/v201511/forecast_service/get_delivery_forecast_for_line_items.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/dfp/v201511/forecast_service/get_delivery_forecast_for_line_items.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201511/forecast_service/get_delivery_forecast_for_line_items.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a delivery forecast for two existing line items.
To determine which line items exist, run get_all_line_items.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
# Set the line items to get forecasts for.
# These placeholders must be replaced with real line item IDs before running.
LINE_ITEM_ID_1 = 'INSERT_LINE_ITEM_1_ID_HERE'
LINE_ITEM_ID_2 = 'INSERT_LINE_ITEM_2_ID_HERE'
def main(client, line_item_id1, line_item_id2):
  """Fetch and print the delivery forecast for the two given line items."""
  # Initialize appropriate service.
  forecast_service = client.GetService('ForecastService', version='v201511')
  # Request the delivery forecast for both line items in a single call.
  forecast = forecast_service.getDeliveryForecastByIds(
      [line_item_id1, line_item_id2], None)
  template = ('Forecast for line item %d:\n\t%d %s matched\n\t%d %s delivered\n\t'
              '%d %s predicted\n')
  for item_forecast in forecast['lineItemDeliveryForecasts']:
    units = item_forecast['unitType']
    print(template % (item_forecast['lineItemId'], item_forecast['matchedUnits'],
                      units, item_forecast['deliveredUnits'], units,
                      item_forecast['predictedDeliveryUnits'], units))
if __name__ == '__main__':
  # Initialize client object.
  # NOTE(review): LoadFromStorage presumably reads credentials from the default
  # googleads storage file — verify against the client library version in use.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, LINE_ITEM_ID_1, LINE_ITEM_ID_2)
| 35.45283 | 79 | 0.743481 |
d17305ab243d1a713fd98c66e49574a3e81f4850 | 4,664 | py | Python | setup.py | tsangel/dicomsdl | 779006728a79a95230a10021888a8a7f089107a0 | [
"BSD-3-Clause"
] | 3 | 2020-08-28T01:15:20.000Z | 2020-11-10T06:42:48.000Z | setup.py | tsangel/dicomsdl | 779006728a79a95230a10021888a8a7f089107a0 | [
"BSD-3-Clause"
] | null | null | null | setup.py | tsangel/dicomsdl | 779006728a79a95230a10021888a8a7f089107a0 | [
"BSD-3-Clause"
] | null | null | null | import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
USE_DEBUG_MESSAGE=False
def get_dicomsdl_version(path='src/include/dicomcfg.h'):
    """Extract the DICOMSDL version string from the C header.

    Args:
        path: Header file to scan. Defaults to the in-tree dicomcfg.h, so
            existing callers are unaffected.

    Returns:
        The version string, e.g. '0.105.1'.

    Raises:
        IndexError: If no line in the file mentions DICOMSDL_VERSION.
    """
    # 'with' guarantees the header is closed (the original left it open).
    with open(path) as header:
        line = [l for l in header if 'DICOMSDL_VERSION' in l][0]
    # line looks like: 'const char *const DICOMSDL_VERSION = "0.105.1";\n'
    return line.split('=')[-1].split('"')[1].strip()
def get_long_description(path='README.md'):
    """Return the README contents for use as the package long description.

    Args:
        path: File to read; defaults to 'README.md' so existing callers work.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(path) as readme:
        return readme.read()
class CMakeExtension(Extension):
    """A setuptools Extension with no sources: the build is delegated to CMake (see CMakeBuild)."""
    def __init__(self, name, sourcedir=''):
        # sources=[] on purpose — setuptools must not try to compile anything itself.
        Extension.__init__(self, name, sources=[])
        # Absolute path to the directory containing the CMakeLists.txt.
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """setuptools build_ext command that configures and builds extensions with CMake."""

    def run(self):
        """Check that a usable CMake is installed, then build every extension."""
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError("CMake must be installed to build the following extensions: " +
                               ", ".join(e.name for e in self.extensions))
        if platform.system() == "Windows":
            cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        """Run cmake configure + build for a single CMakeExtension."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]
        if USE_DEBUG_MESSAGE:
            cmake_args.append('-DUSE_DEBUG_MESSAGE=ON')
        cfg = 'Debug' if self.debug else 'Release'
        build_args = ['--config', cfg]
        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2**32:
                # 64-bit interpreter: target the x64 generator platform.
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j2']
        env = os.environ.copy()
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
                                                              self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        if 'USE_AVX2' in env:
            cmake_args.append('-DUSE_AVX2=ON')
            print("Use AVX2")
        elif 'USE_SSE2' in env:
            # Bug fix: was '-USE_SSE2=ON' — CMake cache entries are defined with
            # the -D prefix, so the SSE2 option was never actually set.
            cmake_args.append('-DUSE_SSE2=ON')
            print("Use SSE2")
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
# Trove classifiers advertised on PyPI; the triple-quoted block is split into
# one classifier per line, then stripped of surrounding whitespace.
classifiers = """
Development Status :: 3 - Alpha
Development Status :: 4 - Beta
Intended Audience :: Developers
Intended Audience :: Healthcare Industry
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Medical Science Apps.
""".strip().splitlines()
classifiers = [l.strip() for l in classifiers]
# Package metadata; the native module is compiled by CMakeBuild (cmdclass below).
setup(
    name='dicomsdl',
    version=get_dicomsdl_version(),
    author='Kim, Tae-Sung',
    author_email='taesung.angel@gmail.com',
    description='A fast and light-weighted DICOM software development library',
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    url='https://github.com/tsangel/dicomsdl',
    packages=find_packages('src/python'),
    package_dir={"dicomsdl":"src/python/dicomsdl"},
    ext_modules=[CMakeExtension('dicomsdl.dicomsdl')],
    entry_points={
        "console_scripts" : [
            "dicomdump=dicomsdl.dump:main",
            "dicomshow=dicomsdl.show:main",
        ]},
    classifiers=classifiers,
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
    cmdclass=dict(build_ext=CMakeBuild),
    zip_safe=False,
)
| 36.155039 | 98 | 0.621355 |
98c7aeccc4b19edfc433a6556108ef8b77d12aa4 | 18,881 | py | Python | tensorflow/python/framework/importer.py | aeverall/tensorflow | 7992bf97711919f56f80bff9e5510cead4ab2095 | [
"Apache-2.0"
] | 2 | 2018-12-12T23:33:05.000Z | 2019-02-26T07:20:22.000Z | tensorflow/python/framework/importer.py | aeverall/tensorflow | 7992bf97711919f56f80bff9e5510cead4ab2095 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/importer.py | aeverall/tensorflow | 7992bf97711919f56f80bff9e5510cead4ab2095 | [
"Apache-2.0"
] | 2 | 2019-10-11T00:17:03.000Z | 2020-05-23T18:59:45.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility function for importing TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import graph_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
def _IsControlInput(input_name):
# Expected format: '^operation_name' (control input).
return input_name.startswith('^')
def _ParseTensorName(tensor_name):
"""Parses a tensor name into an operation name and output index.
This function will canonicalize tensor names as follows:
* "foo:0" -> ("foo", 0)
* "foo:7" -> ("foo", 7)
* "foo" -> ("foo", 0)
* "foo:bar:baz" -> ValueError
Args:
tensor_name: The name of a tensor.
Returns:
A tuple containing the operation name, and the output index.
Raises:
ValueError: If `tensor_name' cannot be interpreted as the name of a tensor.
"""
components = tensor_name.split(':')
if len(components) == 2:
# Expected format: 'operation_name:output_index'.
try:
output_index = int(components[1])
except ValueError:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
return components[0], output_index
elif len(components) == 1:
# Expected format: 'operation_name' (implicit 0th output).
return components[0], 0
else:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
@contextlib.contextmanager
def _MaybeDevice(device):
"""Applies the given device only if device is not None or empty."""
if device:
with ops.device(device):
yield
else:
yield
def _ProcessGraphDefParam(graph_def, op_dict):
  """Type-checks and possibly canonicalizes `graph_def`.

  If `graph_def` is not already a `GraphDef` proto, it is copied into one;
  otherwise the caller's proto is mutated in-place to fill in default attr
  values for every known op.

  Args:
    graph_def: a `GraphDef` proto, or any message that can be merged into one.
    op_dict: dict mapping op name to `OpDef`, used to look up default attrs.

  Returns:
    A `GraphDef` proto (possibly the caller's own instance).

  Raises:
    TypeError: if `graph_def` cannot be interpreted as a `GraphDef` proto.
  """
  if not isinstance(graph_def, graph_pb2.GraphDef):
    # `graph_def` could be a dynamically-created message, so try a duck-typed
    # approach
    try:
      old_graph_def = graph_def
      graph_def = graph_pb2.GraphDef()
      graph_def.MergeFrom(old_graph_def)
    except TypeError:
      raise TypeError('graph_def must be a GraphDef proto.')
  else:
    # If we're using the graph_def provided by the caller, modify graph_def
    # in-place to add attr defaults to the NodeDefs (this is visible to the
    # caller).
    # NOTE(skyewm): this is undocumented behavior that at least meta_graph.py
    # depends on. It might make sense to move this to meta_graph.py and have
    # import_graph_def not modify the graph_def argument (we'd have to make sure
    # this doesn't break anything else.)
    for node in graph_def.node:
      if node.op not in op_dict:
        # Assume unrecognized ops are functions for now. TF_ImportGraphDef will
        # report an error if the op is actually missing.
        continue
      op_def = op_dict[node.op]
      _SetDefaultAttrValues(node, op_def)
  return graph_def
def _ProcessInputMapParam(input_map):
"""Type-checks and possibly canonicalizes `input_map`."""
if input_map is None:
input_map = {}
else:
if not (isinstance(input_map, dict) and all(
isinstance(k, compat.bytes_or_text_types) for k in input_map.keys())):
raise TypeError('input_map must be a dictionary mapping strings to '
'Tensor objects.')
return input_map
def _ProcessReturnElementsParam(return_elements):
"""Type-checks and possibly canonicalizes `return_elements`."""
if return_elements is None:
return None
if not all(
isinstance(x, compat.bytes_or_text_types) for x in return_elements):
raise TypeError('return_elements must be a list of strings.')
return tuple(compat.as_str(x) for x in return_elements)
def _FindAttrInOpDef(attr_name, op_def):
for attr_def in op_def.attr:
if attr_name == attr_def.name:
return attr_def
return None
def _RemoveDefaultAttrs(op_dict, producer_op_list, graph_def):
  """Removes unknown default attrs according to `producer_op_list`.

  Removes any unknown attrs in `graph_def` (i.e. attrs that do not appear in
  the OpDefs in `op_dict`) that have a default value in `producer_op_list`.
  `graph_def` is mutated in place.

  Args:
    op_dict: dict mapping operation name to OpDef.
    producer_op_list: OpList proto.
    graph_def: GraphDef proto
  """
  producer_op_dict = {op.name: op for op in producer_op_list.op}
  for node in graph_def.node:
    # Remove any default attr values that aren't in op_def.
    if node.op in producer_op_dict:
      op_def = op_dict[node.op]
      producer_op_def = producer_op_dict[node.op]
      # We make a copy of node.attr to iterate through since we may modify
      # node.attr inside the loop.
      for key in list(node.attr):
        if _FindAttrInOpDef(key, op_def) is None:
          # No attr_def in consumer, look in producer.
          attr_def = _FindAttrInOpDef(key, producer_op_def)
          if (attr_def and attr_def.HasField('default_value') and
              node.attr[key] == attr_def.default_value):
            # Unknown attr had default value in producer, delete it so it can be
            # understood by consumer.
            del node.attr[key]
def _ConvertInputMapValues(name, input_map):
  """Ensures all input map values are tensors.

  This should be called from inside the import name scope.

  Args:
    name: the `name` argument passed to import_graph_def
    input_map: the `input_map` argument passed to import_graph_def.

  Returns:
    An possibly-updated version of `input_map`.

  Raises:
    ValueError: if input map values cannot be converted due to empty name scope.
  """
  if not all(isinstance(v, ops.Tensor) for v in input_map.values()):
    if name == '':  # pylint: disable=g-explicit-bool-comparison
      raise ValueError(
          'tf.import_graph_def() requires a non-empty `name` if `input_map` '
          'contains non-Tensor values. Try calling tf.convert_to_tensor() on '
          '`input_map` values before calling tf.import_graph_def().')
    # NOTE(review): convert_to_tensor may create new ops, hence the dedicated
    # '_inputs' sub-scope so those ops are grouped under the import name.
    with ops.name_scope('_inputs'):
      input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}
  return input_map
def _PopulateTFImportGraphDefOptions(options, prefix, input_map,
                                     return_elements):
  """Populates the TF_ImportGraphDefOptions `options`.

  Registers the name prefix, the input remappings and the requested return
  elements with the C API options object before the import call.
  """
  c_api.TF_ImportGraphDefOptionsSetPrefix(options, prefix)
  c_api.TF_ImportGraphDefOptionsSetUniquifyNames(options, True)
  for input_src, input_dst in input_map.items():
    input_src = compat.as_str(input_src)
    # A leading '^' marks a control dependency rather than a tensor input.
    if input_src.startswith('^'):
      src_name = compat.as_str(input_src[1:])
      dst_op = input_dst._as_tf_output().oper  # pylint: disable=protected-access
      c_api.TF_ImportGraphDefOptionsRemapControlDependency(
          options, src_name, dst_op)
    else:
      src_name, src_idx = _ParseTensorName(input_src)
      src_name = compat.as_str(src_name)
      dst_output = input_dst._as_tf_output()  # pylint: disable=protected-access
      c_api.TF_ImportGraphDefOptionsAddInputMapping(options, src_name, src_idx,
                                                    dst_output)
  for name in return_elements or []:
    # Names containing ':' request a tensor output; bare names an operation.
    if ':' in name:
      op_name, index = _ParseTensorName(name)
      op_name = compat.as_str(op_name)
      c_api.TF_ImportGraphDefOptionsAddReturnOutput(options, op_name, index)
    else:
      c_api.TF_ImportGraphDefOptionsAddReturnOperation(options,
                                                       compat.as_str(name))
def _ProcessNewOps(graph):
  """Processes the newly-added TF_Operations in `graph`.

  Applies the graph's device functions to each new op and propagates device
  assignments implied by colocation ('_class') attributes.
  """
  # Maps from a node to the names of the ops it's colocated with, if colocation
  # is specified in the attributes.
  colocation_pairs = {}
  for new_op in graph._add_new_tf_operations(compute_devices=False):  # pylint: disable=protected-access
    original_device = new_op.device
    new_op._set_device('')  # pylint: disable=protected-access
    colocation_names = _GetColocationNames(new_op)
    if colocation_names:
      colocation_pairs[new_op] = colocation_names
      # Don't set a device for this op, since colocation constraints override
      # device functions and the original device. Note that this op's device may
      # still be set by the loop below.
      # TODO(skyewm): why does it override the original device?
    else:
      with _MaybeDevice(original_device):
        graph._apply_device_functions(new_op)  # pylint: disable=protected-access
  # The following loop populates the device field of ops that are colocated
  # with another op.  This is implied by the colocation attribute, but we
  # propagate the device field for completeness.
  for op, coloc_op_list in colocation_pairs.items():
    coloc_device = None
    # Find any device in the list of colocated ops that have a device, if it
    # exists.  We assume that if multiple ops have devices, they refer to the
    # same device.  Otherwise, a runtime error will occur since the colocation
    # property cannot be guaranteed.  Note in TF2 colocations have been removed
    # from the public API and will be considered a hint, so there is no runtime
    # error.
    #
    # One possible improvement is to try to check for compatibility of all
    # devices in this list at import time here, which would require
    # implementing a compatibility function for device specs in python.
    for coloc_op_name in coloc_op_list:
      try:
        coloc_op = graph._get_operation_by_name_unsafe(coloc_op_name)  # pylint: disable=protected-access
      except KeyError:
        # Do not error in TF2 if the colocation cannot be guaranteed
        if tf2.enabled():
          continue
        raise ValueError('Specified colocation to an op that '
                         'does not exist during import: %s in %s' %
                         (coloc_op_name, op.name))
      if coloc_op.device:
        coloc_device = pydev.DeviceSpec.from_string(coloc_op.device)
        break
    if coloc_device:
      op._set_device(coloc_device)  # pylint: disable=protected-access
def _GetColocationNames(op):
"""Returns names of the ops that `op` should be colocated with."""
colocation_names = []
try:
class_values = op.get_attr('_class')
except ValueError:
# No _class attr
return
for val in class_values:
val = compat.as_str(val)
if val.startswith('loc:@'):
colocation_node_name = val[len('loc:@'):]
if colocation_node_name != op.name:
colocation_names.append(colocation_node_name)
return colocation_names
def _GatherReturnElements(requested_return_elements, graph, results):
  """Returns the requested return elements from results.

  Args:
    requested_return_elements: list of strings of operation and tensor names
    graph: Graph
    results: wrapped TF_ImportGraphDefResults

  Returns:
    list of `Operation` and/or `Tensor` objects
  """
  return_outputs = c_api.TF_ImportGraphDefResultsReturnOutputs(results)
  return_opers = c_api.TF_ImportGraphDefResultsReturnOperations(results)
  # The C API keeps tensors and operations in two separate result arrays; the
  # two indices walk them in the order the names were originally requested.
  combined_return_elements = []
  outputs_idx = 0
  opers_idx = 0
  for name in requested_return_elements:
    if ':' in name:
      combined_return_elements.append(
          graph._get_tensor_by_tf_output(return_outputs[outputs_idx]))  # pylint: disable=protected-access
      outputs_idx += 1
    else:
      combined_return_elements.append(
          graph._get_operation_by_tf_operation(return_opers[opers_idx]))  # pylint: disable=protected-access
      opers_idx += 1
  return combined_return_elements
def _SetDefaultAttrValues(node_def, op_def):
  """Set any default attr values in `node_def` that aren't present.

  `node_def` is mutated in place; attrs already set are left untouched.
  """
  assert node_def.op == op_def.name
  for attr_def in op_def.attr:
    key = attr_def.name
    if attr_def.HasField('default_value'):
      # An unset proto attr reports no value oneof; fill it from the OpDef.
      value = node_def.attr[key]
      if value is None or value.WhichOneof('value') is None:
        node_def.attr[key].CopyFrom(attr_def.default_value)
@tf_export('graph_util.import_graph_def', 'import_graph_def')
@deprecated_args(None, 'Please file an issue at '
                 'https://github.com/tensorflow/tensorflow/issues if you depend'
                 ' on this feature.', 'op_dict')
def import_graph_def(graph_def,
                     input_map=None,
                     return_elements=None,
                     name=None,
                     op_dict=None,
                     producer_op_list=None):
  """Imports the graph from `graph_def` into the current default `Graph`.

  This function provides a way to import a serialized TensorFlow
  [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
  protocol buffer, and extract individual objects in the `GraphDef` as
  `tf.Tensor` and `tf.Operation` objects. Once extracted,
  these objects are placed into the current default `Graph`. See
  `tf.Graph.as_graph_def` for a way to create a `GraphDef`
  proto.

  Args:
    graph_def: A `GraphDef` proto containing operations to be imported into
      the default graph.
    input_map: A dictionary mapping input names (as strings) in `graph_def`
      to `Tensor` objects. The values of the named input tensors in the
      imported graph will be re-mapped to the respective `Tensor` values.
    return_elements: A list of strings containing operation names in
      `graph_def` that will be returned as `Operation` objects; and/or
      tensor names in `graph_def` that will be returned as `Tensor` objects.
    name: (Optional.) A prefix that will be prepended to the names in
      `graph_def`. Note that this does not apply to imported function names.
      Defaults to `"import"`.
    op_dict: (Optional.) Deprecated, do not use. Ignored: the registered op
      registry is always consulted instead.
    producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)
      list of `OpDef`s used by the producer of the graph. If provided,
      unrecognized attrs for ops in `graph_def` that have their default value
      according to `producer_op_list` will be removed. This will allow some more
      `GraphDef`s produced by later binaries to be accepted by earlier binaries.

  Returns:
    A list of `Operation` and/or `Tensor` objects from the imported graph,
    corresponding to the names in `return_elements`,
    and None if `returns_elements` is None.

  Raises:
    TypeError: If `graph_def` is not a `GraphDef` proto,
      `input_map` is not a dictionary mapping strings to `Tensor` objects,
      or `return_elements` is not a list of strings.
    ValueError: If `input_map`, or `return_elements` contains names that
      do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
      it refers to an unknown tensor).
  """
  # The deprecated `op_dict` argument is deliberately overwritten: defaults are
  # always taken from the registered op registry.
  op_dict = op_def_registry.get_registered_ops()
  graph_def = _ProcessGraphDefParam(graph_def, op_dict)
  input_map = _ProcessInputMapParam(input_map)
  return_elements = _ProcessReturnElementsParam(return_elements)
  if producer_op_list is not None:
    # TODO(skyewm): make a copy of graph_def so we're not mutating the argument?
    _RemoveDefaultAttrs(op_dict, producer_op_list, graph_def)
  graph = ops.get_default_graph()
  with ops.name_scope(name, 'import', input_map.values()) as scope:
    # Save unique prefix generated by name_scope
    if scope:
      assert scope.endswith('/')
      prefix = scope[:-1]
    else:
      prefix = ''
    # Generate any input map tensors inside name scope
    input_map = _ConvertInputMapValues(name, input_map)
  scoped_options = c_api_util.ScopedTFImportGraphDefOptions()
  options = scoped_options.options
  _PopulateTFImportGraphDefOptions(options, prefix, input_map,
                                   return_elements)
  # _ProcessNewOps mutates the new operations. _mutation_lock ensures a
  # Session.run call cannot occur between creating the TF_Operations in the
  # TF_GraphImportGraphDefWithResults call and mutating the them in
  # _ProcessNewOps.
  with graph._mutation_lock():  # pylint: disable=protected-access
    with c_api_util.tf_buffer(graph_def.SerializeToString()) as serialized:
      try:
        results = c_api.TF_GraphImportGraphDefWithResults(
            graph._c_graph, serialized, options)  # pylint: disable=protected-access
        results = c_api_util.ScopedTFImportGraphDefResults(results)
      except errors.InvalidArgumentError as e:
        # Convert to ValueError for backwards compatibility.
        raise ValueError(str(e))
    # Create _DefinedFunctions for any imported functions.
    #
    # We do this by creating _DefinedFunctions directly from `graph_def`, and
    # adding them to `graph`. Adding an existing function to a TF_Graph is a
    # no-op, so this only has the effect of updating the Python state (usually
    # _DefinedFunction.add_to_graph also adds the function to the TF_Graph).
    #
    # TODO(skyewm): fetch the TF_Functions directly from the TF_Graph
    # TODO(skyewm): avoid sending serialized FunctionDefs back to the TF_Graph
    _ProcessNewOps(graph)
  if graph_def.library and graph_def.library.function:
    # pylint: disable=protected-access
    functions = function._from_library(graph_def.library)
    for f in functions:
      f.add_to_graph(graph)
    # pylint: enable=protected-access
  # Treat input mappings that don't appear in the graph as an error, because
  # they are likely to be due to a typo.
  missing_unused_input_keys = (
      c_api.TF_ImportGraphDefResultsMissingUnusedInputMappings_wrapper(
          results.results))
  if missing_unused_input_keys:
    missing_unused_input_keys = [
        compat.as_str(s) for s in missing_unused_input_keys
    ]
    raise ValueError(
        'Attempted to map inputs that were not found in graph_def: [%s]' %
        ', '.join(missing_unused_input_keys))
  if return_elements is None:
    return None
  else:
    return _GatherReturnElements(return_elements, graph, results.results)
| 40.344017 | 108 | 0.710662 |
48afcca6ad3b9080c345494a9dd9feae196260a7 | 456 | py | Python | banco_de_dados/povoar_grupo.py | higorsantana-omega/Python_Aprendizado | f02dd4bd104e3a25f9207e7f0d99e54e34e4a9c0 | [
"MIT"
] | null | null | null | banco_de_dados/povoar_grupo.py | higorsantana-omega/Python_Aprendizado | f02dd4bd104e3a25f9207e7f0d99e54e34e4a9c0 | [
"MIT"
] | null | null | null | banco_de_dados/povoar_grupo.py | higorsantana-omega/Python_Aprendizado | f02dd4bd104e3a25f9207e7f0d99e54e34e4a9c0 | [
"MIT"
] | null | null | null | from mysql.connector.errors import ProgrammingError
from bd import nova_conexao
sql = 'INSERT INTO grupo (descricao) VALUES (%s)'
args = (
('Casa',),
('Trabalho',),
)
with nova_conexao() as conexao:
try:
cursor = conexao.cursor()
cursor.executemany(sql, args)
conexao.commit()
except ProgrammingError as e:
print(f'Erro: {e.msg}')
else:
print(f'Foram incluidos {cursor.lastrowid} no registro!') | 25.333333 | 65 | 0.640351 |
04ea56559406467248a51405f951343ec6944ba4 | 7,975 | py | Python | cpgw/gateway.py | blavka/cpgw | 501cdb4cde109574f3afd50dbd7038a35d76cd67 | [
"MIT"
] | 3 | 2019-01-29T12:18:10.000Z | 2019-07-13T07:15:32.000Z | cpgw/gateway.py | blavka/cpgw | 501cdb4cde109574f3afd50dbd7038a35d76cd67 | [
"MIT"
] | 1 | 2021-08-28T18:44:29.000Z | 2021-08-31T15:45:25.000Z | cpgw/gateway.py | blavka/cpgw | 501cdb4cde109574f3afd50dbd7038a35d76cd67 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import decimal
import logging
import platform
import time
from ctypes import *
from threading import Event, Lock, Thread
import serial
# fcntl is POSIX-only; when it is missing (e.g. on Windows) the device
# locking and low-latency tweaks in Gateway degrade to no-ops.
try:
    import fcntl
except ImportError:
    fcntl = None
# Decimal contexts used by the field converters below.
# NOTE(review): decimal.Context(prec=N) limits *significant digits*, not
# decimal places — prec=1 on humidity may be narrower than intended; confirm.
context_prec1 = decimal.Context(prec=1)
context_prec2 = decimal.Context(prec=2)
# Leading CSV fields common to every "$RECV:" line on current firmware:
# (field name, converter) in wire order.
recv_start = (
    ("rssi", int),
    ("id", str),
    ("header", int),
    ("sequence", int),
    ("uptime", int),
)
# Maps the 'header' field of a "$RECV:" line to the message type name and
# the (field name, converter) pairs of the type-specific CSV tail.
recv_type_lut = {
    1: {'type': 'beacon',
        'items': (
            ("altitude", int),
            ("co2_conc", int),
            ("humidity", lambda x: decimal.Decimal(x, context_prec1)),
            ("illuminance", int),
            ("motion_count", int),
            ("orientation", int),
            ("press_count", int),
            ("pressure", int),
            ("sound_level", int),
            ("temperature", lambda x: decimal.Decimal(x, context_prec2)),
            ("voc_conc", int),
            ("voltage", lambda x: decimal.Decimal(x, context_prec2))
        )},
    2: {'type': 'sound',
        'items': (
            ("min", int),
            ("max", int),
        )}
}
# Legacy flat layout used by firmware 1.0.x (no 'header' field; keys use
# '-' separators baked in).
items_v1_0_x = (
    ("rssi", int),
    ("id", str),
    ("sequence", int),
    ("altitude", int),
    ("co2-conc", int),
    ("humidity", lambda x: decimal.Decimal(x, context_prec1)),
    ("illuminance", int),
    ("motion-count", int),
    ("orientation", int),
    ("press-count", int),
    ("pressure", int),
    ("sound-level", int),
    ("temperature", lambda x: decimal.Decimal(x, context_prec2)),
    ("voc-conc", int),
    ("voltage", lambda x: decimal.Decimal(x, context_prec2))
)
class Gateway:
    """Serial driver for a radio gateway speaking an AT-style protocol.

    Opens *device* at 115200 baud, takes an exclusive advisory lock on it,
    parses asynchronous "$RECV:" lines into dicts handed to ``on_recv``, and
    exposes a blocking ``command()`` helper for AT requests.  *separator*
    replaces the underscores in the parsed payload's measurement keys
    (e.g. "co2_conc" -> "co2-conc" for separator '-').
    """

    def __init__(self, device, separator):
        self._ser = None
        self._device = device
        # Callbacks the user may assign: on_line(raw_line), on_recv(payload dict).
        self.on_line = None
        self.on_recv = None
        # _command_mutex serializes AT commands; _event signals OK/ERROR arrival;
        # _response collects the reply lines of the command in flight.
        self._command_mutex = Lock()
        self._event = Event()
        self._response = None
        logging.info("Connecting on device %s", self._device)
        self._ser = serial.Serial(self._device, baudrate=115200, timeout=3)
        self._lock()
        self._speed_up()
        logging.info("Success connect on device %s", self._device)
        # Drop any stale bytes left over from a previous session.
        self._ser.flush()
        self._ser.reset_input_buffer()
        self._ser.reset_output_buffer()
        time.sleep(0.5)
        # ESC byte — presumably aborts whatever mode the gateway was left in;
        # TODO confirm against the device firmware documentation.
        self._ser.write(b'\x1b')
        # is_run is still False, so _command() pumps _loop() inline instead of
        # waiting on the reader thread (which does not exist yet).
        self.is_run = False
        self._command('')
        # Firmware 1.0.x uses the flat items_v1_0_x layout; newer firmware
        # prefixes a 'header' field that selects a row of recv_type_lut.
        cgmr = self.get_cgmr()
        self._old_recv = cgmr.startswith("1.0.") or cgmr.startswith("v1.0.")
        logging.info("FW: %s", self.command('I')[0])
        # Rebuild the parse table with the caller's key separator.
        self._recv_type_lut = {}
        for header in recv_type_lut:
            items = []
            for item in recv_type_lut[header]['items']:
                items.append((item[0].replace('_', separator), item[1]))
            self._recv_type_lut[header] = {
                'type': recv_type_lut[header]['type'],
                'items': tuple(items),
            }

    def __del__(self):
        # Best-effort cleanup: release the advisory lock and close the port.
        self._unlock()
        try:
            self._ser.close()
        except Exception as e:
            pass
        self._ser = None

    def run(self):
        """Read and dispatch serial lines until ``is_run`` is cleared."""
        self.is_run = True
        while self.is_run:
            self._loop()

    def _loop(self):
        """Read one line from the gateway and dispatch it.

        '{'-prefixed and '#'-prefixed lines are ignored; "$RECV:" lines are
        decoded into a payload dict for ``on_recv``; any other line while a
        command is pending is collected as part of that command's response.
        Raises on serial failure after closing the port.
        """
        try:
            line = self._ser.readline()
        except serial.SerialException as e:
            logging.error("SerialException %s", e)
            self._ser.close()
            raise
        if line:
            logging.debug("Read line %s", line)
            line = line.decode().strip()
            if line[0] == '{':
                return
            if line[0] == '#':
                return
            if self.on_line:
                self.on_line(line)
            if self.on_recv and line.startswith("$RECV:"):
                payload = {}
                # [7:] skips '$RECV:' plus one following character
                # (presumably a space — confirm against firmware output).
                values = line[7:].split(',')
                if self._old_recv:
                    # Legacy fixed flat layout; empty CSV fields become None.
                    for i, item in enumerate(items_v1_0_x):
                        value = values[i]
                        payload[item[0]] = None if value == '' else item[1](value)
                else:
                    # Common prefix (rssi, id, header, sequence, uptime)...
                    for i, item in enumerate(recv_start):
                        value = values[i]
                        payload[item[0]] = None if value == '' else item[1](value)
                    # ...then the type-specific tail selected by 'header'.
                    # Unknown headers keep the raw prefix dict unchanged.
                    recv_type = self._recv_type_lut.get(payload['header'], None)
                    if recv_type:
                        del payload['header']
                        payload['type'] = recv_type['type']
                        for i, item in enumerate(recv_type['items']):
                            # Offset 5 == len(recv_start) fields already consumed.
                            value = values[i + 5]
                            payload[item[0]] = None if value == '' else item[1](value)
                self.on_recv(payload)
            elif self._response is not None:
                # A command is pending: collect lines until OK/ERROR.
                if line == 'OK':
                    self._event.set()
                elif line == 'ERROR':
                    # _response becoming None signals failure to _command().
                    self._response = None
                    self._event.set()
                else:
                    self._response.append(line)

    def _command(self, command):
        """Send one AT command and block until OK/ERROR.

        Returns the list of response lines, or None on ERROR.  When the
        reader thread is not running, pumps _loop() inline instead of
        waiting on the event.
        """
        with self._command_mutex:
            logging.debug("Command AT%s", command)
            self._event.clear()
            command = 'AT' + command + '\r\n'
            self._response = []
            self._ser.write(command.encode('ascii'))
            if self.is_run:
                self._event.wait()
            else:
                while not self._event.is_set():
                    self._loop()
            response = self._response
            self._response = None
            return response

    def command(self, command, repeat=3):
        """Send an AT command, retrying up to *repeat* times on ERROR.

        Returns the response lines; raises Exception when every attempt fails.
        """
        for i in range(repeat):
            response = self._command(command)
            if response is None:
                time.sleep(0.5)
                continue
            return response
        raise Exception("Command %s not work." % command)

    def get_cgsn(self):
        """Return the gateway serial number (AT+CGSN, text after the colon)."""
        response = self.command("+CGSN")
        return response[0].split(':')[1].strip()

    def get_cgmr(self):
        """Return the firmware revision string (AT+CGMR, text after the colon)."""
        response = self.command("+CGMR")
        return response[0].split(':')[1].strip()

    def start(self):
        """Start the read loop in a background thread (non-blocking run())."""
        Thread(target=self.run, args=[]).start()

    def _lock(self):
        """Take an exclusive non-blocking flock on the serial device.

        No-op where fcntl is unavailable; raises when another process
        already holds the lock.
        """
        if not fcntl or not self._ser:
            return
        try:
            fcntl.flock(self._ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except Exception as e:
            raise Exception('Could not lock device %s' % self._device)

    def _unlock(self):
        """Release the advisory lock taken by _lock() (no-op without fcntl)."""
        if not fcntl or not self._ser:
            return
        fcntl.flock(self._ser.fileno(), fcntl.LOCK_UN)

    def _speed_up(self):
        """Set the ASYNC_LOW_LATENCY flag on the tty (Linux only, best effort).

        Uses TIOCGSERIAL/TIOCSSERIAL ioctls on a ctypes mirror of the kernel's
        serial_struct; silently ignores failures (e.g. USB adapters that do
        not support the ioctl).
        """
        if not fcntl:
            return
        if platform.system() != 'Linux':
            return
        TIOCGSERIAL = 0x0000541E
        TIOCSSERIAL = 0x0000541F
        ASYNC_LOW_LATENCY = 0x2000

        # ctypes layout matching the kernel's struct serial_struct.
        class serial_struct(Structure):
            _fields_ = [("type", c_int),
                        ("line", c_int),
                        ("port", c_uint),
                        ("irq", c_int),
                        ("flags", c_int),
                        ("xmit_fifo_size", c_int),
                        ("custom_divisor", c_int),
                        ("baud_base", c_int),
                        ("close_delay", c_ushort),
                        ("io_type", c_byte),
                        ("reserved_char", c_byte * 1),
                        ("hub6", c_uint),
                        ("closing_wait", c_ushort),
                        ("closing_wait2", c_ushort),
                        ("iomem_base", POINTER(c_ubyte)),
                        ("iomem_reg_shift", c_ushort),
                        ("port_high", c_int),
                        ("iomap_base", c_ulong)]

        buf = serial_struct()
        try:
            # Read current flags, set low-latency, write them back.
            fcntl.ioctl(self._ser.fileno(), TIOCGSERIAL, buf)
            buf.flags |= ASYNC_LOW_LATENCY
            fcntl.ioctl(self._ser.fileno(), TIOCSSERIAL, buf)
        except Exception as e:
            pass
| 29.319853 | 86 | 0.487273 |
8032560dc1d44c5b331ea8cf92478bc82b1b7802 | 6,857 | py | Python | scripts/mars-room-sensor.py | macroEagle/MARS_Sensor | 205df33831e8f1da889d2bb5363b455433b034cf | [
"Apache-2.0"
] | null | null | null | scripts/mars-room-sensor.py | macroEagle/MARS_Sensor | 205df33831e8f1da889d2bb5363b455433b034cf | [
"Apache-2.0"
] | null | null | null | scripts/mars-room-sensor.py | macroEagle/MARS_Sensor | 205df33831e8f1da889d2bb5363b455433b034cf | [
"Apache-2.0"
] | null | null | null | import requests
import configparser
import time
import logging
from logging.handlers import RotatingFileHandler

# Per-device settings (which sensor this Pi is) and shared MARS settings.
sensor_config = configparser.ConfigParser()
mars_config = configparser.ConfigParser()
sensor_config.read('/mars/mars-sensor.ini')
mars_config.read('/mars/scripts/mars.ini')

# === Read MARS configuration ===
cloudRetryTimes = int(mars_config['mars']['post_retry_times'])  # HTTP POST retry attempts
sleep_interval = int(mars_config['mars']['post_interval'])  # seconds per reporting cycle
sensor_on_last_time = int(mars_config['mars']['sensor_on_last_time'])  # 'on' latch window (s)
sensor_interval = 2  # seconds between sensor polls within a cycle

# === Read sensor configuration ===
raspi_id = sensor_config['sensor']['sensor_id']
sensor_room_list = mars_config['raspi'][raspi_id].split(';')  # rooms handled by this Pi
room_availability = {}  # room name -> last known 'on'/'off'/'error'
sensor_status = {}  # sensor name -> timestamp of last 'on' reading (0 = none)

# === Init logger ===
logger = logging.getLogger("MARS")
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logger.setLevel(logging.DEBUG)
# Rotating file handler: path, size limit and backup count come from the config.
handler = RotatingFileHandler(mars_config['logger']['log_file'], maxBytes=int(mars_config['logger']['file_size']), backupCount=int(mars_config['logger']['backup_count']))
handler.setFormatter(formatter)
logger.addHandler(handler)

# HTTP headers for the local Home Assistant API and for the MARS cloud endpoint.
api_headers = {
    'Authorization': 'Bearer '+sensor_config['homeassistant']['api_token'],
    'Content-Type': 'application/json',
}
cloud_headers = {
    'Content-Type': 'application/json',
}
def get_and_send_sensor_signal():
    """Poll every configured room for one reporting cycle, then push a summary.

    Runs for roughly ``sleep_interval`` seconds, sampling the motion sensors
    every ``sensor_interval`` seconds.  A room's status is posted to the cloud
    immediately when it changes, and every room's final status is posted once
    more at the end of the cycle.
    """
    polls_per_cycle = sleep_interval / sensor_interval
    # Start every room in 'error' so the first real reading always counts
    # as a change and gets posted.
    for room in sensor_room_list:
        room_availability[room] = 'error'
    poll = 0
    while poll < polls_per_cycle:
        for room in sensor_room_list:
            log_info("Start to check for room : " + room)
            current = check_room_availability_by_sensors(room)
            if room_availability[room] != current:
                room_availability[room] = current
                post_room_status(room, room_availability[room])
            log_info("Room availability for " + room + " = " + room_availability[room])
        log_info(f"Sleep for {sensor_interval} seconds [{poll}].")
        time.sleep(sensor_interval)
        poll += 1
    # End-of-cycle heartbeat: re-post every room even if unchanged.
    for room in sensor_room_list:
        post_room_status(room, room_availability[room])
def post_room_status(sensor_room, room_status):
    """POST one room's availability to the MARS cloud server.

    'on' is sent as "1", 'off' as "0", anything else as "-1".  The request
    is retried up to ``cloudRetryTimes`` times until the server answers
    200/201, with a one-second pause after every attempt.
    """
    status_code = {'on': "1", 'off': "0"}.get(room_status, "-1")
    post_url = mars_config['mars']['post_url_room_status'] + mars_config[sensor_room]['server_room_id'] + '/status'
    http_status = 123  # sentinel meaning "no response yet"
    attempts_left = cloudRetryTimes
    while attempts_left > 0:
        log_debug("Sending..." + post_url)
        try:
            response = requests.post(url=post_url, data=status_code, headers=cloud_headers)
            log_debug(f"Send to server for {sensor_room}[{mars_config[sensor_room]['server_room_id']}] with data:{status_code}:{response.status_code}")
            http_status = response.status_code
        except requests.exceptions.RequestException as exc:
            log_error(exc)
        finally:
            if http_status == 200 or http_status == 201:
                attempts_left = 0
            else:
                attempts_left = attempts_left - 1
            time.sleep(1)
def check_room_availability_by_sensors(sensor_room):
    """Aggregate all motion sensors of *sensor_room* into one status.

    Returns 'on' if any sensor reports 'on'; otherwise 'off' if any sensor
    reports 'off'; otherwise 'error' (every sensor failed).
    """
    aggregated = 'error'
    for sensor_name in mars_config[sensor_room]['motion_sensor'].split(';'):
        status = get_motion_sensor_status(sensor_name)
        if status == 'on':
            aggregated = 'on'
        elif aggregated == 'error' and status == 'off':
            aggregated = 'off'
    return aggregated
def get_motion_sensor_status(motion_sensor_name):
    """Return 'on'/'off'/'error' for one sensor, latched by the on-time cache.

    A sensor that reported 'on' within the last ``sensor_on_last_time``
    seconds keeps reporting 'on' regardless of the live reading; a fresh 'on'
    renews the latch timestamp.  Outside the latch window the live value is
    returned and the cache is refreshed.
    """
    last_on_time = sensor_status.get(motion_sensor_name, 0)
    live_status = get_motion_sensor_status_from_ha(motion_sensor_name)
    within_latch = last_on_time > 0 and time.time() - last_on_time < sensor_on_last_time
    if not within_latch:
        cache_sensor_status(motion_sensor_name, live_status)
        return live_status
    if live_status == 'on':
        # Renew the latch on a fresh 'on' reading.
        sensor_status[motion_sensor_name] = time.time()
    log_debug(f"Caching motion sensor on status for {motion_sensor_name} since [{last_on_time}] ({time.time()}).")
    return 'on'
def cache_sensor_status(motion_sensor_name, motion_sensor_status):
    """Record the latch timestamp for a sensor: now for 'on', 0 otherwise."""
    if motion_sensor_status != 'on':
        sensor_status[motion_sensor_name] = 0
        return
    sensor_status[motion_sensor_name] = time.time()
    log_debug(f"Start caching for {motion_sensor_name} [{sensor_status[motion_sensor_name]}]")
def get_motion_sensor_status_from_ha(motion_sensor_name):
    """Query the local Home Assistant API for a binary motion sensor's state.

    Returns 'on', 'off', or 'error' (HTTP failure, non-200 answer, or an
    unexpected state string).
    """
    url = 'http://127.0.0.1:8123/api/states/binary_sensor.' + motion_sensor_name
    state = 'error'
    http_status = 123  # sentinel meaning "no response yet"
    try:
        response = requests.get(url, headers=api_headers)
        http_status = response.status_code
        if http_status == 200:
            state = response.json()['state']
    except requests.exceptions.RequestException as exc:
        log_error(exc)
    finally:
        log_debug(f"[get_motion_sensor_status][{motion_sensor_name}]: HTTP response = {http_status} status ={state}")
        if state not in ('on', 'off'):
            log_error(f"return data wrong [{state}]")
            state = 'error'
    return state
def log_debug(debug):
    """Write *debug* to the MARS rotating log at DEBUG level."""
    logger.debug(debug)
def log_info(info):
    """Write *info* to the MARS rotating log at INFO level."""
    logger.info(info)
def log_error(error):
    """Write *error* (message or exception) to the MARS rotating log at ERROR level."""
    logger.error(error)
### Main ######################################################################
if __name__ == '__main__':
    # time.sleep(200)  # optional: wait for the Home Assistant service to start
    log_info("MARS sensor room start...")
    try:
        # Each call is one full polling/reporting cycle; loop forever.
        while True:
            get_and_send_sensor_signal()
    finally:
        log_info("MARS sensor room end.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.