content stringlengths 5 1.05M |
|---|
from django.db import migrations, models
from django.db.migrations import operations
from django.db.migrations.optimizer import MigrationOptimizer
from django.test import SimpleTestCase
from .models import EmptyManager, UnicodeModel
class OptimizerTests(SimpleTestCase):
    """
    Tests the migration optimizer.
    """

    def optimize(self, operations, app_label):
        """
        Handy shortcut for getting results + number of loops
        """
        optimizer = MigrationOptimizer()
        # _iterations is set by optimize() and records how many passes the
        # optimizer needed to reach a fixed point.
        return optimizer.optimize(operations, app_label), optimizer._iterations

    def assertOptimizesTo(self, operations, expected, exact=None, less_than=None, app_label=None):
        # Compare operations via repr(deconstruct()) so equivalent operations
        # built from different call sites compare equal.
        result, iterations = self.optimize(operations, app_label)
        result = [repr(f.deconstruct()) for f in result]
        expected = [repr(f.deconstruct()) for f in expected]
        self.assertEqual(expected, result)
        if exact is not None and iterations != exact:
            raise self.failureException(
                "Optimization did not take exactly %s iterations (it took %s)" % (exact, iterations)
            )
        if less_than is not None and iterations >= less_than:
            raise self.failureException(
                "Optimization did not take less than %s iterations (it took %s)" % (less_than, iterations)
            )

    def assertDoesNotOptimize(self, operations, **kwargs):
        # Optimizing must be a no-op: input and output sequences are equal.
        self.assertOptimizesTo(operations, operations, **kwargs)

    def test_single(self):
        """
        The optimizer does nothing on a single operation,
        and that it does it in just one pass.
        """
        self.assertOptimizesTo(
            [migrations.DeleteModel("Foo")],
            [migrations.DeleteModel("Foo")],
            exact=1,
        )

    def test_create_delete_model(self):
        """
        CreateModel and DeleteModel should collapse into nothing.
        """
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.DeleteModel("Foo"),
            ],
            [],
        )

    def test_create_rename_model(self):
        """
        CreateModel should absorb RenameModels.
        """
        managers = [('objects', EmptyManager())]
        self.assertOptimizesTo(
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[("name", models.CharField(max_length=255))],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
                migrations.RenameModel("Foo", "Bar"),
            ],
            [
                migrations.CreateModel(
                    "Bar",
                    [("name", models.CharField(max_length=255))],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                )
            ],
        )

    def test_rename_model_self(self):
        """
        RenameModels should absorb themselves.
        """
        self.assertOptimizesTo(
            [
                migrations.RenameModel("Foo", "Baa"),
                migrations.RenameModel("Baa", "Bar"),
            ],
            [
                migrations.RenameModel("Foo", "Bar"),
            ],
        )

    def test_create_alter_model_options(self):
        # AlterModelOptions folds into the preceding CreateModel.
        self.assertOptimizesTo(
            [
                migrations.CreateModel('Foo', fields=[]),
                migrations.AlterModelOptions(name='Foo', options={'verbose_name_plural': 'Foozes'}),
            ],
            [
                migrations.CreateModel('Foo', fields=[], options={'verbose_name_plural': 'Foozes'}),
            ]
        )

    def _test_create_alter_foo_delete_model(self, alter_foo):
        """
        CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
        AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
        """
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.AlterModelTable("Foo", "woohoo"),
                alter_foo,
                migrations.DeleteModel("Foo"),
            ],
            [],
        )

    def test_create_alter_unique_delete_model(self):
        self._test_create_alter_foo_delete_model(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))

    def test_create_alter_index_delete_model(self):
        self._test_create_alter_foo_delete_model(migrations.AlterIndexTogether("Foo", [["a", "b"]]))

    def test_create_alter_owrt_delete_model(self):
        self._test_create_alter_foo_delete_model(migrations.AlterOrderWithRespectTo("Foo", "a"))

    def _test_alter_alter_model(self, alter_foo, alter_bar):
        """
        Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
        should collapse into the second.
        """
        self.assertOptimizesTo(
            [
                alter_foo,
                alter_bar,
            ],
            [
                alter_bar,
            ],
        )

    def test_alter_alter_table_model(self):
        self._test_alter_alter_model(
            migrations.AlterModelTable("Foo", "a"),
            migrations.AlterModelTable("Foo", "b"),
        )

    def test_alter_alter_unique_model(self):
        self._test_alter_alter_model(
            migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
            migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
        )

    def test_alter_alter_index_model(self):
        self._test_alter_alter_model(
            migrations.AlterIndexTogether("Foo", [["a", "b"]]),
            migrations.AlterIndexTogether("Foo", [["a", "c"]]),
        )

    def test_alter_alter_owrt_model(self):
        self._test_alter_alter_model(
            migrations.AlterOrderWithRespectTo("Foo", "a"),
            migrations.AlterOrderWithRespectTo("Foo", "b"),
        )

    def test_optimize_through_create(self):
        """
        We should be able to optimize away create/delete through a create or delete
        of a different model, but only if the create operation does not mention the model
        at all.
        """
        # These should work
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
                migrations.DeleteModel("Foo"),
            ],
            [
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
            ],
        )
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
                migrations.DeleteModel("Bar"),
                migrations.DeleteModel("Foo"),
            ],
            [],
        )
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
                migrations.DeleteModel("Foo"),
                migrations.DeleteModel("Bar"),
            ],
            [],
        )
        # This should not work - FK should block it
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
                migrations.DeleteModel("Foo"),
            ],
        )
        # The same operations should be optimized if app_label is specified and
        # a FK references a model from the other app.
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
                migrations.DeleteModel("Foo"),
            ],
            [
                migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
            ],
            app_label="otherapp",
        )
        # But it shouldn't work if a FK references a model with the same
        # app_label.
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
                migrations.DeleteModel("Foo"),
            ],
            app_label="testapp",
        )
        # This should not work - bases should block it
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
                migrations.DeleteModel("Foo"),
            ],
        )
        # The same operations should be optimized if app_label and none of
        # bases belong to that app.
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
                migrations.DeleteModel("Foo"),
            ],
            [
                migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
            ],
            app_label="otherapp",
        )
        # But it shouldn't work if some of bases belongs to the specified app.
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
                migrations.DeleteModel("Foo"),
            ],
            app_label="testapp",
        )

    def test_create_model_add_field(self):
        """
        AddField should optimize into CreateModel.
        """
        managers = [('objects', EmptyManager())]
        self.assertOptimizesTo(
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[("name", models.CharField(max_length=255))],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
                migrations.AddField("Foo", "age", models.IntegerField()),
            ],
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[
                        ("name", models.CharField(max_length=255)),
                        ("age", models.IntegerField()),
                    ],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
            ],
        )

    def test_create_model_add_field_not_through_fk(self):
        """
        AddField should NOT optimize into CreateModel if it's an FK to a model
        that's between them.
        """
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Link", [("url", models.TextField())]),
                migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
            ],
        )

    def test_create_model_add_field_not_through_m2m_through(self):
        """
        AddField should NOT optimize into CreateModel if it's an M2M using a
        through that's created between them.
        """
        # Note: The middle model is not actually a valid through model,
        # but that doesn't matter, as we never render it.
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("LinkThrough", []),
                migrations.AddField(
                    "Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")
                ),
            ],
        )

    def test_create_model_alter_field(self):
        """
        AlterField should optimize into CreateModel.
        """
        managers = [('objects', EmptyManager())]
        self.assertOptimizesTo(
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[("name", models.CharField(max_length=255))],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
                migrations.AlterField("Foo", "name", models.IntegerField()),
            ],
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[
                        ("name", models.IntegerField()),
                    ],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
            ],
        )

    def test_create_model_rename_field(self):
        """
        RenameField should optimize into CreateModel.
        """
        managers = [('objects', EmptyManager())]
        self.assertOptimizesTo(
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[("name", models.CharField(max_length=255))],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
                migrations.RenameField("Foo", "name", "title"),
            ],
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[
                        ("title", models.CharField(max_length=255)),
                    ],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
            ],
        )

    def test_add_field_rename_field(self):
        """
        RenameField should optimize into AddField
        """
        self.assertOptimizesTo(
            [
                migrations.AddField("Foo", "name", models.CharField(max_length=255)),
                migrations.RenameField("Foo", "name", "title"),
            ],
            [
                migrations.AddField("Foo", "title", models.CharField(max_length=255)),
            ],
        )

    def test_alter_field_rename_field(self):
        """
        RenameField should optimize to the other side of AlterField,
        and into itself.
        """
        self.assertOptimizesTo(
            [
                migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
                migrations.RenameField("Foo", "name", "title"),
                migrations.RenameField("Foo", "title", "nom"),
            ],
            [
                migrations.RenameField("Foo", "name", "nom"),
                migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
            ],
        )

    def test_create_model_remove_field(self):
        """
        RemoveField should optimize into CreateModel.
        """
        managers = [('objects', EmptyManager())]
        self.assertOptimizesTo(
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[
                        ("name", models.CharField(max_length=255)),
                        ("age", models.IntegerField()),
                    ],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
                migrations.RemoveField("Foo", "age"),
            ],
            [
                migrations.CreateModel(
                    name="Foo",
                    fields=[
                        ("name", models.CharField(max_length=255)),
                    ],
                    options={'verbose_name': 'Foo'},
                    bases=(UnicodeModel,),
                    managers=managers,
                ),
            ],
        )

    def test_add_field_alter_field(self):
        """
        AlterField should optimize into AddField.
        """
        self.assertOptimizesTo(
            [
                migrations.AddField("Foo", "age", models.IntegerField()),
                migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
            ],
            [
                migrations.AddField("Foo", name="age", field=models.FloatField(default=2.4)),
            ],
        )

    def test_add_field_delete_field(self):
        """
        RemoveField should cancel AddField
        """
        self.assertOptimizesTo(
            [
                migrations.AddField("Foo", "age", models.IntegerField()),
                migrations.RemoveField("Foo", "age"),
            ],
            [],
        )

    def test_alter_field_delete_field(self):
        """
        RemoveField should absorb AlterField
        """
        self.assertOptimizesTo(
            [
                migrations.AlterField("Foo", "age", models.IntegerField()),
                migrations.RemoveField("Foo", "age"),
            ],
            [
                migrations.RemoveField("Foo", "age"),
            ],
        )

    def _test_create_alter_foo_field(self, alter):
        """
        CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
        add/alter/rename field should optimize to CreateModel and the Alter*
        """
        # AddField
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
                migrations.AddField("Foo", "c", models.IntegerField()),
            ],
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                    ("c", models.IntegerField()),
                ]),
                alter,
            ],
        )
        # AlterField
        # Cannot optimize: "b" is referenced by the Alter* operation.
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
                migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
            ],
        )
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                    ("c", models.IntegerField()),
                ]),
                alter,
                migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
            ],
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                    ("c", models.CharField(max_length=255)),
                ]),
                alter,
            ],
        )
        # RenameField
        # Cannot optimize: "b" is referenced by the Alter* operation.
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
                migrations.RenameField("Foo", "b", "c"),
            ],
        )
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
                migrations.RenameField("Foo", "b", "x"),
                migrations.RenameField("Foo", "x", "c"),
            ],
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
                migrations.RenameField("Foo", "b", "c"),
            ],
        )
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                    ("c", models.IntegerField()),
                ]),
                alter,
                migrations.RenameField("Foo", "c", "d"),
            ],
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                    ("d", models.IntegerField()),
                ]),
                alter,
            ],
        )
        # RemoveField
        # Cannot optimize: "b" is referenced by the Alter* operation.
        self.assertDoesNotOptimize(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
                migrations.RemoveField("Foo", "b"),
            ],
        )
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                    ("c", models.IntegerField()),
                ]),
                alter,
                migrations.RemoveField("Foo", "c"),
            ],
            [
                migrations.CreateModel("Foo", [
                    ("a", models.IntegerField()),
                    ("b", models.IntegerField()),
                ]),
                alter,
            ],
        )

    def test_create_alter_unique_field(self):
        self._test_create_alter_foo_field(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))

    def test_create_alter_index_field(self):
        self._test_create_alter_foo_field(migrations.AlterIndexTogether("Foo", [["a", "b"]]))

    def test_create_alter_owrt_field(self):
        self._test_create_alter_foo_field(migrations.AlterOrderWithRespectTo("Foo", "b"))

    def test_optimize_through_fields(self):
        """
        field-level through checking is working. This should manage to collapse
        model Foo to nonexistence, and model Bar to a single IntegerField
        called "width".
        """
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
                migrations.AddField("Foo", "age", models.IntegerField()),
                migrations.AddField("Bar", "width", models.IntegerField()),
                migrations.AlterField("Foo", "age", models.IntegerField()),
                migrations.RenameField("Bar", "size", "dimensions"),
                migrations.RemoveField("Foo", "age"),
                migrations.RenameModel("Foo", "Phou"),
                migrations.RemoveField("Bar", "dimensions"),
                migrations.RenameModel("Phou", "Fou"),
                migrations.DeleteModel("Fou"),
            ],
            [
                migrations.CreateModel("Bar", [("width", models.IntegerField())]),
            ],
        )

    def test_optimize_elidable_operation(self):
        # An operation flagged elidable=True is dropped entirely during
        # optimization, even when interleaved with real operations.
        elidable_operation = operations.base.Operation()
        elidable_operation.elidable = True
        self.assertOptimizesTo(
            [
                elidable_operation,
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                elidable_operation,
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
                elidable_operation,
                migrations.RenameModel("Foo", "Phou"),
                migrations.DeleteModel("Bar"),
                elidable_operation,
            ],
            [
                migrations.CreateModel("Phou", [("name", models.CharField(max_length=255))]),
            ],
        )
|
# Your App secret key
# NOTE(review): template placeholder — substitute [SECRETKEY] at install time.
# The '\2'/'\1' pairs are literal octal-escape bytes; '\e', '\y', '\h' are not
# recognised Python escapes and remain two literal characters each.
SECRET_KEY = '\2\1[SECRETKEY]\1\2\e\y\y\h'
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = '[MAPBOXAPIKEY]'
# ---------------------------------------------------------
# Superset specific config
# ---------------------------------------------------------
# Maximum number of rows returned from a query.
# NOTE(review): [ROWLIMIT] is a template placeholder — not valid Python until
# replaced with an integer.
ROW_LIMIT = [ROWLIMIT]
|
from time import sleep
class Repository:
    """Minimal in-memory store of objects addressable by their getID()."""

    def __init__(self, objList=None):
        """Create a repository, optionally pre-populated from *objList*.

        The previous signature used a mutable default (``objList=[]``); the
        ``None`` sentinel avoids that anti-pattern while keeping the same
        behavior for all callers (the input was always copied).
        """
        self.list = list(objList) if objList is not None else []

    def getByID(self, ID):
        """Return the first stored object whose getID() equals *ID*, else None."""
        for obj in self.list:
            if obj.getID() == ID:
                return obj
        return None
class User:
    """A customer that browses a restaurant's menu and places orders
    through the mediator."""

    def __init__(self, name, ID, mediator):
        self.name = name
        self.ID = ID
        self.mediator = mediator

    def createOrderList(self, restaurantID):
        """Interactively build a list of menu-item numbers.

        Asks the mediator for the restaurant's menu (data=None request),
        then loops on input(): 0 prints the menu, 1..len(menu) records the
        choice, and any other number ends the selection.
        """
        orderList = []
        # Passing data=None asks the mediator to return the menu.
        menu = self.mediator.react(self, restaurantID, None)
        while(True):
            choice = int(input("Input 0 for restaurant menu, numbers from menu to select item: "))
            if choice == 0:
                print(menu)
                continue
            if choice > 0 and choice < len(menu) + 1:
                orderList.append(choice)
                continue
            # Out-of-range input terminates the selection loop.
            break
        return orderList

    def order(self, restaurantID):
        # Build the selection interactively, then submit it via the mediator.
        orderList = self.createOrderList(restaurantID)
        self.mediator.react(self, restaurantID, orderList)

    def notify(self, message):
        # Called back by the mediator to deliver status updates.
        print(message)

    def getID(self):
        return self.ID
class Restaurant:
    """A restaurant participant: exposes a menu, queues incoming orders and
    hands them to a delivery service via the mediator."""

    def __init__(self, menu, ID, mediator):
        self.menu = menu
        self.ID = ID
        self.currentOrders = []
        self.mediator = mediator

    def placeOrder(self, orderList, client):
        # Queue the order together with the client that placed it.
        self.currentOrders.append([orderList, client])

    def deliver(self, orderId, deliveryID):
        # Hand the queued order to the delivery service through the mediator,
        # then drop it from the queue.
        pending = self.currentOrders[orderId]
        self.mediator.react(self, deliveryID, pending)
        self.currentOrders.remove(pending)

    def getID(self):
        return self.ID

    def getMenu(self):
        return self.menu
class Delivery:
    """A delivery service: receives dispatched orders and reports progress
    back to the client through the mediator."""

    def __init__(self, ID, mediator):
        self.order = None
        self.client = None
        self.ID = ID
        self.mediator = mediator

    def dispatch(self, orderData):
        # orderData is a [orderList, clientID] pair queued by a Restaurant.
        self.order, self.client = orderData[0], orderData[1]
        self.mediator.react(self, self.client, "Delivery in progress")

    def arrived(self):
        self.mediator.react(self, self.client, "Delivery arrived")

    def getID(self):
        return self.ID
class Mediator:
    """Routes messages between users, restaurants and delivery services.

    react() dispatches on the sender's type:
      * User + list  -> place the order with the restaurant identified by ID
      * User + None  -> return that restaurant's menu
      * Restaurant   -> hand the order data to the delivery identified by ID
      * Delivery     -> notify the user identified by ID with *data*
    """

    def __init__(self, repoU, repoR, repoD):
        self.repoU = repoU  # users
        self.repoR = repoR  # restaurants
        self.repoD = repoD  # delivery services

    def react(self, obj, ID, data):
        # isinstance() replaces the exact type() comparisons: same behavior
        # for these classes, and correct for any future subclasses.
        if isinstance(obj, User):
            if isinstance(data, list):
                restaurant = self.repoR.getByID(ID)
                restaurant.placeOrder(data, obj.getID())
            elif data is None:  # identity check, not '== None'
                return self.repoR.getByID(ID).getMenu()
        elif isinstance(obj, Restaurant):
            # dispatch() is called for its side effect only; the previous
            # unused 'delivery =' binding has been dropped.
            self.repoD.getByID(ID).dispatch(data)
        elif isinstance(obj, Delivery):
            self.repoU.getByID(ID).notify(data)
# (User-facing strings below are Polish by design and left untouched;
# this one means "To finish selecting, choose an element outside the list".)
print("Żeby zakończyć wybieranie wybierz element którego spoza listy")
# Technically there should be encapsulation here, but this is only an example
# of the mediator in action, so it should be acceptable.
# (Original Polish comments translated to English.)
testMediator = Mediator(Repository(), Repository(), Repository())
testUser = User("John", 1, testMediator)
testMediator.repoU.list.append(testUser)
testRestaurant = Restaurant(["1. Kotlet schabowy", "2. Bigos", "3. Grochówka"], 1, testMediator)
testMediator.repoR.list.append(testRestaurant)
testDelivery = Delivery(1, testMediator)
testMediator.repoD.list.append(testDelivery)
# Interactive: prompts on stdin for menu choices.
testUser.order(1)
print("\nOrder beeing processed I guess...")
sleep(2)
# Deliver the first queued order via delivery service #1.
testRestaurant.deliver(0, 1)
sleep(2)
testDelivery.arrived()
|
# django
from django import forms
# local django
from recommendation.models import CustomRecommendation
from recommendation.validators import CustomRecommendationValidator
from exam import constants
class UpdateCustomRecommendationForm(forms.ModelForm):
    """
    Form to edit a custom recommendation.
    """

    name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}))
    recommendation = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control'}))

    class Meta:
        # Define model to form.
        model = CustomRecommendation
        fields = ('recommendation', 'name',)

    def get_pk(self, pk):
        # Stash the primary key of the instance being edited; clean() needs it
        # so the record may keep its own name. Must be called before is_valid().
        self.pk = pk

    def clean(self):
        """
        Validate the Custom Recommendation fields.

        Rejects a name already used by a *different* recommendation, runs the
        custom validators, and returns the cleaned data (Django convention —
        the original returned None implicitly).
        """
        recommendation = self.cleaned_data.get('recommendation')
        name = self.cleaned_data.get('name')
        current = CustomRecommendation.objects.get(pk=self.pk)
        # .exists() avoids materializing the queryset just for a membership test.
        name_taken = CustomRecommendation.objects.filter(name=name).exists()
        if name_taken and current.name != name:
            raise forms.ValidationError({'name': [(constants.NAME_EXISTS)]})
        # Verify validations in form.
        self.validator_all(recommendation, name)
        return self.cleaned_data

    def validator_all(self, description, name):
        """
        Checks validator in all fields.
        """
        validator = CustomRecommendationValidator()
        validator.validator_name_update(name)
        validator.validator_description(description)
|
class BaseValidationError(Exception):
    """Root of the validation-error hierarchy for this package."""
class ValidationError(BaseValidationError):
    """Raised when a concrete validation check fails."""
|
import struct
import time
import io
import gzip
# Pieces of a gzip member header, kept as *text* strings because this module
# targets Python 2, where str is the byte type.
MAGIC_HEADER = '\037\213\010'  # \x1f\x8b magic + deflate compression method
FLAGS = '\004'                 # FEXTRA: an extra field follows the header
XFL_OS = '\000\003'            # XFL=0, OS=3 (Unix)
EMPTY_DATA = '\003\000'        # deflate stream that decodes to zero bytes
#=================================================================
class LengthMetadata:
    """
    Sample metadata which stores an 8-byte length offset in the gzip header.
    Could be used to store an offset.
    This is used to test the gzip metadata write/read ops.
    """

    def __init__(self, length=-1):
        # -1 marks "not yet read".
        self.length = length

    def id(self):
        # Two-character subfield id written into the gzip FEXTRA field.
        return 'LN'

    def size(self):
        # Payload size in bytes (one unsigned 64-bit integer).
        return 8

    def write(self, fh):
        # int() replaces the Python 2-only long() builtin (NameError on
        # Python 3); struct.pack accepts either integer type on Python 2.
        write64(fh, int(self.length))

    def read(self, fh):
        self.length = int(read64(fh))
#=================================================================
def write_length_metadata(fh, length):
    r"""
    >>> buff = io.BytesIO()
    >>> write_length_metadata(buff, 1234)

    Verify block contents
    >>> buff.getvalue()
    '\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\x03\x0c\x00LN\x08\x00\xd2\x04\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    Verify block
    >>> len(buff.getvalue())
    34

    Verify actual block is empty
    >>> gzip.GzipFile(fileobj=buff).read(64)
    ''
    """
    # Convenience wrapper: write an empty gzip member whose FEXTRA field
    # carries only the 8-byte length.
    # NOTE(review): the doctest expected values are Python 2 str reprs.
    write_metadata(fh, LengthMetadata(length))
#=================================================================
def write_metadata(fh, metadata):
    """Write an *empty* gzip member whose FEXTRA field holds *metadata*.

    Layout: 10-byte header, 2-byte XLEN, 2-byte subfield id, 2-byte subfield
    length, the metadata payload, a 2-byte empty deflate stream, and the
    8-byte CRC32/ISIZE trailer — size_of_header(metadata) bytes in total.
    """
    fh.write(MAGIC_HEADER)
    fh.write(FLAGS)
    # timestamp (MTIME): always zero so the output is deterministic
    write32(fh, 0)
    fh.write(XFL_OS)
    # total length of the extra field: subfield id (2) + length (2) + payload
    write16(fh, metadata.size() + 4)
    fh.write(metadata.id()[:2])
    # length of metadata
    write16(fh, metadata.size())
    metadata.write(fh)
    # empty data
    fh.write(EMPTY_DATA)
    # CRC32 and ISIZE of the (empty) uncompressed data
    write32(fh, 0)
    write32(fh, 0)
#=================================================================
def size_of_header(metadata):
    """Total byte length of a metadata gzip member written by write_metadata().

    26 fixed bytes: 10-byte gzip header + 2-byte XLEN + 4-byte subfield
    header + 2-byte empty deflate data + 8-byte trailer, plus the payload.
    """
    fixed_overhead = 26
    return fixed_overhead + metadata.size()
#=================================================================
def write16(fh, value):
    """Write *value* to *fh* as an unsigned little-endian 16-bit integer."""
    packed = struct.pack(b'<H', int(value))
    fh.write(packed)
def read16(input):
    """Read 2 bytes from *input*, decoded as unsigned little-endian 16-bit."""
    (value,) = struct.unpack("<H", input.read(2))
    return value
def write32(fh, value):
    """Write *value* to *fh* as an unsigned little-endian 32-bit integer.

    int() replaces the Python 2-only long() builtin (NameError on Python 3);
    struct.pack accepts either integer type on Python 2.
    """
    fh.write(struct.pack(b'<I', int(value)))
# currently unused
def read32(input):  # pragma: no cover
    """Read 4 bytes from *input*, decoded as unsigned little-endian 32-bit."""
    (value,) = struct.unpack("<I", input.read(4))
    return value
def write64(fh, value):
    """Write *value* to *fh* as an unsigned little-endian 64-bit integer.

    int() replaces the Python 2-only long() builtin (NameError on Python 3);
    struct.pack accepts either integer type on Python 2.
    """
    fh.write(struct.pack(b'<Q', int(value)))
def read64(input):
    """Read 8 bytes from *input*, decoded as unsigned little-endian 64-bit."""
    (value,) = struct.unpack("<Q", input.read(8))
    return value
#=================================================================
def read_length_metadata(fh):
    """
    write and read a length
    >>> buff = io.BytesIO()
    >>> write_length_metadata(buff, 1234)
    >>> read_length_metadata(buff)
    1234L

    write and read 0
    >>> buff = io.BytesIO()
    >>> write_length_metadata(buff, 0)
    >>> read_length_metadata(buff)
    0L

    write and read a full long
    >>> buff = io.BytesIO()
    >>> write_length_metadata(buff, 0x7fffffffffffffee)
    >>> hex(read_length_metadata(buff))
    '0x7fffffffffffffeeL'

    ensure gzip still consistent (empty)
    >>> b = buff.seek(0)
    >>> gzip.GzipFile(fileobj=buff).read()
    ''

    >>> read_length_metadata('')
    """
    # Returns the stored length, or None when the stream does not hold a
    # parseable LengthMetadata member (read_metadata swallows all errors —
    # the last doctest passes a plain str on purpose).
    # NOTE(review): doctest expected values are Python 2 long reprs ('1234L').
    len_metadata = LengthMetadata()
    if read_metadata(fh, len_metadata):
        return len_metadata.length
    else:
        return None
#=================================================================
def read_metadata(fh, metadata, seek=True):
    """Parse a gzip member written by write_metadata() into *metadata*.

    When *seek* is true, position ``14 + metadata.size()`` bytes from the end
    of the stream — the start of the FEXTRA subfield id in such a member —
    and skip the fixed header checks. Otherwise verify the header from the
    current position (magic, flags, XFL/OS and XLEN), ignoring the timestamp.

    Returns True on success, False on any failure (wrong bytes, short read,
    or an object that is not a seekable binary stream).
    """
    try:
        if seek:
            # A member is 26 + size bytes; -14 - size from the end lands on
            # the 2-byte subfield id (26+size - (8 trailer + 2 empty data
            # + size payload + 2 subfield len + 2 subfield id)).
            fh.seek(-14 - metadata.size(), 2)
        else:
            assert fh.read(len(MAGIC_HEADER)) == MAGIC_HEADER
            assert fh.read(len(FLAGS)) == FLAGS
            # ignore the 4-byte timestamp
            read32(fh)
            assert fh.read(len(XFL_OS)) == XFL_OS
            # total length of the extra field
            assert read16(fh) == (metadata.size() + 4)
        buff = fh.read(2)
        assert buff == metadata.id()[:2]
        buff = read16(fh)
        assert buff == metadata.size()
        metadata.read(fh)
        buff = fh.read(2)
        assert buff == EMPTY_DATA
        return True
    except Exception:
        # Single handler replaces the previous duplicated
        # AssertionError/Exception branches, which both returned False.
        return False
#=================================================================
if __name__ == "__main__":  # pragma: no cover
    # Run this module's doctests.
    # NOTE(review): expected outputs are Python 2 reprs ('...L' longs,
    # str-typed bytes) — they fail under a Python 3 interpreter.
    import doctest
    doctest.testmod()
|
# Arithmetic-progression (PA) generator: reads the first term and the common
# difference, then prints the first 10 terms.
print('Gerador de PA')
print('-=' * 15)
primeiro_termo = int(input('Primeiro termo da PA: '))
razao = int(input('Digite a Razão da PA: '))
termo = primeiro_termo
# A bounded for-loop replaces the original counter-driven while-loop.
for _ in range(10):
    print(f'{termo} -> ', end='')
    termo += razao
print('FIM!')
|
from vilbert.vilbert import VILBertForVLTasks
from vilbert.vilbert import BertConfig
from vilbert.vilbert import GeLU
from vilbert.optimization import RAdam
from torch.nn import functional as F
from vilbert.datasets._image_features_reader import ImageFeaturesH5Reader
from pytorch_transformers import AdamW
from pytorch_transformers.optimization import AdamW, WarmupConstantSchedule, WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
import torch
from torch.optim import lr_scheduler
from torch import nn
import torch.optim as optim
import random, json
from sklearn import metrics
import copy
from sklearn.utils.class_weight import compute_class_weight
from helper_functions import *
import argparse
# ----- Command-line arguments -----
parser = argparse.ArgumentParser(description='Train VilBERT with averaged features')
parser.add_argument('--model', type=str, default='refcoco')
parser.add_argument('--dset', type=str, default='lesa',
                    help='clef_en | clef_ar | mediaeval')
parser.add_argument('--split', type=int, default=0,
                    help='0-4')
parser.add_argument('--unfr', type=int, default=2,
                    help='2 | 4 | 6')
parser.add_argument('--lr', type=str, default='5e-5',
                    help='2e-5 | 3e-5 | 5e-5')
parser.add_argument('--wt_ce', type=int, default=0,
                    help='0 | 1')
parser.add_argument('--bs', type=int, default=16,
                    help='4 | 8 | 16')
parser.add_argument('--epochs', type=int, default=6)
args = parser.parse_args()

# Seed every RNG in use so runs are reproducible.
# NOTE(review): `np` is not imported in this file's visible imports —
# presumably it arrives via `from helper_functions import *`; confirm.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
## A simple classifier on top of ViLBert last layer features and concatenation of visual and textual features.
class SimpleClassifier(nn.Module):
    """Mean-pools ViLBERT's text/vision token features, concatenates them,
    and classifies with a small MLP head."""

    def __init__(self, in_dim, hid_dim, out_dim, dropout):
        super().__init__()
        # NOTE(review): this reads the module-level `vilbert` global rather
        # than taking a constructor argument — the classifier cannot be
        # constructed before that global is defined below.
        self.vilbert = vilbert
        self.dropout = nn.Dropout(dropout)
        self.logit_fc = nn.Sequential(
            nn.Linear(in_dim, hid_dim),
            # GeLU(),
            nn.ReLU(inplace=True),
            BertLayerNorm(hid_dim, eps=1e-12),
            nn.Linear(hid_dim, out_dim),
        )

    def add(self, a, b):
        # Unused helper — not referenced anywhere in this script.
        return a+b

    def mul(self, a, b):
        # Unused helper — not referenced anywhere in this script.
        return a*b

    def forward(self, tokens, features, spatials, segment_ids, input_masks, image_masks, co_attention_masks, task_tokens):
        # Run the ViLBERT backbone; only the two token-feature streams are used.
        sequence_output_t, sequence_output_v, _, _, _ = self.vilbert(tokens, features, spatials, segment_ids, input_masks, image_masks, co_attention_masks, task_tokens)
        # Average each modality over its sequence dimension, then concatenate.
        average_output_t = torch.mean(sequence_output_t, dim=1)
        average_output_v = torch.mean(sequence_output_v, dim=1)
        pooled_out = torch.cat((average_output_t, average_output_v), dim=1)
        return self.logit_fc(self.dropout(pooled_out))
def evaluate(model, loader, phase):
    """Evaluate *model* over *loader* without gradient tracking.

    Returns (accuracy, weighted F1, mean loss, all predictions).
    Relies on the module-level `device` and `criterion` globals;
    `phase` is accepted but unused.
    """
    model.eval()
    total_loss = 0
    all_preds = []
    all_labels = []
    for i, batch in enumerate(loader):
        tokens, features, spatials, segment_ids, input_masks, image_masks, \
            co_attention_masks, labels = batch
        # Move the whole batch to the compute device.
        tokens, features, spatials, segment_ids, input_masks, image_masks, \
            co_attention_masks, labels = tokens.to(device), features.to(device), spatials.to(device), \
            segment_ids.to(device), input_masks.to(device), image_masks.to(device), co_attention_masks.to(device), \
            labels.to(device)
        # One task token (id 1) per example, as the ViLBERT forward expects.
        task_tokens = tokens.new().resize_(tokens.size(0), 1).fill_(1).to(device)
        with torch.no_grad():
            logits = model(
                tokens, features, spatials, segment_ids, input_masks, image_masks, co_attention_masks, task_tokens).squeeze(1)
            loss = criterion(logits, labels)
        total_loss += loss.item()
        # argmax over the class dimension.
        preds = logits.max(1)[1]
        all_preds.extend(preds.cpu().numpy().flatten())
        all_labels.extend(labels.cpu().numpy().flatten())
    avg_loss = total_loss / len(loader)
    avg_acc = metrics.accuracy_score(all_labels, all_preds)
    avg_f1 = metrics.f1_score(all_labels, all_preds, average='weighted')
    return avg_acc, avg_f1, avg_loss, all_preds
## Arguments
vilbert_type = args.model
dset = args.dset
split = args.split

## Initialize VilBERT
# Pick the backbone config matching the pretrained checkpoint depth.
if vilbert_type == 'concap8':
    config = BertConfig.from_json_file("vilbert-multi-task/config/bert_base_8layer_8conect.json")
else:
    config = BertConfig.from_json_file("vilbert-multi-task/config/bert_base_6layer_6conect.json")
vilbert = VILBertForVLTasks.from_pretrained(
    'vilbert-multi-task/data/pretrained/%s_model.bin'%(vilbert_type),
    config=config,
    num_labels=2,
    default_gpu=False,
)
# Keep only the BERT backbone; the task heads are not used here.
vilbert = vilbert.bert

## Dataset Stuff
data_loc = 'data/%s/'%(args.dset)

## Training, Validation and Test Text
# NOTE(review): `pd`, `np`, `CustomDataset`, `DataLoader` and `device` are
# assumed to come from `from helper_functions import *`; confirm.
data_dict = json.load(open(data_loc+'data.json', 'r', encoding='utf-8'))
train_df = pd.read_csv(data_loc+'splits/train_%d.txt'%(split), header=None)
val_df = pd.read_csv(data_loc+'splits/val.txt', header=None)
test_df = pd.read_csv(data_loc+'splits/test_%d.txt'%(split), header=None)
test_idxs = np.array([idx for idx in test_df[0]])

## Bert Tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Precomputed Faster R-CNN region features, plus the list of images that
# have no features.
img_feat_header = ImageFeaturesH5Reader(data_loc+'rcnn_lmdbs/')
missing_feats = json.load(open(data_loc+'rcnn_missing/%s.json'%(dset), 'r'))
tr_data = CustomDataset(dset, tokenizer, img_feat_header, missing_feats, train_df, data_dict)
tr_loader = DataLoader(tr_data, batch_size=args.bs, shuffle=True, num_workers=4, pin_memory=True)
vl_data = CustomDataset(dset, tokenizer, img_feat_header, missing_feats, val_df, data_dict)
vl_loader = DataLoader(vl_data, batch_size=int(args.bs/2), num_workers=2, pin_memory=True)
te_data = CustomDataset(dset, tokenizer, img_feat_header, missing_feats, test_df, data_dict)
te_loader = DataLoader(te_data, batch_size=int(args.bs/2), num_workers=2, pin_memory=True)
num_epochs = args.epochs
# 1792 = concatenated text (1024) + vision (768) pooled features — TODO confirm
# against the chosen backbone config.
model = SimpleClassifier(1792, 128, 2, 0.5)
# Parameters exempt from weight decay, per the usual BERT fine-tuning recipe.
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]

# Select which co-attention layers to leave unfrozen (--unfr).
if args.unfr == 2:
    not_freeze = ['c_layer.4', 'c_layer.5']
elif args.unfr == 4:
    not_freeze = ['c_layer.2', 'c_layer.3', 'c_layer.4', 'c_layer.5']
elif args.unfr == 6:
    not_freeze = ['c_layer.0','c_layer.1','c_layer.2', 'c_layer.3', 'c_layer.4', 'c_layer.5']
else:
    not_freeze = ['t_pooler', 'v_pooler']
print(not_freeze)

# Freeze everything that is not in the unfreeze list.
for name, param in model.named_parameters():
    if not any(nf in name for nf in not_freeze):
        param.requires_grad = False

# Two optimizer groups: decayed vs. non-decayed parameters.
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]

print('\n---------------Training the below parameters:------------')
for name, param in model.named_parameters():
    if param.requires_grad == True:
        print(name)
print('----------------------------------------------------------\n')

model.to(device)
optimizer = AdamW(optimizer_grouped_parameters, lr=float(args.lr))
# Linear warmup for the first 10% of updates, linear decay afterwards.
total_steps = num_epochs*len(tr_loader)
warmup_steps = int(total_steps*0.1)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps, total_steps)

# Optionally weight the cross-entropy by inverse class frequency (--wt_ce).
if args.wt_ce:
    class_weights = compute_class_weight('balanced', classes=[0,1], y=train_df[1].to_numpy())
    class_weights = torch.tensor(class_weights,dtype=torch.float)
    class_weights = class_weights.to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights).to(device)
else:
    criterion = nn.CrossEntropyLoss().to(device)
for epoch in range(0, num_epochs):
print("----- Epoch %d/%d ------"%(epoch+1, num_epochs))
running_loss = 0.0
running_corrects = 0
tot = 0.0
model.train()
for i, batch in enumerate(tr_loader):
# zero the parameter gradients
optimizer.zero_grad()
tokens, features, spatials, segment_ids, input_masks, image_masks, \
co_attention_masks, labels = batch
tokens, features, spatials, segment_ids, input_masks, image_masks, \
co_attention_masks, labels = tokens.to(device), features.to(device), spatials.to(device), \
segment_ids.to(device), input_masks.to(device), image_masks.to(device), co_attention_masks.to(device), \
labels.to(device)
task_tokens = tokens.new().resize_(tokens.size(0), 1).fill_(1).to(device)
logits = model(
tokens, features, spatials, segment_ids, input_masks, image_masks, co_attention_masks, task_tokens).squeeze(1)
preds = logits.max(1)[1]
loss = criterion(logits, labels)
# backward + optimize
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
# statistics
running_loss += loss.item()
running_corrects += preds.eq(labels.view_as(preds)).sum().item()
tot += len(labels)
if i % 20 == 0:
print('[%d, %5d] loss: %.5f, Acc: %.2f' %
(epoch+1, i + 1, loss.item(), (100.0 * running_corrects) / tot))
train_loss = running_loss / len(tr_loader)
train_acc = running_corrects * 1.0 / (len(tr_loader.dataset))
print('Training Loss: {:.6f} Acc: {:.2f}'.format(train_loss, 100.0 * train_acc))
val_acc, val_f1, val_loss, _ = evaluate(model, vl_loader, 'val')
print('Epoch: {:d}, Val Loss: {:.4f}, Val Acc: {:.4f}, Val F1: {:.4f}'.format(epoch+1,
val_loss,val_acc, val_f1))
test_acc, test_f1, test_loss, test_preds = evaluate(model, te_loader, 'test')
print('Epoch: {:d}, Test Loss: {:.4f}, Test Acc: {:.4f}, Test F1: {:.4f}'.format(epoch+1,
test_loss, test_acc, test_f1))
# deep copy the model
if val_acc >= best_acc:
best_acc = val_acc
best_val_loss = val_loss
best_epoch = epoch
best_test_acc = test_acc
best_test_f1 = test_f1
best_test_loss = test_loss
best_preds = test_preds
print('Best Epoch: {} : Test Loss: {:.4f} : Test Acc: {:.4f} : Test F1: {:.4f}'.format(best_epoch+1, best_test_loss,best_test_acc, best_test_f1)) |
""" Implements the game object that represents a black hole.
"""
from timeit import default_timer as timer
import pyg
from .game_object import GameObject
# Minimum time (seconds) between two pulsing scale steps of the black hole.
_MIN_SCALING_INTERVAL = 0.01
# Multiplicative factor applied to the sprite's scale on each pulse step.
_SCALE_STEP_FACTOR = 1.02
# Tuning constant for the inverse-square gravitational pull strength.
_BLACK_HOLE_GRAVITY_FACTOR = 0.075
class BlackHole(GameObject):
    """ A pulsing, rotating black hole that attracts other game objects. """

    def __init__(self,
                 initial_pos: pyg.utils.Coord2D = (0.82, 0.82),
                 initial_scale: float = 0.8) -> None:
        # Bookkeeping for the pulse animation (grow/shrink cycle).
        self._scaling_counter = 0
        self._reverse_scales = False
        self._last_scaling = 0
        # One concentric circle per radius step; the 17 innermost rings are
        # filled, the outer ones drawn as points, forming a soft gradient disc.
        rings = []
        for step in range(100):
            fill = pyg.FillMode.POINT if step > 16 else pyg.FillMode.FILL
            rings.append(pyg.objects.Circle(
                color=(step / 140, step / 120, step / 100, 1),
                fill_mode=fill,
                center_pos=(0, 0),
                radius=0.15 * step / 100,
            ))
        super().__init__(
            initial_pos=initial_pos,
            initial_scale=initial_scale,
            all_graphics=rings,
        )

    def calc_gravitational_pull(self, obj: GameObject) -> tuple[float, float]:
        """ Calculates the gravitational pull the black hole exerts on the given
        object.
        """
        # Clamp the distance so the inverse-square law cannot divide by ~zero.
        dist = max(self.distance(obj), 1e-4)
        strength = _BLACK_HOLE_GRAVITY_FACTOR / (dist ** 2)
        return (strength * (self.x - obj.x) / dist,
                strength * (self.y - obj.y) / dist)

    def update(self, dT: float, *args, **kwargs) -> None:
        # Pulse the sprite: 20 growth steps, then 20 shrink steps, with the
        # step rate capped by _MIN_SCALING_INTERVAL.
        if (timer() - self._last_scaling) >= _MIN_SCALING_INTERVAL:
            if self._reverse_scales:
                factor = 1 / _SCALE_STEP_FACTOR
            else:
                factor = _SCALE_STEP_FACTOR
            self.scale(sx=factor, sy=factor)
            self._scaling_counter += 1
            self._last_scaling = timer()
            if self._scaling_counter >= 20:
                self._scaling_counter = 0
                self._reverse_scales = not self._reverse_scales
        self.rotate(angle=dT * 30, axis="z")
        super().update(dT, *args, **kwargs)
|
import boto3
from .quota_check import QuotaCheck, QuotaScope
class TopicCountCheck(QuotaCheck):
    # NOTE(review): the class name looks like a copy/paste leftover from an
    # SNS topic-count check -- every attribute below describes the SES daily
    # send quota. Renaming would break external lookups, so it is kept as-is.
    key = "ses_daily_sends"
    description = "SES messages sent during the last 24 hours"
    scope = QuotaScope.REGION
    service_code = 'ses'
    quota_code = 'L-804C8AE8'

    @property
    def current(self):
        """Number of messages sent through SES in the past 24 hours."""
        ses_client = self.boto_session.client('ses')
        send_quota = ses_client.get_send_quota()
        return send_quota['SentLast24Hours']
|
from typing import TypedDict
import datetime as dt
class ISection_1_1_3(TypedDict):
    """Typed payload for report section 1.1.3 (consumption figures).

    The `wr_` prefix and `mu` suffix presumably abbreviate "water" and
    "million units" -- TODO confirm against the report generator.
    """
    # Total consumption over the reporting period.
    wr_tot_cons_mu: float
    # Average consumption over the reporting period.
    wr_avg_cons_mu: float
    # Peak consumption observed during the period.
    wr_max_cons_mu: float
    # Date of the peak, pre-formatted as a string (not a datetime).
    wr_max_cons_mu_date: str
    # Percentage increase of the average versus the previous year.
    wr_avg_cons_mu_perc_inc: float
    # Previous year's average consumption.
    wr_avg_cons_mu_last_yr: float
"""/config/custom_components/cryptoportfolio"""
"""Support for Etherscan sensors."""
from datetime import timedelta
import logging
# import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from .constants import CONF_ADDRESS, CONF_NAME, CONF_TOKEN, CONF_TOKEN_ADDRESS, CONF_EXPLORER_API_URL, CONF_MAIN_COIN, \
CONF_EXPLORER_API_KEY, CONF_DECIMALS, CONF_BLOCKCHAIN, SUPPORTED_BLOCKCHAINS, SUPPORTED_BLOCKCHAIN_ETHORFORK, SUPPORTED_BLOCKCHAIN_BTC
from .ETHorForkPortfolioSensor import ETHorForkPortfolioSensor
from .BTCPortfolioSensor import BTCPortfolioSensor
from .BlockChainInfoQueue import BlockChainInfoQueue
import homeassistant.helpers.config_validation as cv
# Polling interval Home Assistant uses for these sensors.
SCAN_INTERVAL = timedelta(minutes=5)

_LOGGER = logging.getLogger(__name__)

# Single queue shared by every BTC sensor created by this platform.
blockChainInfoQueue = BlockChainInfoQueue()

# Extend the base sensor platform schema with this integration's options.
# Defaults target Ethereum mainnet via the Etherscan API.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ADDRESS): cv.string,  # TODO: [cv.string] == Array
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_TOKEN): cv.string,
        vol.Optional(CONF_TOKEN_ADDRESS): cv.string,
        vol.Optional(CONF_EXPLORER_API_URL, default="https://api.etherscan.io/api"): cv.string,
        vol.Optional(CONF_MAIN_COIN, default="ETH"): cv.string,
        vol.Optional(CONF_DECIMALS, default=18): cv.positive_int,
        vol.Optional(CONF_EXPLORER_API_KEY): cv.string,
        vol.Optional(CONF_BLOCKCHAIN, default=SUPPORTED_BLOCKCHAIN_ETHORFORK): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the cryptoPortfolio sensors.

    Creates either an ETH-or-fork portfolio sensor or a BTC portfolio sensor
    depending on the configured blockchain. Returns False on invalid config.
    """
    address = config.get(CONF_ADDRESS)
    explorer_api_url = config.get(CONF_EXPLORER_API_URL)
    main_coin = config.get(CONF_MAIN_COIN)
    explorer_api_key = config.get(CONF_EXPLORER_API_KEY)
    name = config.get(CONF_NAME)
    token = config.get(CONF_TOKEN)
    token_address = config.get(CONF_TOKEN_ADDRESS)
    decimals = config.get(CONF_DECIMALS)
    blockchain = config.get(CONF_BLOCKCHAIN)
    if not blockchain:
        # Defensive: the schema supplies a default, but guard anyway.
        blockchain = SUPPORTED_BLOCKCHAIN_ETHORFORK
    if blockchain == SUPPORTED_BLOCKCHAIN_ETHORFORK:
        if not explorer_api_key:
            _LOGGER.error("No api key provided for ETH or fork.")
            return False
        if not explorer_api_url or not main_coin:
            # Fall back to the default blockchain (Ethereum mainnet).
            # (The original used a no-op string literal here as a comment.)
            explorer_api_url = "https://api.etherscan.io/api"
            main_coin = "ETH"
        main_coin = main_coin.upper()
        if token:
            token = token.upper()
            if not name:
                name = f"{token} Balance"
            if not decimals:
                # NOTE(review): the schema defaults CONF_DECIMALS to 18, so
                # this token fallback of 9 only triggers if decimals is
                # explicitly configured as 0 -- confirm intent.
                decimals = 9  # default value for tokens
        elif not decimals:
            decimals = 18
        if not name:
            name = f"{main_coin} Balance"
        add_entities([ETHorForkPortfolioSensor(main_coin, explorer_api_url, explorer_api_key, name, address, token,
                                               token_address, decimals)], True)
    elif blockchain == SUPPORTED_BLOCKCHAIN_BTC:
        if not name:
            name = "BTC Balance"
        add_entities([BTCPortfolioSensor(name, address, blockChainInfoQueue)], True)
    else:
        # Lazy %-formatting: only interpolate when the record is emitted.
        _LOGGER.error("Unsupported blockchain provided: %s", blockchain)
        return False
|
"""
This Module handles the generation of results in the CLI, based on the ticker passed as the argument to the object of the Main class.
The execution begins by retrieving the financial data from the yfinance library, which is handled in the data module of stockDL.
After the data is collected the first trading day of each month is stored in a list by the date_calculation() function in the data module.
The data is then preprocessed by the preprocessor module which creates a window to run the predictions on.
It also comprises min-max scaling which reduces sudden highs and lows in the data which would have resulted in anomalies.
The training module trains the model created in the model module on the data retrieved by the preprocessing module.
The plots module helps in plotting the necessary graphs for better visualisation for EDA.
The result module uses the calculation and market modules to run the necessary calculations on the predictions,
and generate net and the gross yield on the predictions obtained.
"""
from . import preprocessing, train, plots, results
class Main:
    """CLI entry point for stockDL.

    Runs the full pipeline for one ticker: data loading and preprocessing,
    model training, comparison plots, and final yield calculations. The final
    result is stored both as a pandas DataFrame and as JSON.
    """

    def __init__(self, ticker):
        # NOTE: the original used bare string literals as comments; they were
        # no-op expression statements and have been converted to real comments.
        # `ticker` is the stock's unique market symbol (e.g. "AAPL").
        self.ticker = ticker
        # Load the raw data and preprocess it (windowing, min-max scaling).
        self.data_preprocessor = preprocessing.data_preprocessing(ticker)
        # Monthly aggregation of the data, used for analysis by the models.
        self.df_monthly = self.data_preprocessor.df_monthly
        # A single Training instance prevents training from happening more
        # than once, reducing the probability of overfitting.
        self.train = train.Training(ticker)
        # Train the defined models on the preprocessed data.
        self.train.train_model()
        # Plots for visualising training and validation behaviour.
        self.plots = plots.Plots(ticker)
        # Comparison plot of the 4 methods used.
        self.plots.comparison_plots()
        # Run the final calculations on the trained model's predictions.
        self.results = results.Results(ticker)
        # Final result as a pandas DataFrame...
        self.result = self.results.result
        # ...and as JSON for consumption by web developers.
        self.result_json = self.result.to_json(orient="split")
        print(self.result)
|
import unittest
import main
class TestMain(unittest.TestCase):
    """Unit tests for the g-code parsing helpers in `main`."""

    def test_get_current_layer_in_non_layer_line(self):
        """A layer-height comment must not be mistaken for a layer marker."""
        layer_at = main.get_current_layer(';Layer height: 0.2')
        self.assertEqual(layer_at, None, "Parsed layer position should be None")

    def test_get_current_layer_in_non_layer_line_again(self):
        """A LAYER_COUNT comment must not be mistaken for a layer marker."""
        layer_at = main.get_current_layer(';LAYER_COUNT:115')
        self.assertEqual(layer_at, None, "Parsed layer position should be None")

    def test_get_current_layer(self):
        """A `;LAYER:n` comment yields the layer number as an int."""
        layer_at = main.get_current_layer(';LAYER:115')
        self.assertEqual(layer_at, 115, "Parsed layer position should be a number")

    def test_not_initial_layer(self):
        """Only layer 0 counts as the initial layer."""
        self.assertTrue(main.not_initial_layer(1))
        self.assertTrue(main.not_initial_layer(2))
        self.assertTrue(main.not_initial_layer(3))
        self.assertFalse(main.not_initial_layer(0))

    def test_have_to_change_variable_at_layer(self):
        """The variable changes from the target layer onwards (inclusive)."""
        self.assertFalse(main.have_to_change_variable_at_layer(1, 5))
        self.assertFalse(main.have_to_change_variable_at_layer(2, 5))
        self.assertFalse(main.have_to_change_variable_at_layer(3, 5))
        self.assertFalse(main.have_to_change_variable_at_layer(4, 5))
        self.assertTrue(main.have_to_change_variable_at_layer(5, 5))
        self.assertTrue(main.have_to_change_variable_at_layer(10, 5))
        self.assertTrue(main.have_to_change_variable_at_layer(25, 5))

    def test_is_changing_only_extruder(self):
        """Detects moves that only drive the extruder (no X/Y/Z motion)."""
        self.assertFalse(main.is_changing_only_extruder('G92 E0'))
        self.assertFalse(main.is_changing_only_extruder('G1 X134.432 Y103.996 E0.05422'))
        self.assertTrue(main.is_changing_only_extruder('G1 F2700 E-5'))
        self.assertTrue(main.is_changing_only_extruder('G1 F2700 E7.6531'))

    def test_get_extruder_position(self):
        """Extracts the E-axis value from a g-code line (None if absent)."""
        self.assertEqual(main.get_extruder_position('G1 F1200 X133.539 Y103.864 E0.02712'), 0.02712)
        self.assertEqual(main.get_extruder_position('G1 F2700 E0'), 0)
        self.assertEqual(main.get_extruder_position('G1 F2700 E-2'), -2)
        self.assertEqual(main.get_extruder_position('G1 F600 X128.835 Y119.171 E193.50153'), 193.50153)
        self.assertEqual(main.get_extruder_position('G1 F600 X128.835 Y119.171 E193.50153'), 193.50153)
        self.assertEqual(main.get_extruder_position('G1 X0.1 Y200.0 Z0.3 F1500.0 E15 ; Draw the first line'), 15)
        self.assertEqual(main.get_extruder_position('G92 E0 ; Reset Extruder'), 0)
        self.assertEqual(main.get_extruder_position('G1 X5 Y20 Z0.3 F5000.0 ; Move over to prevent blob squish'), None)

    def test_is_printing(self):
        """A printing move is a G1 with both motion and extrusion."""
        self.assertTrue(main.is_printing('G1 F1200 X133.539 Y103.864 E0.02712'))
        self.assertTrue(main.is_printing('G1 X136.166 Y104.495 E0.10843'))
        self.assertFalse(main.is_printing('G0 F6000 X132.637 Y103.811 Z0.2'))
        self.assertFalse(main.is_printing('G1 F2700 E0'))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
from __future__ import annotations
from pathlib import Path
import re
from typing import Union
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit, fmin
import sympy as sym
from sympy.utilities.lambdify import lambdify
# from sympy.utilities.autowrap import ufuncify # speedup over lambdify
from sympy.parsing.sympy_parser import parse_expr
def decompose_r_func(formula: str) -> tuple[str, str, sym.core.expr.Expr]:
    """Parses R-style fit formula (`response ~ terms`), decomposing it into
    its component parts and casting `terms` to a symbolic sympy function.

    Args:
        formula: an r-style formula string, e.g. ``"y ~ a * x + b"``.

    Returns:
        (dep_var, str_func, sym_func): the response name, the terms as a
        stripped string, and the terms parsed to a sympy expression.

    Raises:
        ValueError: if `formula` does not contain exactly one ``~``.
    """
    if formula.count("~") != 1:
        err_msg = (f"[decompose_r_func] Could not decompose r-style fit "
                   f"formula: `formula` is malformed (expected `response ~ "
                   f"terms`, received `{formula}`)")
        raise ValueError(err_msg)
    # plain str.split suffices -- the separator is a literal, not a regex
    dep_var, str_func = (part.strip() for part in formula.split("~"))
    sym_func = parse_expr(str_func)
    return (dep_var, str_func, sym_func)
def identify_vars(sym_func: sym.core.expr.Expr,
                  dep_var: str,
                  data_cols: list[str],
                  n_vars: int = None
                  ) -> tuple[set[sym.Symbol], set[sym.Symbol]]:
    """Identifies independent variable(s) and fit parameters in symbolic sympy
    function via comparison against `data_cols`.

    Args:
        sym_func: parsed right-hand side of the fit formula.
        dep_var: name of the response column; must appear in `data_cols`.
        data_cols: column names of the observation data.
        n_vars: when given, require exactly this many independent variables.

    Returns:
        (ind_vars, fit_pars): free symbols matched to data columns, and the
        remaining free symbols (interpreted as fit parameters).

    Raises:
        ValueError: if `dep_var` is missing from `data_cols`, the number of
            independent variables differs from `n_vars`, or no fit parameter
            remains.
    """
    # set comprehensions instead of set([...]) wrappers (idiomatic)
    ind_vars = {s for s in sym_func.free_symbols if str(s) in data_cols}
    fit_pars = {s for s in sym_func.free_symbols if s not in ind_vars}
    if dep_var not in data_cols:
        err_msg = (f"[identify_vars] Could not identify fit variables: "
                   f"dependent variable `{dep_var}` was not detected in "
                   f"columns of `data` (columns: `{data_cols}`)")
        raise ValueError(err_msg)
    if n_vars is not None and len(ind_vars) != n_vars:
        err_msg = (f"[identify_vars] Could not identify fit variables: "
                   f"`sym_func` must have exactly {n_vars} independent "
                   f"variable(s) (candidates: `{data_cols}`, sym_func: "
                   f"`{sym_func}`)")
        raise ValueError(err_msg)
    if len(fit_pars) < 1:
        err_msg = (f"[identify_vars] Could not identify fit variables: "
                   f"`sym_func` contains no fit parameters (`{sym_func}` must "
                   f"have at least one free variable other than `{ind_vars}`)")
        raise ValueError(err_msg)
    return (ind_vars, fit_pars)
def filter_data(data: pd.DataFrame,
                to_keep: list[str],
                uncertainties: dict[str, str] = None,
                drop_na: bool = True,
                numeric_only: bool = True,
                min_points: int = 2) -> pd.DataFrame:
    """Removes unwanted columns from `data`, not including those marked as
    containing uncertainty information of the `to_keep` variables.

    Args:
        data: observation data.
        to_keep: names of the independent/dependent columns to retain.
        uncertainties: optional map {variable -> uncertainty column name};
            referenced uncertainty columns are retained as well.
        drop_na: drop rows containing missing values.
        numeric_only: require every retained column to be numeric.
        min_points: minimum number of rows that must survive filtering.

    Returns:
        The filtered view/copy of `data`.

    Raises:
        ValueError: on an invalid uncertainty map, non-numeric retained
            columns, or too few surviving rows.
    """
    # work on a copy -- the original extended the caller's `to_keep` in place
    to_keep = list(to_keep)
    if uncertainties is not None:
        for k, v in uncertainties.items():
            if k not in to_keep:
                err_msg = (f"[filter_data] Could not filter data: invalid key "
                           f"in uncertainty map (`{k}` does not reference "
                           f"independent/dependent variables: `{to_keep}`")
                raise ValueError(err_msg)
            if v not in data:
                err_msg = (f"[filter_data] Could not filter data: invalid "
                           f"value in uncertainty map (`{v}` does not "
                           f"reference a column name in `data`: "
                           f"`{data.columns}`)")
                raise ValueError(err_msg)
        to_keep.extend(uncertainties.values())
    filtered = data[to_keep]
    if numeric_only:
        numeric_cols = set(filtered.select_dtypes(include=np.number).columns)
        # fixed: the original formatted `data_cols - numeric_cols` on plain
        # lists, which raised TypeError instead of the intended ValueError
        non_numeric = [c for c in filtered.columns if c not in numeric_cols]
        if non_numeric:
            err_msg = (f"[filter_data] Could not filter data: `data` must "
                       f"contain only numeric values, after filtering columns "
                       f"(detected non-numerics in `{non_numeric}`)")
            raise ValueError(err_msg)
    if drop_na:
        filtered = filtered.dropna()
    if len(filtered) < min_points:
        err_msg = (f"[filter_data] Could not filter data: `data` must "
                   f"contain at least {min_points} data points, after "
                   f"filtering columns and removing missing values "
                   f"(received {len(filtered)})")
        raise ValueError(err_msg)
    return filtered
def gather_guesses_from_kwargs(keys: list[str],
                               random_seed: int = 12345,
                               **kwargs) -> tuple[list, tuple[list, list]]:
    """Gathers initial fit parameter guesses and/or bounds from a **kwargs dict
    and casts to scipy.optimize.curve_fit format:
        tuple(p0 = [...], bounds = ([...], [...]))
    Supports interpretation of tuple values as (min, max) parameter bounds.

    Args:
        keys: parameter names, in the order curve_fit expects them.
        random_seed: seed used when drawing a starting guess inside a fully
            bounded (min, max) range.
        **kwargs: per-parameter initial guess (int/float) or bounds
            (2-tuple; None on a side means unbounded on that side).

    Returns:
        (p0, (lower, upper)) matching curve_fit's `p0` and `bounds`
        arguments. (The original return annotation wrongly claimed a dict.)

    Raises:
        ValueError: unknown parameter name, or a bounds tuple of length != 2.
        TypeError: a guess/bounds value of unsupported type.
    """
    p0 = []
    bounds = ([], [])
    # reject unknown parameter names up front
    for k in kwargs:
        if k not in keys:
            err_msg = (f"[gather_guesses_from_kwargs] Could not gather "
                       f"parameter guesses/bounds: parameter `{k}` was not "
                       f"recognized (available parameters: `{keys}`)")
            raise ValueError(err_msg)
    for k in keys:
        if k not in kwargs:
            p0.append(1)              # default value in scipy.optimize.curve_fit
            bounds[0].append(-np.inf) # disables parameter bounds in curve_fit
            bounds[1].append(np.inf)  # ^
            continue
        val = kwargs[k]
        if not issubclass(type(val), (int, float, tuple)):
            err_msg = (f"[gather_guesses_from_kwargs] Could not gather "
                       f"parameter guesses/bounds: parameter `{k}` must be "
                       f"assigned either an int, float, or tuple of "
                       f"ints/floats (received: `{val}`)")
            raise TypeError(err_msg)
        if issubclass(type(val), tuple):
            if len(val) != 2:
                err_msg = (f"[gather_guesses_from_kwargs] Could not gather "
                           f"parameter guesses/bounds: parameter `{k}` was "
                           f"assigned a bounding tuple of unexpected length "
                           f"(expected tuple of length 2, received `{val}`)")
                raise ValueError(err_msg)
            allowed_types = (int, float, type(None))
            if not all(issubclass(type(i), allowed_types) for i in val):
                err_msg = (f"[gather_guesses_from_kwargs] Could not gather "
                           f"parameter guesses/bounds: parameter `{k}` was "
                           f"assigned a bounding tuple of invalid type (all "
                           f"elements must be int/float/None, received: "
                           f"`{val}`)")
                raise TypeError(err_msg)
            # interpret bounds
            if val[0] is None and val[1] is None:  # no bounds
                p0.append(1)  # default value in scipy.optimize.curve_fit
                bounds[0].append(-np.inf)
                bounds[1].append(np.inf)
            elif val[0] is None:  # no lower bound
                p0.append(val[1])  # initialize at max value
                bounds[0].append(-np.inf)
                bounds[1].append(val[1])
            elif val[1] is None:  # no upper bound
                p0.append(val[0])  # initialize at min value
                bounds[0].append(val[0])
                bounds[1].append(np.inf)
            else:
                val = sorted(val)
                # a fresh seeded generator per parameter keeps the drawn
                # guess deterministic regardless of parameter order
                rng = np.random.default_rng(random_seed)
                p0.append(rng.uniform(val[0], val[1]))
                bounds[0].append(val[0])
                bounds[1].append(val[1])
        else:  # val is a single int/float (None was rejected above, so the
               # original `val if val is not None else 1` check was dead code)
            p0.append(val)
            bounds[0].append(-np.inf)
            bounds[1].append(np.inf)
    return (p0, bounds)
def covariance_to_correlation(cov_mat: pd.DataFrame) -> pd.DataFrame:
    """Convert covariance matrix into correlation matrix of the same form.

    Entries whose corresponding variance is zero are set to NaN.

    Args:
        cov_mat: square covariance matrix with matching columns and index.

    Returns:
        A float DataFrame with the same labels as `cov_mat`.
    """
    par_names = list(cov_mat.columns)
    rows, cols = cov_mat.shape  # should be square
    # float array pre-filled with NaN (the original seeded with None, which
    # produced an object-dtype frame that breaks downstream numeric code)
    corr_mat = np.full((rows, cols), np.nan, dtype=float)
    for i in range(rows):
        for j in range(cols):
            x_var = cov_mat.iloc[i, i]
            y_var = cov_mat.iloc[j, j]
            if x_var != 0 and y_var != 0:
                corr_mat[i, j] = cov_mat.iloc[i, j] / np.sqrt(x_var * y_var)
    return pd.DataFrame(corr_mat, columns = par_names, index = par_names)
class SymFunc:
    """Planned symbolic-function wrapper; every method is an unimplemented
    stub. Kept as an interface sketch for future work.
    """

    def __init__(self, str_func: str):
        raise NotImplementedError()

    def derivative(self, with_respect_to: Union[str, sym.Symbol]) -> SymFunc:
        raise NotImplementedError()

    def intersection(self, other: Union[str, SymFunc]):
        raise NotImplementedError()

    def maximum(self, around) -> float:
        raise NotImplementedError()

    def minimum(self, around) -> float:
        raise NotImplementedError()

    def plot(self, *args, **kwargs) -> None:
        # fixed: `def plot(self, *)` was a SyntaxError (a bare `*` requires a
        # following named parameter); accept and ignore any args for now
        raise NotImplementedError()

    def substitute(self):
        raise NotImplementedError()

    def __call__(self, *args, **kwargs) -> float:
        # fixed: `def __call__(self, *)` was a SyntaxError (same as plot)
        raise NotImplementedError()

    def __str__(self) -> str:
        raise NotImplementedError()
class GeneralizedFitFunc:
    """Work-in-progress N-dimensional generalization of CurveFit.

    Private fields:
        _data           filtered observation data (pd.DataFrame)
        _orig_function  the r-style formula string passed to __init__
        _dep_var        name of the response column in `_data`
        _str_func       right-hand side of the formula, as a string
        _sym_func       right-hand side, parsed to a sympy expression
        _ind_vars       set of sympy Symbols matched to data columns
        _ind_var        the sole independent variable when exactly one exists
                        (several methods below are still 1-D only), else None
        _result_column  column holding the observed response values
        _lambda_func    fitted numeric callable (None until fit() runs)
        _residuals      cached residuals frame (lazy)
        _covariance     cached covariance matrix (lazy)
        _correlation    cached correlation matrix (lazy)
        _adj_r_squared  cached adjusted R^2 (lazy)
        _chi_squared    cached reduced chi^2 (lazy)
    """

    def __init__(self, data: pd.DataFrame, function: str):
        """Column names should match independent variables in expression.
        Everything else will be interpreted as a free parameter for fits.
        Use r-style: y ~ a * x + b
        match vars to columns to determine dependent and independent
        """
        # TODO: check for missing values, or implement an na_rm flag
        # TODO: check that all columns are numerical
        # check for sufficient data points
        if len(data) < 2:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not initialize FitFunc: `data` "
                       f"must have at least 2 data points to perform fit "
                       f"(received {len(data)})")
            raise ValueError(err_msg)
        self._data = data
        # check that function is of expected form (`response ~ terms`)
        if function.count("~") != 1:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not initialize FitFunc: "
                       f"`function` is malformed (expected `response ~ terms`, "
                       f"received `{function}`)")
            raise ValueError(err_msg)
        self._orig_function = function
        # parse function for dependent variable and fit equation
        parts = tuple(map(str.strip, re.split("~", self._orig_function)))
        dep_var, str_func = parts
        if dep_var not in self._data.columns:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not initialize FitFunc: "
                       f"dependent variable `{dep_var}` was not detected in "
                       f"columns of `data` (columns: {self._data.columns})")
            raise ValueError(err_msg)
        self._dep_var = dep_var
        self._str_func = str_func
        # convert fit equation to symbolic function and identify independent
        # variables + fit parameters via introspection; fixed: compare free
        # symbols against column names instead of the substring test
        # `c in str_func`, which could misclassify (e.g. column "e" matching
        # inside "exp(x)")
        sym_func = parse_expr(str_func)
        ind_vars = {s for s in sym_func.free_symbols
                    if str(s) in data.columns}
        fit_pars = {s for s in sym_func.free_symbols if s not in ind_vars}
        if len(ind_vars) < 1:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not initialize FitFunc: "
                       f"`function` must have at least one independent "
                       f"variable (column names: `{self._data.columns}`, "
                       f"function: `{self._orig_function}`)")
            raise ValueError(err_msg)
        if len(fit_pars) < 1:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not initialize FitFunc: "
                       f"`function` contains no parameters to fit (column "
                       f"names: `{self._data.columns}`, function: "
                       f"`{self._orig_function}`)")
            raise ValueError(err_msg)
        self._sym_func = sym_func
        self._ind_vars = ind_vars
        # initialize parameter dictionary, preserving order from input function
        # (dict preserves insertion order on python >= 3.7)
        in_order = sorted(fit_pars, key=lambda s: self._str_func.index(str(s)))
        self._fit_pars = dict.fromkeys(in_order, None)
        # keep only columns of data that are referenced in function; fixed:
        # select by original column order -- the original splatted a set,
        # which yields arbitrary, nondeterministic column order
        to_keep = set(map(str, self._ind_vars))
        to_keep.add(self._dep_var)
        self._data = self._data[[c for c in self._data.columns if c in to_keep]]
        # convenience handles for the (currently 1-D-only) code paths below
        self._ind_var = (next(iter(self._ind_vars))
                         if len(self._ind_vars) == 1 else None)
        self._result_column = self._dep_var
        # fixed: these lazily-populated fit artifacts were never initialized,
        # so every property below raised AttributeError before fit() ran
        self._lambda_func = None
        self._residuals = None
        self._covariance = None
        self._correlation = None
        self._adj_r_squared = None
        self._chi_squared = None

    @classmethod
    def from_simulated(cls,
                       func: Union[str, sym.core.expr.Expr],
                       ind_var: Union[str, sym.Symbol],
                       par_values: dict[Union[str, sym.Symbol], float],
                       *args,
                       start: float = 0,
                       stop: float = 10,
                       num_points: int = 20,
                       noise: float = 1,
                       seed = 12345):
        """Build a FitFunc from synthetic 1-D data generated from `func` with
        the given parameter values plus gaussian noise of scale `noise`.
        """
        # TODO: generalize to higher dimensions
        if issubclass(type(func), str):
            func = parse_expr(func)
        if issubclass(type(ind_var), str):
            ind_var = sym.Symbol(ind_var)
        if ind_var not in func.free_symbols:
            class_name = cls.__name__
            err_msg = (f"[{class_name}] Could not generate simulated data: "
                       f"`func` must contain the symbol identified in "
                       f"`ind_var` (expected '{str(func)}' to contain "
                       f"'{str(ind_var)})'")
            raise ValueError(err_msg)
        # substitute in parameter values and create lambda function
        sub_func = func.subs(par_values)
        if len(sub_func.free_symbols) != 1:
            class_name = cls.__name__
            err_msg = (f"[{class_name}] Could not generate simulated data: "
                       f"after substituting `par_values`, `func` must have "
                       f"exactly 1 free variable remaining ({str(sub_func)})")
            raise ValueError(err_msg)
        lam_func = lambdify(ind_var, sub_func, "numpy")
        # generate simulated data
        rng = np.random.default_rng(seed)
        xdata = np.linspace(start, stop, num_points)
        y = np.array([lam_func(x) for x in xdata])
        y_noise = noise * rng.normal(size=xdata.size)
        ydata = y + y_noise
        data = pd.DataFrame({str(ind_var): xdata, "data": ydata})
        # fixed: __init__ expects an r-style formula string, not the raw
        # sympy expression; build one whose response matches the column name
        return cls(data, f"data ~ {str(func)}")

    @property
    def data(self) -> pd.DataFrame:
        """Filtered observation data."""
        return self._data

    @property
    def function(self) -> sym.core.expr.Expr:
        """The symbolic fit expression (right-hand side of the formula)."""
        return self._sym_func

    @property
    def independent_variables(self) -> set[sym.Symbol]:
        """Free symbols matched to data columns."""
        return self._ind_vars

    @property
    def fit_parameters(self) -> dict[sym.Symbol, float]:
        """Fit parameters in formula order; values are None before fit()."""
        return self._fit_pars

    @property
    def covariance_matrix(self) -> pd.DataFrame:
        """Covariance matrix of the fitted parameters (requires fit())."""
        if self._lambda_func is None:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not generate covariance matrix: "
                       f"function has not yet been fitted")
            raise RuntimeError(err_msg)
        return self._covariance

    @property
    def correlation_matrix(self) -> pd.DataFrame:
        """Correlation matrix derived from the covariance matrix (lazy)."""
        if self._correlation is None:
            # covariance_matrix raises RuntimeError before fit() has run
            self._correlation = covariance_to_correlation(self.covariance_matrix)
        return self._correlation

    @property
    def residuals(self) -> pd.DataFrame:
        """Frame of (x, residual) pairs for the current fit (1-D only)."""
        # TODO: generalize to higher dimensions
        if self._lambda_func is None:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not generate residuals: function "
                       f"has not yet been fitted")
            raise RuntimeError(err_msg)
        if self._residuals is not None:
            return self._residuals
        # fixed: the observed column is simply the response column; the
        # original recovered it with fragile column-index arithmetic
        x_col = str(self._ind_var)
        observed = self._result_column
        get_expected = lambda frame: self._lambda_func(frame[x_col])
        get_residual = lambda frame: frame[observed] - frame["expected"]
        df = self.data.assign(expected = get_expected, residual = get_residual)
        self._residuals = df[[x_col, "residual"]]
        return self._residuals

    @property
    def degrees_of_freedom(self) -> int:
        """Number of observations minus number of fit parameters."""
        return len(self.data) - len(self.fit_parameters)

    @property
    def r_squared(self) -> float:
        raise NotImplementedError()

    @property
    def adjusted_r_squared(self) -> float:
        """Returns the adjusted r-squared coefficient of determination for the
        current fit.
        """
        # TODO: update for new model
        if self._adj_r_squared is not None:
            return self._adj_r_squared
        residuals = list(self.residuals["residual"])
        observed = list(self.data[self._result_column])
        n_obs = len(observed)
        avg_obs = sum(observed) / n_obs
        SS_res = sum([e**2 for e in residuals])
        SS_tot = sum([(obs - avg_obs)**2 for obs in observed])
        df_t = n_obs - 1
        df_e = self.degrees_of_freedom - 1
        self._adj_r_squared = 1 - ((SS_res/df_e) / (SS_tot/df_t))
        return self._adj_r_squared

    @property
    def chi_squared(self) -> float:
        """Returns the reduced chi-square statistic for the current fit."""
        # TODO: update for new model
        if self._chi_squared is not None:
            return self._chi_squared
        residuals = list(self.residuals["residual"])
        SS_res = sum([e**2 for e in residuals])
        self._chi_squared = SS_res / self.degrees_of_freedom
        return self._chi_squared

    @property
    def info(self) -> tuple[str]:
        # contents
        # 0 - original fit function
        # 1 - observation data
        # 2 - fit parameters
        # 3 - covariance matrix
        # 4 - adjusted r squared
        # 5 - chi squared
        raise NotImplementedError()

    def derivative(self, with_respect_to: Union[str, sym.Symbol]) -> GeneralizedFitFunc:
        """Returns a partial derivative of the current FitFunc with respect to
        the indicated variable. Interpolates new values for the dependent
        variable, scaling residuals appropriately.
        """
        if issubclass(type(with_respect_to), str):
            with_respect_to = sym.Symbol(with_respect_to)
        if with_respect_to not in self._ind_vars:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not calculate derivative of "
                       f"FitFunc: `with_respect_to` was not recognized as "
                       f"an independent variable (expected `{self._ind_vars}`, "
                       f"received `{with_respect_to}`)")
            raise ValueError(err_msg)
        sym_deriv = sym.diff(self._sym_func, with_respect_to)
        ind_vars = [s for s in sym_deriv.free_symbols if s in self._ind_vars]
        lam_deriv = lambdify(ind_vars, sym_deriv, "numpy")
        # interpolate `dep_var` column of data; fixed: index the frame by
        # *string* column names (sympy Symbols are not valid column keys)
        col_names = [str(s) for s in ind_vars]
        interp = {self._dep_var:
                  lambda frame: lam_deriv(*[frame[c] for c in col_names])}
        data = self._data[col_names].assign(**interp)
        # scale residuals
        # TODO: get scaling coefficient, multiply by residuals, add to data
        # fixed: `self.dep_var` (nonexistent attribute) and the undefined
        # name `FitFunc` -> use the private field and this class
        function = f"{str(self._dep_var)} ~ {str(sym_deriv)}"
        return type(self)(data, function)

    def fit(self, p0: dict[Union[str, sym.Symbol],
                           Union[float, tuple[float]]] = None) -> None:
        """Fit the free parameters to the data via scipy curve_fit (1-D only).

        TODO: implement parameter guesses (maybe use kwargs?).
        if p0 values are given as tuples, they are interpreted as a bounded
        range, else they are unbounded guesses.
        TODO: return a FitInfo object
        """
        if self._ind_var not in self._sym_func.free_symbols:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Invalid sympy function: `function` "
                       f"must contain the same independent variable as "
                       f"FitFunc.data (expected '{str(self._sym_func)}' to "
                       f"contain independent variable '{str(self._ind_var)}')")
            raise ValueError(err_msg)
        # identify columns
        x = list(self.data[str(self._ind_var)])
        y = list(self.data[self._result_column])
        # set up lambda function and perform fit
        variables = [self._ind_var, *self._fit_pars.keys()]
        fit_func = lambdify(variables, self._sym_func, "numpy")
        par_values, covariance = curve_fit(fit_func, x, y)
        # gather fit parameters
        for k, v in zip(self._fit_pars.keys(), par_values):
            self._fit_pars[k] = v
        # format covariance matrix (correlation is computed lazily from it
        # by the `correlation_matrix` property)
        par_names = list(map(str, self._fit_pars.keys()))
        self._covariance = pd.DataFrame(covariance, columns = par_names,
                                        index = par_names)
        # assign fitted lambda function
        fit_func = self._sym_func.subs(self._fit_pars)
        self._lambda_func = lambdify(self._ind_var, fit_func, "numpy")

    def intersection(self, other: GeneralizedFitFunc, x0: float,
                     tolerance: float = 1e-8,
                     recursion_limit: int = 1000) -> float:
        """Find an x where self(x) == other(x) via Newton-Raphson from x0."""
        # TODO: move this to a specific, 1D case
        # fixed: `ind_var` was an undefined name; both derivatives are taken
        # with respect to this fit's independent variable (assumes `other`
        # shares the same variable -- TODO confirm for mixed-variable fits)
        this_deriv = self.derivative(with_respect_to = self._ind_var)
        other_deriv = other.derivative(with_respect_to = self._ind_var)
        f = lambda x: self.__call__(x) - other.__call__(x)
        f_prime = lambda x: this_deriv.__call__(x) - other_deriv.__call__(x)
        recursion_depth = 0
        while abs(f(x0)) > tolerance:
            x0 = x0 - f(x0) / f_prime(x0)
            recursion_depth += 1
            if recursion_depth > recursion_limit:
                class_name = self.__class__.__name__
                err_msg = (f"[{class_name}] Could not compute "
                           f"intersection: recursion_limit reached "
                           f"({recursion_limit})")
                raise RecursionError(err_msg)
        return x0

    def maximum(self, around: float) -> float:
        # reflect function, then minimize
        raise NotImplementedError()

    def minimum(self, around: float) -> float:
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html
        raise NotImplementedError()

    def plot(self):
        raise NotImplementedError()

    def t_test(self, hypothesis: str, alpha: float = None, plot: bool = False,
               save_to: Path = None) -> Union[float, bool]:
        """two-tailed t_test (one-sided optional). Returns p-value of a
        particular proposed parameter value. If alpha is given, returns
        boolean.
        Maybe pass in alternative hypothesis as a string? e.g. "a=2" would
        imply a two-sided hypothesis test. "a > 2" would be one-sided.
        """
        # TODO: finish writing test logic
        comp = re.findall("(={1,2}|<=|>=|<|>)", hypothesis)
        if len(comp) != 1:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not perform t-test: "
                       f"`hypothesis` must have exactly one of the following "
                       f"(in)equality operators: ['=', '==', '>', '>=', '<', "
                       f"'<='] (received {comp})")
            raise ValueError(err_msg)
        lhs, rhs = list(map(str.strip, re.split(comp[0], hypothesis)))
        # check lhs is in par_values and rhs is a number
        if any(c in comp for c in ["=", "=="]):  # two-tailed
            pass
        if any(c in comp for c in [">", ">="]):  # right-tailed
            pass
        if any(c in comp for c in ["<", "<="]):  # left-tailed
            pass
        raise NotImplementedError()

    def __call__(self, x) -> float:
        """Evaluate the fitted function at x, fitting first if necessary."""
        if self._lambda_func is None:
            self.fit()
        return self._lambda_func(x)

    def __enter__(self):
        raise NotImplementedError()

    def __exit__(self, exc_type, exc_value, traceback):
        # fixed signature: the context-manager protocol passes exception info
        raise NotImplementedError()

    def __str__(self) -> str:
        return str(self.function)
class CurveFit:
"""
Private:
_data
_uncertainties
_ind_var
_dep_var
_orig_formula
_str_func
_sym_func
_lam_func
_fit_pars
_covariance
_correlation
_chi_squared
_adj_r_squared
_r_squared
_residuals
"""
def __init__(self, data: pd.DataFrame, formula: str,
uncertainties: dict[str, str] = None):
"""Column names should match independent variables in expression.
Everything else will be interpreted as a free parameter for fits.
Use r-style: y ~ a * x + b
match vars to columns to determine dependent and independent
"""
dep_var, str_func, sym_func = decompose_r_func(formula)
ind_vars, fit_pars = identify_vars(sym_func, dep_var,
list(data.columns), n_vars = 1)
to_keep = [*list(map(str, ind_vars)), dep_var]
data = filter_data(data, to_keep, uncertainties, drop_na=True,
numeric_only=True, min_points=2)
# assign private fields
self._orig_formula = formula
self._str_func = str_func
self._sym_func = sym_func
self._dep_var = dep_var
self._ind_var = ind_vars.pop()
self._data = data
# initialize fit parameter dictionary, preserving order from input func
in_order = sorted(fit_pars, key=lambda s: self._str_func.index(str(s)))
self._fit_pars = dict.fromkeys(in_order, None)
# as of python 3.7, dict preserves insertion order by default. For
# python <= 3.6, replace above with the following:
# from collections import OrderedDict
# self._fit_pars = OrderedDict.fromkeys(in_order, None)
if uncertainties is not None:
self._uncertainties = uncertainties
@classmethod
def from_simulated(cls,
                   formula: str,
                   ind_var: str,
                   par_values: dict[str, float],
                   *,
                   start: float = 0,
                   stop: float = 10,
                   num_points: int = 10,
                   noise: float = 1,
                   x_uncertain: bool = False,
                   y_uncertain: bool = False,
                   mag_uncertain: float = 1,
                   seed = 12345):
    """Return a new CurveFit built from randomly-generated simulation data
    that follows the relationship specified in `formula`."""
    # `formula` must be of the expected `response ~ terms` form
    dep_var, _, sym_func = decompose_r_func(formula)
    ind_sym = sym.Symbol(ind_var)
    if ind_sym not in sym_func.free_symbols:
        raise ValueError(
            f"[{cls.__name__}.from_simulated] Could not generate "
            f"simulation data: `formula` must contain the symbol "
            f"identified in `ind_var` (expected `{str(formula)}` "
            f"to contain `{str(ind_sym)})`")
    # substitute parameter values; exactly one symbol may remain free
    substituted = sym_func.subs(par_values)
    if len(substituted.free_symbols) != 1:
        raise ValueError(
            f"[{cls.__name__}.from_simulated] Could not generate "
            f"simulation data: after substituting `par_values`, "
            f"`formula` must have exactly 1 free variable "
            f"remaining ({str(substituted)})")
    simulator = lambdify(ind_sym, substituted, "numpy")
    # simulate noisy observations of the substituted function
    x_vals = np.linspace(start, stop, num_points)
    clean_y = simulator(x_vals)
    rng = np.random.default_rng(seed)
    y_vals = clean_y + noise * rng.normal(size=x_vals.size)
    frame = pd.DataFrame({str(ind_sym): x_vals, dep_var: y_vals})
    # no simulated uncertainties requested: hand back the plain fit
    if not (x_uncertain or y_uncertain):
        return cls(frame, formula)
    # attach simulated per-point uncertainties as extra columns
    uncertainties = {}
    if x_uncertain:
        x_unc_col = f"{str(ind_sym)}_unc"
        frame[x_unc_col] = abs(mag_uncertain * rng.normal(size=x_vals.size))
        uncertainties[str(ind_sym)] = x_unc_col
    if y_uncertain:
        y_unc_col = f"{dep_var}_unc"
        frame[y_unc_col] = abs(mag_uncertain * rng.normal(size=y_vals.size))
        uncertainties[dep_var] = y_unc_col
    return cls(frame, formula, uncertainties)
@property
def adjusted_r_squared(self) -> float:
    """Adjusted coefficient of determination for the current fit (cached
    after the first computation)."""
    if getattr(self, "_adj_r_squared", None) is not None:
        return self._adj_r_squared
    res = list(self.residuals["residual"])
    obs = list(self._data[self._dep_var])
    n = len(obs)
    mean_obs = sum(obs) / n
    ss_res = sum(e**2 for e in res)
    ss_tot = sum((o - mean_obs)**2 for o in obs)
    total_df = n - 1
    error_df = self.degrees_of_freedom - 1
    self._adj_r_squared = 1 - ((ss_res/error_df) / (ss_tot/total_df))
    return self._adj_r_squared
@property
def chi_squared(self) -> float:
    """Reduced chi-square statistic for the current fit (cached)."""
    if getattr(self, "_chi_squared", None) is not None:
        return self._chi_squared
    ss_res = sum(e**2 for e in list(self.residuals["residual"]))
    self._chi_squared = ss_res / self.degrees_of_freedom
    return self._chi_squared
@property
def degrees_of_freedom(self) -> int:
    """Number of observations minus the number of free fit parameters."""
    n_obs, n_pars = len(self._data), len(self._fit_pars)
    return n_obs - n_pars
@property
def fitted(self) -> bool:
    """True once a fitted lambda function has been produced by fit()."""
    return getattr(self, "_lambda_func", None) is not None
@property
def r_squared(self) -> float:
    """Coefficient of determination R^2 for the current fit (cached)."""
    if getattr(self, "_r_squared", None) is not None:
        return self._r_squared
    res = list(self.residuals["residual"])
    obs = list(self._data[self._dep_var])
    mean_obs = sum(obs) / len(obs)
    ss_res = sum(e**2 for e in res)
    ss_tot = sum((o - mean_obs)**2 for o in obs)
    self._r_squared = 1 - (ss_res / ss_tot)
    return self._r_squared
@property
def residuals(self) -> Residuals:
    """Table of (x, residual) pairs for the fitted curve (cached).

    NOTE(review): despite the annotation, this currently returns a plain
    pandas DataFrame, not a Residuals instance — confirm intended type.
    """
    if not self.fitted:
        raise RuntimeError(
            f"[{self.__class__.__name__}.residuals] Could not "
            f"compute residuals: CurveFit has not yet been fitted")
    if getattr(self, "_residuals", None) is None:
        def predicted(frame):
            return self._lambda_func(frame[str(self._ind_var)])
        def leftover(frame):
            return frame[self._dep_var] - frame["expected"]
        # "expected" must be assigned before "residual", which reads it
        augmented = self._data.assign(expected=predicted, residual=leftover)
        self._residuals = augmented[[str(self._ind_var), "residual"]]
    return self._residuals
# def derivative(self) -> CurveFit:
# return CurveFit(self._data, sym.diff(self._sym_func, self._ind_var))
def fit(self, random_seed = None, **kwargs) -> None:
    """Optimize the free parameters against the data with scipy curve_fit.

    TODO: return a FitInfo object.

    Args:
        random_seed: NOTE(review): currently unused — confirm whether it
            should seed the initial parameter guesses.
        **kwargs: per-parameter initial guesses/bounds, consumed by
            gather_guesses_from_kwargs.
    """
    x_data = list(self._data[str(self._ind_var)])
    y_data = list(self._data[self._dep_var])
    # set up lambda function
    variables = [self._ind_var, *self._fit_pars.keys()]
    fit_func = lambdify(variables, self._sym_func, "numpy")
    # gather parameter guesses/bounds, variable uncertainties, then fit
    keys = list(map(str, self._fit_pars.keys()))
    p0, bounds = gather_guesses_from_kwargs(keys, **kwargs)
    if hasattr(self, "_uncertainties"):
        if str(self._ind_var) in self._uncertainties:
            # x-uncertainties require Orthogonal Distance Regression
            # (see scipy.odr docs) -- not implemented yet
            raise NotImplementedError()
        else:
            # y-uncertainties only: weight the fit with absolute sigmas
            sigma = list(self._data[self._uncertainties[self._dep_var]])
            fit_kwargs = {"p0": p0, "bounds": bounds, "sigma": sigma,
                          "absolute_sigma": True}
            par_v, cov = curve_fit(fit_func, x_data, y_data, **fit_kwargs)
    else:
        par_v, cov = curve_fit(fit_func, x_data, y_data,
                               p0=p0, bounds=bounds)
    # store fitted parameter values, preserving dictionary order
    for k, v in zip(self._fit_pars, par_v):
        self._fit_pars[k] = v
    # format covariance/correlation matrix
    par_names = list(map(str, self._fit_pars.keys()))
    self._covariance = pd.DataFrame(cov, columns=par_names, index=par_names)
    self._correlation = covariance_to_correlation(self._covariance)
    # bake the fitted parameters into a callable of the single free variable
    fit_func = self._sym_func.subs(self._fit_pars)
    self._lambda_func = lambdify(self._ind_var, fit_func, "numpy")
def intersection(self, other: FitFunc, x0: float, tolerance: float = 1e-8,
                 recursion_limit: int = 1000) -> float:
    """Find an x near ``x0`` where ``self(x) == other(x)`` via
    Newton-Raphson iteration on the difference of the two curves.

    Fixes: the previous implementation referenced an undefined name
    ``ind_var`` and called a ``derivative()`` method that does not exist
    (it is commented out above), so it always crashed. The derivative of
    the difference is now estimated with a central finite difference.

    Args:
        other: another fitted, callable curve.
        x0: starting guess for the intersection point.
        tolerance: |self(x) - other(x)| threshold for convergence.
        recursion_limit: maximum number of Newton iterations.

    Returns:
        The converged x value.

    Raises:
        RecursionError: when recursion_limit iterations are exhausted.
    """
    f = lambda x: self(x) - other(x)
    step = 1e-6  # central-difference step for the numerical derivative
    f_prime = lambda x: (f(x + step) - f(x - step)) / (2 * step)
    iterations = 0
    while abs(f(x0)) > tolerance:
        x0 = x0 - f(x0) / f_prime(x0)
        iterations += 1
        if iterations > recursion_limit:
            class_name = self.__class__.__name__
            err_msg = (f"[{class_name}] Could not compute "
                       f"intersection: recursion_limit reached "
                       f"({recursion_limit})")
            raise RecursionError(err_msg)
    return x0
def maximum(self, around: float) -> float:
    """Locate a local maximum of the fitted curve near `around`
    (not yet implemented)."""
    # reflect function, then minimize
    raise NotImplementedError()
def minimum(self, around: float) -> float:
    """Locate a local minimum of the fitted curve near `around`
    (not yet implemented)."""
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html
    raise NotImplementedError()
def plot(self,
         *,
         height: float = None,
         width: float = None,
         background_color: str = None,
         foreground_color: str = None,
         title: str = None,
         x_label: str = None,
         y_label: str = None,
         legend: bool = True,
         x_lim: Union[float, tuple[float, float]] = None,
         y_lim: Union[float, tuple[float, float]] = None,
         log_x: bool = False,
         log_y: bool = False,
         data_alpha: float = 1,
         data_color: str = None,
         data_marker: Union[str, Path] = "s",
         data_size: float = None,
         data_label: str = None,
         errorbar_alpha: float = None,
         errobar_color: str = None,
         errorbar_width: float = None,
         errorbar_capsize: float = None,
         fit_alpha: float = 0.8,
         fit_color: str = None,
         fit_line_style: str = "solid",
         fit_line_width: float = None,
         fit_label: str = None,
         add_to_figure: bool = False,
         add_to_subplot: bool = False,
         subplots_per_row: int = 3,
         save_to: Path = None) -> None:
    """Plots the current fit data and trend line, along with error bars if
    applicable.

    Named colors in matplotlib:
        https://matplotlib.org/stable/gallery/color/named_colors.html

    NOTE: the `errobar_color` keyword keeps its historical (misspelled)
    name for backward compatibility with existing callers.

    Fixes: y-axis limits previously used `set_ylim(left=..., right=...)`,
    which raises TypeError (Axes.set_ylim takes bottom/top); the fit-line
    sample count is now clamped to at least 2 so a degenerate x-range
    still renders; a dead `sorted(ax.get_xlim())` assignment was removed.

    TODO: directly reference figure rather than using `add_to_figure`
    TODO: ^ same with axis
    """
    # get figure
    if not add_to_figure:  # create new figure
        fig = plt.figure(constrained_layout=True)
        # configure fig
        if width is not None:
            fig.set_figwidth(width)
        if height is not None:
            fig.set_figheight(height)
        if background_color is not None:
            fig.set_facecolor(background_color)
    else:  # add to existing figure
        fig = plt.gcf()
    # get axes
    if not add_to_subplot:  # create new axes/subplot
        size = len(fig.get_axes())
        if size > 0:
            orig_gs = fig.axes[0].get_subplotspec() \
                                 .get_topmost_subplotspec() \
                                 .get_gridspec()
            rows, cols = orig_gs.get_geometry()
            # search for first empty subplot
            gen = (i for i, j in enumerate(fig.axes) if not j.has_data())
            first_empty = next(gen, size)
            if first_empty >= rows * cols:  # grid is full: grow it
                old_width, old_height = fig.get_size_inches()
                if cols < subplots_per_row:  # add a column
                    fig.set_figwidth(old_width * (cols + 1) / cols)
                    new_gs = gridspec.GridSpec(rows, cols + 1)
                else:  # add a row
                    fig.set_figheight(old_height * (rows + 1) / rows)
                    new_gs = gridspec.GridSpec(rows + 1, cols)
                for i in range(size):  # reposition existing subplots
                    new_sps = gridspec.SubplotSpec(new_gs, i)
                    fig.axes[i].set_subplotspec(new_sps)
                new_sps = gridspec.SubplotSpec(new_gs, first_empty)
                ax = fig.add_subplot(new_sps)
            else:
                if first_empty < size:  # found an empty subplot: reuse slot
                    fig.axes[first_empty].remove()
                new_sps = gridspec.SubplotSpec(orig_gs, first_empty)
                ax = fig.add_subplot(new_sps)
        else:
            ax = fig.add_subplot(1, 1, 1)
        if foreground_color is not None:
            ax.set_facecolor(foreground_color)
        # labels
        if title is not None:
            ax.set_title(title)
        else:
            ax.set_title(f"CurveFit: {self._orig_formula}")
        if x_label is not None:
            ax.set_xlabel(x_label)
        else:
            ax.set_xlabel(str(self._ind_var))
        if y_label is not None:
            ax.set_ylabel(y_label)
        else:
            ax.set_ylabel(self._dep_var)
        # limits: a tuple sets both ends, a scalar only the upper end
        if x_lim is not None:
            if issubclass(type(x_lim), tuple):
                ax.set_xlim(left=x_lim[0], right=x_lim[1])
            else:
                ax.set_xlim(right=x_lim)
        if y_lim is not None:
            if issubclass(type(y_lim), tuple):
                # BUGFIX: Axes.set_ylim takes bottom/top, not left/right
                ax.set_ylim(bottom=y_lim[0], top=y_lim[1])
            else:
                ax.set_ylim(top=y_lim)
    else:  # add to existing axes/subplot
        ax = plt.gca()
    # plot scatter data
    if data_label is not None:
        label = data_label
    else:
        label = f"Observed {self._dep_var}"
    plt.scatter(str(self._ind_var),
                self._dep_var,
                s=data_size,
                c=data_color,
                marker=data_marker,
                alpha=data_alpha,
                data=self._data,
                label=label)
    # add error bars if applicable:
    if hasattr(self, "_uncertainties"):
        if str(self._ind_var) in self._uncertainties:  # include x errors
            plt.errorbar(str(self._ind_var),
                         self._dep_var,
                         xerr=self._uncertainties[str(self._ind_var)],
                         yerr=self._uncertainties[self._dep_var],
                         ecolor=errobar_color,
                         alpha=errorbar_alpha,
                         elinewidth=errorbar_width,
                         capsize=errorbar_capsize,
                         linestyle="",
                         data=self._data)
        else:  # only has y errors
            plt.errorbar(str(self._ind_var),
                         self._dep_var,
                         yerr=self._uncertainties[self._dep_var],
                         ecolor=errobar_color,
                         alpha=errorbar_alpha,
                         elinewidth=errorbar_width,
                         capsize=errorbar_capsize,
                         linestyle="",
                         data=self._data)
    # add fit line over the observed x range:
    min_x = int(np.floor(self._data[str(self._ind_var)].min()))
    max_x = int(np.ceil(self._data[str(self._ind_var)].max()))
    # clamp to >= 2 samples so a zero-width range still draws a line
    n_samples = max(10 * (max_x - min_x), 2)
    x_fitted = np.linspace(min_x, max_x, n_samples)
    y_fitted = self.__call__(x_fitted)
    if fit_label is not None:
        label = fit_label
    else:
        label = f"Predicted {self._dep_var}"
    plt.plot(x_fitted,
             y_fitted,
             c=fit_color,
             ls=fit_line_style,
             lw=fit_line_width,
             alpha=fit_alpha,
             label=label)
    # configure scale/legend
    if log_x:
        plt.xscale("symlog")
    if log_y:
        plt.yscale("symlog")
    if legend:
        plt.legend()
        plt.tight_layout()
    # display plot:
    if save_to is None:
        plt.show()
    else:
        plt.savefig(save_to)
def t_test(self, hypothesis: str, alpha: float = None, plot: bool = False,
           save_to: Path = None) -> Union[float, bool]:
    """two-tailed t_test (one-sided optional). Returns p-value of a
    particular proposed parameter value. If alpha is given, returns
    boolean.

    Maybe pass in alternative hypothesis as a string? e.g. "a=2" would
    imply a two-sided hypothesis test. "a > 2" would be one-sided.

    NOTE(review): work in progress -- the operator is parsed and the
    hypothesis split, but every tail branch is a stub and the method
    always raises NotImplementedError.
    """
    # exactly one (in)equality operator is required in the hypothesis
    comp = re.findall("(={1,2}|<=|>=|<|>)", hypothesis)
    if len(comp) != 1:
        class_name = self.__class__.__name__
        err_msg = (f"[{class_name}] Could not perform t-test: "
                   f"`hypothesis` must have exactly one of the following "
                   f"(in)equality operators: ['=', '==', '>', '>=', '<', "
                   f"'<='] (received {comp})")
        raise ValueError(err_msg)
    lhs, rhs = list(map(str.strip, re.split(comp[0], hypothesis)))
    # check lhs is in par_values and rhs is a number
    if any(c in comp for c in ["=", "=="]):  # two-tailed
        pass
    if any(c in comp for c in [">", ">="]):  # right-tailed
        pass
    if any(c in comp for c in ["<", "<="]):  # left-tailed
        pass
    raise NotImplementedError()
def __call__(self, x: Union[float, np.array], **kwargs) -> float:
    """Evaluate the fitted curve at ``x``, fitting on demand first.

    Any keyword arguments are forwarded to fit() when a fit is needed.
    """
    needs_fit = not self.fitted
    if needs_fit:
        self.fit(**kwargs)
    return self._lambda_func(x)
def __str__(self) -> str:
    """Render the formula with fitted values rounded to 4 decimals once
    fitted; otherwise render the raw symbolic function."""
    if not self.fitted:
        return str(self._sym_func)
    rounded = {name: np.round(value, 4)
               for name, value in self._fit_pars.items()}
    return str(self._sym_func.subs(rounded))
class Residuals:
    """Wrapper around a residuals table produced by CurveFit.residuals.

    Fixes: the constructor previously discarded ``res_df`` entirely and,
    on invalid input, raised ``ValueError(())`` with an empty message.
    The frame is now stored, which also makes ``sum_squares`` computable.
    """

    def __init__(self, res_df: pd.DataFrame):
        """Store the residual table; it must contain a 'residual' column.

        Raises:
            ValueError: when `res_df` has no 'residual' column.
        """
        if "residual" not in res_df.columns:
            err_msg = (f"[{self.__class__.__name__}] Could not create "
                       f"Residuals: `res_df` must contain a 'residual' "
                       f"column (found {list(res_df.columns)})")
            raise ValueError(err_msg)
        self._res_df = res_df

    @property
    def sum_squares(self) -> float:
        """Sum of the squared residuals."""
        return float((self._res_df["residual"] ** 2).sum())

    def histogram(self, save_to: Path = None):
        # TODO: histogram of residual magnitudes
        raise NotImplementedError()

    def plot(self, save_to: Path = None):
        # TODO: residual-vs-x scatter plot
        raise NotImplementedError()

    def qq_plot(self, save_to: Path = None):
        # TODO: normal quantile-quantile plot
        raise NotImplementedError()

    def rescale(self, coef: float):
        # TODO: scale residuals by a constant factor
        raise NotImplementedError()
def main() -> None:
    """Demo script: fit a simulated exponential dataset, print fit
    diagnostics, and render two example plots."""
    formula = "y ~ a * exp(b * x) + c"
    # formula = "y ~ a * x**2 + b * x + c"
    # formula = "y ~ a * 1/x + b * x + c"
    # simulate 11 noisy points of y = 1.1*exp(1.2*x) + 1.3 with y-errors
    f = CurveFit.from_simulated(formula,
                                "x",
                                {"a": 1.1, "b": 1.2, "c": 1.3},
                                start = 0,
                                stop = 10,
                                num_points = 11,
                                noise = 1,
                                y_uncertain=True,
                                mag_uncertain=10,
                                seed=123)
    f.fit()
    print(f._data)
    print(f"{'Formula:':<30} {f._orig_formula}")
    print(f"{'Dependent variable:':<30} {f._dep_var}")
    print(f"{'String formula:':<30} {f._str_func}")
    print(f"{'Symbolic function:':<30} {f._sym_func}")
    # print(f"{'Derivative:':<30} {f.derivative()._sym_func}")
    print(f"{'Independent Variable:':<30} {f._ind_var}")
    print(f"{'Fit parameters:':<30} {f._fit_pars}")
    print(f"{'After substitution:':<30} {f}")
    print(f"{'R-squared:':<30} {f.r_squared}")
    print(f"{'Adjusted R-squared:':<30} {f.adjusted_r_squared}")
    print(f"{'Reduced chi-squared:':<30} {f.chi_squared}")
    # NOTE(review): the label interpolates _dep_var, but f(4) evaluates at
    # the independent variable x = 4 -- label likely meant _ind_var.
    interp_str = f"Value @ {f._dep_var} = 4:"
    print(f"{interp_str:<30} {f(4)}")
    print(f._covariance)
    print(f._correlation)
    # two plots on one figure: linear scale, then symlog y on a subplot
    fig = plt.figure()
    f.plot(save_to = Path("CurveFit_plot.png"), log_y = False, add_to_figure=True)
    f.plot(save_to = Path("CurveFit_plot.png"), log_y = True, add_to_figure=True, add_to_subplot=True)
    # f.plot(save_to = Path("CurveFit_plot.png"), log_y = False, add_to_figure=True)
    # f.plot(save_to = Path("CurveFit_plot.png"), log_y = True, add_to_figure=True)
    # f.plot(save_to = Path("CurveFit_plot.png"), log_y = False, add_to_figure=True)
    # f.plot(save_to = Path("CurveFit_plot.png"), log_y = True, add_to_figure=True)
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-02 17:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds payment fields to the `lan` model and alters existing
    # text/choice fields. Auto-generated; field definitions must stay
    # in sync with the models at the time of generation.

    dependencies = [
        ('main', '0014_auto_20161124_1615'),
    ]

    operations = [
        # New nullable free-text field for accepted payment types.
        migrations.AddField(
            model_name='lan',
            name='paytypes',
            field=models.TextField(null=True, verbose_name='betalingstyper'),
        ),
        # New nullable entry price (up to 999999.99).
        migrations.AddField(
            model_name='lan',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=8, null=True, verbose_name='price'),
        ),
        migrations.AlterField(
            model_name='lan',
            name='blurb',
            field=models.TextField(help_text='Teksten, specifikt til dette lan, der bliver vist på forsiden.<br>Husk at wrappe tekst i <p> tags!', verbose_name='blurb'),
        ),
        migrations.AlterField(
            model_name='lan',
            name='schedule',
            field=models.TextField(null=True, verbose_name='tidsplan'),
        ),
        # Refresh the school-class choice list for profiles.
        migrations.AlterField(
            model_name='profile',
            name='grade',
            field=models.CharField(choices=[('16xaa', '16xaa'), ('16xab', '16xab'), ('16xac', '16xac'), ('16xad', '16xad'), ('16xae', '16xae'), ('16xaf', '16xaf'), ('16xaj', '16xaj'), ('16xap', '16xap'), ('16xar', '16xar'), ('13xa', '13xa'), ('14xaa', '14xaa'), ('14xab', '14xab'), ('14xad', '14xad'), ('14xae', '14xae'), ('14xaj', '14xaj'), ('14xap', '14xap'), ('14xaq', '14xaq'), ('14xar', '14xar'), ('15xaa', '15xaa'), ('15xab', '15xab'), ('15xac', '15xac'), ('15xad', '15xad'), ('15xae', '15xae'), ('15xaj', '15xaj'), ('15xap', '15xap'), ('15xar', '15xar'), ('teacher', 'Lærer'), ('none', 'Ukendt')], default='none', max_length=32, verbose_name='klasse'),
        ),
    ]
|
"""
@author: wangguanan
@contact: guan.wang0706@gmail.com
"""
import os, copy
from .reid_samples import ReIDSamples
class DukeMTMCreID(ReIDSamples):
    """DukeMTMC-reID.

    Reference:
        - Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
        - Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.

    URL: `<https://github.com/layumi/DukeMTMC-reID_evaluation>`_

    Dataset statistics:
        - identities: 1404 (train + query).
        - images: 16522 (train) + 2228 (query) + 17661 (gallery).
        - cameras: 8.

    Args:
        data_path(str): path to DukeMTMC-reID dataset
        combineall(bool): combine train and test sets as train set if True
        download(bool): download the dataset into data_path when missing
    """
    dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'

    def __init__(self, data_path, combineall=False, download=False):
        # If the dataset is missing, download it when allowed; otherwise fail.
        if not os.path.exists(data_path):
            if download:
                print('dataset path {} is not existed, start download dataset'.format(data_path))
                self.download_dataset(data_path, self.dataset_url)
            else:
                # Clearer error message (previously garbled English).
                raise RuntimeError(
                    'Dataset path {} does not exist. Pass download=True to '
                    'download the dataset automatically.'.format(data_path))
        # paths of train, query and gallery
        train_path = os.path.join(data_path, 'bounding_box_train/')
        query_path = os.path.join(data_path, 'query/')
        gallery_path = os.path.join(data_path, 'bounding_box_test/')
        # load samples from each split
        train = self._load_samples(train_path)
        query = self._load_samples(query_path)
        gallery = self._load_samples(gallery_path)
        # init base class with the three splits
        super(DukeMTMCreID, self).__init__(train, query, gallery, combineall=combineall)

    def _load_samples(self, folder_dir):
        '''Return a list of [img_path, identity_id, camera_id] entries.'''
        samples = []
        root_path, _, files_name = self.os_walk(folder_dir)
        for file_name in files_name:
            # endswith('.jpg') is stricter than the old substring test
            if file_name.endswith('.jpg'):
                person_id, camera_id = self._analysis_file_name(file_name)
                # os.path.join tolerates a missing trailing separator,
                # unlike raw string concatenation
                samples.append([os.path.join(root_path, file_name), person_id, camera_id])
        return samples

    def _analysis_file_name(self, file_name):
        '''Parse person and camera ids out of an image file name.

        :param file_name: format like 0002_c1_f0044158.jpg
        :return: (person_id, camera_id) as ints
        '''
        split_list = file_name.replace('.jpg', '').replace('c', '').split('_')
        person_id, camera_id = int(split_list[0]), int(split_list[1])
        return person_id, camera_id
|
from django import forms
from django.contrib.auth import authenticate, password_validation
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, PasswordResetForm
from django.utils.safestring import mark_safe
from accounts.models import EmailUser
from shared.forms import FormRenderMixin
from shared.models import normalize_email
def password_criterions_html():
    """Wraps password criterions into nice html used by other forms"""

    def tag(content, opening, closing=None):
        # Default the closing tag to the opening one when not given.
        closing = closing if closing else opening
        return "<{}>{}</{}>".format(opening, content, closing)

    criterions = password_validation.password_validators_help_texts()
    list_items = "\n".join(tag(criterion, "li") for criterion in criterions)
    return mark_safe(tag(list_items, 'ul class="helptext"', "ul"))
class LoginForm(AuthenticationForm):
    """Form used when logging in."""

    def clean(self, *args, **kwargs):
        # Normalize the submitted email (stored in the "username" field)
        # before delegating validation to AuthenticationForm.clean.
        self.cleaned_data["username"] = normalize_email(self.cleaned_data.get("username"))
        return super().clean(*args, **kwargs)
class CreateAccountForm(FormRenderMixin, UserCreationForm):
    """Form used to register a new user."""

    class Meta:
        # Email is the login identifier; names plus the two password
        # fields supplied by UserCreationForm.
        model = EmailUser
        fields = ('email', 'first_name', 'last_name', 'password1', 'password2',)
class UpdateAccountForm(FormRenderMixin, forms.ModelForm):
    """Form used to update name/email."""

    class Meta:
        model = EmailUser
        fields = ('email', 'first_name', 'last_name')
        help_texts = {"email": "Si vous la changez, il faudra confirmer la nouvelle adresse",}

    def clean_email(self):
        """Check email uniqueness against the normalized address."""
        email = self.cleaned_data["email"]
        if email == self.instance.email:
            return email
        norm_email = normalize_email(email)
        # .exists() lets the database stop at the first match instead of
        # counting every matching row (.count() > 0).
        if EmailUser.objects.filter(email=norm_email).exists():
            raise forms.ValidationError(
                "Un autre compte avec cette adresse mail existe déjà."
            )
        return norm_email

    def save(self, *args, commit=True, **kwargs):
        """Save the user; when the email changed, deactivate the account so
        the new address must be re-confirmed before logging in."""
        email_changed = "email" in self.changed_data
        user = super().save(*args, commit=False, **kwargs)
        if email_changed:
            user.email_confirmed = False
            user.is_active = False
        if commit:
            user.save()
        return user
class UpdatePasswordForm(FormRenderMixin, forms.Form):
    """Form to update one's password."""

    current_password = forms.CharField(
        widget=forms.PasswordInput, label="Mot de passe actuel",
    )
    password = forms.CharField(
        widget=forms.PasswordInput,
        help_text=password_criterions_html(),
        label="Nouveau mot de passe",
    )
    password_confirm = forms.CharField(
        widget=forms.PasswordInput, label="Nouveau mot de passe (confirmation)",
    )

    def __init__(self, *args, **kwargs):
        # The target user is injected by the view; pop it before the base
        # Form sees the kwargs.
        self.user = kwargs.pop("user", None)
        super().__init__(*args, **kwargs)

    def clean_current_password(self):
        """Check current password correctness."""
        cur_password = self.cleaned_data["current_password"]
        if authenticate(username=self.user.email, password=cur_password) != self.user:
            raise forms.ValidationError("Votre mot de passe actuel est incorrect.")
        return cur_password

    def clean_password(self):
        """Check password strength."""
        password = self.cleaned_data["password"]
        password_validation.validate_password(password)
        return password

    def clean_password_confirm(self):
        """Check that both passwords match.

        Fix: a clean_<field> hook must return that field's cleaned value;
        the previous version re-ran the whole form clean via super().clean()
        and returned the entire cleaned_data dict as the field value.
        """
        password = self.cleaned_data.get("password")
        password_confirm = self.cleaned_data.get("password_confirm")
        if password and password != password_confirm:
            raise forms.ValidationError(
                "Les deux mots de passe ne sont pas identiques."
            )
        return password_confirm

    def apply(self):
        """Apply the password change, assuming validation was already passed."""
        self.user.set_password(self.cleaned_data["password"])
        self.user.save()
class PasswordResetEmailForm(PasswordResetForm):
    """Form used to ask for an email containing a password reset link."""

    def clean(self, *args, **kwargs):
        # Normalize the email before the base class validates it.
        self.cleaned_data["email"] = normalize_email(self.cleaned_data.get("email"))
        return super().clean(*args, **kwargs)
|
import logging
import boto3

# Shared module logger; DEBUG level so every incoming event is recorded.
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
def delete_birthday_from_dynamodb(key: str) -> None:
    """
    Deletes birthday from dynamodb, with given unsubscribe key.

    :param key: Unsubscribe key identifier.
    :return: None.
    :raises ValueError: if no birthday matches the unsubscribe key.
    """
    dynamodb_client = boto3.client('dynamodb')
    # Look the item up through the unsubscribe-key secondary index.
    results = dynamodb_client.query(
        IndexName='UnsubscribeKeyIndex',
        ExpressionAttributeValues={
            ':keyValue': {
                'S': key,
            },
        },
        KeyConditionExpression='UnsubscribeKey = :keyValue',
        TableName='BirthdaysTable',
    )
    items = results.get('Items', [])
    if not items:
        # Fail with a clear message instead of an opaque IndexError when
        # the key matches nothing.
        raise ValueError('No birthday found for unsubscribe key: {}'.format(key))
    birthday = items[0]
    # Delete by the table's primary key extracted from the queried item.
    dynamodb_client.delete_item(
        Key={
            'DateKey': {
                'S': birthday['DateKey']['S'],
            },
            'TimeName': {
                'S': birthday['TimeName']['S'],
            },
        },
        TableName='BirthdaysTable',
    )
def lambda_handler(event, context):
    """AWS Lambda entry point: delete the birthday for event['key'].

    :param event: invocation payload; must carry the unsubscribe 'key'.
    :param context: Lambda runtime context (unused).
    :return: {'message': 'Successful'} on success.
    :raises Exception: "Malformed input" on any failure (original cause
        chained for debuggability).
    """
    # NOTE(review): quote swapping is a crude attempt at JSON-like log
    # output; it will mangle values that themselves contain quotes.
    logger.info(str(event).replace("'", '"'))
    try:
        delete_birthday_from_dynamodb(event['key'])
    except Exception as error:
        logger.exception("There was an exception while deleting the item")
        # Chain the original exception so the real cause survives in logs.
        raise Exception("Malformed input") from error
    return {
        'message': 'Successful'
    }
|
"""
MA Model
===============================================================================
Overview
-------------------------------------------------------------------------------
This module contains MA models using four different parameter optimization
methods: SciPy's minimization, SciKit's Ridge linear model, SciKit's Lasso
linear model and SciKit's Elastic Net linear model.
Examples
-------------------------------------------------------------------------------
MA model using SciPy's minimization:
>>> ts = load_champagne()
>>> model = MA(q = 3)
>>> model
MA(q = 3, intercept = None, theta = None)
>>> random.seed(1)
>>> model.fit(ts) # doctest: +ELLIPSIS
MA(q = 3, intercept = 153..., theta = [-0.489... -0.678... -0.112...])
>>> random.seed(1)
>>> model.predict(ts, periods = 3) # doctest: +ELLIPSIS
ci_inf ci_sup ... forecast real
1972-10-01 3824... 11215... ... 7069... None
1972-11-01 3277... 11575... ... 6585... None
1972-12-01 3430... 11987... ... 6555... None
<BLANKLINE>
[3 rows x 6 columns]
>>> random.seed(1)
>>> model.predict(ts, periods = 3, confidence_interval = 0.90) # doctest: +ELLIPSIS
ci_inf ci_sup ... forecast real
1972-10-01 4473... 10328... ... 6864... None
1972-11-01 4235... 9363... ... 6396... None
1972-12-01 3279... 8801... ... 5835... None
<BLANKLINE>
[3 rows x 6 columns]
"""
import random

import numpy
import pandas
import scipy.optimize

from skfore.models.BaseModel import BaseModel
from sklearn import linear_model
class MA(BaseModel):
    """ Moving-average model

    Parameter optimization method: scipy's minimization

    Args:
        q (int): order.
        intercept (boolean or double): False for set intercept to 0 or double
        theta (array): array of q-length for set parameters without optimization

    Returns:
        MA model structure of order q.
    """

    def __init__(self, q=None, intercept=None, theta=None):
        # Order defaults to 0 when omitted.
        if q is None:
            self.q = 0
        else:
            self.q = q
        # `== False` is deliberate: 0 must also mean "no intercept".
        if intercept == False:
            self.theta0 = 0
        else:
            self.theta0 = intercept
        self.theta = theta
        # Record which parameter groups still need optimization.
        # `is None` replaces `== None` so array-valued theta cannot
        # trigger an ambiguous element-wise comparison.
        if intercept is None and theta is None:
            self.optim_type = 'complete'
        elif intercept is None and theta is not None:
            self.optim_type = 'optim_intercept'
        elif intercept == False and theta is None:
            self.optim_type = 'no_intercept'
        elif intercept is not None and theta is None:
            self.optim_type = 'optim_params'
        elif intercept is not None and theta is not None:
            self.optim_type = 'no_optim'

    def __repr__(self):
        return 'MA(q = ' + str(self.q) + ', intercept = ' + str(self.theta0) + ', theta = ' + str(self.theta) + ')'

    def params2vector(self):
        """ Parameters to vector

        Returns:
            Vector of parameters (length depends on optim_type) for use in
            optimization; None for 'no_optim'.
        """
        params = list()
        # BUGFIX: `is None` instead of `== None` -- after a fit, theta is a
        # numpy array and `array == None` is ambiguous in a bool context,
        # which crashed any refit.
        if self.theta0 is None:
            self.theta0 = numpy.random.rand(1)[0]
        if self.theta is None:
            self.theta = numpy.random.rand(self.q)
        if self.optim_type == 'complete':
            params.append(self.theta0)
            for i in range(len(self.theta)):
                params.append(self.theta[i])
            return params
        elif self.optim_type == 'no_intercept' or self.optim_type == 'optim_params':
            for i in range(len(self.theta)):
                params.append(self.theta[i])
            return params
        elif self.optim_type == 'optim_intercept':
            params.append(self.theta0)
            return params
        elif self.optim_type == 'no_optim':
            pass

    def vector2params(self, vector):
        """ Vector to parameters

        Args:
            vector (list): vector of length q+1 to convert into parameters of
                the model

        Returns:
            self
        """
        if self.optim_type == 'complete':
            self.theta0 = vector[0]
            self.theta = vector[1:]
        elif self.optim_type == 'no_intercept' or self.optim_type == 'optim_params':
            self.theta = vector
        elif self.optim_type == 'optim_intercept':
            self.theta0 = vector[0]
        elif self.optim_type == 'no_optim':
            pass
        return self

    def __get_X__(self, ts):
        """ Get matrix of regressors

        Args:
            ts (pandas.Series): Time series to create matrix of regressors

        Returns:
            List of list of regressors for every time in series
        """
        # BUGFIX: getattr guards plain MA instances, which never define
        # self.y (only the sklearn-based subclasses set it in __init__).
        if getattr(self, 'y', None) is None:
            lon = len(ts.values)
            y = numpy.random.randn(lon)
        else:
            y = self.y
        X = list()
        for i in range(len(ts)):
            if i <= self.q:
                if i == 0:
                    value = [0] * self.q
                    X.append(value)
                else:
                    # zero-pad until q past values are available
                    value_0 = [0] * (self.q - i)
                    value_1 = y[0:i].tolist()
                    value = value_0 + value_1
                    X.append(value)
            else:
                value = y[i-self.q:i].tolist()
                X.append(value)
        return X

    def forecast(self, ts):
        """ Next step

        Args:
            ts (pandas.Series): Time series to find next value

        Returns:
            Value of next time stamp
        """
        lon = len(ts)
        history = list()
        predictions = list()
        for t in range(lon):
            length = len(history)
            if length <= self.q:
                # not enough history yet: fall back to the running mean
                yhat = numpy.mean(ts.values[0:t])
            else:
                ts_last = history[length-self.q:length]
                predicted = predictions[length-self.q:length]
                mean_predicted = numpy.mean(ts_last)
                new_predicted = self.theta0 + numpy.dot(numpy.subtract(ts_last, predicted), self.theta)
                yhat = mean_predicted + new_predicted
            predictions.append(yhat)
            history.append(ts.values[t])
        if lon == 1:
            result = ts[0]
        elif lon <= self.q:
            result = numpy.mean(history[0:lon])
        else:
            ts_last = history[lon-self.q:lon]
            predicted = predictions[lon-self.q:lon]
            mean_predicted = numpy.mean(ts_last)
            new_predicted = self.theta0 + numpy.dot(numpy.subtract(ts_last, predicted), self.theta)
            result = mean_predicted + new_predicted
        return result

    def simulate(self, ts):
        """ Fits a time series using self model parameters

        Args:
            ts (pandas.Series): Time series to fit

        Returns:
            Fitted time series
        """
        prediction = list()
        for i in range(len(ts)):
            result = self.forecast(ts[0:i])
            prediction.append(result)
        prediction = pandas.Series((v for v in prediction), index = ts.index)
        return prediction

    def fit(self, ts, error_function = None):
        """ Finds optimal parameters using a given optimization function

        Args:
            ts (pandas.Series): Time series to fit.
            error_function (function): Function to estimates error.

        Return:
            self
        """
        if self.optim_type == 'no_optim':
            pass
        else:
            def f(x):
                # objective: error of the model with candidate parameters x
                self.vector2params(x)
                return self.calc_error(ts, error_function)
            x0 = self.params2vector()
            optim_params = scipy.optimize.minimize(f, x0)
            self.vector2params(vector = optim_params.x)
        return self
class MA_Ridge(MA):
    """ Parameter optimization method: SciKit's Ridge linear model """

    def __init__(self, q=None, intercept=None, theta=None, alpha=0.5, copy_X=True, fit_intercept=True, max_iter=None,
                 normalize=False, random_state=None, solver='auto', tol=0.001):
        # Simulated outputs consumed by __get_X__; None = generate lazily.
        self.y = None
        # Default order 0 (consistent with MA) so numpy.random.rand below
        # cannot be called with None.
        self.q = 0 if q is None else q
        self.alpha = alpha
        self.copy_X = copy_X
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.normalize = normalize
        self.random_state = random_state
        self.solver = solver
        self.tol = tol
        if intercept is None:
            self.theta0 = numpy.random.rand(1)
        elif intercept == False:  # `== False`: 0 also means "no intercept"
            self.theta0 = 0
        else:
            self.theta0 = intercept
        if theta is None:
            self.theta = numpy.random.rand(self.q)
        else:
            self.theta = theta
        # Record which parameter groups still need optimization.
        if intercept is None and theta is None:
            self.optim_type = 'complete'
        elif intercept is None and theta is not None:
            self.optim_type = 'optim_intercept'
        elif intercept == False and theta is None:
            self.optim_type = 'no_intercept'
        elif intercept is not None and theta is None:
            self.optim_type = 'optim_params'
        elif intercept is not None and theta is not None:
            self.optim_type = 'no_optim'

    def __repr__(self):
        # BUGFIX: intercept previously rendered self.theta; it now renders
        # self.theta0 like the other MA variants.
        return 'MA_Ridge(q = ' + str(self.q) + ', intercept = ' + str(self.theta0) + ', theta = ' + str(self.theta) + ')'

    def fit(self, ts):
        """ Finds optimal parameters with SciKit's Ridge regression.

        Args:
            ts (pandas.Series): Time series to fit.

        Returns:
            self

        Raises:
            ValueError: when the intercept/theta configuration cannot be
                handled by Ridge regression.
        """
        if self.optim_type == 'complete':
            X = self.__get_X__(ts)
            y = ts.values.tolist()
            ridge_model = linear_model.Ridge(alpha = self.alpha, copy_X = self.copy_X,
                                             fit_intercept = self.fit_intercept,
                                             max_iter = self.max_iter,
                                             normalize = self.normalize,
                                             random_state = self.random_state,
                                             solver = self.solver, tol = self.tol)
            ridge_model.fit(X, y)
            optim_params = list()
            optim_params.append(ridge_model.intercept_)
            optim_params = optim_params + ridge_model.coef_.tolist()
            self.vector2params(vector = optim_params)
        elif self.optim_type == 'no_intercept':
            X = self.__get_X__(ts)
            y = ts.values.tolist()
            ridge_model = linear_model.Ridge(alpha = self.alpha, copy_X = self.copy_X,
                                             fit_intercept = False,
                                             max_iter = self.max_iter,
                                             normalize = self.normalize,
                                             random_state = self.random_state,
                                             solver = self.solver, tol = self.tol)
            ridge_model.fit(X, y)
            optim_params = list()
            optim_params = optim_params + ridge_model.coef_.tolist()
            self.vector2params(vector = optim_params)
        elif self.optim_type == 'no_optim':
            pass
        else:
            # BUGFIX: the message previously said "Lasso" in the Ridge class.
            error_message = "Can't apply Ridge regression using given parameters"
            raise ValueError(error_message)
        return self
class MA_Lasso(MA):
    """ Parameter optimization method: SciKit's Lasso linear model """
    def __init__(self, q=None, intercept=None, theta=None, alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
                 normalize=False, positive=False, precompute=False, random_state=None,
                 selection='cyclic', tol=0.0001, warm_start=False):
        """Create an MA(q) model whose free parameters are fitted with Lasso.

        q: model order. intercept: None (estimate), False (fix to 0) or a
        fixed value. theta: None (estimate) or fixed coefficients. The
        remaining arguments are forwarded to sklearn.linear_model.Lasso.
        """
        self.y = None
        self.q = q
        self.alpha = alpha
        self.copy_X = copy_X
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.normalize = normalize
        self.positive = positive
        self.precompute = precompute
        self.random_state = random_state
        self.selection = selection
        self.tol = tol
        self.warm_start = warm_start
        # Bug fix: identity tests ("is None") replace "== None". The equality
        # form raises "truth value of an array is ambiguous" when theta is a
        # numpy array, making array-valued theta unusable.
        if intercept is None:
            self.theta0 = numpy.random.rand(1)
        elif intercept == False:  # 0 and False both mean "no intercept" (kept deliberately)
            self.theta0 = 0
        else:
            self.theta0 = intercept
        if theta is None:
            # NOTE(review): assumes q is a positive int when theta is omitted.
            self.theta = numpy.random.rand(q)
        else:
            self.theta = theta
        # Record which pieces still need to be estimated by fit().
        if intercept is None and theta is None:
            self.optim_type = 'complete'
        elif intercept is None and theta is not None:
            self.optim_type = 'optim_intercept'
        elif intercept == False and theta is None:
            self.optim_type = 'no_intercept'
        elif intercept is not None and theta is None:
            self.optim_type = 'optim_params'
        elif intercept is not None and theta is not None:
            self.optim_type = 'no_optim'
    def __repr__(self):
        """Return a readable summary of the model configuration."""
        return 'MA_Lasso(q = ' + str(self.q) + ', intercept = ' + str(self.theta0) + ', theta = ' + str(self.theta) + ')'
    def fit(self, ts):
        """Estimate the MA parameters from time series *ts* with Lasso.

        'no_optim' is a no-op; 'complete' also estimates the intercept;
        'no_intercept' forces it to 0. Other configurations raise ValueError.
        Returns self.
        """
        if self.optim_type == 'no_optim':
            return self
        if self.optim_type not in ('complete', 'no_intercept'):
            error_message = "Can't apply Lasso regression using given parameters"
            raise ValueError(error_message)
        X = self.__get_X__(ts)
        y = ts.values.tolist()
        # Only let sklearn estimate an intercept when it was not fixed to zero.
        fit_intercept = self.fit_intercept if self.optim_type == 'complete' else False
        lasso_model = linear_model.Lasso(alpha=self.alpha, copy_X=self.copy_X,
                                         fit_intercept=fit_intercept,
                                         max_iter=self.max_iter,
                                         normalize=self.normalize,
                                         positive=self.positive,
                                         precompute=self.precompute,
                                         random_state=self.random_state,
                                         selection=self.selection, tol=self.tol,
                                         warm_start=self.warm_start)
        lasso_model.fit(X, y)
        optim_params = list()
        if self.optim_type == 'complete':
            # Intercept first, then the q MA coefficients.
            optim_params.append(lasso_model.intercept_)
        optim_params = optim_params + lasso_model.coef_.tolist()
        self.vector2params(vector=optim_params)
        return self
class MA_ElasticNet(MA):
    """ Parameter optimization method: SciKit's Elastic Net linear model """
    def __init__(self, q=None, intercept=None, theta=None, alpha=1.0, copy_X=True, fit_intercept=True, l1_ratio=0.5,
                 max_iter=1000, normalize=False, positive=False, precompute=False,
                 random_state=0, selection='cyclic', tol=0.0001, warm_start=False):
        """Create an MA(q) model whose free parameters are fitted with ElasticNet.

        q: model order. intercept: None (estimate), False (fix to 0) or a
        fixed value. theta: None (estimate) or fixed coefficients. The
        remaining arguments are forwarded to sklearn.linear_model.ElasticNet.
        """
        self.y = None
        self.q = q
        self.alpha = alpha
        self.copy_X = copy_X
        self.fit_intercept = fit_intercept
        self.l1_ratio = l1_ratio
        self.max_iter = max_iter
        self.normalize = normalize
        self.positive = positive
        self.precompute = precompute
        self.random_state = random_state
        self.selection = selection
        self.tol = tol
        self.warm_start = warm_start
        # Bug fix: identity tests ("is None") replace "== None". The equality
        # form raises "truth value of an array is ambiguous" when theta is a
        # numpy array, making array-valued theta unusable.
        if intercept is None:
            self.theta0 = numpy.random.rand(1)
        elif intercept == False:  # 0 and False both mean "no intercept" (kept deliberately)
            self.theta0 = 0
        else:
            self.theta0 = intercept
        if theta is None:
            # NOTE(review): assumes q is a positive int when theta is omitted.
            self.theta = numpy.random.rand(q)
        else:
            self.theta = theta
        # Record which pieces still need to be estimated by fit().
        if intercept is None and theta is None:
            self.optim_type = 'complete'
        elif intercept is None and theta is not None:
            self.optim_type = 'optim_intercept'
        elif intercept == False and theta is None:
            self.optim_type = 'no_intercept'
        elif intercept is not None and theta is None:
            self.optim_type = 'optim_params'
        elif intercept is not None and theta is not None:
            self.optim_type = 'no_optim'
    def __repr__(self):
        """Return a readable summary of the model configuration."""
        return 'MA_ElasticNet(q = ' + str(self.q) + ', intercept = ' + str(self.theta0) + ', theta = ' + str(self.theta) + ')'
    def fit(self, ts):
        """Estimate the MA parameters from time series *ts* with Elastic Net.

        'no_optim' is a no-op; 'complete' also estimates the intercept;
        'no_intercept' forces it to 0. Other configurations raise ValueError.
        Returns self.
        """
        if self.optim_type == 'no_optim':
            return self
        if self.optim_type not in ('complete', 'no_intercept'):
            error_message = "Can't apply Elastic Net regression using given parameters"
            raise ValueError(error_message)
        X = self.__get_X__(ts)
        y = ts.values.tolist()
        # Only let sklearn estimate an intercept when it was not fixed to zero.
        fit_intercept = self.fit_intercept if self.optim_type == 'complete' else False
        # Renamed from the copy-pasted "lasso_model" for clarity.
        enet_model = linear_model.ElasticNet(alpha=self.alpha, copy_X=self.copy_X,
                                             fit_intercept=fit_intercept,
                                             l1_ratio=self.l1_ratio,
                                             max_iter=self.max_iter,
                                             normalize=self.normalize,
                                             positive=self.positive,
                                             precompute=self.precompute,
                                             random_state=self.random_state,
                                             selection=self.selection, tol=self.tol,
                                             warm_start=self.warm_start)
        enet_model.fit(X, y)
        optim_params = list()
        if self.optim_type == 'complete':
            # Intercept first, then the q MA coefficients.
            optim_params.append(enet_model.intercept_)
        optim_params = optim_params + enet_model.coef_.tolist()
        self.vector2params(vector=optim_params)
        return self
if __name__ == "__main__":
    # Run the module's embedded doctests; ELLIPSIS lets examples elide
    # variable parts of the expected output with "...".
    import doctest
    doctest.testmod(optionflags = doctest.ELLIPSIS)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-05-09 11:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required integer "type" column to LogRecord. default=0 exists
    # only to back-fill rows that already exist; preserve_default=False
    # records that the model itself keeps no default afterwards.
    dependencies = [
        ('tt_storage', '0002_auto_20170509_1125'),
    ]
    operations = [
        migrations.AddField(
            model_name='logrecord',
            name='type',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
|
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from habt.database import Base, GetOrCreateMixin
class Architecture(GetOrCreateMixin, Base):
    """
    Represents the hardware architecture
    """
    __tablename__ = "architecture"
    """ Database table name """
    id = Column(Integer, primary_key=True)
    """ Unique id on the database """
    name = Column(String)
    """ Architecture name """
    # One-to-many: InstallTarget rows point back here via their
    # "architecture" relationship (back_populates keeps both sides in sync).
    installtargets = relationship("InstallTarget", back_populates="architecture")
    """ Installtarget, referencing this architecture """
|
from ebi.battleroyale.models import *
from django.contrib import admin
class StyleAdmin(admin.ModelAdmin):
    # Fighting styles: the change list shows only the style name.
    list_display = ('name',)
admin.site.register(Style, StyleAdmin)
class SkillAdmin(admin.ModelAdmin):
    # A player's proficiency in a given style.
    list_display = ('player', 'style', 'level', 'experience')
admin.site.register(Skill, SkillAdmin)
class ActionPhraseAdmin(admin.ModelAdmin):
    # Flavour text attached to an action performed in a style.
    list_display = ('style', 'phrase', 'action')
admin.site.register(ActionPhrase, ActionPhraseAdmin)
class WinPhraseAdmin(admin.ModelAdmin):
    # Flavour text shown when a duel is won.
    list_display = ('style', 'phrase')
admin.site.register(WinPhrase, WinPhraseAdmin)
class DuelAdmin(admin.ModelAdmin):
    # Duels: expose the full challenge/response lifecycle in the list view.
    list_display = ('created',
                    'open',
                    'challenger',
                    'challenge_move',
                    'challenge_awesomeness',
                    'target',
                    'responded',
                    'response_move',
                    'response_awesomeness')
admin.site.register(Duel, DuelAdmin)
from webtest import TestApp # type: ignore [import]
import liscopridge.cache
def pytest_configure(config):
    """Pytest hook: flip the cache layer into test mode and stop pytest from
    collecting webtest's TestApp as a test class."""
    setattr(liscopridge.cache, "_pytest", True)
    # https://github.com/Pylons/webtest/pull/227
    setattr(TestApp, "__test__", False)
|
import numpy as np
import yaml
import sys
import os
from nose.tools import assert_equal,assert_almost_equal,assert_not_equal
from ..__init__ import parser, command, user_run_defaults
from argparse import ArgumentParser
from mock import Mock, patch
sys.path.insert(0,'..')
from boids import BoidsMethod
class test_session(object):
    '''
    Description: A class to test the boids methods and command init
    '''

    # Maps a fixture file name to the BoidsMethod method it exercises.
    # Bug fix: the move_according_to_velocities entry previously lacked the
    # '.yaml' suffix, so that branch never matched the file name passed by
    # test_move_according_to_velocities and its assertions were silently
    # skipped.
    METHOD_FOR_FIXTURE = {
        'update_boids_1.yaml': 'update_boids',
        'fly_towards_middle_1.yaml': 'fly_towards_middle',
        'fly_away_from_nearby_boids_1.yaml': 'fly_away_from_nearby_boids',
        'try_to_match_speed_with_nearby_boids_1.yaml': 'try_to_match_speed_with_nearby_boids',
        'move_according_to_velocities_1.yaml': 'move_according_to_velocities',
    }

    def analyse( self, arg1, arg2, type):
        '''Assert arg1 and arg2 agree using the comparison named by *type*.'''
        if( type == 'almost_equal' ):
            #.all() as the truth value of an array > 1 element is ambiguous
            assert_almost_equal( arg1.all(), arg2.all() )
        elif( type == 'exactly_equal' ):
            assert_equal( arg1, arg2 )

    def generator( self, test_data, file ):
        '''Run each before/after fixture point through the BoidsMethod method
        matching *file* and check the resulting positions and velocities.'''
        method_name = self.METHOD_FOR_FIXTURE.get( file )
        for point in test_data:
            before = point.pop( 'before' )
            after = point.pop( 'after' )
            boids = BoidsMethod()
            boids.positions = np.array( before[0:2] )
            boids.velocities = np.array( before[2:4] )
            if method_name is None:
                # Unknown fixture name: keep the original (silent) behaviour.
                continue
            getattr( boids, method_name )()
            self.analyse( boids.positions , np.array(after[0:2]), 'almost_equal' )
            self.analyse( boids.velocities, np.array(after[2:4]), 'almost_equal' )

    # NOTE(review): yaml.load without an explicit Loader is deprecated in
    # newer PyYAML; consider yaml.safe_load for these plain-data fixtures.

    # Testing update_boids
    def test_update_boids( self ):
        with open( os.path.join(os.path.dirname(__file__), 'fixtures',
                   'update_boids_1.yaml') ) as data:
            self.generator( yaml.load(data), 'update_boids_1.yaml' )

    # Testing fly_towards_middle
    def test_fly_towards_middle( self ):
        with open( os.path.join(os.path.dirname(__file__), 'fixtures',
                   'fly_towards_middle_1.yaml') ) as data:
            self.generator( yaml.load(data), 'fly_towards_middle_1.yaml' )

    # Testing fly_away_from_nearby_boids
    def test_fly_away_from_nearby_boids( self ):
        with open( os.path.join(os.path.dirname(__file__), 'fixtures',
                   'fly_away_from_nearby_boids_1.yaml') ) as data:
            self.generator( yaml.load(data), 'fly_away_from_nearby_boids_1.yaml' )

    # Testing try_to_match_speed_with_nearby_boids
    def test_try_to_match_speed_with_nearby_boids( self ):
        with open( os.path.join(os.path.dirname(__file__), 'fixtures',
                   'try_to_match_speed_with_nearby_boids_1.yaml') ) as data:
            self.generator( yaml.load(data), 'try_to_match_speed_with_nearby_boids_1.yaml' )

    # Testing test_move_according_to_velocities
    def test_move_according_to_velocities( self ):
        with open( os.path.join(os.path.dirname(__file__), 'fixtures',
                   'move_according_to_velocities_1.yaml')) as data:
            self.generator( yaml.load(data), 'move_according_to_velocities_1.yaml' )

    # Testing generate_boids_flock
    def test_generate_boids_flock( self ):
        boids = BoidsMethod()
        flock = boids.generate_boids_flock( np.array([-450.0, 300.0]), np.array([50.0, 600.0]) )
        # Ensure the generated flock is in the correct dimensions
        # to be processed later on in the programme.
        # Bug fix: plain ints replace the Python-2-only long literals 2L/50L;
        # int == long under Python 2, so the comparison is unchanged there.
        desired_dimensions = ( 2, 50 )
        self.analyse( flock.shape, desired_dimensions, 'exactly_equal' )

    # Testing initiation of config file
    def test_command( self ):
        commands = parser.parse_args(['--config', 'my_config_file.cfg']) # Use default config
        self.analyse( commands.config , 'my_config_file.cfg', 'exactly_equal' )
if __name__ == "__main__":
    # NOTE(review): this only instantiates the test class -- none of the
    # test_* methods run here; they are presumably discovered by nose.
    test_session()
from typing import TypeVar
from typing.io import IO
import click
import pytest
from adhoc_pdb.cli import cli
def test_cli_fails_on_wrong_signum():
    """An unknown signal name must be rejected with a UsageError naming it."""
    with pytest.raises(click.UsageError) as excinfo:
        click.Context(cli).invoke(cli, signum="bla")
    message = str(excinfo.value).lower()
    assert "bla" in message
def test_cli_fails_on_wrong_pid():
    """A PID that cannot exist must be rejected with a UsageError naming it."""
    bogus_pid = 123456789
    with pytest.raises(click.UsageError) as excinfo:
        click.Context(cli).invoke(cli, pid=bogus_pid)
    assert str(bogus_pid) in str(excinfo.value)
T = TypeVar("T", str, bytes)
def _read_until(buf, pattern):
# type: (IO[T], T) -> T
c = buf.read(1)
while not c.endswith(pattern):
c += buf.read(1)
return c
def test_cli_happy_flow(script, script_path, cli_client):
    """End-to-end: attach to the running script and drive a pdb session.

    *script*, *script_path* and *cli_client* are pytest fixtures (presumably
    defined in a conftest -- they provide a target process and a connected
    CLI client; confirm there).
    """
    # The CLI first echoes the path of the script it attached to.
    assert script_path.encode("utf-8") in cli_client.stdout.readline()
    # Wait for the pdb prompt, then set a breakpoint on line 10.
    _read_until(cli_client.stdout, b"(Pdb) ")
    cli_client.stdin.write(b"b 10\n")
    cli_client.stdin.flush()
    breakpoint_response = cli_client.stdout.readline()
    assert b"Breakpoint 1 at" in breakpoint_response
    assert breakpoint_response.strip().endswith(b".py:10")
|
# Generated by Django 3.2.7 on 2021-10-28 23:54
import django.core.validators
import django_inet.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable Organization.flagged marker plus the timestamp
    # recording when it was flagged; both are optional (null=True, blank=True).
    dependencies = [
        ("peeringdb_server", "0078_ix_add_error_ixf_import_status"),
    ]
    operations = [
        migrations.AddField(
            model_name="organization",
            name="flagged",
            field=models.BooleanField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name="organization",
            name="flagged_date",
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
import datetime
from collections import defaultdict
from django import forms
from django.contrib import admin, messages
from django.core.exceptions import ValidationError
from django.forms.models import modelform_factory
from django.db.models.fields import BooleanField
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.utils import simplejson as json
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
#==============================================================================
class MassUpdateForm(forms.ModelForm):
    """ModelForm used by the mass-update admin action: a field is only
    cleaned/applied when its 'chk_id_<name>' enabler checkbox was submitted;
    many-to-many fields require the '_validate' option."""
    # Hidden field carrying the selected record ids.
    _selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
    _validate = forms.BooleanField(label=_('Validate'), help_text=_('if checked allows validating of many to many realtions'))
    def _clean_fields(self):
        # Overrides ModelForm._clean_fields so that only enabled fields are
        # cleaned, and m2m fields are rejected unless '_validate' is used.
        for name, field in self.fields.items():
            # value_from_datadict() gets the data from the data dictionaries.
            # Each widget type knows how to retrieve its own data, because some
            # widgets split data over several HTML fields.
            value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
            try:
                if isinstance(field, forms.models.ModelMultipleChoiceField) and value:
                    raise ValidationError(_("Unable to mass update many to many relation without using 'validate'"))
                if isinstance(field, forms.FileField):
                    initial = self.initial.get(name, field.initial)
                    value = field.clean(value, initial)
                else:
                    # Only clean the field when its enabler checkbox is ticked.
                    enabler = 'chk_id_%s' % name
                    if self.data.get(enabler, False):
                        value = field.clean(value)
                        self.cleaned_data[name] = value
                    # Custom per-field clean_<name>() hooks still run.
                    if hasattr(self, 'clean_%s' % name):
                        value = getattr(self, 'clean_%s' % name)()
                        self.cleaned_data[name] = value
            except ValidationError, e:
                self._errors[name] = self.error_class(e.messages)
                if name in self.cleaned_data:
                    del self.cleaned_data[name]
    def clean__validate(self):
        # Normalise the '_validate' checkbox to a real boolean.
        return self.data.get('_validate', '') == 'on'
    def _post_clean(self):
        # avoid running internals of _post_clean
        pass
    def configured_fields(self):
        """
        Returns a list of BoundField objects that aren't hidden fields, and
        is the opposite of the hidden_fields() method.
        This is only used in the template.
        """
        return [field for field in self if not field.is_hidden and field.name.startswith('_')]
    def model_fields(self):
        """
        Returns a list of BoundField objects that aren't "private" fields.
        This is only used in the template.
        """
        return [field for field in self if not field.name.startswith('_')]
#==============================================================================
def mass_update_action(description=_("Mass update")):
    """Build a Django admin action that bulk-edits the selected records.

    Returns the action callable; *description* becomes its short_description.
    """
    def mass_update(modeladmin, request, queryset):
        """
        mass update queryset
        """
        form = None
        FormClass = modelform_factory(modeladmin.model, form=MassUpdateForm)
        if 'apply' in request.POST:
            # Second round-trip: the user submitted the mass-update form.
            form = FormClass(request.POST)
            if form.is_valid():
                done = 0
                if form.cleaned_data.get('_validate', False):
                    # Validated path: save each record individually so model
                    # validation and m2m assignment run per object.
                    for record in queryset:
                        for k, v in form.cleaned_data.items():
                            setattr(record, k, v)
                        record.save()
                        done += 1
                    messages.info(request, "Updated %s records" % done)
                else:
                    # Fast path: a single UPDATE query; m2m fields cannot be
                    # handled here and abort the action.
                    values = {}
                    for field_name, value in form.cleaned_data.items():
                        if isinstance(form.fields[field_name], forms.models.ModelMultipleChoiceField):
                            messages.error(request, _("Unable to mass update many to many relation without using 'validate'"))
                            return HttpResponseRedirect(request.get_full_path())
                        elif field_name not in ['_selected_action', '_validate']:
                            values[field_name] = value
                    queryset.update(**values)
                return HttpResponseRedirect(request.get_full_path())
            else:
                messages.error(request, _("Please correct the errors below"))
        # First round-trip (or invalid submission): build the form and collect
        # per-field candidate values for the template's value picker.
        grouped = defaultdict(lambda: [])
        if not form:
            initial = { admin.helpers.ACTION_CHECKBOX_NAME: request.POST.getlist(admin.helpers.ACTION_CHECKBOX_NAME)}
            # Sample at most 10 records to propose concrete values.
            for el in queryset.all()[:10]:
                for f in modeladmin.model._meta.fields:
                    if hasattr(f, 'flatchoices') and f.flatchoices:
                        grouped[f.name] = dict(getattr(f, 'flatchoices')).values()
                    elif hasattr(f, 'choices') and f.choices:
                        grouped[f.name] = dict(getattr(f, 'choices')).values()
                    elif isinstance(f, BooleanField):
                        grouped[f.name] = [True, False]
                    else:
                        value = getattr(el, f.name)
                        if value is not None and value not in grouped[f.name]:
                            grouped[f.name].append(value)
                        initial[f.name] = initial.get(f.name, value)
            form = FormClass(initial=initial)
        adminForm = admin.helpers.AdminForm(form, modeladmin.get_fieldsets(request), {}, [], model_admin=modeladmin)
        media = modeladmin.media + adminForm.media
        # Dates are not JSON-serialisable by default; fall back to isoformat/str.
        dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.date) else str(obj)
        return render_to_response('admin/actions/mass_update.html',
                                  RequestContext(request, {'adminform': adminForm,
                                                           'form': form,
                                                           'title': _("Mass update %s") % modeladmin.opts.verbose_name_plural,
                                                           'grouped': grouped,
                                                           'fieldvalues': json.dumps(grouped, default=dthandler),
                                                           'change': True,
                                                           'is_popup': False,
                                                           'save_as': False,
                                                           'has_delete_permission': False,
                                                           'has_add_permission': False,
                                                           'has_change_permission': True,
                                                           'opts': modeladmin.model._meta,
                                                           'app_label': modeladmin.model._meta.app_label,
                                                           'action': 'mass_update',
                                                           'media': mark_safe(media),
                                                           'selection': queryset,
                                                           }))
    mass_update.short_description = description
    return mass_update
|
#! /usr/bin/python3
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Register a minimal test target named "t0".
# NOTE(review): "ttbl" is not imported in this file -- it is presumably
# injected into the namespace by the ttbd daemon when it executes this
# configuration file; confirm against the deployment docs.
target = ttbl.test_target("t0")
ttbl.config.target_add(target)
|
# Names of the format tokens a phrase template may contain.
FMT_TOK_STREAMER_NAME = "streamer_name"
FMT_TOK_STREAM_URL = "stream_url"

# Placeholder values covering every known token; used only for validation.
format_args = {FMT_TOK_STREAMER_NAME: None, FMT_TOK_STREAM_URL: None}

def validate_format_tokens(phrase):
    """Return True when *phrase* references only the known format tokens."""
    try:
        phrase.format(**format_args)
    except KeyError:
        # The template named a token we do not supply.
        return False
    else:
        return True
def text_looks_like_url(text):
    """Heuristic URL check: True when *text* starts with 'http://' or 'www.',
    contains no whitespace, and has at least one dot and one (back)slash."""
    if not text.startswith(('http://', 'www.')):
        return False
    has_space = any(ch.isspace() for ch in text)
    dot_total = text.count('.')
    slash_total = text.count('/') + text.count('\\')
    # If it starts with 'http://' or 'www.', has no spaces, 1 or more dots, and
    # 1 or more slashes, then it's probably a long-ish URL
    return (not has_space) and dot_total > 0 and slash_total > 0

def mockify_text(text):
    """Return *text* in alternating caps (upper at even indices), or None when
    the text looks like a URL and should be left untouched."""
    text = text.strip().lower()
    if text_looks_like_url(text):
        return None
    mocked = [ch.upper() if idx % 2 == 0 else ch for idx, ch in enumerate(text)]
    return ''.join(mocked)
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.vault import Vault
from .common import INSTANCES
def test_run(benchmark):
    """Benchmark Vault.check using the pytest-benchmark *benchmark* fixture."""
    instance = INSTANCES['main']
    c = Vault('vault', None, {}, [instance])
    # Run once to get instantiation of config out of the way.
    c.check(instance)
    # Only the steady-state check call is measured.
    benchmark(c.check, instance)
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import numpy as np
# from .sparse_matrix_old import SparseTensor
from .custom_functions.sparse_matrix import sparsify, unsparsify
from torch.nn import Dropout, Softmax, Linear
@torch.no_grad()
def activation_prune(activation, prune_ratio):
    """Return a boolean keep-mask that marks, per leading-dim slice, the
    elements whose magnitude is at least the k-th smallest, where k is
    (roughly) prune_ratio * elements-per-slice, clamped to [1, numel]."""
    per_sample = activation[0].numel()
    k = int(np.clip(per_sample * prune_ratio, 1, per_sample))
    magnitude = activation.abs()
    # k-th smallest magnitude within each flattened sample.
    thresh, _ = torch.kthvalue(magnitude.flatten(1), k)
    # Broadcast the per-sample threshold across the remaining dims.
    while thresh.dim() < magnitude.dim():
        thresh = thresh.unsqueeze(-1)
    return magnitude >= thresh
class SoftmaxActivationPrune(torch.autograd.Function):
    """Softmax (over the last dim) whose forward output is exact but whose
    backward pass uses a magnitude-pruned, sparsely-stored copy of that
    output, trading some gradient fidelity for activation memory."""
    @staticmethod
    def forward(ctx, x_dense, prune_ratio_attn_mat_store=0):
        dense_out = Softmax(dim=-1)(x_dense)
        # Keep-mask over the largest-magnitude softmax entries per sample.
        mask = activation_prune(dense_out, prune_ratio_attn_mat_store)
        sparse_out = mask * dense_out
        # print("attn prune ratio: {}".format(1 - mask.float().mean()))
        # ctx.sparse_out = SparseTensor(sparse_out, mask)
        # Store only the surviving entries (plus shape/mask) for backward.
        shape, mask_, sparse = sparsify(sparse_out, mask)
        ctx.save_for_backward(shape, mask_, sparse)
        # save sparse activation, but forward with dense
        return dense_out, mask
    @staticmethod
    def backward(ctx, grad_in, *args):
        # *args absorbs the gradient w.r.t. the returned mask (unused).
        shape, mask, sparse = ctx.saved_tensors
        sparse_out = unsparsify(shape, mask, sparse)
        # sparse_out = ctx.sparse_out
        # sparse_out = sparse_out.to_dense()
        # del ctx.sparse_out
        shape_A = sparse_out.shape
        unsqueeze_cnt = len(shape_A) - 1
        # NOTE(review): `eye` belongs to the commented-out dense-Jacobian path
        # below and is unused by the merged formula -- dead computation.
        eye = torch.eye(shape_A[-1]).to(sparse_out.device)
        for _ in range(unsqueeze_cnt):
            eye = eye.unsqueeze(0)
        A = sparse_out
        # grad_Softmax = (A * (1 - A)).unsqueeze(-1) * eye - (A.unsqueeze(-1) * A.unsqueeze(-2)) * (1 - eye)
        # grad_out = (grad_in.unsqueeze(-2) @ grad_Softmax).squeeze(-2)
        # merge togather
        # set_trace()
        # Softmax Jacobian-vector product without materialising the Jacobian.
        grad_out = grad_in * A - ((grad_in.unsqueeze(-2) @ A.unsqueeze(-1)) @ A.unsqueeze(-2)).squeeze(-2)
        return grad_out, None
class LinearFunctionActivationPrune(torch.autograd.Function):
    """Linear op whose forward uses the dense input but whose weight gradient
    is computed from a pruned, sparsely-stored copy of that input."""
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None, input_prune=None, mask_prune=None):
        # ctx.save_for_backward(input_prune, weight, bias)
        # ctx.save_for_backward(weight, bias)
        # ctx.input_prune = SparseTensor(input_prune, mask_prune)
        # Store only the surviving entries of the pruned input.
        shape, mask, sparse = sparsify(input_prune, mask_prune)
        ctx.save_for_backward(weight, bias, shape, mask, sparse)
        output = torch.matmul(input, weight.t())
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        # input_prune, weight, bias = ctx.saved_tensors
        # weight, bias = ctx.saved_tensors
        # input_prune = ctx.input_prune.to_dense()
        # del ctx.input_prune
        weight, bias, shape, mask, sparse = ctx.saved_tensors
        input_prune = unsparsify(shape, mask, sparse)
        grad_input = grad_weight = grad_bias = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        # print("grad_output shape is {}".format(grad_output.shape))
        if ctx.needs_input_grad[0]:
            grad_input = torch.matmul(grad_output, weight)
        if ctx.needs_input_grad[1]:
            # Weight gradient uses the pruned input copy.
            grad_weight = torch.matmul(grad_output.transpose(-2, -1), input_prune)
        if bias is not None and ctx.needs_input_grad[2]:
            # NOTE(review): for >2-D inputs, summing only dim 0 may leave
            # extra batch dims in grad_bias -- confirm intended shapes.
            grad_bias = grad_output.sum(0)
        return grad_input, grad_weight, grad_bias, None, None
class MatMulActivationPrune(torch.autograd.Function):
    """matmul(A, B) that is exact in the forward pass but computes both input
    gradients from pruned, sparsely-stored copies of A and B."""
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, A, B, A_prune=None, A_prune_mask=None, B_prune=None, B_prune_mask=None):
        # ctx.A_prune = SparseTensor(A_prune, A_prune_mask)
        # ctx.B_prune = SparseTensor(B_prune, B_prune_mask)
        # Store only the surviving entries of each pruned operand.
        A_shape, A_mask, A_sparse = sparsify(A_prune, A_prune_mask)
        B_shape, B_mask, B_sparse = sparsify(B_prune, B_prune_mask)
        ctx.save_for_backward(A_shape, A_mask, A_sparse, B_shape, B_mask, B_sparse)
        output = torch.matmul(A, B)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        # A_prune = ctx.A_prune.to_dense()
        # del ctx.A_prune
        # B_prune = ctx.B_prune.to_dense()
        # del ctx.B_prune
        grad_A = grad_B = None
        A_shape, A_mask, A_sparse, B_shape, B_mask, B_sparse = ctx.saved_tensors
        A_prune = unsparsify(A_shape, A_mask, A_sparse)
        B_prune = unsparsify(B_shape, B_mask, B_sparse)
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            # dL/dA = dL/dY @ B^T, using the pruned B.
            grad_A = torch.matmul(grad_output, B_prune.transpose(-2, -1))
        if ctx.needs_input_grad[1]:
            # dL/dB = A^T @ dL/dY, using the pruned A.
            grad_B = torch.matmul(A_prune.transpose(-2, -1), grad_output)
        return grad_A, grad_B, None, None, None, None
def swish(x):
    """SiLU/swish activation: x * sigmoid(x), applied elementwise."""
    return torch.sigmoid(x) * x

# Lookup table mapping config strings to activation callables.
ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class LinearActivationPrune(Linear):
    """nn.Linear whose backward consumes a pruned copy of its input (see
    LinearFunctionActivationPrune); the forward output stays exact/dense."""
    def forward(self, input, input_prune, mask_prune):
        # input: dense activations for the forward product;
        # input_prune/mask_prune: pruned copy + keep-mask stored for backward.
        return LinearFunctionActivationPrune.apply(input, self.weight, self.bias, input_prune, mask_prune)
class MlpActivationPrune(nn.Module):
    """Transformer MLP block (fc1 -> GELU -> dropout -> fc2 -> dropout) whose
    linear layers save magnitude-pruned inputs for the backward pass."""
    def __init__(self, config, prune_ratio_act_store):
        super(MlpActivationPrune, self).__init__()
        # Fraction of each stored activation to prune before saving.
        self.prune_ratio_act_store = prune_ratio_act_store
        self.fc1 = LinearActivationPrune(config.hidden_size, config.transformer["mlp_dim"])
        self.fc2 = LinearActivationPrune(config.transformer["mlp_dim"], config.hidden_size)
        self.act_fn = ACT2FN["gelu"]
        self.dropout = Dropout(config.transformer["dropout_rate"])
        self._init_weights()
    def _init_weights(self):
        # Xavier-uniform weights, near-zero normal biases.
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.xavier_uniform_(self.fc2.weight)
        nn.init.normal_(self.fc1.bias, std=1e-6)
        nn.init.normal_(self.fc2.bias, std=1e-6)
    def forward(self, x):
        # Each linear gets the dense x for the forward product plus a pruned
        # copy (and its mask) that is stored for the backward pass.
        mask = activation_prune(x, self.prune_ratio_act_store)
        x_prune = mask.detach() * x
        # print("mask.detach() prune ratio is {}".format(mask.detach().float().mean()))
        x = self.fc1(x, x_prune, mask.detach())
        x = self.act_fn(x)
        x = self.dropout(x)
        mask = activation_prune(x, self.prune_ratio_act_store)
        x_prune = mask.detach() * x
        x = self.fc2(x, x_prune, mask.detach())
        x = self.dropout(x)
        return x
class AttentionStoreActivationPrune(nn.Module):
    """Multi-head self-attention that forwards with dense activations but
    stores magnitude-pruned copies of every intermediate (Q/K/V inputs, the
    attention matrix, the context) for use in the backward pass."""
    def __init__(self, config, vis, prune_ratio_attn_mat_store=0, prune_ratio_act_store=0):
        super(AttentionStoreActivationPrune, self).__init__()
        # When vis is truthy, forward() also returns the attention weights.
        self.vis = vis
        # prune ratio for stored attn matrix
        self.prune_ratio_attn_mat_store = prune_ratio_attn_mat_store
        # prune ratio for query, key, value, input activation matrix
        self.prune_ratio_act_store = prune_ratio_act_store
        self.num_attention_heads = config.transformer["num_heads"]
        self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = LinearActivationPrune(config.hidden_size, self.all_head_size)
        self.key = LinearActivationPrune(config.hidden_size, self.all_head_size)
        self.value = LinearActivationPrune(config.hidden_size, self.all_head_size)
        self.out = LinearActivationPrune(config.hidden_size, config.hidden_size)
        self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
        self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
        # self.softmax = Softmax(dim=-1)
    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(self, hidden_states):
        # magnitude based prune activation
        mask = activation_prune(hidden_states, self.prune_ratio_act_store)
        hidden_states_prune = mask.detach() * hidden_states
        # dense activate forward
        mixed_query_layer = self.query(hidden_states, hidden_states_prune, mask.detach())
        mixed_key_layer = self.key(hidden_states, hidden_states_prune, mask.detach())
        mixed_value_layer = self.value(hidden_states, hidden_states_prune, mask.detach())
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        #print(query_layer.shape)
        # magnitude based prune activation
        with torch.no_grad():
            mask_query = activation_prune(query_layer, self.prune_ratio_act_store)
            mask_key = activation_prune(key_layer, self.prune_ratio_act_store)
        query_layer_prune = mask_query.detach() * query_layer
        key_layer_prune = mask_key.detach() * key_layer
        # # dense activate forward
        # with torch.no_grad():
        #     attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)).detach()
        #
        # # pruned activate backward
        # attention_scores += torch.matmul(query_layer_prune, key_layer_prune.transpose(-1, -2)) - \
        #     torch.matmul(query_layer_prune, key_layer_prune.transpose(-1, -2)).detach()
        # Q @ K^T: exact forward, pruned copies stored for backward.
        attention_scores = MatMulActivationPrune.apply(query_layer, key_layer.transpose(-1, -2),
                                                       query_layer_prune, mask_query.detach(),
                                                       key_layer_prune.transpose(-1, -2), mask_key.detach().transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # dense activation forward and prune activation backward
        attention_probs, mask_attention_probs = SoftmaxActivationPrune.apply(attention_scores, self.prune_ratio_attn_mat_store)
        attention_probs_prune = attention_probs * mask_attention_probs
        # debug use
        # attention_probs = Softmax(dim=-1)(attention_scores)
        # attention_probs_prune = attention_probs
        weights = attention_probs if self.vis else None
        attention_probs = self.attn_dropout(attention_probs)
        # magnitude based prune activation
        mask = activation_prune(value_layer, self.prune_ratio_act_store)
        value_layer_prune = mask.detach() * value_layer
        # # dense activate forward
        # with torch.no_grad():
        #     context_layer = torch.matmul(attention_probs, value_layer).detach()
        #
        # # pruned activate backward
        # context_layer += torch.matmul(attention_probs_prune, value_layer_prune) - \
        #     torch.matmul(attention_probs_prune, value_layer_prune).detach()
        # P @ V: exact forward, pruned copies stored for backward.
        context_layer = MatMulActivationPrune.apply(attention_probs, value_layer,
                                                    attention_probs_prune, mask_attention_probs,
                                                    value_layer_prune, mask.detach())
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        # magnitude based prune activation
        mask = activation_prune(context_layer, self.prune_ratio_act_store)
        context_layer_prune = mask.detach() * context_layer
        attention_output = self.out(context_layer, context_layer_prune, mask.detach())
        attention_output = self.proj_dropout(attention_output)
        return attention_output, weights
def test_activation_prune():
    """Smoke test: print configured vs. realised prune ratio on random data."""
    sample = torch.rand(2, 100, 100)
    ratio = 0.7
    keep_mask = activation_prune(sample, ratio)
    print("prune ratio set is {}, real is {}".format(ratio, 1 - keep_mask.float().mean()))
if __name__ == "__main__":
    # Manual smoke-test entry point.
    test_activation_prune()
|
# Pot - Discord bot which is pog. Pot.
# Version - 0.1
# Licensed under the MIT license - https://opensource.org/licenses/MIT
#
# Copyright (c) 2021 Vidhu Kant Sharma
import traceback
import discord
from discord.ext import commands
# to import from parent directory
import sys
sys.path.append('..')
# import ../phrases.py
# Optional user-supplied phrases: fall back to built-in defaults when the
# phrases module (or a specific name in it) is missing.
try:
    from phrases import PING_MESSAGE
except ImportError:
    PING_MESSAGE = "pong"
try:
    from phrases import COMMAND_NOT_FOUND_MESSAGE
except ImportError:
    COMMAND_NOT_FOUND_MESSAGE = "I don't know that command?!"
class Commands(commands.Cog):
    """Basic bot commands plus a cog-wide command-error handler."""
    def __init__(self, bot):
        self.bot = bot
    # handle all command exceptions
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        # Unknown commands get a friendly reply; everything else is logged
        # with a full traceback so real bugs are not silently swallowed.
        if isinstance(error, commands.CommandNotFound):
            await ctx.send(COMMAND_NOT_FOUND_MESSAGE)
        else:
            print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
    @commands.command()
    async def ping(self, ctx):
        # Liveness check: reply with the configured ping message.
        await ctx.send(PING_MESSAGE)
    @commands.command()
    async def ctx(self, ctx):
        # Debug helper: dump the invocation context to the channel and stdout.
        await ctx.send(ctx.__dict__)
        print(ctx.__dict__)
def setup(bot):
    # discord.py extension entry point: register the Commands cog on load.
    bot.add_cog(Commands(bot))
|
import torch
from torch import nn
import torch.nn.functional as F
from .functions import ConvMotionFunction, InvConvMotionFunction, ConvClsFunction, LineFunction
import numpy as np
class NUConv2d(nn.Module):
    """Non-uniform convolution driven by per-location magnitude/orientation maps.

    Thin nn.Module wrapper around the custom ConvMotionFunction autograd op.
    """

    def __init__(self):
        super(NUConv2d, self).__init__()

    def forward(self, x, mag, ori):
        """Apply the motion-guided convolution to `x`."""
        out = ConvMotionFunction.apply(x, mag, ori)
        return out
class InvNUConv2d(nn.Module):
    """Inverse motion convolution with a trainable weight and a frozen zero bias."""

    def __init__(self, weight):
        super(InvNUConv2d, self).__init__()
        self.weight = nn.Parameter(weight)
        zero_bias = torch.zeros(self.weight.size(0)).float()
        self.bias = nn.Parameter(zero_bias, requires_grad=False)

    def forward(self, x, labels):
        # NOTE(review): the exact role of `labels` lives inside the custom
        # autograd Function — presumably per-region kernel selection; confirm.
        unique_labels = labels.unique().float()
        return InvConvMotionFunction.apply(x, labels, unique_labels, self.weight, self.bias)
class ConvMotion(nn.Module):
    """Concatenate the motion-guided convolution with a plain conv2d response."""

    def __init__(self, weight):
        super(ConvMotion, self).__init__()
        # Stored as a plain tensor (not an nn.Parameter): not trained here.
        self.weight = weight

    def forward(self, x, mag, ori):
        motion_out = ConvMotionFunction.apply(x, mag, ori)
        conv_out = F.conv2d(F.pad(x, (2, 2, 2, 2), 'replicate'), self.weight)
        return torch.cat([motion_out, conv_out], 1)
class ConvCls(nn.Module):
    """Class-conditional convolution: weight loaded from disk, frozen zero bias."""

    def __init__(self, weightpath):
        super(ConvCls, self).__init__()
        self.weight = nn.Parameter(torch.load(weightpath))
        self.bias = nn.Parameter(torch.zeros(self.weight.size(0)), requires_grad=False)

    def forward(self, x, labels):
        unique_labels = labels.unique().float()
        return ConvClsFunction.apply(x, labels, unique_labels, self.weight, self.bias)
class Line(nn.Module):
    """Piecewise-linear lookup initialized as a soft-shrink curve.

    `weightx` is a fixed sample grid from -500/255 to 500/255 (step 10/255);
    `weighty` starts as softshrink(weightx) replicated per (label, channel)
    and is trainable.
    """

    def __init__(self, lambd, beta, N_l, N_c):
        super(Line, self).__init__()
        self.weightx = nn.Parameter(torch.arange(-500, 510, 10, dtype=torch.float) / 255, requires_grad=False)
        shrink_threshold = lambd / max(beta, 1e-4)
        grid = self.weightx.view(1, 1, -1).repeat(N_l, N_c, 1)
        self.weighty = nn.Parameter(F.softshrink(grid, shrink_threshold).contiguous())

    def forward(self, input, labels):
        return LineFunction.apply(input, labels, self.weightx, self.weighty)
from ws.RLUtils.monitoring.graphing.data_compaction.compaction_mgt import compaction_mgt
def plugin_for_skipping_mgt():
    """Build a stream compressor that represents each pipe of points by the
    sample at its median index (for both the x index and per-strand y values)."""
    _mid_idx = None

    def fn_compute_yval(number_of_entries, strand_num, yvals_for_strands):
        # Take the strand's y value at the median position of the pipe.
        return yvals_for_strands[strand_num][_mid_idx]

    def fn_compute_xindex(x_index_list_for_pipe):
        # Take the x index at the median position of the pipe.
        return x_index_list_for_pipe[_mid_idx]

    fn_compress_stream_data = compaction_mgt(fn_compute_xindex, fn_compute_yval)

    def fn_compress(x_index_list_for_pipe, y_vals_list_for_pipe):
        nonlocal _mid_idx
        # Remember the median index before delegating to the callbacks above.
        _mid_idx = len(x_index_list_for_pipe) // 2
        return fn_compress_stream_data(x_index_list_for_pipe, y_vals_list_for_pipe)

    return fn_compress
|
from .na_xerath_top import *
from .na_xerath_jng import *
from .na_xerath_mid import *
from .na_xerath_bot import *
from .na_xerath_sup import *
|
# coding: utf-8
from dataclasses import dataclass
from bookworm import typehints as t
from bookworm import app
from bookworm.paths import app_path
from bookworm.reader import get_document_format_info
from . import PLATFORM
@dataclass
class SupportedFileFormat:
    """Describes one document format for OS shell integration."""

    format: str  # internal format identifier; also the icon file's basename
    ext: str     # extension pattern as registered by the document class
    name: str    # translatable human-readable format name

    @property
    def icon(self):
        """Path to this format's icon, falling back to the generic file icon."""
        ficos_path = app_path("resources", "icons")
        icon = ficos_path.joinpath(f"{self.format}.ico")
        return icon if icon.exists() else ficos_path.joinpath("file.ico")

    @property
    def ext_prog_id(self):
        """App-scoped ProgID registered for this format."""
        return f"{app.prog_id}.{self.format}"

    @property
    def display_name(self):
        """Translated display name."""
        return _(self.name)

    def astuple(self):
        """Return (prog_id, display name, icon path) for shell registration.

        Bug fix: display_name is already translated by its property, so the
        original `_(self.display_name)` translated it a second time — at best
        redundant, at worst a mistranslation of an already-localized string.
        """
        return (self.ext_prog_id, self.display_name, str(self.icon))
def get_ext_info(supported="*"):
    """Map clean file extensions to shell-registration tuples.

    `supported` is either "*" (all extensions) or a container of clean
    extensions (without the leading "*") to include.
    """
    doctypes = {}
    eligible = [
        doc_cls
        for doc_cls in get_document_format_info().values()
        if not doc_cls.__internal__ and doc_cls.extensions is not None
    ]
    for doc_cls in eligible:
        for ext in doc_cls.extensions:
            clean_ext = ext.replace("*", "")
            if supported == "*" or clean_ext in supported:
                fmt = SupportedFileFormat(doc_cls.format, ext, doc_cls.name)
                doctypes[clean_ext] = fmt.astuple()
    return doctypes
# Select the platform-specific shell-integration implementation at import time.
if PLATFORM == "win32":
    from ._win32.shell import shell_integrate, shell_disintegrate
elif PLATFORM == "linux":
    from ._linux.shell import shell_integrate, shell_disintegrate
|
# This software was developed by employees of the National Institute
# of Standards and Technology (NIST), an agency of the Federal
# Government. Pursuant to title 17 United States Code Section 105, works
# of NIST employees are not subject to copyright protection in the United
# States and are considered to be in the public domain. Permission to freely
# use, copy, modify, and distribute this software and its documentation
# without fee is hereby granted, provided that this notice and disclaimer
# of warranty appears in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND,
# EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED
# TO, ANY WARRANTY THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# AND FREEDOM FROM INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION
# WILL CONFORM TO THE SOFTWARE, OR ANY WARRANTY THAT THE SOFTWARE WILL BE
# ERROR FREE. IN NO EVENT SHALL NIST BE LIABLE FOR ANY DAMAGES, INCLUDING,
# BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES,
# ARISING OUT OF, RESULTING FROM, OR IN ANY WAY CONNECTED WITH THIS
# SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY, CONTRACT, TORT, OR
# OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY PERSONS OR PROPERTY
# OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED FROM, OR AROSE OUT
# OF THE RESULTS OF, OR USE OF, THE SOFTWARE OR SERVICES PROVIDED HEREUNDER.
#
# Distributions of NIST software should also include copyright and licensing
# statements of any third-party software that are legally bundled with
# the code in compliance with the conditions of those licenses.
import os
import sys
import argparse
import nptdms
import pymongo
import json
import nptdms
import timezone
import datetime
import time
import numpy as np
import math
import csv
from nptdms import TdmsFile
db = pymongo.MongoClient()
def drop_metadata(dataset_name):
    """Drop the metadata collection for the given dataset.

    NOTE(review): get_metadata(...).drop() already drops the collection, so
    the drop_collection call below looks redundant — confirm before removing.
    """
    get_metadata(dataset_name).drop()
    db.metadata.drop_collection("metadata." + dataset_name)
def purge_dataset(dataset_name):
    """
    Drop the data and metadata corresponding to a dataset.
    """
    # Drop the per-dataset metadata collection, then delete the descriptor.
    get_metadata(dataset_name).drop()
    # remove() is the legacy pymongo API (delete_many in pymongo >= 3).
    get_datasets().remove({"name":dataset_name})
def get_metadata(dataset_name):
    """Return (creating on demand) the metadata collection for a dataset."""
    collection_name = "metadata." + dataset_name
    if collection_name not in db.metadata.collection_names():
        db.metadata.create_collection(collection_name)
    return db.metadata[collection_name]
def get_datasets():
    """Return (creating on demand) the collection of dataset descriptors."""
    if "datasets" not in db.metadata.collection_names():
        db.metadata.create_collection("datasets")
    return db.metadata["datasets"]
def dataset_exists(dataset_name):
    """
    return true if a dataset descriptor exists.
    """
    descriptor = get_datasets().find_one({"name": dataset_name})
    return descriptor is not None
def extract_prefix(filename, prefix):
    """Return `filename` with the trailing `prefix` removed.

    Despite the parameter name, `prefix` is a *suffix* such as
    "_MaxSpectra.tsv"; the name is kept for caller compatibility.
    """
    return filename[:len(filename) - len(prefix)]
def get_month(month):
    """
    convert from a month string to a month integer.

    Accepts three-letter abbreviations ("Jan" .. "Dec") and returns 1-12.
    Raises Exception (same contract as before) for anything else.
    """
    # Table lookup replaces the original 12-branch elif chain.
    months = {
        "Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
        "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12,
    }
    try:
        return months[month]
    except KeyError:
        raise Exception("Invalid month string " + month)
def extract_timestamp(prefix):
    """
    Return a python date-time structure corresponding to a file name
    prefix.

    Expected layout (e.g. "SAN11Apr16_105422"): 3-char site code, 2-digit
    day, 3-letter month, 2-digit year, '_', then HHMMSS.
    """
    # Bug fix: str.index raises ValueError instead of returning -1, so the
    # original sanity check could never fire; use find() instead. The py2
    # print statement was also converted to a py2/py3-compatible call.
    pos = prefix.find('_')
    if pos == -1:
        print("Invalid prefix. Must be of the form Name_xxxxxx where xxxxxx is a timestamp")
    # Characters 4 & 5 are the day, e.g. 11
    day = int(prefix[3:5])
    # Characters 6, 7 & 8 are the month, e.g. Apr
    month = get_month(prefix[5:8])
    # Characters 9 & 10 are the year, e.g. 16
    year = int("20" + prefix[8:10])
    # Character 11 is always an underscore; 12 & 13 are the 24h hour
    hour = int(prefix[11:13])
    # Characters 14 & 15 are the minutes; 16 & 17 the seconds
    minutes = int(prefix[13:15])
    seconds = int(prefix[15:17])
    return datetime.datetime(year, month, day, hour, minutes, seconds)
def extract_date(prefix):
    """Return the date portion of a file-name prefix (e.g. "11Apr16" from
    "SAN11Apr16_105422"), or None when no '_' separator is present.

    Bug fixes: str.index never returns -1 (use find, as the check intended);
    `prefix[3,pos]` was a TypeError (tuple index) instead of a slice; and the
    extracted string was never returned.
    """
    pos = prefix.find('_')
    if pos == -1:
        print("Invalid prefix. Must be of the form Name_xxxxxx where xxxxxx is a timestamp")
        return None
    return prefix[3:pos]
def debug_print_files(folder):
    """Print every existing file path under `folder`, recursively.

    Bug fixes: os.walk already descends into subdirectories, so the explicit
    recursion on bare subfolder *names* (which are not valid paths from here)
    was removed; os.path.join replaces manual "/" concatenation for the
    existence check; and the Python-2 print statement was converted to a
    py2/py3-compatible call.
    """
    for folderName, subfolders, filenames in os.walk(folder):
        for filename in filenames:
            filepath = os.path.abspath(os.path.join(folderName, filename))
            if os.path.exists(filepath):
                print(folderName + "/" + filename)
def configure(googleApiKey):
    """Persist the Google Timezone API key to ~/.sdbconfig as JSON."""
    pathname = os.environ.get("HOME") + "/.sdbconfig"
    with open(pathname, "w") as config:
        # The original round-tripped through json.dumps/json.loads before
        # dumping — a no-op; dump the dict directly.
        json.dump({'GOOGLE_TIMEZONE_API_KEY': googleApiKey}, config)
def create_dataset(dataset_name=None,
                   lat=None,
                   lon=None,
                   alt=None,
                   instrument_tz=None,
                   antenna=None,
                   gain=None,
                   #reflevel_dbm=None,
                   flo_mhz=None,
                   fmin=None,
                   fmax=None,
                   sample_rate=None,
                   fft_size=None):
    """
    Create a dataset if it does not exist. Throw exception if it does
    exist.

    Stores a descriptor document in the "datasets" collection with the
    location, timezone, antenna/gain and acquisition (frequency range, LO,
    sample rate, FFT size) parameters passed in.
    """
    if dataset_exists(dataset_name):
        raise Exception("Dataset is already present")
    datasets = get_datasets()
    info = {}
    info["name"] = dataset_name
    info["lat"] = lat
    info["lon"] = lon
    info["alt"] = alt
    info["instrumentTz"] = instrument_tz
    # Resolve the measurement-site timezone via the local `timezone` helper
    # module (a Google Timezone API call — requires network and the API key
    # written by configure()).
    tzId, tzName = timezone.getLocalTimeZoneFromGoogle(int(time.time()),
                                                       lat,lon)
    info["measurementTz"] = tzId
    info["antenna"] = antenna
    info["gain"] = gain
    #info["reflevel_dbm"] = reflevel_dbm
    info["fmin"] = fmin
    info["fmax"] = fmax
    info["flo_mhz"]= flo_mhz
    info["sample_rate"] = sample_rate
    info["fft_size"] = fft_size
    # insert() is the legacy pymongo API (insert_one in pymongo >= 3).
    datasets.insert(info)
def get_dataset(dataset_name):
    """
    Get the dataset descriptor for a given dataset_name.
    """
    # NOTE(review): this definition is shadowed at import time by a second
    # get_dataset defined later in this file, which returns None instead of
    # raising for a missing dataset. Only one of the two should survive;
    # callers relying on the exception below never get it.
    retval = get_datasets().find_one({"name":dataset_name})
    if retval is None:
        raise Exception("Dataset " + dataset_name + " not found ")
    return retval
def compute_peak_stats_worker(fname,fmin,fmax,flo_mhz,fft_size,sample_rate,gaindB):
    """Compute peak and per-frequency power statistics for one spectra file.

    fname: tab-separated text file of FFT magnitudes (rows = sweeps,
    columns = FFT bins). fmin/fmax/flo_mhz are in MHz; gaindB is the net
    front-end gain in dB. Returns a dict with the overall peak power (dBm)
    and its frequency, plus per-frequency mean power (dBm) and interquartile
    range (dB) at 10 MHz steps from fmin to fmax inclusive.
    """
    # convert amplifier gain to linear units
    Vgain=pow(10,(gaindB/20.0))
    # VST calibration in dB
    VcaldB=1.64
    Vcal=pow(10,(VcaldB/20.0))
    # Load the data into a 2d array.
    temp = np.loadtxt(fname,dtype='float',delimiter='\t')
    # Normalize the data with fft size
    z = temp/fft_size
    #Apply calibrations for front end gain, cable loss, splitter loss
    z = z/Vgain
    z = z*Vcal
    # NOTE(review): z_len is never used below.
    z_len = len(z)
    #Frequency array for the FFT
    fmhz = (float(sample_rate)/fft_size)*np.arange(-fft_size/2,fft_size/2) + flo_mhz
    # Power values in dbm
    # (20*log10(V) - 10*log10(100) + 30: volts into a 100-ohm load to dBm,
    # presumably — confirm the load impedance against the hardware setup.)
    z_dbm = 20*np.log10(z) - 10*np.log10(100) + 30
    #set fc to frequency of peak power between 3520 MHz and fLO+100 MHz,
    #excluding the LO. The following will return an index array of
    #non-contiguous elements.
    fj = ((fmhz >= 3520) & (fmhz <= flo_mhz+100) & ((fmhz < flo_mhz-1) |
        (fmhz > flo_mhz+1))).nonzero()
    # fmhz_j is the frequencies of interest in our range
    fmhz_j = fmhz[fj]
    # slice the array to the indices of interest. All rows and a subset of
    # columns in the range of interest.
    sliced_array = z_dbm[:,fj[0]]
    # compute the 2d index value of the max location. argmax retuns a 1-d
    # result. unravel_index gives the 2d index values.
    fci = np.unravel_index(np.argmax(sliced_array),np.shape(sliced_array))
    # The max power value at the location of interest.
    pmax_dbm = np.round(sliced_array[fci],decimals=1)
    # Find the center frequency where the power is max.
    fc_mhz = fmhz_j[fci[1]]
    # The frequencies of interest where we want to compute statistics
    fi = np.arange(fmin,fmax + 10,10)
    # initialize indices. These are indices which fmhz is closest to fi
    r = fmhz.searchsorted(fi)
    # r now contains FFT indices that are close to the frequencies of interest.
    # Adjust the indices to be closest to the frequencies of interest by
    # looking in the neighborhood. j is the fft freqindices that are
    # closest to the indices of interest.
    j = []
    k = 0
    for m in r:
        d = abs(fi[k] - fmhz[m])
        d1 = abs(fi[k] - fmhz[m-1])
        if d<=d1:
            j.append(m)
        else:
            j.append(m-1)
        k = k +1
    # now compute the power vector for each column
    zisq = np.power(z[:,j],2)
    # Compute the mean for each column
    zsq_avg = np.mean(zisq,axis=0)
    # Compute the 75th. percentile for each column
    zsq_75 = np.percentile(zisq,75,axis=0)
    # Compute the 25th. percentile for each column
    zsq_25 = np.percentile(zisq,25,axis=0)
    # Compute the mean value in dbm
    pmean_dBm=np.round(10*np.log10(zsq_avg)-10*np.log10(100)+30,decimals=1)
    # Compute the inter quartile range for each column
    iqr_dBm=np.round(10*np.log10(zsq_75)-10*np.log10(zsq_25),decimals=1)
    retval = {}
    retval["filename"] = fname
    retval["pmax_dbm"] = pmax_dbm.tolist()
    retval["fpeak_mhz"] = np.round(fc_mhz,decimals=0)
    retval["pmean_dbm"] = pmean_dBm.tolist()
    retval["iqr_dbm"] = iqr_dBm.tolist()
    return retval
def compute_peak_stats(dataset, fname):
    """Unpack the dataset descriptor's acquisition fields and delegate to
    compute_peak_stats_worker for the given spectra file."""
    return compute_peak_stats_worker(
        fname,
        dataset['fmin'],
        dataset['fmax'],
        dataset["flo_mhz"],
        dataset["fft_size"],
        dataset["sample_rate"],
        dataset["gain"],
    )
def recursive_walk_metadata(widget, dataset_name,folder):
    """
    Walk through the subfolders picking up the metadata and populating
    the metadata as a JSON document into mongodb.

    widget: optional GUI text widget for progress output (None = console).
    Recognized files: *_MaxSpectra.tsv, *_PeakAmplitude.txt, *.tdms; the
    shared file-name prefix keys the metadata record.
    """
    if not dataset_exists(dataset_name):
        raise Exception("dataset " + dataset_name + " not found. Create it.")
    dataset = get_dataset(dataset_name)
    for folderName, subfolders, filenames in os.walk(folder):
        # NOTE(review): os.walk already descends into subdirectories, so this
        # explicit recursion likely duplicates work — and it passes the bare
        # subfolder *name*, not its path. Confirm and remove.
        if subfolders:
            for subfolder in subfolders:
                recursive_walk_metadata(widget,dataset_name,subfolder)
        for filename in filenames:
            pathname = os.path.abspath(folderName + "/" + filename)
            # Classify the file by its suffix and extract the record prefix.
            if filename.endswith("_MaxSpectra.tsv"):
                prefix = extract_prefix(filename, "_MaxSpectra.tsv")
                metadataType = "MaxSpectra"
            elif filename.endswith("_PeakAmplitude.txt"):
                prefix = extract_prefix(filename,"_PeakAmplitude.txt")
                metadataType = "PeakAmplitude"
            elif filename.endswith(".tdms"):
                prefix = extract_prefix(filename,".tdms")
                metadataType = "tdms"
                # NOTE(review): upstream nptdms.TdmsFile has no
                # getJsonMetadata(); presumably a locally patched build —
                # verify.
                tdmsMetadata = TdmsFile(pathname).getJsonMetadata()
            else:
                continue
            query = {"prefix":prefix}
            metadata = get_metadata(dataset_name).find_one(query)
            if widget is None:
                print "Processing ", prefix
            else:
                import dbgui
                widget.insertPlainText(prefix + "\n")
                dbgui.processQtEvents()
            if metadata is None:
                # First file for this prefix: build a fresh record with
                # instrument and measurement-local timestamps.
                metadata = {}
                metadata["prefix"] = prefix
                metadata[metadataType] = pathname
                date = extract_timestamp(prefix)
                ts = time.mktime(date.timetuple())
                metadata["instrumentDate"] = str(date)
                metadata["instrumentTimeStamp"] = ts
                target_tz = dataset["measurementTz"]
                instrument_tz = dataset["instrumentTz"]
                targetTimestamp = timezone.translateTime(
                    date,instrument_tz,target_tz)
                localTimestamp = time.mktime(targetTimestamp.timetuple())
                metadata["measurementDate"] = str(targetTimestamp)
                metadata["measurementTimeStamp"] = localTimestamp
                lat = dataset["lat"]
                lon = dataset["lon"]
                #universalTimestamp = timezone.getUniversalTimeAtLocation(
                #    localTimestamp,lat,lon)
                #metadata["universalTimeStamp"] = universalTimestamp
                if metadataType == "tdms":
                    metadata["tdmsMetadata"] = tdmsMetadata
                elif metadataType == "MaxSpectra":
                    metadata["maxSpectraStats"] = compute_peak_stats(
                        dataset,pathname)
                get_metadata(dataset_name).insert(metadata)
            else:
                # Existing record: attach this file (and stats) and update.
                metadata[metadataType] = pathname
                if metadataType == "tdms":
                    metadata["tdmsMetadata"] = tdmsMetadata
                elif metadataType == "MaxSpectra":
                    metadata["maxSpectraStats"] = compute_peak_stats(
                        dataset,pathname)
                get_metadata(dataset_name).update({"prefix":prefix},
                    metadata, upsert = False)
def list_datasets():
    """Return all dataset descriptors with Mongo's _id field stripped."""
    result = []
    # find() always returns a cursor (never None), and cursor.count() is
    # deprecated/removed in modern pymongo — the original guard was dead
    # weight; iterating an empty cursor simply yields nothing.
    for dataset in get_datasets().find():
        del dataset["_id"]
        result.append(dataset)
    return result
def get_dataset(datasetName):
    """Return the dataset descriptor, or None when it does not exist."""
    # NOTE(review): this redefinition shadows the earlier get_dataset in this
    # file, which raised for a missing dataset instead of returning None.
    # Callers written against the first definition should be checked.
    datasets = get_datasets()
    return datasets.find_one({"name":datasetName})
def get_metadata_list(dataset_name):
    """Return every metadata record for a dataset, with _id stripped."""
    records = []
    cursor = get_metadata(dataset_name).find()
    for record in cursor:
        del record["_id"]
        records.append(record)
    return records
def print_datasets():
    """Pretty-print every dataset descriptor (without _id) to stdout."""
    for dataset in get_datasets().find():
        del dataset["_id"]
        # Parenthesized call: the original Python-2 print statement is a
        # syntax error on Python 3.
        print(json.dumps(dataset, indent=4))
def dump_db(dataset_name):
    """
    Dump the metadata corresponding to the dataset provided in the
    argument.
    """
    for record in get_metadata(dataset_name).find():
        del record["_id"]
        print(json.dumps(record, indent=4))
def import_csv_file(dataset_name,csv_file_name):
    """
    import the xl file metadata into the dataset.

    The CSV export carries two heading rows; the second labels the columns
    (Radar 1 triples, Radar 3 presence, Comments, File name, Ref Lvl). For
    each data row, the matching metadata record (keyed by the .tdms prefix)
    is augmented with RADAR1/RADAR3/Comments/refLvl fields.

    Fixes vs. the original:
      * `raise "..."` raises TypeError in modern Python (string exceptions
        were removed) — real Exception objects are raised instead.
      * Python-2 print statements replaced with py2/py3-compatible calls.
      * The no-op `try: ... except: raise` wrapper was removed.
    """
    metadataRecords = get_metadata(dataset_name)
    if metadataRecords is None:
        raise Exception("No metadata found")
    with open(csv_file_name) as f:
        f_csv = csv.reader(f)
        headings = next(f_csv)          # first heading row (unused)
        headings1 = next(f_csv)         # second heading row: column labels
        radar1Indices = []
        radar3Index = -1
        commentsIndex = -1
        fileNameIndex = -1
        refLvlIndex = -1
        # Locate the columns of interest from the label row.
        for i, head in enumerate(headings1):
            heading = head.strip()
            if heading.startswith("Radar 1"):
                radar1Indices.append(i)
            elif heading.startswith("Radar 3 present"):
                radar3Index = i
            elif heading.startswith("Comments"):
                commentsIndex = i
            elif heading.startswith("File name"):
                fileNameIndex = i
            elif heading.startswith("Ref Lvl"):
                refLvlIndex = i
        headings2 = next(f_csv)         # third heading row (unused)
        row = next(f_csv)
        if fileNameIndex == -1 or refLvlIndex == -1 or commentsIndex == -1:
            raise Exception("Invalid File Format")
        while row is not None:
            try:
                radar1 = []
                fileName = row[fileNameIndex]
                recordName = extract_prefix(fileName, ".tdms")
                metadata = metadataRecords.find_one({"prefix":recordName})
                if metadata is not None:
                    del metadata["_id"]
                    toUpdate = False
                    # Each Radar 1 group is three adjacent columns:
                    # fc (MHz), peak power (dBm), fade depth (dB).
                    for ri in radar1Indices:
                        fc = ri
                        peakPowerIndex = ri + 1
                        fadeDepthIndex = ri + 2
                        if row[fc] != "" and row[peakPowerIndex] != "" \
                           and row[fadeDepthIndex] != "":
                            radarRec = {"fc_mhz" : float(row[fc]) ,
                                        "peakPowerDbm": float(row[peakPowerIndex]),
                                        "fadeDepthDb" : float(row[fadeDepthIndex])}
                            radar1.append(radarRec)
                    if len(radar1) != 0 :
                        toUpdate = True
                        metadata["RADAR1"] = radar1
                    if commentsIndex > -1 and row[commentsIndex] != "":
                        toUpdate = True
                        metadata["Comments"] = row[commentsIndex]
                    if radar3Index > -1 and row[radar3Index] != "":
                        toUpdate = True
                        metadata["RADAR3"] = row[radar3Index]
                    if refLvlIndex > -1 and row[refLvlIndex] != "":
                        toUpdate = True
                        metadata["refLvl"] = float(row[refLvlIndex])
                    if len(radar1) != 0 and radar3Index == -1:
                        print("WARNING : RADAR3 entry not found - skipping entry")
                        toUpdate = False
                    if toUpdate :
                        print("Updating " + str(metadata))
                        metadataRecords.update({"prefix":recordName},metadata,upsert=False)
                row = next(f_csv)
            except StopIteration :
                # End of CSV: terminate the loop.
                row = None
def main():
    """Command-line entry point: dispatch the config / create / populate /
    drop / print / print-metadata / import sub-commands."""
    parser = argparse.ArgumentParser(description = "Setup the DB",
                                     add_help=False)
    subparsers = parser.add_subparsers()
    config_parser = subparsers.add_parser('config', help='config timzone API KEY')
    drop_parser = subparsers.add_parser('drop', help='drop the dataset')
    populate_parser = subparsers.add_parser('populate',
                                            help = 'populate dataset')
    print_parser = subparsers.add_parser('print', help = 'print all datasets')
    create_parser = subparsers.add_parser('create', help = 'create dataset')
    print_metadata_parser = subparsers.add_parser('print-metadata',help =
                                                  "print metadata for a dataset")
    import_parser = subparsers.add_parser('import',help = "import XLS"
                                          " annotations" )
    # Each sub-parser stamps its name into args.action for dispatch below.
    config_parser.set_defaults(action="config")
    drop_parser.set_defaults(action="drop")
    print_parser.set_defaults(action="print")
    populate_parser.set_defaults(action="populate")
    create_parser.set_defaults(action="create")
    print_metadata_parser.set_defaults(action="print-metadata")
    import_parser.set_defaults(action="import")
    config_parser.add_argument('-api-key',
                               required=True,
                               type = str, help = "Google Timezone API Key",
                               default = None)
    populate_parser.add_argument('-dir', type = str ,
                                 required=True,
                                 help = "root directory for the"
                                 " data", default=None)
    create_parser.add_argument('-dataset-name',
                               required=True,
                               type = str, help = "Dataset Name",
                               default = None)
    populate_parser.add_argument('-dataset-name',
                                 required=True,
                                 type = str, help = "Dataset Name",
                                 default = None)
    drop_parser.add_argument('-dataset-name',
                             required=True,
                             type = str, help = "Dataset Name",
                             default = None)
    print_metadata_parser.add_argument('-dataset-name',
                                       required=True,
                                       type = str, help = "Dataset Name",
                                       default = None)
    create_parser.add_argument('-lat', type = float,
                               required=True,
                               help = "latitude ", default=None)
    create_parser.add_argument('-lon', type = float,
                               required=True,
                               help = "longitude", default=None)
    create_parser.add_argument('-alt', type = float,
                               required=True,
                               help = "altitude (m)", default=None)
    create_parser.add_argument('-instrument-tz',
                               required=True,
                               type = str, help = "timezone ID for"
                               "measurement system (e.g. America/Denver). "
                               "Note: Do not use names such as EDT, MDT etc.", default = None)
    create_parser.add_argument('-gain', type = float,
                               required=True,
                               help = "net of front end gain, cable loss, splitter loss",
                               default = None)
    create_parser.add_argument('-fmin', type = float,
                               required=True,
                               help = "Min frequency (MHz)",
                               default = None)
    create_parser.add_argument('-fmax', type = float,
                               required=True,
                               help = "Max frequency (MHz)",
                               default = None)
    create_parser.add_argument('-antenna', type = str,
                               required=True,
                               help = "Antenna type (string)",
                               default = None)
    create_parser.add_argument('-flo-mhz', type = float,
                               required=True,
                               help = "local oscillator frequency in MHz",
                               default = None)
    create_parser.add_argument("-reflevel-dbm", type = float,
                               required=True,
                               help = "reference level of VST (dBm)",
                               default = None)
    create_parser.add_argument("-sample-rate", type=float,
                               required=True,
                               help = "sampling frequency in MHz",
                               default = None)
    create_parser.add_argument("-fft-size", type = int,
                               required=True,
                               help = "fft size",
                               default = None)
    import_parser.add_argument("-dataset-name",
                               type = str, help = "Dataset Name",
                               default = None)
    import_parser.add_argument("-csv-file-name",
                               type = str, help = "csv file Name exported from excell",
                               default = None)
    args = parser.parse_args()
    # Bug fix: on Python 3, subparsers are optional by default, so running
    # with no sub-command left the namespace without an "action" attribute
    # and `args.action` raised AttributeError. getattr() routes that case to
    # the "Invalid action" message below instead.
    action = getattr(args, "action", None)
    if action == "config":
        api_key = args.api_key
        configure(api_key)
    elif action == "populate":
        root_dir = args.dir
        dataset_name = args.dataset_name
        recursive_walk_metadata(None,dataset_name,root_dir)
    elif action == "drop":
        dataset_name = args.dataset_name
        purge_dataset(dataset_name)
    elif action == "create":
        dataset_name = args.dataset_name
        lat = float(args.lat)
        lon = float(args.lon)
        alt = float(args.alt)
        instrument_tz = args.instrument_tz
        gain = float(args.gain)
        minfreq = float(args.fmin)
        maxfreq = float(args.fmax)
        antenna = str(args.antenna)
        flo_mhz = float(args.flo_mhz)
        sample_rate = float(args.sample_rate)
        fft_size = int(args.fft_size)
        #reflevel_dbm = float(args.reflevel_dbm)
        create_dataset(dataset_name=dataset_name,
                       lat=lat,
                       lon=lon,
                       alt=alt,
                       instrument_tz=instrument_tz,
                       antenna=antenna,
                       gain=gain,
                       #reflevel_dbm=reflevel_dbm,
                       flo_mhz=flo_mhz,
                       fmin=minfreq,
                       fmax=maxfreq,
                       sample_rate=sample_rate,
                       fft_size=fft_size)
    elif action == "print":
        print_datasets()
    elif action == "print-metadata":
        dataset_name = args.dataset_name
        dump_db(dataset_name)
    elif action == "import":
        csv_file_name = args.csv_file_name
        dataset_name = args.dataset_name
        import_csv_file(dataset_name,csv_file_name)
    else:
        print("Invalid action specified")
|
from django.contrib.auth.models import BaseUserManager
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AccountManager(BaseUserManager):
    """Custom user manager keyed on email, first name and last name."""

    def create_user(self, email, first_name, last_name, password=None, **other_fields):
        """Create and save a regular user.

        Raises ValueError when any of email/first_name/last_name is missing.
        `**other_fields` (new, backward-compatible) forwards extra model
        fields such as is_staff/is_superuser to the user model.
        """
        # Bug fix: the original used `not any(...)`, which only raised when
        # ALL three fields were missing; every field is required, so `all`
        # is the correct check.
        if not all([email, first_name, last_name]):
            raise ValueError(_("Required fields are in this order: email, firstname, lastname"))
        email = self.normalize_email(email)
        user = self.model(email=email, first_name=first_name, last_name=last_name, **other_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, first_name, last_name, password, **other_fields):
        """Create a superuser; is_staff/is_superuser/is_active default to True."""
        other_fields.setdefault('is_staff', True)
        other_fields.setdefault('is_superuser', True)
        other_fields.setdefault('is_active', True)
        if other_fields.get('is_staff') is not True:
            raise ValueError(_("Superuser must be assigned to is_staff=True"))
        if other_fields.get('is_superuser') is not True:
            raise ValueError(_("Superuser must be assigned to is_superuser=True"))
        # Bug fix: other_fields were previously dropped on this call, so
        # superusers were created WITHOUT is_staff/is_superuser/is_active set.
        return self.create_user(email, first_name, last_name, password, **other_fields)
class EmployeeManager(models.Manager):
    """Manager whose queryset is restricted to EMPLOYEE-type accounts."""
    def get_queryset(self, *args, **kwargs):
        # Filter the base queryset down to employee-type users.
        return super().get_queryset(*args, **kwargs).filter(type=self.model.Types.EMPLOYEE)
class DirectorManager(models.Manager):
    """Manager whose queryset is restricted to DIRECTOR-type accounts."""
    def get_queryset(self, *args, **kwargs):
        # Filter the base queryset down to director-type users.
        return super().get_queryset(*args, **kwargs).filter(type=self.model.Types.DIRECTOR)
class CEOManager(models.Manager):
    """Manager whose queryset is restricted to CEO-type accounts."""
    def get_queryset(self, *args, **kwargs):
        # Filter the base queryset down to CEO-type users.
        return super().get_queryset(*args, **kwargs).filter(type=self.model.Types.CEO)
|
# Copyright 2021 SpinQ Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Gate:
    """A named quantum gate paired with a category tag (e.g. "C1", "C2")."""

    def __init__(self, name: str, tag: str):
        self._gname = name
        self._gtag = tag

    @property
    def gtag(self):
        """Gate category tag."""
        return self._gtag

    @property
    def gname(self):
        """Gate name."""
        return self._gname

    def to_dict(self):
        """Serialize the gate as {"gname": ..., "gtag": ...}."""
        return {"gname": self._gname, "gtag": self._gtag}
# Singleton instances for the supported base gates, plus the lookup table
# consumed by find_gate(). Tag values appear to group gates by kind —
# "C1" one-qubit gates, "R1" rotations, "C2" two-qubit controlled gates,
# "U1" generic single-qubit unitary, and dedicated "Measure"/"Barrier"
# pseudo-gates. (Grouping inferred from the names visible here; confirm
# against the consumer of Gate.gtag.)
H = Gate('H', "C1")
I = Gate('I', "C1")
X = Gate('X', "C1")
Y = Gate('Y', "C1")
Z = Gate('Z', "C1")
X90 = Gate('X90', "C1")
Y90 = Gate('Y90', "C1")
Z90 = Gate('Z90', "C1")
X90dg = Gate('X90dg', "C1")
Y90dg = Gate('Y90dg', "C1")
Z90dg = Gate('Z90dg', "C1")
Rx = Gate('Rx', "R1")
Ry = Gate('Ry', "R1")
Rz = Gate('Rz', "R1")
CNOT = Gate('CNOT', "C2")
YCON = Gate('YCON', "C2")
ZCON = Gate('ZCON', "C2")
T = Gate('T', "C1")
Td = Gate('Td', "C1")
S = Gate('S', "C1")
Sd = Gate('Sd', "C1")
Measure = Gate('Measure', "Measure")
Barrier = Gate('Barrier', "Barrier")
U = Gate('U', "U1")
base_gate_list = [H, I, X, Y, Z, X90, Y90, Z90, X90dg, Y90dg, Z90dg, Rx, Ry, Rz, CNOT, YCON, ZCON, T, Td, S, Sd, Measure, Barrier, U]
def find_gate(gname: str):
    """Look up a base gate by name; return None when no gate matches."""
    return next((gate for gate in base_gate_list if gate.gname == gname), None)
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from pathlib import Path
from typing import Optional
from matplotlib import pyplot as plt
from health_azure.utils import get_aml_run_from_run_id, get_workspace
from health_ml.utils.reports import HTMLReport
from histopathology.utils.analysis_plot_utils import (add_training_curves_legend, plot_crossval_roc_and_pr_curves,
plot_crossval_training_curves)
from histopathology.utils.report_utils import (collect_crossval_metrics, collect_crossval_outputs,
get_best_epoch_metrics, get_best_epochs, get_crossval_metrics_table,
get_formatted_run_info)
def generate_html_report(parent_run_id: str, output_dir: Path, workspace_config_path: Optional[Path] = None,
                         include_test: bool = False, overwrite: bool = False) -> None:
    """Build an HTML report for a cross-validation (Hyperdrive) parent run.

    Downloads child-run metrics and outputs from Azure ML, plots training
    curves, tabulates best-epoch validation metrics (and test metrics when
    include_test is True), and renders everything to HTML under
    output_dir/<run display name>.

    :param parent_run_id: Azure ML run ID of the parent Hyperdrive run.
    :param output_dir: Directory to download data into and write the report.
    :param workspace_config_path: Optional workspace config.json; the default
        workspace is used when omitted.
    :param include_test: Also add a test-metrics table to the report.
    :param overwrite: Re-download metrics/outputs even if cached locally.
    """
    aml_workspace = get_workspace(workspace_config_path=workspace_config_path)
    parent_run = get_aml_run_from_run_id(parent_run_id, aml_workspace=aml_workspace)
    report_dir = output_dir / parent_run.display_name
    report_dir.mkdir(parents=True, exist_ok=True)
    report = HTMLReport(output_folder=report_dir)
    report.add_text(get_formatted_run_info(parent_run))
    report.add_heading("Azure ML metrics", level=2)
    # Download metrics from AML. Can take several seconds for each child run
    metrics_df = collect_crossval_metrics(parent_run_id, report_dir, aml_workspace, overwrite=overwrite)
    best_epochs = get_best_epochs(metrics_df, 'val/auroc', maximise=True)
    # Add training curves for loss and AUROC (train and val.)
    report.add_heading("Training curves", level=3)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
    plot_crossval_training_curves(metrics_df, train_metric='train/loss_epoch', val_metric='val/loss_epoch',
                                  ylabel="Loss", best_epochs=best_epochs, ax=ax1)
    plot_crossval_training_curves(metrics_df, train_metric='train/auroc', val_metric='val/auroc',
                                  ylabel="AUROC", best_epochs=best_epochs, ax=ax2)
    add_training_curves_legend(fig, include_best_epoch=True)
    training_curves_fig_path = report_dir / "training_curves.png"
    fig.savefig(training_curves_fig_path, bbox_inches='tight')
    report.add_images([training_curves_fig_path], base64_encode=True)
    # Add tables with relevant metrics (val. and test)
    base_metrics_list = ['accuracy', 'auroc', 'f1score', 'precision', 'recall', '0', '1']
    report.add_heading("Validation metrics (best epoch)", level=3)
    val_metrics_list = ['val/' + metric for metric in base_metrics_list]
    val_metrics_df = get_best_epoch_metrics(metrics_df, val_metrics_list, best_epochs)
    val_metrics_table = get_crossval_metrics_table(val_metrics_df, val_metrics_list)
    report.add_tables([val_metrics_table])
    if include_test:
        report.add_heading("Test metrics", level=3)
        test_metrics_list = ['test/' + metric for metric in base_metrics_list]
        test_metrics_table = get_crossval_metrics_table(metrics_df, test_metrics_list)
        report.add_tables([test_metrics_table])
    # Add test ROC and PR curves
    # NOTE(review): this section runs even when include_test is False —
    # confirm whether it should be guarded by include_test as well.
    crossval_dfs = collect_crossval_outputs(parent_run_id, report_dir, aml_workspace, overwrite=overwrite)
    report.add_heading("Test ROC and PR curves", level=2)
    fig = plot_crossval_roc_and_pr_curves(crossval_dfs)
    roc_pr_curves_fig_path = report_dir / "roc_pr_curves.png"
    fig.savefig(roc_pr_curves_fig_path, bbox_inches='tight')
    report.add_images([roc_pr_curves_fig_path], base64_encode=True)
    print(f"Rendering report to: {report.report_path_html.absolute()}")
    report.render()
if __name__ == "__main__":
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--run_id', help="The parent Hyperdrive run ID")
    parser.add_argument('--output_dir', help="Directory where to download Azure ML data and save the report")
    parser.add_argument('--workspace_config', help="Path to Azure ML workspace config.json file. "
                                                   "If omitted, will try to load default workspace.")
    parser.add_argument('--include_test', action='store_true', help="Opt-in flag to include test results "
                                                                    "in the generated report.")
    parser.add_argument('--overwrite', action='store_true', help="Forces (re)download of metrics and output files, "
                                                                 "even if they already exist locally.")
    args = parser.parse_args()
    # Bug fix: args.output_dir stayed a plain str when the user supplied it
    # on the command line, so the .absolute() call below raised
    # AttributeError. Normalize to a Path in both branches.
    if args.output_dir is None:
        args.output_dir = Path.cwd() / "outputs"
    else:
        args.output_dir = Path(args.output_dir)
    workspace_config = Path(args.workspace_config).absolute() if args.workspace_config else None
    print(f"Output dir: {args.output_dir.absolute()}")
    if workspace_config is not None:
        if not workspace_config.is_file():
            raise ValueError(f"Specified workspace config file does not exist: {workspace_config}")
        print(f"Workspace config: {workspace_config}")
    generate_html_report(parent_run_id=args.run_id,
                         output_dir=args.output_dir,
                         workspace_config_path=workspace_config,
                         include_test=args.include_test,
                         overwrite=args.overwrite)
|
import torch
class Label_smoothing(torch.nn.Module):
    """Cross-entropy loss with label smoothing.

    The target distribution places (1 - eps + eps/K) on the true class and
    eps/K on every other class, where K = num_classes. The loss is the mean
    (over the batch) of -sum(target * log_softmax(logits)).
    """

    def __init__(self, num_classes, eps=0.1):
        super(Label_smoothing, self).__init__()
        self.eps = eps
        self.v = self.eps / num_classes  # uniform smoothing mass per class
        self.logsoft = torch.nn.LogSoftmax(dim=1)

    def forward(self, inputs, label):
        """Return the smoothed cross-entropy between logits and class labels."""
        smoothed = torch.full_like(inputs, self.v)
        targets = label.to(torch.long).view(-1, 1)
        smoothed.scatter_(1, targets, 1 - self.eps + self.v)
        log_probs = self.logsoft(inputs)
        return -torch.sum(log_probs * smoothed.detach()) / inputs.size(0)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os
import os.path as op
import sys
import logging
import numpy as np
from jcvi.formats.base import LineFile
from jcvi.apps.base import (
OptionParser,
ActionDispatcher,
need_update,
sh,
get_abs_path,
which,
)
class Sizes(LineFile):
    """
    Two-column .sizes file, often generated by `faSize -detailed`
    contigID size

    Also accepts a FASTA file, in which case a companion ``.sizes`` file is
    generated on the fly (via `faSize` if available, else by parsing the FASTA).
    """
    def __init__(self, filename, select=None):
        # :param filename: a .sizes file, or a FASTA file to derive sizes from
        # :param select: if given (> 0), keep only contigs with size >= select
        assert op.exists(filename), "File `{0}` not found".format(filename)
        # filename can be both .sizes file or FASTA formatted file
        sizesname = filename
        if not filename.endswith(".sizes"):
            sizesname = filename + ".sizes"
            filename = get_abs_path(filename)
            if need_update(filename, sizesname):
                cmd = "faSize"
                if which(cmd):
                    # Prefer the external UCSC tool when it is on PATH.
                    cmd += " -detailed {0}".format(filename)
                    sh(cmd, outfile=sizesname)
                else:
                    # Fall back to parsing the FASTA in pure Python.
                    from jcvi.formats.fasta import Fasta
                    f = Fasta(filename)
                    fw = open(sizesname, "w")
                    for k, size in f.itersizes_ordered():
                        print("\t".join((k, str(size))), file=fw)
                    fw.close()
            filename = sizesname
        assert filename.endswith(".sizes")
        super(Sizes, self).__init__(filename)
        self.fp = open(filename)
        self.filename = filename
        # get sizes for individual contigs, both in list and dict
        # this is to preserve the input order in the sizes file
        sizes = list(self.iter_sizes())
        if select:
            assert select > 0
            sizes = [x for x in sizes if x[1] >= select]
        self.sizes_mapping = dict(sizes)
        # get cumulative sizes, both in list and dict
        # NOTE(review): zip(*sizes) raises if `select` filtered out everything.
        ctgs, sizes = zip(*sizes)
        self.sizes = sizes
        # cumsizes[i] is the total size of all contigs before ctgs[i].
        cumsizes = np.cumsum([0] + list(sizes))
        self.ctgs = ctgs
        self.cumsizes = cumsizes
        self.cumsizes_mapping = dict(zip(ctgs, cumsizes))

    def __len__(self):
        # Number of contigs (after optional `select` filtering).
        return len(self.sizes)

    def get_size(self, ctg):
        # Size of a single contig by name.
        return self.sizes_mapping[ctg]

    def get_cumsize(self, ctg):
        # Cumulative size of all contigs preceding `ctg` (input order).
        return self.cumsizes_mapping[ctg]

    def close(self, clean=False):
        # Close the underlying file handle; optionally delete the .sizes file.
        self.fp.close()
        if clean:
            os.remove(self.filename)

    @property
    def mapping(self):
        # dict of contig name -> size.
        return self.sizes_mapping

    @property
    def totalsize(self):
        # Sum of all contig sizes.
        return sum(self.sizes)

    def iter_sizes(self):
        # Yield (contig, size) pairs straight from the file, in file order.
        self.fp.seek(0)
        for row in self.fp:
            ctg, size = row.split()[:2]
            yield ctg, int(size)

    def iter_names(self):
        # Yield contig names only, in file order.
        self.fp.seek(0)
        for row in self.fp:
            ctg, size = row.split()[:2]
            yield ctg

    def get_position(self, ctg, pos):
        # Map a (contig, offset) coordinate to a global concatenated coordinate;
        # returns None for unknown contigs.
        if ctg not in self.cumsizes_mapping:
            return None
        return self.cumsizes_mapping[ctg] + pos

    def get_breaks(self):
        # Yield (contig, global_start, global_end) for each contig.
        for i in range(len(self)):
            yield self.ctgs[i], self.cumsizes[i], self.cumsizes[i + 1]

    @property
    def summary(self):
        # (total size, L50, N50) of the assembly.
        from jcvi.assembly.base import calculate_A50

        ctgsizes = self.sizes
        a50, l50, n50 = calculate_A50(ctgsizes)
        return sum(ctgsizes), l50, n50
def main():
    """Dispatch `sizes` subcommands to their handler functions in this module."""
    actions = (
        ("agp", "write to AGP format from sizes file"),
        ("extract", "extract the lines containing only the given IDs"),
        ("histogram", "plot read/contig length distribution"),
    )
    dispatcher = ActionDispatcher(actions)
    dispatcher.dispatch(globals())
def histogram(args):
    """
    %prog histogram [reads.fasta|reads.fastq]
    Plot read length distribution for reads. The plot would be similar to the
    one generated by SMRT-portal, for example:
    http://blog.pacificbiosciences.com/2013/10/data-release-long-read-shotgun.html
    Plot has two axes - corresponding to pdf and cdf, respectively. Also adding
    number of reads, average/median, N50, and total length.
    """
    from jcvi.utils.cbook import human_size, thousands, SUFFIXES
    from jcvi.formats.fastq import fasta
    from jcvi.graphics.histogram import stem_leaf_plot
    from jcvi.graphics.base import (
        plt,
        markup,
        human_formatter,
        human_base_formatter,
        savefig,
        set2,
        set_ticklabels_helvetica,
    )

    p = OptionParser(histogram.__doc__)
    p.set_histogram(
        vmax=50000, bins=100, xlabel="Read length", title="Read length distribution"
    )
    p.add_option("--ylabel1", default="Counts", help="Label of y-axis on the left")
    p.add_option(
        "--color",
        default="0",
        choices=[str(x) for x in range(8)],
        help="Color of bars, which is an index 0-7 in brewer set2",
    )
    opts, args, iopts = p.set_image_options(args, figsize="6x6", style="dark")
    if len(args) != 1:
        sys.exit(not p.print_help())
    (fastafile,) = args
    # Normalise FASTQ input to FASTA before measuring sequence lengths.
    fastafile, qualfile = fasta([fastafile, "--seqtk"])
    sizes = Sizes(fastafile)
    all_sizes = sorted(sizes.sizes)
    xmin, xmax, bins = opts.vmin, opts.vmax, opts.bins
    left, height = stem_leaf_plot(all_sizes, xmin, xmax, bins)
    plt.figure(1, (iopts.w, iopts.h))
    ax1 = plt.gca()
    width = (xmax - xmin) * 0.5 / bins
    color = set2[int(opts.color)]
    # Left axis (ax1): histogram of read lengths (the "pdf").
    ax1.bar(left, height, width=width, linewidth=0, fc=color, align="center")
    ax1.set_xlabel(markup(opts.xlabel))
    ax1.set_ylabel(opts.ylabel1)
    # Right axis (ax2): reverse-cumulative total bases above each length (the "cdf").
    ax2 = ax1.twinx()
    cur_size = 0
    total_size, l50, n50 = sizes.summary
    cdf = {}
    hsize = human_size(total_size)
    tag = hsize[-2:]
    # Scale the cumulative curve to the unit implied by the human-readable suffix.
    unit = 1000 ** SUFFIXES[1000].index(tag)
    for x in all_sizes:
        if x not in cdf:
            cdf[x] = (total_size - cur_size) * 1.0 / unit
        cur_size += x
    x, y = zip(*sorted(cdf.items()))
    ax2.plot(x, y, "-", color="darkslategray")
    ylabel2 = "{0} above read length".format(tag)
    ax2.set_ylabel(ylabel2)
    for ax in (ax1, ax2):
        set_ticklabels_helvetica(ax)
        ax.set_xlim((xmin - width / 2, xmax + width / 2))
    # Annotate summary statistics in the top-right corner, echoing to stderr.
    tc = "gray"
    axt = ax1.transAxes
    xx, yy = 0.95, 0.95
    ma = "Total bases: {0}".format(hsize)
    mb = "Total reads: {0}".format(thousands(len(sizes)))
    mc = "Average read length: {0}bp".format(thousands(np.mean(all_sizes)))
    md = "Median read length: {0}bp".format(thousands(np.median(all_sizes)))
    me = "N50 read length: {0}bp".format(thousands(l50))
    for t in (ma, mb, mc, md, me):
        print(t, file=sys.stderr)
        ax1.text(xx, yy, t, color=tc, transform=axt, ha="right")
        yy -= 0.05
    ax1.set_title(markup(opts.title))
    # Seaborn removes ticks for all styles except 'ticks'. Now add them back:
    ax1.tick_params(
        axis="x",
        direction="out",
        length=3,
        left=False,
        right=False,
        top=False,
        bottom=True,
    )
    ax1.xaxis.set_major_formatter(human_base_formatter)
    ax1.yaxis.set_major_formatter(human_formatter)
    figname = sizes.filename + ".pdf"
    savefig(figname)
def extract(args):
    """
    %prog extract idsfile sizesfile
    Extract the lines containing only the given IDs.
    """
    p = OptionParser(extract.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    idsfile, sizesfile = args
    sizes = Sizes(sizesfile).mapping
    # Use a context manager so the ids file is always closed
    # (the original opened it and never closed the handle).
    with open(idsfile) as fp:
        for row in fp:
            name = row.strip()
            size = sizes[name]
            print("\t".join(str(x) for x in (name, size)))
def agp(args):
    """
    %prog agp <fastafile|sizesfile>
    Convert the sizes file to a trivial AGP file.
    """
    from jcvi.formats.agp import OO

    p = OptionParser(agp.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    (sizesfile,) = args
    sizes = Sizes(sizesfile)
    agpfile = sizes.filename.rsplit(".", 1)[0] + ".agp"
    o = OO()  # Without a filename
    for ctg, size in sizes.iter_sizes():
        o.add(ctg, ctg, size)
    # Context manager guarantees the output handle is flushed and closed
    # even if write_AGP raises (the original could leak it on error).
    with open(agpfile, "w") as fw:
        o.write_AGP(fw)
    logging.debug("AGP file written to `{0}`.".format(agpfile))
    return agpfile
if __name__ == "__main__":
    # Script entry point: dispatch the requested subcommand via main().
    main()
|
"""SDS user rules management functions."""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from future import standard_library
standard_library.install_aliases()
import os
import json
from datetime import datetime
from sdscli.log_utils import logger
from sdscli.os_utils import validate_dir, normpath
from hysds.es_util import get_mozart_es
# Elasticsearch index names holding the mozart and GRQ user rules.
USER_RULES_MOZART = 'user_rules-mozart'
USER_RULES_GRQ = 'user_rules-grq'
# Shared mozart Elasticsearch client, created once at import time.
mozart_es = get_mozart_es()
def export(args):
    """Export all HySDS user rules (mozart + GRQ) to the JSON file ``args.outfile``."""
    rules = {}
    mozart_hits = mozart_es.query(index=USER_RULES_MOZART)
    rules['mozart'] = [hit['_source'] for hit in mozart_hits]
    logger.debug('%d mozart user rules found' % len(mozart_hits))
    grq_hits = mozart_es.query(index=USER_RULES_GRQ)
    rules['grq'] = [hit['_source'] for hit in grq_hits]
    logger.debug('%d grq user rules found' % len(grq_hits))
    logger.debug("rules: {}".format(json.dumps(rules, indent=2)))
    # Resolve the export path and make sure its directory exists.
    outfile = normpath(args.outfile)
    export_dir = os.path.dirname(outfile)
    logger.debug("export_dir: {}".format(export_dir))
    validate_dir(export_dir)
    # Dump the collected rules as pretty-printed, key-sorted JSON.
    with open(outfile, 'w') as f:
        json.dump(rules, f, indent=2, sort_keys=True)
def import_rules(args):
    """
    Import HySDS user rules.
    rules json structure: {
        "mozart": [...],
        "grq": [...],
    }

    :param args: parsed CLI args; ``args.file`` is the user rules JSON file.
    :return: 1 if the rules file does not exist, otherwise None.
    """
    rules_file = normpath(args.file)  # user rules JSON file
    logger.debug("rules_file: {}".format(rules_file))
    if not os.path.isfile(rules_file):
        logger.error("HySDS user rules file {} doesn't exist.".format(rules_file))
        return 1
    with open(rules_file) as f:
        user_rules = json.load(f)  # read in user rules
    # BUG FIX: previously dumped `rules_file` (the file *path* string) instead
    # of the loaded rules themselves.
    logger.debug("rules: {}".format(json.dumps(user_rules, indent=2, sort_keys=True)))

    def _index_rules(rules, index):
        # Stamp any missing timestamps and index each rule into `index`.
        for rule in rules:
            now = datetime.utcnow().isoformat() + 'Z'
            if not rule.get('creation_time', None):
                rule['creation_time'] = now
            if not rule.get('modified_time', None):
                rule['modified_time'] = now
            result = mozart_es.index_document(index=index, body=rule)
            logger.debug(result)

    _index_rules(user_rules['mozart'], USER_RULES_MOZART)  # indexing mozart rules
    _index_rules(user_rules['grq'], USER_RULES_GRQ)        # indexing GRQ rules
|
from models import MODEL_DICT
from parameters import DATA_PATH
import os
def train_model(model):
if not os.path.exists(DATA_PATH+'models'):
os.mkdir(DATA_PATH+'models')
print('Starting traning of ', model.NAME)
model.train(lang='pt')
model.train(lang='es')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--model', type=str, required=False, default='bi_lstm_gru_spat_clr',
help='model name')
parser.add_argument('--all', default=False, action='store_true',
help='train all models')
args = parser.parse_args()
if(args.all):
for model in MODEL_DICT.values():
train_model(model)
else:
train_model(MODEL_DICT[args.model]) |
from flask import Blueprint, redirect
from application.pypi_org.infrastructure import cookie_auth
from application.pypi_org.infrastructure.view_modifiers import response
from application.pypi_org.services import user_service
from application.pypi_org.viewmodels.account.index_viewmodel import IndexViewModel
from application.pypi_org.viewmodels.account.login_viewmodel import LoginViewModel
from application.pypi_org.viewmodels.account.register_viewmodel import RegisterViewModel
blueprint = Blueprint('account', __name__, template_folder='templates')
@blueprint.route('/account')
@response(template_file='account/index.html')
def index():
    """Render the account home page; send anonymous visitors to the login page."""
    vm = IndexViewModel()
    if not vm.user:
        # BUG FIX: the redirect response was created but discarded; it must be
        # returned for Flask to actually perform the redirect.
        return redirect('/account/login')
    return vm.to_dict()
@blueprint.route('/account/register', methods=['GET'])
@response(template_file='account/register.html')
def register_get():
    """Serve the blank registration form."""
    view_model = RegisterViewModel()
    return view_model.to_dict()
@blueprint.route('/account/register', methods=['POST'])
@response(template_file='account/register.html')
def register_post():
    """Validate the submitted registration form, create the account and log in."""
    view_model = RegisterViewModel()
    view_model.validate()
    # Re-render the form with the validation error, if any.
    if view_model.error:
        return view_model.to_dict()
    new_user = user_service.create_user(view_model.name, view_model.email, view_model.password)
    if not new_user:
        view_model.error = 'The account could not be created.'
        return view_model.to_dict()
    # Success: set the auth cookie and go to the account page.
    resp_out = redirect('/account')
    cookie_auth.set_auth(resp_out, new_user.id)
    return resp_out
@blueprint.route('/account/login', methods=['GET'])
@response(template_file='account/login.html')
def login_get():
    """Serve the blank login form."""
    view_model = LoginViewModel()
    return view_model.to_dict()
@blueprint.route('/account/login', methods=['POST'])
@response(template_file='account/login.html')
def login_post():
    """Validate the submitted login form and authenticate the user."""
    view_model = LoginViewModel()
    view_model.validate()
    # Re-render the form with the validation error, if any.
    if view_model.error:
        return view_model.to_dict()
    authed_user = user_service.login_user(view_model.email, view_model.password)
    if not authed_user:
        view_model.error = "The account does not exist or the password is wrong."
        return view_model.to_dict()
    # Success: set the auth cookie and go to the account page.
    resp_out = redirect('/account')
    cookie_auth.set_auth(resp_out, authed_user.id)
    return resp_out
@blueprint.route('/account/logout')
def logout():
    """Clear the auth cookie and return to the site root."""
    resp_out = redirect('/')
    cookie_auth.logout(resp_out)
    return resp_out
|
import json
import torch
from torch.utils.data import Dataset, DataLoader
class ClassifierDataset(Dataset):
    """Dataset over a JSON file of math word problems keyed by string indices.

    Each entry carries a class label, the question text, the answer and - for
    arithmetic ('산술연산') problems only - an equation.
    """

    def __init__(self, json_path):
        self.json_path = json_path
        # UTF-8-sig tolerates a BOM at the start of the file.
        with open(json_path, 'r', encoding='UTF-8-sig') as j:
            self.mwp_cpae = json.load(j)

    def read_cpae(self, idx):
        """Return the (class, problem, answer, equation) tuple for entry `idx`."""
        entry = self.mwp_cpae[str(idx)]
        cls = entry['class']
        question = entry['question']
        answer = entry['answer']
        # Only arithmetic problems carry an equation; others get 'none'.
        equation = entry['equation'] if cls == '산술연산' else 'none'
        return cls, question, answer, equation

    def __len__(self):
        return len(self.mwp_cpae)

    def __getitem__(self, idx):
        return self.read_cpae(idx)
if __name__ == '__main__':
    # Smoke test: dump every entry of the public dataset to stdout.
    json_path = './public_mwp_data.json'
    dataset = ClassifierDataset(json_path=json_path)
    # Entries are keyed by 1-based string indices, hence the shifted range.
    # Use len() and indexing instead of calling the dunder methods directly.
    for i in range(1, len(dataset) + 1):
        c, p, a, e = dataset[i]
        print(c)
        print(p)
        print(a)
        print(e)
|
"""
Utility routines for benchmarks on OT solvers
===================================================
"""
import time
import torch
import numpy as np
use_cuda = torch.cuda.is_available()
# Default tensor constructor: GPU floats when CUDA is available, CPU otherwise.
tensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor


def numpy(x):
    """Detach a torch tensor, move it to the CPU and convert it to a NumPy array.

    (A ``def`` instead of the original lambda assignment - PEP 8 / E731.)
    """
    return x.detach().cpu().numpy()
################################################################################
# 3D dataset
# -------------------------
#
# Reading **.ply** files:
from plyfile import PlyData, PlyElement
def load_ply_file(fname, offset=(-0.011, 0.109, -0.008), scale=.04):
    """Loads a .ply mesh to return a collection of weighted Dirac atoms: one per triangle face.

    :param fname: path to the .ply file.
    :param offset: translation subtracted from every vertex before scaling
        (a tuple now - the original mutable list default was an anti-pattern).
    :param scale: half the divisor applied after centering.
    :return: (weights, points) pair of tensors - normalized face areas and face centers.
    """
    # Load the data, and read the connectivity information:
    plydata = PlyData.read(fname)
    triangles = np.vstack(plydata['face'].data['vertex_indices'])
    # Normalize the point cloud, as specified by the user:
    points = np.vstack([[x, y, z] for (x, y, z) in plydata['vertex']])
    points -= offset
    points /= 2 * scale
    # Our mesh is given as a collection of ABC triangles:
    A, B, C = points[triangles[:, 0]], points[triangles[:, 1]], points[triangles[:, 2]]
    # Locations and weights of our Dirac atoms:
    X = (A + B + C) / 3  # centers of the faces
    S = np.sqrt(np.sum(np.cross(B - A, C - A) ** 2, 1)) / 2  # areas of the faces
    print("File loaded, and encoded as the weighted sum of {:,} atoms in 3D.".format(len(X)))
    # We return a (normalized) vector of weights + a "list" of points
    return tensor(S / np.sum(S)), tensor(X)
################################################################################
# Synthetic sphere - a typical source measure:
def create_sphere(n_samples=1000):
    """Creates a uniform sample on the unit sphere (Fibonacci-style lattice)."""
    n_samples = int(n_samples)
    # Half-integer indices avoid clustering at the poles:
    indices = np.arange(0, n_samples, dtype=float) + 0.5
    phi = np.arccos(1 - 2 * indices / n_samples)        # polar angle
    theta = np.pi * (1 + 5 ** 0.5) * indices            # golden-angle azimuth
    sin_phi = np.sin(phi)
    x = np.cos(theta) * sin_phi
    y = np.sin(theta) * sin_phi
    z = np.cos(phi)
    points = np.stack((x, y, z), axis=1)
    # Uniform weights summing to one:
    weights = np.full(n_samples, 1.0 / n_samples)
    return tensor(weights), tensor(points)
############################################################
# Simple (slow) display routine:
def display_cloud(ax, measure, color):
    """Scatter-plot a weighted point cloud (weights, points) on the 3D axes `ax`."""
    raw_weights, points = numpy(measure[0]), numpy(measure[1])
    ax.view_init(elev=110, azim=-90)
    # Marker area is proportional to the normalized weight of each atom:
    normalized = raw_weights / raw_weights.sum()
    ax.scatter(points[:, 0], points[:, 1], points[:, 2],
               s=25 * 500 * normalized, c=color)
    ax.axes.set_xlim3d(left=-1.4, right=1.4)
    ax.axes.set_ylim3d(bottom=-1.4, top=1.4)
    ax.axes.set_zlim3d(bottom=-1.4, top=1.4)
#############################################################
# Measuring the error made on the marginal constraints
# ---------------------------------------------------------
#
# Computing the marginals of the implicit transport plan:
#
# .. math::
# \pi ~&=~ \exp \tfrac{1}{\varepsilon}( f\oplus g - \text{C})~\cdot~ \alpha\otimes\beta,\\
# \text{i.e.}~~\pi_{x_i \leftrightarrow y_j}~&=~ \exp \tfrac{1}{\varepsilon}( F_i + G_j - \text{C}(x_i,y_j))~\cdot~ \alpha_i \beta_j.
#
#
from pykeops.torch import LazyTensor
def plan_marginals(blur, a_i, x_i, b_j, y_j, F_i, G_j) :
    """Returns the marginals of the transport plan encoded in the dual vectors F_i and G_j.

    The implicit plan is pi = exp((F + G - C) / eps) * (alpha x beta), with
    eps = blur**2 and C(x, y) = |x - y|^2 / 2 (see the module-level notes above).
    """
    # Wrap inputs as KeOps LazyTensors so the N-by-M reductions below are
    # computed lazily, without materialising the full matrices:
    x_i = LazyTensor( x_i[:,None,:] )
    y_j = LazyTensor( y_j[None,:,:] )
    F_i = LazyTensor( F_i[:,None,None] )
    G_j = LazyTensor( G_j[None,:,None] )
    # Cost matrix: C_ij = |x_i - y_j|^2 / 2
    C_ij = ((x_i - y_j) ** 2).sum(-1) / 2
    # Scaled kernel matrix: K_ij = exp((F_i + G_j - C_ij) / eps)
    K_ij = (( F_i + G_j - C_ij ) / blur**2 ).exp()
    A_i = a_i * (K_ij@b_j)  # First marginal
    B_j = b_j * (K_ij.t()@a_i)  # Second marginal
    return A_i, B_j
########################################################
# Compare the marginals using the relevant kernel norm
#
# .. math::
# \|\alpha - \beta\|^2_{k_\varepsilon} ~=~
# \langle \alpha - \beta , k_\varepsilon \star (\alpha -\beta) \rangle,
#
# with :math:`k_\varepsilon(x,y) = \exp(-\text{C}(x,y)/\varepsilon)`.
#
def blurred_relative_error(blur, x_i, a_i, A_i):
    """Computes the relative error |A_i-a_i| / |a_i| with respect to the kernel norm k_eps.

    Here k_eps(x, y) = exp(-|x - y|^2 / (2 * blur^2)), as in the notes above.
    """
    x_j = LazyTensor( x_i[None,:,:] )
    x_i = LazyTensor( x_i[:,None,:] )
    # Kernel matrix K_ij = exp(-C_ij / eps) with C_ij = |x_i - x_j|^2 / 2:
    C_ij = ((x_i - x_j) ** 2).sum(-1) / 2
    K_ij = ( - C_ij / blur**2 ).exp()
    # Squared kernel norms <A-a, K(A-a)> and <a, Ka>:
    squared_error = (A_i - a_i).dot( K_ij@(A_i - a_i) )
    squared_norm = a_i.dot( K_ij@a_i )
    return ( squared_error / squared_norm ).sqrt()
##############################################################################
# Simple error routine:
def marginal_error(blur, a_i, x_i, b_j, y_j, F_i, G_j, mode="blurred"):
    """Measures how well the transport plan encoded in the dual vectors F_i and G_j satisfies the marginal constraints."""
    A_i, B_j = plan_marginals(blur, a_i, x_i, b_j, y_j, F_i, G_j)
    if mode == "TV":
        # Average total-variation discrepancy on the two marginal constraints:
        tv_src = (A_i - a_i).abs().sum()
        tv_tgt = (B_j - b_j).abs().sum()
        return (tv_src + tv_tgt) / 2
    if mode == "blurred":
        # Use the kernel norm k_eps to measure the discrepancy:
        err_src = blurred_relative_error(blur, x_i, a_i, A_i)
        err_tgt = blurred_relative_error(blur, y_j, b_j, B_j)
        return (err_src + err_tgt) / 2
    raise NotImplementedError()
#############################################################
# Computing the entropic Wasserstein distance
# ---------------------------------------------------------
#
# Computing the transport cost, assuming that the dual vectors satisfy
# the equations at optimality:
#
# .. math::
# \text{OT}_\varepsilon(\alpha,\beta)~=~ \langle \alpha, f^\star\rangle + \langle \beta, g^\star \rangle.
#
def transport_cost(a_i, b_j, F_i, G_j):
    """Returns the entropic transport cost <alpha, F> + <beta, G> associated to the dual variables F_i and G_j."""
    source_term = a_i.dot(F_i)
    target_term = b_j.dot(G_j)
    return source_term + target_term
##############################################################################
# Compute the "entropic Wasserstein distance"
#
# .. math::
# \text{D}_\varepsilon(\alpha,\beta)~=~ \sqrt{2 \cdot \text{OT}_\varepsilon(\alpha,\beta)},
#
# which is **homogeneous to a distance on the ambient space** and is
# associated to the (biased) Sinkhorn cost :math:`\text{OT}_\varepsilon`
# with cost :math:`\text{C}(x,y) = \tfrac{1}{2}\|x-y\|^2`.
def wasserstein_distance(a_i, b_j, F_i, G_j):
    """Returns the entropic Wasserstein "distance" sqrt(2 * OT_eps) associated to the dual variables F_i and G_j."""
    cost = transport_cost(a_i, b_j, F_i, G_j)
    return (2 * cost).sqrt()
##############################################################################
# Compute all these quantities simultaneously, with a proper clock:
def benchmark_solver(OT_solver, blur, source, target):
    """Returns a (timing, relative error on the marginals, wasserstein distance) triplet for OT_solver(source, target)."""
    a_i, x_i = source
    b_j, y_j = target
    t_start = time.time()
    F_i, G_j = OT_solver(a_i, x_i, b_j, y_j)
    # Wait for all pending CUDA kernels so the timing is meaningful on GPU:
    if x_i.is_cuda:
        torch.cuda.synchronize()
    elapsed = time.time() - t_start
    F_i, G_j = F_i.view(-1), G_j.view(-1)
    error = marginal_error(blur, a_i, x_i, b_j, y_j, F_i, G_j).item()
    distance = wasserstein_distance(a_i, b_j, F_i, G_j).item()
    return elapsed, error, distance
#############################################################
# Benchmarking a collection of OT solvers
# ---------------------------------------------------------
#
def benchmark_solvers(name, OT_solvers, source, target, ground_truth,
                      blur=.01, display=False, maxtime=None):
    """Benchmark a family of OT solvers against a known ground-truth cost.

    :param name: label of the solver family (used in printouts and the plot title).
    :param OT_solvers: iterable of solvers, each called as solver(a_i, x_i, b_j, y_j).
    :param source: (weights, points) source measure.
    :param target: (weights, points) target measure.
    :param ground_truth: reference transport cost used for the relative-error plot.
    :param blur: blur radius (eps = blur**2) for the marginal-error metric.
    :param display: if True, plot error-vs-time curves with matplotlib.
    :param maxtime: stop benchmarking once a solver exceeds this many seconds.
    :return: (timings, errors, costs) NumPy arrays, NaN-padded for skipped solvers.
    """
    timings, errors, costs = [], [], []
    break_loop = False
    print('Benchmarking the "{}" family of OT solvers - ground truth = {:.6f}:'.format(name, ground_truth))
    for i, OT_solver in enumerate(OT_solvers):
        try:
            timing, error, cost = benchmark_solver(OT_solver, blur, source, target)
            timings.append(timing) ; errors.append(error) ; costs.append(cost)
            print("{}-th solver : t = {:.4f}, error on the constraints = {:.3f}, cost = {:.6f}".format(
                i + 1, timing, error, cost))
        except RuntimeError:
            # Typically a CUDA out-of-memory error; record NaNs and stop.
            print("** Memory overflow ! **")
            break_loop = True
            timings.append(np.nan) ; errors.append(np.nan) ; costs.append(np.nan)
        # NB: `timing` is only read when break_loop is False, i.e. when the
        # try-block above completed and assigned it.
        if break_loop or (maxtime is not None and timing > maxtime):
            # NaN-pad results for the solvers we are not going to run:
            not_performed = len(OT_solvers) - (i + 1)
            timings += [np.nan] * not_performed
            errors += [np.nan] * not_performed
            costs += [np.nan] * not_performed
            break
    print("")
    timings, errors, costs = np.array(timings), np.array(errors), np.array(costs)
    if display:  # Fancy display
        # BUG FIX: `plt` was referenced without ever being imported in this
        # module; import lazily so headless runs don't need matplotlib.
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(12, 8))
        ax_1 = fig.subplots()
        ax_1.set_title("Benchmarking \"{}\"\non a {:,}-by-{:,} entropic OT problem, with a blur radius of {:.3f}".format(
            name, len(source[0]), len(target[0]), blur
        ))
        ax_1.set_xlabel("time (s)")
        ax_1.plot(timings, errors, color="b")
        ax_1.set_ylabel("Relative error on the marginal constraints", color="b")
        ax_1.tick_params("y", colors="b")
        ax_1.set_yscale('log') ; ax_1.set_ylim(bottom=1e-5)
        ax_2 = ax_1.twinx()
        ax_2.plot(timings, abs(costs - ground_truth) / ground_truth, color="r")
        ax_2.set_ylabel("Relative error on the cost value", color="r")
        ax_2.tick_params("y", colors="r")
        ax_2.set_yscale('log') ; ax_2.set_ylim(bottom=1e-5)
    return timings, errors, costs
|
raise NotImplementedError("_strptime is not yet implemented in Skulpt")
|
from gzip import open as gzopen
import struct
from itertools import izip_longest, imap
def data():
    # Yield the prices from "prices0.txt" as floats, one per line.
    # "\N" is the database-dump convention for NULL and is skipped.
    # (Python 2 source: "\N" is a plain two-character string here.)
    for l in open("prices0.txt"):
        if l.strip() == "\N" : pass
        else : yield float(l.strip())
# Binary output stream for the packed price column.
linear = open("column0.txt", "wb")
#hexdump = open("column1.txt", "wb")
def toHex(s):
    # Render a (Python 2) byte string as lowercase hex, two digits per byte.
    lst = []
    for ch in s:
        hv = hex(ord(ch)).replace('0x', '')
        if len(hv) == 1:
            hv = '0'+hv
        lst.append(hv)
    # Relies on the Python 2 builtin `reduce`; equivalent to ''.join(lst).
    return reduce(lambda x,y:x+y, lst)
def pack(v):
    # Serialize one price as an 8-byte IEEE-754 double ("d" format).
    # Falsy values (None, 0.0) are encoded as NaN; note this also maps a
    # genuine 0.0 price to NaN. Python 2 print statement logs the value and
    # its hex encoding for debugging.
    if v:
        print v, toHex(struct.pack("d", v))
        return struct.pack("d", v)
    else:
        return struct.pack("d", float("NaN"))
# Stream every price through pack() and write the packed doubles to the
# column file one byte at a time (Python 2: iterating a str yields
# single-character strings). `i` counts records for the commented-out
# progress display below.
i = 0
for d in imap(lambda v : pack(v), data()):
    d = list(d)
    for v in d:
        linear.write(v)
    #hexdump.write(toHex(d))
    i += 1
    #print toHex("".join(d))
    #print toHex("".join(mix(d)))
    # print "%024d\r" % i,
linear.close()
#hexdump.close()
#vim: set ts=4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.misc
import scipy.integrate
def f(x):
    """Evaluate f(x) = x^3 - 10*sin(x) - 4; works element-wise on NumPy arrays."""
    return x**3 - 10*np.sin(x) - 4
def df(x):
    """Numerical derivative of f at x.

    ``scipy.misc.derivative`` was deprecated in SciPy 1.10 and removed in 1.12.
    This reproduces its default behaviour (n=1, dx=1.0, order=3), i.e. the
    central difference (f(x + 1) - f(x - 1)) / 2, and keeps working on arrays.
    """
    dx = 1.0
    return (f(x + dx) - f(x - dx)) / (2 * dx)
@np.vectorize
def F(x):
    # Antiderivative of f with F(0) = 0, via numerical quadrature;
    # @np.vectorize maps it element-wise over array inputs.
    return sp.integrate.quad(f, 0, x)[0]
# Sample f, its numerical derivative and its antiderivative on [-3, 3].
X = np.linspace(-3,3, 100)
Y = f(X)
Y1 = df(X)
Y2 = F(X)
# Overlay the three curves with distinct line styles and show the figure.
plt.plot(X, Y, linewidth=2, label="$f$")
plt.plot(X, Y1, linewidth=2, linestyle="dashed", label="$f'$")
plt.plot(X, Y2, linewidth=2, linestyle="dotted", label="$F$")
plt.legend()
plt.show()
|
import requests
import os
# Google Distance Matrix API endpoint (imperial units requested in the URL).
base = "https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial"
# API key must come from the KEY environment variable; raises KeyError at
# import time if it is unset.
key = os.environ['KEY']
def get_driving_time(origin_lat, origin_lon, dest_lat, dest_lon):
    """Return the driving time in minutes (rounded to 2 dp) between two coordinates.

    Queries the Google Distance Matrix API using the module-level `base` URL
    and `key`.

    :raises requests.RequestException: on network failure or timeout.
    :raises KeyError / IndexError: if the API response lacks the expected fields.
    """
    parameters = {'origins': "{},{}".format(origin_lat, origin_lon),
                  'destinations': "{},{}".format(dest_lat, dest_lon),
                  'key': key}
    # A timeout prevents the call from hanging forever if the API is unreachable.
    r = requests.get(base, params=parameters, timeout=10)
    data = r.json()
    return round(int(data['rows'][0]['elements'][0]['duration']['value']) / 60, 2)
|
"""
Various helper functions for use with Maxmind's GeoIP2 Library
Getting started with our GeoIP2 wrapper
---------------------------------------
Basic Usage::
>>> from privex.helpers import geoip
>>> res = geoip.geolocate_ip('185.130.44.5')
>>> print(f"Country: {res.country} || Code: {res.country_code} || City: {res.city}")
Country: Sweden || Code: SE || City: Stockholm
>>> print(f"ISP: {res.as_name} || AS Num: {res.as_number}")
    ISP: Privex Inc. || AS Num: 210083
If your application won't need to touch the GeoIP database for a while, you should call :func:`.cleanup` to
close the GeoIP2 databases to save memory::
    >>> geoip.cleanup()
Using the GeoIP2 :func:`.geoip_manager` context manager
-------------------------------------------------------
Alternatively, you can use the context manager :func:`.geoip_manager` which will automatically call :func:`.cleanup`
at the end of a ``with`` block::
>>> with geoip.geoip_manager():
... res = geoip.geolocate_ip('2a07:e00::333')
... print(f"Postcode: {res['postcode']} || Lat: {res.get('lat', 'unknown')} || Long: {res.long}")
...
Postcode: 173 11 || Lat: 59.3333 || Long: 18.05
Accessing the underlying :mod:`geoip2` library instances
--------------------------------------------------------
If our wrappers don't provide certain features you need, you can easily access the raw GeoIP2 reader instances.
With our context manager
^^^^^^^^^^^^^^^^^^^^^^^^
Accessing :class:`geoip2.database.Reader` via the context manager::
>>> import geoip2.models
>>> with geoip.geoip_manager('city') as geo:
... data: geoip2.models.City = geo.city('95.216.3.171')
... print('Continent:', data.continent.names.get('en'), 'Time Zone:', data.location.time_zone)
Continent: Europe Time Zone: Europe/Helsinki
Directly, via the :mod:`privex.helpers.plugin` module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Accessing :class:`geoip2.database.Reader` via the plugin module::
>>> from privex.helpers import plugin
>>> geo = plugin.get_geoip('asn')
>>> as_data: geoip2.models.ASN = geo.asn('95.216.3.171')
>>> print(f"{as_data.autonomous_system_organization} (ASN: {as_data.autonomous_system_number})")
'Hetzner Online GmbH (ASN: 24940)'
>>> # To close the ASN database properly when you're done, call 'plugin.close_geoip' with 'asn'
>>> plugin.close_geoip('asn')
True
**Copyright**::
+===================================================+
| © 2020 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| License: X11 / MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2020 Privex Inc. ( https://www.privex.io )
"""
import logging
import attr
import geoip2.database
import geoip2.models
import geoip2.errors
from contextlib import contextmanager
from typing import Optional, Tuple, Generator
from privex.helpers import plugin
from privex.helpers.extras.attrs import AttribDictable
from privex.helpers.exceptions import GeoIPAddressNotFound
from privex.helpers.common import empty
from privex.helpers.types import IP_OR_STR
log = logging.getLogger(__name__)
__all__ = [
'GeoIPResult', 'geolocate_ip', 'geolocate_ips', 'cleanup_geoip', 'geoip_manager'
]
@attr.s
class GeoIPResult(AttribDictable):
    """Combined City + ASN GeoIP lookup result for a single IP address."""
    country = attr.ib(type=str, default=None)
    """Full English country name where this IP is based, e.g. ``Sweden``"""
    country_code = attr.ib(type=str, default=None)
    """Two letter ISO country code representing the country where this IP is based, e.g. ``SE``"""
    city = attr.ib(type=str, default=None)
    """Full English city name where this IP is based, e.g. ``Stockholm``"""
    postcode = attr.ib(type=str, default=None)
    """Estimated Postcode / ZIP code where the IP is located e.g. ``173 11``"""
    as_number = attr.ib(type=int, default=None)
    """The numeric AS number identifying the ISP / Organisation the IP belongs to, e.g. ``210083``"""
    as_name = attr.ib(type=str, default=None)
    """The string name of the ISP / Organisation the IP belongs to, e.g. ``Privex Inc.``"""
    ip_address = attr.ib(type=str, default=None)
    """The IP address the result is for"""
    network = attr.ib(type=str, default=None)
    """The network the IP belongs to, e.g. ``185.130.44.0/22``"""
    long = attr.ib(type=float, default=None)
    """An estimated longitude (part of co-ordinates) where the IP is based"""
    lat = attr.ib(type=float, default=None)
    """An estimated latitude (part of co-ordinates) where the IP is based"""
    geoasn_data = attr.ib(type=geoip2.models.ASN, default=None)
    """The raw object returned by :meth:`geoip2.database.Reader.asn` """
    geocity_data = attr.ib(type=geoip2.models.City, default=None)
    """The raw object returned by :meth:`geoip2.database.Reader.city` """
def geolocate_ip(addr: IP_OR_STR, throw=True) -> Optional[GeoIPResult]:
    """
    Looks up the IPv4/IPv6 address ``addr`` against GeoIP2 City + ASN, and returns a :class:`.GeoIPResult` containing the GeoIP data.
    Usage::
        >>> g = geolocate_ip('2a07:e00::333')
        >>> print(g.city, g.country, g.country_code, g.as_number, g.as_name, sep='\t')
        Stockholm   Sweden  SE  210083  Privex Inc.
        >>> g = geolocate_ip('8.8.4.4')
        None    United States   US  15169   Google LLC
    :param IP_OR_STR addr: An IPv4 or IPv6 address to geo-locate
    :param bool throw: (Default: ``True``) If ``True``, will raise :class:`.GeoIPAddressNotFound` if an IP address isn't found
                       in the GeoIP database. If ``False``, will simply return ``None`` if it's not found.
    :raises GeoIPAddressNotFound: When ``throw`` is ``True`` and ``addr`` can't be found in a GeoIP database.
    :raises ValueError: When ``addr`` is not a valid IP address.
    :return Optional[GeoIPResult] res: A :class:`.GeoIPResult` containing the GeoIP data for the IP - or ``None`` if ``throw`` is
             ``False``, and the IP address wasn't found in the database.
    """
    addr = str(addr)
    res = GeoIPResult()
    try:
        # --- City database lookup: country / city / postcode / lat-long ---
        # with geoip2.database.Reader(settings.GEOCITY) as g:
        g = plugin.get_geoip('city')
        try:
            response: geoip2.models.City = g.city(addr)
            res.geocity_data = response
            res.country_code = response.country.iso_code
            res.country = response.country.names.get('en', None)
            res.city = response.city.names.get('en', None)
            res.postcode = response.postal.code
            res.long = response.location.longitude
            res.lat = response.location.latitude
        except geoip2.errors.AddressNotFoundError as e:
            if throw:
                raise GeoIPAddressNotFound(str(e))
            return None
        except ValueError as e:
            # We always raise ValueError regardless of the 'throw' param - since ValueError
            # usually means the address is completely invalid.
            raise e
        except Exception as e:
            # Best-effort: any other City failure is logged, and the ASN
            # lookup below still runs.
            log.warning("Failed to resolve Country / City for %s - Reason: %s %s", addr, type(e), str(e))
        # --- ASN database lookup: ISP name / AS number / network ---
        g = plugin.get_geoip('asn')
        try:
            response: geoip2.models.ASN = g.asn(addr)
            res.as_name = response.autonomous_system_organization
            res.as_number = response.autonomous_system_number
            res.network = response.network
            res.ip_address = response.ip_address
            res.geoasn_data = response
        except geoip2.errors.AddressNotFoundError as e:
            if throw:
                raise GeoIPAddressNotFound(str(e))
        except ValueError as e:
            # We always raise ValueError regardless of the 'throw' param - since ValueError
            # usually means the address is completely invalid.
            raise e
        except Exception as e:
            log.warning("Failed to resolve ASN for %s - Reason: %s %s", addr, type(e), str(e))
    except Exception as e:
        # Unexpected failure (e.g. missing database) - log traceback and re-raise.
        log.exception("Serious error while resolving GeoIP for %s", addr)
        raise e
    return res
def geolocate_ips(*addrs, throw=False) -> Generator[Tuple[str, Optional[GeoIPResult]], None, None]:
    """
    Same as :func:`.geolocate_ip` but accepts multiple IP addresses, and returns the results as a generator.

    Usage::

        >>> for ip, g in geolocate_ips('185.130.44.5', '8.8.4.4', '2a07:e00::333'):
        ...     print(f"{ip:<20} -> {str(g.city):<15} {str(g.country):<15} ({g.as_number} {g.as_name})")
        185.130.44.5         -> Stockholm       Sweden          (210083 Privex Inc.)
        8.8.4.4              -> None            United States   (15169 Google LLC)
        2a07:e00::333        -> Stockholm       Sweden          (210083 Privex Inc.)
        >>> data = dict(geolocate_ips('185.130.44.5', '8.8.4.4', '2a07:e00::333'))
        >>> data['8.8.4.4'].country
        'United States'
        >>> data['2a07:e00::333'].as_name
        'Privex Inc.'

    :param IP_OR_STR addrs: One or more IPv4 or IPv6 addresses to geo-locate
    :param bool throw: (Default: ``False``) If ``True``, will raise :class:`.GeoIPAddressNotFound` if an IP address isn't found
                       in the GeoIP database. If ``False``, will simply return ``None`` if it's not found.
    :raises GeoIPAddressNotFound: When ``throw`` is ``True`` and one of the ``addrs`` can't be found in a GeoIP database.
    :raises ValueError: When ``throw`` is ``True`` and one of the ``addrs`` is not a valid IP address.
    :return Tuple[str, Optional[GeoIPResult]] res: A generator which returns tuples containing the matching IP
        address, and the :class:`.GeoIPResult` object containing the GeoIP data for the IP - or
        ``None`` if ``throw`` is ``False``, and the IP address wasn't found in the database.
    """
    for addr in addrs:
        try:
            res = geolocate_ip(addr, throw=throw)
        except Exception as e:
            # ValueError is already a subclass of Exception; no need to list it separately.
            if throw:
                raise e
            log.warning("Ignoring exception while geo-locating IP '%s': %s %s", addr, type(e), str(e))
            res = None
        if empty(res):
            # Previously this fell through after yielding, dereferencing None on the
            # next check and yielding a second tuple for the same address.
            yield (addr, None)
            continue
        # A result with no usable fields at all is treated as "not found".
        if all(empty(v) for v in (res.country, res.country_code, res.as_name,
                                  res.as_number, res.lat, res.long)):
            yield (addr, None)
            continue
        yield (addr, res)
def cleanup(geo_type: str = None):
    """
    With no arguments, closes and removes GeoIP city + asn + country from thread store.

    With the first argument ``geo_type`` specified (either 'city', 'asn' or 'country'), only that specific GeoIP2 instance
    will be closed and removed from the thread store.
    """
    if geo_type is not None:
        return plugin.close_geoip(geo_type)
    # No type given: close every reader and return the list of results.
    return [plugin.close_geoip(gt) for gt in ('city', 'asn', 'country')]
cleanup_geoip = cleanup
@contextmanager
def geoip_manager(geo_type: str = None) -> Optional[geoip2.database.Reader]:
    """
    Context manager which yields a GeoIP reader for ``geo_type`` ('city', 'asn' or
    'country') — or ``None`` when ``geo_type`` is empty — and guarantees the
    readers are closed afterwards.
    """
    try:
        if not empty(geo_type):
            yield plugin.get_geoip(geo_type)
        else:
            yield None
    finally:
        # Run cleanup even when the caller's block raises; the previous version
        # called cleanup() after the yield, so exceptions skipped it entirely.
        cleanup()
|
from torch import nn
from torch.nn import functional as F
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.modeling.make_layers import make_conv3x3
class ShapeClassMaskRCNNFPNFeatureExtractor(nn.Module):
    """
    Feature extractor for the shape classifier: pools ROI features from the
    FPN maps, then refines them with a stack of 3x3 conv + ReLU layers.
    """
    def __init__(self, cfg, in_channels):
        """
        Arguments:
            cfg: YACS configuration file for the Mask RCNN instance
            in_channels: channel count of the incoming feature maps
        """
        super(ShapeClassMaskRCNNFPNFeatureExtractor, self).__init__()
        head_cfg = cfg.MODEL.ROI_SHAPE_CLASS_HEAD
        pool_res = head_cfg.POOLER_RESOLUTION
        self.pooler = Pooler(output_size=(pool_res, pool_res),
                             scales=head_cfg.POOLER_SCALES,
                             sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO)
        # Conv-stack hyper-parameters are shared with the mask head config.
        mask_cfg = cfg.MODEL.ROI_MASK_HEAD
        self.blocks = []
        prev_channels = in_channels
        for idx, out_channels in enumerate(mask_cfg.CONV_LAYERS):
            name = "shape_class_fcn{}".format(idx)
            conv = make_conv3x3(
                prev_channels, out_channels,
                dilation=mask_cfg.DILATION, stride=1, use_gn=mask_cfg.USE_GN
            )
            self.add_module(name, conv)
            self.blocks.append(name)
            prev_channels = out_channels
    def forward(self, x, proposals):
        # Pool per-proposal features, then apply each registered conv + ReLU.
        x = self.pooler(x, proposals)
        for name in self.blocks:
            x = F.relu(getattr(self, name)(x))
        return x
# Registry mapping config names to feature-extractor classes for the
# shape-classification ROI head.
_ROI_SHAPE_CLASS_FEATURE_EXTRACTORS = {
    "ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
    "ShapeClassMaskRCNNFPNFeatureExtractor": ShapeClassMaskRCNNFPNFeatureExtractor,
}
def make_roi_shape_class_feature_extractor(cfg, in_channels):
    # Instantiate the extractor class named in the config.
    # Raises KeyError if the config names an unregistered extractor.
    func = _ROI_SHAPE_CLASS_FEATURE_EXTRACTORS[cfg.MODEL.ROI_SHAPE_CLASS_HEAD.FEATURE_EXTRACTOR]
    return func(cfg, in_channels)
|
# IMPORTANT: as of now, this script does NOT support more than one unknown person on each frame. Please keep that in mind.
#
# The following script is used to annotate the real face positions on the video.
# Specify the name of the video in the "test" directory in the command line.
# In case it isn't specified, all videos in the "test" folder will be opened one by one.
#
# Examples:
# 'python utilities\videoannotations.py';
# 'python utilities\videoannotations.py christian_bale1_fin_s.mp4'.
#
# At first, you have to specify the indexes of available persons that are present in the video selected, separated by the space symbol.
# Then, the video will be opened exactly the number of selected persons times.
# You will have to select only one person's face on each play.
#
# Select the face with mouse. Only one face may be selected on the frame. Press SPACE or L key to continue.
# After that, tracker will follow the face until it fails to do so.
# You will have to select the face again or skip the frame until the one where you would want to select the face.
# To skip the frame, do not select anything, and press SPACE or L key.
# Press K key to skip 5 frames. Press J key to go to the previous frame.
# The annotations are saved in the same directory in "annotations" subfolder in the folder that matches the name of the video.
#
# It shouldn't take more than one minute to annotate one video of less than 20 seconds.
#
# OpenCV library must be installed. To install it, run the "pip install opencv-python" command in the terminal.
import cv2
from glob import glob
from os.path import isdir
from os import makedirs
from sys import argv
class IncorrectNumberOfArguments(Exception):
    """Raised when the script is launched with more than one command-line argument."""
    pass
def square_params(x_initial, x, y_initial, y):
    """
    Calculates square parameters acquired from the mouse movements for rendering the square on the image.

    The square's side equals the vertical drag distance and it is centred
    horizontally on the current ``x``. ``x_initial`` is accepted for
    callback-signature compatibility but does not affect the result.
    """
    side = abs(y - y_initial)
    half = side / 2
    left = round(x - half)
    right = round(x + half)
    top = min(y, y_initial)
    bottom = max(y, y_initial)
    return (left, top), (right, bottom), side
def annotations_filename(video_name, person_name):
    """
    Returns the filename where the annotations will be saved.

    Creates the per-video annotations directory when it does not exist yet.
    """
    # Last path component without the 4-char ".mp4" extension.
    stem = video_name.split("\\")[-1][:-4]
    # exist_ok avoids the check-then-create race of the old isdir() + makedirs().
    makedirs(f"test\\annotations\\{stem}", exist_ok=True)
    return f"test\\annotations\\{stem}\\annotation_{person_name}.txt"
if __name__ == "__main__":
    if len(argv) not in (1, 2):
        raise IncorrectNumberOfArguments('Only the path to video may be specified.')
    # Mouse-drag state shared with the draw_square() callbacks via `global`.
    is_drawing = False
    x_initial = -1
    y_initial = -1
    if len(argv) == 1:
        video_names = glob('test\\*.mp4')
    elif len(argv) == 2:
        video_names = [f"test\\{argv[1]}"]
    # Load names of persons available in the dataset:
    instruction_names = glob('training_set\\*')
    instruction_names = list(map(lambda x: x.split('\\')[-1], instruction_names)) + ['Unknown']
    instruction_list = '\n'.join([f"{name}: {index}" for index, name in enumerate(instruction_names)])
    for video_name in video_names:
        instructions = f"\nPlease specify the names of people present in the \"{video_name}\""
        instructions += ", separated with space, according to the following table:\n"
        instructions += f"{instruction_list}\n"
        names = input(instructions)
        names = names.split()
        exit_video = False
        write_to_file = True
        # The video is replayed once per selected person; each pass annotates
        # exactly one person's face.
        for person_name in names:
            print(instruction_names[int(person_name)])
            tracker = cv2.TrackerKCF_create()
            tracked = False
            capture = cv2.VideoCapture(video_name)
            annotations_string = f"{person_name}\n"
            face_box = 0
            while capture.isOpened():
                if exit_video:
                    # ESC was pressed: abort the remaining passes without saving.
                    # NOTE(review): `exit_video = True` here is a no-op (it is
                    # already True at this point) — confirm intent.
                    exit_video = True
                    write_to_file = False
                    break
                ret, frame = capture.read()
                if not ret:
                    print("Video end. Exiting ...")
                    break
                if tracked:
                    ok, face_box = tracker.update(frame)
                    if not ok:
                        # Tracker lost the face: fall back to manual selection.
                        cache = frame.copy()
                        top_corner = -1
                        bottom_corner = -1
                        side = -1
                        already_drawn = False
                        def draw_square(event, x, y, flags, param):
                            """
                            Function that draws the square on the frame.
                            """
                            global is_drawing, x_initial, y_initial
                            global frame, cache
                            global top_corner, bottom_corner, side
                            global already_drawn
                            if event == cv2.EVENT_LBUTTONDOWN:
                                if already_drawn:
                                    cv2.putText(frame, 'There must be only one face', (50, 50),
                                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
                                else:
                                    is_drawing = True
                                    x_initial = x
                                    y_initial = y
                            elif event == cv2.EVENT_LBUTTONUP:
                                if already_drawn:
                                    cv2.putText(frame, 'Only one face must be selected', (50, 50),
                                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
                                else:
                                    is_drawing = False
                                    top_corner, bottom_corner, side = square_params(x_initial, x, y_initial, y)
                                    cv2.rectangle(frame, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
                                    already_drawn = True
                            elif event == cv2.EVENT_MOUSEMOVE:
                                if is_drawing:
                                    # Redraw from the cached clean frame so the
                                    # rubber-band rectangle does not smear.
                                    frame = cache.copy()
                                    top_corner, bottom_corner, side = square_params(x_initial, x, y_initial, y)
                                    cv2.rectangle(frame, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
                        cv2.namedWindow(video_name)
                        cv2.setMouseCallback(video_name, draw_square)
                        while True:
                            cv2.imshow(video_name, frame)
                            keyboard = cv2.waitKey(1) & 0xFF
                            if keyboard in (32, ord('l')): # SPACE key or L key
                                break
                            elif keyboard == 27: # ESCAPE key
                                exit_video = True
                                break
                            elif keyboard == ord('j'): # J key: go back one frame
                                next_frame = capture.get(cv2.CAP_PROP_POS_FRAMES)
                                previous_frame = next_frame - 2
                                capture.set(cv2.CAP_PROP_POS_FRAMES, previous_frame)
                                prev_frame = True
                                break
                            elif keyboard == ord('k'): # K key: skip 5 frames
                                next_frame = capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
                                capture.set(cv2.CAP_PROP_POS_FRAMES, next_frame + 5)
                                skip_frames = True
                                tracked = False
                                break
                        if already_drawn:
                            # A face was selected: record it and re-seed the tracker.
                            annotations_string += f"{top_corner[0]} {top_corner[1]} {side}\n"
                            face_box = (top_corner[0], top_corner[1], side, side)
                            tracker = cv2.TrackerKCF_create()
                            tracker.init(frame, face_box)
                            tracked = True
                        else:
                            if prev_frame:
                                prev_frame = False
                                annotations_string = annotations_string[:-1]
                            elif skip_frames:
                                skip_frames = False
                                # Five blank lines for the five skipped frames.
                                annotations_string += "\n\n\n\n\n"
                            else:
                                annotations_string += "\n"
                            tracked = False
                    elif ok:
                        # Tracker still following the face: record its box and show it.
                        annotations_string += f"{face_box[0]} {face_box[1]} {face_box[2]}\n"
                        top_corner = (int(face_box[0]), int(face_box[1]))
                        bottom_corner = (int(face_box[0] + face_box[2]), int(face_box[1] + face_box[3]))
                        cv2.rectangle(frame, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
                        cv2.imshow(video_name, frame)
                        cv2.waitKey(1)
                elif not tracked:
                    # Giant chunk of code that had to be repeated because of difficulties with dealing with global variables:
                    cache = frame.copy()
                    top_corner = -1
                    bottom_corner = -1
                    side = -1
                    already_drawn = False
                    def draw_square(event, x, y, flags, param):
                        global is_drawing, x_initial, y_initial
                        global frame, cache
                        global top_corner, bottom_corner, side
                        global already_drawn
                        if event == cv2.EVENT_LBUTTONDOWN:
                            if already_drawn:
                                cv2.putText(frame, 'There must be only one face', (50, 50),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
                            else:
                                is_drawing = True
                                x_initial = x
                                y_initial = y
                        elif event == cv2.EVENT_LBUTTONUP:
                            if already_drawn:
                                cv2.putText(frame, 'Only one face must be selected', (50, 50),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
                            else:
                                is_drawing = False
                                top_corner, bottom_corner, side = square_params(x_initial, x, y_initial, y)
                                cv2.rectangle(frame, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
                                already_drawn = True
                        elif event == cv2.EVENT_MOUSEMOVE:
                            if is_drawing:
                                frame = cache.copy()
                                top_corner, bottom_corner, side = square_params(x_initial, x, y_initial, y)
                                cv2.rectangle(frame, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
                    cv2.namedWindow(video_name)
                    cv2.setMouseCallback(video_name, draw_square)
                    prev_frame = False
                    skip_frames = False
                    while True:
                        cv2.imshow(video_name, frame)
                        keyboard = cv2.waitKey(1) & 0xFF
                        if keyboard in (32, ord('l')): # SPACE key or L key
                            break
                        elif keyboard == 27: # ESCAPE key
                            exit_video = True
                            break
                        elif keyboard == ord('j'): # J key: go back one frame
                            next_frame = capture.get(cv2.CAP_PROP_POS_FRAMES)
                            previous_frame = next_frame - 2
                            capture.set(cv2.CAP_PROP_POS_FRAMES, previous_frame)
                            prev_frame = True
                            break
                        elif keyboard == ord('k'): # K key: skip 5 frames
                            next_frame = capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
                            capture.set(cv2.CAP_PROP_POS_FRAMES, next_frame + 5)
                            skip_frames = True
                            tracked = False
                            break
                    if already_drawn:
                        annotations_string += f"{top_corner[0]} {top_corner[1]} {side}\n"
                        face_box = (top_corner[0], top_corner[1], side, side)
                        tracker = cv2.TrackerKCF_create()
                        tracker.init(frame, face_box)
                        tracked = True
                    else:
                        if prev_frame:
                            prev_frame = False
                            annotations_string = annotations_string[:-1]
                        elif skip_frames:
                            skip_frames = False
                            annotations_string += "\n\n\n\n\n"
                        else:
                            annotations_string += "\n"
                        tracked = False
            cv2.destroyAllWindows()
            if write_to_file:
                # Save annotations to file:
                with open(annotations_filename(video_name, person_name), "w") as annotations_file:
                    annotations_file.write(annotations_string[:-1])
            else:
                # Re-arm saving for the next person after an aborted pass.
                write_to_file = True
# Copyright 2021, Dylan Roger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='markdown-spring-shell-documentation',
    packages=find_packages(),
    version='1.0.0',
    description='A markdown extension that creates a documentation from Java classes using Spring Shell or https://github.com/fonimus/ssh-shell-spring-boot',
    long_description=long_description,
    author='Dylan Roger',
    author_email='dyl.roger@gmail.com',
    url='https://github.com/dylan-roger/markdown-spring-shell-documentation',
    keywords=['Markdown', 'ssh', 'plugin', 'shell', 'extension'],
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Software Development :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Filters',
        'Topic :: Text Processing :: Markup :: HTML',
        # Pick your license as you wish (should match "license" above)
        # NOTE(review): classifier says GPLv3 while the file-header text is
        # MIT-style and no `license=` argument is passed — confirm which applies.
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
    ],
    install_requires=['markdown', 'javalang']
)
|
# encoding: utf-8
#
# Copyright (c) 2017 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2017-12-15
#
"""Common helper functions."""
from __future__ import print_function, absolute_import
from contextlib import contextmanager
from datetime import date, datetime
from HTMLParser import HTMLParser
import logging
import os
from os.path import getmtime
import re
from shutil import copyfile
import time
from unicodedata import normalize
log = logging.getLogger(__name__)

# Regex to match Zotero date values
match_date = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d).*').match

SQLITE_DATE_FMT = '%Y-%m-%d %H:%M:%S'


def dt2sqlite(dt):
    """Convert `datetime` to Sqlite time string.

    Format string is `SQLITE_DATE_FMT`.

    Args:
        dt (datetime): `datetime` object to convert.

    Returns:
        str: Sqlite-formatted datetime string.
    """
    fmt = SQLITE_DATE_FMT
    return dt.strftime(fmt)
def sqlite2dt(s):
    """Convert Sqlite time string to `datetime` object.

    Format string is `util.SQLITE_DATE_FMT`. Microseconds
    are dropped on the floor.

    Args:
        s (str): Sqlite datetime string.

    Returns:
        datetime: `datetime` equivalent of `s`.
    """
    # Discard any fractional-seconds suffix before parsing.
    timestamp, _, _fraction = s.partition('.')
    return datetime.strptime(timestamp, SQLITE_DATE_FMT)
class HTMLText(HTMLParser):
    """Extract text from HTML.

    Strips all tags from HTML.

    Attributes:
        data (list): Accumlated text content.
    """

    @classmethod
    def strip(cls, html):
        """Extract text from HTML.

        Args:
            html (unicode): HTML to process.
            decode (bool, optional): Decode from UTF-8 to Unicode.

        Returns:
            unicode: Text content of HTML.
        """
        p = cls()
        p.feed(html)
        # unicode(p) dispatches to __unicode__, i.e. the joined text content.
        return unicode(p)

    def __init__(self):
        """Create new HTMLText."""
        # Python 2 HTMLParser is an old-style class; reset() initialises the
        # parser state in place of calling HTMLParser.__init__.
        # NOTE(review): presumably equivalent here — confirm on the py2 stdlib.
        self.reset()
        self.data = []

    def handle_data(self, s):
        """Callback for contents of HTML tags.

        Args:
            s (unicode): Text from between HTML tags.
        """
        self.data.append(unicodify(s))

    def __str__(self):
        """Return text UTF-8 encoded."""
        return unicode(self).encode('utf-8', 'replace')

    def __unicode__(self):
        """Return text as Unicode."""
        return u''.join(self.data)
def strip_tags(html):
    """Strip tags from HTML.

    Thin module-level convenience wrapper around `HTMLText.strip`.

    Args:
        html (unicode): HTML text.

    Returns:
        unicode: Text contained in HTML.
    """
    return HTMLText.strip(html)
def copyifnewer(source, copy):
    """Replace path `copy` with a copy of file at `source`.

    Returns path to `copy`, overwriting it first with a copy of
    `source` if `source` is newer or if `copy` doesn't exist.

    Args:
        source (str): Path to original file
        copy (str): Path to copy

    Returns:
        str: Path to copy
    """
    needs_copy = not os.path.exists(copy) or getmtime(source) > getmtime(copy)
    if needs_copy:
        log.debug('[util] copying %r to %r ...',
                  shortpath(source), shortpath(copy))
        copyfile(source, copy)
    return copy
def unicodify(s, encoding='utf-8'):
    """Ensure ``s`` is Unicode.

    Returns Unicode unchanged, decodes bytestrings and calls `unicode()`
    on anything else.

    Args:
        s (basestring): String to convert to Unicode.
        encoding (str, optional): Encoding to use to decode bytestrings.

    Returns:
        unicode: Decoded Unicode string.
    """
    # Python 2 only: `unicode` and byte-`str` are distinct types here.
    if isinstance(s, unicode):
        return s
    # Byte strings are decoded; undecodable bytes are replaced, not raised.
    if isinstance(s, str):
        return s.decode(encoding, 'replace')
    # Anything else (numbers, objects, ...) goes through unicode(), which
    # dispatches to __unicode__/__str__.
    return unicode(s)
def utf8encode(s):
    """Ensure string is an encoded bytestring."""
    # Python 2 only: byte-`str` is already encoded, pass it through unchanged.
    if isinstance(s, str):
        return s
    # Unicode text is encoded to UTF-8; unencodable characters are replaced.
    if isinstance(s, unicode):
        return s.encode('utf-8', 'replace')
    # Non-string objects: stringify via str(), yielding a bytestring on py2.
    return str(s)
def asciify(s):
    """Ensure string only contains ASCII characters.

    Args:
        s (basestring): Unicode or bytestring.

    Returns:
        unicode: String containing only ASCII characters.
    """
    # NFD decomposition splits accented characters into base + combining marks,
    # so dropping non-ASCII bytes keeps the base letters.
    decomposed = normalize('NFD', unicodify(s))
    ascii_bytes = decomposed.encode('us-ascii', 'ignore')
    return unicodify(ascii_bytes)
def parse_date(datestr):
    """Parse a Zotero date into YYYY-MM-DD, YYYY-MM or YYYY format.

    Zotero dates are in the format "YYYY-MM-DD <in words>",
    where <in words> may be the year, month and year or full
    date depending on whether month and day are set.

    Args:
        datestr (str): Date from Zotero database

    Returns:
        unicode: Parsed date if ``datestr``.
    """
    if not datestr:
        return None
    m = match_date(datestr)
    if not m:
        return datestr[:4]  # YYYY
    # Joining regex groups cannot raise ValueError; the old try/except
    # around this line was dead code and has been removed.
    return u'-'.join(m.groups())
def json_serialise(obj):
    """Serialise `date` objects.

    JSON serialisation helper to be passed as the ``default`` argument
    to `json.dump`.

    Args:
        obj (object): Anything JSON can't serialise

    Returns:
        str: ISO date format

    Raises:
        TypeError: Raised if ``obj`` is not a `datetime.date`
    """
    if not isinstance(obj, date):
        raise TypeError('Type %s is not serialisable' % type(obj))
    return obj.isoformat()
# Pre-bound substitution helpers: replace unsafe characters, then collapse
# any run of dashes into one.
_subunsafe = re.compile(r'[^a-z0-9\.-]').sub
_subdashes = re.compile(r'-+').sub

def safename(name):
    """Make a name filesystem-safe."""
    cleaned = asciify(name).lower()
    cleaned = _subdashes('-', _subunsafe('-', cleaned))
    return unicodify(cleaned)
def shortpath(p):
    """Replace ``$HOME`` in path with ~."""
    if p:
        home = os.path.expanduser(u'~')
        return p.replace(home, '~')
    # Empty/None input is returned unchanged.
    return p
@contextmanager
def timed(name=None):
    """Context manager that logs execution time.

    The elapsed time is logged even when the wrapped block raises, so timing
    information is not lost on error paths.
    """
    name = name or ''
    start_time = time.time()
    try:
        yield
    finally:
        log.info('[%0.2fs] %s', time.time() - start_time, name)
def time_since(ts):
    """Human-readable time since timestamp ``ts``."""
    if not ts:
        return 'never'
    units = ('secs', 'mins', 'hours')
    delta = time.time() - ts
    idx = 0
    # Promote to the next unit while the value still exceeds a minute/hour.
    while idx < len(units) - 1 and delta > 60:
        delta /= 60
        idx += 1
    return '{:0.1f} {} ago'.format(delta, units[idx])
|
import tensorflow as tf
from ...utils.nn import weight, bias
from ...utils.attn_gru import AttnGRU
class EpisodeModule:
    """ Inner GRU module in episodic memory that creates episode vector.

    Scores each fact against the question and the previous memory, and uses an
    attention-gated GRU to accumulate the facts into a single episode vector.
    """
    def __init__(self, num_hidden, question, facts, is_training, bn):
        # question: question representation; transposed below to [N, d].
        # facts: fact tensor; transposed and unstacked into F x [d, N].
        # is_training / bn: flags forwarded to AttnGRU.
        self.question = question
        self.facts = tf.unstack(tf.transpose(facts, [1, 2, 0])) # F x [d, N]
        # transposing for attention
        self.question_transposed = tf.transpose(question)
        self.facts_transposed = [tf.transpose(f) for f in self.facts] # F x [N, d]
        # parameters of the two-layer attention scorer (see attention()).
        self.w1 = weight('w1', [num_hidden, 4 * num_hidden])
        self.b1 = bias('b1', [num_hidden, 1])
        self.w2 = weight('w2', [1, num_hidden])
        self.b2 = bias('b2', [1, 1])
        self.gru = AttnGRU(num_hidden, is_training, bn)
    @property
    def init_state(self):
        # Zero state shaped like one transposed fact: [N, d].
        return tf.zeros_like(self.facts_transposed[0])
    def new(self, memory):
        """ Creates new episode vector (will feed into Episodic Memory GRU)
        :param memory: Previous memory vector
        :return: episode vector
        """
        state = self.init_state
        memory = tf.transpose(memory) # [N, D]
        with tf.variable_scope('AttnGate') as scope:
            for f, f_t in zip(self.facts, self.facts_transposed):
                g = self.attention(f, memory)
                state = self.gru(f_t, state, g)
                scope.reuse_variables() # share params across all facts
        return state
    def attention(self, f, m):
        """ Attention mechanism. For details, see paper.
        :param f: A fact vector [N, D] at timestep
        :param m: Previous memory vector [N, D]
        :return: attention vector at timestep
        """
        with tf.variable_scope('attention'):
            # NOTE THAT instead of L1 norm we used L2
            q = self.question_transposed
            # Interaction features between fact, question and memory.
            vec = tf.concat([f * q, f * m, tf.abs(f - q), tf.abs(f - m)], axis=0) # [4*d, N]
            # attention learning: w1 is [d, 4d], so the product is [d, N]
            # (the original comment said [N, d], which contradicts the shapes above).
            l1 = tf.matmul(self.w1, vec) + self.b1 # [d, N]
            l1 = tf.nn.tanh(l1)
            l2 = tf.matmul(self.w2, l1) + self.b2
            l2 = tf.nn.softmax(l2)
            return tf.transpose(l2)
|
"""Config flow for Palgate integration."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_DEVICE_ID, CONF_TOKEN
from homeassistant.data_entry_flow import FlowResult
from .const import DOMAIN as PALGATE_DOMAIN
SCHEMA = vol.Schema(
{
vol.Required(CONF_DEVICE_ID): str,
vol.Required(CONF_TOKEN): str,
}
)
class PollenvarselFlowHandler(config_entries.ConfigFlow, domain=PALGATE_DOMAIN):
    """Config flow for Palgate.

    NOTE(review): the class name (and the original docstring) said
    "Pollenvarsel" — apparently copied from another integration. Only the
    docstring is corrected here; renaming the class could break references.
    """
    VERSION = 1
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initialized by the user."""
        if user_input is not None:
            device_id: str = user_input[CONF_DEVICE_ID]
            # Abort if a config entry for this device already exists.
            if await self._async_existing_devices(device_id):
                return self.async_abort(reason="already_configured")
            await self.async_set_unique_id(device_id)
            self._abort_if_unique_id_configured()
            return self.async_create_entry(
                title=device_id.title(),
                data=user_input,
            )
        # First call (no input yet): show the device-id / token form.
        return self.async_show_form(
            step_id="user",
            data_schema=SCHEMA,
            errors={},
        )
    async def _async_existing_devices(self, area: str) -> bool:
        """Find existing devices."""
        existing_devices = [
            f"{entry.data.get(CONF_DEVICE_ID)}"
            for entry in self._async_current_entries()
        ]
        return area in existing_devices
|
#!/usr/bin/env python
""" ROS node that approximates velocity based on actuated values
"""
import math
import rospy
from svea_msgs.msg import lli_ctrl
from geometry_msgs.msg import TwistWithCovarianceStamped
from svea.states import SVEAControlValues
__author__ = "Tobias Bolin"
__copyright__ = "Copyright 2020, Tobias Bolin"
__credits__ = ["Tobias Bolin"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Tobia Bolin"
__email__ = "tbolin@kth.se"
__status__ = "Beta"
class Republish():
    """Estimate velocity based on actuation values

    Subscribes to the actuated low-level control messages, simulates the ESC's
    first-order response to estimate linear velocity, applies a bicycle model
    for yaw rate, and publishes a TwistWithCovarianceStamped.
    """
    # assumed max velocity
    # By testing, the max velocity in Gear 0 is around 1.7 m/s.
    # The max velocity in Gear 1 is around 3.6 m/s.
    MAX_SPEED_0 = 1.7 # [m/s]
    MAX_SPEED_1 = 3.6 # [m/s]
    MAX_STEERING_ANGLE = 40*math.pi/180
    # Actuation magnitudes below this are treated as zero velocity.
    VELOCITY_DEAD_ZONE = 15
    # First-order ESC time constants, see _sim_esc().
    TAU0 = 0.1
    TAU1 = 0.4
    def __init__(self):
        ## Pull necessary ROS parameters from launch file:
        # Read control message topic
        self.ctrl_msg_top = rospy.get_param("~ctrl_message_topic", "lli/ctrl_actuated")
        # Read twist message topic
        self.twist_msg_top = rospy.get_param("~twist_message_topic", "actuation_twist")
        # Read vehicle frame id topic
        self.vehicle_frame_id = rospy.get_param("~frame_id", "base_link")
        # Read max speed for gear 0 and 1
        self.max_speed_0 = rospy.get_param("~max_speed_0", self.MAX_SPEED_0)
        self.max_speed_1 = rospy.get_param("~max_speed_1", self.MAX_SPEED_1)
        # Read max steering angle
        self.max_steering_angle = rospy.get_param("~max_steering_angle", self.MAX_STEERING_ANGLE)
        # Read covariance values
        self.lin_cov = rospy.get_param("~linear_covariance", 0.1)
        self.ang_cov = rospy.get_param("~angular_covariance", 0.1)
        # Publishing rate
        self.rate = rospy.get_param("~rate", 50)
        # Acceleration coefficient for gear 0 and gear 1
        self.tau0 = rospy.get_param("~tau0", self.TAU0)
        self.tau1 = rospy.get_param("~tau1", self.TAU1)
        # Initialize class variables
        self.twist_msg = TwistWithCovarianceStamped()
        self.twist_msg.header.frame_id = self.vehicle_frame_id
        self.twist_msg.twist.covariance = self.cov_matrix_build()
        self._is_reverse = False
        self._last_calc_time = None
        self._actuation_values = SVEAControlValues()
        self.velocity = 0.0
        # Establish subscription to control message
        rospy.Subscriber(self.ctrl_msg_top, lli_ctrl, self.ctrl_msg_callback)
        # Establish publisher of converted Twist message
        self.twist_pub = rospy.Publisher(
            self.twist_msg_top,
            TwistWithCovarianceStamped,
            queue_size=10)
    def ctrl_calc_and_pub(self):
        """Publish the estimated twist at ``self.rate`` Hz until ROS shutdown.

        Blocks for the lifetime of the node.
        """
        # initialize message
        rate = rospy.Rate(self.rate)
        while not rospy.is_shutdown():
            # Gear is None until the first control message has arrived.
            if self._actuation_values.gear is not None:
                c_ang = self._steer_actuation_to_rad(self._actuation_values.steering)
                # Apply Bicycyle Model
                wheelbase = .32 # in meters
                vel = self.calc_current_velocity()
                B = math.atan2(math.tan(c_ang), 2)
                ang_vel = (vel/(wheelbase/2)) * math.sin(B)
                # Build Header for current time stamp
                self.twist_msg.header.seq += 1
                self.twist_msg.header.stamp = rospy.Time.now()
                # Build Twist using bicycle model
                self.twist_msg.twist.twist.linear.x = vel
                self.twist_msg.twist.twist.angular.z = ang_vel
                # Publish message
                self.twist_pub.publish(self.twist_msg)
            rate.sleep()
        rospy.spin()
    def cov_matrix_build(self):
        """Build the fixed 6x6 row-major covariance matrix for the twist message."""
        self.cov_matrix = [self.lin_cov, 0.0, 0.0, 0.0, 0.0, 0.0,
                           0.0, self.lin_cov, 0.0, 0.0, 0.0, 0.0,
                           0.0, 0.0, self.lin_cov, 0.0, 0.0, 0.0,
                           0.0, 0.0, 0.0, self.ang_cov, 0.0, 0.0,
                           0.0, 0.0, 0.0, 0.0, self.ang_cov, 0.0,
                           0.0, 0.0, 0.0, 0.0, 0.0, self.ang_cov]
        return self.cov_matrix
    # Callback for the actuated control subscription (lli_ctrl messages);
    # the original comment mentioning fiducial poses / aruco_detect was stale.
    def ctrl_msg_callback(self, ctrl_msg):
        self._is_reverse = self._detect_reverse_state(ctrl_msg)
        self.velocity = self.calc_current_velocity()
        self._actuation_values.ctrl_msg = ctrl_msg
    def _detect_reverse_state(self, msg):
        """Infer whether the ESC is in reverse from consecutive actuation values."""
        dead_zone = 5 # velocities with abs values <= than this = 0 to ESC
        velocity = msg.velocity
        previous_velocity = self._actuation_values.velocity
        if velocity > dead_zone or previous_velocity == -128:
            is_reverse = False
            if self._is_reverse:
                debug_str = "is_reverse changed to False, velocity: {},\tprevious_velocity: {}"
                rospy.logdebug(debug_str.format(velocity, previous_velocity))
        elif (previous_velocity < -dead_zone
              and abs(velocity) <= dead_zone):
            is_reverse = True
            if not self._is_reverse:
                debug_str = "is_reverse changed to True, velocity: {},\tprevious_velocity: {}"
                rospy.logdebug(debug_str.format(velocity, previous_velocity))
        else:
            # Ambiguous transition: keep the previous reverse state.
            is_reverse = self._is_reverse
        return is_reverse
    def calc_current_velocity(self):
        """Integrate the simulated ESC acceleration to estimate velocity [m/s]."""
        act_values = self._actuation_values
        time_now = rospy.Time.now()
        if self._last_calc_time is not None:
            dt = (time_now - self._last_calc_time).to_sec()
        else:
            # First call: assume one nominal period has elapsed.
            dt = 0.1
        self._last_calc_time = time_now
        # NOTE(review): presumably valid_range bounds legal actuation
        # magnitudes — confirm against SVEAControlValues.
        if (abs(act_values.velocity) < act_values.valid_range):
            setpoint_velocity = self._vel_actuation_to_mps(act_values.velocity)
            acc = self._sim_esc(self.velocity, setpoint_velocity)
            self.velocity += acc * dt
            # Without reverse engaged the vehicle cannot move backwards.
            if self.velocity < 0.0 and not self._is_reverse:
                self.velocity = 0.0
        return self.velocity
    def _sim_esc(self, velocity, target_velocity):
        # simulates esc dynamics: first-order lag towards the setpoint
        # NOTE(review): tau is selected on the truthiness of the *velocity*
        # actuation value, yet TAU0/TAU1 are documented as per-gear constants —
        # presumably this should test self._actuation_values.gear; confirm.
        tau = self.tau1 if self._actuation_values.velocity else self.tau0
        return 1/tau * (target_velocity - velocity)
    def _steer_actuation_to_rad(self, steering):
        """Convert steering actuation value to radians"""
        steering = float(steering)
        steer_percent = steering/127.0 * self.max_steering_angle
        steer_percent = -steer_percent # steering flipped
        return steer_percent
    def _vel_actuation_to_mps(self, vel_actuation):
        """Translate actuation value to a velocity in m/s
        based on current gear and assumed max speed.

        :param vel_actuation: velocity actuation value
        :type vel_actuation: int
        :return: steady state velocity in m/s based on actuation value
        :rtype: float
        """
        # max_speed below is really m/s *per actuation unit* (full scale / 127).
        if abs(vel_actuation) < self.VELOCITY_DEAD_ZONE:
            max_speed = 0
        elif self._actuation_values.gear == 0:
            max_speed = self.max_speed_0 / 127.0
        elif self._actuation_values.gear == 1:
            max_speed = self.max_speed_1 / 127.0
        # NOTE(review): if gear is neither 0 nor 1 (e.g. None), max_speed is
        # unbound and this raises UnboundLocalError — confirm gear is always set.
        return vel_actuation * max_speed
if __name__ == '__main__':
    rospy.init_node('actuation_to_twist', anonymous=False)
    ctt = Republish()
    # Log before entering the publishing loop: ctrl_calc_and_pub() blocks until
    # shutdown, so the original log call placed after it was unreachable.
    rospy.loginfo("actuation_to_twist node successfuly initilized")
    ctt.ctrl_calc_and_pub()
# Copyright (c) 2019-2021 Tobias Bolin
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Other utilities."""
import os
import sys
try:
import winsound
except ModuleNotFoundError:
pass
def makeSound():
    """Make a sound for two seconds.

    On Linux you need to install sox:
        sudo apt install sox
    On Windows the standard-library ``winsound`` module is used
    (it is not a pip package; the import may still fail on non-Windows
    Python builds, which is handled below).
    """
    uname = sys.platform.lower()
    if os.name == 'nt':
        uname = 'win'
    if uname.startswith('linux'):
        uname = 'linux'
    duration = 2000  # milliseconds
    freq = 440  # Hz
    if uname == 'win':
        try:
            winsound.Beep(freq, duration)
        except (NameError, RuntimeError):
            # NameError: the winsound import failed at module load time.
            # RuntimeError: winsound could not play the beep.
            print('Warning: myModules.makeSound cannot generate sound.')
    else:
        try:
            # sox's `play` expects the duration in seconds, hence /1000.
            os.system('play -nq -t alsa synth {} sine {}'.format(duration / 1000, freq))
        except OSError:
            print('Warning: myModules.makeSound cannot generate sound.')
def sayOutLoud(message):
    """Speak *message* out loud via speech-dispatcher's spd-say.

    You need to install the speech-dispatcher package in Ubuntu (or the
    corresponding package on other distributions).

    :param message: text to speak; converted to str before use.
    """
    import shlex
    # Quote the message before interpolating it into the shell command;
    # the old string concatenation allowed shell injection via `message`.
    os.system('spd-say ' + shlex.quote(str(message)))
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer.

    Args:
        question (str): string that is presented to the user.
        default ('yes', 'no' or None): presumed answer if the user just
            hits <Enter>. If None an answer is required of the user.

    Returns:
        True for "yes" or False for "no".

    Raises:
        ValueError: if `default` is not 'yes', 'no' or None.
    """
    # The answer is lower-cased before lookup, so lower-case keys suffice.
    # The old table wrongly mapped "No"/"NO"/"N" to True (and its
    # upper-case keys were unreachable after .lower() anyway).
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
|
import json
class Tree:
    """Root container of the course hierarchy: holds a list of topics."""
    def __init__(self):
        # Topic instances are appended by external code; none created here.
        self.topics = []
class Topic:
    """A topic in the tree: holds a list of lessons."""
    def __init__(self):
        # Lesson instances are appended by external code; none created here.
        self.lessons = []
class Lesson:
    """A single lesson, loadable from a JSON file."""
    def __init__(self):
        # Populated by load(); None until a file has been loaded.
        self.title = None
        self.description = None
    def load(self, filename):
        """Load the lesson's title and description from a JSON file.

        :param filename: path to a JSON file containing at least the
            keys 'title' and 'description'.
        """
        # The old code passed the path string straight to json.load
        # (which expects a file object) and then read dict entries as
        # attributes; open the file and use key access instead.
        with open(filename) as f:
            lesson = json.load(f)
        self.title = lesson['title']
        self.description = lesson['description']
|
##PyBank
# Reads monthly budget data and reports totals, average month-over-month
# change, and the greatest single-month increase/decrease, to the terminal
# and to financial_analysis.txt.
# Dependencies
import csv
import os
# File input
PyBank = os.path.join("Resources", "budget_data.csv")
# Variables & counters
profit_loss = []   # raw CSV rows: [date, profit/loss]
total = 0          # net total of all profit/loss values
total_months = 0   # number of data rows (months)
subtract_MoM = 0   # month-over-month change of the current pair
tot_MoM = 0        # sum of all month-over-month changes
Avg_MoM = 0        # average month-over-month change
with open(PyBank) as csvfile:
    budget_reader = csv.reader(csvfile, delimiter=',')
    next(budget_reader)  # skip the header row
    for line in budget_reader:
        profit_loss.append(line)
        # The total net amount of "Profit/Losses"
        total += int(line[1])
        # The total number of months included in the dataset
        total_months += 1
# Seed the extremes with the latest month-over-month change, and the month
# labels with the latest month (previously the labels could stay unbound
# and crash the prints below if no strictly greater/smaller change existed).
max_decrease = int(profit_loss[total_months - 1][1]) - int(profit_loss[total_months - 2][1])
max_increase = int(profit_loss[total_months - 1][1]) - int(profit_loss[total_months - 2][1])
min_month_yr = profit_loss[total_months - 1][0]
max_month_yr = profit_loss[total_months - 1][0]
# Changes in "Profit/Losses" between months (walk the list backwards)
for i in range(total_months, 1, -1):
    subtract_MoM = int(profit_loss[i - 1][1]) - int(profit_loss[i - 2][1])
    # Greatest Increase (max_increase) and Greatest Decrease (max_decrease)
    if subtract_MoM < max_decrease:
        min_month_yr = profit_loss[i - 1][0]
        max_decrease = subtract_MoM
    elif subtract_MoM > max_increase:
        max_increase = subtract_MoM
        max_month_yr = profit_loss[i - 1][0]
    # Total amount change in "Profit/Losses" between months
    tot_MoM = tot_MoM + subtract_MoM
# Average change in "Profit/Losses" between months
Avg_MoM = tot_MoM / (total_months - 1)
# Terminal Results
print("----------------------------------------------------------")
print('Financial Analysis')
print("----------------------------------------------------------")
print('Total Months: ' + str(total_months))
print('Total: $' + str(total))
print('Average Change: $' + str(Avg_MoM))
print('Greatest Increase in Profits: ' + max_month_yr + ' ($' + str(max_increase) + ')')
print('Greatest Decrease in Profits: ' + min_month_yr + ' ($' + str(max_decrease) + ')')
print("----------------------------------------------------------")
# Text File Results -- newlines were missing before, so the whole report
# ended up on a single line.
with open('financial_analysis.txt', 'w') as text:
    text.write("----------------------------------------------------------\n")
    text.write(" Financial Analysis\n")
    text.write("----------------------------------------------------------\n")
    text.write('Total Months: ' + str(total_months) + '\n')
    text.write('Total: $' + str(total) + '\n')
    text.write('Average Change: $' + str(Avg_MoM) + '\n')
    text.write('Greatest Increase in Profits: ' + max_month_yr + ' ($' + str(max_increase) + ')\n')
    text.write('Greatest Decrease in Profits: ' + min_month_yr + ' ($' + str(max_decrease) + ')\n')
    text.write("----------------------------------------------------------\n")
"""Tests for client config."""
from logging import INFO, DEBUG
from pprint import pprint
import pytest
from idact import get_default_retries
from idact.core.auth import AuthMethod
from idact.detail.config.client. \
client_cluster_config import ClusterConfigImpl
from idact.detail.config.client.client_config import ClientConfig
from idact.detail.config.client.client_config_serialize import \
serialize_client_config_to_json, deserialize_client_config_from_json
from idact.detail.config.client.setup_actions_config import \
SetupActionsConfigImpl
from idact.detail.log.logger_provider import LoggerProvider
from tests.helpers.config_defaults import DEFAULT_RETRIES_JSON
# Minimal valid cluster config (password prompt auth) reused as a fixture
# by most tests below.
VALID_CLIENT_CLUSTER_CONFIG = ClusterConfigImpl(host='abc',
                                                port=22,
                                                user='user',
                                                auth=AuthMethod.ASK)
# Fixture exercising the key-based auth fields (key path, install_key,
# setup actions); serialized with auth 'PUBLIC_KEY' in the tests below.
VALID_CLIENT_CLUSTER_CONFIG_WITH_PUBLIC_KEY_AUTH = \
    ClusterConfigImpl(host='abc',
                      port=22,
                      user='user',
                      auth=AuthMethod.GENERATE_KEY,
                      key='/home/user/.ssh/id_rsa',
                      install_key=False,
                      setup_actions=SetupActionsConfigImpl(jupyter=['echo a']))
def test_client_cluster_config_validation_is_used():
    """Each invalid field combination (empty host, negative port, empty
    user, empty key with key-based auth) must raise ValueError."""
    invalid_kwargs_list = [
        dict(host='', port=22, user='user', auth=AuthMethod.ASK),
        dict(host='abc', port=-1, user='user', auth=AuthMethod.ASK),
        dict(host='abc', port=22, user='', auth=AuthMethod.ASK),
        dict(host='abc', port=22, user='user',
             auth=AuthMethod.GENERATE_KEY, key='')]
    for invalid_kwargs in invalid_kwargs_list:
        with pytest.raises(ValueError):
            ClusterConfigImpl(**invalid_kwargs)
def test_client_cluster_config_create():
    """A config built with only the required fields gets the documented
    defaults for every optional field."""
    config = ClusterConfigImpl(host='abc',
                               port=22,
                               user='user',
                               auth=AuthMethod.ASK)
    # Required fields are stored as given.
    assert (config.host, config.port, config.user) == ('abc', 22, 'user')
    assert config.auth == AuthMethod.ASK
    # Optional fields fall back to their defaults.
    assert config.key is None
    assert config.install_key
    assert not config.disable_sshd
    assert config.setup_actions.jupyter == []
    assert config.setup_actions.dask == []
    assert config.scratch == "$HOME"
    assert config.retries == get_default_retries()
    assert config.use_jupyter_lab
def test_client_config_validation_is_used():
    """Cluster name validation must run in the ClientConfig constructor."""
    with pytest.raises(ValueError):
        ClientConfig({' Illegal Cluster Name': VALID_CLIENT_CLUSTER_CONFIG})
def test_client_config_create():
    """The constructor copies the clusters dict but shares the configs."""
    cluster_config = VALID_CLIENT_CLUSTER_CONFIG
    print(cluster_config.__dict__)
    clusters = {'cluster1': cluster_config}
    client_config = ClientConfig(clusters=clusters)
    # The mapping itself is copied...
    assert client_config.clusters is not clusters
    # ...but the config objects are shared, not deep-copied.
    assert client_config.clusters['cluster1'] is cluster_config
def test_client_config_create_empty_and_add_cluster():
    """add_cluster validates names, stores the config object itself, and
    rejects duplicate names without clobbering the existing entry."""
    client_config = ClientConfig()
    assert client_config.clusters == {}
    cluster_config = VALID_CLIENT_CLUSTER_CONFIG
    # An invalid name is rejected and nothing is stored.
    with pytest.raises(ValueError):
        client_config.add_cluster(' Illegal Cluster Name', cluster_config)
    assert client_config.clusters == {}
    # A valid name stores the config object itself.
    client_config.add_cluster('cluster1', cluster_config)
    assert client_config.clusters['cluster1'] is cluster_config
    # Re-adding under an existing name fails and keeps the original entry.
    with pytest.raises(ValueError):
        client_config.add_cluster('cluster1',
                                  ClusterConfigImpl(host='another',
                                                    port=22,
                                                    user='user',
                                                    auth=AuthMethod.ASK))
    assert client_config.clusters['cluster1'] is cluster_config
def test_client_config_serialize():
    """Serializing a config with one ASK-auth cluster must produce the
    full JSON document, including fields filled in with defaults."""
    client_config = ClientConfig(clusters={
        'cluster1': VALID_CLIENT_CLUSTER_CONFIG
    }, log_level=INFO)
    expected_json = {
        'clusters': {
            'cluster1': {'host': 'abc',
                         'user': 'user',
                         'port': 22,
                         'auth': 'ASK',
                         'key': None,
                         'installKey': True,
                         'disableSshd': False,
                         'setupActions': {'jupyter': [],
                                          'dask': []},
                         'scratch': '$HOME',
                         'notebookDefaults': {},
                         'retries': DEFAULT_RETRIES_JSON,
                         'useJupyterLab': True}
        },
        'logLevel': INFO
    }
    assert serialize_client_config_to_json(client_config) == expected_json
def test_client_config_deserialize():
    """Deserializing JSON must yield a config equal to one constructed
    directly with the same values."""
    # Raise log verbosity so deserialization details show when debugging.
    LoggerProvider().log_level = DEBUG
    input_json = {
        'clusters': {
            'cluster1': {'host': 'abc',
                         'user': 'user',
                         'port': 22,
                         'auth': 'ASK',
                         'key': None,
                         'installKey': True,
                         'disableSshd': False,
                         'setupActions': {'jupyter': [],
                                          'dask': []},
                         'scratch': '$HOME',
                         'retries': {}}
        },
        'logLevel': DEBUG
    }
    client_config = ClientConfig(
        clusters={
            'cluster1': ClusterConfigImpl(
                host='abc',
                user='user',
                port=22,
                auth=AuthMethod.ASK)},
        log_level=DEBUG)
    deserialized = deserialize_client_config_from_json(input_json)
    # Dump the deserialized clusters to aid debugging on failure.
    pprint([i.__dict__ for i in deserialized.clusters.values()])
    assert deserialize_client_config_from_json(input_json) == client_config
def test_client_config_serialize_public_key():
    """Key-based auth must serialize as 'PUBLIC_KEY' with the key fields,
    and the default log level must serialize as INFO."""
    client_config = ClientConfig(clusters={
        'cluster1': VALID_CLIENT_CLUSTER_CONFIG_WITH_PUBLIC_KEY_AUTH
    })
    expected_json = {
        'clusters': {
            'cluster1': {'host': 'abc',
                         'user': 'user',
                         'port': 22,
                         'auth': 'PUBLIC_KEY',
                         'key': '/home/user/.ssh/id_rsa',
                         'installKey': False,
                         'disableSshd': False,
                         'setupActions': {'jupyter': ['echo a'],
                                          'dask': []},
                         'scratch': '$HOME',
                         'notebookDefaults': {},
                         'retries': DEFAULT_RETRIES_JSON,
                         'useJupyterLab': True}
        }, 'logLevel': INFO}
    assert serialize_client_config_to_json(client_config) == expected_json
def test_client_config_deserialize_public_key():
    """Deserializing a 'PUBLIC_KEY' cluster must equal the fixture built
    with AuthMethod.GENERATE_KEY and the same key fields."""
    input_json = {
        'clusters': {
            'cluster1': {'host': 'abc',
                         'user': 'user',
                         'port': 22,
                         'auth': 'PUBLIC_KEY',
                         'key': '/home/user/.ssh/id_rsa',
                         'installKey': False,
                         'disableSshd': False,
                         'setupActions': {'jupyter': ['echo a'],
                                          'dask': []},
                         'scratch': '$HOME',
                         'retries': {},
                         'useJupyterLab': True}
        },
        'logLevel': INFO
    }
    client_config = ClientConfig(clusters={
        'cluster1': VALID_CLIENT_CLUSTER_CONFIG_WITH_PUBLIC_KEY_AUTH})
    assert deserialize_client_config_from_json(input_json) == client_config
# Full serialized form of a single ASK-auth cluster in which every optional
# field carries its default value; shared by the fill-out tests below.
EXPECTED_DEFAULT_JSON = {
    'clusters': {
        'cluster1': {'host': 'abc',
                     'user': 'user',
                     'port': 22,
                     'auth': 'ASK',
                     'key': None,
                     'installKey': True,
                     'disableSshd': False,
                     'setupActions': {'jupyter': [],
                                      'dask': []},
                     'scratch': '$HOME',
                     'notebookDefaults': {},
                     'retries': DEFAULT_RETRIES_JSON,
                     'useJupyterLab': True}
    },
    'logLevel': INFO
}
def test_client_config_fill_out_missing_fields():
    """Deserializing JSON with only the required cluster fields, then
    serializing again, must yield the fully-defaulted document."""
    input_json = {
        'clusters': {
            'cluster1': {'host': 'abc',
                         'user': 'user',
                         'port': 22,
                         'auth': 'ASK'}
        },
        'logLevel': INFO
    }
    round_tripped = serialize_client_config_to_json(
        deserialize_client_config_from_json(input_json))
    assert round_tripped == EXPECTED_DEFAULT_JSON
def test_client_config_fill_out_missing_fields_setup_actions():
    """An empty 'setupActions' object must also be filled with defaults
    on deserialization."""
    input_json = {
        'clusters': {
            'cluster1': {'host': 'abc',
                         'user': 'user',
                         'port': 22,
                         'auth': 'ASK',
                         'setupActions': {}}
        },
        'logLevel': INFO
    }
    round_tripped = serialize_client_config_to_json(
        deserialize_client_config_from_json(input_json))
    assert round_tripped == EXPECTED_DEFAULT_JSON
|
# __init__.py
# Package metadata.
# NOTE(review): __module_name__ says "_format_string_printing_font.py" but
# this package only re-exports _SpotifyAnnoy below -- looks like a
# copy-paste leftover; confirm the intended value.
__module_name__ = "_format_string_printing_font.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(
    [
        "vinyard@g.harvard.edu",
    ]
)
# Public API: expose the _SpotifyAnnoy implementation under the name `annoy`.
from ._SpotifyAnnoy import _SpotifyAnnoy as annoy
# Copyright (c) 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
try:
from oci.analytics.models import ChangeAnalyticsInstanceNetworkEndpointDetails
from oci.util import to_dict
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
# Module-level logger shared by the helper classes in this file.
logger = oci_common_utils.get_logger("oci_analytics_custom_helpers")
def _debug(s):
    """Log *s* at DEBUG level on the module logger."""
    get_logger().debug(s)
def get_logger():
    """Return the module-level logger."""
    return logger
class AnalyticsInstanceHelperCustom:
    """Customization hooks for the analytics instance resource helper."""
    def get_exclude_attributes(self):
        """Extend the base exclusion list with attributes present on the
        create model but absent from the get model, so the idempotency
        check does not compare them."""
        base_excludes = super(
            AnalyticsInstanceHelperCustom, self
        ).get_exclude_attributes()
        # idcs_access_token is sent on create but never returned by GET.
        return base_excludes + [
            "idcs_access_token",
        ]
class AnalyticsInstanceActionsHelperCustom:
    """Idempotency customizations for analytics instance actions
    (scale, change network endpoint, stop)."""
    ACTION_SCALE_ANALYTICS_INSTANCE = "scale"
    ACTION_CHANGE_ANALYTICS_INSTANCE_NETWORK_ENDPOINT_DETAILS = (
        "change_analytics_instance_network_endpoint"
    )
    def is_action_necessary(self, action, resource=None):
        """Return False when the requested action would be a no-op on the
        current resource; otherwise defer to the base implementation."""
        # Handling idempotency for `scale` operation when both `capacity_type` & `capacity_value` are same as
        # existing capacity metadata.
        if (
            (action.lower() == self.ACTION_SCALE_ANALYTICS_INSTANCE)
            and self.module.params.get("capacity")
            and (
                resource.capacity.capacity_type
                == self.module.params.get("capacity").get("capacity_type")
                and resource.capacity.capacity_value
                == self.module.params.get("capacity").get("capacity_value")
            )
        ):
            return False
        elif (
            action.lower()
            == self.ACTION_CHANGE_ANALYTICS_INSTANCE_NETWORK_ENDPOINT_DETAILS
        ):
            # Only necessary when the requested endpoint details differ
            # from the resource's current ones.
            action_details = oci_common_utils.convert_input_data_to_model_class(
                self.module.params, ChangeAnalyticsInstanceNetworkEndpointDetails
            )
            return not oci_common_utils.compare_dicts(
                to_dict(action_details.network_endpoint_details),
                to_dict(resource.network_endpoint_details),
            )
        return super(AnalyticsInstanceActionsHelperCustom, self).is_action_necessary(
            action, resource
        )
    # adding state 'INACTIVE' to the list returned by `get_action_idempotent_states(action)` when performing
    # `stop` operation.
    def get_action_idempotent_states(self, action):
        """Treat an already-INACTIVE instance as a successful `stop`."""
        action_idempotent_states = super(
            AnalyticsInstanceActionsHelperCustom, self
        ).get_action_idempotent_states(action)
        if action.lower() == "stop":
            return action_idempotent_states + [
                "INACTIVE",
            ]
        return action_idempotent_states
|
# Vehicle catalogue exercise: read car records, process Drive/Refuel/Revert
# commands until "Stop", then print the fleet sorted by mileage (desc), name (asc).
cars = {}
n = int(input())
for _ in range(n):
    # Each record line: "<name>|<mileage>|<fuel>"
    name, mileage, fuel = input().split("|")
    cars[name] = [int(mileage), int(fuel)]
command = input().split(" : ")
while not command[0] == "Stop":
    if command[0] == "Drive":
        car, distance, fuel_needed = command[1], command[2], command[3]
        if int(fuel_needed) <= cars[car][1]:
            cars[car][0] += int(distance)
            cars[car][1] -= int(fuel_needed)
            print(f"{car} driven for {distance} kilometers. {fuel_needed} liters of fuel consumed.")
        else:
            print("Not enough fuel to make that ride")
        # A car that reaches 100000 km is sold and removed from the catalogue.
        if cars[car][0] >= 100000:
            print(f"Time to sell the {car}!")
            del cars[car]
    elif command[0] == "Refuel":
        car, refill = command[1], command[2]
        # Tank capacity is 75 liters; clamp and report the amount actually added.
        if int(refill) + cars[car][1] > 75:
            amount_refuel = 75 - cars[car][1]
            cars[car][1] = 75
        else:
            # NOTE(review): amount_refuel stays a string here (int in the
            # branch above); the printed text is identical either way.
            amount_refuel = refill
            cars[car][1] += int(refill)
        print(f"{car} refueled with {amount_refuel} liters")
    elif command[0] == "Revert":
        car, kilometers = command[1], command[2]
        cars[car][0] -= int(kilometers)
        # Mileage never drops below 10000; no message is printed in that case.
        if cars[car][0] < 10000:
            cars[car][0] = 10000
        else:
            print(f"{car} mileage decreased by {kilometers} kilometers")
    command = input().split(" : ")
# Sort by mileage descending, then by name ascending.
cars = sorted(cars.items(), key=lambda x: (-int(x[1][0]), x[0]))
for tup in cars:
    car, mileage, fuel = tup[0], tup[1][0], tup[1][1]
    print(f"{car} -> Mileage: {mileage} kms, Fuel in the tank: {fuel} lt.")
|
#!/usr/bin/env python
import datetime
import json
import os
import sys
import tempfile
import time
import urllib2
def main ():
    """Certbot manual-auth hook: create the DNS-01 TXT record on vscale.io.

    Reads CERTBOT_DOMAIN, CERTBOT_VALIDATION and API_TOKEN from the
    environment; exits with status 1 on any API failure.
    """
    certbot_domain = os.getenv('CERTBOT_DOMAIN').strip()
    certbot_validation = os.getenv('CERTBOT_VALIDATION').strip()
    api_token = os.getenv('API_TOKEN').strip()
    log('processing certbot domain "{}"'.format(certbot_domain))
    tld = get_tld(certbot_domain)
    log('extracted TLD "{}"'.format(tld))
    # The API may store internationalized domains in decoded (unicode) form.
    decoded_itldn = decode_idn(tld)
    itldn_utf = decoded_itldn.encode('utf-8')
    if decoded_itldn != tld:
        log('decoded international TLD "{}"'.format(itldn_utf))
    tld_id = get_domain_id(domain=decoded_itldn, token=api_token)
    if not tld_id:
        log('failed to find ID of domain "{}"'.format(itldn_utf), 'ERROR')
        exit(1)
    log('extracted ID "{}" of TLD "{}"'.format(tld_id, itldn_utf))
    record_domain = get_record_domain(certbot_domain)
    log('using record domain "{}"'.format(record_domain))
    record_id = create_txt_record(domain_id=tld_id, name=record_domain, value=certbot_validation, token=api_token)
    log('created record with ID "{}"'.format(record_id))
    # The cleanup hook reads these IDs back to delete the record afterwards.
    tmp_file = write_tmp_data(name=certbot_domain, domain_id=tld_id, record_id=record_id)
    log('written tmp file to "{}"'.format(tmp_file))
    # Give the new DNS record a moment before certbot validates it.
    log('sleeping 5 seconds')
    time.sleep(5)
def get_tld(domain):
    """Return the registrable domain: the last two dot-separated labels."""
    labels = domain.split('.')
    return '.'.join(labels[-2:])
def decode_idn (domain):
    # Decode a punycode (IDNA) domain byte string into its unicode form.
    # NOTE(review): Python 2 only -- on Python 3 text strings have no
    # .decode(); the whole script uses urllib2 and is py2-specific.
    return domain.decode('idna')
def get_record_domain(domain):
    """Return the ACME DNS-01 challenge record name for *domain*."""
    return '_acme-challenge.' + domain
def get_domain_id (domain, token):
    """Return the vscale.io ID for *domain*, or None if it is not listed.

    Exits with status 1 if the domain listing request fails.
    """
    request = urllib2.Request('https://api.vscale.io/v1/domains/')
    request.add_header('X-Token', token)
    try:
        contents = urllib2.urlopen(request).read()
    except urllib2.HTTPError as err:
        log('failed to get domain ID: {} {}'.format(err.code, err.msg), level='error')
        exit(1)
    data = json.loads(contents)
    # Linear scan: the endpoint returns every domain of the account.
    for entry in data:
        if entry['name'] == domain:
            return entry['id']
    return None
def create_txt_record (domain_id, name, value, token):
    """Create a TXT record under domain *domain_id* and return its ID.

    :param name: fully qualified record name (the ACME challenge domain).
    :param value: record content (the ACME validation token).
    Exits with status 1 if the API call fails.
    """
    data = {
        'name': name,
        'type': 'TXT',
        'content': value,
        'ttl': 600
    }
    body = json.dumps(data, separators=(',', ':'))
    # Supplying a body makes urllib2 issue a POST request.
    request = urllib2.Request('https://api.vscale.io/v1/domains/{}/records/'.format(domain_id), data=body)
    request.add_header('X-Token', token)
    request.add_header('Content-Type', 'application/json')
    try:
        contents = urllib2.urlopen(request).read()
    except urllib2.HTTPError as err:
        log('failed to create TXT record: {} {}'.format(err.code, err.msg), level='error')
        exit(1)
    response = json.loads(contents)
    return response['id']
def write_tmp_data(name, domain_id, record_id):
    """Persist the created record's IDs to a temp JSON file (one per
    domain) so the cleanup hook can find and delete the record later.

    Returns the path of the written file.
    """
    target = os.path.join(tempfile.gettempdir(), 'certbot_{}.json'.format(name))
    payload = {'domain_id': domain_id, 'record_id': record_id}
    with open(target, 'w') as handle:
        json.dump(payload, handle, indent=2)
    return target
def log(msg, level='info'):
    """Write a timestamped, leveled authenticator message to stderr."""
    timestamp = datetime.datetime.now().time()
    line = '[ {} ] [ {} ] authenticator: {}\n'.format(timestamp, level.upper(), msg)
    sys.stderr.write(line)
if __name__ == '__main__':
    # Entry point: certbot invokes this script as a manual auth hook.
    main()
#! /usr/bin/env python3
# coding=utf-8
#================================================================
# Copyright (C) 2020 * Ltd. All rights reserved.
#
# Editor : pycharm
# File name : train.py
# Author : oscar chen
# Created date: 2020-10-13 9:50:26
# Description :
#
#================================================================
import os
import numpy as np
import tensorflow as tf
from network.ops import conv2d, batch_normalization
def residual_block(inputs, filters_num, blocks_num, conv_index, training = True, norm_decay = 0.99, norm_epsilon = 1e-3):
    """Downsample with a stride-2 conv, then stack *blocks_num* residual units.

    :param inputs: input feature map tensor.
    :param filters_num: number of output filters for this stage.
    :param blocks_num: number of 1x1 -> 3x3 residual units to stack.
    :param conv_index: running counter used to give each conv/BN layer a unique name.
    :param training: whether batch normalization runs in training mode.
    :param norm_decay: batch norm moving-average decay.
    :param norm_epsilon: batch norm variance epsilon.
    :return: (output tensor, updated conv_index)
    """
    # Pad the input feature map along its height and width dimensions
    # (top/left only) ahead of the stride-2 convolution.
    inputs = tf.pad(inputs, paddings=[[0, 0], [1, 0], [1, 0], [0, 0]], mode='CONSTANT')
    layer = conv2d(inputs, filters_num, kernel_size = 3, strides = 2, name = "conv2d_" + str(conv_index))
    layer = batch_normalization(layer, name = "batch_normalization_" + str(conv_index), training = training, norm_decay = norm_decay, norm_epsilon = norm_epsilon)
    conv_index += 1
    for _ in range(blocks_num):
        shortcut = layer
        # 1x1 bottleneck halves the channels...
        layer = conv2d(layer, filters_num // 2, kernel_size = 1, strides = 1, name = "conv2d_" + str(conv_index))
        layer = batch_normalization(layer, name = "batch_normalization_" + str(conv_index), training = training, norm_decay = norm_decay, norm_epsilon = norm_epsilon)
        conv_index += 1
        # ...then the 3x3 conv restores them before the residual add.
        layer = conv2d(layer, filters_num, kernel_size = 3, strides = 1, name = "conv2d_" + str(conv_index))
        layer = batch_normalization(layer, name = "batch_normalization_" + str(conv_index), training = training, norm_decay = norm_decay, norm_epsilon = norm_epsilon)
        conv_index += 1
        layer += shortcut
    return layer, conv_index
def darknet53(inputs, conv_index, training=True, norm_decay=0.99, norm_epsilon=1e-3):
    """Build the Darknet-53 backbone and return its three feature maps.

    :param inputs: input image tensor.
    :param conv_index: running counter used to name conv/BN layers uniquely.
    :return: (route1, route2, conv, conv_index) -- route1 and route2 are
        the intermediate feature maps tapped for the detection heads.
    """
    # conv: return 52 layer if input shape is 416x416x3 output shape is 13x13x1024
    # route1: return 26 layer 52x52x256
    # route2: return 43 layer 26x26x512
    with tf.variable_scope('darknet53'):
        conv = conv2d(inputs, filters_num=32, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
        conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
        conv_index += 1
        # Five downsampling stages with 1, 2, 8, 8, 4 residual units.
        conv, conv_index = residual_block(conv, conv_index=conv_index, filters_num=64, blocks_num=1, training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
        conv, conv_index = residual_block(conv, conv_index=conv_index, filters_num=128, blocks_num=2, training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
        conv, conv_index = residual_block(conv, conv_index=conv_index, filters_num=256, blocks_num=8, training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
        route1 = conv
        conv, conv_index = residual_block(conv, conv_index=conv_index, filters_num=512, blocks_num=8, training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
        route2 = conv
        conv, conv_index = residual_block(conv, conv_index=conv_index, filters_num=1024, blocks_num=4, training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
    return route1, route2, conv, conv_index
# Generates a linked list of a length determined by user input,
# consisting of random nonnegative integers whose upper bound is also determined
# by user input, and reorders the list so that it starts with all odd values and
# ends with all even values, preserving the order of odd and even values in the
# original list, respectively.
#
# Written by Eric Martin for COMP9021
import sys
from random import seed, randrange
from extended_linked_list import ExtendedLinkedList
def collect_references(L, length):
    """Return the set of id()s of the first *length* nodes of linked list L.

    Used to verify that rearrange() relinks the original nodes rather
    than allocating new ones.
    """
    references = set()
    node = L.head
    for _ in range(length):
        references.add(id(node))
        node = node.next_node
    return references
# Read "seed length upper_bound", build a random linked list, then verify
# that rearrange() reorders the list in place (odd values first, then even)
# by checking that exactly the original node objects are still present.
try:
    for_seed, length, upper_bound = [int(i) for i in input('Enter three nonnegative integers: '
                                                           ).split()
                                     ]
    if for_seed < 0 or length < 0 or upper_bound < 0:
        raise ValueError
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
seed(for_seed)
LL = ExtendedLinkedList([randrange(upper_bound + 1) for _ in range(length)])
LL.print()
# Record the identity of every node so we can detect newly created ones.
references = collect_references(LL, length)
LL.rearrange()
if collect_references(LL, length) != references:
    # rearrange() must relink the existing nodes, not rebuild the list.
    print('You cheated!')
    sys.exit()
else:
    LL.print()
|
import argparse
import pickle
import logging
import os
import uuid
from datetime import datetime
from time import time
from PIL import Image
import gym
import psutil
import ray
# This script will generate the dataset of state, action, new state tuple
# and store it locally as dataset.pkl.
# We make use of ray to generate the datast in parallel
# Ensure the log directory exists before attaching the file handler.
if not os.path.exists("logs"):
    os.makedirs("logs")
today = datetime.now().strftime('%Y%m%d')

# Configure the shared module logger (the old code fetched the same
# logger twice and carried a stale "uncomment to enable" comment).
logger = logging.getLogger('worldmodels')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
# File handler: full DEBUG output, one file per day.
filehandler = logging.FileHandler(filename='logs/dataset.{}.log'.format(today))
filehandler.setFormatter(formatter)
filehandler.setLevel(logging.DEBUG)
logger.addHandler(filehandler)
# Console handler: INFO and above.
steamhandler = logging.StreamHandler()
steamhandler.setFormatter(formatter)
steamhandler.setLevel(logging.INFO)
logger.addHandler(steamhandler)
def rollout(save_img=False):
    """Run one random-policy episode of CarRacing-v0.

    :param save_img: when True, also dump every observation to imgs/.
    :return: list of (obs, action, new_obs) transition tuples.
    """
    env = gym.make('CarRacing-v0')
    obs = env.reset()
    total_score = 0
    steps = 0
    buffer = []
    # (removed: unused `ctr` counter and dead commented-out append)
    while True:
        # Random policy: sample uniformly from the action space.
        action = env.action_space.sample()
        new_obs, reward, done, info = env.step(action)
        if save_img:
            img = Image.fromarray(new_obs)
            img.save('imgs/img_{}.png'.format(steps))
        total_score += reward
        steps += 1
        # Copy the observations: the env may reuse its buffers in place.
        buffer.append((obs.copy(), action, new_obs.copy()))
        obs = new_obs
        if done:
            logger.info('Total reward {} in {} steps'.format(total_score, steps))
            break
    env.close()
    return buffer
def save_dataset(dataset, fname=None):
    """Pickle *dataset* into the dataset/ directory.

    :param dataset: any picklable object.
    :param fname: target file name; a random UUID-based name when None.
    :return: the relative path of the written file.
    """
    if not os.path.exists("dataset"):
        os.makedirs("dataset")
    # `fname if not fname is None else ...` -> explicit `is None` check.
    if fname is None:
        fname = "{}.pkl".format(uuid.uuid4())
    out_fname = "dataset/{}".format(fname)
    with open(out_fname, 'wb') as f:
        pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
    return out_fname
def collect_samples(num_rollouts):
    """Run *num_rollouts* sequential rollouts, saving each to its own file."""
    for rollout_idx in range(num_rollouts):
        logger.info("Collecting dataset for rollout {}".format(rollout_idx))
        save_dataset(rollout(), "dataset_{}.pkl".format(rollout_idx))
@ray.remote
def collect():
    """Ray task: run one rollout and persist it under a UUID file name.

    :return: the path of the saved dataset file.
    """
    # (removed: 16 lines of commented-out code duplicating rollout())
    data = rollout()
    saved_fname = save_dataset(data)
    return saved_fname
def collect_samples_using_ray(num_cpus, num_rollouts):
    """Run *num_rollouts* rollouts in parallel with Ray.

    :param num_cpus: worker count; any non-positive value means use every
        physical CPU available.
    :param num_rollouts: number of independent rollouts to collect.
    :raises Exception: if more CPUs are requested than physically exist.
    """
    available_cpus = psutil.cpu_count(logical=False)
    if num_cpus > 0:
        if num_cpus > available_cpus:
            msg = "You have {} cpus available, use a count <= {}".format(available_cpus, available_cpus)
            raise Exception(msg)
    else:
        num_cpus = available_cpus
    ray.init(num_cpus=num_cpus)
    logger.info('Starting dataset collection in parallel on {} cpus'.format(num_cpus))
    # Each remote task saves its own file and returns the saved path.
    dataset = ray.get([collect.remote() for _ in range(num_rollouts)])
    logger.debug(len(dataset))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # store_true: the previous `default=False` string option made ANY
    # supplied value truthy, including "--parallel False".
    parser.add_argument("--parallel", action="store_true", help="Whether to use parallel processing powered by Ray.")
    parser.add_argument("--cpus", default=-1, type=int, help="Number of CPUs to use in parallel to generate the dataset. Use -1 to use all cpus available")
    parser.add_argument("--rollouts", default=5, type=int, help="Number of rollouts to use for dataset collection")
    args = parser.parse_args()
    start = time()
    if args.parallel:
        collect_samples_using_ray(args.cpus, args.rollouts)
    else:
        collect_samples(args.rollouts)
    logger.info('Took {} seconds to generate the dataset'.format(time() - start))
|
from django.contrib import admin
from product.models import (
Product,
ProductImage
)
# Expose the product catalog models in the Django admin with default options.
admin.site.register(Product)
admin.site.register(ProductImage)
import sys
import numpy as np
# Install pyngrok.
server_args = []
if 'google.colab' in sys.modules:
    # Colab has no local browser access; tunnel meshcat through ngrok.
    server_args = ['--ngrok_http_tunnel']
from meshcat.servers.zmqserver import start_zmq_server_as_subprocess
# Start the visualizer server once at import time; zmq_url is used below.
proc, zmq_url, web_url = start_zmq_server_as_subprocess(server_args=server_args)
from pydrake.systems.meshcat_visualizer import MeshcatVisualizer
from pydrake.all import (
Adder, AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer, DiagramBuilder,
InverseDynamicsController, MultibodyPlant, Parser, SceneGraph, Simulator,
PassThrough, Demultiplexer, StateInterpolatorWithDiscreteDerivative,
SchunkWsgPositionController, MakeMultibodyStateToWsgStateSystem, Integrator,
RigidTransform, RollPitchYaw
)
from manipulation.scenarios import AddIiwa, AddWsg, AddRgbdSensors
from manipulation.utils import FindResource
class JugglerStation:
    """An iiwa arm with a welded paddle, wrapped in a Drake diagram with
    an inverse-dynamics position controller (PID gains kp/ki/kd)."""
    def __init__(self, kp=100, ki=1, kd=20, time_step=0.002, show_axis=False):
        # NOTE(review): show_axis is stored and forwarded but not read
        # inside make_manipulation_station -- confirm intended use.
        self.kp = kp
        self.ki = ki
        self.kd = kd
        self.time_step = time_step
        self.show_axis = show_axis
        self.diagram, self.plant = self.make_manipulation_station(self.kp, self.ki, self.kd, self.time_step, self.show_axis)
    def get_multibody_plant(self):
        """Return the MultibodyPlant built for this station."""
        return self.plant
    def get_diagram(self):
        """Return the assembled station diagram."""
        return self.diagram
    @staticmethod
    def make_manipulation_station(kp=100, ki=1, kd=20, time_step=0.002, show_axis=False):
        """
        Create the juggler manipulation station.
        Args:
            kp (int, optional): proportional gain. Defaults to 100.
            ki (int, optional): integral gain. Defaults to 1.
            kd (int, optional): derivative gain. Defaults to 20.
            time_step (float, optional): controller time step. Defaults to 0.002.
        Returns:
            (tuple[(diagram), (plant)]): the diagram and plant
        """
        builder = DiagramBuilder()
        # Add (only) the iiwa, WSG, and cameras to the scene.
        plant, scene_graph = AddMultibodyPlantSceneGraph(
            builder, time_step=time_step)
        iiwa = AddIiwa(plant, collision_model="with_box_collision")
        parser = Parser(plant)
        parser.AddModelFromFile("utils/models/floor.sdf")
        parser.AddModelFromFile("utils/models/paddle.sdf")
        parser.AddModelFromFile("utils/models/ball.sdf")
        parser.AddModelFromFile("utils/models/reflection_axis.sdf")
        # Weld the paddle 0.25 m beyond the last iiwa link, pitched -90 deg.
        plant.WeldFrames(plant.GetFrameByName("iiwa_link_7"), plant.GetFrameByName("base_link"), RigidTransform(RollPitchYaw(0, -np.pi/2, 0), [0, 0, 0.25]))
        plant.Finalize()
        num_iiwa_positions = plant.num_positions(iiwa)
        # I need a PassThrough system so that I can export the input port.
        iiwa_position = builder.AddSystem(PassThrough(num_iiwa_positions))
        builder.ExportInput(iiwa_position.get_input_port(), "iiwa_position")
        builder.ExportOutput(iiwa_position.get_output_port(), "iiwa_position_command")
        # Export the iiwa "state" outputs.
        demux = builder.AddSystem(Demultiplexer(
            2 * num_iiwa_positions, num_iiwa_positions))
        builder.Connect(plant.get_state_output_port(iiwa), demux.get_input_port())
        builder.ExportOutput(demux.get_output_port(0), "iiwa_position_measured")
        builder.ExportOutput(demux.get_output_port(1), "iiwa_velocity_estimated")
        builder.ExportOutput(plant.get_state_output_port(iiwa), "iiwa_state_estimated")
        # Make the plant for the iiwa controller to use.
        # The controller plant models only the arm + paddle (no ball/floor).
        controller_plant = MultibodyPlant(time_step=time_step)
        controller_iiwa = AddIiwa(controller_plant, collision_model="with_box_collision")
        controller_parser = Parser(controller_plant)
        controller_parser.AddModelFromFile("utils/models/paddle.sdf")
        controller_plant.WeldFrames(controller_plant.GetFrameByName("iiwa_link_7"), controller_plant.GetFrameByName("base_link"), RigidTransform(RollPitchYaw(0, -np.pi/2, 0), [0, 0, 0.25]))
        # AddWsg(controller_plant, controller_iiwa, welded=True)
        controller_plant.Finalize()
        # Add the iiwa controller
        iiwa_controller = builder.AddSystem(
            InverseDynamicsController(
                controller_plant,
                kp=[kp]*num_iiwa_positions,
                ki=[ki]*num_iiwa_positions,
                kd=[kd]*num_iiwa_positions,
                has_reference_acceleration=False))
        iiwa_controller.set_name("iiwa_controller")
        builder.Connect(
            plant.get_state_output_port(iiwa), iiwa_controller.get_input_port_estimated_state())
        # Add in the feed-forward torque
        adder = builder.AddSystem(Adder(2, num_iiwa_positions))
        builder.Connect(iiwa_controller.get_output_port_control(),
                        adder.get_input_port(0))
        # Use a PassThrough to make the port optional (it will provide zero values if not connected).
        torque_passthrough = builder.AddSystem(
            PassThrough([0]*num_iiwa_positions))
        builder.Connect(torque_passthrough.get_output_port(), adder.get_input_port(1))
        builder.ExportInput(torque_passthrough.get_input_port(), "iiwa_feedforward_torque")
        builder.Connect(adder.get_output_port(), plant.get_actuation_input_port(iiwa))
        # Add discrete derivative to command velocities.
        desired_state_from_position = builder.AddSystem(
            StateInterpolatorWithDiscreteDerivative(
                num_iiwa_positions, time_step, suppress_initial_transient=True))
        desired_state_from_position.set_name("desired_state_from_position")
        builder.Connect(desired_state_from_position.get_output_port(),
                        iiwa_controller.get_input_port_desired_state())
        builder.Connect(iiwa_position.get_output_port(), desired_state_from_position.get_input_port())
        # Export commanded torques.
        #builder.ExportOutput(adder.get_output_port(), "iiwa_torque_commanded")
        #builder.ExportOutput(adder.get_output_port(), "iiwa_torque_measured")
        # Export "cheat" ports.
        builder.ExportOutput(scene_graph.get_query_output_port(), "geometry_query")
        builder.ExportOutput(plant.get_contact_results_output_port(), "contact_results")
        builder.ExportOutput(plant.get_state_output_port(), "plant_continuous_state")
        diagram = builder.Build()
        return diagram, plant
def station_test():
    """Smoke-test the JugglerStation diagram.

    Builds the station, attaches a Meshcat visualizer, holds a fixed iiwa
    posture on the position input, simulates 5 seconds and publishes the
    recording for browser playback.
    """
    diagram_builder = DiagramBuilder()
    juggler = diagram_builder.AddSystem(JugglerStation().get_diagram())
    meshcat_vis = ConnectMeshcatVisualizer(
        diagram_builder,
        output_port=juggler.GetOutputPort("geometry_query"),
        zmq_url=zmq_url)
    sim = Simulator(diagram_builder.Build())
    root_context = sim.get_context()
    # Pin the commanded joint positions for the whole run.
    juggler_context = juggler.GetMyContextFromRoot(root_context)
    juggler.GetInputPort("iiwa_position").FixValue(
        juggler_context, [0, np.pi/2, 0, -np.pi/2, 0, -np.pi/4, 0])
    # Record the simulation and publish it for playback.
    meshcat_vis.start_recording()
    sim.AdvanceTo(5.0)
    meshcat_vis.stop_recording()
    meshcat_vis.publish_recording()
if __name__ == "__main__":
    station_test()
import warnings
warnings.filterwarnings('ignore')
import unittest
from preprocessing.data_explorer import outliers_detector
import numpy as np
# Fixed seed so the quantile asserted in the test below is reproducible.
np.random.seed(0)
# Module-level fixture: 2000 standard-normal 2-D points, scored once at
# import time and shared by the test case.
data_points = np.random.randn(2000, 2)
# avg_codisp: per-point outlier score; is_outlier: presumably a boolean
# mask thresholded from the score — TODO confirm against outliers_detector.
avg_codisp, is_outlier = outliers_detector(data_points)
class TestOutliers(unittest.TestCase):
    """Regression test pinning the outlier-score distribution produced by
    the fixed-seed fixture built at module import."""
    def test_outliers_score(self):
        # 99th percentile of the scores, pinned to the value produced by the
        # current detector implementation (regression guard, not a derived truth).
        self.assertEqual(round(avg_codisp.quantile(0.99), 4), 25.3697)
if __name__ == '__main__':
    unittest.main()
|
from classes.Logger import Logger
from classes.Board import Board
from classes.Visualiser import Visualiser
class GameLogger:
    """
    Writes game state into the logger's backing file at fixed byte offsets,
    so each piece's cell can be rewritten in place instead of regenerating
    the whole file.

    Hardcoded column values whose key map towards a piece's UID so we can
    "random" access files for "random"-ness sake.
    """
    # Byte offset of each piece's slot in the log file, indexed by piece UID.
    # First 16 entries are black pieces, last 16 are white pieces.
    __map = [119, 124, 129, 134, 139, 144, 149, 154, 159, 164, 169, 174, 179, 184, 189, 194, # Black pieces
             22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 82, 87, 92, 97] # White pieces
    def __init__(self):
        # Logger owns the stream the game log is written to.
        self.logger = Logger()
    def action_to_file(self, origin_piece, destination_piece=None):
        """
        Given 2 pieces, set their positions in the file
        :param origin_piece: Piece that moved; its new algebraic square is written at its slot
        :param destination_piece: Piece at the destination, if any (capture or castling partner)
        :return: void
        """
        stream = self.logger.stream()
        offset = self.get_piece_offset(origin_piece)
        stream.seek(offset)
        # Overwrite the mover's slot with its new algebraic square (e.g. "e4").
        stream.write(Visualiser.to_algebraic(*origin_piece.get_pos()))
        if destination_piece:
            """ Likely a capture here, but let's make sure it is """
            # "XX" marks a captured piece's slot.
            destination_piece_state = "XX"
            offset = self.get_piece_offset(destination_piece)
            if not (origin_piece.is_white() ^ destination_piece.is_white()):
                """ Highly likely castling in here. Set destination piece's state instead of marking them captured """
                # Same colour: write the partner's (updated) square instead.
                destination_piece_state = Visualiser.to_algebraic(*destination_piece.get_pos())
            stream.seek(offset)
            stream.write(destination_piece_state)
        # Reset seek
        stream.seek(0)
    def get_piece_offset(self, piece):
        """
        Get our piece's column offset and take into consideration all quirks
        :param piece: Piece whose slot offset is wanted
        :return: byte offset into the log file for this piece's slot
        """
        offset = self.__map[piece.uid] + 2 # "2" is an offset from piece name and the equal sign (e.g. K=e1)
        if not piece.is_white():
            """
            Black magic voodoo caused by \n needs an offset like this even though
            file.read() wont tell you a \n exists
            """
            offset += 2
        return offset
    def state_to_file(self, board: Board):
        """
        Persist current state to file in logger
        :param board: The current game board
        :return: None
        """
        # Start from a clean file each time: the layout is positional.
        self.logger.clear()
        stream = self.logger.stream()
        # Width of each "name=square" cell, e.g. "K=e1 ".
        char_offset = 5
        # Sort pieces nicely by camo and valuation
        [white, black] = self.group_by_camo(board.pieces)
        # Set seek column number
        # NOTE(review): `col` is incremented below but never read — it looks
        # like leftover bookkeeping from an earlier seek-based layout.
        col = 0
        for camo in [white, black]:
            # Set preface text to clearly indicate which camo the following pieces are
            preface = " (by valuation): "
            preface = ("White" if camo[0].is_white() else "Black") + preface
            stream.write(preface)
            col += len(preface) # Increment col to take into account recently printed text
            for piece in self.sort_by_rank(camo):
                [x, y] = piece.get_pos()
                algebraic_pos = Visualiser.to_algebraic(*[x, y])
                string = piece.name.strip() + "=" + algebraic_pos # Format name and algebraic position nicely
                # ljust keeps every cell the same width so __map offsets line up.
                stream.write(string.ljust(char_offset))
                col += char_offset
            # Space out next columns to make way for next camo
            stream.write("\n")
    @staticmethod
    def group_by_camo(pieces):
        """
        Group pieces by camo(uflage)
        :param pieces: iterable of Piece objects
        :return: List containing the pieces. First array contains white pieces, black on the other
        """
        camos = [[], []]
        for piece in pieces:
            camo = 0 if piece.is_white() else 1
            camos[camo].append(piece)
        return camos
    @staticmethod
    def sort_by_rank(pieces):
        """
        Sort pieces by their valuations (rank)
        :param pieces: iterable of Piece objects
        :return: Pieces sorted by valuation (ascending)
        """
        return sorted(pieces, key=lambda piece: piece.value)
|
"""
Instruction for the candidate.
1) You are an avid rock collector who lives in southern California. Some rare
and desirable rocks just became available in New York, so you are planning
a cross-country road trip. There are several other rare rocks that you could
pick up along the way.
You have been given a grid filled with numbers, representing the number of
rare rocks available in various cities across the country. Your objective
is to find the optimal path from So_Cal to New York that would allow you to
accumulate the most rocks along the way.
Note: You can only travel either north (up) or east (right)
2) Consider adding some additional tests in doTestPass()
3) Implement optimalPath() correctly
4) Here is an example: ^
[[0,0,0,0,5], New York (end) N
[0,1,1,1,0], < W E >
So_cal (start) [2,0,0,0,0]] S
v
"""
def find_max(x, y, rows, cols, val, dp):
    """Best rock total achievable on arrival at cell (x, y).

    The traveller only moves north (decreasing row) or east (increasing
    column), so (x, y) can only be entered from the cell below it (south,
    row x+1) or the cell to its left (west, column y-1).

    Fix: the original swapped the direction names — dp[x+1][y] (the
    southern neighbour) was labelled ``west`` and dp[x][y-1] (the western
    neighbour) was labelled ``south``. Values were correct; names misled.

    :param x: row index of the cell
    :param y: column index of the cell
    :param rows: number of grid rows
    :param cols: number of grid columns (unused, kept for interface parity)
    :param val: rocks available at (x, y)
    :param dp: table of best totals computed so far
    :return: best total collectable on arrival at (x, y), including val
    """
    south = dp[x + 1][y] if x + 1 < rows else 0  # cell below
    west = dp[x][y - 1] if y - 1 >= 0 else 0     # cell to the left
    return max(south, west) + val
def add_neighbours(x, y, rows, cols, queue):
    """Queue the cells reachable from (x, y): north (x-1, y) and east (x, y+1).

    Cells outside the grid are skipped; ``queue`` is a set, so duplicates
    are absorbed automatically.
    """
    north = (x - 1, y)
    east = (x, y + 1)
    if north[0] >= 0:
        queue.add(north)
    if east[1] < cols:
        queue.add(east)
def optimal_path(grid):
    """Maximum rocks collectable from the bottom-left to the top-right cell,
    moving only north (up) or east (right).

    Replaces the previous unordered set-pop worklist — which recomputed
    cells in nondeterministic order and re-queued dependents until a
    fixpoint — with one deterministic bottom-up pass: every cell depends
    only on the cell below it and the cell to its left, so sweeping rows
    bottom-to-top and columns left-to-right visits each dependency first
    and touches every cell exactly once.

    :param grid: non-empty 2-D list; grid[x][y] is the rock count at row x,
        column y (row 0 is the northernmost row)
    :return: best achievable total, including the start and end cells
    """
    rows, cols = len(grid), len(grid[0])
    best = [[0] * cols for _ in range(rows)]
    for x in range(rows - 1, -1, -1):
        for y in range(cols):
            south = best[x + 1][y] if x + 1 < rows else 0
            west = best[x][y - 1] if y - 1 >= 0 else 0
            best[x][y] = max(south, west) + grid[x][y]
    return best[0][cols - 1]
def eq(exp, res):
    """Fail with a readable message unless *res* equals *exp*.

    Fix: the original used a bare ``assert``, which is silently stripped
    when Python runs with ``-O``; raising AssertionError explicitly keeps
    the check active in all modes while preserving the exception type and
    message callers may rely on.

    :param exp: expected value
    :param res: actual value
    :raises AssertionError: if the values differ
    """
    if exp != res:
        raise AssertionError(f'expected: {exp} result: {res}')
def main():
    """Run optimal_path against the known examples and report success.

    Fix: the test-case list was named ``input``, shadowing the builtin;
    renamed to ``cases``.
    """
    cases = [
        [[0, 0, 0, 0, 5],
         [0, 1, 1, 1, 0],
         [2, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 5],
         [0, 1, 1, 1, 0],
         [2, 0, 0, 0, 100]]
    ]
    expected = [10, 107]
    for grid, want in zip(cases, expected):
        eq(want, optimal_path(grid))
    print('success')
if __name__ == '__main__':
    main()
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from st2client.models.policy import Policy, PolicyType
from st2client.commands import resource
LOG = logging.getLogger(__name__)
class PolicyTypeBranch(resource.ResourceBranch):
    """CLI branch for the policy-type resource.

    Policy types are system-defined, so only ``list`` and ``get``
    subcommands are registered (``read_only=True`` suppresses the
    create/update/delete commands the base class would otherwise add).
    """
    def __init__(self, description, app, subparsers, parent_parser=None):
        super(PolicyTypeBranch, self).__init__(
            PolicyType,
            description,
            app,
            subparsers,
            parent_parser=parent_parser,
            read_only=True,
            commands={"list": PolicyTypeListCommand, "get": PolicyTypeGetCommand},
        )
class PolicyTypeListCommand(resource.ResourceListCommand):
    """List policy types, optionally filtered by resource type."""
    display_attributes = ["id", "resource_type", "name", "description"]
    def __init__(self, resource, *args, **kwargs):
        super(PolicyTypeListCommand, self).__init__(resource, *args, **kwargs)
        self.parser.add_argument(
            "-r",
            "--resource-type",
            type=str,
            dest="resource_type",
            help="Return policy types for the resource type.",
        )
    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        # No filter requested: plain get-all.
        if not args.resource_type:
            return self.manager.get_all(**kwargs)
        # Merge the resource-type filter into the auth/transport kwargs
        # (kwargs take precedence, matching the original update order).
        query_params = {"resource_type": args.resource_type}
        query_params.update(**kwargs)
        return self.manager.query(**query_params)
class PolicyTypeGetCommand(resource.ResourceGetCommand):
    """Get a single policy type by reference or database id."""
    # Name of the positional CLI argument holding the lookup key.
    pk_argument_name = "ref_or_id"
    def get_resource(self, ref_or_id, **kwargs):
        # Policy types may be addressed by ref or id; the base helper tries both.
        return self.get_resource_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
class PolicyBranch(resource.ResourceBranch):
    """CLI branch for policy resources.

    Unlike policy types, policies are user-managed, so list/get/update/
    delete subcommands are all registered.
    """
    def __init__(self, description, app, subparsers, parent_parser=None):
        super(PolicyBranch, self).__init__(
            Policy,
            description,
            app,
            subparsers,
            parent_parser=parent_parser,
            commands={
                "list": PolicyListCommand,
                "get": PolicyGetCommand,
                "update": PolicyUpdateCommand,
                "delete": PolicyDeleteCommand,
            },
        )
class PolicyListCommand(resource.ContentPackResourceListCommand):
    """List policies, optionally filtered by pack, resource ref or policy type."""
    display_attributes = ["ref", "resource_ref", "policy_type", "enabled"]
    def __init__(self, resource, *args, **kwargs):
        super(PolicyListCommand, self).__init__(resource, *args, **kwargs)
        self.parser.add_argument(
            "-r",
            "--resource-ref",
            type=str,
            dest="resource_ref",
            help="Return policies for the resource ref.",
        )
        self.parser.add_argument(
            "-pt",
            "--policy-type",
            type=str,
            dest="policy_type",
            help="Return policies of the policy type.",
        )
    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        # Collect only the CLI filters the user actually supplied.
        candidate_filters = (
            ("pack", args.pack),
            ("resource_ref", args.resource_ref),
            ("policy_type", args.policy_type),
        )
        filters = {name: value for name, value in candidate_filters if value}
        filters.update(**kwargs)
        # Restrict the server response to the requested attributes, if any.
        include_attributes = self._get_include_attributes(args=args)
        if include_attributes:
            filters["params"] = {
                "include_attributes": ",".join(include_attributes)
            }
        return self.manager.query(**filters)
class PolicyGetCommand(resource.ContentPackResourceGetCommand):
    """Get a single policy by its pack-qualified reference."""
    # Show every attribute of the policy.
    display_attributes = ["all"]
    # Preferred ordering when rendering the attributes.
    attribute_display_order = [
        "id",
        "ref",
        "pack",
        "name",
        "description",
        "enabled",
        "resource_ref",
        "policy_type",
        "parameters",
    ]
class PolicyUpdateCommand(resource.ContentPackResourceUpdateCommand):
    """Update a policy; all behaviour comes from the content-pack base class."""
    pass
class PolicyDeleteCommand(resource.ContentPackResourceDeleteCommand):
    """Delete a policy; all behaviour comes from the content-pack base class."""
    pass
|
/home/runner/.cache/pip/pool/b0/ea/23/8e93b04086b09945ebc0137943c1d94e8feedb1b3e345e78ea4b9fd225 |
# Generated by Django 2.1.7 on 2019-06-24 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Tighten ``Contraparte.siglas`` with a one-off default.

    ``default=1`` is the one-off value supplied when Django prompted while
    making the field non-null for existing rows; ``preserve_default=False``
    drops that default from the model state afterwards.  (The help_text
    contains a typo, "oganización" — fixing it belongs in the model and a
    new migration, not in this generated file.)
    """
    dependencies = [
        ('organizaciones', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contraparte',
            name='siglas',
            field=models.CharField(default=1, help_text='Siglas o nombre corto de la oganización', max_length=200, verbose_name='Siglas o nombre corto'),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/env python
'''
@package ion.processes.data.transforms
@file ion/processes/data/transforms/notification_worker.py
@author Brian McKenna <bmckenna@asascience.com>
@brief NotificationWorker class processes real-time notifications
'''
from datetime import datetime
from email.mime.text import MIMEText
import smtplib
from pyon.event.event import EventPublisher, EventSubscriber
from pyon.public import log, RT, OT, PRED, CFG
from interface.objects import DeliveryModeEnum, NotificationFrequencyEnum
from ion.core.process.transform import TransformEventListener
from ion.services.dm.utility.uns_utility_methods import load_notifications
from jinja2 import Environment, FileSystemLoader
import smtplib
class NotificationWorker(TransformEventListener):
    """
    Instances of this class acts as a Notification Worker.

    Listens for events, matches them against the loaded notification
    requests and delivers real-time email/SMS notifications over SMTP.

    Fix: ``_key_combinations`` assigned ``indices = range(r)`` and later
    mutated it with ``indices[i] += 1`` — valid on Python 2 (range returns
    a list) but a TypeError on Python 3 (range objects are immutable).
    Wrapping with ``list()`` is behaviour-identical on both.
    """
    def on_init(self):
        """Set up registry/publisher clients, SMTP config and templates."""
        # clients
        self.resource_registry = self.container.resource_registry
        self.event_publisher = EventPublisher(OT.NotificationSentEvent)
        # SMTP client configurations
        self.smtp_from = CFG.get_safe('server.smtp.from', 'data_alerts@oceanobservatories.org')
        self.smtp_host = CFG.get_safe('server.smtp.host', 'localhost')
        self.smtp_port = CFG.get_safe('server.smtp.port', 25)
        # Jinja2 template environment
        self.jinja_env = Environment(loader=FileSystemLoader('res/templates'), trim_blocks=True, lstrip_blocks=True)
        super(NotificationWorker, self).on_init()
    def on_start(self):
        """Load notifications and subscribe to events that invalidate them."""
        super(NotificationWorker,self).on_start()
        self.notifications = load_notifications()
        def _load_notifications_callback(msg, headers):
            """ local callback method so this can be used as callback in EventSubscribers """
            self.notifications = load_notifications() # from uns_utility_methods
        # the subscriber for the ReloadUserInfoEvent (new subscriber, subscription deleted, notifications changed, etc)
        self.reload_user_info_subscriber = EventSubscriber(
            event_type=OT.ReloadUserInfoEvent,
            #origin='UserNotificationService',
            callback=_load_notifications_callback
        )
        self.add_endpoint(self.reload_user_info_subscriber)
        # the subscriber for the UserInfo resource update events
        self.userinfo_rsc_mod_subscriber = EventSubscriber(
            event_type=OT.ResourceModifiedEvent,
            sub_type="UPDATE",
            origin_type="UserInfo",
            callback=_load_notifications_callback
        )
        self.add_endpoint(self.userinfo_rsc_mod_subscriber)
    def process_event(self, event, headers):
        """
        callback for the subscriber listening for all events

        Matches the event against the loaded notifications and sends one
        real-time email/SMS per triggered DeliveryConfiguration, then
        publishes a NotificationSentEvent per NotificationRequest.
        """
        # create tuple key (origin,origin_type,event_type,event_subtype) for incoming event
        # use key to match against known notifications, keyed by same tuple (or combination of this tuple)
        origin = event.origin
        origin_type = event.origin_type
        event_type = event.type_
        event_subtype = event.sub_type
        key = (origin,origin_type,event_type,event_subtype)
        # users to notify with a list of the notifications that have been triggered by this Event
        users = {} # users to be notified
        # loop the combinations of keys (see _key_combinations below for explanation)
        # set() to eliminate duplicates when '' values exist in tuple
        for k in set(self._key_combinations(key)):
            if k in self.notifications:
                for (notification, user) in self.notifications.get(k, []):
                    # notification has been triggered
                    if user not in users:
                        users[user] = []
                    users[user].append(notification)
        # we now have a dict, keyed by users that will be notified, each user has a list of notifications triggered by this event
        # send email
        if users:
            # message content for Jinja2 template (these fields are based on Event and thus are the same for all users/notifications)
            context = {}
            context['event_label'] = self.event_type_to_label(event.type_) # convert to UX label if known
            context['origin_type'] = event.origin_type
            context['origin'] = event.origin
            context['url'] = 'http://ooinet.oceanobservatories.org' # TODO get from CFG
            context['timestamp'] = datetime.utcfromtimestamp(float(event.ts_created)/1000.0).strftime('%Y-%m-%d %H:%M:%S (UTC)')
            # use one SMTP connection for all emails
            smtp = self._initialize_smtp()
            try:
                # loop through list of users getting notified of this Event
                for user in users:
                    # list of NotificationRequests for this user triggered by this event
                    for notification in users[user]:
                        # name of NotificationRequest, defaults to...NotificationRequest? I don't think name gets set anywhere? TODO, what's default?
                        context['notification_name'] = notification.name or notification.type_
                        # send message for each DeliveryConfiguration (this has mode and frequency to determine realtime, email or SMS)
                        for delivery_configuration in notification.delivery_configurations:
                            # skip if DeliveryConfiguration.frequency is DISABLED
                            if delivery_configuration.frequency == NotificationFrequencyEnum.DISABLED:
                                continue
                            # only process REAL_TIME
                            if delivery_configuration.frequency != NotificationFrequencyEnum.REAL_TIME:
                                continue
                            # default to UserInfo.contact.email if no email specified in DeliveryConfiguration
                            smtp_to = delivery_configuration.email if delivery_configuration.email else user.contact.email
                            context['smtp_to'] = smtp_to
                            # message from Jinja2 template (email or SMS)
                            try:
                                # email - MIMEText
                                if delivery_configuration.mode == DeliveryModeEnum.EMAIL:
                                    body = self.jinja_env.get_template('notification_realtime_email.txt').render(context)
                                    mime_text = MIMEText(body)
                                    mime_text['Subject'] = 'OOINet ION Event Notification - %s' % context['event_label']
                                    mime_text['From'] = self.smtp_from
                                    mime_text['To'] = context['smtp_to']
                                    smtp_msg = mime_text.as_string()
                                # SMS - just the template string
                                elif delivery_configuration.mode == DeliveryModeEnum.SMS:
                                    body = self.jinja_env.get_template('notification_realtime_sms.txt').render(context)
                                    smtp_msg = body
                                # unknown DeliveryMode
                                else:
                                    raise Exception #TODO specify unknown DeliveryModeEnum
                                smtp.sendmail(self.smtp_from, smtp_to, smtp_msg)
                            except Exception:
                                log.error('Failed to create message for notification %s', notification._id)
                                continue # skips this notification
                        # publish NotificationSentEvent - one per NotificationRequest (EventListener plugin NotificationSentScanner listens)
                        notification_max = int(CFG.get_safe("service.user_notification.max_daily_notifications", 1000))
                        self.event_publisher.publish_event(user_id=user._id, notification_id=notification._id, notification_max=notification_max)
            finally:
                smtp.quit()
    def _initialize_smtp(self):
        """ class method so user/pass/etc can be added """
        return smtplib.SMTP(self.smtp_host, self.smtp_port)
    def _key_combinations(self, key):
        """
        creates a list of all possible combinations of the tuple elements, from 1 member to len(key) members
        only the elements of each combination are set, '' elsewhere, all therefore have same length as key
        eg. ('a', 'b', 'c') -> (a) becomes ('a', '', '') and (b) becomes ('', 'b', '')
        extension of https://docs.python.org/2/library/itertools.html#itertools.combinations (Equivalent to section)
        differences:
        - loops all r from 1 to n
        - returns tuple of same length as n with '' as filler
        """
        n = len(key)
        # want all combinations of 1 to n
        for r in range(1,n+1):
            # list() so the in-place index increments below work on Python 3,
            # where range() returns an immutable object (fix; no-op on Python 2)
            indices = list(range(r))
            # first combination is the first r values
            combination = ['']*n # creates a list of n ''s
            for i in indices:
                combination[i] = key[i]
            yield tuple(combination)
            # remaining combinations
            while True:
                for i in reversed(range(r)):
                    if indices[i] != i + n - r:
                        break
                else:
                    break
                indices[i] += 1
                for j in range(i+1, r):
                    indices[j] = indices[j-1] + 1
                combination = ['']*n
                for i in indices:
                    combination[i] = key[i]
                yield tuple(combination)
    # TODO: REMOVE AND REPLACE WITHIN NotificationRequest
    # this is a temporary hack so we're using UX (ion-ux) defined labels in the email
    # see https://github.com/ooici/ion-ux/blob/master/static/js/ux-views-notifications.js#L1-L70
    def event_type_to_label(self, key):
        """Map an event type name to its UX display label (identity if unknown)."""
        event_to_label = {
            'ResourceAgentConnectionLostErrorEvent': 'Communication Lost/Restored',
            'ResourceAgentErrorEvent': 'Device Error',
            'ResourceIssueReportedEvent': 'Issue reported',
            'ResourceLifecycleEvent': 'Lifecycle state change',
            'ResourceAgentStateEvent': 'Agent operational state change',
            'ResourceAgentResourceStateEvent': 'Device operational state change',
            'DeviceOperatorEvent': 'Operator event on device',
            'ResourceOperatorEvent': 'Operator event on resource',
            'ParameterQCEvent': 'QC alert',
            'OrgNegotiationInitiatedEvent': 'Request received',
            'ResourceModifiedEvent': 'Resource modified',
            'DeviceStatusAlertEvent': 'Status alert/change',
            'DeviceAggregateStatusEvent': 'Aggregate Status alert/change',
        }
        # if not known, just return the event_type
        return event_to_label.get(key, key)
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.chart
import typing
from abc import abstractproperty
from ..drawing.shape import Shape as Shape_85cc09e5
from ..style.character_properties import CharacterProperties as CharacterProperties_1d4f0ef3
from ..xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier_9fbe1222
if typing.TYPE_CHECKING:
from .chart_legend_position import ChartLegendPosition as ChartLegendPosition_18bc0eb0
class ChartLegend(Shape_85cc09e5, CharacterProperties_1d4f0ef3, UserDefinedAttributesSupplier_9fbe1222):
    """
    Service Class

    specifies the legend of a chart.

    The text/font properties which are specified in the service com.sun.star.drawing.Shape correlate to all text objects inside the legend.

    **since**
        OOo 1.1.2

    See Also:
        `API ChartLegend <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1chart_1_1ChartLegend.html>`_
    """
    # UNO namespace metadata used by the runtime to resolve this service.
    __ooo_ns__: str = 'com.sun.star.chart'
    __ooo_full_ns__: str = 'com.sun.star.chart.ChartLegend'
    __ooo_type_name__: str = 'service'
    # NOTE(review): abc.abstractproperty has been deprecated since Python 3.3;
    # the Cheetah generator could emit @property + @abstractmethod instead.
    @abstractproperty
    def Alignment(self) -> 'ChartLegendPosition_18bc0eb0':
        """
        determines the alignment of the legend relative to the diagram.
        """
    @abstractproperty
    def AutomaticPosition(self) -> bool:
        """
        If this property is TRUE the position is calculated by the application automatically.

        Setting this property to false will have no effect. Instead use the interface com.sun.star.drawing.XShape to set a concrete position.
        """
__all__ = ['ChartLegend']
|
from abc import ABC, abstractmethod
from PyQt5.QtGui import QIcon
class AbstractPlayer(ABC):
    """Abstract base class providing the common interface for all players.

    Concrete players must implement :meth:`play_next` and :meth:`get_role`.
    Identity/state fields start as None and are filled in by the game engine.
    """
    def __init__(self, engine, is_ai=False):
        self.name = None
        self.tickets = None
        self.location = None
        self.icon = QIcon()
        self.is_ai = is_ai
        self.engine = engine
    @abstractmethod
    def play_next(self):
        """Perform the next action by this player.

        Raises:
            NotImplementedError: if a subclass delegates to this base body.
        """
        # FIX: the exception was previously constructed but never raised, so
        # calling the base implementation silently returned None.
        raise NotImplementedError(
            "Class {} doesn't implement play_next()".format(self.__class__.__name__)
        )
    @abstractmethod
    def get_role(self):
        """Return this player's role identifier.

        Raises:
            NotImplementedError: if a subclass delegates to this base body.
        """
        # FIX: same missing `raise` as play_next().
        raise NotImplementedError(
            "Class {} doesn't implement get_role()".format(self.__class__.__name__)
        )
    def set_location(self, location):
        """Set the player's current board location."""
        self.location = location
    def get_info(self):
        """Return a dict snapshot of the player's public state."""
        return {
            "name": self.name,
            "is_ai": self.is_ai,
            "tickets": self.tickets,
            "location": self.location,
            "role": self.get_role(),
            "icon": self.icon,
        }
|
from numpy import exp
try:
from plexus.low_level_drivers.simple_avr_cond_driver import SimpleCondSensorControl
from plexus.nodes.node import Command
from plexus.nodes.message import Message
from plexus.devices.base_device import BaseDevice
except Exception as e:
from src.plexus.low_level_drivers.simple_avr_cond_driver import SimpleCondSensorControl
from src.plexus.nodes.node import Command, Message
from src.plexus.devices.base_device import BaseDevice
class AVRCondDevice(BaseDevice):
    """
    this wrapper for custom connected to AVR mcu

    Wraps a SimpleCondSensorControl serial driver and exposes two device
    commands: a raw conductivity readout and a calibrated ("approximated")
    readout derived from it.
    """
    def __init__(self, name: str, num_of_channels: int = 6,
                 dev: str = "/dev/ttyUSB1", baud: int = 9600, timeout: int = 1, slave_id: int = 2):
        super().__init__(name)
        # for ax2+bx+c approximation
        # TODO mb send it through init args?
        # self.a = 1.41433757
        # self.b = -6.43387014
        # self.c = 7.81645995
        # self.a = 1.134
        # self.b = -8.218
        # self.c = 20.264
        # self.d = -16.255
        # self.a = 0.019
        # self.b = 1.4
        # Current calibration constants used by raw_to_approx (rational fit
        # a*x/(b-x)); rejected fits are kept commented out above for reference.
        # NOTE(review): values look empirical — confirm against the latest
        # sensor calibration.
        self.a = 0.4819
        self.b = 4.1594 # polynomial approx
        self._sensor = SimpleCondSensorControl(dev=dev, baud=baud, timeout=timeout)
        self._annotation = "this is simple test device to control six relay channels through AVR mcu"
        self.slave_id = slave_id
        self.num_of_channels = num_of_channels
        self._status = "started"
        get_raw_data_command = Command(
            name="get_raw_data",
            annotation="get raw conductivity data from custom sensor",
            output_kwargs={"conductivity": "float"}
        )
        get_approx_data_command = Command(
            name="get_approx_data",
            annotation="get scaled conductivity data from custom sensor",
            output_kwargs={"conductivity": "float"}
        )
        self._available_commands.extend([get_raw_data_command, get_approx_data_command])
        self._status = "work"
        print("awailable commands for me {}".format(self._available_commands))
    def raw_to_approx(self, raw_data):
        """Convert a raw sensor reading to a calibrated conductivity value
        using the current rational fit a*x/(b-x).

        Earlier polynomial/exponential fits are kept commented out below.
        """
        x = raw_data
        # return self.a*x*x*x + self.b*x*x + self.c*x + self.d
        # return self.a*exp(self.b*x)
        return self.a*x/(self.b-x)
    def device_commands_handler(self, command, **kwargs):
        """Dispatch a device command.

        Supported commands: "get_raw_data" (raw float from the sensor) and
        "get_approx_data" (raw value passed through raw_to_approx).
        Sets self._status to "error" and raises ConnectionError on any
        driver failure.
        """
        if command == "get_raw_data":
            try:
                echo, ans = self._sensor.get_data(self.slave_id)
                self._status = "work"
                # Sensor replies with an ASCII-encoded float.
                return float(ans.decode('utf-8'))
            except Exception as e:
                self._status = "error"
                raise ConnectionError("ERROR {}".format(e))
        if command == "get_approx_data":
            try:
                echo, ans = self._sensor.get_data(self.slave_id)
                self._status = "work"
                rd = float(ans.decode('utf-8'))
                appr_data = self.raw_to_approx(rd)
                print(rd, appr_data)
                return appr_data
            except Exception as e:
                self._status = "error"
                raise ConnectionError("ERROR {}".format(e))
# Created by Xinyu Zhu on 2021/4/20, 17:58
import pysynth as ps
import numpy as np
import re
# Restrict the notes to scale degrees 1-2-3-5-6 (Chinese-style pentatonic
# mode) so the random melody sounds natural.
notes = np.array(["c4", "d4", "e4", "g4", "a4", ])
# Note durations (negative values presumably denote dotted notes, per the
# pysynth convention — TODO confirm).
durations = np.array([1, 2, 4, -2, -4, -8])
# Randomly generate notes, with accented notes ("*") interspersed: each of
# the 16 groups starts with one accented note followed by 3-4 plain notes.
sn = []
for t in range(16):
    n = np.random.randint(0, len(notes))
    note = notes[n] + "*"
    sn.append(note)
    for i in range(np.random.randint(3, 5)):
        note0 = notes[np.random.randint(0, len(notes))]
        sn.append(note0)
# Randomly generate a duration for each note to create a varied,
# irregular rhythm.
dn = []
for i in range(len(sn)):
    duration = durations[np.random.randint(0, len(durations))]
    # NOTE(review): nn is assigned but never used.
    nn = sn[i]
    dn.append(duration)
# Pair each note with its duration to form the melody.
melody = tuple(zip(sn, dn))
print(melody)
# Synthesize the score into a WAV file.
ps.make_wav(melody, fn=r"right.wav")
print("ok")
|
# Shell sort - https://onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=1093
def read_list(size):
    """Read *size* lines from standard input and return them as a list."""
    return [input() for _ in range(size)]
def out_of_place(lst, offset, length):
    """Strip and return the values of ``lst`` that are not at their target
    position.

    ``lst`` holds target indices (positions in the desired ordering); a
    value ``i`` counts as in place when its current index, shifted by
    ``offset`` (items removed in earlier passes), equals ``i``.  Misplaced
    values are removed from ``lst`` in place and returned.

    :param lst: mutable list of target indices — mutated by this call
    :param offset: how many items were removed before this pass
    :param length: size of the original (full) list
    :return: values removed from ``lst`` during this pass, ascending
    """
    oop = []
    # print('Offset: ', offset)
    for i in range(0, length):
        # Value already removed in an earlier iteration/pass.
        if not lst.count(i):
            continue
        # Effective position accounts for everything removed so far.
        index = lst.index(i) + offset
        if index != i:
            lst.remove(i)
            oop.append(i)
            # Each removal shifts the remaining items one slot left.
            offset += 1
    return oop
def shell_sort(current_order, desired):
    """Compute which items must be moved to turn ``current_order`` into
    ``desired``.

    Works on target indices: repeatedly strips out-of-place values until
    everything remaining is already in relative order, then maps the
    removed indices back to item names.

    :param current_order: item names in their current order
    :param desired: the same names in the desired order
    :return: names to move, largest target index first
    """
    # Replace each name by its position in the desired ordering.
    current_order = [desired.index(i) for i in current_order]
    length = len(desired)
    misplaced = []
    while True:
        oop = out_of_place(current_order, len(misplaced), length)
        if not oop:
            break
        misplaced += oop
        misplaced.sort()
        # Descending: items with the largest target index are moved first.
        misplaced = misplaced[::-1]
    return [desired[i] for i in misplaced]
def handle_block():
    """Process one test case: read the current and desired orders from
    stdin, then print the names to move, one per line."""
    count = int(input())
    current = read_list(count)
    target = read_list(count)
    for name in shell_sort(current, target):
        print(name)
    # Blank line separates consecutive test cases in the expected output.
    print()
if __name__ == '__main__':
    for _ in range(int(input())):
        handle_block()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Indufficient app conf."""
from django.apps import AppConfig
class InsufficientConfig(AppConfig):
    """Django application configuration for the insufficient app."""
    # Full dotted path as referenced from INSTALLED_APPS.
    name = 'backend.insufficient'
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#LogMapper
#
#MIT License
#
#Copyright (c) 2018 Jorge A. Baena - abaena78@gmail.com
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
===============================================================================
Main Control Loop for LogMapper Agent
@author: Jorge Andres Baena
@contact : www.logmapper.org
@copyright: "Copyright 2018, The LogMapper Project"
@license: "GPL"
@date: 7/04/2018
===============================================================================
"""
import os
import logging
import argparse
import config.config as cfg
import logmappercommon.utils.logging_util as logging_util
import logmapperagent.control.main_control as control
#%%
#==============================================================================
# Global Initialization.
#==============================================================================
logger = logging.getLogger(__name__)
#%%
#==============================================================================
# Functions.
#==============================================================================
def processArguments():
    """
    ===========================================================================
    Parse command line arguments
    ===========================================================================
    Builds the CLI parser, parses sys.argv and validates that the optional
    config file, when supplied, exists on disk (argparse exits otherwise).
    **Args**:
        None
    **Returns**:
        Parsed argument namespace -- (argparse.Namespace)
    """
    arg_parser = argparse.ArgumentParser(prog="LogMapper Agent")
    arg_parser.add_argument(
        "-rc", "--reset-config", dest="resetconfig",
        help="Reset configuration settings", action="store_true")
    arg_parser.add_argument(
        "-c", "--config", dest="configfilename", required=False,
        help="specific config file")
    arg_parser.add_argument(
        '-v', '--version', action='version',
        version='%(prog)s ' + cfg.__version__)
    parsed = arg_parser.parse_args()
    # A config file is optional, but when one is named it must exist.
    if parsed.configfilename and not os.path.exists(parsed.configfilename):
        arg_parser.error("The file %s does not exist!" % parsed.configfilename)
    return parsed
#%%
def main():
    """
    ===========================================================================
    logmapper-agent **Main** function
    ===========================================================================
    Loads configuration, configures logging, then drives the agent through
    its init / start / main-loop / stop lifecycle.
    **Args**:
        None
    **Returns**:
        None
    """
    cli_args = processArguments()
    app_config = cfg.loadConfig(cli_args.configfilename, cli_args.resetconfig)
    agent_config = cfg.loadLogMapperAgentConfig(app_config)
    logging_util.configureLogger(agent_config.logFilePath)
    logger.info("Start LogMapper Agent")
    cfg.printConfig(app_config)
    # Agent lifecycle: initialise, start the background task, block in the
    # main loop, then shut the task down on exit.
    control.initApplication(app_config)
    control.startApplicationTask(app_config)
    control.mainLoop(app_config)
    control.stopApplicationTask(app_config)
    logger.info("Finish LogMapper Agent")
#%%
#==============================================================================
# main
#==============================================================================
# Script entry point.
if __name__ == '__main__':
    main()
from flask.ext.login import UserMixin
from app import db
class User(UserMixin, db.Document):
    """Application user persisted in MongoDB.

    Stores the Khan Academy identity (kaid/username/nickname), the OAuth
    token pair, and the missions the user is associated with.
    """
    kaid = db.StringField(required=True, unique=True)
    username = db.StringField(required=True, unique=True)
    nickname = db.StringField(required=True)
    access_token = db.StringField(required=True)
    access_token_secret = db.StringField(required=True)
    # FIX: was misspelled `rquired=True`, so the required constraint was
    # never applied to this field; existing documents without a timestamp
    # will now fail validation on save — backfill if necessary.
    timestamp = db.DateTimeField(required=True)
    missions = db.ListField(db.ReferenceField('Mission'))
    colors = db.StringField(default="blue")
class Task(db.EmbeddedDocument):
    """A single task embedded inside a mission topic."""
    kaid = db.StringField(required=True)
    title = db.StringField(required=True)
    name = db.StringField(required=True)
class Topics(db.EmbeddedDocument):
    """A titled group of tasks embedded inside a mission."""
    title = db.StringField(required=True)
    tasks = db.EmbeddedDocumentListField("Task")
class Mission(db.Document):
    """A mission owned by a user, containing topics of tasks.

    ``code`` is a unique human-shareable join/lookup key.
    """
    title = db.StringField(required=True)
    code = db.StringField(required=True, unique=True)
    # dbref=False stores a plain ObjectId reference rather than a DBRef.
    owner = db.ReferenceField(User, dbref=False)
    topics = db.ListField(db.EmbeddedDocumentField("Topics"))
class TaskList(db.Document):
    """Standalone catalogue entry of a known task (not embedded)."""
    kaid = db.StringField(required=True)
    name = db.StringField(required=True)
    title = db.StringField(required=True)
|
import torch.nn as nn
from mmcv.cnn import normal_init, kaiming_init
from ..builder import HEADS
from .base import BaseHead
from ..builder import build_loss
from mmcv.cnn.bricks import build_norm_layer
def _init_weights(module, init_linear='normal', std=0.01, bias=0.):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in module.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=std, bias=bias)
else:
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d,
nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@HEADS.register_module()
class ContrastHead(nn.Module):
    """Projection head for contrastive representation learning.

    Spatially pools the backbone feature map, flattens it and projects it
    through a 2- or 3-layer MLP into the embedding space consumed by the
    contrastive loss.

    Args:
        out_channels (int): Dimension of the output embedding.
        in_channels (int): Number of channels in the input feature.
        hidden_channels (int | None): Width of the hidden MLP layer(s);
            defaults to ``in_channels`` when None.
        loss_contrast (dict): Config for building the contrastive loss.
            Default: dict(type='SimCLRLoss').
        spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
        norm_cfg (dict | None): Norm layer config inserted after each hidden
            Linear layer; plain Linear+ReLU is used when None.
        fc_layer_num (int): Number of Linear layers in the MLP (2 or 3).
        with_final_normalize (bool): Append a non-affine norm layer after
            the final Linear layer (requires ``norm_cfg``). Default: False.
        dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
    """

    def __init__(self,
                 out_channels,
                 in_channels,
                 hidden_channels=None,
                 loss_contrast=dict(type='SimCLRLoss'),
                 spatial_type='avg',
                 norm_cfg=None,
                 fc_layer_num=2,
                 with_final_normalize=False,
                 dropout_ratio=0.5,
                 init_std=0.01):
        super().__init__()
        self.out_channels = out_channels
        self.in_channels = in_channels
        self.loss_contrast = build_loss(loss_contrast)
        self.spatial_type = spatial_type
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        if self.dropout_ratio != 0:
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        if hidden_channels is None:
            self.hidden_channels = in_channels
        else:
            self.hidden_channels = hidden_channels
        # BUG FIX: the final normalize layer is built from norm_cfg, so
        # with_final_normalize=True with norm_cfg=None used to crash later
        # with AttributeError on `norm_cfg.copy()`. Fail fast instead.
        if with_final_normalize and norm_cfg is None:
            raise ValueError('with_final_normalize=True requires a norm_cfg')
        if fc_layer_num == 2:
            if norm_cfg is not None:
                norm1_name, self.norm1 = build_norm_layer(norm_cfg, self.hidden_channels)
                self.fcs = nn.Sequential(
                    nn.Linear(self.in_channels, self.hidden_channels, bias=False),
                    self.norm1,
                    nn.ReLU(inplace=True),
                    nn.Linear(self.hidden_channels, self.out_channels, bias=(not with_final_normalize))
                )
            else:
                self.fcs = nn.Sequential(
                    nn.Linear(self.in_channels, self.hidden_channels),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.hidden_channels, self.out_channels)
                )
        elif fc_layer_num == 3:
            if norm_cfg is not None:
                norm1_name, self.norm1 = build_norm_layer(norm_cfg, self.hidden_channels)
                norm2_name, self.norm2 = build_norm_layer(norm_cfg, self.hidden_channels)
                self.fcs = nn.Sequential(
                    nn.Linear(self.in_channels, self.hidden_channels, bias=False),
                    self.norm1,
                    nn.ReLU(inplace=True),
                    nn.Linear(self.hidden_channels, self.hidden_channels, bias=False),
                    self.norm2,
                    nn.ReLU(inplace=True),
                    nn.Linear(self.hidden_channels, self.out_channels, bias=(not with_final_normalize))
                )
            else:
                self.fcs = nn.Sequential(
                    nn.Linear(self.in_channels, self.hidden_channels, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.hidden_channels, self.hidden_channels, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.hidden_channels, self.out_channels)
                )
        else:
            raise NotImplementedError
        if with_final_normalize:
            # Non-affine norm on the embedding (no learnable scale/shift).
            norm_final_cfg = norm_cfg.copy()
            norm_final_cfg['affine'] = False
            norm_final_name, self.norm_final = build_norm_layer(norm_final_cfg, self.out_channels)
            # NOTE(review): the submodule index skips len(self.fcs); kept
            # as-is because renaming it would break existing checkpoints.
            self.fcs.add_module(str(len(self.fcs) + 1), self.norm_final)
        if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
            self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        else:
            self.avg_pool = None

    def init_weights(self):
        """Initiate the parameters from scratch."""
        _init_weights(self.fcs, std=self.init_std)

    def forward(self, x):
        """Defines the computation performed at every call.

        Args:
            x (torch.Tensor): The input feature map.

        Returns:
            torch.Tensor: The projected embedding, shape [N, out_channels].
        """
        # [N, in_channels, T, H, W] -> [N, in_channels, 1, 1, 1]
        if self.avg_pool is not None:
            x = self.avg_pool(x)
        if self.dropout is not None:
            x = self.dropout(x)
        # flatten to [N, in_channels]
        x = x.view(x.shape[0], -1)
        # project to [N, out_channels]
        out = self.fcs(x)
        return out

    def loss(self, embedding):
        """Compute the contrastive loss for a batch of embeddings."""
        return self.loss_contrast(embedding)
@HEADS.register_module()
class Contrast2DHead(nn.Module):
    """Projection head for contrastive learning on 2D feature maps.

    Pools the backbone feature map with 2D average pooling, flattens it,
    and projects it through a two-layer MLP into the embedding space
    consumed by the contrastive loss.

    Args:
        out_channels (int): Dimension of the output embedding.
        in_channels (int): Number of channels in the input feature.
        hidden_channels (int | None): Width of the hidden MLP layer;
            defaults to ``in_channels`` when None.
        loss_contrast (dict): Config for building the contrastive loss.
            Default: dict(type='SimCLRLoss').
        spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
        norm_cfg (dict | None): Norm layer config inserted after the hidden
            Linear layer; plain Linear+ReLU is used when None.
        dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
    """
    def __init__(self,
                 out_channels,
                 in_channels,
                 hidden_channels=None,
                 loss_contrast=dict(type='SimCLRLoss'),
                 spatial_type='avg',
                 norm_cfg=None,
                 dropout_ratio=0.5,
                 init_std=0.01):
        super().__init__()
        self.out_channels = out_channels
        self.in_channels = in_channels
        self.loss_contrast = build_loss(loss_contrast)
        self.spatial_type = spatial_type
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        if self.dropout_ratio != 0:
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        # self.fcs = nn.Sequential(
        #     nn.Linear(self.in_channels, self.in_channels),
        #     nn.BatchNorm1d(self.in_channels),
        #     nn.ReLU(inplace=True),
        #     nn.Linear(self.in_channels, self.out_channels)
        # )
        # self.fc1 = nn.Linear(self.in_channels, self.in_channels)
        if hidden_channels is None:
            self.hidden_channels = in_channels
        else:
            self.hidden_channels = hidden_channels
        # Two-layer MLP; the first Linear drops its bias when a norm layer
        # directly follows it (the norm's shift makes the bias redundant).
        if norm_cfg is not None:
            norm1_name, self.norm1 = build_norm_layer(norm_cfg, self.hidden_channels)
            self.fcs = nn.Sequential(
                nn.Linear(self.in_channels, self.hidden_channels, bias=False),
                self.norm1,
                nn.ReLU(inplace=True),
                nn.Linear(self.hidden_channels, self.out_channels)
            )
        else:
            self.fcs = nn.Sequential(
                nn.Linear(self.in_channels, self.hidden_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.hidden_channels, self.out_channels)
            )
        # self.relu = nn.ReLU(inplace=True)
        # self.fc2 = nn.Linear(self.in_channels, self.out_channels)
        if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
            self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        else:
            self.avg_pool = None
    def init_weights(self):
        """Initiate the parameters from scratch."""
        # normal_init(self.fc1, std=self.init_std)
        # normal_init(self.fc2, std=self.init_std)
        # normal_init(self.fcs, std=self.init_std)
        _init_weights(self.fcs, std=self.init_std)
    def forward(self, x):
        """Defines the computation performed at every call.

        Args:
            x (torch.Tensor): The input feature map.

        Returns:
            torch.Tensor: The projected embedding, shape [N, out_channels].
        """
        # [N, in_channels, H, W] -> [N, in_channels, 1, 1]
        if self.avg_pool is not None:
            x = self.avg_pool(x)
        if self.dropout is not None:
            x = self.dropout(x)
        # flatten to [N, in_channels]
        x = x.view(x.shape[0], -1)
        # project to [N, out_channels]
        out = self.fcs(x)
        return out
    def loss(self, embedding):
        """Compute the contrastive loss for a batch of embeddings."""
        return self.loss_contrast(embedding)
|
# -*- coding: utf-8 -*-
import os
import json
import base64
import hashlib
import logging
import signal
from contextlib import contextmanager
from datetime import datetime, timedelta
from subprocess import run, STDOUT, PIPE, TimeoutExpired, CompletedProcess
from tempfile import NamedTemporaryFile
log = logging.getLogger('connector_utils')
def _time_rm_nanos(time_str):
time1, time2 = time_str.rsplit('.', 1)
return '.'.join([time1, time2[:6]])
def timestr2dtime(time_str):
return datetime.fromisoformat(_time_rm_nanos(time_str))
def timenow():
    """Return the current UTC time as a *naive* datetime.

    Kept naive on purpose: utc_from_now_iso() appends a literal 'Z', which
    would render incorrectly ('+00:00Z') for an aware datetime.
    """
    # datetime.utcnow() is deprecated since Python 3.12; this is the
    # documented, behavior-identical replacement.
    from datetime import timezone
    return datetime.now(timezone.utc).replace(tzinfo=None)


def timenow_plus(seconds):
    """Return the current UTC time shifted *seconds* into the future."""
    return timenow() + timedelta(seconds=seconds)


def utc_from_now_iso(seconds):
    """ISO-8601 UTC string ('...Z', millisecond precision) *seconds* from now."""
    return timenow_plus(seconds).isoformat('T', timespec='milliseconds') + 'Z'
def unique_id(*args):
    """Deterministic hex id for the given args: md5 of 'arg1:arg2:...'.

    NOTE: md5 is acceptable here only as a non-cryptographic fingerprint;
    do not reuse this for anything security-sensitive.
    """
    joined = ':'.join(str(arg) for arg in args)
    return hashlib.md5(joined.encode()).hexdigest()
def append_os_env(env):
    """Return a copy of os.environ overlaid with the truthy entries of *env*.

    Entries whose value is falsy (None, '') are skipped so they cannot
    clobber inherited environment variables.
    """
    merged = os.environ.copy()
    if env:
        merged.update({key: value for key, value in env.items() if value})
    return merged
def execute_cmd(cmd, **kwargs) -> CompletedProcess:
    """Run *cmd* (argument list) and return the CompletedProcess.

    Supported kwargs:
        env (dict): extra environment variables layered over os.environ.
        input (str): text piped to the process's stdin.
        timeout (int): seconds before the run is aborted (default 120).
        sterr_in_stdout (bool): merge stderr into stdout.
            NOTE(review): presumably a typo for 'stderr_in_stdout', but the
            key is part of the public kwargs contract, so it is kept as-is.

    Raises:
        Exception: when the command times out or exits non-zero.
    """
    if kwargs.get('env'):
        opt_env = append_os_env(kwargs.get('env'))
    else:
        opt_env = None
    opt_input = kwargs.get('input')
    timeout = kwargs.get('timeout', 120)
    stderr = STDOUT if kwargs.get('sterr_in_stdout', False) else PIPE
    log.debug(f'Run command: {cmd}')
    try:
        result = run(cmd, stdout=PIPE, stderr=stderr, env=opt_env, input=opt_input,
                     timeout=timeout, encoding='UTF-8')
        log.debug(f'Command result: {result}')
    except TimeoutExpired as exc:
        message = 'Command execution timed out after {} seconds'.format(timeout)
        log.exception(message)
        # Preserve the original exception as the cause for debuggability.
        raise Exception(message) from exc
    if result.returncode == 0:
        return result
    # BUG FIX: log.exception() is only meaningful inside an exception
    # handler; called here it logged a bogus 'NoneType: None' traceback.
    log.error(result)
    raise Exception(result.stderr)
def join_stderr_stdout(process_result: CompletedProcess):
    """Format the stdout and stderr of a CompletedProcess as one string."""
    return 'StdOut: \n{0.stdout} \n\nStdErr: \n{0.stderr}'.format(process_result)
def create_tmp_file(content):
    """Write *content* (str) to a NamedTemporaryFile and return the open file.

    The caller must keep a reference to the returned object: the file is
    removed from disk as soon as it is closed or garbage-collected.
    """
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(content.encode())
    tmp.flush()
    return tmp
def generate_registry_config(registries_auth):
    """Build a docker-style config.json string from registry credentials.

    Each entry of *registries_auth* must provide 'username', 'password'
    and 'serveraddress'.
    """
    def _token(reg):
        credentials = reg['username'] + ':' + reg['password']
        return base64.b64encode(credentials.encode('ascii')).decode('utf-8')

    auths = {'https://' + reg['serveraddress']: {'auth': _token(reg)}
             for reg in registries_auth}
    return json.dumps({'auths': auths})
@contextmanager
def timeout(deadline):
    """Context manager raising TimeoutError if the body runs longer than
    *deadline* seconds.

    SIGALRM based, so it only works on Unix and in the main thread.
    """
    # Remember the previous handler so other SIGALRM users are not
    # permanently clobbered (BUG FIX: the handler used to be left as
    # SIG_IGN and a still-pending alarm was never cancelled).
    previous = signal.signal(signal.SIGALRM, raise_timeout)
    signal.alarm(deadline)
    try:
        yield
    except TimeoutError:
        raise
    finally:
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, previous)


def raise_timeout(signum, frame):
    """SIGALRM handler: convert the signal into a TimeoutError."""
    raise TimeoutError
|
#!/usr/bin/env python3
import re
from string import Template
from mpi_constants import constants
from mpi_functions import functions
from mpi_constants_fortran import constants_fortran
from mpi_functions_fortran import functions_fortran
print()
print("// C constants")
# One `extern const` declaration per C constant; every MPI_ prefix is
# rewritten to the ABI-neutral MPIABI_ prefix.
for (tp, nm) in constants:
    subs = {'abi_tp': re.sub(r"MPI_", "MPIABI_", tp),
            'abi_nm': re.sub(r"MPI_", "MPIABI_", nm)}
    print(Template("extern $abi_tp const $abi_nm;").substitute(subs))
print()
print("// C functions")
# One prototype per C function; argument types/names are numbered into the
# template substitutions as abi_atp0/anm0, abi_atp1/anm1, ...
for (tp, nm, args, flags) in functions:
    subs = {'abi_tp': re.sub(r"MPI_", "MPIABI_", tp),
            'abi_nm': re.sub(r"MPI_", "MPIABI_", nm)}
    for (i, (atp, anm)) in enumerate(args):
        subs['abi_atp{0}'.format(i)] = re.sub(r"MPI_", "MPIABI_", atp)
        subs['anm{0}'.format(i)] = anm
    tmpl = ["$abi_tp $abi_nm("]
    for (i, (atp, anm)) in enumerate(args):
        tmpl.append("  $abi_atp{0} $anm{0},".format(i))
    tmpl[-1] = re.sub(r",?$", "", tmpl[-1])  # remove trailing comma of last argument
    tmpl.append(");")
    print(Template("\n".join(tmpl)).substitute(subs))
print()
print("// Fortran constants")
# Fortran-side values are all MPIABI_Fint; names get the classic Fortran
# mangling: lower-case plus a trailing underscore.
for (tp, nm) in constants_fortran:
    subs = {'abi_tp': re.sub(r"MPI_\w+", "MPIABI_Fint", tp),
            'abi_nm': re.sub(r"MPI_", "MPIABI_", nm).lower() + "_"}
    print(Template("extern $abi_tp const $abi_nm;").
          substitute(subs))
print()
print("// Fortran functions")
for (tp, nm, args) in functions_fortran:
    subs = {'abi_tp': re.sub(r"MPI_\w+", "MPIABI_Fint", tp),
            'abi_nm': re.sub(r"MPI_", "MPIABI_", nm).lower() + "_"}
    for (i, (atp, anm)) in enumerate(args):
        subs['abi_atp{0}'.format(i)] = re.sub(r"MPI_\w+", "MPIABI_Fint", atp)
        subs['anm{0}'.format(i)] = anm
    tmpl = ["$abi_tp $abi_nm("]
    for (i, (atp, anm)) in enumerate(args):
        tmpl.append("  $abi_atp{0} $anm{0},".format(i))
    tmpl[-1] = re.sub(r",?$", "", tmpl[-1])  # remove trailing comma of last argument
    tmpl.append(");")
    # tmpl = ["extern $abi_tp (* const $abi_nm)("]
    # for (i, (atp, anm)) in enumerate(args):
    #     tmpl.append("  $abi_atp{0} $anm{0},".format(i))
    # tmpl[-1] = re.sub(r",?$", "", tmpl[-1]) # remove trailing comma of last argument
    # tmpl.append(");")
    print(Template("\n".join(tmpl)).substitute(subs))
|
import os
import argparse
import pickle
import yaml
import torch
from glob import glob
from tqdm.auto import tqdm
from easydict import EasyDict
from models.epsnet import *
from utils.datasets import *
from utils.transforms import *
from utils.misc import *
def num_confs(num: str):
    """Parse a conformer-count spec into a callable.

    'Nx' yields a multiplier (e.g. '2x' -> twice the number of reference
    conformers); a positive integer string yields a constant count.

    Raises:
        ValueError: if *num* is neither 'Nx' nor a positive integer.
    """
    if num.endswith('x'):
        return lambda x: x * int(num[:-1])
    elif int(num) > 0:
        return lambda x: int(num)
    else:
        # BUG FIX: previously raised a bare ValueError() with no message.
        raise ValueError('num_confs must be a positive integer or "Nx", got %r' % num)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('ckpt', type=str, help='path for loading the checkpoint')
    parser.add_argument('--save_traj', action='store_true', default=False,
                    help='whether store the whole trajectory for sampling')
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--tag', type=str, default='')
    parser.add_argument('--num_confs', type=num_confs, default=num_confs('2x'))
    parser.add_argument('--test_set', type=str, default=None)
    parser.add_argument('--start_idx', type=int, default=800)
    parser.add_argument('--end_idx', type=int, default=1000)
    parser.add_argument('--out_dir', type=str, default=None)
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--clip', type=float, default=1000.0)
    parser.add_argument('--n_steps', type=int, default=5000,
                    help='sampling num steps; for DSM framework, this means num steps for each noise scale')
    parser.add_argument('--global_start_sigma', type=float, default=0.5,
                    help='enable global gradients only when noise is low')
    parser.add_argument('--w_global', type=float, default=1.0,
                    help='weight for global gradients')
    # Parameters for DDPM
    parser.add_argument('--sampling_type', type=str, default='ld',
                    help='generalized, ddpm_noisy, ld: sampling method for DDIM, DDPM or Langevin Dynamics')
    parser.add_argument('--eta', type=float, default=1.0,
                    help='weight for DDIM and DDPM: 0->DDIM, 1->DDPM')
    args = parser.parse_args()
    # Load checkpoint
    # The training config (*.yml) is assumed to live two directories above
    # the checkpoint file -- TODO confirm against the training script layout.
    ckpt = torch.load(args.ckpt)
    config_path = glob(os.path.join(os.path.dirname(os.path.dirname(args.ckpt)), '*.yml'))[0]
    with open(config_path, 'r') as f:
        config = EasyDict(yaml.safe_load(f))
    seed_all(config.train.seed)
    log_dir = os.path.dirname(os.path.dirname(args.ckpt))
    # Logging
    output_dir = get_new_log_dir(log_dir, 'sample', tag=args.tag)
    logger = get_logger('test', output_dir)
    logger.info(args)
    # Datasets and loaders
    logger.info('Loading datasets...')
    transforms = Compose([
        CountNodesPerGraph(),
        AddHigherOrderEdges(order=config.model.edge_order), # Offline edge augmentation
    ])
    if args.test_set is None:
        test_set = PackedConformationDataset(config.dataset.test, transform=transforms)
    else:
        test_set = PackedConformationDataset(args.test_set, transform=transforms)
    # Model
    logger.info('Loading model...')
    model = get_model(ckpt['config'].model).to(args.device)
    model.load_state_dict(ckpt['model'])
    # Keep only the molecules with dataset index in [start_idx, end_idx).
    test_set_selected = []
    for i, data in enumerate(test_set):
        if not (args.start_idx <= i < args.end_idx): continue
        test_set_selected.append(data)
    done_smiles = set()
    results = []
    # Optionally resume from a previous partial results pickle; molecules
    # already present there are skipped below.
    if args.resume is not None:
        with open(args.resume, 'rb') as f:
            results = pickle.load(f)
        for data in results:
            done_smiles.add(data.smiles)
    for i, data in enumerate(tqdm(test_set_selected)):
        if data.smiles in done_smiles:
            logger.info('Molecule#%d is already done.' % i)
            continue
        # Number of samples is derived from the number of reference
        # conformers packed into this molecule's data object.
        num_refs = data.pos_ref.size(0) // data.num_nodes
        num_samples = args.num_confs(num_refs)
        data_input = data.clone()
        data_input['pos_ref'] = None
        batch = repeat_data(data_input, num_samples).to(args.device)
        clip_local = None
        for _ in range(2):  # Maximum number of retry
            try:
                pos_init = torch.randn(batch.num_nodes, 3).to(args.device)
                pos_gen, pos_gen_traj = model.langevin_dynamics_sample(
                    atom_type=batch.atom_type,
                    pos_init=pos_init,
                    bond_index=batch.edge_index,
                    bond_type=batch.edge_type,
                    batch=batch.batch,
                    num_graphs=batch.num_graphs,
                    extend_order=False, # Done in transforms.
                    n_steps=args.n_steps,
                    step_lr=1e-6,
                    w_global=args.w_global,
                    global_start_sigma=args.global_start_sigma,
                    clip=args.clip,
                    clip_local=clip_local,
                    sampling_type=args.sampling_type,
                    eta=args.eta
                )
                pos_gen = pos_gen.cpu()
                if args.save_traj:
                    data.pos_gen = torch.stack(pos_gen_traj)
                else:
                    data.pos_gen = pos_gen
                results.append(data)
                done_smiles.add(data.smiles)
                # Checkpoint all results so far after every molecule.
                save_path = os.path.join(output_dir, 'samples_%d.pkl' % i)
                logger.info('Saving samples to: %s' % save_path)
                with open(save_path, 'wb') as f:
                    pickle.dump(results, f)
                break   # No errors occurred, break the retry loop
            except FloatingPointError:
                # Numerical blow-up during sampling: retry once with local
                # gradient clipping enabled.
                clip_local = 20
                logger.warning('Retrying with local clipping.')
    save_path = os.path.join(output_dir, 'samples_all.pkl')
    logger.info('Saving samples to: %s' % save_path)
    def get_mol_key(data):
        # Restore original dataset order; O(n^2) overall, but n is small.
        for i, d in enumerate(test_set_selected):
            if d.smiles == data.smiles:
                return i
        return -1
    results.sort(key=get_mol_key)
    with open(save_path, 'wb') as f:
        pickle.dump(results, f)
|
from . import OverMind
from .client import Client |
class UserNotFoundException(Exception):
    """Raised when an unregistered user tries to use the service."""
    pass
class BadShukkinStateException(Exception):
    """Raised when the preconditions for clocking in (shukkin) are not met."""
    pass
class BadTaikinStateException(Exception):
    """Raised when the preconditions for clocking out (taikin) are not met."""
    pass
|
import time
import board
import busio
# from adafruit_ads1x15.differential import ADS1115
from adafruit_ads1x15.single_ended import ADS1115
import array
import touchio
def calc_mean(this_array):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = sum(this_array)
    return total / len(this_array)
adc_active = True
# adc_active = False
# Capacitive touch on D3
touch = touchio.TouchIn(board.D3)
if adc_active:
    # Create the I2C bus
    i2c = busio.I2C(board.SCL, board.SDA)
    # Create the ADC object using the I2C bus
    adc = ADS1115(i2c)
adc_gain = 2/3
# adc_gain = 1
# ADS1X15_CONFIG_GAIN = {
#     2/3: 0x0000,
#     1: 0x0200,
#     2: 0x0400,
#     4: 0x0600,
#     8: 0x0800,
#     16: 0x0A00
# }
#adc_datarate = 128
adc_datarate = 128
# # Mapping of data/sample rate to config register values for ADS1115 (slower).
# ADS1115_CONFIG_DR = {
#     8: 0x0000,
#     16: 0x0020,
#     32: 0x0040,
#     64: 0x0060,
#     128: 0x0080,
#     250: 0x00A0,
#     475: 0x00C0,
#     860: 0x00E0
# }
# https://github.com/adafruit/Adafruit_CircuitPython_ADS1x15/blob/master/adafruit_ads1x15/adafruit_ads1x15.py
#################
###### Danger!
###### When analog inputs of ADS1115 are driven above VDD+0.3V = 3.6V
###### other channels are influenced!!!
################
channels = (0, 1, 2, 3)
# channel_names = ['adc_ch01', 'adc_ch23']
channel_names = ['adc_ch00_se', 'adc_ch01_se', 'adc_ch02_se', 'adc_ch03_se']
# Emit one JSON line every `print_length` samples; readings are averaged
# over a rolling window of `data_length` raw samples.
print_length = 3
print_index = 0
data_length = 15
data_index = 0
data_raw = [array.array('f', [0] * data_length) for p in channels]
data_avg = [0.0 for p in channels]
touch_raw = array.array('f', [0] * data_length)
touch_avg = 0.0
while True:
    # NOTE(review): touch.raw_value is read twice here and touch_raw_value
    # is never used afterwards -- presumably a single read was intended.
    touch_raw_value = touch.raw_value
    touch_raw[data_index] = touch.raw_value
    if adc_active:
        for i, ch in enumerate(channels):
            # throw away 1 sample for channel crosstalk
            # raw = adc.read_volts(ch, adc_gain, adc_datarate)
            raw = adc.read_volts(ch, adc_gain, adc_datarate)
            data_raw[i][data_index] = raw
    if print_index == 0:
        # Assemble the JSON object by hand (string concatenation).
        output = ""
        output += '{'
        output += ' "guid": "btrn-adc-sensor-0002", '
        touch_avg = calc_mean(touch_raw)
        output += ' "touch_raw"'
        output += ': %0.2f' % touch_avg
        if adc_active:
            for i, p in enumerate(channels):
                data_avg[i] = calc_mean(data_raw[i])
                output += ', '
                output += '"' + channel_names[i] + '"'
                output += ': %0.6f' % data_avg[i]
        output += '}'
        print(output)
        # # Print results
        # print("{:>5}\t{:>5.3f}".format(raw, volts))
    # Sleep for a bit
    # time.sleep(0.5)
    data_index = (data_index + 1) % data_length
    print_index = (print_index + 1) % print_length
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
from cdpcli import DEFAULT_PROFILE_NAME
from cdpcli.extensions.commands import BasicCommand
from . import PREDEFINED_SECTION_NAMES
class ConfigureGetCommand(BasicCommand):
    """`cdp configure get`: print a single configuration value to stdout."""
    NAME = 'get'
    DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get', '_description.rst')
    SYNOPSIS = ('cdp configure get varname [--profile profile-name]')
    EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
    ARG_TABLE = [
        {'name': 'varname',
         'help_text': 'The name of the config value to retrieve.',
         'action': 'store',
         'cli_type_name': 'string', 'positional_arg': True},
    ]
    def __init__(self, stream=sys.stdout):
        """*stream* is injectable for testing; defaults to stdout."""
        super(ConfigureGetCommand, self).__init__()
        self._stream = stream
    def _run_main(self, client_creator, args, parsed_globals):
        """Resolve and print the requested variable.

        Returns 0 when the value was found and printed, 1 otherwise.
        """
        context = client_creator.context
        varname = args.varname
        value = None
        if '.' not in varname:
            # get_scoped_config() returns the config variables in the config
            # file (not the logical_var names), which is what we want.
            config = context.get_scoped_config()
            value = config.get(varname)
        else:
            value = self._get_dotted_config_value(context, varname)
        if value is not None:
            self._stream.write(value)
            self._stream.write('\n')
            return 0
        else:
            return 1
    def _get_dotted_config_value(self, context, varname):
        """Resolve a dotted name such as 'plugin.x', 'profile.dev.y' or
        'default.y' against the full config; returns None when absent."""
        parts = varname.split('.')
        num_dots = varname.count('.')
        # Logic to deal with predefined sections like [preview], [plugin] and etc.
        if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
            full_config = context.full_config
            section, config_name = varname.split('.')
            value = full_config.get(section, {}).get(config_name)
            if value is None:
                # Try to retrieve it from the profile config.
                value = full_config['profiles'].get(
                    section, {}).get(config_name)
            return value
        # Explicit 'profile.<name>.<key>' form.
        if parts[0] == 'profile':
            profile_name = parts[1]
            config_name = parts[2]
            remaining = parts[3:]
        # Check if varname starts with 'default' profile (e.g.
        # default.foo.bar.instance_profile). If not, go further to check if
        # varname starts with a known profile name
        elif parts[0] == DEFAULT_PROFILE_NAME or \
                (parts[0] in context.full_config['profiles']):
            profile_name = parts[0]
            config_name = parts[1]
            remaining = parts[2:]
        else:
            # Fall back to the currently-selected profile.
            profile_name = context.get_config_variable('profile')
            config_name = parts[0]
            remaining = parts[1:]
        value = context.full_config['profiles'].get(
            profile_name, {}).get(config_name)
        # At most one extra nesting level is supported (e.g. 'api.endpoint').
        if len(remaining) == 1:
            try:
                value = value.get(remaining[-1])
            except AttributeError:
                value = None
        return value
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Define package structure."""
import numpy as np
from setuptools_scm import get_version
from pathlib import Path
from pkg_resources import get_distribution, DistributionNotFound
from .branch_scheme import branch_scheme
try: # pragma: nocover
    # get accurate version for developer installs
    version_str = get_version(Path(__file__).parent.parent, local_scheme=branch_scheme)
    __version__ = version_str
except (LookupError, ImportError):
    try:
        # Set the version automatically from the package details.
        __version__ = get_distribution(__name__).version
    except DistributionNotFound: # pragma: nocover
        # package is not installed
        pass
# Before we can do anything else, we need to initialize some core, shared
# variables.
from sqlalchemy.ext.declarative import declarative_base
# define some default tolerances for various units
DEFAULT_DAY_TOL = {'atol': 1e-3 / (3600. * 24.), 'rtol': 0}  # ms
DEFAULT_HOUR_TOL = {'atol': 1e-3 / (3600), 'rtol': 0}  # ms
# NOTE(review): divides by 3600 exactly like the hour tolerance above; a
# minute tolerance would presumably use 60 — confirm intent.
DEFAULT_MIN_TOL = {'atol': 1e-3 / (3600), 'rtol': 0}  # ms
DEFAULT_GPS_TOL = {'atol': 1e-3, 'rtol': 0}  # ms
class MCDeclarativeBase(object):
    """Base table object: adds a standard repr and a near-equality check."""

    def __repr__(self):
        """Represent the row as <ClassName(val1, val2, ...)>."""
        columns = self.__table__.columns.keys()
        rep_str = '<' + self.__class__.__name__ + '('
        for c in columns:
            rep_str += str(getattr(self, c)) + ', '
        rep_str = rep_str[0:-2]
        rep_str += ')>'
        return rep_str

    def isclose(self, other):
        """Test if two objects are nearly equal.

        String-like and int-like columns must match exactly; float-like
        columns are compared with np.isclose, using per-column tolerances
        from ``self.tols`` when present. Prints the reason on mismatch.
        """
        if not isinstance(other, self.__class__):
            print('not the same class')
            return False
        self_columns = self.__table__.columns
        other_columns = other.__table__.columns
        if {c.name for c in self_columns} != {c.name for c in other_columns}:
            print('set of columns are not the same')
            return False
        for c in self_columns:
            self_c = getattr(self, c.name)
            other_c = getattr(other, c.name)
            if isinstance(self_c, (str, int)):
                if self_c != other_c:
                    # BUG FIX: report the column *name* — formatting the
                    # Column object itself made the message unreadable.
                    print('column {col} is string-like or int-like, values are not '
                          'equal'.format(col=c.name))
                    return False
            elif isinstance(self_c, np.ndarray) and self_c.dtype.kind == 'i':
                if not np.all(self_c == other_c):
                    print('column {col} is an int-like array, values are not equal'.format(col=c.name))
                    return False
            elif self_c is None:
                if other_c is None:
                    pass  # nullable columns, both null
                else:
                    print('column {col} is None in first object and {val} in the second.'
                          .format(col=c.name, val=other_c))
                    return False
            else:
                if hasattr(self, 'tols') and c.name in self.tols.keys():
                    atol = self.tols[c.name]['atol']
                    rtol = self.tols[c.name]['rtol']
                else:
                    # use numpy defaults
                    atol = 1e-08
                    rtol = 1e-05
                # NOTE(review): assumes the remaining columns are numeric;
                # np.isclose would raise on e.g. datetime values — confirm.
                if not np.isclose(self_c, other_c, atol=atol, rtol=rtol):
                    print('column {col} is float-like or a float-like array, values are not equal'
                          .format(col=c.name))
                    return False
        return True
# Swap the plain helper class for a SQLAlchemy declarative base built on it,
# so every table class inherits the __repr__/isclose helpers above.
MCDeclarativeBase = declarative_base(cls=MCDeclarativeBase)
import logging  # noqa
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
# Now we can pull in the rest of our definitions.
def NotNull(kind, **kwargs):
    """Define a non-nullable column.

    Thin wrapper over ``sqlalchemy.Column`` that forces ``nullable=False``;
    all other keyword arguments are passed through unchanged.
    """
    from sqlalchemy import Column
    return Column(kind, nullable=False, **kwargs)
from . import autocorrelations # noqa
from . import cm_transfer # noqa
from . import cm_dossier # noqa
from . import cm_active # noqa
from . import cm_sysdef # noqa
from . import cm_utils # noqa
from . import cm_sysutils # noqa
from . import cm_partconnect # noqa
from . import correlator # noqa
from . import daemon_status # noqa
from . import geo_location # noqa
from . import observations # noqa
from . import subsystem_error # noqa
from . import server_status # noqa
from . import librarian # noqa
from . import node # noqa
from . import rtp # noqa
from . import qm # noqa
from . import weather # noqa
from . import mc # noqa keep this last.
|
from object import *
import matplotlib
import matplotlib.pyplot as plt
class Cat(Object):
    """Random-walk "cat" agent built on the project's ``Object`` base class
    (which supplies step size, wall/obstacle checks and pixel mapping)."""
    def update(self):
        """Advance the state by one time step.

        State layout appears to be x = [pos_x, pos_y, vel_x, vel_y]:
        positions integrate the velocities, velocities receive Gaussian
        noise scaled by 100 -- Euler integration with self.step, presumably;
        TODO confirm against the Object base class.
        """
        self.x = self.x + np.array([self.x[2], self.x[3], 100*np.random.randn(), 100*np.random.randn()])*self.step
        self.t = self.t + self.step
        self.check_wall()
        self.check_obstacles()
        self.state2pixel()
    def run(self, interval):
        """Simulate for *interval* time units, recording [x, y, t] samples
        into ``self.trajectory`` (converted to an ndarray at the end)."""
        self.trajectory = []
        self.trajectory.append([self.x[0], self.x[1], self.t])
        tf = self.t + interval
        while self.t < tf:
            self.update()
            self.trajectory.append([self.x[0], self.x[1], self.t])
        self.trajectory = np.array(self.trajectory)
def main():
    """Run one Cat simulation and plot both position components over time."""
    pygame.init()
    pygame.display.set_mode()
    cat1 = Cat(x0=[0, 0, 0, 0], obstacles=[(1, 1), (1, 3), (3, 1), (3, 3)])
    cat1.run(100)
    plt.plot(cat1.trajectory[:, 2], cat1.trajectory[:, 0], 'b', label='x(t)')
    # BUG FIX: the second curve is the y component but was also labelled
    # 'x(t)' and drawn in the same colour, making the legend useless.
    plt.plot(cat1.trajectory[:, 2], cat1.trajectory[:, 1], 'r', label='y(t)')
    plt.legend(loc='best')
    plt.xlabel('t')
    plt.grid()
    plt.show()


if __name__ == "__main__":
    main()
|
def dd(L):
    """Return a dict mapping each element of L to its occurrence count."""
    counts = {}
    for item in L:
        counts[item] = counts.get(item, 0) + 1
    return counts
def is_list_permutation(L1, L2):
    """Check whether L1 and L2 are permutations of each other.

    Both lists may contain integers and strings. Returns False when they
    are not permutations of each other; for two empty lists returns
    (None, None, None); otherwise returns a tuple of 3 items in this
    order: the element occurring most, how many times it occurs, and
    its type.
    """
    if len(L1) != len(L2):
        return False
    if not L1:
        return (None, None, None)
    counts1 = {}
    for item in L1:
        counts1[item] = counts1.get(item, 0) + 1
    counts2 = {}
    for item in L2:
        counts2[item] = counts2.get(item, 0) + 1
    # Permutations iff both multisets of elements coincide.
    if counts1 != counts2:
        return False
    top = max(counts1.values())
    # First key (in L1 insertion order) reaching the maximum count,
    # preserving the original tie-breaking behaviour.
    for item in counts1:
        if counts1[item] == top:
            return (item, top, type(item))
# L1 = []
# L2 = []
# print('0: ', is_list_permutation(L1, L2))
# L1 = ['a', 'a', 'b']
# L2 = ['a', 'b']
# print('a: ', is_list_permutation(L1, L2))
# Smoke test: permuted lists where 1 occurs most (3 times) -> (1, 3, int).
L1 = [1, 'b', 1, 'c', 'c', 1]
L2 = ['c', 1, 'b', 1, 1, 'c']
print('rr: ', is_list_permutation(L1, L2))
|
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        The h-index is the largest k such that at least k papers have
        at least k citations each.
        """
        # Rank the papers by citations descending; h is the deepest rank
        # (1-based) whose paper still has at least that many citations.
        ranked = sorted(citations, reverse=True)
        h = 0
        for rank, cites in enumerate(ranked, start=1):
            if cites >= rank:
                h = rank
            else:
                break
        return h
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.