# coding=utf-8
"""Model filter change tests."""
import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.test import TestCase, override_settings
from django.urls import reverse
from acme.core.models import Customer
from acme.tests import new_user
from model_filters.constants import FILTER_PARAMETER_NAME, FORM_SAVE_APPLY, OR_SEPARATOR
from model_filters.models import FieldFilter, ModelFilter
@pytest.mark.e2e
@pytest.mark.change
class Tests(TestCase):
"""Do end-to-end change tests."""
@pytest.mark.permissions
def test_change_model_filter_permissions(self):
"""Basic change permissions checks for anon, user, and staff."""
owner = new_user(is_staff=True)
content_type = ContentType.objects.get_for_model(Customer)
model_filter_ct = ContentType.objects.get_for_model(ModelFilter)
permission = Permission.objects.get(
content_type=content_type, codename="view_customer"
)
owner.user_permissions.add(permission)
model_filter = ModelFilter.objects.create(
name="Customer Filter",
content_type=content_type,
owner=owner,
)
url = reverse("admin:model_filters_modelfilter_change", args=(model_filter.id,))
# Anonymous user should be asked to login to admin site.
response = self.client.get(url, follow=True)
self.assertEqual("/admin/login/", response.request["PATH_INFO"])
# Regular user should be asked to login to admin site.
user = new_user()
self.client.force_login(user)
response = self.client.get(url, follow=True)
self.assertEqual("/admin/login/", response.request["PATH_INFO"])
# A staff user without access to the model filter's content type is redirected.
staff = new_user(is_staff=True)
self.client.force_login(staff)
response = self.client.get(url)
self.assertRedirects(response, "/admin/")
# Give staff user access to the model filter's content type.
permission = Permission.objects.get(
content_type=content_type, codename="change_customer"
)
staff.user_permissions.add(permission)
# A staff user that does not own the model filter should still be blocked.
response = self.client.get(url, follow=True)
self.assertRedirects(response, "/admin/")
# Disable owner-only permissions so regular staff can get results.
with override_settings(
MODEL_FILTERS_VIEW_OWNER_ONLY=False, MODEL_FILTERS_CHANGE_OWNER_ONLY=False
):
# Add view class permissions.
permission = Permission.objects.get(
content_type=model_filter_ct, codename="view_modelfilter"
)
staff.user_permissions.add(permission)
staff = get_object_or_404(get_user_model(), pk=staff.id)
# Regular staff can view, but not change.
response = self.client.get(url, follow=True)
self.assertEqual(200, response.status_code)
self.assertNotContains(response, "Save")
self.assertNotContains(response, "Delete")
self.assertEqual(url, response.request["PATH_INFO"])
# Give staff user access to the model filter content type.
permission = Permission.objects.get(
content_type=model_filter_ct, codename="change_modelfilter"
)
staff.user_permissions.add(permission)
# Regular staff has permissions, access is allowed.
response = self.client.get(url, follow=True)
self.assertEqual(200, response.status_code)
self.assertEqual(url, response.request["PATH_INFO"])
# The owner of the model filter should still be allowed to change it.
self.client.force_login(owner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_change_model_filter(self):
"""Change simple model filters and save them differently."""
owner = new_user(is_staff=True, is_superuser=True)
content_type = ContentType.objects.get_for_model(Customer)
model_filter = ModelFilter.objects.create(
name="Customer Filter",
content_type=content_type,
owner=owner,
)
field_filter = FieldFilter.objects.create(
model_filter=model_filter,
field="name",
operator="exact",
value="Wile E. Coyote",
)
self.client.force_login(owner)
url = reverse("admin:model_filters_modelfilter_change", args=(model_filter.id,))
methods = ["_save", "_continue", FORM_SAVE_APPLY]
for submit in methods:
data = {
"id": model_filter.id,
"name": "New Name",
"description": "Fancy words.",
"content_type": model_filter.content_type_id,
"owner": owner.id,
"fields-TOTAL_FORMS": 1,
"fields-INITIAL_FORMS": 1,
"fields-MIN_NUM_FORMS": 0,
"fields-MAX_NUM_FORMS": 1000,
"fields-0-id": field_filter.id,
"fields-0-field": field_filter.field,
"fields-0-operator": field_filter.operator,
"fields-0-value": field_filter.value,
submit: "Save button",
}
response = self.client.post(url, data=data, follow=True)
request = response.request
self.assertEqual(200, response.status_code)
self.assertFalse(response.context.get("errors"))
# The model filter should be updated.
model_filters = ModelFilter.objects.all()
model_filter2 = model_filters[0]
self.assertEqual(model_filter2.name, "New Name")
self.assertEqual(model_filter2.description, "Fancy words.")
self.assertEqual(model_filter2.content_type, model_filter.content_type)
self.assertEqual(model_filter2.owner, model_filter.owner)
self.assertEqual(model_filter2.ephemeral, model_filter.ephemeral)
self.assertEqual(1, model_filter2.fields.count())
field_filter2 = model_filter2.fields.all()[0]
self.assertEqual(field_filter2.field, field_filter.field)
self.assertEqual(field_filter2.operator, field_filter.operator)
self.assertEqual(field_filter2.value, field_filter.value)
self.assertEqual(field_filter2.negate, field_filter.negate)
if submit == "_save":
# Input "_save" redirects to model filter list.
self.assertEqual(
reverse("admin:model_filters_modelfilter_changelist"),
request["PATH_INFO"],
)
elif submit == "_continue":
# Input "_continue" redirects to model filter change form.
self.assertEqual(
reverse(
"admin:model_filters_modelfilter_change",
args=(model_filter.id,),
),
request["PATH_INFO"],
)
elif submit == FORM_SAVE_APPLY:
# Input "_saveapply" redirects to model filter content type list.
filter_url = reverse(
f"admin:{content_type.app_label}_{content_type.model}_changelist"
)
self.assertEqual(
f"{filter_url}?{FILTER_PARAMETER_NAME}={model_filter.id}",
f"{request['PATH_INFO']}?{request['QUERY_STRING']}",
)
def test_change_model_filter_remove_field(self):
"""Remove field filters from a model filter."""
owner = new_user(is_staff=True, is_superuser=True)
content_type = ContentType.objects.get_for_model(Customer)
model_filter = ModelFilter.objects.create(
name="Customer Filter",
content_type=content_type,
owner=owner,
)
field_filter = FieldFilter.objects.create(
model_filter=model_filter,
field="name",
operator="exact",
value="Wile E. Coyote",
)
self.client.force_login(owner)
url = reverse("admin:model_filters_modelfilter_change", args=(model_filter.id,))
data = {
"id": model_filter.id,
"name": model_filter.name or "",
"description": model_filter.description or "",
"content_type": model_filter.content_type_id,
"owner": owner.id,
"fields-TOTAL_FORMS": 0,
"fields-INITIAL_FORMS": 1,
"fields-MIN_NUM_FORMS": 0,
"fields-MAX_NUM_FORMS": 1000,
"fields-0-id": field_filter.id,
"fields-0-field": field_filter.field,
"fields-0-operator": field_filter.operator,
"fields-0-value": field_filter.value,
"fields-0-DELETE": "checked",
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
errors = response.context.get("errors")
self.assertTrue(errors)
self.assertEqual("At least one field filter is required.", errors[0])
# Add two more field filters (an OR and a valid field).
field_filter2 = FieldFilter.objects.create(
model_filter=model_filter,
field=OR_SEPARATOR,
operator="exact",
value="",
)
field_filter3 = FieldFilter.objects.create(
model_filter=model_filter,
field="membership",
operator="equals",
value=Customer.MEMBERSHIP_PLATINUM,
)
# Try to delete the first field filter.
data = {
"id": model_filter.id,
"name": model_filter.name or "",
"description": model_filter.description or "",
"content_type": model_filter.content_type_id,
"owner": owner.id,
"fields-TOTAL_FORMS": 2,
"fields-INITIAL_FORMS": 3,
"fields-MIN_NUM_FORMS": 0,
"fields-MAX_NUM_FORMS": 1000,
"fields-0-id": field_filter.id,
"fields-0-field": field_filter.field,
"fields-0-operator": field_filter.operator,
"fields-0-value": field_filter.value,
"fields-0-DELETE": "checked",
"fields-1-id": field_filter2.id,
"fields-1-field": field_filter2.field,
"fields-1-operator": field_filter2.operator,
"fields-1-value": field_filter2.value,
"fields-2-id": field_filter3.id,
"fields-2-field": field_filter3.field,
"fields-2-operator": field_filter3.operator,
"fields-2-value": field_filter3.value,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
errors = response.context.get("errors")
self.assertTrue(errors)
self.assertEqual("First field filter cannot be an OR separator.", errors[0])
# Try to delete the last field filter.
data = {
"id": model_filter.id,
"name": model_filter.name or "",
"description": model_filter.description or "",
"content_type": model_filter.content_type_id,
"owner": owner.id,
"fields-TOTAL_FORMS": 2,
"fields-INITIAL_FORMS": 3,
"fields-MIN_NUM_FORMS": 0,
"fields-MAX_NUM_FORMS": 1000,
"fields-0-id": field_filter.id,
"fields-0-field": field_filter.field,
"fields-0-operator": field_filter.operator,
"fields-0-value": field_filter.value,
"fields-1-id": field_filter2.id,
"fields-1-field": field_filter2.field,
"fields-1-operator": field_filter2.operator,
"fields-1-value": field_filter2.value,
"fields-2-id": field_filter3.id,
"fields-2-field": field_filter3.field,
"fields-2-operator": field_filter3.operator,
"fields-2-value": field_filter3.value,
"fields-2-DELETE": "checked",
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
errors = response.context.get("errors")
self.assertTrue(errors)
self.assertEqual("Last field filter cannot be an OR separator.", errors[0])
# Delete the middle (OR) field filter.
data = {
"id": model_filter.id,
"name": model_filter.name or "",
"description": model_filter.description or "",
"content_type": model_filter.content_type_id,
"owner": owner.id,
"fields-TOTAL_FORMS": 2,
"fields-INITIAL_FORMS": 3,
"fields-MIN_NUM_FORMS": 0,
"fields-MAX_NUM_FORMS": 1000,
"fields-0-id": field_filter.id,
"fields-0-field": field_filter.field,
"fields-0-operator": field_filter.operator,
"fields-0-value": field_filter.value,
"fields-1-id": field_filter2.id,
"fields-1-field": field_filter2.field,
"fields-1-operator": field_filter2.operator,
"fields-1-value": field_filter2.value,
"fields-1-DELETE": "checked",
"fields-2-id": field_filter3.id,
"fields-2-field": field_filter3.field,
"fields-2-operator": field_filter3.operator,
"fields-2-value": field_filter3.value,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
self.assertFalse(response.context.get("errors"))
self.assertEqual(2, model_filter.fields.count())
def test_change_model_filter_related_fields(self):
"""Cannot change unchangeable related fields."""
owner = new_user(is_staff=True, is_superuser=True)
content_type = ContentType.objects.get_for_model(Customer)
model_filter = ModelFilter.objects.create(
name="Customer Filter",
content_type=content_type,
owner=owner,
)
field_filter = FieldFilter.objects.create(
model_filter=model_filter,
field="name",
operator="exact",
value="Wile E. Coyote",
)
self.client.force_login(owner)
url = reverse("admin:model_filters_modelfilter_change", args=(model_filter.id,))
bad_choice = (
"Select a valid choice. That choice is not one of the available choices."
)
# Try to change the owner.
bad_data = {
"id": model_filter.id,
"name": model_filter.name or "",
"description": model_filter.description or "",
"content_type": model_filter.content_type_id,
"owner": 9999,
"fields-TOTAL_FORMS": 0,
"fields-INITIAL_FORMS": 1,
"fields-MIN_NUM_FORMS": 0,
"fields-MAX_NUM_FORMS": 1000,
"fields-0-id": field_filter.id,
"fields-0-field": field_filter.field,
"fields-0-operator": field_filter.operator,
"fields-0-value": field_filter.value,
}
# Try to change the content type.
bad_data2 = dict(bad_data)
bad_data2["owner"] = owner.id
bad_data2["content_type"] = 9999
for data in [bad_data, bad_data2]:
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
errors = response.context.get("errors")
self.assertTrue(errors)
self.assertEqual(bad_choice, errors[0][0])
|
"""Tests for JsonFirmwareUpdateProtocol."""
# Copyright 2019 WolkAbout Technology s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import unittest
sys.path.append("..") # noqa
from wolk_gateway_module.json_firmware_update_protocol import (
JsonFirmwareUpdateProtocol,
)
from wolk_gateway_module.model.firmware_update_status import (
FirmwareUpdateStatus,
FirmwareUpdateState,
FirmwareUpdateErrorCode,
)
from wolk_gateway_module.model.message import Message
class JsonFirmwareUpdateProtocolTests(unittest.TestCase):
"""JsonFirmwareUpdateProtocol Tests."""
DEVICE_PATH_PREFIX = "d/"
FIRMWARE_UPDATE_INSTALL_TOPIC_ROOT = "p2d/firmware_update_install/"
FIRMWARE_UPDATE_ABORT_TOPIC_ROOT = "p2d/firmware_update_abort/"
FIRMWARE_UPDATE_STATUS_TOPIC_ROOT = "d2p/firmware_update_status/"
FIRMWARE_VERSION_UPDATE_TOPIC_ROOT = "d2p/firmware_version_update/"
def test_get_inbound_topics_for_device(self):
"""Test that returned list is correct for given device key."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
device_key = "some_key"
expected = [
self.FIRMWARE_UPDATE_INSTALL_TOPIC_ROOT
+ self.DEVICE_PATH_PREFIX
+ device_key,
self.FIRMWARE_UPDATE_ABORT_TOPIC_ROOT
+ self.DEVICE_PATH_PREFIX
+ device_key,
]
self.assertEqual(
expected,
json_firmware_update_protocol.get_inbound_topics_for_device(
device_key
),
)
def test_make_firmware_update_installation_status_message(self):
"""Test that firmware update status message is created correctly."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
device_key = "some_key"
status = FirmwareUpdateStatus(FirmwareUpdateState.INSTALLATION)
expected = Message(
self.FIRMWARE_UPDATE_STATUS_TOPIC_ROOT
+ self.DEVICE_PATH_PREFIX
+ device_key,
json.dumps({"status": "INSTALLATION"}),
)
self.assertEqual(
expected,
json_firmware_update_protocol.make_update_message(
device_key, status
),
)
def test_make_firmware_update_error_file_system_status_message(self):
"""Test that firmware update status message is created correctly."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
device_key = "some_key"
status = FirmwareUpdateStatus(
FirmwareUpdateState.INSTALLATION,
FirmwareUpdateErrorCode.FILE_SYSTEM_ERROR,
)
expected = Message(
self.FIRMWARE_UPDATE_STATUS_TOPIC_ROOT
+ self.DEVICE_PATH_PREFIX
+ device_key,
json.dumps(
{
"status": status.status.value,
"error": status.error_code.value,
}
),
)
self.assertEqual(
expected,
json_firmware_update_protocol.make_update_message(
device_key, status
),
)
def test_make_version_message(self):
"""Test that firmware version message is created correctly."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
device_key = "some_key"
firmware_version = "v1.0"
expected = Message(
self.FIRMWARE_VERSION_UPDATE_TOPIC_ROOT
+ self.DEVICE_PATH_PREFIX
+ device_key,
firmware_version,
)
self.assertEqual(
expected,
json_firmware_update_protocol.make_version_message(
device_key, firmware_version
),
)
def test_is_firmware_install_command(self):
"""Test that message is firmware install command."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
message = Message(self.FIRMWARE_UPDATE_INSTALL_TOPIC_ROOT)
self.assertTrue(
json_firmware_update_protocol.is_firmware_install_command(message)
)
def test_is_firmware_abort_command(self):
"""Test that message is firmware abort command."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
message = Message(self.FIRMWARE_UPDATE_ABORT_TOPIC_ROOT)
self.assertTrue(
json_firmware_update_protocol.is_firmware_abort_command(message)
)
def test_make_firmware_file_path(self):
"""Test firmare file path is extracted correctly."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
expected = "some/path/to/file"
message = Message(
self.FIRMWARE_UPDATE_INSTALL_TOPIC_ROOT,
json.dumps({"fileName": expected}),
)
self.assertEqual(
expected,
json_firmware_update_protocol.make_firmware_file_path(message),
)
def test_extract_key_from_message(self):
"""Test that device key is correctly extracted from abort message."""
json_firmware_update_protocol = JsonFirmwareUpdateProtocol()
device_key = "some_key"
message = Message(
self.FIRMWARE_UPDATE_ABORT_TOPIC_ROOT
+ self.DEVICE_PATH_PREFIX
+ device_key
)
self.assertEqual(
device_key,
json_firmware_update_protocol.extract_key_from_message(message),
)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
import json
import gzip
import numpy as np
import nltk
from inferbeddings.nli import util
from inferbeddings.models.training.util import make_batches
def evaluate(session, eval_path, label_to_index, token_to_index, predictions_op, batch_size,
sentence1_ph, sentence2_ph, sentence1_len_ph, sentence2_len_ph, dropout_keep_prob_ph,
has_bos=False, has_eos=False, has_unk=False, is_lower=False,
bos_idx=1, eos_idx=2, unk_idx=3):
sentence1_all = []
sentence2_all = []
gold_label_all = []
with gzip.open(eval_path, 'rb') as f:
for line in f:
decoded_line = line.decode('utf-8')
if is_lower:
decoded_line = decoded_line.lower()
obj = json.loads(decoded_line)
gold_label = obj['gold_label']
if gold_label in ['contradiction', 'entailment', 'neutral']:
gold_label_all += [label_to_index[gold_label]]
sentence1_parse = obj['sentence1_parse']
sentence2_parse = obj['sentence2_parse']
sentence1_tree = nltk.Tree.fromstring(sentence1_parse)
sentence2_tree = nltk.Tree.fromstring(sentence2_parse)
sentence1_tokens = sentence1_tree.leaves()
sentence2_tokens = sentence2_tree.leaves()
sentence1_ids = []
sentence2_ids = []
if has_bos:
sentence1_ids += [bos_idx]
sentence2_ids += [bos_idx]
for token in sentence1_tokens:
if token in token_to_index:
sentence1_ids += [token_to_index[token]]
elif has_unk:
sentence1_ids += [unk_idx]
for token in sentence2_tokens:
if token in token_to_index:
sentence2_ids += [token_to_index[token]]
elif has_unk:
sentence2_ids += [unk_idx]
if has_eos:
sentence1_ids += [eos_idx]
sentence2_ids += [eos_idx]
sentence1_all += [sentence1_ids]
sentence2_all += [sentence2_ids]
sentence1_all_len = [len(s) for s in sentence1_all]
sentence2_all_len = [len(s) for s in sentence2_all]
np_sentence1 = util.pad_sequences(sequences=sentence1_all)
np_sentence2 = util.pad_sequences(sequences=sentence2_all)
np_sentence1_len = np.array(sentence1_all_len)
np_sentence2_len = np.array(sentence2_all_len)
gold_label = np.array(gold_label_all)
nb_instances = gold_label.shape[0]
batches = make_batches(size=nb_instances, batch_size=batch_size)
predictions = []
for batch_idx, (batch_start, batch_end) in enumerate(batches):
feed_dict = {
sentence1_ph: np_sentence1[batch_start:batch_end],
sentence2_ph: np_sentence2[batch_start:batch_end],
sentence1_len_ph: np_sentence1_len[batch_start:batch_end],
sentence2_len_ph: np_sentence2_len[batch_start:batch_end],
dropout_keep_prob_ph: 1.0
}
_predictions = session.run(predictions_op, feed_dict=feed_dict)
predictions += _predictions.tolist()
matches = np.array(predictions) == gold_label
return np.mean(matches)
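# A minimal usage sketch (hypothetical names; assumes a TensorFlow 1.x session with the
# listed placeholders/ops already built, and a gzipped SNLI-style JSONL file at dev_path):
#
#     accuracy = evaluate(session, dev_path, label_to_index, token_to_index,
#                         predictions_op, batch_size=32,
#                         sentence1_ph=s1_ph, sentence2_ph=s2_ph,
#                         sentence1_len_ph=s1_len_ph, sentence2_len_ph=s2_len_ph,
#                         dropout_keep_prob_ph=keep_prob_ph,
#                         has_bos=True, has_eos=True, has_unk=True, is_lower=True)
#     print('Dev accuracy: {:.4f}'.format(accuracy))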
|
import os
from JavaPropertiesLibrary.keywords import JavaPropertiesLibrary as jp
def test_something():
my_jp = jp()
my_jp.get_properties_file_content('./utests/test.properties')
assert 1 == 1
def test_get_properties_file_content():
my_jp = jp()
prop_path = os.path.abspath('./utests/test.properties')
my_jp_content = my_jp.get_properties_file_content(prop_path)
assert 'AcquireModule.system.traverse.TraverseType' in my_jp_content
assert 'I am not here' not in my_jp_content
def test_get_properties_value():
my_jp = jp()
prop_path = os.path.abspath('./utests/test.properties')
my_key = 'AcquireModule.system.Identifier'
my_value = my_jp.get_properties_value(prop_path, my_key)
assert my_value != ''
assert my_value == 'System_SN12345'
def test_change_properties_value():
my_jp = jp()
prop_path = os.path.abspath('./utests/test.properties')
my_key = 'AcquireModule.system.Identifier'
my_new_value = 'mynewvalue'
my_jp.change_properties_value(prop_path, my_key, my_new_value)
new_value = my_jp.get_properties_value(prop_path, my_key)
assert new_value == my_new_value
my_jp.change_properties_value(prop_path, my_key, 'System_SN12345')
default_value = my_jp.get_properties_value(prop_path, my_key)
assert default_value == 'System_SN12345'
|
import os
from OpenSSL import crypto
from dotenv import load_dotenv
load_dotenv()
EMAIL_ADDRESS = os.getenv("EMAIL_ADDRESS")
COMMON_NAME = os.getenv("COMMON_NAME")
COUNTRY_NAME = os.getenv("COUNTRY_NAME")
LOCALITY_NAME = os.getenv("CITY_NAME")
STATE_OR_PROVINCE_NAME = os.getenv("PROVINCE_NAME")
ORGANIZATION_NAME = os.getenv("ORGANIZATION_NAME")
ORGANIZATION_UNIT_NAME = os.getenv("ORGANIZATION_UNIT_NAME")
CRT_VALIDITY = int(os.getenv("CRT_VALIDITY_YEARS")) * 365 * 24 * 60 * 60
def make_key():
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 4096)
return k
def make_crt(
key=None,
emailAddress=EMAIL_ADDRESS,
commonName=COMMON_NAME,
countryName=COUNTRY_NAME,
localityName=LOCALITY_NAME,
stateOrProvinceName=STATE_OR_PROVINCE_NAME,
organizationName=ORGANIZATION_NAME,
organizationUnitName=ORGANIZATION_UNIT_NAME,
serialNumber=0,
validityStartInSeconds=0,
validityEndInSeconds=CRT_VALIDITY
):
    # You can inspect the generated file using openssl:
    #   openssl x509 -inform pem -in selfsigned.crt -noout -text
# create a key pair
key = key or make_key()
# create a self-signed crt
crt = crypto.X509()
crt.get_subject().C = countryName
crt.get_subject().ST = stateOrProvinceName
crt.get_subject().L = localityName
crt.get_subject().O = organizationName
crt.get_subject().OU = organizationUnitName
crt.get_subject().CN = commonName
crt.get_subject().emailAddress = emailAddress
crt.set_serial_number(serialNumber)
crt.gmtime_adj_notBefore(0)
crt.gmtime_adj_notAfter(validityEndInSeconds)
crt.set_issuer(crt.get_subject())
crt.set_pubkey(key)
return crt
def make_key_crt(signing_key=None, **kwargs):
key = make_key()
crt = make_crt(key, **kwargs)
signing_key = signing_key or key
crt.sign(signing_key, 'sha512')
return key, crt
def pem_priv_key(key):
return crypto.dump_privatekey(crypto.FILETYPE_PEM, key).decode("utf-8")
def pem_pub_key(key):
return crypto.dump_publickey(crypto.FILETYPE_PEM, key).decode("utf-8")
def pem_crt(crt):
return crypto.dump_certificate(crypto.FILETYPE_PEM, crt).decode("utf-8")
def text_crt(crt):
return crypto.dump_certificate(crypto.FILETYPE_TEXT, crt).decode("utf-8")
def p12_key_crt(key, crt, password):
pfx = crypto.PKCS12()
pfx.set_privatekey(key)
pfx.set_certificate(crt)
return pfx.export(password.encode('utf-8'))
def export_key_crt(
key,
crt,
key_file=None,
crt_file=None,
pem_file=None,
p12_file=None,
p12_password=None,
ca_file=None
):
key_str = pem_priv_key(key)
if key_file:
with open(key_file, "wt") as f:
f.write(key_str)
crt_str = pem_crt(crt)
if crt_file:
with open(crt_file, "wt") as f:
f.write(crt_str)
pem_str = crt_str + key_str
if pem_file:
with open(pem_file, "wt") as f:
f.write(pem_str)
if p12_file:
if not p12_password:
raise Exception("Please provide p12_password to export p12_file")
p12_str = p12_key_crt(key, crt, p12_password)
with open(p12_file, "wb") as f:
f.write(p12_str)
if ca_file:
ca_str = text_crt(crt)
with open(ca_file, "wt") as f:
f.write(ca_str)
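# A minimal usage sketch (hypothetical subject fields and file names, bypassing the
# environment-driven defaults above):
#
#     key, crt = make_key_crt(commonName="example.local", countryName="US",
#                             stateOrProvinceName="CA", localityName="Testville",
#                             organizationName="Example", organizationUnitName="Dev",
#                             emailAddress="admin@example.local",
#                             validityEndInSeconds=365 * 24 * 60 * 60)
#     export_key_crt(key, crt, key_file="server.key", crt_file="server.crt",
#                    pem_file="server.pem")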
SERVER_KEY = os.getenv("SERVER_KEY")
SERVER_CRT = os.getenv("SERVER_CRT")
SERVER_PEM = os.getenv("SERVER_PEM")
SERVER_P12 = os.getenv("SERVER_P12")
SERVER_CA = os.getenv("SERVER_CA")
SERVER_P12_PASSWORD = os.getenv("SERVER_P12_PASSWORD")
def generate_server(
defaults=True,
key_file=None,
crt_file=None,
pem_file=None,
p12_file=None,
p12_password=None,
ca_file=None,
**kwargs
):
if defaults:
key_file = key_file or SERVER_KEY
crt_file = crt_file or SERVER_CRT
pem_file = pem_file or SERVER_PEM
p12_file = p12_file or SERVER_P12
p12_password = p12_password or SERVER_P12_PASSWORD
ca_file = ca_file or SERVER_CA
key, crt = make_key_crt(**kwargs)
export_key_crt(
key,
crt,
key_file=key_file,
crt_file=crt_file,
pem_file=pem_file,
p12_file=p12_file,
p12_password=p12_password,
ca_file=ca_file
)
return key, crt
def export_key(key, priv_file=None, pub_file=None):
priv_str = pem_priv_key(key)
if priv_file:
with open(priv_file, "wt") as f:
f.write(priv_str)
pub_str = pem_pub_key(key)
if pub_file:
with open(pub_file, "wt") as f:
f.write(pub_str)
return priv_str, pub_str
AUTH_ENC = os.getenv("AUTH_ENC")
AUTH_DEC = os.getenv("AUTH_DEC")
def generate_auth(enc_file=AUTH_ENC, dec_file=AUTH_DEC):
key = make_key()
enc, dec = export_key(key, priv_file=enc_file, pub_file=dec_file)
return enc, dec
REFRESH_ENC = os.getenv("REFRESH_ENC")
REFRESH_DEC = os.getenv("REFRESH_DEC")
def generate_refresh(enc_file=REFRESH_ENC, dec_file=REFRESH_DEC):
key = make_key()
enc, dec = export_key(key, priv_file=enc_file, pub_file=dec_file)
return enc, dec
DOWNLOAD_ENC = os.getenv("DOWNLOAD_ENC")
DOWNLOAD_DEC = os.getenv("DOWNLOAD_DEC")
def generate_download(enc_file=DOWNLOAD_ENC, dec_file=DOWNLOAD_DEC):
key = make_key()
enc, dec = export_key(key, priv_file=enc_file, pub_file=dec_file)
return enc, dec
UPLOAD_ENC = os.getenv("UPLOAD_ENC")
UPLOAD_DEC = os.getenv("UPLOAD_DEC")
def generate_upload(enc_file=UPLOAD_ENC, dec_file=UPLOAD_DEC):
key = make_key()
enc, dec = export_key(key, priv_file=enc_file, pub_file=dec_file)
return enc, dec
REPORT_ENC = os.getenv("REPORT_ENC")
REPORT_DEC = os.getenv("REPORT_DEC")
def generate_report(enc_file=REPORT_ENC, dec_file=REPORT_DEC):
key = make_key()
enc, dec = export_key(key, priv_file=enc_file, pub_file=dec_file)
return enc, dec
EMAIL_ENC = os.getenv("EMAIL_ENC")
EMAIL_DEC = os.getenv("EMAIL_DEC")
def generate_email(enc_file=EMAIL_ENC, dec_file=EMAIL_DEC):
key = make_key()
enc, dec = export_key(key, priv_file=enc_file, pub_file=dec_file)
return enc, dec
if __name__ == "__main__":
#generate_server()
generate_auth()
generate_refresh()
generate_download()
generate_upload()
generate_report()
generate_email()
|
from pynubank.exception import NuException
from pynubank.utils.http import HttpClient
DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
class Discovery:
def __init__(self, client: HttpClient):
self.client = client
self.proxy_list_url = {}
self.proxy_list_app_url = {}
self._update_proxy_urls()
def get_url(self, name: str) -> str:
return self._get_url(name, self.proxy_list_url)
def get_app_url(self, name: str) -> str:
return self._get_url(name, self.proxy_list_app_url)
def _update_proxy_urls(self):
self.proxy_list_url = self.client.get(DISCOVERY_URL)
self.proxy_list_app_url = self.client.get(DISCOVERY_APP_URL)
def _get_url(self, name: str, target: dict) -> str:
url = target.get(name)
if not url:
raise NuException(f'There is no URL discovered for {name}')
return url
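# A minimal offline usage sketch (the stub client below is hypothetical; real callers
# pass a configured pynubank HttpClient, which performs the actual discovery requests).
if __name__ == '__main__':
    class _StubClient:
        def get(self, url: str) -> dict:
            # Fake proxy table for the main discovery endpoint only.
            if url == DISCOVERY_URL:
                return {'login': 'https://example.invalid/login'}
            return {}

    discovery = Discovery(_StubClient())
    print(discovery.get_url('login'))  # https://example.invalid/login
    try:
        discovery.get_app_url('missing')
    except NuException as error:
        print(error)  # There is no URL discovered for missing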
|
"""Utilities for plotting."""
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
from matplotlib import artist
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import Line3D, Text3D, Poly3DCollection, Line3DCollection
from .transformations import transform
from .rotations import unitx, unitz, perpendicular_to_vectors, norm_vector
class Frame(artist.Artist):
"""A Matplotlib artist that displays a frame represented by its basis.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
s : float, optional (default: 1)
Length of basis vectors
Other arguments except 'c' and 'color' are passed on to Line3D.
"""
def __init__(self, A2B, label=None, s=1.0, **kwargs):
super(Frame, self).__init__()
if "c" in kwargs:
kwargs.pop("c")
if "color" in kwargs:
kwargs.pop("color")
self.s = s
self.x_axis = Line3D([], [], [], color="r", **kwargs)
self.y_axis = Line3D([], [], [], color="g", **kwargs)
self.z_axis = Line3D([], [], [], color="b", **kwargs)
self.draw_label = label is not None
self.label = label
if self.draw_label:
self.label_indicator = Line3D([], [], [], color="k", **kwargs)
self.label_text = Text3D(0, 0, 0, text="", zdir="x")
self.set_data(A2B, label)
def set_data(self, A2B, label=None):
"""Set the transformation data.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
"""
R = A2B[:3, :3]
p = A2B[:3, 3]
for d, b in enumerate([self.x_axis, self.y_axis, self.z_axis]):
b.set_data(np.array([p[0], p[0] + self.s * R[0, d]]),
np.array([p[1], p[1] + self.s * R[1, d]]))
b.set_3d_properties(np.array([p[2], p[2] + self.s * R[2, d]]))
if self.draw_label:
if label is None:
label = self.label
label_pos = p + 0.5 * self.s * (R[:, 0] + R[:, 1] + R[:, 2])
self.label_indicator.set_data(
np.array([p[0], label_pos[0]]),
np.array([p[1], label_pos[1]]))
self.label_indicator.set_3d_properties(
np.array([p[2], label_pos[2]]))
self.label_text.set_text(label)
self.label_text.set_position([label_pos[0], label_pos[1]])
self.label_text.set_3d_properties(label_pos[2], zdir="x")
@artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""Draw the artist."""
for b in [self.x_axis, self.y_axis, self.z_axis]:
b.draw(renderer, *args, **kwargs)
if self.draw_label:
self.label_indicator.draw(renderer, *args, **kwargs)
self.label_text.draw(renderer, *args, **kwargs)
super(Frame, self).draw(renderer, *args, **kwargs)
def add_frame(self, axis):
"""Add the frame to a 3D axis."""
for b in [self.x_axis, self.y_axis, self.z_axis]:
axis.add_line(b)
if self.draw_label:
axis.add_line(self.label_indicator)
axis._add_text(self.label_text)
class LabeledFrame(Frame):
"""Displays a frame represented by its basis with axis labels.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
s : float, optional (default: 1)
Length of basis vectors
Other arguments except 'c' and 'color' are passed on to Line3D.
"""
def __init__(self, A2B, label=None, s=1.0, **kwargs):
self.x_label = Text3D(0, 0, 0, text="", zdir="x")
self.y_label = Text3D(0, 0, 0, text="", zdir="x")
self.z_label = Text3D(0, 0, 0, text="", zdir="x")
            super(LabeledFrame, self).__init__(A2B, label=label, s=s, **kwargs)
def set_data(self, A2B, label=None):
"""Set the transformation data.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
"""
super(LabeledFrame, self).set_data(A2B, label)
R = A2B[:3, :3]
p = A2B[:3, 3]
x_label_location = p + 1.1 * self.s * R[:, 0]
y_label_location = p + 1.1 * self.s * R[:, 1]
z_label_location = p + 1.1 * self.s * R[:, 2]
self.x_label.set_text("x")
self.x_label.set_position(x_label_location[:2])
self.x_label.set_3d_properties(x_label_location[2], zdir="x")
self.y_label.set_text("y")
self.y_label.set_position(y_label_location[:2])
self.y_label.set_3d_properties(y_label_location[2], zdir="x")
self.z_label.set_text("z")
self.z_label.set_position(z_label_location[:2])
self.z_label.set_3d_properties(z_label_location[2], zdir="x")
@artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""Draw the artist."""
self.x_label.draw(renderer, *args, **kwargs)
self.y_label.draw(renderer, *args, **kwargs)
self.z_label.draw(renderer, *args, **kwargs)
super(LabeledFrame, self).draw(renderer, *args, **kwargs)
def add_frame(self, axis):
"""Add the frame to a 3D axis."""
super(LabeledFrame, self).add_frame(axis)
axis._add_text(self.x_label)
axis._add_text(self.y_label)
axis._add_text(self.z_label)
class Trajectory(artist.Artist):
"""A Matplotlib artist that displays a trajectory.
Parameters
----------
H : array-like, shape (n_steps, 4, 4)
Sequence of poses represented by homogeneous matrices
show_direction : bool, optional (default: True)
Plot an arrow to indicate the direction of the trajectory
n_frames : int, optional (default: 10)
Number of frames that should be plotted to indicate the rotation
s : float, optional (default: 1)
Scaling of the frames that will be drawn
Other arguments are passed onto Line3D.
"""
def __init__(self, H, show_direction=True, n_frames=10, s=1.0, **kwargs):
super(Trajectory, self).__init__()
self.show_direction = show_direction
self.trajectory = Line3D([], [], [], **kwargs)
self.key_frames = [Frame(np.eye(4), s=s, **kwargs)
for _ in range(n_frames)]
if self.show_direction:
self.direction_arrow = Arrow3D(
[0, 0], [0, 0], [0, 0],
mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
self.set_data(H)
def set_data(self, H):
"""Set the trajectory data.
Parameters
----------
H : array-like, shape (n_steps, 4, 4)
Sequence of poses represented by homogeneous matrices
"""
positions = H[:, :3, 3]
self.trajectory.set_data(positions[:, 0], positions[:, 1])
self.trajectory.set_3d_properties(positions[:, 2])
key_frames_indices = np.linspace(
                0, len(H) - 1, len(self.key_frames), dtype=int)
for i, key_frame_idx in enumerate(key_frames_indices):
self.key_frames[i].set_data(H[key_frame_idx])
if self.show_direction:
start = 0.8 * positions[0] + 0.2 * positions[-1]
end = 0.2 * positions[0] + 0.8 * positions[-1]
self.direction_arrow.set_data(
[start[0], end[0]], [start[1], end[1]], [start[2], end[2]])
@artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""Draw the artist."""
self.trajectory.draw(renderer, *args, **kwargs)
for key_frame in self.key_frames:
key_frame.draw(renderer, *args, **kwargs)
if self.show_direction:
self.direction_arrow.draw(renderer)
super(Trajectory, self).draw(renderer, *args, **kwargs)
def add_trajectory(self, axis):
"""Add the trajectory to a 3D axis."""
axis.add_line(self.trajectory)
for key_frame in self.key_frames:
key_frame.add_frame(axis)
if self.show_direction:
axis.add_artist(self.direction_arrow)
class Arrow3D(FancyArrowPatch): # http://stackoverflow.com/a/11156353/915743
"""A Matplotlib patch that represents an arrow in 3D."""
def __init__(self, xs, ys, zs, *args, **kwargs):
super(Arrow3D, self).__init__((0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def set_data(self, xs, ys, zs):
"""Set the arrow data.
Parameters
----------
xs : iterable
List of x positions
ys : iterable
List of y positions
zs : iterable
List of z positions
"""
self._verts3d = xs, ys, zs
def draw(self, renderer):
"""Draw the patch."""
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
super(Arrow3D, self).draw(renderer)
def make_3d_axis(ax_s, pos=111):
"""Generate new 3D axis.
Parameters
----------
        ax_s : float
Scaling of the new matplotlib 3d axis
pos : int, optional (default: 111)
Position indicator (nrows, ncols, plot_number)
Returns
-------
ax : Matplotlib 3d axis
New axis
"""
try:
ax = plt.subplot(pos, projection="3d", aspect="equal")
except NotImplementedError:
# HACK: workaround for bug in new matplotlib versions (ca. 3.02):
# "It is not currently possible to manually set the aspect"
ax = plt.subplot(pos, projection="3d")
plt.setp(ax, xlim=(-ax_s, ax_s), ylim=(-ax_s, ax_s), zlim=(-ax_s, ax_s),
xlabel="X", ylabel="Y", zlabel="Z")
return ax
def plot_vector(ax=None, start=np.zeros(3), direction=np.array([1, 0, 0]), s=1.0, arrowstyle="simple", ax_s=1, **kwargs):
"""Plot Vector.
Draws an arrow from start to start + s * direction.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
start : array-like, shape (3,), optional (default: [0, 0, 0])
Start of the vector
        direction : array-like, shape (3,), optional (default: [1, 0, 0])
Direction of the vector
s : float, optional (default: 1)
Scaling of the vector that will be drawn
arrowstyle : str, or ArrowStyle, optional (default: 'simple')
See matplotlib's documentation of arrowstyle in
matplotlib.patches.FancyArrowPatch for more options
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
kwargs : dict, optional (default: {})
Additional arguments for the plotting functions, e.g. alpha
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
axis_arrow = Arrow3D(
[start[0], start[0] + s * direction[0]],
[start[1], start[1] + s * direction[1]],
[start[2], start[2] + s * direction[2]],
mutation_scale=20, arrowstyle=arrowstyle, **kwargs)
ax.add_artist(axis_arrow)
return ax
def plot_length_variable(ax=None, start=np.zeros(3), end=np.ones(3), name="l", above=False, ax_s=1, color="k", **kwargs):
"""Plot length with text at its center.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
start : array-like, shape (3,), optional (default: [0, 0, 0])
Start point
end : array-like, shape (3,), optional (default: [1, 1, 1])
End point
name : str, optional (default: 'l')
Text in the middle
above : bool, optional (default: False)
Plot name above line
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
color : str, optional (default: black)
            Color in which the line should be plotted
kwargs : dict, optional (default: {})
Additional arguments for the text, e.g. fontsize
"""
if ax is None:
ax = make_3d_axis(ax_s)
direction = end - start
length = np.linalg.norm(direction)
if above:
ax.plot([start[0], end[0]], [start[1], end[1]], [start[2], end[2]], color=color)
else:
mid1 = start + 0.4 * direction
mid2 = start + 0.6 * direction
ax.plot([start[0], mid1[0]], [start[1], mid1[1]], [start[2], mid1[2]], color=color)
ax.plot([end[0], mid2[0]], [end[1], mid2[1]], [end[2], mid2[2]], color=color)
if np.linalg.norm(direction / length - unitz) < np.finfo(float).eps:
axis = unitx
else:
axis = unitz
mark = norm_vector(perpendicular_to_vectors(direction, axis)) * 0.03 * length
mark_start1 = start + mark
mark_start2 = start - mark
mark_end1 = end + mark
mark_end2 = end - mark
ax.plot([mark_start1[0], mark_start2[0]],
[mark_start1[1], mark_start2[1]],
[mark_start1[2], mark_start2[2]],
color=color)
ax.plot([mark_end1[0], mark_end2[0]],
[mark_end1[1], mark_end2[1]],
[mark_end1[2], mark_end2[2]],
color=color)
text_location = start + 0.45 * direction
if above:
text_location[2] += 0.3 * length
ax.text(text_location[0], text_location[1], text_location[2], name, zdir="x", **kwargs)
return ax
def plot_box(ax=None, size=np.ones(3), A2B=np.eye(4), ax_s=1, wireframe=True, color="k", alpha=1.0):
"""Plot box.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
size : array-like, shape (3,), optional (default: [1, 1, 1])
Size of the box per dimension
A2B : array-like, shape (4, 4)
Center of the box
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
wireframe : bool, optional (default: True)
            Plot wireframe of box and surface otherwise
color : str, optional (default: black)
            Color in which the box should be plotted
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
corners = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]
])
corners = (corners - 0.5) * size
corners = transform(
A2B, np.hstack((corners, np.ones((len(corners), 1)))))[:, :3]
if wireframe:
for i, j in [(0, 1), (0, 2), (1, 3), (2, 3),
(4, 5), (4, 6), (5, 7), (6, 7),
(0, 4), (1, 5), (2, 6), (3, 7)]:
ax.plot([corners[i, 0], corners[j, 0]],
[corners[i, 1], corners[j, 1]],
[corners[i, 2], corners[j, 2]],
c=color, alpha=alpha)
else:
p3c = Poly3DCollection(np.array([
[corners[0], corners[1], corners[2]],
[corners[1], corners[2], corners[3]],
[corners[4], corners[5], corners[6]],
[corners[5], corners[6], corners[7]],
[corners[0], corners[1], corners[4]],
[corners[1], corners[4], corners[5]],
[corners[2], corners[6], corners[7]],
[corners[2], corners[3], corners[7]],
[corners[0], corners[4], corners[6]],
[corners[0], corners[2], corners[6]],
[corners[1], corners[5], corners[7]],
[corners[1], corners[3], corners[7]],
]))
p3c.set_alpha(alpha)
p3c.set_facecolor(color)
ax.add_collection3d(p3c)
return ax
def plot_sphere(ax=None, radius=1.0, p=np.zeros(3), ax_s=1, wireframe=True, n_steps=100, color="k", alpha=1.0):
"""Plot cylinder.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
radius : float, optional (default: 1)
Radius of the sphere
p : array-like, shape (3,), optional (default: [0, 0, 0])
Center of the sphere
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
wireframe : bool, optional (default: True)
            Plot wireframe of sphere and surface otherwise
n_steps : int, optional (default: 100)
Number of discrete steps plotted in each dimension
color : str, optional (default: black)
            Color in which the sphere should be plotted
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
phi, theta = np.mgrid[0.0:np.pi:n_steps * 1j, 0.0:2.0 * np.pi:n_steps * 1j]
x = p[0] + radius * np.sin(phi) * np.cos(theta)
y = p[1] + radius * np.sin(phi) * np.sin(theta)
z = p[2] + radius * np.cos(phi)
if wireframe:
ax.plot_wireframe(x, y, z, rstride=10, cstride=10, color=color, alpha=alpha)
else:
ax.plot_surface(x, y, z, color=color, alpha=alpha, linewidth=0)
return ax
def plot_cylinder(ax=None, length=1.0, radius=1.0, thickness=0.0, A2B=np.eye(4), ax_s=1, wireframe=True, n_steps=100, alpha=1.0, color="k"):
"""Plot cylinder.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
length : float, optional (default: 1)
Length of the cylinder
radius : float, optional (default: 1)
Radius of the cylinder
thickness : float, optional (default: 0)
Thickness of a cylindrical shell. It will be subtracted from the
outer radius to obtain the inner radius. The difference must be
greater than 0.
A2B : array-like, shape (4, 4)
Center of the cylinder
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
wireframe : bool, optional (default: True)
Plot wireframe of cylinder and surface otherwise
n_steps : int, optional (default: 100)
Number of discrete steps plotted in each dimension
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
color : str, optional (default: black)
Color in which the cylinder should be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
inner_radius = radius - thickness
if inner_radius <= 0.0:
raise ValueError("Thickness of cylindrical shell results in "
"invalid inner radius: %g" % inner_radius)
axis_start = A2B.dot(np.array([0, 0, -0.5 * length, 1]))[:3]
axis_end = A2B.dot(np.array([0, 0, 0.5 * length, 1]))[:3]
axis = axis_end - axis_start
axis /= length
not_axis = np.array([1, 0, 0])
if (axis == not_axis).all():
not_axis = np.array([0, 1, 0])
n1 = np.cross(axis, not_axis)
n1 /= np.linalg.norm(n1)
n2 = np.cross(axis, n1)
if wireframe:
t = np.linspace(0, length, n_steps)
else:
t = np.array([0, length])
theta = np.linspace(0, 2 * np.pi, n_steps)
t, theta = np.meshgrid(t, theta)
if thickness > 0.0:
X_outer, Y_outer, Z_outer = [
axis_start[i] + axis[i] * t +
radius * np.sin(theta) * n1[i] +
radius * np.cos(theta) * n2[i] for i in [0, 1, 2]]
X_inner, Y_inner, Z_inner = [
axis_end[i] - axis[i] * t +
inner_radius * np.sin(theta) * n1[i] +
inner_radius * np.cos(theta) * n2[i] for i in [0, 1, 2]]
X = np.hstack((X_outer, X_inner))
Y = np.hstack((Y_outer, Y_inner))
Z = np.hstack((Z_outer, Z_inner))
else:
X, Y, Z = [axis_start[i] + axis[i] * t +
radius * np.sin(theta) * n1[i] +
radius * np.cos(theta) * n2[i] for i in [0, 1, 2]]
if wireframe:
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10, alpha=alpha,
color=color)
else:
ax.plot_surface(X, Y, Z, color=color, alpha=alpha, linewidth=0)
return ax
def plot_mesh(ax=None, filename=None, A2B=np.eye(4), s=np.array([1.0, 1.0, 1.0]), ax_s=1, wireframe=False, convex_hull=False, alpha=1.0, color="k"):
"""Plot mesh.
Note that this function requires the additional library 'trimesh'. It will
print a warning if trimesh is not available.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
filename : str, optional (default: None)
Path to mesh file.
A2B : array-like, shape (4, 4)
Pose of the mesh
s : array-like, shape (3,), optional (default: [1, 1, 1])
Scaling of the mesh that will be drawn
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
        wireframe : bool, optional (default: False)
Plot wireframe of mesh and surface otherwise
convex_hull : bool, optional (default: False)
Show convex hull instead of the original mesh. This can be much
faster.
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
color : str, optional (default: black)
            Color in which the mesh should be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
if filename is None:
warnings.warn(
"No filename given for mesh. When you use the "
"UrdfTransformManager, make sure to set the mesh path or "
"package directory.")
return ax
try:
import trimesh
except ImportError:
warnings.warn(
"Cannot display mesh. Library 'trimesh' not installed.")
return ax
mesh = trimesh.load(filename)
if convex_hull:
mesh = mesh.convex_hull
vertices = mesh.vertices * s
vertices = np.hstack((vertices, np.ones((len(vertices), 1))))
vertices = transform(A2B, vertices)[:, :3]
vectors = np.array([vertices[[i, j, k]] for i, j, k in mesh.faces])
if wireframe:
surface = Line3DCollection(vectors)
surface.set_color(color)
else:
surface = Poly3DCollection(vectors)
surface.set_facecolor(color)
surface.set_alpha(alpha)
ax.add_collection3d(surface)
return ax
def remove_frame(ax, left=0.0, bottom=0.0, right=1.0, top=1.0):
"""Remove axis and scale bbox.
Parameters
----------
ax : Matplotlib 3d axis
Axis from which we remove the frame
left : float, optional (default: 0)
Position of left border (between 0 and 1)
bottom : float, optional (default: 0)
Position of bottom border (between 0 and 1)
right : float, optional (default: 1)
Position of right border (between 0 and 1)
top : float, optional (default: 1)
Position of top border (between 0 and 1)
"""
ax.axis("off")
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top)
except ImportError:
warnings.warn("Matplotlib is not installed, visualization is not available")
|
import pytest
from dagster import PipelineDefinition, execute_pipeline, pipeline, solid
from dagster.core.errors import DagsterRunConflict
from dagster.core.instance import DagsterInstance
from dagster.core.snap.pipeline_snapshot import create_pipeline_snapshot_id
from dagster.core.storage.pipeline_run import PipelineRun
def test_get_or_create_run():
instance = DagsterInstance.ephemeral()
assert instance.get_runs() == []
pipeline_run = PipelineRun.create_empty_run('foo_pipeline', 'new_run')
assert instance.get_or_create_run(pipeline_run) == pipeline_run
assert instance.get_runs() == [pipeline_run]
assert instance.get_or_create_run(pipeline_run) == pipeline_run
assert instance.get_runs() == [pipeline_run]
conflicting_pipeline_run = PipelineRun.create_empty_run('bar_pipeline', 'new_run')
with pytest.raises(DagsterRunConflict, match='Found conflicting existing run with same id.'):
instance.get_or_create_run(conflicting_pipeline_run)
def do_test_single_write_read(instance):
run_id = 'some_run_id'
pipeline_def = PipelineDefinition(name='some_pipeline', solid_defs=[])
instance.create_empty_run(run_id=run_id, pipeline_name=pipeline_def.name)
run = instance.get_run_by_id(run_id)
assert run.run_id == run_id
assert run.pipeline_name == 'some_pipeline'
assert list(instance.get_runs()) == [run]
instance.wipe()
assert list(instance.get_runs()) == []
def test_filesystem_persist_one_run(tmpdir):
do_test_single_write_read(DagsterInstance.local_temp(str(tmpdir)))
def test_in_memory_persist_one_run():
do_test_single_write_read(DagsterInstance.ephemeral())
def test_create_pipeline_snapshot():
@solid
def noop_solid(_):
pass
@pipeline
def noop_pipeline():
noop_solid()
instance = DagsterInstance.local_temp()
result = execute_pipeline(noop_pipeline, instance=instance)
assert result.success
run = instance.get_run_by_id(result.run_id)
assert run.pipeline_snapshot_id == create_pipeline_snapshot_id(
noop_pipeline.get_pipeline_snapshot()
)
|
#!/usr/bin/env python3
from .parser import Parser
from .parser import PcapParser
|
import torch
from hypothesis import given, settings
from hypothesis import strategies as st
from torch import nn as nn
from torchdiffeq import odeint as odeint
from dynamics_learning.networks.kalman.ekf import EKFCell
from dynamics_learning.networks.models import SimpleODENetConfig
from dynamics_learning.utils.net_utils import batch_eye
from dynamics_learning.utils.tests.test_net_utils import wrap_zero_input
@given(batch_dim=st.integers(min_value=1, max_value=10),)
def test_EKF_basic(batch_dim: int):
network = SimpleODENetConfig(
input_dim=2,
output_dim=2,
hidden_layers=2,
hidden_units=64,
nonlinearity=nn.Tanh(),
).create()
kf = EKFCell(network, None)
x = torch.randn(batch_dim, 2)
p0 = torch.randn(batch_dim, 2, 2)
p0 = torch.bmm(p0, p0.transpose(-1, -2)) + batch_eye(2, batch_dim)
vect = kf.gaussian_parameters_to_vector(x, p0)
mu, var = kf.vector_to_gaussian_parameters(vect)
# TODO uncomment
# npt.assert_allclose(x.numpy(), mu.numpy())
# npt.assert_allclose(p0.numpy(), var.numpy(), rtol=1e-6)
@given(batch_dim=st.integers(min_value=1, max_value=2),)
@settings(deadline=5000)
def test_EKFCell_dynamics_forward(batch_dim: int):
network = SimpleODENetConfig(
input_dim=2,
output_dim=2,
hidden_layers=2,
hidden_units=2,
nonlinearity=nn.Tanh(),
).create()
kf_f = EKFCell(network, None) # Model with cholesky
batch_t = torch.linspace(0.0, 1.0, 4)
x = torch.randn(batch_dim, 2)
p0 = torch.randn(batch_dim, 2, 2)
p0 = torch.bmm(p0, p0.transpose(-1, -2)) + batch_eye(2, batch_dim) * 10
vect = kf_f.gaussian_parameters_to_vector(x, p0)
xs_ps_1 = odeint(wrap_zero_input(kf_f), vect, batch_t)
x1, var1 = kf_f.vector_to_gaussian_parameters(xs_ps_1)
# TODO: fix test - giving flaky results
# @given(batch_dim=st.integers(min_value=1, max_value=8),)
# @settings(deadline=5000)
# def test_EKFCell_dynamics_backwards(batch_dim: int):
# network = SimpleODENetConfig(
# input_dim=2,
# output_dim=2,
# hidden_layers=2,
# hidden_units=64,
# nonlinearity=nn.Tanh(),
# ).create()
# kf_f = EKFCell(network, None) # Model with cholesky
#
# batch_t = torch.linspace(0.0, 1.0, 5)
#
# # TODO generate with hypothesis
# x = torch.randn(batch_dim, 2)
# p0 = torch.randn(batch_dim, 2, 2)
# p0 = torch.bmm(p0, p0.transpose(-1, -2)) + batch_eye(2, batch_dim) * 10
#
# vect = kf_f.gaussian_parameters_to_vector(x, p0)
# xs_ps_1 = odeint(kf_f, vect, batch_t)
# x1, var1 = kf_f.vector_to_gaussian_parameters(xs_ps_1)
# fake_loss = torch.mean(x1 ** 2) + torch.mean(var1 ** 2)
# fake_loss.backward()
|
import json
from urllib.request import urlopen
import numpy as np
import scipy.optimize as opt
def get_json_from_url(url):
"""Get a json from a URL."""
response = urlopen(url)
return json.loads(response.read().decode())
def logistic(x, a, c, d):
"""Fit a logistic function."""
return a / (1. + np.exp(-c * (x - d)))
def fit_predict(x, y, f, x_pred=None):
"""Fit a function and predict on some input"""
popt, pcov = opt.curve_fit(f, x, y, maxfev=100000)
if x_pred is None:
x_pred = x
return f(x_pred, *popt)
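# A minimal usage sketch (synthetic, noise-free data; the parameter values are illustrative).
if __name__ == "__main__":
    x = np.linspace(0.0, 10.0, 50)
    y = logistic(x, a=2.0, c=1.0, d=5.0)
    x_future = np.linspace(0.0, 15.0, 75)
    y_pred = fit_predict(x, y, logistic, x_pred=x_future)
    print(round(float(y_pred[-1]), 3))  # approaches the plateau a ~= 2.0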
|
from django.test import TestCase
from django.core.cache import cache
from apps.accounts.models import Account
from apps.schedule.models import Lessons, Schedule
from apps.university.models import Faculties, Departaments, StudyGroups, Auditories, Disciplines
class LessonsTest(TestCase):
"""Common tests to validate the Lessons model"""
def setUp(self):
"""Fixtures"""
self.day = 'Monday'
self.time = '14:00-16:00'
self.name_schedule = 'new_name_schedule'
self.name_faculty = 'new_name_faculty'
self.slug_faculty = 'new-slug-faculty'
self.name_departament = 'new_name_departament'
self.slug_departament = 'new-slug-departament'
self.name_studygroup = 'new_name_studygroup'
self.slug_studygroup = 'new-slug-studygroup'
self.name_auditory = 'new_name_auditory'
self.slug_auditory = 'new-slug-auditory'
self.name_discipline = 'new_name_discipline'
self.slug_discipline = 'new-slug-discipline'
self.student = Account.objects.create(
username='student',
password='student',
email='student@university-schedule.com',
role='Student',
is_active=True
)
self.new_faculty = Faculties.objects.create(name=self.name_faculty, slug=self.slug_faculty, )
self.new_departament = Departaments.objects.create(
faculty=self.new_faculty,
name=self.name_departament,
slug=self.slug_departament,
)
self.new_studygroup = StudyGroups.objects.create(
departament=self.new_departament,
name=self.name_studygroup,
slug=self.slug_studygroup,
)
self.new_discipline = Disciplines.objects.create(name=self.name_discipline, slug=self.slug_discipline, )
self.new_auditory = Auditories.objects.create(name=self.name_auditory, slug=self.slug_auditory, )
self.new_lesson = Lessons.objects.create(
lesson_name=self.new_discipline,
day=self.day,
time=self.time,
auditory=self.new_auditory,
)
# clear cache before running tests
cache.clear()
def test_model_lessons(self):
"""test to validate the Lessons model"""
# request initial number of objects Lessons
total_count_obj = Lessons.objects.all().count()
# checking the number of objects Lessons
self.assertEqual(1, total_count_obj)
# request fields of Lessons
self.assertEqual(self.new_lesson.lesson_name, self.new_discipline)
self.assertEqual(self.new_lesson.day, self.day)
self.assertEqual(self.new_lesson.time, self.time)
self.assertEqual(self.new_lesson.auditory, self.new_auditory)
# request for the number of Lessons objects after creating a new one and check
now_total_count_obj = Lessons.objects.all().count()
now_total_count_obj2 = Lessons.objects.filter(lesson_name=self.new_discipline).filter(day=self.day).count()
self.assertEqual(now_total_count_obj, total_count_obj)
self.assertEqual(now_total_count_obj2, total_count_obj)
# request from database and check new Lessons
get_lesson = Lessons.objects.get(lesson_name=self.new_discipline, day=self.day,)
self.assertEqual(get_lesson.lesson_name, self.new_discipline)
self.assertEqual(get_lesson.day, self.day)
class ScheduleTest(LessonsTest):
"""Common tests to validate the Schedule model"""
def test_model_schedule(self):
"""test to validate the Schedule model"""
# request for the initial number of Schedule objects
total_count_obj = Schedule.objects.all().count()
# checking the number of Schedule objects
self.assertEqual(0, total_count_obj)
# request fields of the Schedule object
self.schedule = Schedule.objects.create(
schedule_name=self.name_schedule,
student=self.student,
faculty=self.new_faculty,
departament=self.new_departament,
studygroups=self.new_studygroup,
day=self.day,
available=True,
)
self.assertEqual(self.schedule.schedule_name, self.name_schedule)
self.assertEqual(self.schedule.student, self.student)
self.assertEqual(self.schedule.faculty, self.new_faculty)
self.assertEqual(self.schedule.departament, self.new_departament)
self.assertEqual(self.schedule.studygroups, self.new_studygroup)
self.assertEqual(self.schedule.day, self.day)
self.assertEqual(self.schedule.available, True)
# request lessons count
self.assertEqual(self.schedule.lessons.count(), 0)
# add lessons
self.schedule.lessons.add(self.new_lesson)
# request lessons count
self.assertEqual(self.schedule.lessons.count(), 1)
# request for the number of Schedule objects after creating a new one and check
now_total_count_obj = Schedule.objects.all().count()
now_total_count_obj2 = Schedule.objects.filter(
schedule_name=self.name_schedule).filter(student=self.student).filter(day=self.day).count()
self.assertEqual(now_total_count_obj, total_count_obj + 1)
self.assertEqual(now_total_count_obj2, total_count_obj + 1)
# request from the database and check for new Schedule
get_schedule = Schedule.objects.get(
schedule_name=self.name_schedule, student=self.student, day=self.day)
self.assertEqual(get_schedule.schedule_name, self.name_schedule)
self.assertEqual(get_schedule.student, self.student)
self.assertEqual(get_schedule.day, self.day)
|
import logging
import os
import pickle
import platform
import pytest
import localpaths # type: ignore
from . import serve
from .serve import ConfigBuilder, inject_script
logger = logging.getLogger()
@pytest.mark.skipif(platform.uname()[0] == "Windows",
reason="Expected contents are platform-dependent")
def test_make_hosts_file_nix():
with ConfigBuilder(logger,
ports={"http": [8000]},
browser_host="foo.bar",
alternate_hosts={"alt": "foo2.bar"},
subdomains={"a", "b"},
not_subdomains={"x, y"}) as c:
hosts = serve.make_hosts_file(c, "192.168.42.42")
lines = hosts.split("\n")
assert set(lines) == {"",
"192.168.42.42\tfoo.bar",
"192.168.42.42\tfoo2.bar",
"192.168.42.42\ta.foo.bar",
"192.168.42.42\ta.foo2.bar",
"192.168.42.42\tb.foo.bar",
"192.168.42.42\tb.foo2.bar"}
assert lines[-1] == ""
@pytest.mark.skipif(platform.uname()[0] != "Windows",
reason="Expected contents are platform-dependent")
def test_make_hosts_file_windows():
with ConfigBuilder(logger,
ports={"http": [8000]},
browser_host="foo.bar",
alternate_hosts={"alt": "foo2.bar"},
subdomains={"a", "b"},
not_subdomains={"x", "y"}) as c:
hosts = serve.make_hosts_file(c, "192.168.42.42")
lines = hosts.split("\n")
assert set(lines) == {"",
"0.0.0.0\tx.foo.bar",
"0.0.0.0\tx.foo2.bar",
"0.0.0.0\ty.foo.bar",
"0.0.0.0\ty.foo2.bar",
"192.168.42.42\tfoo.bar",
"192.168.42.42\tfoo2.bar",
"192.168.42.42\ta.foo.bar",
"192.168.42.42\ta.foo2.bar",
"192.168.42.42\tb.foo.bar",
"192.168.42.42\tb.foo2.bar"}
assert lines[-1] == ""
def test_ws_doc_root_default():
with ConfigBuilder(logger) as c:
assert c.doc_root == localpaths.repo_root
assert c.ws_doc_root == os.path.join(localpaths.repo_root, "websockets", "handlers")
assert c.paths["ws_doc_root"] == c.ws_doc_root
def test_init_ws_doc_root():
with ConfigBuilder(logger, ws_doc_root="/") as c:
assert c.doc_root == localpaths.repo_root # check this hasn't changed
assert c.ws_doc_root == "/"
assert c.paths["ws_doc_root"] == c.ws_doc_root
def test_set_ws_doc_root():
cb = ConfigBuilder(logger)
cb.ws_doc_root = "/"
with cb as c:
assert c.doc_root == localpaths.repo_root # check this hasn't changed
assert c.ws_doc_root == "/"
assert c.paths["ws_doc_root"] == c.ws_doc_root
def test_pickle():
# Ensure that the config object can be pickled
with ConfigBuilder(logger) as c:
pickle.dumps(c)
def test_alternate_host_unspecified():
ConfigBuilder(logger, browser_host="web-platform.test")
@pytest.mark.parametrize("primary, alternate", [
("web-platform.test", "web-platform.test"),
("a.web-platform.test", "web-platform.test"),
("web-platform.test", "a.web-platform.test"),
("a.web-platform.test", "a.web-platform.test"),
])
def test_alternate_host_invalid(primary, alternate):
with pytest.raises(ValueError):
ConfigBuilder(logger, browser_host=primary, alternate_hosts={"alt": alternate})
@pytest.mark.parametrize("primary, alternate", [
("web-platform.test", "not-web-platform.test"),
("a.web-platform.test", "b.web-platform.test"),
("web-platform-tests.dev", "web-platform-tests.live"),
])
def test_alternate_host_valid(primary, alternate):
ConfigBuilder(logger, browser_host=primary, alternate_hosts={"alt": alternate})
# A token marking the location of expected script injection.
INJECT_SCRIPT_MARKER = b"<!-- inject here -->"
def test_inject_script_after_head():
html = b"""<!DOCTYPE html>
<html>
<head>
<!-- inject here --><script src="test.js"></script>
</head>
<body>
</body>
</html>"""
assert INJECT_SCRIPT_MARKER in html
assert inject_script(html.replace(INJECT_SCRIPT_MARKER, b""), INJECT_SCRIPT_MARKER) == html
def test_inject_script_no_html_head():
html = b"""<!DOCTYPE html>
<!-- inject here --><div></div>"""
assert INJECT_SCRIPT_MARKER in html
assert inject_script(html.replace(INJECT_SCRIPT_MARKER, b""), INJECT_SCRIPT_MARKER) == html
def test_inject_script_no_doctype():
html = b"""<!-- inject here --><div></div>"""
assert INJECT_SCRIPT_MARKER in html
assert inject_script(html.replace(INJECT_SCRIPT_MARKER, b""), INJECT_SCRIPT_MARKER) == html
def test_inject_script_parse_error():
html = b"""<!--<!-- inject here --><div></div>"""
assert INJECT_SCRIPT_MARKER in html
# On a parse error, the script should not be injected and the original content should be
# returned.
assert INJECT_SCRIPT_MARKER not in inject_script(html.replace(INJECT_SCRIPT_MARKER, b""), INJECT_SCRIPT_MARKER)
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""This package is responsible for modeling the energy consumers and the system load as curves and associated curve data. Special circumstances that may affect the load, such as seasons and daytypes, are also included here. This information is used by Load Forecasting and Load Management.
"""
from CIM15.IEC61970.LoadModel.ConformLoad import ConformLoad
from CIM15.IEC61970.LoadModel.ConformLoadGroup import ConformLoadGroup
from CIM15.IEC61970.LoadModel.SeasonDayTypeSchedule import SeasonDayTypeSchedule
from CIM15.IEC61970.LoadModel.LoadGroup import LoadGroup
from CIM15.IEC61970.LoadModel.ConformLoadSchedule import ConformLoadSchedule
from CIM15.IEC61970.LoadModel.LoadArea import LoadArea
from CIM15.IEC61970.LoadModel.PowerCutZone import PowerCutZone
from CIM15.IEC61970.LoadModel.LoadResponseCharacteristic import LoadResponseCharacteristic
from CIM15.IEC61970.LoadModel.NonConformLoad import NonConformLoad
from CIM15.IEC61970.LoadModel.StationSupply import StationSupply
from CIM15.IEC61970.LoadModel.EnergyArea import EnergyArea
from CIM15.IEC61970.LoadModel.NonConformLoadSchedule import NonConformLoadSchedule
from CIM15.IEC61970.LoadModel.NonConformLoadGroup import NonConformLoadGroup
from CIM15.IEC61970.LoadModel.DayType import DayType
from CIM15.IEC61970.LoadModel.SubLoadArea import SubLoadArea
from CIM15.IEC61970.LoadModel.Season import Season
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15#LoadModel"
nsPrefix = "cimLoadModel"
class SeasonName(str):
"""Values are: winter, summer, fall, spring
"""
pass
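# Illustrative note (not part of the original module): SeasonName behaves like a
# plain string, so e.g. SeasonName("winter") == "winter" holds.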
|
import sys
import gym
import pylab
import numpy as np
from keras.layers import Dense
from keras.models import Sequential, load_model
from keras.optimizers import Adam
class A2CAgent:
def __init__(self, state_size, action_size, actor_lr=1e-3, critic_lr=5e-3, discount_factor=0.99):
self.state_size = state_size
self.action_size = action_size
self.value_size = 1
self.discount_factor = discount_factor
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.actor_model = self._build_actor()
self.critic_model = self._build_critic()
def _build_actor(self):
actor = Sequential()
actor.add(Dense(24, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform'))
actor.add(Dense(self.action_size, activation='softmax', kernel_initializer='he_uniform'))
actor.compile(loss="categorical_crossentropy", optimizer=Adam(self.actor_lr))
return actor
def _build_critic(self):
critic = Sequential()
critic.add(Dense(24, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform'))
critic.add(Dense(self.value_size, activation='linear', kernel_initializer='he_uniform'))
critic.compile(loss='mse', optimizer=Adam(self.critic_lr))
return critic
def act(self, state):
policy = self.actor_model.predict(state, batch_size=1).flatten()
return np.random.choice(self.action_size, 1, p=policy)[0]
def train_model(self, state, action, reward, next_state, done):
target = np.zeros((1, self.value_size))
advantages = np.zeros((1, self.action_size))
value = self.critic_model.predict(state)[0]
next_value = self.critic_model.predict(next_state)[0]
if done:
advantages[0][action] = reward - value
target[0][0] = reward
else:
advantages[0][action] = reward + self.discount_factor * (next_value) - value
target[0][0] = reward + self.discount_factor * next_value
self.actor_model.fit(state, advantages, epochs=1, verbose=0)
self.critic_model.fit(state, target, epochs=1, verbose=0)
def save_model(self, path):
self.actor_model.save(path + ' actor')
self.critic_model.save(path + 'critic')
    def load_model(self, path):
        # Keras models are reloaded with keras.models.load_model(); a compiled
        # model instance has no load_model() method of its own.
        self.actor_model = load_model(path + ' actor')
        self.critic_model = load_model(path + 'critic')
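# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): train the agent on
# CartPole-v1 for a few episodes. Assumes the classic Gym API where
# env.reset() returns only the observation and env.step() returns a 4-tuple.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    env = gym.make('CartPole-v1')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = A2CAgent(state_size, action_size)
    for episode in range(10):
        state = np.reshape(env.reset(), [1, state_size])
        done = False
        score = 0
        while not done:
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            next_state = np.reshape(next_state, [1, state_size])
            agent.train_model(state, action, reward, next_state, done)
            score += reward
            state = next_state
        print("episode: {}  score: {}".format(episode, score))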
|
# Consider an array of sheep where some sheep may be missing from their place. We need a function that counts the number
# of sheep present in the array (true means present).
# For example,
# [True, True, True, False,
# True, True, True, True ,
# True, False, True, False,
# True, False, False, True ,
# True, True, True, True ,
# False, False, True, True]
# The correct answer would be 17.
# Hint: Don't forget to check for bad values like null/undefined
def count_sheeps(arrayOfSheeps):
    # Guard against a missing array, as the hint about bad values suggests.
    if arrayOfSheeps is None:
        return 0
    contador = 0
    for elemento in arrayOfSheeps:
        # Count only genuine True entries; None/False/other values are skipped.
        if elemento is True:
            contador += 1
    return contador
assert (count_sheeps([True, True, True, False,
True, True, True, True,
True, False, True, False,
True, False, False, True,
True, True, True, True,
                      False, False, True, True])) == 17, "Should count 17"
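# An equivalent one-liner for comparison (hypothetical helper, not part of the
# original kata solution); `arrayOfSheeps or []` also tolerates a None input:
def count_sheeps_oneliner(arrayOfSheeps):
    return sum(1 for sheep in (arrayOfSheeps or []) if sheep is True)
assert count_sheeps_oneliner([True, None, False, True]) == 2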
|
# Copyright (c) 2017-2018 Lenovo Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from rest_framework.response import Response
from newton_base.swagger import views as newton_json_view
logger = logging.getLogger(__name__)
class SwaggerJsonView(newton_json_view.SwaggerJsonView):
def get(self, request):
'''
reuse newton code and update the basePath
:param request:
:return:
'''
resp = super(SwaggerJsonView, self).get(request)
json_data = resp.data if resp else None
if json_data:
json_data["basePath"] = "/api/multicloud-thinkcloud/v0/"
json_data["info"]["title"] = "Service NBI of MultiCloud plugin for OpenStack Thinkcloud"
return Response(data=json_data, status=200)
else:
return Response(data={'error': 'internal error'}, status=500)
class APIv1SwaggerJsonView(newton_json_view.SwaggerJsonView):
def get(self, request):
'''
reuse newton code and update the basePath
:param request:
:return:
'''
resp = super(APIv1SwaggerJsonView, self).get(request)
json_data = resp.data if resp else None
if json_data:
json_data["basePath"] = "/api/multicloud-thinkcloud/v1/"
json_data["info"]["title"] = "Service NBI v1 of MultiCloud plugin for Thinkcloud"
return Response(data=json_data, status=200)
else:
return Response(data={'error': 'internal error'}, status=500)
|
from sopel_help import mixins
def test_html_generator_generate_help_commands():
mixin = mixins.HTMLGeneratorMixin()
result = list(mixin.generate_help_commands({
'group_a': ['command_a_a', 'command_a_b'],
'group_c': ['command_c_a', 'command_c_b'],
'group_b': ['command_b_a', 'command_b_b'],
}))
assert result == [
'<h2 id="plugin-group_a"><a href="#plugin-group_a">GROUP_A</a></h2>'
'<ul><li>command_a_a</li><li>command_a_b</li></ul>',
'<h2 id="plugin-group_b"><a href="#plugin-group_b">GROUP_B</a></h2>'
'<ul><li>command_b_a</li><li>command_b_b</li></ul>',
'<h2 id="plugin-group_c"><a href="#plugin-group_c">GROUP_C</a></h2>'
'<ul><li>command_c_a</li><li>command_c_b</li></ul>',
]
|
from .calc_fantasy import calc_match_fantasy
|
import uuid
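# uuid4().int is a random 128-bit integer; masking with (1 << 64) - 1 keeps only
# its low 64 bits, yielding a random 64-bit integer.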
rand64 = uuid.uuid4().int & (1<<64)-1
print(rand64)
|
'''Read the lengths of the opposite and adjacent legs of an arbitrary right
triangle, then compute and print the length of the hypotenuse.
Mathematically:
hip = (co ** 2 + ca ** 2) ** (1 / 2)
'''
from math import hypot, pow
cores = {'limpa': '\033[m', 'azul': '\033[1;34m'}
print('{:-^40}'.format('CO² | CA² | HIPOTENUSA'))
co = int(input('Digite o cumprimento do Cateto Oposto. '))
ca = int(input('Digite o cumprimento do Cateto Adjacente. '))
print('{}CO² = {:.2f}\nCA² = {:.2f}\nHipotenusa = {:.2f}{} '
.format(cores['azul'], pow(co, 2), pow(ca, 2), hypot(co, ca), cores['limpa']))
print('{:-^40}'.format('FIM'))
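# For example, with co = 3 and ca = 4 the script prints CO² = 9.00, CA² = 16.00
# and Hipotenusa = 5.00.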
|
"""
test_cors_origin.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.plugins.tests.helper import PluginTest, PluginConfig
from w3af.plugins.audit.cors_origin import cors_origin
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.kb.info_set import InfoSet
from w3af.core.data.request.fuzzable_request import FuzzableRequest
from w3af.core.data.url.HTTPResponse import HTTPResponse
from w3af.core.data.dc.headers import Headers
class TestCORSOriginScan(PluginTest):
# Test scripts host/port and web context root
target_url = 'http://moth/w3af/audit/cors/'
# Originator for tests cases
originator = 'http://moth/'
_run_configs = {
'cfg': {
'target': target_url,
'plugins': {
'audit': (
PluginConfig(
'cors_origin',
('origin_header_value',
originator, PluginConfig.STR),
('expected_http_response_code',
200, PluginConfig.INT),
),
),
'crawl': (
PluginConfig(
'web_spider',
('only_forward', True, PluginConfig.BOOL)),
),
}
}
}
@attr('ci_fails')
def test_scan(self):
cfg = self._run_configs['cfg']
self._scan(cfg['target'], cfg['plugins'])
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEquals(2, len(vulns), vulns)
EXPECTED_NAMES = ['Insecure Access-Control-Allow-Origin',
'Insecure Access-Control-Allow-Origin']
self.assertEqual([v.get_name() for v in vulns],
EXPECTED_NAMES)
self.assertTrue(all([v.get_url().url_string.startswith(self.target_url)
for v in vulns]))
class TestCORSOrigin(PluginTest):
def setUp(self):
super(TestCORSOrigin, self).setUp()
self.co = cors_origin()
self.url = URL('http://moth/')
self.origin = 'http://moth/'
self.response = HTTPResponse(200, '', Headers(), self.url,
self.url, _id=3)
self.request = FuzzableRequest(self.url)
def test_allow_methods_no(self):
allow_methods = 'GET, POST, Options'
allow_origin = 'http://w3af.org/'
allow_credentials = 'false'
self.co._allow_methods(self.request, self.url, self.origin,
self.response, allow_origin, allow_credentials,
allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(vulns, [])
def test_allow_methods_strange(self):
allow_methods = 'GET, POST, OPTIONS, FOO'
allow_origin = 'http://w3af.org/'
allow_credentials = 'false'
self.co._allow_methods(self.request, self.url, self.origin,
self.response, allow_origin, allow_credentials,
allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 1)
vuln = vulns[0]
self.assertEqual(vuln.get_name(), 'Uncommon CORS methods enabled')
self.assertNotEqual(vuln.get_desc(), None)
def test_allow_methods_sensitive(self):
allow_methods = 'GET, POST, OPTIONS, PUT'
allow_origin = 'http://w3af.org/'
allow_credentials = 'false'
self.co._allow_methods(self.request, self.url, self.origin,
self.response, allow_origin, allow_credentials,
allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 1)
vuln = vulns[0]
self.assertEqual(vuln.get_name(), 'Sensitive CORS methods enabled')
self.assertNotEqual(vuln.get_desc(), None)
def test_allow_methods_sensitive_strange(self):
allow_methods = 'GET, POST, OPTIONS, PUT, FOO'
allow_origin = 'http://w3af.org/'
allow_credentials = 'false'
self.co._allow_methods(self.request, self.url, self.origin,
self.response, allow_origin, allow_credentials,
allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 2)
vuln_names = set([v.get_name() for v in vulns])
expected_vuln_names = {'Sensitive CORS methods enabled',
'Uncommon CORS methods enabled'}
self.assertEqual(vuln_names, expected_vuln_names)
self.assertIsNotNone(vulns[0].get_desc())
self.assertIsNotNone(vulns[1].get_desc())
def test_allow_methods_sensitive_call_max(self):
allow_methods = 'GET, POST, OPTIONS, PUT'
allow_origin = 'http://w3af.org/'
allow_credentials = 'false'
for i in xrange(InfoSet.MAX_INFO_INSTANCES + 2):
self.co._allow_methods(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 1)
v = vulns[0]
msg = 'Failure on run #%s' % i
self.assertEqual(v.get_name(),
'Sensitive CORS methods enabled',
msg)
def test_universal_allow_not(self):
allow_methods = 'GET, POST, OPTIONS'
allow_origin = 'http://w3af.org/'
allow_credentials = 'false'
self.co._analyze_server_response(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 0, vulns)
def test_universal_allow_yes(self):
allow_methods = 'GET, POST, OPTIONS'
allow_origin = '*'
allow_credentials = 'false'
self.co._analyze_server_response(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 1, vulns)
vuln = vulns[0]
self.assertEqual(vuln.get_name(),
'Access-Control-Allow-Origin set to "*"')
self.assertNotEqual(vuln.get_desc(), None)
def test_universal_origin_echo_false(self):
allow_methods = 'GET, POST, OPTIONS'
allow_origin = 'http://www.google.com/'
allow_credentials = 'false'
self.co._analyze_server_response(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 0, vulns)
def test_universal_origin_echo_without_credentials(self):
allow_methods = 'GET, POST, OPTIONS'
allow_origin = 'http://moth/'
allow_credentials = 'false'
        self.co._analyze_server_response(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 1, vulns)
vuln = vulns[0]
self.assertEqual(vuln.get_name(),
'Insecure Access-Control-Allow-Origin')
self.assertNotEqual(vuln.get_desc(), None)
def test_universal_origin_echo_with_credentials(self):
allow_methods = 'GET, POST, OPTIONS'
allow_origin = 'http://moth/'
allow_credentials = 'true'
self.co._analyze_server_response(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 1, vulns)
vuln = vulns[0]
self.assertEqual(vuln.get_name(),
'Insecure Access-Control-Allow-Origin with credentials')
self.assertNotEqual(vuln.get_desc(), None)
def test_universal_origin_allow_creds(self):
allow_methods = 'GET, POST, OPTIONS'
allow_origin = '*'
allow_credentials = 'true'
self.co._analyze_server_response(self.request, self.url, self.origin,
self.response, allow_origin,
allow_credentials, allow_methods)
vulns = self.kb.get('cors_origin', 'cors_origin')
self.assertEqual(len(vulns), 2, vulns)
name_creds = 'Incorrect withCredentials implementation'
acao_star = 'Access-Control-Allow-Origin set to "*"'
impl_err_vuln = [v for v in vulns if v.get_name() == name_creds]
acao_all_vuln = [v for v in vulns if v.get_name() == acao_star]
vuln = impl_err_vuln[0]
self.assertEqual(vuln.get_name(),
'Incorrect withCredentials implementation')
self.assertNotEqual(vuln.get_desc(), None)
vuln = acao_all_vuln[0]
self.assertEqual(vuln.get_name(),
'Access-Control-Allow-Origin set to "*"')
self.assertNotEqual(vuln.get_desc(), None)
|
""" tests for exif2spotlight CLI """
import pathlib
import shutil
import pytest
from click.testing import CliRunner
TEST_IMAGES_DIR = "tests/test_images"
TEST_FILE_WARNING = "exiftool_warning.heic"
TEST_FILE_BADFILE = "badimage.jpeg"
TEST_FILE_WRONG_FILETYPE = "not-a-jpeg-really-a-png.jpeg"
TEST_FILE_1 = "jackrose.jpeg"
TEST_FILE_2 = "statue.jpg"
def copy_test_files(dest, source=None):
""" copy test images to temp directory, returns list of files copied """
if source is None:
source = pathlib.Path("tests/test_images")
else:
source = pathlib.Path(source)
files_copied = []
for source_file in source.glob("*"):
dest_file = pathlib.Path(dest) / source_file.name
shutil.copy2(str(source_file), str(dest_file))
files_copied.append(dest_file)
return files_copied
def test_help_1():
""" test help shown if called with no options """
from exif2spotlight.cli import cli
runner = CliRunner()
result = runner.invoke(cli, [])
output = result.output
assert result.exit_code == 0
assert "Usage:" in result.output
assert "[OPTIONS] FILE" in result.output
def test_help_2():
""" test help shown if called --help option """
from exif2spotlight.cli import cli
runner = CliRunner()
result = runner.invoke(cli, ["--help"])
output = result.output
assert result.exit_code == 0
assert "Usage:" in result.output
assert "[OPTIONS] FILE" in result.output
def test_verbose():
""" test --verbose """
from exif2spotlight.cli import cli
runner = CliRunner()
result = runner.invoke(cli, ["--verbose"])
output = result.output
assert result.exit_code == 0
assert "Usage:" in result.output
assert "[OPTIONS] FILE" in result.output
def test_exiftool():
""" test --exiftool option """
import os
import shutil
from exif2spotlight.cli import cli
from exif2spotlight.exiftool import get_exiftool_path
runner = CliRunner()
cwd = pathlib.Path(os.getcwd())
# pylint: disable=not-context-manager
with runner.isolated_filesystem():
test_dir = pathlib.Path(os.getcwd())
copy_test_files(str(test_dir), source=str(cwd / TEST_IMAGES_DIR))
exiftool_source = get_exiftool_path()
exiftool_path = test_dir / "myexiftool"
shutil.copy2(exiftool_source, exiftool_path)
result = runner.invoke(
cli, ["--verbose", "--exiftool", exiftool_path, str(test_dir / TEST_FILE_1)]
)
assert result.exit_code == 0
assert f"exiftool path: {exiftool_path}" in result.output
|
# Generated by Django 2.2.12 on 2020-06-01 12:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Dashboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userinvitetoken',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='auth.Group'),
),
migrations.AlterField(
model_name='userinvitetoken',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
|
from tkinter import *
window = Tk()
menu = Menu(window)
window.config(menu=menu)
filemenu = Menu(menu)
menu.add_cascade(label='File', menu=filemenu)
filemenu.add_command(label='New')
filemenu.add_command(label='Exit', command=window.quit)
helpmenu = Menu(menu)
menu.add_cascade(label='Help', menu=helpmenu)
helpmenu.add_command(label='About')
window.geometry("300x200")
window.title("Enter the matrix" )
top = Toplevel()
top.geometry("500x400")
top.title('Final Matrix')
swaps = [(0, 1), (1, 2), (3, 4), (4, 5), (6, 7), (7, 8),
(0, 3), (1, 4), (2, 5), (3, 6), (4, 7), (5, 8)]
primes = {3, 5, 7, 11, 13, 17}
final_states = [(1, 2, 3, 4, 5, 6, 7, 8, 9)]
possible_states = {(1, 2, 3, 4, 5, 6, 7, 8, 9): 0}
steps=IntVar()
new_state = []
steps=0
sp1=IntVar()
sp2=IntVar()
sp3=IntVar()
sp4=IntVar()
sp5=IntVar()
sp6=IntVar()
sp7=IntVar()
sp8=IntVar()
sp9=IntVar()
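# puzzle() runs a breadth-first search from the solved arrangement (1..9): two
# cells listed in `swaps` may be exchanged when the sum of their values is
# prime, and `possible_states` records the number of swap rounds after which
# each arrangement is first reached.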
def puzzle():
global steps
while final_states:
for _ in range(len(final_states)):
state = list(final_states.pop(0))
for i, j in swaps:
if state[i] + state[j] in primes:
state[i], state[j] = state[j], state[i]
t = tuple(state)
if t not in possible_states:
possible_states[t] = steps + 1
final_states.append(t)
state[i], state[j] = state[j], state[i]
steps += 1
    # Pass the function itself as the callback; display() with parentheses would
    # run immediately instead of when the button is clicked.
    but3 = Button(window, text="ENTER", width=15, command=display, fg="yellow", bg="black")
    but3.grid(row=3, column=1)
def result(new_state):
if new_state in possible_states:
Button(top, text=' 1 ', fg='black', bg='red', height=2, width=9).grid(row=2, column=0)
Button(top, text=' 2 ', fg='black', bg='red', height=2, width=9).grid(row=2, column=1)
Button(top, text=' 3 ', fg='black', bg='red', height=2, width=9).grid(row=2, column=2)
Button(top, text=' 4 ', fg='black', bg='red', height=2, width=9).grid(row=3, column=0)
Button(top, text=' 5 ', fg='black', bg='red', height=2, width=9).grid(row=3, column=1)
Button(top, text=' 6 ', fg='black', bg='red', height=2, width=9).grid(row=3, column=2)
Button(top, text=' 7 ', fg='black', bg='red', height=2, width=9).grid(row=4, column=0)
Button(top, text=' 8 ', fg='black', bg='red', height=2, width=9).grid(row=4, column=1)
Button(top, text=' 9 ', fg='black', bg='red', height=2, width=9).grid(row=4, column=2)
print("Minimum number of steps required are {}\n".format(possible_states[new_state]))
Label(top, text="Minimum Number of steps required are " ,fg="yellow", font=("bold",16), bg="royal blue").place(x=0, y=150)
Label(top, text=possible_states[new_state], fg="yellow", font=("bold",16), bg="royal blue").place(x=375, y=150)
else:
print("Final state cannot be achieved\n")
Label(top, text="Final state cannot be achieved",fg="yellow",font=("bold",16), bg="royal blue").place(x=100, y=100)
def display():
new_state=[]
a=sp1.get()
b=sp2.get()
c=sp3.get()
d=sp4.get()
e=sp5.get()
f=sp6.get()
g=sp7.get()
h=sp8.get()
i=sp9.get()
print(sp1.get() + " " + sp2.get() + " " + sp3.get())
print(sp4.get() + " " + sp5.get() + " " + sp6.get())
print(sp7.get() + " " + sp8.get() + " " + sp9.get())
r=list(map(int, a))
r.extend(map(int, b))
r.extend(map(int, c))
r.extend(map(int, d))
r.extend(map(int, e))
r.extend(map(int, f))
r.extend(map(int, g))
r.extend(map(int, h))
r.extend(map(int, i))
new_state.extend(r)
new_state = tuple(new_state)
    # Defer the call with a lambda so result() runs with the freshly built state
    # only when the button is clicked.
    but2 = Button(window, text="ENTER", width=15, command=lambda: result(new_state), fg="yellow", bg="black")
    but2.grid(row=3, column=1)
sp1=Spinbox(window,from_=1, to=9, width=9)
sp1.grid(row=0,column=0)
sp2=Spinbox(window,from_=1, to=9, width=9)
sp2.grid(row=0,column=1)
sp3=Spinbox(window,from_=1, to=9, width=9)
sp3.grid(row=0,column=2)
sp4=Spinbox(window,from_=1, to=9, width=9)
sp4.grid(row=1,column=0)
sp5=Spinbox(window,from_=1, to=9, width=9)
sp5.grid(row=1,column=1)
sp6=Spinbox(window,from_=1, to=9, width=9)
sp6.grid(row=1,column=2)
sp7=Spinbox(window,from_=1, to=9, width=9)
sp7.grid(row=2,column=0)
sp8=Spinbox(window,from_=1, to=9, width=9)
sp8.grid(row=2,column=1)
sp9=Spinbox(window,from_=1, to=9, width=9)
sp9.grid(row=2,column=2)
but1=Button(window,text="ENTER",width=15,command=puzzle,
fg="yellow",bg="black")
but1.grid(row=3,column=1)
window.mainloop()
|
import time
import numpy as np
import pandas as pd
def add_new_category(x):
"""
Aimed at 'trafficSource.keyword' to tidy things up a little
"""
x = str(x).lower()
if x == 'nan':
return 'nan'
x = ''.join(x.split())
if r'provided' in x:
return 'not_provided'
if r'youtube' in x or r'you' in x or r'yo' in x or r'tub' in x or r'yout' in x or r'y o u' in x:
return 'youtube'
if r'google' in x or r'goo' in x or r'gle' in x:
return 'google'
else:
return 'other'
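# For example: add_new_category("YouTube Search") -> 'youtube',
# add_new_category("(not provided)") -> 'not_provided', add_new_category(np.nan) -> 'nan'.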
# Load the cleaned train/test data from parquet files.
train_df = pd.read_parquet('input/cleaned/train.parquet.gzip')
test_df = pd.read_parquet('input/cleaned/test.parquet.gzip')
# Remove target col.
y_train = train_df['totals.transactionRevenue'].values
train_df = train_df.drop(['totals.transactionRevenue'], axis=1)
# Join datasets for rowise feature engineering.
trn_len = train_df.shape[0]
merged_df = pd.concat([train_df, test_df])
num_cols = ["totals.hits", "totals.pageviews", "visitNumber", "visitStartTime"]
for col in num_cols:
merged_df[col] = merged_df[col].astype(float)
merged_df['diff_visitId_time'] = merged_df['visitId'] - merged_df['visitStartTime']
merged_df['diff_visitId_time'] = (merged_df['diff_visitId_time'] != 0).astype(float)
merged_df['totals.hits'] = merged_df['totals.hits'].astype(float)
# Build Time based features.
merged_df['formated_date'] = pd.to_datetime(merged_df['date'], format='%Y%m%d')
merged_df['month'] = pd.DatetimeIndex(merged_df['formated_date']).month
merged_df['year'] = pd.DatetimeIndex(merged_df['formated_date']).year
merged_df['day'] = pd.DatetimeIndex(merged_df['formated_date']).day
merged_df['quarter'] = pd.DatetimeIndex(merged_df['formated_date']).quarter
merged_df['weekday'] = pd.DatetimeIndex(merged_df['formated_date']).weekday
merged_df['weekofyear'] = pd.DatetimeIndex(merged_df['formated_date']).weekofyear
merged_df['is_month_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_month_start
merged_df['is_month_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_month_end
merged_df['is_quarter_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_quarter_start
merged_df['is_quarter_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_quarter_end
merged_df['is_year_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_year_start
merged_df['is_year_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_year_end
merged_df['month_unique_user_count'] = merged_df.groupby('month')['fullVisitorId'].transform('nunique')
merged_df['day_unique_user_count'] = merged_df.groupby('day')['fullVisitorId'].transform('nunique')
merged_df['weekday_unique_user_count'] = merged_df.groupby('weekday')['fullVisitorId'].transform('nunique')
merged_df['visitStartTime'] = pd.to_datetime(merged_df['visitStartTime'], unit='s')
merged_df['hour'] = pd.DatetimeIndex(merged_df['visitStartTime']).hour
merged_df['minute'] = pd.DatetimeIndex(merged_df['visitStartTime']).minute
# Cleanup for keywords
merged_df['trafficSource.keyword'] = merged_df['trafficSource.keyword'].fillna('nan')
merged_df['trafficSource.keyword_groups'] = merged_df['trafficSource.keyword'].apply(add_new_category)
merged_df['browser_category'] = merged_df['device.browser'] + '_' + merged_df['device.deviceCategory']
merged_df['browser_operatingSystem'] = merged_df['device.browser'] + '_' + merged_df['device.operatingSystem']
merged_df['source_country'] = merged_df['trafficSource.source'] + '_' + merged_df['geoNetwork.country']
merged_df['log.visitNumber'] = np.log1p(merged_df['visitNumber'])
merged_df['log.totals.hits'] = np.log1p(merged_df['totals.hits'])
merged_df['totals.pageviews'] = merged_df['totals.pageviews'].astype(float).fillna(0)
merged_df['log.totals.pageviews'] = np.log1p(merged_df['totals.pageviews'])
merged_df["page_hits_ratio"] = merged_df['visitNumber'] / (merged_df['totals.pageviews'] + 1)
# Drop old vars.
merged_df = merged_df.drop(['formated_date', 'visitId', 'sessionId', 'visitStartTime'], axis=1)
# Split data back to original data sets.
# Copy the slices so assigning new columns below doesn't trigger SettingWithCopyWarning.
train_df = merged_df[:trn_len].copy()
test_df = merged_df[trn_len:].copy()
del merged_df
train_df['totals.transactionRevenue'] = y_train
print(set(list(train_df)) - set(list(test_df)))
train_df.to_parquet('input/processed/train_static_features.parquet.gzip', compression='gzip')
test_df.to_parquet('input/processed/test_static_features.parquet.gzip', compression='gzip')
|
import os
import json
import logging
from math import pi
from animation import *
import tkinter.ttk as ttk
from tkinter import filedialog
from equation import DiffEqSecKind
from tkinter_app_pattern import TkinterApp
# Constants:
SPRING_SHAPE = 10, 20  # 10 - number of coils, 20 - diameter
CUBE_LENGTH = 80  # edge length of the cube
ROOT_SIZE = "1182x724+300+100"
PLACE_WIN_ANIMATION = 0, 0
PLACE_SET_WINDOW = 720, 0
PLACE_CHART_WINDOW = 0, 240
ARROW_SHAPE = 10, 20, 5
ORDINATE_POSITION = 50
OUTSIDE_CANVAS = -50, -50, -50, -50
MAIN_PARAMS = (650, 25), 25
CORRECT_COORDS_DATA = 150
DIGIT_CAPACITY = 6  # number of digits after the decimal point
CHART_STOP_POINT = 700
CHART_FACTOR = 1
TIME_FACTOR = 50
FORM_RESISTANCE_COEFFICIENT = 1.05  # form drag coefficient
COEFFICIENT_FRICTION = 0.22  # sliding friction coefficient
free_fall_coefficient = 9.8  # acceleration due to gravity
# Suppress verbose log output (it is only needed by the developer):
logging.basicConfig(level=logging.ERROR)
def find(name):
    """
    Check whether a file exists.
    Args:
        name: file name
    Returns: True or False
    """
    return os.path.exists(name)
def button_app_style():
style = ttk.Style()
style.theme_use('clam')
style.configure('TButton', background='#2B2E35',
foreground='#FF6A54', width=10,
borderwidth=1, focusthickness=2,
relief='sunken',
focuscolor='#2B2E30',
font=('Comic Sans MS', 16, 'italic'))
style.map('TButton', foreground=[('pressed', 'red'), ('active', '#FCEAC6')],
background=[('pressed', '!disabled', '#FCEAC6'), ('active', '#4B505C')])
style.configure('TCombobox', background='#2B2E35',
foreground='#FCEAC6', width=10,
borderwidth=1, focusthickness=2,
relief='sunken',
fieldbackground='#2B2E35',
selectbackground='#2B2E35',
selectforeground='#FCEAC6',
font=('Comic Sans MS', 16, 'italic'))
style.map('TCombobox', foreground=[('pressed', '#FCEAC6'), ('active', 'black')])
class CreateToolTip(object):
"""
create a tooltip for a given widget
"""
def __init__(self, widget, text='widget info'):
self.waittime = 500
self.wraplength = 180 # pixels
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.schedule()
return event
def leave(self, event=None):
self.unschedule()
self.hidetip()
return event
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
_id = self.id
self.id = None
if _id:
self.widget.after_cancel(_id)
def showtip(self, event=None):
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 30
y += self.widget.winfo_rooty() - 30
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(self.tw, text=self.text, justify='left',
background="#ffffff", relief='solid', borderwidth=1,
wraplength=self.wraplength)
label.pack(ipadx=1)
return event
def hidetip(self):
tw = self.tw
self.tw = None
if tw:
tw.destroy()
class App(TkinterApp):
FRAME_COLOR = '#FCEAC6'
chart_opts = {
'width': 720,
'height': 480,
'bg': '#2B2E35',
'highlightbackground': FRAME_COLOR,
'highlightcolor': FRAME_COLOR
}
animation_opts = {
'width': 720,
'height': 240,
'bg': '#2B2E35',
'highlightbackground': FRAME_COLOR,
'highlightcolor': FRAME_COLOR
}
settings_window_opts = {
'width': 462,
'height': 724,
'bg': '#2B2E35',
'highlightbackground': FRAME_COLOR,
'highlightcolor': FRAME_COLOR,
'highlightthickness': 2
}
text_param = {
'font': ('Comic Sans MS', 15, "italic"),
'bg': '#2B2E35',
'fg': '#FCEAC6'
}
font_main_params = ('Comic Sans MS', 12, "italic")
    task_data = {}  # task data read from the JSON file
    app_time = 0  # application (simulation) time
coords_chart = []
coords_chart_two = []
coords_chart_three = []
info_text = []
start_flag = False
def _ready(self):
# Считываем информацию с файла:
self.read_data_json_file()
self.root.geometry(ROOT_SIZE)
self.root.title("Лабораторная работа по МИЗу. Подготовил: Коновалов Ф.Д., группа М1О-302С")
self.root.resizable(width=False, height=False) # неизменный размер окна
self.root.bind("<Control-s>", self.button_start_process)
self.root.bind("<Control-p>", self.button_stop_process)
self.root.bind("<Control-u>", self.button_update_process)
self.root.bind("<Control-e>", self.button_close_program)
# Рамка с информацией о задаче:
self.settings_window = tk.Frame(self.root, **self.settings_window_opts)
self.settings_window.place(x=PLACE_SET_WINDOW[0], y=PLACE_SET_WINDOW[1])
# Полотно с анимацией маятника:
self.animation = tk.Canvas(self.root, **self.animation_opts)
self.animation.place(x=PLACE_WIN_ANIMATION[0], y=PLACE_WIN_ANIMATION[1])
# Создание объектов
self.table = Table(self.animation, self.task_data["Входные данные"]["Отклонение"]) # стол
self.cube = Cube(CUBE_LENGTH) # кубик
self.left_spring = Spring(*SPRING_SHAPE) # левая пружина
self.right_spring = Spring(*SPRING_SHAPE) # правая пружина
# Добавление объектов на стол:
self.table.add_obj(self.cube) # добавление куба на стол
self.table.add_obj(self.left_spring) # добавление левой пружины на стол
self.table.add_obj(self.right_spring) # добавление правой пружины на стол
# Полотно с графиком:
self.window_chart = tk.Canvas(self.root, **self.chart_opts)
self.window_chart.place(x=PLACE_CHART_WINDOW[0], y=PLACE_CHART_WINDOW[1])
self.draw_chart_axes() # отрисовка осей координат
self.chart = Chart(self.window_chart) # создание графика
self.information_canvas() # вывод считанной информации с файла на рамку
self.main_chart_id = self.window_chart.create_line(OUTSIDE_CANVAS, fill='#FFB54F', width=2)
self.main_chart_id_two = self.window_chart.create_line(OUTSIDE_CANVAS, fill='#FF6A54', width=1, dash=(4, 2))
self.main_chart_id_three = self.window_chart.create_line(OUTSIDE_CANVAS, fill='#FF6A54', width=1, dash=(4, 2))
self._phys_flag = False # не запускать процесс (работу приложения)
self.output_data(self.window_chart, *MAIN_PARAMS)
def _draw(self):
# Отрисовка стола:
self.animation.create_line(*self.table.generate_table_coords(), fill='#FCEAC6', width=2, tags=("table",))
# Отрисовка левой пружины:
self.animation.create_line(*self.left_spring.create_coords(self.table.create_coords_mesh_left_spring()[0],
self.table.create_coords_mesh_left_spring()[1]),
fill='#FFB54F', tags=("left_spring",))
# Отрисовка правой пружины:
self.animation.create_line(*self.right_spring.create_coords(self.table.create_coords_mesh_right_spring()[0],
self.table.create_coords_mesh_right_spring()[1]),
fill='#FFB54F', tags=("right_spring",))
# Отрисовка кубика:
self.animation.create_rectangle(self.table.center_mass_position - CUBE_LENGTH // 2,
self.animation_opts['height'] // 2 - CUBE_LENGTH // 2,
self.table.center_mass_position + CUBE_LENGTH // 2,
self.animation_opts['height'] // 2 + CUBE_LENGTH // 2,
fill="#FF6A54", tags=("cube",))
if not self.start_flag:
self._draw_flag = False
else:
self._draw_flag = True
# Условие начала отрисовки графика:
if len(self.coords_chart) > 2:
# Отрисовка графика:
if len(self.coords_chart_two) != 0 and len(self.coords_chart_three) != 0:
self.window_chart.coords(self.main_chart_id, *self._flatten(self.coords_chart))
self.window_chart.coords(self.main_chart_id_two, *self._flatten(self.coords_chart_two))
self.window_chart.coords(self.main_chart_id_three, *self._flatten(self.coords_chart_three))
else:
self.window_chart.coords(self.main_chart_id, *self._flatten(self.coords_chart))
# Условие остановки графика:
if self.coords_chart[-1][0] < CHART_STOP_POINT:
self._phys_flag = True
else:
self._phys_flag = False
self._draw_flag = False
def _physics_process(self, delta):
# Уравнение движения (составленное по 2-му з-ну Ньютона):
self.equation = DiffEqSecKind(
FORM_RESISTANCE_COEFFICIENT / self.cube_mass,
2 * self.spring_coeff_elasticity / self.cube_mass,
-COEFFICIENT_FRICTION * free_fall_coefficient,
(self.task_data["Входные данные"]["Отклонение"], 0))
# print(self.equation._calculate_discriminant())
function = self.equation.create_equation(self.app_time, TIME_FACTOR)
# Условие прорисовки вспомогательных (красных) линий при затухающих колебаниях:
if isinstance(function, tuple):
function = self.equation.create_equation(self.app_time, TIME_FACTOR)[0]
function_two = self.equation.create_equation(self.app_time, TIME_FACTOR)[1]
function_three = -self.equation.create_equation(self.app_time, TIME_FACTOR)[1]
else:
function = self.equation.create_equation(self.app_time, TIME_FACTOR)
function_two = 0
function_three = 0
self.animation.delete('left_spring')
self.animation.delete('right_spring')
self.animation.delete('table')
self.animation.delete('cube')
# положение куба:
self.table.center_mass_position = function
# добавление в список следующей пары координат:
self.coords_chart.append(self.chart.convert_coords(self.app_time, function, CHART_FACTOR))
self.coords_chart_two.append(self.chart.convert_coords(self.app_time, function_two, CHART_FACTOR))
self.coords_chart_three.append(
self.chart.convert_coords(self.app_time, function_three, CHART_FACTOR))
self.app_time += delta
    def information_canvas(self):
        """
        Draw the task information and the buttons on the canvas.
        Originally this method grew too large, so for readability it was
        split into several smaller methods.
        The placement of the data and buttons depends on height, delta and
        abscissa, which change as the data is laid out.
        """
        # Values that control where the data is placed in the window:
        height, delta = 50, 35
        abscissa = 5
        # Title:
        tk.Label(self.settings_window, text='Задача №2. Вариант 59', font=('Comic Sans MS', 18, "bold"),
                 bg='#2B2E35', fg='#5188BA').place(x=80, y=10)
        # First data block:
        height, delta, abscissa = self.print_input_data(height, delta, abscissa)
        # Second data block:
        height, delta, abscissa = self.print_add_conditions(height, delta, abscissa)
        # Third data block:
        height, delta, abscissa = self.print_special_conditions(height, delta, abscissa)
        # Buttons:
        self.output_buttons(height, delta)
def print_input_data(self, height, delta, abscissa):
"""
Вывод входных данных
Args:
height: величина, влияющая на расположение данных на окне
delta: величина, влияющая на расположение данных на окне
abscissa: величина, влияющая на расположение данных на окне
"""
tk.Label(self.settings_window, text="1.Входные данные:", font=('Comic Sans MS', 16, "bold"),
bg='#2B2E35', fg='#FFB54F').place(x=abscissa, y=height)
for key, value in self.task_data["Входные данные"].items():
tk.Label(self.settings_window, text=f' {key}: {value} см', **self.text_param).place(
x=abscissa, y=height + delta)
height += delta
return height, delta, abscissa
def print_add_conditions(self, height, delta, abscissa):
"""
Вывод дополнительных условий
Args:
height: величина, влияющая на расположение данных на окне
delta: величина, влияющая на расположение данных на окне
abscissa: величина, влияющая на расположение данных на окне
"""
tk.Label(self.settings_window, text="2.Дополнительные условия:", font=('Comic Sans MS', 16, "bold"),
bg='#2B2E35', fg='#FFB54F').place(
x=abscissa, y=height + delta)
for key, value in self.task_data["Дополнительные условия"].items():
if key == "Длина пружин":
tk.Label(self.settings_window, text=f' {key}: {value} см', **self.text_param).place(
x=abscissa, y=height + 2 * delta)
elif key == "Материал тела":
tk.Label(self.settings_window, text=f' {key}: ', **self.text_param).place(
x=abscissa, y=height + 2 * delta)
self.text_var_first = tk.StringVar(self.settings_window)
material_box = ttk.Combobox(self.settings_window,
width=10,
textvariable=self.text_var_first,
values=[i for i, j in self.task_data["Плотность"].items()],
font=('Comic Sans MS', 16, "italic"))
material_box.place(x=abscissa + 280, y=height + 2 * delta)
material_box.current(0)
material_box.bind("<<ComboboxSelected>>", self.box_call_first)
elif key == "Материал пружины":
tk.Label(self.settings_window, text=f' {key}: ', **self.text_param).place(
x=abscissa, y=height + 2 * delta)
self.text_var_second = tk.StringVar(self.settings_window)
material_box = ttk.Combobox(self.settings_window,
width=10,
textvariable=self.text_var_second,
values=[i for i, j in self.task_data["Модуль сдвига"].items()],
font=('Comic Sans MS', 16, "italic"))
material_box.place(x=abscissa + 280, y=height + 2 * delta)
material_box.current(0)
material_box.bind("<<ComboboxSelected>>", self.box_call_second)
else:
tk.Label(self.settings_window, text=f' {key}: {value}', **self.text_param).place(
x=abscissa, y=height + 2 * delta)
height += delta
return height, delta, abscissa
def print_special_conditions(self, height, delta, abscissa):
"""
Вывод особых условий
Args:
height: величина, влияющая на расположение данных на окне
delta: величина, влияющая на расположение данных на окне
abscissa: величина, влияющая на расположение данных на окне
"""
tk.Label(self.settings_window, text="3.Особые условия:", font=('Comic Sans MS', 16, "bold"),
bg='#2B2E35', fg='#FFB54F').place(
x=abscissa, y=height + 2 * delta)
for key in self.task_data["Особые условия"]:
tk.Label(self.settings_window, text=f' -{key}', **self.text_param).place(
x=abscissa, y=height + 3 * delta)
height += delta
return height, delta, abscissa
def output_buttons(self, height, delta):
"""
Создание кнопок
Args:
height: величина, влияющая на расположение кнопки на окне
delta: величина, влияющая на расположение кнопки на окне
"""
button_app_style() # установка стиля кнопок
exit_btn = ttk.Button(self.settings_window, text=f'Выход', command=self.button_close_program)
exit_btn.place(x=2 * delta, y=height + 3.5 * delta)
CreateToolTip(exit_btn, "Exit (Ctrl + e)\n"
"Выйти из приложения")
update_btn = ttk.Button(self.settings_window, text=f'Сбросить', command=self.button_update_process)
update_btn.place(x=7 * delta, y=height + 3.5 * delta)
CreateToolTip(update_btn, "Update (Ctrl + u)\n"
"Обновить процесс")
self.start_btn = ttk.Button(self.window_chart, text=f'Начать', command=self.button_start_process)
self.start_btn.place(x=380, y=424)
CreateToolTip(self.start_btn, "Start (Ctrl + s)\n"
"Запустить модель")
stop_btn = ttk.Button(self.window_chart, text=f'Пауза', command=self.button_stop_process)
stop_btn.place(x=550, y=424)
CreateToolTip(stop_btn, "Pause (Ctrl + p)\n"
"Остановить процесс")
def box_call_first(self, event):
self.task_data["Дополнительные условия"]["Материал тела"] = self.text_var_first.get()
self.update_main_model_params()
return event
def box_call_second(self, event):
self.task_data["Дополнительные условия"]["Материал пружины"] = self.text_var_second.get()
self.update_main_model_params()
return event
def button_stop_process(self, event=None):
self._phys_flag = False
self._proc_flag = False
self._draw_flag = False
return event
    def button_update_process(self, event=None):
        """
        Reset the current state of the application.
        """
self.start_btn.configure(text="Начать")
# Удаление объектов текущего состояния с анимации:
self.animation.delete('left_spring')
self.animation.delete('right_spring')
# self.animation.delete('table')
self.animation.delete('cube')
# Удаление графика текущего состояния (и осей координат):
self.window_chart.delete(self.main_chart_id)
self.window_chart.delete(self.main_chart_id_two)
self.window_chart.delete(self.main_chart_id_three)
# Отрисовка осей:
self.draw_chart_axes()
# Обновление времени приложения:
self.app_time = 0
# Очистка списка координат:
self.coords_chart = []
self.coords_chart_two = []
self.coords_chart_three = []
# Приведение положения кубика к начальному состоянию:
self.table.center_mass_position = self.task_data["Входные данные"]["Отклонение"]
# Обновление основных параметров маятника:
self.update_main_model_params()
self._phys_flag = False
self._draw_flag = True
self.main_chart_id = self.window_chart.create_line(OUTSIDE_CANVAS, fill='#FFB54F', width=2)
self.main_chart_id_two = self.window_chart.create_line(OUTSIDE_CANVAS, fill='#FF6A54', width=1, dash=(2, 4))
self.main_chart_id_three = self.window_chart.create_line(OUTSIDE_CANVAS, fill='#FF6A54', width=1, dash=(2, 4))
self.start_flag = False
return event
    def button_start_process(self, event=None):
        """
        Start the process (start the application running).
        """
self.start_btn.configure(text="Продолжить")
self._phys_flag = True
self._draw_flag = True
self.start_flag = True
return event
    def button_close_program(self, event=None):
        """
        Close the application.
        """
self.root.destroy()
return event
    def draw_chart_axes(self):
        """
        Draw the chart coordinate axes.
        """
self.window_chart.create_line(0, self.chart_opts['height'] // 2,
self.chart_opts['width'], self.chart_opts['height'] // 2,
fill='white', arrow=tk.LAST, arrowshape=ARROW_SHAPE)
self.window_chart.create_line(ORDINATE_POSITION, self.chart_opts['height'], ORDINATE_POSITION, 0,
fill='white', arrow=tk.LAST, arrowshape=ARROW_SHAPE)
    def discard(self):
        """
        Reset the calculation to the program's initial state.
        """
self.window_chart.delete("all")
self.draw_chart_axes()
    def read_data_json_file(self):
        """
        Read the data file.
        This method looks for <Input_data.json> in the same directory as the
        program file; if it is not found, the user is asked to choose the
        file to load.
        """
        if find('Input_data.json'):
            # Open the same file that was just found (the path must match the check above).
            with open('Input_data.json', encoding="utf-8") as file:
                self.task_data = json.loads(file.read())
        else:
            with open(filedialog.askopenfilename(title="Откройте файл с данными (формат: .json)"), encoding="utf-8") \
                    as file:
                self.task_data = json.loads(file.read())
def information_console(self):
"""
Оформление данных задачи в консоли.
"""
task_text = "Горизонтальный реальный пружинный маятник, закреплённый двумя пружинами. Тело - куб."
print("Задача №2. Вариант 59.".center(len(task_text)))
print("Подготовил студент группы М1О-302С-18 Коновалов Ф.Д.\n".center(len(task_text)))
print("Условие задачи:")
print(task_text)
for key, value in self.task_data.items():
print()
print(f"{key}:")
if isinstance(value, dict):
for inside_key, inside_value in value.items():
print(f" {inside_key}: {inside_value}")
elif isinstance(value, list):
for step in value:
print(f" - {step}")
else:
print(f" {key}: {value}\n")
def update_main_model_params(self):
for i in self.info_text:
self.window_chart.delete(i)
self.info_text = []
self.output_data(self.window_chart, *MAIN_PARAMS)
@staticmethod
def control_round(obj):
if isinstance(obj, complex):
return complex(round(obj.real, DIGIT_CAPACITY), round(obj.imag, DIGIT_CAPACITY))
else:
return round(obj, DIGIT_CAPACITY)
def output_data(self, canvas: tk.Canvas, coords: tuple, delta):
self.info_text.append(canvas.create_text(coords,
text=u"\u03B2 =" + f" {self.control_round(self.damping_factor)}",
font=self.font_main_params,
fill=self.text_param["fg"]))
self.info_text.append(canvas.create_text(
coords[0], coords[1] + delta,
text="\u03C9_0 =" + f" {self.control_round(self.natural_frequency_ideal_pendulum)}",
font=self.font_main_params,
fill=self.text_param["fg"]))
self.info_text.append(canvas.create_text(
coords[0], coords[1] + 2 * delta,
text="\u03C9 =" + f" {self.control_round(self.damped_oscillation_frequency)}",
font=self.font_main_params,
fill=self.text_param["fg"]))
self.info_text.append(canvas.create_text(
coords[0] - CORRECT_COORDS_DATA, coords[1],
text="\u03A4 =" + f" {self.control_round(self.period)}",
font=self.font_main_params,
fill=self.text_param["fg"]))
self.info_text.append(canvas.create_text(coords[0] - CORRECT_COORDS_DATA, coords[1] + delta,
text="\u03BB =" + f" {self.control_round(self.damping_decrement)}",
font=self.font_main_params,
fill=self.text_param["fg"]))
self.info_text.append(canvas.create_text(coords[0] - CORRECT_COORDS_DATA, coords[1] + 2 * delta,
text="m =" + f" {self.control_round(self.cube_mass)} кг",
font=self.font_main_params,
fill=self.text_param["fg"]))
    @property
    def damping_factor(self):
        """
        Compute the damping factor.
        Returns: damping factor
        """
        return FORM_RESISTANCE_COEFFICIENT / (2 * self.cube_mass)
    @property
    def natural_frequency_ideal_pendulum(self):
        """
        Compute the natural frequency of the ideal pendulum.
        Returns: natural frequency of the ideal pendulum
        """
        return (self.spring_coeff_elasticity / self.cube_mass) ** .5
    @property
    def damped_oscillation_frequency(self):
        """
        Compute the frequency of the damped oscillations.
        Returns: frequency of the damped oscillations
        """
        return (self.natural_frequency_ideal_pendulum ** 2 - self.damping_factor ** 2) ** .5
    @property
    def period(self):
        """
        Compute the period of the damped oscillations.
        Returns: period of the damped oscillations
        """
        return 2 * pi / self.damped_oscillation_frequency
    @property
    def damping_decrement(self):
        """
        Compute the logarithmic damping decrement.
        Returns: logarithmic damping decrement
        """
        return self.period * self.damping_factor
    @property
    def cube_mass(self):
        """
        Compute the mass of the cube.
        Returns: mass of the cube
        """
        for key, value in self.task_data["Плотность"].items():
            if self.task_data["Дополнительные условия"]["Материал тела"] == key:
                return value * (self.task_data["Входные данные"]["Размер куба"] ** 3) / 1000
    @property
    def shear_modulus(self):
        """
        Pick the shear modulus (depends on the spring material).
        Returns: shear modulus
        """
        for key, value in self.task_data["Модуль сдвига"].items():
            if self.task_data["Дополнительные условия"]["Материал пружины"] == key:
                return value * (10 ** 10)
    @property
    def spring_coeff_elasticity(self):
        """
        Compute the spring stiffness coefficient.
        Returns: spring stiffness coefficient
        """
        amount_turns_spring = ((self.task_data["Дополнительные условия"]["Длина пружин"]) /
                               (self.task_data["Входные данные"]["Шаг витков пружины"])) + 1
        return (self.shear_modulus * (self.task_data["Входные данные"]["Диаметр проволоки"] ** 4)) / \
               (8 * (self.task_data["Входные данные"]["Диаметр пружины"] ** 3) * amount_turns_spring)
    @property
    def coefficient_friction(self):
        """
        Read the sliding friction coefficient from user input.
        Returns: sliding friction coefficient
        """
        return float(input("Введите коэффициент трения скольжения: "))
@staticmethod
def _flatten(seq):
"""Internal function."""
res = ()
for item in seq:
if isinstance(item, (tuple, list)):
res = res + App._flatten(item)
elif item is not None:
res = res + (item,)
return res
if __name__ == '__main__':
app = App()
app.run()
|
# Create your views here.
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product,
quantity=cd['quantity'],
update_quantity=cd['update'])
return redirect('cart:cart_detail')
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(
initial={'quantity': item['quantity'],
'update': True})
return render(request, 'cart/detail.html', {'cart': cart})
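# A urls.py matching the view names used above might look like the following
# (illustrative sketch only; the project's actual routing may differ):
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'cart'
#     urlpatterns = [
#         path('', views.cart_detail, name='cart_detail'),
#         path('add/<int:product_id>/', views.cart_add, name='cart_add'),
#         path('remove/<int:product_id>/', views.cart_remove, name='cart_remove'),
#     ]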
|
# this is the "keep alive" file,
# used to keep the replit alive..
# not in use rn
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return"DenzGraphingApiWrapper_Bot is up and running"
def run():
app.run(port='8080', host='0.0.0.0')
def keep_alive():
t = Thread(target = run)
t.start()
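# Typical use from the bot's entry point (illustrative only; module and variable
# names here are assumptions, not part of this repository):
#
#     from keep_alive import keep_alive
#     keep_alive()    # start the tiny web server in a background thread
#     bot.run(TOKEN)  # then start the long-running bot process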
|
from ..models import EvolutionTrackInterpolator, IsochroneInterpolator
from .models import MISTIsochroneGrid, MISTBasicIsochroneGrid, MISTEvolutionTrackGrid
from .bc import MISTBolometricCorrectionGrid
class MIST_Isochrone(IsochroneInterpolator):
grid_type = MISTIsochroneGrid
bc_type = MISTBolometricCorrectionGrid
eep_bounds = (0, 1710)
class MIST_BasicIsochrone(IsochroneInterpolator):
grid_type = MISTBasicIsochroneGrid
bc_type = MISTBolometricCorrectionGrid
eep_bounds = (0, 1710)
class MIST_EvolutionTrack(EvolutionTrackInterpolator):
grid_type = MISTEvolutionTrackGrid
bc_type = MISTBolometricCorrectionGrid
eep_bounds = (0, 1710)
class MIST_BasicEvolutionTrack(EvolutionTrackInterpolator):
grid_type = MISTEvolutionTrackGrid
bc_type = MISTBolometricCorrectionGrid
eep_bounds = (0, 1710)
MIST_Isochrone._track_type = MIST_EvolutionTrack
MIST_BasicIsochrone._track_type = MIST_BasicEvolutionTrack
MIST_EvolutionTrack._iso_type = MIST_Isochrone
MIST_BasicEvolutionTrack._iso_type = MIST_BasicIsochrone
|
import torch
import numpy as np
import numbers
import quantization
import quantization.help_functions as qhf
class ScalingFunction(object):
    '''
    This class holds two functions: the scaling function for a tensor, and its inverse.
    They are bundled together in a class because inverting the scaling requires remembering
    several parameters, and tracking them manually is inconvenient. The class keeps track of
    them automatically.
    '''
    # TODO: Make static versions of scale and inv_scale that take everything they need as arguments,
    # so the class becomes a thin wrapper that calls scale, saves the arguments,
    # and calls the inverse. That way the scaling function could be called both directly and
    # through the class.
def __init__(self, type_scaling, max_element, subtract_mean, bucket_size, modify_in_place=False):
type_scaling = type_scaling.lower()
if type_scaling not in ('linear', 'absmax', 'absnorm'):
raise ValueError('Incorrect parameter: type of scaling must be "linear", '
'"absMax" or "absNorm"')
if bucket_size is not None and (bucket_size <= 0 or not isinstance(bucket_size, int)):
raise ValueError('Bucket size must be an integer and strictly positive. '
'Pass None if you want to avoid using buckets')
        if max_element is not False:
            if not isinstance(max_element, numbers.Number) or isinstance(max_element, bool):
                raise ValueError('max_element must be a number (or False to disable clipping)')
self.type_scaling = type_scaling
self.max_element = max_element
self.subtract_mean = subtract_mean
self.bucket_size = bucket_size
self.modify_in_place = modify_in_place
self.tol_diff_zero = 1e-10
#Things we need to invert the tensor. Set to None, will be populated by scale
self.mean_tensor = None
self.original_tensor_size = None
self.original_tensor_length = None
self.expected_tensor_size = None
self.alpha = None #used if linear scaling
self.beta = None
self.idx_min_rows = None
self.idx_max_rows = None
self.norm_scaling = None #used if absNorm or absMax
self.tensor_sign = None
def scale_down(self, tensor):
'''
Scales the tensor using one of the methods. Note that if bucket_size is not None,
the shape of the tensor will be changed. This change will be inverted by inv_scale
'''
if not self.modify_in_place:
tensor = tensor.clone()
if self.subtract_mean:
self.mean_tensor = tensor.mean()
tensor.sub_(self.mean_tensor)
else:
self.mean_tensor = 0
if self.max_element is not False:
tensor[tensor > self.max_element] = self.max_element
tensor[tensor < -self.max_element] = -self.max_element
self.original_tensor_size = tensor.size()
self.original_tensor_length = tensor.numel()
tensor = qhf.create_bucket_tensor(tensor, self.bucket_size, fill_values='last')
if self.bucket_size is None:
tensor = tensor.view(-1)
self.expected_tensor_size = tensor.size()
        # if the tensor is bucketed it has 2 dimensions, otherwise it has 1.
if self.type_scaling == 'linear':
if self.bucket_size is None:
min_rows, idx_min_rows = tensor.min(dim=0, keepdim=True)
max_rows, idx_max_rows = tensor.max(dim=0, keepdim=True)
else:
min_rows, idx_min_rows = tensor.min(dim=1, keepdim=True)
max_rows, idx_max_rows = tensor.max(dim=1, keepdim=True)
alpha = max_rows - min_rows
beta = min_rows
            # If alpha is zero for a row, it means the whole row is constant.
            # So we set alpha = 1 there to avoid nan and inf; the result won't change.
if self.bucket_size is None:
if alpha[0] < self.tol_diff_zero:
alpha[0] = 1
else:
alpha[alpha < self.tol_diff_zero] = 1
self.alpha = alpha
self.beta = beta
self.idx_min_rows = idx_min_rows
self.idx_max_rows = idx_max_rows
tensor.sub_(self.beta.expand_as(tensor))
tensor.div_(self.alpha.expand_as(tensor))
elif self.type_scaling in ('absmax', 'absnorm'):
self.tensor_sign = torch.sign(tensor)
tensor.abs_()
if self.type_scaling == 'absmax':
norm_to_use = 'max'
elif self.type_scaling == 'absnorm':
norm_to_use = 'norm'
else: raise ValueError
if self.bucket_size is None:
                norm_scaling = tensor.max() if norm_to_use == 'max' else tensor.norm(p=2)
if norm_scaling < self.tol_diff_zero:
norm_scaling = 1
else:
                norm_scaling = tensor.max(dim=1, keepdim=True)[0] if norm_to_use == 'max' else tensor.norm(p=2, dim=1, keepdim=True)
norm_scaling[norm_scaling < self.tol_diff_zero] = 1
            self.norm_scaling = norm_scaling
tensor.div_(self.norm_scaling.expand_as(tensor))
return tensor
def inv_scale_down(self, tensor):
"inverts the scaling done before. Note that the max_element truncation won't be inverted"
if not self.modify_in_place:
tensor = tensor.clone()
if tensor.size() != self.expected_tensor_size:
            raise ValueError('The tensor passed does not have the expected size.')
if self.type_scaling == 'linear':
tensor.mul_(self.alpha.expand_as(tensor))
tensor.add_(self.beta.expand_as(tensor))
elif self.type_scaling in ('absmax', 'absnorm'):
tensor.mul_(self.norm_scaling.expand_as(tensor))
tensor.mul_(self.tensor_sign)
tensor.add_(self.mean_tensor)
tensor = tensor.view(-1)[0:self.original_tensor_length] # remove the filler values
tensor = tensor.view(self.original_tensor_size)
return tensor
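# Minimal usage sketch of ScalingFunction (illustrative; t is assumed to be a float tensor):
#   sf = ScalingFunction('linear', max_element=False, subtract_mean=False, bucket_size=None)
#   scaled = sf.scale_down(t)             # values mapped into [0, 1], possibly reshaped
#   restored = sf.inv_scale_down(scaled)  # recovers the original values and shape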
def uniformQuantization(tensor, s, type_of_scaling='linear', stochastic_rounding=False,
max_element=False, subtract_mean=False, bucket_size=None, modify_in_place=False):
'''
    Quantizes the tensor passed using the random uniform quantization algorithm with s levels.
'''
if not modify_in_place:
tensor = tensor.clone()
#we always pass True to modify_in_place because we have already cloned it by this point
scaling_function = ScalingFunction(type_of_scaling, max_element, subtract_mean,
bucket_size, modify_in_place=True)
tensor = scaling_function.scale_down(tensor)
#decrease s by one so as to have exactly s quantization points
s = s - 1
if stochastic_rounding:
#What follows is an in-place version of this code:
# lVector = torch.floor((tensor * s))
# probabilities = s * tensor - lVector
# tensor = lVector / s
probabilities = s*tensor
tensor.mul_(s)
tensor.floor_()
probabilities -= tensor
tensor.div_(s)
currRand = torch.rand(tensor.size())
currRand = currRand.cuda() if tensor.is_cuda else currRand
tensor.add_((currRand <= probabilities).float()*1/s)
else:
tensor.mul_(s)
tensor.round_()
tensor.div_(s)
tensor = scaling_function.inv_scale_down(tensor)
return tensor, scaling_function
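# Example (illustrative): quantize a tensor to 16 evenly spaced levels per bucket of 256 values:
#   q, sf = uniformQuantization(t, s=16, type_of_scaling='linear', bucket_size=256)
# q has the same shape as t; sf is the ScalingFunction that was used internally.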
def nonUniformQuantization(tensor, listQuantizationPoints, max_element=False,
subtract_mean=False, modify_in_place=False, bucket_size=None,
pre_processed_values=False, search_sorted_obj=None, scaling_function=None,
tensors_info=None):
    '''
    :param tensor: the tensor to quantize
    :param listQuantizationPoints: the quantization points to quantize it with
    :param max_element: see ScalingFunction doc
    :param subtract_mean: see ScalingFunction doc
    :param modify_in_place: modify the tensor in place or clone it
    :param bucket_size: the bucket size
    :param pre_processed_values: if True, the tensor is expected to have been pre-processed already
    :param search_sorted_obj: a SearchSorted instance built from the pre-processed tensor
    :param scaling_function: the ScalingFunction used during pre-processing
    :param tensors_info: (tensor type, is_cuda) of the pre-processed tensor
    This function is the bottleneck of the differentiable quantization algorithm. One way to speed it up is to
    avoid doing the same operations on the tensor every time; in the differentiable quantization loop,
    the tensors are always the same and only listQuantizationPoints changes. To take advantage of this,
    you can scale the tensor only once and sort it; sorting speeds up the algorithm. The SearchSorted
    class stores the sorted values and the indices needed to unsort the result.
    In short, to pre-process you do something like:
    > scaling_function = ScalingFunction(....)
    > tensor = scaling_function.scale_down(tensor)  # scale it down
    > tensors_info = tensor.type(), tensor.is_cuda
    > tensor = tensor.view(-1).cpu().numpy()  # we need a 1-dimensional numpy array
    > search_sorted_obj = SearchSorted(tensor)  # sorts the array and remembers how to unsort it
    > nonUniformQuantization(None, listQuantizationPoints, pre_processed_values=True,
    >                        search_sorted_obj=search_sorted_obj,
    >                        scaling_function=scaling_function, tensors_info=tensors_info)
    '''
if pre_processed_values is True and (search_sorted_obj is None or scaling_function is None or tensors_info is None):
raise ValueError('If values are preprocessed, all pre processed arguments need to be passed')
if pre_processed_values is False and not (search_sorted_obj is None and \
scaling_function is None and tensors_info is None):
raise ValueError('pre processing is False but you are passing some pre processing values. '
'This is probably not what you wanted to do, so to avoid bugs an error is raised')
if isinstance(listQuantizationPoints, list):
listQuantizationPoints = torch.Tensor(listQuantizationPoints)
#we need numpy.searchsorted to make this efficient.
#There is no pytorch equivalent for now, so I need to convert it to a numpy array first
if not pre_processed_values:
if not modify_in_place:
tensor = tensor.clone()
# we always pass True to modify_in_place because we have already cloned it by this point
scaling_function = ScalingFunction(type_scaling='linear', max_element=max_element,
subtract_mean=subtract_mean, bucket_size=bucket_size,
modify_in_place=True)
tensor = scaling_function.scale_down(tensor)
tensor_type = tensor.type()
is_tensor_cuda = tensor.is_cuda
if is_tensor_cuda:
numpyTensor = tensor.view(-1).cpu().numpy()
else:
numpyTensor = tensor.view(-1).numpy()
else:
tensor_type, is_tensor_cuda = tensors_info
listQuantizationPoints = listQuantizationPoints.cpu().numpy()
if not pre_processed_values:
#code taken from
#https://stackoverflow.com/questions/37841654/find-elements-of-array-one-nearest-to-elements-of-array-two/37842324#37842324
indicesClosest = np.searchsorted(listQuantizationPoints, numpyTensor, side="left").clip(
max=listQuantizationPoints.size - 1)
mask = (indicesClosest > 0) & \
((indicesClosest == len(listQuantizationPoints)) |
(np.fabs(numpyTensor - listQuantizationPoints[indicesClosest - 1]) <
np.fabs(numpyTensor - listQuantizationPoints[indicesClosest])))
indicesClosest = indicesClosest - mask
else:
indicesClosest = search_sorted_obj.query(listQuantizationPoints)
#transforming it back to torch tensors
tensor = listQuantizationPoints[indicesClosest]
indicesClosest = torch.from_numpy(indicesClosest).long()
tensor = torch.from_numpy(tensor).type(tensor_type)
if is_tensor_cuda:
tensor = tensor.cuda()
indicesClosest = indicesClosest.cuda()
tensor = tensor.view(*scaling_function.expected_tensor_size)
tensor = scaling_function.inv_scale_down(tensor)
    indicesClosest = indicesClosest.view(-1)[0:scaling_function.original_tensor_length]  # trim the filler values from the indices, too
indicesClosest = indicesClosest.view(scaling_function.original_tensor_size)
return tensor, indicesClosest, scaling_function
class uniformQuantization_variable(torch.autograd.Function):
def __init__(self, s, type_of_scaling='linear', stochastic_rounding=False, max_element=False,
subtract_mean=False, modify_in_place=False, bucket_size=None):
super(uniformQuantization_variable, self).__init__()
self.s = s
self.typeOfScaling = type_of_scaling
self.stochasticRounding = stochastic_rounding
self.maxElementAllowed = max_element
self.subtractMean = subtract_mean
self.modifyInPlace = modify_in_place
self.bucket_size = bucket_size
self.saved_for_backward = None
def forward(self, input):
self.saved_for_backward = {}
self.saved_for_backward['input'] = input.clone()
quantized_tensor = uniformQuantization(input, s=self.s,
type_of_scaling=self.typeOfScaling,
stochastic_rounding=self.stochasticRounding,
max_element=self.maxElementAllowed,
subtract_mean=self.subtractMean,
modify_in_place=self.modifyInPlace,
bucket_size=self.bucket_size)[0]
return quantized_tensor
def backward(self, grad_output):
#TODO: Add explanation or link to explanation
#A convoluted derivation tells us the following:
#TODO: Make sure this works with self.bucket_size = None, too!
if self.typeOfScaling != 'linear':
raise ValueError('Linear scaling is necessary to backpropagate')
if self.subtractMean is True:
raise NotImplementedError('The backprop function assumes subtractMean to be False for now')
if self.bucket_size is None:
raise NotImplementedError('Right now the code does not work with bucket_size None.'
' Not hard to modify though')
if self.saved_for_backward is None:
raise ValueError('Need to have called .forward() to be able to call .backward()')
tensor = self.saved_for_backward['input']
# I could save the quantized tensor and scaling function from the forward pass, but too memory expensive.
quantized_tensor_unscaled, scaling_function = uniformQuantization(tensor, s=self.s,
type_of_scaling=self.typeOfScaling,
stochastic_rounding=self.stochasticRounding,
max_element=self.maxElementAllowed,
subtract_mean=self.subtractMean,
modify_in_place=self.modifyInPlace,
bucket_size=self.bucket_size)
quantized_tensor_unscaled = scaling_function.scale_down(quantized_tensor_unscaled)
quantized_tensor_unscaled = quantized_tensor_unscaled.view(-1)[0:scaling_function.original_tensor_length]
quantized_tensor_unscaled = quantized_tensor_unscaled.view(scaling_function.original_tensor_size)
alpha = scaling_function.alpha
beta = scaling_function.beta
total_num_buckets = alpha.size(0)
bucketed_tensor_sizes = total_num_buckets, self.bucket_size
alpha = alpha.expand(*bucketed_tensor_sizes).contiguous().view(-1)\
[0:scaling_function.original_tensor_length]
beta = beta.expand(*bucketed_tensor_sizes).contiguous().view(-1)\
[0:scaling_function.original_tensor_length]
idx_max_rows = scaling_function.idx_max_rows
idx_min_rows = scaling_function.idx_min_rows
        # The indices are relative to each bucket; so a 4 in the third row is actually 4 + 2*bucket_size.
        # So I need to adjust these values with this "adder_for_buckets"
adder_for_buckets = torch.arange(0, self.bucket_size * total_num_buckets, self.bucket_size).long()
if idx_max_rows.is_cuda:
adder_for_buckets = adder_for_buckets.cuda()
idx_max_rows = idx_max_rows + adder_for_buckets
idx_min_rows = idx_min_rows + adder_for_buckets
idx_max_rows = idx_max_rows.expand(*bucketed_tensor_sizes).contiguous().view(-1)\
[0:scaling_function.original_tensor_length]
idx_min_rows = idx_min_rows.expand(*bucketed_tensor_sizes).contiguous().view(-1)\
[0:scaling_function.original_tensor_length]
one_vector = torch.ones(scaling_function.original_tensor_length)
tensor = tensor.view(-1)
grad_output = grad_output.view(-1)
#create the sparse matrix grad alpha/grad v
index_sparse_max = torch.LongTensor(2, scaling_function.original_tensor_length)
index_sparse_min = torch.LongTensor(2, scaling_function.original_tensor_length)
index_sparse_max[0,:] = torch.arange(0, scaling_function.original_tensor_length)
index_sparse_min[0, :] = torch.arange(0, scaling_function.original_tensor_length)
index_sparse_max[1,:] = idx_max_rows
index_sparse_min[1,:] = idx_min_rows
grad_sparse_max = torch.sparse.FloatTensor(index_sparse_max,
one_vector,
torch.Size([scaling_function.original_tensor_length]*2))
grad_sparse_min = torch.sparse.FloatTensor(index_sparse_min,
one_vector,
torch.Size([scaling_function.original_tensor_length]*2))
grad_alpha = grad_sparse_max - grad_sparse_min
if tensor.is_cuda:
grad_alpha = grad_alpha.cuda()
output = grad_output + \
torch.mm(grad_alpha.t(),
(grad_output*(quantized_tensor_unscaled-(tensor-beta)/alpha).view(-1)).view(-1,1))
output = output.view(scaling_function.original_tensor_size)
del self.saved_for_backward
self.saved_for_backward = None
return output
class nonUniformQuantization_variable(torch.autograd.Function):
def __init__(self, max_element = False, subtract_mean = False,
modify_in_place = False, bucket_size=None, pre_process_tensors=False, tensor=None):
if pre_process_tensors is True and (tensor is None):
raise ValueError('To pre-process tensors you need to pass the tensor and the scaling function options')
super(nonUniformQuantization_variable, self).__init__()
self.maxElementAllowed = max_element
self.subtractMean = subtract_mean
self.modifyInPlace = modify_in_place
self.bucket_size = bucket_size
self.savedForBackward = None
self.pre_process_tensors = pre_process_tensors
#variables used for preprocessing
self.search_sorted_obj = None
self.tensors_info = None
self.scaling_function = None
if self.pre_process_tensors:
self.preprocess(tensor)
def preprocess(self, tensor):
if not self.modifyInPlace:
tensor = tensor.clone()
scaling_function = quantization.ScalingFunction(type_scaling='linear', max_element=self.maxElementAllowed,
subtract_mean=self.subtractMean, bucket_size=self.bucket_size, modify_in_place=True)
tensor = scaling_function.scale_down(tensor)
tensor_type = tensor.type()
is_tensor_cuda = tensor.is_cuda
if is_tensor_cuda:
numpyTensor = tensor.view(-1).cpu().numpy()
else:
numpyTensor = tensor.view(-1).numpy()
self.search_sorted_obj = SearchSorted(numpyTensor.copy())
self.tensors_info = (tensor_type, is_tensor_cuda)
self.scaling_function = scaling_function
def forward(self, inputTensor, listQuantizationPoints):
if listQuantizationPoints.dim() != 1:
raise ValueError('listPoints must be a 1-D tensor')
numPoints = listQuantizationPoints.size()[0]
if self.pre_process_tensors:
quantizedTensor, indicesOfQuantization, scaling_function = nonUniformQuantization(
None, listQuantizationPoints, modify_in_place=self.modifyInPlace,
max_element=self.maxElementAllowed, subtract_mean=self.subtractMean, bucket_size=self.bucket_size,
pre_processed_values=True, search_sorted_obj=self.search_sorted_obj,
scaling_function=self.scaling_function, tensors_info=self.tensors_info)
else:
quantizedTensor, indicesOfQuantization, scaling_function = nonUniformQuantization(
inputTensor, listQuantizationPoints, modify_in_place=self.modifyInPlace,
max_element=self.maxElementAllowed, subtract_mean=self.subtractMean, bucket_size=self.bucket_size,
pre_processed_values=False, search_sorted_obj=None, scaling_function=None, tensors_info=None)
scalingFactor = scaling_function.alpha
self.savedForBackward = {'indices':indicesOfQuantization, 'numPoints':numPoints, 'scalingFactor':scalingFactor}
return quantizedTensor
def backward(self, grad_output):
grad_inputTensor = grad_output
#grad_output is delta Loss / delta output.
#we want deltaLoss / delta listPoints, so we need to do
#deltaLoss / delta ListPoints = Sum_j deltaLoss/delta output_j * delta output_j/delta ListPoints
if self.savedForBackward is None:
raise ValueError('Need savedIndices to be able to call backward()')
indices = self.savedForBackward['indices']
numPoints = self.savedForBackward['numPoints']
scalingFactor = self.savedForBackward['scalingFactor']
gradPointTensor = torch.zeros(numPoints)
if quantization.USE_CUDA: gradPointTensor = gradPointTensor.cuda()
#Remember that delta output_j/delta ListPoints = 0 if the point is quantized to that particular element,
#otherwise it is equal to the scaling factor. But the scaling factor is different (depending on the
#bucket). So we modify the gradient by multiplying by the appropriate scaling factor,
#so that we can group all the indices together in an efficient fashion. The more obvious (but slower)
#way of doing this would be to check to which bucket does the current element belong to, and then
#add to the gradient the scaling factor for that bucket multiplied by 1 or 0 depending on the
#index (1 if it is the index it has been quantized to, 0 otherwise)
modified_gradient = grad_output.clone()
modified_gradient = qhf.create_bucket_tensor(modified_gradient, self.bucket_size)
modified_gradient *= scalingFactor.expand_as(modified_gradient)
modified_gradient = modified_gradient.view(-1)[0:grad_output.numel()].view(grad_output.size())
        # To avoid this loop, one can do something like
        # unqID, idx, IDsums = np.unique(indices, return_counts=True, return_inverse=True)
        # value_sums = np.bincount(idx, modified_gradient.ravel())
        # I don't see an analogue of np.unique in torch, so for now this loop is good enough.
for idx in range(numPoints):
gradPointTensor[idx] = torch.masked_select(modified_gradient, indices == idx).sum()
        self.savedForBackward = None  # reset it to None
return grad_inputTensor, gradPointTensor
class SearchSorted:
def __init__(self, tensor, use_k_optimization=True):
'''
use_k_optimization requires storing 4x the size of the tensor.
If use_k_optimization is True, the class will assume that successive calls will be made with similar k.
        When this happens, we can cut the running time significantly by storing additional variables. If it won't be
        called repeatedly with similar k, set the flag to False, as otherwise it would just consume more memory for no
        good reason.
'''
indices_sort = np.argsort(tensor)
self.sorted_tensor = tensor[indices_sort]
self.inv_indices_sort = np.argsort(indices_sort)
self.use_k_optimization = use_k_optimization
if use_k_optimization:
self.indices_sort = indices_sort
self.previous_indices_results = None
self.prev_idx_A_k_pair = None
def query(self, k):
midpoints = k[:-1] + np.diff(k) / 2
idx_count = np.searchsorted(self.sorted_tensor, midpoints)
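        # midpoints are the boundaries between consecutive quantization points (k is assumed to be
        # sorted here); idx_count[i] is how many sorted-tensor elements fall below boundary i, so
        # every element between two consecutive counts is closest to the same quantization point.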
idx_A_k_pair = []
count = 0
old_obj = 0
for obj in idx_count:
if obj != old_obj:
idx_A_k_pair.append((obj, count))
old_obj = obj
count += 1
if not self.use_k_optimization or self.previous_indices_results is None:
# creates the index matrix in the sorted case
final_indices = self._create_indices_matrix(idx_A_k_pair, self.sorted_tensor.shape, len(k))
# and now unsort it to match the original tensor position
indicesClosest = final_indices[self.inv_indices_sort]
if self.use_k_optimization:
self.prev_idx_A_k_pair = idx_A_k_pair
self.previous_indices_results = indicesClosest
return indicesClosest
old_indices_unsorted = self._create_indices_matrix(self.prev_idx_A_k_pair, self.sorted_tensor.shape, len(k))
new_indices_unsorted = self._create_indices_matrix(idx_A_k_pair, self.sorted_tensor.shape, len(k))
mask = new_indices_unsorted != old_indices_unsorted
self.prev_idx_A_k_pair = idx_A_k_pair
self.previous_indices_results[self.indices_sort[mask]] = new_indices_unsorted[mask]
indicesClosest = self.previous_indices_results
return indicesClosest
@staticmethod
def _create_indices_matrix(idx_A_k_pair, matrix_shape, len_quant_points):
old_idx = 0
final_indices = np.zeros(matrix_shape, dtype=int)
for idx_A, idx_k in idx_A_k_pair:
final_indices[old_idx:idx_A] = idx_k
old_idx = idx_A
final_indices[old_idx:] = len_quant_points - 1
return final_indices
|
# Exercise 085
numeros = [[], []]
for c in range(1, 8):
    n = int(input(f'Enter value number {c}: '))
if n % 2 == 0:
numeros[0].append(n)
else:
numeros[1].append(n)
print('=-' * 25)
print(f'The even values entered were: {sorted(numeros[0])}')
print(f'The odd values entered were: {sorted(numeros[1])}')
|
"""module for inputname modifier."""
from ..fdef import Fdef, EnumInput
from .modifier import Modifier
class InputNameModifier(Modifier):
"""Input name modifier tweaks enum modifier's behavior."""
def define(self, fdef: Fdef) -> None:
if fdef.enum_input is None:
fdef._enum_input = EnumInput.NAME
else:
fdef._enum_input = fdef.enum_input | EnumInput.NAME
return
|
'''
This module contains functions used to import a csv into a pandas DataFrame and to
perform basic validation checks on it.
'''
import pandas as pd
# load csv to data DataFrame
def create_dataframe(url):
'''Create a DataFrame using csv data online
Args:
url (str): url that points to a csv data file
Return:
d_f (pandas DataFrame): DataFrame of the data in url
'''
d_f = pd.read_csv(url)
return d_f
# check data frame for columns in columns, at least 10 entries and that all
# datatypes are the same
def test_create_dataframe(d_f, columns):
'''Test of df for the following attributes:
the columns in columns
at least 10 entries
that all datatypes in the DataFrame are the same
Args:
d_f (pandas DataFrame): a DataFrame to be tested
columns (list of strings): names of columns to check for in df
Returns:
a_bool (bool): True if df passes tests, False otherwise
'''
# set output to True, then look for conditions to turn False.
a_bool = True
# check whether data frame contains only the columns listed in input
# 'columns'
    # first, sort column list alphabetically in case columns
# is not in same order as the columns in df
    if sorted(d_f.columns.tolist()) != sorted(columns):
a_bool = False
# check number of entries in data frame
elif len(d_f) <= 10:
a_bool = False
    # take 'set' of dtypes in df. If its length is 1, all data types are the same.
elif len(set(d_f.dtypes)) != 1:
a_bool = False
return a_bool
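# Example usage (illustrative; the URL and column names are placeholders):
#   df = create_dataframe('https://example.com/data.csv')
#   ok = test_create_dataframe(df, ['col_a', 'col_b'])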
|
# -*- coding: UTF-8 -*-
# !/usr/bin/env python
'''
author: MRN6
updated: Jul. 18th, 2020 Sat. 03:40PM
LICENSE: CC0
'''
import re
class QPath:
def __init__(self):
pass
def minimalpair(self, tag, html, position):
        # minimal symmetric-pair method: balance opening and closing tags to find the matching closing tag
check_start_tag='<'+tag
check_end_tag=''
if tag=='meta':
check_end_tag='>'
else:
check_end_tag='</'+tag+'>'
start_tag_length=len(check_start_tag)
end_tag_length=len(check_end_tag)
length=len(html)
index=position
start=html.find(check_start_tag, index)
if start>=0:
require=1
while require>0 and (index<length-1-start_tag_length and index<length-1-end_tag_length) :
index=index+1
if html[index:index+start_tag_length]==check_start_tag:
require=require+1
if html[index:index+end_tag_length]==check_end_tag:
require=require-1
return html[position:index+end_tag_length]
def match(self, path=None, html=None):
if path is None or html is None or len(path.strip())<=0 or len(html.strip())<=0:
return []
if not '//' in path:
return []
rules=path.split('//')
matches=[]
submatches=[]
l=len(rules)
c=0
match_html=html
for rule in rules:
c=c+1
if len(rule.strip())<1:
continue
if submatches is not None and len(submatches)>0:
t=submatches
submatches=[]
for submatch in t:
if len(submatch.strip())<=0:
continue
attributecontent=''
if ':' in rule:
ruledatas=rule.split(':')
tag=ruledatas[0]
attributedatas=ruledatas[1].split('=')
attribute=attributedatas[0]
value=attributedatas[1]
attributecontent=attribute+'="'+value+'[^"]*"'
else:
tag=rule
tempmatches=re.findall('<'+tag+'[^<>]*'+attributecontent, submatch)
if tempmatches is None or tempmatches==[]:
continue
index=0
#print('[match-end]', tempmatches, '[/match-end]')
for tempmatch in tempmatches:
position=submatch.find(tempmatch, index)
while position>=0 and index<len(submatch):
match=self.minimalpair(tag, submatch, position)
index=position+len(match)
if c==l:
matches.append(match)
else:
submatches.append(match)
position=submatch.find(tempmatch, index)
else:
attributecontent=''
attribute=None
value=None
if ':' in rule:
ruledatas=rule.split(':')
tag=ruledatas[0]
attributedatas=ruledatas[1].split('=')
attribute=attributedatas[0]
value=attributedatas[1]
attributecontent=attribute+'="'+value+'[^"]*"'
else:
tag=rule
tempmatches=re.findall('<'+tag+'[^<>]*'+attributecontent, match_html)
if tempmatches is None or tempmatches==[]:
return []
index=0
#print('[match-root]', tempmatches, '[/match-root]')
for tempmatch in tempmatches:
if not tag in tempmatch or (attribute is not None and value is not None and not attribute+'="'+value+'"' in tempmatch):
continue
position=match_html.find(tempmatch, index)
while position>=0 and index<len(html)-1:
match=self.minimalpair(tag, match_html, position)
#print(position, '[match-sub]', match, '[/match-sub]')
index=position+len(match)
if c==l:
matches.append(match)
else:
submatches.append(match)
position=match_html.find(tempmatch, index)
return matches
qpath=QPath()
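# Example (illustrative): a path like '//div:class=item//a' first extracts every balanced
# <div class="item"...>...</div> block from the html, then returns the <a>...</a> blocks found inside:
#   links = qpath.match(path='//div:class=item//a', html=some_html_string)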
|
"""
The MIT License (MIT)
Copyright (c) 2021 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
import numpy as np
import copy
import random
from collections import OrderedDict
from utilities import train_model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
MAX_MODEL_SIZE = 500000
CANDIDATE_EVALUATIONS = 500
EVAL_EPOCHS = 3
FINAL_EPOCHS = 20
layer_types = ['DENSE', 'CONV2D', 'MAXPOOL2D']
param_values = dict([('size', [16, 64, 256, 1024, 4096]),
('activation', ['relu', 'tanh', 'elu']),
('kernel_size', [(1, 1), (2, 2), (3, 3), (4, 4)]),
('stride', [(1, 1), (2, 2), (3, 3), (4, 4)]),
('dropout', [0.0, 0.4, 0.7, 0.9])])
layer_params = dict([('DENSE', ['size', 'activation', 'dropout']),
('CONV2D', ['size', 'activation',
'kernel_size', 'stride',
'dropout']),
('MAXPOOL2D', ['kernel_size', 'stride',
'dropout'])])
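# A model definition is a list of layer dicts, for example (illustrative):
#   [{'layer_type': 'CONV2D', 'size': 64, 'activation': 'relu',
#     'kernel_size': (3, 3), 'stride': (2, 2), 'dropout': 0.0},
#    {'layer_type': 'MAXPOOL2D', 'kernel_size': (2, 2), 'stride': (2, 2), 'dropout': 0.0},
#    {'layer_type': 'DENSE', 'size': 256, 'activation': 'relu', 'dropout': 0.4}]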
# Load training dataset into a single batch to compute mean and stddev.
transform = transforms.Compose([transforms.ToTensor()])
trainset = CIFAR10(root='./pt_data', train=True, download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=len(trainset), shuffle=False)
data = next(iter(trainloader))
mean = data[0].mean()
stddev = data[0].std()
# Load and standardize training and test dataset.
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean, stddev)])
trainset = CIFAR10(root='./pt_data', train=True, download=True, transform=transform)
testset = CIFAR10(root='./pt_data', train=False, download=True, transform=transform)
# Methods to create a model definition.
def generate_random_layer(layer_type):
layer = {}
layer['layer_type'] = layer_type
params = layer_params[layer_type]
for param in params:
values = param_values[param]
layer[param] = values[np.random.randint(0, len(values))]
return layer
def generate_model_definition():
layer_count = np.random.randint(2, 9)
non_dense_count = np.random.randint(1, layer_count)
layers = []
for i in range(layer_count):
if i < non_dense_count:
layer_type = layer_types[np.random.randint(1, 3)]
layer = generate_random_layer(layer_type)
else:
layer = generate_random_layer('DENSE')
layers.append(layer)
return layers
def compute_weight_count(layers):
last_shape = (32, 32, 3)
total_weights = 0
for layer in layers:
layer_type = layer['layer_type']
if layer_type == 'DENSE':
size = layer['size']
weights = size * (np.prod(last_shape) + 1)
last_shape = (layer['size'])
else:
stride = layer['stride']
if layer_type == 'CONV2D':
size = layer['size']
kernel_size = layer['kernel_size']
weights = size * ((np.prod(kernel_size) *
last_shape[2]) + 1)
last_shape = (np.ceil(last_shape[0]/stride[0]),
np.ceil(last_shape[1]/stride[1]),
size)
elif layer_type == 'MAXPOOL2D':
weights = 0
last_shape = (np.ceil(last_shape[0]/stride[0]),
np.ceil(last_shape[1]/stride[1]),
last_shape[2])
total_weights += weights
total_weights += ((np.prod(last_shape) + 1) * 10)
return total_weights
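# For example (illustrative): a single DENSE layer of size 256 applied to the flattened 32x32x3
# input contributes 256 * (32*32*3 + 1) = 786,688 weights, plus (256 + 1) * 10 = 2,570 for the
# final 10-way output layer.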
# Methods to create and evaluate model based on model definition.
def add_layer(model_list, params, prior_type, last_shape):
layer_num = len(model_list)
act = None
layer_type = params['layer_type']
if layer_type == 'DENSE':
if prior_type != 'DENSE':
model_list.append(("layer" + str(layer_num), nn.Flatten()))
layer_num += 1
last_shape = int(np.prod(last_shape))
size = params['size']
act = params['activation']
model_list.append(("layer" + str(layer_num), nn.Linear(last_shape, size)))
last_shape = (size)
elif layer_type == 'CONV2D':
size = params['size']
act = params['activation']
kernel_size = params['kernel_size']
stride = params['stride']
padding = int(kernel_size[0] / 2)
model_list.append(("layer" + str(layer_num),
nn.Conv2d(last_shape[2], size, kernel_size, stride=stride, padding=padding)))
last_shape = (int((last_shape[0]+2*padding-(kernel_size[0]-1)-1)/stride[0]+1),
int((last_shape[1]+2*padding-(kernel_size[1]-1)-1)/stride[1]+1),
size)
elif layer_type == 'MAXPOOL2D':
kernel_size = params['kernel_size']
stride = params['stride']
padding = int(kernel_size[0] / 2)
model_list.append(("layer" + str(layer_num),
nn.MaxPool2d(kernel_size, stride=stride, padding=padding)))
last_shape = (int((last_shape[0]+2*padding-(kernel_size[0]-1)-1)/stride[0]+1),
int((last_shape[1]+2*padding-(kernel_size[1]-1)-1)/stride[1]+1),
last_shape[2])
layer_num += 1
if(act != None):
if (act == 'relu'):
model_list.append(("layer" + str(layer_num), nn.ReLU()))
elif (act == 'elu'):
model_list.append(("layer" + str(layer_num), nn.ELU()))
elif (act == 'tanh'):
model_list.append(("layer" + str(layer_num), nn.Tanh()))
layer_num += 1
dropout = params['dropout']
if(dropout > 0.0):
model_list.append(("layer" + str(layer_num), nn.Dropout(p=dropout)))
return last_shape
def create_model(layers):
model_list = []
prev_layer = 'NONE'
last_shape = (32, 32, 3)
for layer in layers:
last_shape = add_layer(model_list, layer, prev_layer, last_shape)
prev_layer = layer['layer_type']
model_list.append(("layer" + str(len(model_list)), nn.Linear(last_shape, 10)))
model = nn.Sequential(OrderedDict(model_list))
return model
def create_and_evaluate_model(model_definition):
weight_count = compute_weight_count(model_definition)
if weight_count > MAX_MODEL_SIZE:
return 0.0
model = create_model(model_definition)
# Loss function and optimizer
optimizer = torch.optim.Adam(model.parameters())
loss_function = nn.CrossEntropyLoss()
train_result = train_model(model, device, EVAL_EPOCHS, 64, trainset, testset,
optimizer, loss_function, 'acc')
acc = train_result[1]
print('Size: ', weight_count)
print('Accuracy: %5.2f' %acc)
del model
return acc
# Pure random search.
np.random.seed(7)
val_accuracy = 0.0
for i in range(CANDIDATE_EVALUATIONS):
valid_model = False
while(valid_model == False):
model_definition = generate_model_definition()
acc = create_and_evaluate_model(model_definition)
if acc > 0.0:
valid_model = True
if acc > val_accuracy:
best_model = model_definition
val_accuracy = acc
print('Random search, best accuracy: %5.2f' %val_accuracy)
# Helper method for hill climbing algorithm.
def tweak_model(model_definition):
layer_num = np.random.randint(0, len(model_definition))
last_layer = len(model_definition) - 1
for first_dense, layer in enumerate(model_definition):
if layer['layer_type'] == 'DENSE':
break
if np.random.randint(0, 2) == 1:
delta = 1
else:
delta = -1
if np.random.randint(0, 2) == 1:
# Add/remove layer.
if len(model_definition) < 3:
delta = 1 # Layer removal not allowed
if delta == -1:
# Remove layer.
if layer_num == 0 and first_dense == 1:
layer_num += 1 # Require >= 1 non-dense layer
if layer_num == first_dense and layer_num == last_layer:
layer_num -= 1 # Require >= 1 dense layer
del model_definition[layer_num]
else:
# Add layer.
if layer_num < first_dense:
layer_type = layer_types[np.random.randint(1, 3)]
else:
layer_type = 'DENSE'
layer = generate_random_layer(layer_type)
model_definition.insert(layer_num, layer)
else:
# Tweak parameter.
layer = model_definition[layer_num]
layer_type = layer['layer_type']
params = layer_params[layer_type]
param = params[np.random.randint(0, len(params))]
current_val = layer[param]
values = param_values[param]
index = values.index(current_val)
max_index = len(values)
new_val = values[(index + delta) % max_index]
layer[param] = new_val
# Hill climbing, starting from best model from random search.
model_definition = best_model
for i in range(CANDIDATE_EVALUATIONS):
valid_model = False
while(valid_model == False):
old_model_definition = copy.deepcopy(model_definition)
tweak_model(model_definition)
acc = create_and_evaluate_model(model_definition)
if acc > 0.0:
valid_model = True
else:
model_definition = old_model_definition
if acc > val_accuracy:
best_model = copy.deepcopy(model_definition)
val_accuracy = acc
else:
model_definition = old_model_definition
print('Hill climbing, best accuracy: %5.2f' %val_accuracy)
# Evaluate final model for larger number of epochs.
model = create_model(best_model)
optimizer = torch.optim.Adam(model.parameters())
loss_function = nn.CrossEntropyLoss()
train_result = train_model(model, device, FINAL_EPOCHS, 64, trainset, testset,
optimizer, loss_function, 'acc')
|
"""
ipcai2016
Copyright (c) German Cancer Research Center,
Computer Assisted Interventions.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE for details
"""
'''
Created on Oct 19, 2015
@author: wirkert
'''
from scipy.interpolate import interp1d
import sklearn.preprocessing
import pandas as pd
import numpy as np
def fold_by_sliding_average(df, window_size):
"""take a batch and apply a sliding average with given window size to
the reflectances.
window_size is elements to the left and to the right.
There will be some boundary effect on the edges."""
# next line does the folding.
df.reflectances = pd.rolling_mean(df.reflectances.T, window_size,
center=True).T
# let's get rid of NaN columns which are created at the boundaries
df.dropna(axis="columns", inplace=True)
return df
def fold_by_fwhm(df, fwhm):
"""
fold reflectances given in dataframe by fwhm. assumes rectangular window.
:param df: created simulation batch
:param fwhm: full width at half maximum [m]
:return:
"""
    # build an interpolator using the information provided by the dataframe's
# reflectance column
wavelengths = df.reflectances.columns.astype(float)
interpolator = interp1d(wavelengths,
df.reflectances.as_matrix(), assume_sorted=False,
bounds_error=False)
new_reflectances = []
for w in wavelengths:
waveband = np.linspace(w - fwhm/2., w + fwhm/2., 100)
new_r = np.sum(interpolator(waveband), axis=1) / len(waveband)
new_reflectances.append(new_r)
new_reflectances = np.array(new_reflectances).T
switch_reflectances(df, wavelengths, new_reflectances)
# let's get rid of NaN columns which are created at the boundaries
df.dropna(axis="columns", inplace=True)
def switch_reflectances(df, new_wavelengths, new_reflectances):
"""
TODO: document and test
:param df:
:param new_wavelengths:
:param new_reflectances:
:return:
"""
df.drop(df["reflectances"].columns, axis=1, level=1, inplace=True)
for i, nw in enumerate(new_wavelengths):
df["reflectances", nw] = new_reflectances[:, i]
return df
def normalize_reflectances(df):
r = df["reflectances"]
r = sklearn.preprocessing.normalize(r, norm="l1")
df["reflectances"] = r
return df
def interpolate_wavelengths(df, new_wavelengths):
""" interpolate image data to fit new_wavelengths. Current implementation
    performs simple linear interpolation. Neither the existing nor the new wavelengths
need to be sorted. """
    # build an interpolator using the information provided by the dataframe's
# reflectance column
interpolator = interp1d(df.reflectances.columns.astype(float),
df.reflectances.as_matrix(), assume_sorted=False,
bounds_error=False)
# use this to create new reflectances
new_reflectances = interpolator(new_wavelengths)
# build a new dataframe out of this information and set the original df
# to the new information. This seems hacky, can't it be done easier?
switch_reflectances(df, new_wavelengths, new_reflectances)
return df
def to_df(msi):
"""
TODO: document, write and test
:param msi:
:return:
"""
pass
|
## Copyright (c) 2022, Team FirmWire
## SPDX-License-Identifier: BSD-3-Clause
from collections import OrderedDict
from abc import ABC, abstractmethod
class FirmWireSOC(ABC):
@property
@abstractmethod
def peripherals():
pass
@property
@abstractmethod
def name():
pass
class SOCPeripheral(object):
def __init__(self, cls, address, size, **kwargs):
self._cls = cls
self._address = address
self._size = size
self._attr = kwargs
self._created_peripheral = None
def __call__(self, name, address, size, **kwargs):
        # XXX: peripherals that are class properties are single-instance; this breaks here, since only one instance can ever be realized
assert (
self._created_peripheral is None
), "SOCPeripheral can only be realized once"
self._created_peripheral = self._cls(name, address, size, **kwargs)
return self._created_peripheral
def resolve(self):
"""Return a reference to the created peripheral object"""
assert self._created_peripheral is not None, "SOCPeripheral was never created"
return self._created_peripheral
################################
SOC_BY_NAME = OrderedDict()
def get_soc(vendor, name):
vendor_socs = SOC_BY_NAME.get(vendor)
if vendor_socs is None:
return None
return vendor_socs.get(name)
def get_socs(vendor=None):
if vendor:
return OrderedDict(SOC_BY_NAME.get(vendor))
else:
return OrderedDict(SOC_BY_NAME)
def register_soc(vendor, cls):
global SOC_BY_NAME
assert issubclass(cls, FirmWireSOC), "SOC must be derived from FirmWireSOC"
if vendor not in SOC_BY_NAME:
SOC_BY_NAME[vendor] = OrderedDict()
assert cls.name not in SOC_BY_NAME[vendor], (
"SOC registered twice or with duplicate name %s" % cls.name
)
SOC_BY_NAME[vendor][cls.name] = cls
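# Example registration (illustrative; ExampleSOC and MyPeripheral are hypothetical):
#   class ExampleSOC(FirmWireSOC):
#       name = "example_soc"
#       peripherals = [SOCPeripheral(MyPeripheral, 0x40000000, 0x1000)]
#   register_soc("example_vendor", ExampleSOC)
#   soc_cls = get_soc("example_vendor", "example_soc")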
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import wave
import datetime
import pyaudio
import RPi.GPIO as GPIO
import requests
from test_nn import AudioNetTester
# Recording device number
DEVICE = 2
# Pins for the LEDs and the tact switch
SWITCH_PIN = 21
GREEN_PIN = 12
YELLOW_PIN = 16
RED_PIN = 20
# IP address of the IDCF Cloud server
IDCF_IP = 'XXX.XXX.XXX.XXX'
# UUID and token of the Meshblu trigger
UUID = "trigger-uuid"
TOKEN = "trigger-token"
GPIO.setmode(GPIO.BCM)
GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(GREEN_PIN, GPIO.OUT)
GPIO.setup(YELLOW_PIN, GPIO.OUT)
GPIO.setup(RED_PIN, GPIO.OUT)
def check_devices():
    u'''Check the audio device numbers'''
p = pyaudio.PyAudio()
count = p.get_device_count()
devices = []
for i in range(count):
devices.append(p.get_device_info_by_index(i))
for i, dev in enumerate(devices):
print (i, dev['name'])
def record_wav():
    u'''Record a wav file'''
FORMAT = pyaudio.paInt16
    CHANNELS = 1  # mono
    RATE = 44100  # sample rate
    CHUNK = 2**13  # number of frames per buffer
    RECORD_SECONDS = 3  # recording duration in seconds
now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
WAVE_OUTPUT_FILENAME = "voice/%s.wav" % (now)
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
                        input_device_index=DEVICE,  # device index number
frames_per_buffer=CHUNK)
print ("recording...")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print ("finished recording")
stream.stop_stream()
stream.close()
audio.terminate()
waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
return WAVE_OUTPUT_FILENAME
def blink_led(normal, cold, sleepy):
    u'''Show the diagnosis result on the LEDs'''
if normal > cold and normal > sleepy:
GPIO.output(GREEN_PIN, GPIO.HIGH)
time.sleep(5.0)
GPIO.output(GREEN_PIN, GPIO.LOW)
elif cold > sleepy:
GPIO.output(RED_PIN, GPIO.HIGH)
time.sleep(5.0)
GPIO.output(RED_PIN, GPIO.LOW)
else:
GPIO.output(YELLOW_PIN, GPIO.HIGH)
time.sleep(5.0)
GPIO.output(YELLOW_PIN, GPIO.LOW)
def send_data(normal, cold, sleepy, wav_file):
    u'''Send the data to the IDCF Cloud server'''
print("Send trigger")
trigger_url = "http://%s/data/%s" % (IDCF_IP, UUID)
headers = {
"meshblu_auth_uuid": UUID,
"meshblu_auth_token": TOKEN
}
payload = {
'trigger': 'on', 'normal': normal,
'cold': cold, 'sleepy': sleepy
}
requests.post(trigger_url, headers=headers, data=payload)
print("Sending data finished")
def main():
last_pin_status = 0
tester = AudioNetTester()
print("Waiting")
while True:
pin_status = GPIO.input(SWITCH_PIN)
if last_pin_status == 1 and pin_status == 0:
time.sleep(0.1)
GPIO.output(GREEN_PIN, GPIO.HIGH)
wav_file = record_wav()
GPIO.output(GREEN_PIN, GPIO.LOW)
normal, cold, sleepy = tester.test(wav_file)
blink_led(normal, cold, sleepy)
send_data(normal, cold, sleepy, wav_file)
print("Waiting")
last_pin_status = pin_status
time.sleep(0.1)
GPIO.cleanup()
if __name__ == '__main__':
main()
|
import sys
import json
def read_data(data_filename):
with open(data_filename) as data_file:
data = json.load(data_file)
return data
def tree_stats(data):
span2counts = {}
for instance in data:
for question in instance['questions']:
support_text = instance['support'][0]['text']
support_char_offsets = instance['support'][0]['tokens']
support_postags = instance['support'][0]['postags']
for answer in question['answers']:
astart,aend = answer['span']
token_start_id = token_start_idx_from_char_offset(astart, support_char_offsets)
token_end_id = token_end_idx_from_char_offset(aend, support_char_offsets)
                sent_start_id = None  # TODO: map token_start_id to its sentence index
                sent_end_id = None  # TODO: map token_end_id to its sentence index
                # stats on answers which cross sentence boundaries
                # if same sentence, get that sentence's tree; what constituent corresponds to the answer?
def pos_stats(data):
pos2counts = {}
for instance in data:
for question in instance['questions']:
support_text = instance['support'][0]['text']
support_char_offsets = instance['support'][0]['tokens']
support_postags = instance['support'][0]['postags']
for answer in question['answers']:
astart,aend = answer['span']
token_start_id = token_start_idx_from_char_offset(astart, support_char_offsets)
token_end_id = token_end_idx_from_char_offset(aend, support_char_offsets)
answer_postag_str = ' '.join(support_postags[token_start_id:token_end_id+1])
if answer_postag_str in pos2counts:
pos2counts[answer_postag_str] += 1
else:
pos2counts[answer_postag_str] = 1
pairs = sorted(pos2counts.items(), key=lambda x: -1 * x[1])
total = sum(pos2counts.values())
tally = 0
count = 0
print('Top Freq % PosTag Sequence')
print('--------------------------------------')
for k,v in pairs:
tally += v
count += 1
print('{0:<5} {1:>5} {2:>4.1f} {3}'.format(count, v, 100.0 * tally / total, k))
def token_start_idx_from_char_offset(char_offset, token_offsets):
for tidx, to in enumerate(token_offsets):
if char_offset < to[1]:
return tidx
print('Incorrect char offset {} into token offsets {}'.format(char_offset, token_offsets))
sys.exit()
return -1
def token_end_idx_from_char_offset(char_offset, token_offsets):
for tidx, to in enumerate(token_offsets):
if char_offset <= to[1]:
return tidx
print('Incorrect char offset {} into token offsets {}'.format(char_offset, token_offsets))
sys.exit()
return -1
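# Illustrative example (assuming each token offset is a (start_char, end_char) pair):
# with token_offsets = [(0, 3), (4, 8)], char offset 3 maps to token 1 as a start index (3 < 8)
# but to token 0 as an inclusive end index (3 <= 3).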
def main():
import sys
if len(sys.argv) == 2:
data = read_data(sys.argv[1])
pos_stats(data)
if __name__ == "__main__": main()
|
# Please don't forget to read README.md, otherwise you may run into trouble
import pyautogui
import time
X_Axis=int(input("Enter X axis:-"))
Y_Axis=int(input("Enter Y axis:-"))
print("It Will Start Bombarding in 10sec")
print("Clear the place where it wants to click")
time.sleep(10)
print("u have more 5sec extra for clearing the place of clicking")
time.sleep(5)
print("starting")
while True:
pyautogui.leftClick(x=X_Axis,y=Y_Axis)
|
#!/usr/bin/env python3
# Dependencies from the Python 3 standard library:
import os
import pickle
import shutil
from subprocess import call
# Dependencies from the Scipy stack https://www.scipy.org/stackspec.html :
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import interpolation, map_coordinates, gaussian_filter
# Dependencies from https://github.com/AndrewGYork/rescan_line_sted :
import np_tif
import line_sted_tools as st
"""
Run this script in a Python 3 interpreter to produce the images for Figure 2.
The purpose of Figure 2
-----------------------
Line STED is much gentler than point STED, but has very anisotropic
resolution. To give isotropic resolution and compare fairly to
point-STED, line-STED needs to fuse multiple images taken with different
scan directions.
This module compares simulations of several operating points to
illustrate that for the same light dose, fusion of multiple line scan
directions gives higher image quality than point-STED. This seems to be
a general conclusion; as far as I can tell, line-STED is always better.
"""
def main():
output_prefix = os.path.abspath(os.path.join(
os.getcwd(), os.pardir, os.pardir, 'big_images', 'Figure_2_temp/'))
psfs, comparisons = calculate_psfs(output_prefix) # Lots going on; see below.
for im_name in ('cat', 'astronaut', 'lines', 'rings'):
print("\nTest image:", im_name)
if deconvolution_is_complete(psfs, output_prefix, im_name):
print("Using saved deconvolution results.")
else:
# Create a deconvolution object for each of the psfs created above
deconvolvers = {
name: st.Deconvolver(
psf, (os.path.join(output_prefix, im_name+'_'+name+'_')))
for name, psf in psfs.items()}
# Use our test object to create simulated data for each imaging
# method and dump the data to disk:
test_object = np_tif.tif_to_array(
'test_object_'+ im_name +'.tif').astype(np.float64)
for decon_object in deconvolvers.values():
decon_object.create_data_from_object(
test_object, total_brightness=5e10)
decon_object.record_data()
# Deconvolve each of our objects, saving occasionally:
print('Deconvolving...')
for i, save in st.logarithmic_progress(range(2**10 + 1)):
for decon_object in deconvolvers.values():
decon_object.iterate()
if save: decon_object.record_iteration()
print('\nDone deconvolving.')
create_figure(comparisons, output_prefix, im_name)
# Copy the final figures into their own directory:
src_dir = os.path.join(output_prefix, '1_Figures')
dst_dir = os.path.abspath(os.path.join(output_prefix,os.pardir,'figure_2'))
if os.path.isdir(dst_dir): shutil.rmtree(dst_dir)
shutil.copytree(src_dir, dst_dir)
def calculate_psfs(output_prefix):
"""Tune a family of comparable line-STED vs. point-STED psfs.
"""
comparison_filename = os.path.join(output_prefix, 'psf_comparisons.pkl')
if os.path.exists(comparison_filename):
print("Loading saved PSF comparisons...")
comparisons = pickle.load(open(comparison_filename, 'rb'))
else:
comparisons = {}
# Yes, I really did tune all the parameters below by hand so the
# comparisons came out perfectly. Ugh.
comparisons['1p0x_ld'] = psf_comparison_pair(
point_resolution_improvement=0.99, #Juuust under 1, ensures no STED
line_resolution_improvement=0.99,
point_emissions_per_molecule=4,
line_emissions_per_molecule=4,
line_scan_type='descanned',
line_num_orientations=1,
max_excitation_brightness=0.01) # Without STED, no reason to saturate
comparisons['1p0x_lr'] = psf_comparison_pair(
point_resolution_improvement=0.99, #Juuust under 1, ensures no STED
line_resolution_improvement=1.38282445,
point_emissions_per_molecule=4,
line_emissions_per_molecule=4,
line_scan_type='rescanned',
line_num_orientations=2,
max_excitation_brightness=0.01) # Without STED, no reason to saturate
comparisons['1p5x_ld'] = psf_comparison_pair(
point_resolution_improvement=1.5,
line_resolution_improvement=2.68125,
point_emissions_per_molecule=4,
line_emissions_per_molecule=2.825,
line_scan_type='descanned',
line_num_orientations=3)
comparisons['1p5x_lr'] = psf_comparison_pair(
point_resolution_improvement=1.5,
line_resolution_improvement=2.95425,
point_emissions_per_molecule=4,
line_emissions_per_molecule=2.618,
line_scan_type='rescanned',
line_num_orientations=3)
comparisons['2p0x_ld'] = psf_comparison_pair(
point_resolution_improvement=2,
line_resolution_improvement=4.04057,
point_emissions_per_molecule=4,
line_emissions_per_molecule=3.007,
line_scan_type='descanned',
line_num_orientations=4)
comparisons['2p0x_lr'] = psf_comparison_pair(
point_resolution_improvement=2,
line_resolution_improvement=4.07614,
point_emissions_per_molecule=4,
line_emissions_per_molecule=3.0227,
line_scan_type='rescanned',
line_num_orientations=4)
comparisons['2p5x_ld'] = psf_comparison_pair(
point_resolution_improvement=2.5,
line_resolution_improvement=5.13325,
point_emissions_per_molecule=4,
line_emissions_per_molecule=3.792,
line_scan_type='descanned',
line_num_orientations=6)
comparisons['2p5x_lr'] = psf_comparison_pair(
point_resolution_improvement=2.5,
line_resolution_improvement=5.15129,
point_emissions_per_molecule=4,
line_emissions_per_molecule=3.8,
line_scan_type='rescanned',
line_num_orientations=6)
comparisons['3p0x_ld'] = psf_comparison_pair(
point_resolution_improvement=3,
line_resolution_improvement=5.94563,
point_emissions_per_molecule=4,
line_emissions_per_molecule=5.034,
line_scan_type='descanned',
line_num_orientations=8)
comparisons['3p0x_lr'] = psf_comparison_pair(
point_resolution_improvement=3,
line_resolution_improvement=5.95587,
point_emissions_per_molecule=4,
line_emissions_per_molecule=5.0385,
line_scan_type='rescanned',
line_num_orientations=8)
comparisons['4p0x_ld'] = psf_comparison_pair(
point_resolution_improvement=4,
line_resolution_improvement=7.8386627,
point_emissions_per_molecule=4,
line_emissions_per_molecule=7.371,
line_scan_type='descanned',
line_num_orientations=10)
comparisons['4p0x_lr'] = psf_comparison_pair(
point_resolution_improvement=4,
line_resolution_improvement=7.840982,
point_emissions_per_molecule=4,
line_emissions_per_molecule=7.37195,
line_scan_type='rescanned',
line_num_orientations=10)
print("Done calculating PSFs.\n")
if not os.path.isdir(output_prefix): os.makedirs(output_prefix)
pickle.dump(comparisons, open(comparison_filename, 'wb'))
print("Light dose (saturation units):")
for c in sorted(comparisons.keys()):
print("%s point-STED:%6s (excitation),%9s (depletion)"%(
c,
"%0.2f"%(comparisons[c]['point']['excitation_dose']),
"%0.2f"%(comparisons[c]['point']['depletion_dose'])))
print("%7s-line-STED:%6s (excitation),%9s (depletion)"%(
c + '%3s'%('%i'%len(comparisons[c]['line_sted_psfs'])),
"%0.2f"%(comparisons[c]['line']['excitation_dose']),
"%0.2f"%(comparisons[c]['line']['depletion_dose'])))
psfs = {}
for c in comparisons.keys():
psfs[c + '_point_sted'] = comparisons[c]['point_sted_psf']
psfs[c + '_line_%i_angles_sted'%len(comparisons[c]['line_sted_psfs'])
] = comparisons[c]['line_sted_psfs']
return psfs, comparisons
def psf_comparison_pair(
point_resolution_improvement,
line_resolution_improvement,
point_emissions_per_molecule,
line_emissions_per_molecule,
line_scan_type, # 'descanned' or 'rescanned'
line_num_orientations,
max_excitation_brightness=0.25,
steps_per_improved_psf_width=4, # Actual sampling, for Nyquist
steps_per_excitation_psf_width=25, # Display sampling, for convenience
):
"""
Compute a pair of PSFs, line-STED and point-STED, so we can compare
their resolution and photodose.
"""
# Compute correctly sampled PSFs, to get emission levels and light
# dose:
print("Calculating point-STED psf...")
point = st.tune_psf(
psf_type='point',
scan_type='descanned',
desired_resolution_improvement=point_resolution_improvement,
desired_emissions_per_molecule=point_emissions_per_molecule,
max_excitation_brightness=max_excitation_brightness,
steps_per_improved_psf_width=steps_per_improved_psf_width,
verbose_results=True)
print("Calculating line-STED psf,", line_num_orientations, "orientations...")
line = st.tune_psf(
psf_type='line',
scan_type=line_scan_type,
desired_resolution_improvement=line_resolution_improvement,
desired_emissions_per_molecule=line_emissions_per_molecule,
max_excitation_brightness=max_excitation_brightness,
steps_per_improved_psf_width=steps_per_improved_psf_width,
verbose_results=True)
# Compute finely sampled PSFs, to interpolate the PSF shapes onto a
# consistent pixel size for display:
fine_point = st.psf_report(
psf_type='point',
excitation_brightness=point['excitation_brightness'],
depletion_brightness=point['depletion_brightness'],
steps_per_excitation_psf_width=steps_per_excitation_psf_width,
pulses_per_position=point['pulses_per_position'],
verbose=False)
fine_line = st.psf_report(
psf_type='line',
excitation_brightness=line['excitation_brightness'],
depletion_brightness=line['depletion_brightness'],
steps_per_excitation_psf_width=steps_per_excitation_psf_width,
pulses_per_position=line['pulses_per_position'],
verbose=False)
# Normalize the finely sampled PSFs to give the proper emission level:
def norm(x):
return x / x.sum()
point_sted_psf = [point['expected_emission'] *
norm(fine_point['psfs']['descan_sted'])]
assert line['pulses_per_position'] >= line_num_orientations
psf = {'descanned': 'descan_sted', 'rescanned': 'rescan_sted'}[line_scan_type]
line_sted_psfs = [1 / line_num_orientations *
line['expected_emission'] *
rotate(norm(fine_line['psfs'][psf]), angle)
for angle in np.arange(0, 180, 180/line_num_orientations)]
return {'point_sted_psf': point_sted_psf,
'line_sted_psfs': line_sted_psfs,
'point': point,
'line': line}
def deconvolution_is_complete(psfs, output_prefix, im_name):
"""Check for saved deconvolution results
"""
estimate_filenames = [os.path.join(output_prefix, im_name + '_' + name +
'_estimate_history.tif')
for name in sorted(psfs.keys())]
    ft_error_filenames = [os.path.join(output_prefix, im_name + '_' + name +
'_estimate_FT_error_history.tif')
for name in sorted(psfs.keys())]
for f in estimate_filenames + ft_error_filenames:
if not os.path.exists(f):
return False
return True
def rotate(x, degrees):
if degrees == 0:
return x
elif degrees == 90:
return np.rot90(np.squeeze(x)).reshape(x.shape)
else:
return np.clip(
interpolation.rotate(x, angle=degrees, axes=(1, 2), reshape=False),
0, 1.1 * x.max())
def create_figure(comparisons, output_prefix, im_name):
print("Constructing figure images...")
figure_dir = os.path.join(output_prefix, "1_Figures")
if not os.path.isdir(figure_dir): os.makedirs(figure_dir)
for c in sorted(comparisons.keys()):
# Calculating the filenames and loading the images isn't too
# hard but the code looks like a bucket full of crabs:
num_angles = len(comparisons[c]['line_sted_psfs'])
point_estimate_filename = os.path.join(
output_prefix, im_name+'_'+c+'_point_sted_estimate_history.tif')
point_estimate = np_tif.tif_to_array(
point_estimate_filename)[-1, :, :].astype(np.float64)
line_estimate_filename = os.path.join(
output_prefix,
im_name+'_'+c+'_line_%i_angles_sted_estimate_history.tif'%num_angles)
line_estimate = np_tif.tif_to_array(
line_estimate_filename)[-1, :, :].astype(np.float64)
true_object_filename = os.path.join(
output_prefix, im_name + '_' + c + '_point_sted_object.tif')
true_object = np_tif.tif_to_array(
true_object_filename)[0, :, :].astype(np.float64)
# Not that my "publication-quality" matplotlib figure-generating
# code is anything like readable...
for i in range(2):
# Comparison of point-STED and line-STED images that use the
# same excitation and depletion dose. Side by side, and
# overlaid with switching.
fig = plt.figure(figsize=(10, 4), dpi=100)
plt.suptitle("Image comparison")
ax = plt.subplot(1, 3, 1)
plt.imshow(point_estimate, cmap=plt.cm.gray)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
if c.startswith('1p0x'):
ax.set_xlabel("(a) Point confocal")
else:
ax.set_xlabel("(a) Point STED, R=%0.1f"%(
comparisons[c]['point']['resolution_improvement_descanned']))
ax = plt.subplot(1, 3, 2)
plt.imshow(line_estimate, cmap=plt.cm.gray)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
if c.startswith('1p0x'):
ax.set_xlabel("(b) %i-line confocal with equal dose"%num_angles)
else:
ax.set_xlabel("(b) %i-line STED with equal dose"%num_angles)
ax = plt.subplot(1, 3, 3)
plt.imshow((point_estimate, line_estimate)[i], cmap=plt.cm.gray)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
if c.startswith('1p0x'):
ax.set_xlabel(("(c) Comparison (point confocal)",
"(c) Comparison (%i-line confocal)"%num_angles)[i])
else:
ax.set_xlabel(("(c) Comparison (point STED)",
"(c) Comparison (%i-line STED)"%num_angles)[i])
plt.subplots_adjust(left=0, bottom=0, right=1, top=1,
wspace=0, hspace=0)
plt.savefig(os.path.join(
output_prefix, 'figure_2_'+ im_name +'_'+ c + '_%i.svg'%i),
bbox_inches='tight')
plt.close(fig)
imagemagick_failure = False
try: # Try to convert the SVG output to animated GIF
call(["convert",
"-loop", "0",
"-delay", "100", os.path.join(
output_prefix, 'figure_2_'+ im_name +'_'+ c + '_0.svg'),
"-delay", "100", os.path.join(
output_prefix, 'figure_2_'+ im_name +'_'+ c + '_1.svg'),
os.path.join(
figure_dir, 'figure_2_' + im_name + '_' + c + '.gif')])
            except Exception: # Don't expect this conversion to work on anyone else's system.
if not imagemagick_failure:
print("Gif conversion failed. Is ImageMagick installed?")
imagemagick_failure = True
# Error vs. spatial frequency for the point vs. line images
# plotted above
fig = plt.figure(figsize=(10, 4), dpi=100)
def fourier_error(x):
return (np.abs(np.fft.fftshift(np.fft.fftn(x - true_object))) /
np.prod(true_object.shape))
ax = plt.subplot2grid((12, 12), (0, 0), rowspan=8, colspan=8)
plt.imshow(np.log(1 + np.hstack((fourier_error(point_estimate),
fourier_error(line_estimate)))),
cmap=plt.cm.gray)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
n_x, n_y = true_object.shape
ang = (0) * 2*np.pi/360
rad = 0.3
x0, x1 = (0.5 + rad * np.array((-np.cos(ang), np.cos(ang)))) * n_x
y0, y1 = (0.5 + rad * np.array((-np.sin(ang), np.sin(ang)))) * n_y
ax.plot([x0, x1], [y0, y1], 'go-')
ax.plot([x0 + n_x, x1 + n_x], [y0, y1], 'b+-')
ang = (90/num_angles) * 2*np.pi/360
x0_2, x1_2 = (0.5 + rad * np.array((-np.cos(ang), np.cos(ang)))) * n_x
y0_2, y1_2 = (0.5 + rad * np.array((-np.sin(ang), np.sin(ang)))) * n_y
ax.plot([x0_2 + n_x, x1_2 + n_x], [y0_2, y1_2], 'b+:')
ax.set_xlabel("(d) Error vs. spatial frequency")
# Error vs. spatial frequency for lines extracted from the
# 2D fourier error plots
ax = plt.subplot2grid((12, 12), (2, 8), rowspan=6, colspan=4)
samps = 1000
xy = np.vstack((np.linspace(x0, x1, samps), np.linspace(y0, y1, samps)))
xy_2 = np.vstack((np.linspace(x0_2, x1_2, samps),
np.linspace(y0_2, y1_2, samps)))
z_point = map_coordinates(np.transpose(fourier_error(point_estimate)), xy)
z_line = map_coordinates(np.transpose(fourier_error(line_estimate)), xy)
z_line_2 = map_coordinates(np.transpose(fourier_error(line_estimate)),
xy_2)
ax.semilogy(gaussian_filter(z_point, sigma=samps/80),
'g-', label='Point')
ax.semilogy(gaussian_filter(z_line, sigma=samps/80),
'b-', label='Line, best')
ax.semilogy(gaussian_filter(z_line_2, sigma=samps/80),
'b:', label='Line, worst')
ax.axes.get_xaxis().set_ticks([])
ax.set_xlabel("(e) Error vs. spatial frequency")
plt.ylim(5e0, 9e5)
ax.yaxis.tick_right()
ax.grid('on')
ax.legend(loc=(-0.1, .75), fontsize=8)
# PSFs for point and line STED
ax = plt.subplot2grid((12, 12), (10, 0), rowspan=2, colspan=2)
plt.imshow(comparisons[c]['point_sted_psf'][0][0, :, :],
cmap=plt.cm.gray)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
for n, im in enumerate(comparisons[c]['line_sted_psfs']):
ax = plt.subplot2grid((12, 12), (10, 2+n), rowspan=2, colspan=1)
plt.imshow(im[0, :, :], cmap=plt.cm.gray)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
fig.text(x=0.16, y=0.25, s="(f) Point PSF", fontsize=10)
fig.text(x=0.26, y=0.25, s="(g) Line PSF(s)", fontsize=10)
        # Annotate the excitation and depletion doses for this comparison
fig.text(x=0.65, y=0.83,
s=("Excitation dose: %6s\n"%(
"%0.1f"%(comparisons[c]['point']['excitation_dose'])) +
"Depletion dose: %7s"%(
"%0.1f"%(comparisons[c]['point']['depletion_dose']))),
fontsize=12, fontweight='bold')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0, hspace=0)
plt.savefig(os.path.join(
figure_dir, 'figure_2_'+ im_name +'_'+ c + '.svg'),
bbox_inches='tight')
plt.close(fig)
print("Done constructing figure images.")
if __name__ == '__main__':
main()
|
import lib.logger as logging
from lib.functions import wait_until, r_sleep
from lib.game import ui
from lib.game.notifications import Notifications
logger = logging.get_logger(__name__)
class Store(Notifications):
class FILTERS:
ARTIFACT = ui.STORE_FILTER_ARTIFACT
SECOND_LIST = [FILTERS.ARTIFACT.name]
def is_store_open(self):
"""Checks if Store is open.
:rtype: bool
"""
return self.emulator.is_ui_element_on_screen(ui.STORE_LABEL) or \
self.emulator.is_ui_element_on_screen(ui.STORE_LABEL_2)
def _drag_store_list_to_the_right(self):
"""Drags store' list of items to the right."""
self.emulator.drag(from_ui=ui.STORE_DRAG_RIGHT, to_ui=ui.STORE_DRAG_LEFT)
def _open_filter_menu(self):
"""Opens store's filter menu.
:rtype: bool
"""
if not wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_FILTER):
return logger.error("Can't open store's filter.")
logger.debug("Opening store's filter.")
self.emulator.click_button(ui.STORE_FILTER)
return wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_FILTER_LABEL)
def open_filter(self, ui_filter):
"""Opens store's filter menu by it's UI element.
:param ui.UIElement ui_filter: UI element that represent store's filter. See `Store.FILTERS` for reference.
:rtype: bool
"""
if self._open_filter_menu():
r_sleep(1) # Wait for animations
if not self.emulator.is_ui_element_on_screen(ui_filter) and ui_filter.name in self.SECOND_LIST:
while not wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui_filter, timeout=1):
logger.debug("Dragging to the bottom of the filters.")
self.emulator.drag(from_ui=ui.STORE_FILTER_DRAG_BOTTOM, to_ui=ui.STORE_FILTER_DRAG_TOP, duration=3)
if self.emulator.is_ui_element_on_screen(ui_filter):
logger.debug(f"Selecting by filter {ui_filter.name}")
self.emulator.click_button(ui_filter)
return wait_until(self.is_store_open)
class EnergyStore(Store):
"""Class for working with energy in store."""
def open_energy_store(self):
"""Opens energy store using plus button near energy counter."""
self.game.go_to_main_menu()
self.emulator.click_button(ui.STORE_COLLECT_ENERGY_PLUS_SIGN)
self.game.close_ads()
return wait_until(self.is_store_open)
def collect_free_energy(self):
"""Collects free daily energy."""
if not self.open_energy_store():
logger.error("Failed get to Store - Energy menu.")
return self.game.go_to_main_menu()
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_COLLECT_ENERGY_FREE):
logger.debug("Found available free energy button.")
self.emulator.click_button(ui.STORE_COLLECT_ENERGY_FREE)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_COLLECT_ENERGY_FREE_PURCHASE):
self.emulator.click_button(ui.STORE_COLLECT_ENERGY_FREE_PURCHASE)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_COLLECT_ENERGY_FREE_PURCHASE_CLOSE):
logger.info("Free energy collected.")
self.emulator.click_button(ui.STORE_COLLECT_ENERGY_FREE_PURCHASE_CLOSE)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_COLLECT_ENERGY_FREE_FULL):
logger.info("Energy is already full, can't collect.")
self.emulator.click_button(ui.STORE_COLLECT_ENERGY_FREE_FULL)
else:
logger.info("Free energy isn't available right now.")
self.game.go_to_main_menu()
def collect_energy_via_assemble_points(self, use_all_points=True):
"""Collects energy using available Assemble Points.
:param bool use_all_points: use all available points or not.
"""
if not self.open_energy_store():
logger.error("Failed get to Store - Energy menu.")
return self.game.go_to_main_menu()
recharged = self._recharge_energy_with_points()
if use_all_points and recharged:
while recharged:
logger.debug("Trying to recharge energy again.")
recharged = self._recharge_energy_with_points()
self.game.go_to_main_menu()
def _recharge_energy_with_points(self):
"""Recharges energy with Assemble Points once.
:return: was energy recharged or not.
:rtype: bool
"""
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_RECHARGE_ENERGY_VIA_POINTS):
self.emulator.click_button(ui.STORE_RECHARGE_ENERGY_VIA_POINTS)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_RECHARGE_ENERGY_VIA_POINTS_PURCHASE):
logger.debug("Purchasing energy via assemble points.")
self.emulator.click_button(ui.STORE_RECHARGE_ENERGY_VIA_POINTS_PURCHASE)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_COLLECT_ENERGY_FREE_PURCHASE_CLOSE):
logger.info("Energy recharged.")
self.emulator.click_button(ui.STORE_COLLECT_ENERGY_FREE_PURCHASE_CLOSE)
return True
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_RECHARGE_ENERGY_VIA_NO_POINTS):
logger.info("Not enough Assemble Points for energy recharge.")
self.emulator.click_button(ui.STORE_RECHARGE_ENERGY_VIA_NO_POINTS)
return False
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_RECHARGE_ENERGY_VIA_POINTS_LIMIT):
logger.info("Reached daily limit for energy recharging.")
self.emulator.click_button(ui.STORE_RECHARGE_ENERGY_VIA_POINTS_LIMIT)
return False
return False
class CharacterStore(Store):
"""Class for working with Character Store."""
def open_character_store(self):
"""Opens Character store using Dimension Chest button in Main Menu."""
self.game.go_to_main_menu()
self.emulator.click_button(ui.MAIN_MENU)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.MAIN_MENU):
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.MAIN_MENU_DIMENSION_CHEST):
self.emulator.click_button(ui.MAIN_MENU_DIMENSION_CHEST)
self.close_ads()
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_OPEN_CHARACTER_FROM_DIMENSION_CHEST):
logger.debug("Opening Character tab.")
self.emulator.click_button(ui.STORE_OPEN_CHARACTER_FROM_DIMENSION_CHEST)
return True
return False
def _open_hero_chest_tab(self):
"""Open `Hero` chest tab in the Store."""
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_CHARACTER_HERO_CHEST_TAB):
logger.debug("Opening Hero Chest tab.")
self.emulator.click_button(ui.STORE_CHARACTER_HERO_CHEST_TAB)
return True
return False
def acquire_free_hero_chest(self):
"""Acquires available Free Hero Chest."""
self.open_character_store()
if self._open_hero_chest_tab():
if not wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_CHARACTER_FREE_HERO_CHEST_BUTTON):
logger.info("No available Free Hero Chest, exiting.")
return self.game.go_to_main_menu()
logger.info("Free Hero Chest is available.")
self.emulator.click_button(ui.STORE_CHARACTER_FREE_HERO_CHEST_BUTTON)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_CHARACTER_FREE_HERO_CHEST_BUTTON_ACQUIRE):
logger.info("Free Hero Chest is available.")
self.emulator.click_button(ui.STORE_CHARACTER_FREE_HERO_CHEST_BUTTON_ACQUIRE)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_CHARACTER_FREE_HERO_CHEST_PURCHASE):
self.emulator.click_button(ui.STORE_CHARACTER_FREE_HERO_CHEST_PURCHASE)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.SKIP_CUTSCENE):
self.emulator.click_button(ui.SKIP_CUTSCENE)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_CHARACTER_FREE_HERO_CHEST_PURCHASE_CLOSE):
self.emulator.click_button(ui.STORE_CHARACTER_FREE_HERO_CHEST_PURCHASE_CLOSE)
r_sleep(1) # Wait for animation
logger.info("Free Hero Chest acquired.")
self.emulator.click_button(ui.MENU_BACK)
r_sleep(1) # Wait for animation
self.game.go_to_main_menu()
class ArtifactStore(Store):
"""Class for working with Artifact Store."""
class ARTIFACT_CHEST:
GOLD_100 = "STORE_ARTIFACT_CHEST_1"
GOLD_250 = "STORE_ARTIFACT_CHEST_2"
GOLD_750 = "STORE_ARTIFACT_CHEST_3"
GOLD_1250 = "STORE_ARTIFACT_CHEST_4"
def open_artifact_store(self):
"""Opens Artifact store using Dimension Chest button in Main Menu."""
self.game.go_to_main_menu()
self.emulator.click_button(ui.MAIN_MENU)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.MAIN_MENU):
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.MAIN_MENU_DIMENSION_CHEST):
self.emulator.click_button(ui.MAIN_MENU_DIMENSION_CHEST)
self.close_ads()
return self.open_filter(self.FILTERS.ARTIFACT)
return False
def acquire_free_artifact_chest(self):
"""Acquires available Free Artifact Chest."""
if not self.open_artifact_store():
return logger.error("Can't open Artifact Store.")
self._drag_store_list_to_the_right()
r_sleep(1) # Wait for animations
self._drag_store_list_to_the_right()
self.emulator.click_button(ui.STORE_ARTIFACT_FREE_CHEST)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_ARTIFACT_FREE_CHEST_BUTTON_ACQUIRE):
logger.debug("Acquiring Free Artifact Chest.")
self.emulator.click_button(ui.STORE_ARTIFACT_FREE_CHEST_BUTTON_ACQUIRE)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_ARTIFACT_FREE_CHEST_PURCHASE, timeout=5):
self.emulator.click_button(ui.STORE_ARTIFACT_FREE_CHEST_PURCHASE)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.SKIP_CUTSCENE):
self.emulator.click_button(ui.SKIP_CUTSCENE)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_ARTIFACT_FREE_CHEST_PURCHASE_CLOSE):
self.emulator.click_button(ui.STORE_ARTIFACT_FREE_CHEST_PURCHASE_CLOSE)
r_sleep(1) # Wait for animation
logger.info("Free Artifact Chest acquired.")
self.emulator.click_button(ui.MENU_BACK)
r_sleep(1) # Wait for animation
else:
logger.info("No available Free Artifact Chest, exiting.")
self.emulator.click_button(ui.MENU_BACK)
r_sleep(1) # Wait for animation
self.game.go_to_main_menu()
def buy_artifact_chest(self, chests_to_buy=None):
"""Buys artifact chest from the Store by it's UI element.
:param str | list[str] chests_to_buy: UI elements of chest to buy.
"""
if not self.open_artifact_store():
return logger.error("Can't open Artifact Store.")
self._drag_store_list_to_the_right()
r_sleep(1) # Wait for animations
self._drag_store_list_to_the_right()
        chests_to_buy = [chests_to_buy] if isinstance(chests_to_buy, str) else (chests_to_buy or [])
        for chest in chests_to_buy:
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_ARTIFACT_CHEST_GOLD):
self.emulator.click_button(ui.STORE_ARTIFACT_CHEST_GOLD)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.STORE_ARTIFACT_GOLD_CHEST_PURCHASE):
self.emulator.click_button(ui.STORE_ARTIFACT_GOLD_CHEST_PURCHASE)
if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.SKIP_CUTSCENE):
self.emulator.click_button(ui.SKIP_CUTSCENE)
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_ARTIFACT_FREE_CHEST_PURCHASE_CLOSE):
self.emulator.click_button(ui.STORE_ARTIFACT_FREE_CHEST_PURCHASE_CLOSE)
logger.info(f"Artifact Chest STORE_ARTIFACT_CHEST_GOLD acquired.")
if wait_until(self.emulator.is_ui_element_on_screen,
ui_element=ui.STORE_RECHARGE_ENERGY_VIA_POINTS_LIMIT):
logger.info("Daily Acquire Artifact Chest Reached.")
self.emulator.click_button(ui.STORE_RECHARGE_ENERGY_VIA_POINTS_LIMIT)
break
self.game.go_to_main_menu()
|
class FSMProxy(object):
def __init__(self, cfg):
from fysom import Fysom
events = set([event['name'] for event in cfg['events']])
cfg['callbacks'] = self.collect_event_listeners(events, cfg['callbacks'])
self.fsm = Fysom(cfg)
self.attach_proxy_methods(self.fsm, events)
def collect_event_listeners(self, events, callbacks):
callbacks = callbacks.copy()
callback_names = []
for event in events:
callback_names.append(('_before_' + event, 'onbefore' + event))
callback_names.append(('_after_' + event, 'onafter' + event))
for fn_name, listener in callback_names:
fn = getattr(self, fn_name, None)
if callable(fn):
if listener in callbacks:
old_fn = callbacks[listener]
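                        # Bind the current old_fn/fn as default arguments so each wrapper
                        # keeps its own pair (avoids late binding inside this loop).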
def wrapper(e, old_fn=old_fn, fn=fn):
old_fn(e)
fn(e)
callbacks[listener] = wrapper
else:
callbacks[listener] = fn
return callbacks
def attach_proxy_methods(self, fsm, events):
def make_proxy(fsm, event):
fn = getattr(fsm, event)
def proxy(*args, **kwargs):
if args:
raise FSMProxyError('FSMProxy event listeners only accept named arguments.')
fn(**kwargs)
return proxy
for event in events:
if not hasattr(self, event):
setattr(self, event, make_proxy(fsm, event))
def __getstate__(self):
state = {}
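        # Callables and the Fysom instance are left out of the pickled state;
        # the dotted class path is recorded, presumably so a loader can rebuild the proxy.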
        for key, value in self.__dict__.items():
if callable(value) or key == 'fsm':
continue
state[key] = value
state['__class__'] = self.__module__ + '.' + self.__class__.__name__
return state
def __setstate__(self, state):
for key in state:
self.__dict__[key] = state[key]
class FSMProxyError(Exception):
pass
|
import os
import sys
from pathlib import Path
# py.test messes up sys.path, must add manually
# (note: do not have __init__.py in project if project and app has same name, python takes "top package" and will
# import from project instead of from app)
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from django.conf import settings
from six import text_type
def pytest_configure(config):
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'clothstream.settings.test')
# http://djangosnippets.org/snippets/646/
class InvalidVarException(object):
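        # Used as TEMPLATE_STRING_IF_INVALID: Django only interpolates the variable
        # name when "'%s' in value" is true, so __contains__ claims '%s' and __mod__
        # then raises loudly on any unknown template variable.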
def __mod__(self, missing):
try:
missing_str = text_type(missing)
            except Exception:
missing_str = 'Failed to create string representation'
raise Exception('Unknown template variable %r %s' % (missing, missing_str))
def __contains__(self, search):
if search == '%s':
return True
return False
settings.TEMPLATE_DEBUG = True
settings.TEMPLATE_STRING_IF_INVALID = InvalidVarException()
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'clothstream.tests',
)
# This speeds up the tests considerably, pbkdf2 is by design, slow.
settings.PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# we monkey-patch connection.creation, to be sure that we modify sequences after it has completely finished
# (if we use post-syncdb signal, it won't work as some post-signals will actually call reset-sequences)
from django.db import connections
for connection in connections.all():
from pytest_django.db_reuse import _monkeypatch
from clothstream.lib.modify_seq import setup_modified_seq
create_test_db = connection.creation.create_test_db
def create_test_db_with_modified_sequences(self, *args, **kwargs):
create_test_db(*args, **kwargs)
setup_modified_seq(connection)
_monkeypatch(connection.creation, 'create_test_db', create_test_db_with_modified_sequences)
|
# Copyright 2017--2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Encoders for sequence-to-sequence models.
"""
import inspect
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import List, Optional, Union
import mxnet as mx
from . import config
from . import constants as C
from . import layers
from . import transformer
from . import utils
logger = logging.getLogger(__name__)
ImageEncoderConfig = None
def get_encoder(config: 'EncoderConfig', prefix: str = '', dtype: str = C.DTYPE_FP32) -> 'Encoder':
return get_transformer_encoder(config, prefix, dtype)
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str, dtype: str) -> 'Encoder':
"""
Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
    :param config: Configuration for transformer encoder.
    :param prefix: Prefix for variable names.
    :param dtype: Data type for encoder parameters.
    :return: Encoder instance.
"""
return TransformerEncoder(config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX, dtype=dtype)
class Encoder(ABC, mx.gluon.HybridBlock):
"""
Generic encoder interface.
"""
@abstractmethod
def __init__(self, **kwargs):
mx.gluon.HybridBlock.__init__(self, **kwargs)
def forward(self, inputs, valid_length): # pylint: disable=arguments-differ
return mx.gluon.HybridBlock.forward(self, inputs, valid_length)
def __call__(self, inputs, valid_length): #pylint: disable=arguments-differ
"""
Encodes inputs given valid lengths of individual examples.
:param inputs: Input data.
:param valid_length: Length of inputs without padding.
:return: Encoded versions of input data (data, data_length).
"""
return mx.gluon.HybridBlock.__call__(self, inputs, valid_length)
@abstractmethod
def get_num_hidden(self) -> int:
"""
:return: The representation size of this encoder.
"""
raise NotImplementedError()
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
:return: The size of the encoded sequence.
"""
return seq_len
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the encoder if such a restriction exists.
"""
return None
@dataclass
class FactorConfig(config.Config):
vocab_size: int
num_embed: int
combine: str # From C.FACTORS_COMBINE_CHOICES
share_embedding: bool
@dataclass
class EmbeddingConfig(config.Config):
vocab_size: int
num_embed: int
dropout: float
num_factors: int = field(init=False)
factor_configs: Optional[List[FactorConfig]] = None
allow_sparse_grad: bool = False
def __post_init__(self):
self.num_factors = 1
if self.factor_configs is not None:
self.num_factors += len(self.factor_configs)
class Embedding(Encoder):
"""
Thin wrapper around MXNet's Embedding symbol. Works with both time- and batch-major data layouts.
:param config: Embedding config.
:param prefix: Name prefix for symbols of this encoder.
:param dtype: Data type. Default: 'float32'.
"""
def __init__(self,
config: EmbeddingConfig,
prefix: str,
embed_weight: Optional[mx.gluon.Parameter] = None,
dtype: str = C.DTYPE_FP32) -> None:
super().__init__(prefix=prefix)
self.config = config
self._dtype = dtype
self._factor_weight_format_string = 'factor%d_weight'
with self.name_scope():
if embed_weight is None:
self.embed_weight = self.params.get('weight',
shape=(self.config.vocab_size, self.config.num_embed),
grad_stype='row_sparse',
dtype=dtype)
self._use_sparse_grad = self.config.allow_sparse_grad
else:
self.embed_weight = embed_weight # adds to self._reg_params
self.params.update({embed_weight.name: embed_weight}) # adds to self.params
self._use_sparse_grad = embed_weight._grad_stype == 'row_sparse' and self.config.allow_sparse_grad
if self.config.factor_configs is not None:
for i, fc in enumerate(self.config.factor_configs, 1):
factor_weight_name = self._factor_weight_format_string % i
factor_weight = embed_weight if fc.share_embedding else \
self.params.get(factor_weight_name, shape=(fc.vocab_size, fc.num_embed), dtype=dtype)
# We set the attribute of the class to trigger the hybrid_forward parameter creation "magic"
setattr(self, factor_weight_name, factor_weight)
def hybrid_forward(self, F, data, valid_length, embed_weight, **kwargs): # pylint: disable=arguments-differ
# We will catch the optional factor weights in kwargs
average_factors_embeds = [] # type: List[Union[mx.sym.Symbol, mx.nd.ndarray]]
concat_factors_embeds = [] # type: List[Union[mx.sym.Symbol, mx.nd.ndarray]]
sum_factors_embeds = [] # type: List[Union[mx.sym.Symbol, mx.nd.ndarray]]
if self.config.num_factors > 1 and self.config.factor_configs is not None:
data, *data_factors = F.split(data=data,
num_outputs=self.config.num_factors,
axis=2,
squeeze_axis=True)
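            # data arrives as (batch, seq_len, num_factors); splitting on axis 2
            # separates the primary token ids from the per-factor id streams.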
for i, (factor_data, factor_config) in enumerate(zip(data_factors,
self.config.factor_configs), 1):
factor_weight = kwargs[self._factor_weight_format_string % i]
factor_embedding = F.Embedding(data=factor_data,
input_dim=factor_config.vocab_size,
weight=factor_weight,
output_dim=factor_config.num_embed)
if factor_config.combine == C.FACTORS_COMBINE_CONCAT:
concat_factors_embeds.append(factor_embedding)
elif factor_config.combine == C.FACTORS_COMBINE_SUM:
sum_factors_embeds.append(factor_embedding)
elif factor_config.combine == C.FACTORS_COMBINE_AVERAGE:
average_factors_embeds.append(factor_embedding)
else:
raise ValueError("Unknown combine value for factors: %s" % factor_config.combine)
else:
data = F.squeeze(data, axis=2)
embed = F.Embedding(data,
weight=embed_weight,
input_dim=self.config.vocab_size,
output_dim=self.config.num_embed,
dtype=self._dtype,
sparse_grad=self._use_sparse_grad)
if self.config.num_factors > 1 and self.config.factor_configs is not None:
if average_factors_embeds:
embed = F.add_n(embed, *average_factors_embeds) / (len(average_factors_embeds) + 1)
if sum_factors_embeds:
embed = F.add_n(embed, *sum_factors_embeds)
if concat_factors_embeds:
embed = F.concat(embed, *concat_factors_embeds, dim=2)
if self.config.dropout > 0:
embed = F.Dropout(data=embed, p=self.config.dropout)
return embed, F.identity(valid_length) # identity: See https://github.com/apache/incubator-mxnet/issues/14228
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.config.num_embed
class EncoderSequence(Encoder, mx.gluon.nn.HybridSequential):
"""
A sequence of encoders is itself an encoder.
"""
def __init__(self, prefix: str = '') -> None:
Encoder.__init__(self)
mx.gluon.nn.HybridSequential.__init__(self, prefix=prefix)
def add(self, *encoders):
"""Adds block on top of the stack."""
for encoder in encoders:
utils.check_condition(isinstance(encoder, Encoder), "%s is not of type Encoder" % encoder)
mx.gluon.nn.HybridSequential.add(self, *encoders)
def hybrid_forward(self, F, data, valid_length): # pylint: disable=arguments-differ
for block in self._children.values():
data, valid_length = block(data, valid_length)
return data, F.identity(valid_length) # identity: See https://github.com/apache/incubator-mxnet/issues/14228
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return next(reversed(self._children.values())).get_num_hidden()
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
Returns the size of the encoded sequence.
"""
for encoder in self._children.values():
seq_len = encoder.get_encoded_seq_len(seq_len)
return seq_len
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the encoder if such a restriction exists.
"""
max_seq_len = min((encoder.get_max_seq_len()
for encoder in self._children.values() if encoder.get_max_seq_len() is not None), default=None)
return max_seq_len
def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:
"""
Extends sequence with new Encoder.
:param cls: Encoder type.
:param infer_hidden: If number of hidden should be inferred from previous encoder.
:param kwargs: Named arbitrary parameters for Encoder.
:return: Instance of Encoder.
"""
params = dict(kwargs)
if infer_hidden:
params['num_hidden'] = self.get_num_hidden()
sig_params = inspect.signature(cls.__init__).parameters
encoder = cls(**params)
self.add(encoder)
return encoder
class TransformerEncoder(Encoder, mx.gluon.HybridBlock):
"""
Non-recurrent encoder based on the transformer architecture in:
Attention Is All You Need, Figure 1 (left)
Vaswani et al. (https://arxiv.org/pdf/1706.03762.pdf).
:param config: Configuration for transformer encoder.
:param prefix: Name prefix for operations in this encoder.
"""
def __init__(self,
config: transformer.TransformerConfig,
prefix: str = C.TRANSFORMER_ENCODER_PREFIX,
dtype: str = C.DTYPE_FP32) -> None:
super().__init__(prefix=prefix)
self.config = config
with self.name_scope():
self.pos_embedding = layers.PositionalEmbeddings(weight_type=self.config.positional_embedding_type,
num_embed=self.config.model_size,
max_seq_len=self.config.max_seq_len_source,
prefix=C.SOURCE_POSITIONAL_EMBEDDING_PREFIX,
scale_up_input=True,
scale_down_positions=False)
self.layers = mx.gluon.nn.HybridSequential()
for i in range(config.num_layers):
self.layers.add(transformer.TransformerEncoderBlock(config, prefix="%d_" % i, dtype=dtype))
self.final_process = transformer.TransformerProcessBlock(sequence=config.preprocess_sequence,
dropout=config.dropout_prepost,
prefix="final_process_",
num_hidden=self.config.model_size)
def hybrid_forward(self, F, data, valid_length):
# positional embedding
data = self.pos_embedding(data, None)
if self.config.dropout_prepost > 0.0:
data = F.Dropout(data=data, p=self.config.dropout_prepost)
# (batch_size * heads, seq_len)
att_valid_length = layers.prepare_source_valid_lengths(F, valid_length, data,
num_heads=self.config.attention_heads)
data = F.transpose(data, axes=(1, 0, 2))
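        # Switch to time-major layout (seq_len, batch, model_size) for the encoder
        # blocks; transposed back to batch-major after the final process block.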
for block in self.layers:
data = block(data, att_valid_length)
data = self.final_process(data, None)
data = F.transpose(data, axes=(1, 0, 2))
return data, valid_length
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.config.model_size
EncoderConfig = Union[transformer.TransformerConfig]
|
from unittest import TestCase
from copy import deepcopy
from numpy import array as np_array
from rptools.rpthermo.rpThermo import (
build_stoichio_matrix,
get_target_rxn_idx,
minimize,
remove_compounds,
# eQuilibrator,
# initThermo,
# get_compounds_from_cache
)
from brs_utils import (
create_logger,
Cache
)
from chemlite import Reaction
from rptools.rplibs import(
rpCompound,
rpReaction,
rpPathway
)
species = {
"TARGET_0000000001": rpCompound(
id="TARGET_0000000001",
smiles="[H]OC(=O)C([H])=C([H])C([H])=C([H])C(=O)O[H]",
inchi="InChI=1S/C6H6O4/c7-5(8)3-1-2-4-6(9)10/h1-4H,(H,7,8)(H,9,10)",
inchikey="TXXHDPDFNKHHGW-UHFFFAOYSA-N"
),
"CMPD_0000000010": rpCompound(
id="CMPD_0000000010",
smiles="[H]OC(=O)c1c([H])c([H])c(O[H])c(O[H])c1[H]",
inchi="InChI=1S/C7H6O4/c8-5-2-1-4(7(10)11)3-6(5)9/h1-3,8-9H,(H,10,11)",
inchikey="YQUVCSBJEUQKSH-UHFFFAOYSA-N"
),
"MNXM23": rpCompound(
id="MNXM23",
formula="C3H3O3",
smiles="CC(=O)C(=O)O]",
inchi="InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)",
inchikey="LCTONWCANYUPML-UHFFFAOYSA-N",
name="pyruvate"
),
"CMPD_0000000025": rpCompound(
id="CMPD_0000000025",
smiles="[H]OC(=O)c1c([H])c([H])c([H])c(O[H])c1[H]",
inchi="InChI=1S/C7H6O3/c8-6-3-1-2-5(4-6)7(9)10/h1-4,8H,(H,9,10)",
inchikey="IJFXRHURBJZNAO-UHFFFAOYSA-N"
),
"CMPD_0000000003": rpCompound(
id="CMPD_0000000003",
smiles="[H]Oc1c([H])c([H])c([H])c([H])c1O[H]",
inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N"
),
"CMPD_0000000003_wo_smiles": rpCompound(
id="CMPD_0000000003_wo_smiles",
inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N"
),
"CMPD_0000000004_wo_smiles": rpCompound(
id="CMPD_0000000003_wo_smiles",
inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N"
),
"CMPD_0000000003_w_smiles_None": rpCompound(
id="CMPD_0000000003_wo_smiles",
inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N",
smiles=None
),
"MNXM337": rpCompound(
id="MNXM337",
smiles="[H]OC(=O)C(OC1([H])C([H])=C(C(=O)O[H])C([H])=C([H])C1([H])O[H])=C([H])[H]",
inchi="InChI=1S/C10H10O6/c1-5(9(12)13)16-8-4-6(10(14)15)2-3-7(8)11/h2-4,7-8,11H,1H2,(H,12,13)(H,14,15)",
inchikey="WTFXTQVDAKGDEY-UHFFFAOYSA-N"
),
"MNXM2": rpCompound(
id="MNXM2",
smiles="[H]O[H]",
inchi="InChI=1S/H2O/h1H2",
inchikey="XLYOFNOQVPJJNP-UHFFFAOYSA-N"
),
"MNXM13": rpCompound(
id="MNXM13",
smiles="O=C=O",
inchi="InChI=1S/CO2/c2-1-3",
inchikey="CURLTUGMZLYLDI-UHFFFAOYSA-N",
formula="CO2",
name="CO2"
),
"MNXM5": rpCompound(
id="MNXM5",
smiles="N=C(O)c1ccc[n+](C2OC(COP(=O)(O)OP(=O)(O)OCC3OC(n4cnc5c(N)ncnc54)C(OP(=O)(O)O)C3O)C(O)C2O)c1",
inchi="InChI=1S/C21H28N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1-4,7-8,10-11,13-16,20-21,29-31H,5-6H2,(H7-,22,23,24,25,32,33,34,35,36,37,38,39)/p+1",
inchikey="XJLXINKUBYWONI-UHFFFAOYSA-O",
formula="C21H25N7O17P3",
name="NADP(+)"
),
"MNXM4": rpCompound(
id="MNXM4",
smiles="O=O",
inchi="InChI=1S/O2/c1-2",
inchikey="MYMOFIZGZYHOMD-UHFFFAOYSA-N"
),
"MNXM1": rpCompound(
id="MNXM1",
smiles="[H+]",
inchi="InChI=1S/p+1",
inchikey="GPRLSGONYQIRFK-UHFFFAOYSA-N"
),
"MNXM6": rpCompound(
id="MNXM6",
smiles="[H]N=C(O[H])C1=C([H])N(C2([H])OC([H])(C([H])([H])OP(=O)(O[H])OP(=O)(O[H])OC([H])([H])C3([H])OC([H])(n4c([H])nc5c(N([H])[H])nc([H])nc54)C([H])(OP(=O)(O[H])O[H])C3([H])O[H])C([H])(O[H])C2([H])O[H])C([H])=C([H])C1([H])[H]",
inchi="InChI=1S/C21H30N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1,3-4,7-8,10-11,13-16,20-21,29-31H,2,5-6H2,(H2,23,32)(H,36,37)(H,38,39)(H2,22,24,25)(H2,33,34,35)",
inchikey="ACFIXJIJDZMPPO-UHFFFAOYSA-N"
)
}
class Test_rpThermo(TestCase):
def setUp(self):
self.logger = create_logger(__name__, 'ERROR')
self.rxn_1 = rpReaction(
id='rxn_1',
reactants={'MNXM188': 1, 'MNXM4': 1, 'MNXM6': 1, 'MNXM1': 3},
products={'CMPD_0000000004': 1, 'CMPD_0000000003': 1, 'MNXM13': 1, 'MNXM15': 3, 'MNXM5': 1},
)
self.rxn_2 = rpReaction(
id='rxn_2',
reactants={'MNXM4': 1, 'CMPD_0000000003': 2},
products={'MNXM1': 1, 'TARGET_0000000001': 1},
)
self.rxn_3 = rpReaction(
id='rxn_3',
reactants={'CMPD_0000000004': 3, 'MNXM4': 1, 'MNXM6': 1},
products={'MNXM13': 1, 'MNXM5': 1},
)
self.reactions = [self.rxn_1, self.rxn_2, self.rxn_3]
self.sto_mat_1 = [
[-3.0, 1.0, 0.0],
[-1.0, -1.0, -1.0],
[1.0, 0.0, 1.0],
[3.0, 0.0, 0.0],
[1.0, 0.0, -3.0],
[1.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, -2.0, 0.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, -1.0]
]
# Reactions
    # |- rxn_1: 1.0 MNXM188 + 1.0 MNXM4 + 1.0 MNXM6 + 3.0 MNXM1 --> 1.0 CMPD_0000000004 + 1.0 CMPD_0000000003 + 1.0 MNXM13 + 3.0 MNXM15 + 1.0 MNXM5
    # |- rxn_2: 1.0 MNXM4 + 2.0 CMPD_0000000003 --> 1.0 MNXM1 + 1.0 TARGET_0000000001
# |- rxn_3: 1.0 MNXM4 + 1.0 MNXM6 + 3.0 CMPD_0000000004 --> 1.0 MNXM13 + 1.0 MNXM5
# Compounds ordered: CMPD_0000000003, CMPD_0000000004
# Reactions ordered: rxn_1, rxn_2*, rxn_3
# * to be optimised
def test_minimize_1(self):
sto_mat = np_array(
[
[1, -2, 0],
[1, 0, -3]
]
)
rxn_tgt_idx = 1
coeffs = minimize(
sto_mat,
rxn_tgt_idx,
self.logger
)
self.assertSequenceEqual(
coeffs.tolist(),
[3.0, 1.5, 1.0]
)
# Reactions
# |- rxn_1: 1.0 MNXM4 + 1.0 MNXM421 + 1.0 MNXM6 + 1.0 MNXM1 --> 1.0 CMPD_0000000015 + 1.0 MNXM2 + 1.0 MNXM5
# |- rxn_2: 1.0 MNXM1 + 1.0 CMPD_0000000015 + 1.0 MNXM2 --> 1.0 CMPD_0000000010 + 1.0 MNXM15
# |- rxn_3: 1.0 MNXM1 + 1.0 CMPD_0000000010 --> 1.0 CMPD_0000000003 + 1.0 MNXM13
# |- rxn_4: 1.0 MNXM4 + 1.0 CMPD_0000000003 --> 2.0 MNXM1 + 1.0 TARGET_0000000001
# Compounds ordered: CMPD_0000000003, CMPD_0000000010, CMPD_0000000015
# Reactions ordered: rxn_1, rxn_2, rxn_3, rxn_4*
# * to be optimised
def test_minimize_2(self):
sto_mat = np_array(
[
[0, 0, 1, -1],
[0, 1, -1, 0],
[1, -1, 0, 0]
]
)
rxn_tgt_idx = 3
coeffs = minimize(
sto_mat,
rxn_tgt_idx,
self.logger
)
self.assertSequenceEqual(
coeffs.tolist(),
            [1, 1, 1, 1]
)
def test_minimize_1cmpd(self):
sto_mat = np_array(
[
[ 1, -1, 0, 0]
]
)
rxn_tgt_idx = 2
coeffs = minimize(
sto_mat,
rxn_tgt_idx,
self.logger
)
_coeffs = deepcopy(coeffs)
for coeff_idx in range(len(_coeffs)):
if (
_coeffs[coeff_idx] == 0
or _coeffs[coeff_idx] == abs(float("inf"))
):
_coeffs[coeff_idx] = 1.
self.assertSequenceEqual(
list(_coeffs),
[1, 1, 1, 1]
)
def test_build_stoichio_matrix(self):
# Ignore the order of matrix lines because
# it is not relevant for our resolution system
self.assertCountEqual(
build_stoichio_matrix(self.reactions).tolist(),
self.sto_mat_1
)
def test_build_stoichio_matrix_w_sel_cmpds(self):
# Ignore the order of matrix lines because
# it is not relevant for our resolution system
self.assertCountEqual(
build_stoichio_matrix(
reactions=self.reactions,
compounds=['CMPD_0000000003']
).tolist(),
[self.sto_mat_1[7]]
)
def test_get_target_rxn_idx(self):
self.assertEqual(
get_target_rxn_idx(
reactions=self.reactions,
rxn_target_id=self.rxn_2.get_id(),
),
self.reactions.index(self.rxn_2)
)
def test_remove_compounds(self):
pathway = rpPathway(id='thermo')
for rxn in self.reactions:
pathway.add_reaction(rxn)
compd_id1 = 'UNK_CMPD_FOOBAR'
compd_id2 = 'UNK_CMPD_FOOBAR_2'
self.rxn_1.add_product(stoichio=2, compound_id=compd_id1)
self.rxn_1.add_product(stoichio=3, compound_id=compd_id2)
self.rxn_2.add_reactant(stoichio=2, compound_id=compd_id2)
self.rxn_3.add_reactant(stoichio=1, compound_id=compd_id1)
reactions = remove_compounds(
compounds=[compd_id1, compd_id2],
reactions=pathway.get_list_of_reactions(),
rxn_target_id=self.rxn_2.get_id(),
)
self.assertDictEqual(
Reaction.sum_stoichio(reactions),
{'MNXM1': -1.5, 'MNXM188': -1.0, 'MNXM4': -4.5, 'MNXM6': -3.0, 'CMPD_0000000003': -2.0, 'CMPD_0000000004': -5.0, 'MNXM13': 3.0, 'MNXM15': 3.0, 'MNXM5': 3.0, 'TARGET_0000000001': 1.5}
)
# cc = initThermo()
# species, unk_compounds = get_compounds_from_cache(
# compounds=pathway.get_species(),
# cc=cc
# )
# results = eQuilibrator(
# species_stoichio=pathway.net_reaction(),
# species=species,
# cc=cc
# )
|
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import validate_password
from django.utils.timezone import now
from rest_framework import serializers
from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework.validators import UniqueTogetherValidator
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from sn_test_task.redis import redis_instance
from social_network.models import Post, Like
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = 'id', 'username', 'password',
read_only_fields = 'id',
extra_kwargs = {'password': {'write_only': True}}
@staticmethod
def validate_password(password):
try:
validate_password(password)
        except DjangoValidationError as e:
raise serializers.ValidationError(str(e))
return password
def create(self, validated_data):
user = User.objects.create(**validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class PostCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = '__all__'
read_only_fields = 'id', 'created',
class PostSerializer(serializers.ModelSerializer):
likes_count = serializers.SerializerMethodField('get_likes_count')
user = serializers.HyperlinkedRelatedField(
read_only=True,
view_name='sn:user_stats',
lookup_field='pk'
)
@staticmethod
def get_likes_count(instance):
return Like.objects.filter(post=instance).count()
class Meta:
model = Post
fields = 'id', 'user', 'title', 'likes_count', 'created', 'content'
read_only_fields = 'id', 'created',
class PostListSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Post
fields = 'id', 'link', 'created', 'title', 'content',
read_only_fields = 'id', 'created'
extra_kwargs = {
'link': {'view_name': 'sn:post_detail', 'lookup_field': 'pk'},
}
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = Like
fields = '__all__'
read_only_fields = 'id', 'created'
validators = [
UniqueTogetherValidator(
queryset=Like.objects.all(),
fields=['user', 'post'],
message='You liked this post already. Please, try to like another post.'
)
]
class DislikeSerializer(serializers.ModelSerializer):
class Meta:
model = Like
fields = '__all__'
class TokenObtainPairSerializerAndLogin(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
user.last_login = now()
user.save()
return super().get_token(user)
class UserActivitySerializer(serializers.ModelSerializer):
    last_request = serializers.SerializerMethodField()
@staticmethod
def get_last_request(instance):
user_key = '_'.join(('user', str(instance.id)))
last_request = redis_instance.get(user_key)
return last_request
class Meta:
model = User
fields = 'id', 'username', 'last_login', 'last_request'
class AggregatedLikeSerializer(serializers.Serializer):
created__date = serializers.DateField()
total = serializers.IntegerField()
|
from setuptools import setup
setup(
name='django-pkgconf',
version='0.3.0',
description='Yet another application settings helper.',
url='https://github.com/byashimov/django-pkgconf',
author='Murad Byashimov',
author_email='byashimov@gmail.com',
packages=['pkgconf'],
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Environment :: Web Environment',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
],
)
|
from django.contrib.auth import get_user_model
from django.contrib.auth.base_user import (AbstractBaseUser as DjangoAbstractBaseUser,
BaseUserManager as DjangoBaseUserManager)
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import PermissionsMixin
class BaseUserManager(DjangoBaseUserManager):
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
email = self.normalize_email(email)
user_model = get_user_model()
user = user_model(email=email, is_active=True, is_staff=is_staff, is_superuser=is_superuser, **extra_fields)
user.set_password(password)
user.save()
return user
def create_user(self, email=None, password=None, is_staff=False, **extra_fields):
return self._create_user(email, password, is_staff, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True, **extra_fields)
def activate(self, username):
user = self.get_by_natural_key(username)
user.is_active = True
user.save()
return user
def confirm_new_email(self, username):
user = self.get_by_natural_key(username)
user.confirm_new_email()
return user
# noinspection PyAbstractClass
class AbstractBaseUser(DjangoAbstractBaseUser,PermissionsMixin):
objects = BaseUserManager()
email = models.EmailField(_("e-mail"), max_length=255, unique=True, db_index=True)
date_joined = models.DateTimeField(_("date joined"), default=timezone.now)
is_active = models.BooleanField(_("active"), default=False)
is_staff = models.BooleanField(_("staff"), default=False)
is_superuser = models.BooleanField(_('superuser'), default=False)
new_email = models.EmailField(_("new e-mail"), max_length=255, null=True, blank=True, db_index=True)
USERNAME_FIELD = "email"
class Meta(DjangoAbstractBaseUser.Meta):
abstract = True
def get_short_name(self):
return self.email
def confirm_new_email(self):
if not self.new_email:
return
self.email = self.new_email
self.new_email = None
self.save()
class AbstractUser(AbstractBaseUser):
name = models.CharField(max_length=255, blank=True, null=True)
class Meta(AbstractBaseUser.Meta):
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_full_name(self):
return self.name
class User(AbstractUser):
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
|
import numpy as np
import matplotlib.pyplot as plt
import random
from PIL import Image
from matplotlib import gridspec
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle, Circle
from matplotlib.pyplot import draw, figure, get_cmap, gray
from matplotlib.transforms import Affine2D
from numpy import array, uint32, round, sqrt, ceil, asarray, append
from numpy import spacing as eps
from hotspotter.other.AbstractPrintable import AbstractManager
from hotspotter.other.ConcretePrintable import Pref
from hotspotter.other.logger import logmsg, logdbg, logwarn
from warnings import catch_warnings, simplefilter
import colorsys
import os.path
import sys
class DrawManager(AbstractManager):
def init_preferences(dm, default_bit=False):
iom = dm.hs.iom
        if dm.draw_prefs is None:
dm.draw_prefs = Pref(fpath=iom.get_prefs_fpath('draw_prefs'))
dm.draw_prefs.bbox_bit = True
dm.draw_prefs.ellipse_bit = False
dm.draw_prefs.ellipse_alpha = .6
dm.draw_prefs.points_bit = False
dm.draw_prefs.result_view = Pref(1, choices=['in_image', 'in_chip'])
dm.draw_prefs.fignum = 0
dm.draw_prefs.num_result_cols = 3
dm.draw_prefs.figsize = (5,5)
dm.draw_prefs.colormap = Pref('hsv', hidden=True)
dm.draw_prefs.in_qtc_bit = Pref(False, hidden=True) #Draw in the Qt Console
dm.draw_prefs.use_thumbnails = Pref(False, hidden=True)
dm.draw_prefs.thumbnail_size = Pref(128, hidden=True)
if not default_bit:
dm.draw_prefs.load()
# ---
def show_splash(dm):
splash_fname = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'splash.png')
if not os.path.exists(splash_fname):
root_dir = os.path.realpath(os.path.dirname(__file__))
            while root_dir is not None:
splash_fname = os.path.join(root_dir, "hotspotter", "front", 'splash.png')
logdbg(splash_fname)
exists_test = os.path.exists(splash_fname)
logdbg('Exists:'+str(exists_test))
if exists_test:
break
tmp = os.path.dirname(root_dir)
if tmp == root_dir:
root_dir = None
else:
root_dir = tmp
logdbg('Splash Fname: %r '% splash_fname)
splash_img = asarray(Image.open(splash_fname))
dm.add_images([splash_img],['Welcome to Hotspotter'])
dm.end_draw()
# ---
def show_image(dm, gx):
gm, cm = dm.hs.get_managers('gm','cm')
gid = gm.gx2_gid[gx]
img_list = gm.gx2_img_list(gx)
title_list = ['gid='+str(gid)+' gname='+gm.gx2_gname[gx]]
dm.add_images(img_list, title_list)
cx_list = gm.gx2_cx_list[gx]
if dm.draw_prefs.use_thumbnails is True:
pass
for cx in iter(cx_list):
dm.draw_chiprep2(cx, axi=0, in_image_bit=True)
dm.end_draw()
# ---
def show_chip(dm, cx, in_raw_chip=False, **kwargs):
cm = dm.hs.cm
cid, gname, chip = cm.cx2_(cx, 'cid', 'gname', 'chip')
if in_raw_chip:
chip = np.asarray(cm.cx2_pil_chip(cx, scaled=True,
preprocessed=False, rotated=True,
colored=True))
if dm.draw_prefs.use_thumbnails is True:
pass
dm.add_images([chip], [gname])
# Draw chiprep and return fsel incase rand is good
fsel_ret = dm.draw_chiprep2(cx, axi=0, **kwargs)
dm.end_draw()
return fsel_ret
# ---
def show_query(dm, res, titleargs=None, enddraw=True):
# Make sure draw is valid
if res is None: dm.show_splash(); return
# Get Chip Properties
cm = res.hs.cm
qcm = res.qhs.cm
titleargs =\
('cx', 'cid', 'nid', 'name', 'gname')
( qcx, qcid , qnid , qname , qgname ) = res.qcid2_(*titleargs)
( tcx, tcid , tnid , tname , tgname ) = res.tcid2_(*titleargs)
(tcx, tscore, ) = res.tcid2_('cx','score')
# Titles of the Plot
#qtitle = 'gname: %s\nQUERY cid=%d, nid=%d' % (qgname, qcid, qnid)
#ttile = ['cid=%d\n gname: %s\nrank/score=%d,%.2f' % tup for tup in zip(tcid, tgname, range(1,len(tscore)+1), tscore)]
qtitle = 'gname: %s\nQUERY nid=%d' % (qgname, qnid)
ttile = ['gname: %s\nrank/score=%d/%.2f' % tup for tup in zip(tgname, range(1,len(tscore)+1), tscore)]
title_list = [qtitle] + ttile
if dm.draw_prefs.use_thumbnails is True:
pass
# Add the images to draw
in_image_bit = dm.draw_prefs.result_view == 'in_image'
if in_image_bit:
qimg = qcm.cx2_img(qcx)
timg = cm.cx2_img_list(tcx)
dm.add_images([qimg] + timg, title_list)
elif dm.draw_prefs.result_view == 'in_chip':
qchip = qcm.cx2_chip_list(qcx)
tchip = cm.cx2_chip_list(tcx)
dm.add_images(qchip + tchip, title_list)
# Draw the Query Chiprep
qaxi = 0; qfsel = []
dm.draw_chiprep2(qcx, axi=qaxi, fsel=qfsel, qcm=qcm)
# Draw the Top Result Chipreps
for (tx, cx) in enumerate(tcx):
fm = res.rr.cx2_fm[cx]
fs = res.rr.cx2_fs[cx]
axi = tx+1
if len(fs) == 0:
qfsel = np.array([], np.uint32)
fsel = np.array([], np.uint32)
else:
qfsel = fm[fs > 0][:,0]
fsel = fm[fs > 0][:,1]
dm.draw_chiprep2(cx,
axi=axi,
axi_color=axi,
fsel=fsel,
in_image_bit=in_image_bit)
dm.draw_chiprep2(qcx,
axi=qaxi,
axi_color=axi,
fsel=qfsel,
in_image_bit=in_image_bit,
qcm=qcm)
if enddraw:
dm.end_draw()
# ---
def __init__(dm, hs):
super( DrawManager, dm ).__init__( hs )
dm.hs = hs
dm.fignum = 0
dm.dpi = 100 #72
dm.draw_prefs = None
dm.ax_list = []
dm.init_preferences()
# ---
def update_figsize(dm):
fig = dm.get_figure()
dm.draw_prefs.figsize = (fig.get_figheight(), fig.get_figwidth())
# ---
def get_figure(dm):
guifig = dm.hs.uim.get_gui_figure()
        if guifig is not None and dm.fignum == 0: # Check to see if we have access to the gui
return guifig
fig = figure(num=dm.fignum, figsize=dm.draw_prefs.figsize, dpi=dm.dpi, facecolor='w', edgecolor='k')
return fig
# ---
def annotate_orientation(dm):
logmsg('Please select an orientation of the torso (Click Two Points on the Image)')
try:
# Compute an angle from user interaction
sys.stdout.flush()
fig = dm.get_figure()
pts = np.array(fig.ginput(2))
logdbg('GInput Points are: '+str(pts))
# Get reference point to origin
refpt = pts[0] - pts[1]
#theta = np.math.atan2(refpt[1], refpt[0])
theta = np.math.atan(refpt[1]/refpt[0])
logmsg('The angle in radians is: '+str(theta))
return theta
except Exception as ex:
logmsg('Annotate Orientation Failed'+str(ex))
return None
def annotate_roi(dm):
logmsg('Please Select a Rectangular Region of Interest (Click Two Points on the Image)')
try:
sys.stdout.flush()
fig = dm.get_figure()
pts = fig.ginput(2)
logdbg('GInput Points are: '+str(pts))
(x1, y1, x2, y2) = (pts[0][0], pts[0][1], pts[1][0], pts[1][1])
xm = min(x1,x2)
xM = max(x1,x2)
ym = min(y1,y2)
yM = max(y1,y2)
(x, y, w, h) = (xm, ym, xM-xm, yM-ym)
roi = array(round([x,y,w,h]),dtype=uint32)
logmsg('The new ROI is: '+str(roi))
return roi
except Exception as ex:
logmsg('Annotate ROI Failed'+str(ex))
return None
# ---
def end_draw(dm):
#gray()
logdbg('Finalizing Draw with '+str(len(dm.ax_list))+' axes')
fig = dm.get_figure()
#fig.subplots_adjust(hspace=0.2, wspace=0.2)
#fig.tight_layout(pad=.3, h_pad=None, w_pad=None)
#fig.tight_layout()
        if dm.draw_prefs.in_qtc_bit:
            try:
                from IPython.display import display
                display(fig)
            except Exception:
                logwarn('Cannot Draw in QTConsole')
fig.show()
dm.hs.uim.redraw_gui()
fig.canvas.draw()
#draw()
# ---
def save_fig(dm, save_file):
dm.end_draw()
fig = dm.get_figure()
fig.savefig(save_file, format='png')
# ---
def add_images(dm, img_list, title_list=[]):
fig = dm.get_figure(); fig.clf()
num_images = len(img_list)
#
dm.ax_list = [None]*num_images
title_list = title_list + ['NoTitle']*(num_images-len(title_list))
# Fit Images into a grid
max_columns = min(num_images, max(1,dm.draw_prefs.num_result_cols))
if max_columns == 0: max_columns = 1
nr = int( ceil( float(num_images)/max_columns) )
nc = max_columns if num_images >= max_columns else 1
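        # Lay the images out on an nr x nc grid, at most max_columns wide.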
#
gs = gridspec.GridSpec( nr, nc )
        for i in range(num_images):
#logdbg(' Adding the '+str(i)+'th Image')
#logdbg(' * type(img_list[i]): %s'+str(type(img_list[i])))
#logdbg(' * img_list[i].shape: %s'+str(img_list[i].shape))
dm.ax_list[i] = fig.add_subplot(gs[i])
imgplot = dm.ax_list[i].imshow(img_list[i])
imgplot.set_cmap('gray')
dm.ax_list[i].get_xaxis().set_ticks([])
dm.ax_list[i].get_yaxis().set_ticks([])
dm.ax_list[i].set_title(title_list[i])
# transData: data coordinates -> display coordinates
# transAxes: axes coordinates -> display coordinates
# transLimits: data - > axes
#
#gs.tight_layout(fig)
logdbg('Added '+str(num_images)+' images/axes')
# ---
def _get_fpt_ell_collection(dm, fpts, T_data, alpha, edgecolor):
ell_patches = []
for (x,y,a,c,d) in fpts: # Manually Calculated sqrtm(inv(A))
with catch_warnings():
simplefilter("ignore")
aIS = 1/sqrt(a)
cIS = (c/sqrt(a) - c/sqrt(d))/(a - d + eps(1))
dIS = 1/sqrt(d)
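                # (aIS, 0; cIS, dIS) is effectively inv(sqrtm([[a, 0], [c, d]])) of the
                # lower-triangular keypoint shape matrix, so the affine below maps the
                # unit circle onto the keypoint's ellipse centred at (x, y).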
transEll = Affine2D([\
( aIS, 0, x),\
( cIS, dIS, y),\
( 0, 0, 1)])
unitCirc1 = Circle((0,0),1,transform=transEll)
ell_patches = [unitCirc1] + ell_patches
ellipse_collection = PatchCollection(ell_patches)
ellipse_collection.set_facecolor('none')
ellipse_collection.set_transform(T_data)
ellipse_collection.set_alpha(alpha)
ellipse_collection.set_edgecolor(edgecolor)
return ellipse_collection
def draw_chiprep2(dm, cx, axi=0, fsel=None, in_image_bit=False, axi_color=0,
bbox_bit=None,
ell_alpha=None,
ell_bit=None,
xy_bit=None,
color=None,
qcm=None,
**kwargs):
'''
Draws a chip representation over an already drawn chip
cx - the chiprep to draw. Managed by the chip manager
axi - the axis index to draw it in
#TODO: in_image_bit becomes data_coordinates
in_image_bit - are the data coordinates image or rotated chip?
                       draw the chip by itself or in its original image
axi_color - use the color associated with axis index
                     (used for plotting queries)
---
Others are preference overloads
bbox_bit -
ell_alpha
ell_bit
xy_bit
ell_color
'''
# Allows display of cross database queries
cm = dm.hs.cm if qcm is None else qcm
# Grab Preferences
xy_bit = dm.draw_prefs.points_bit if xy_bit is None else xy_bit
ell_bit = dm.draw_prefs.ellipse_bit if ell_bit is None else ell_bit
bbox_bit = dm.draw_prefs.bbox_bit if bbox_bit is None else bbox_bit
ell_alpha = dm.draw_prefs.ellipse_alpha if ell_alpha is None else ell_alpha
# Make sure alpha in range [0,1]
if ell_alpha > 1: ell_alpha = 1.0
if ell_alpha < 0: ell_alpha = 0.0
# Get color from colormap or overloaded parameter
if color is None:
color = plt.get_cmap('hsv')(float(axi_color)/len(dm.ax_list))[0:3]
if axi_color == 0:
color = [color[0], color[1]+.5, color[2]]
# Axis We are drawing to.
ax = dm.ax_list[axi]
T_data = ax.transData # data coordinates -> display coordinates
# Data coordinates are chip coords
        if xy_bit or ell_bit or fsel is not None:
T_fpts = T_data if not in_image_bit else\
Affine2D(cm.cx2_T_chip2img(cx) ) + T_data
fpts = cm.get_fpts(cx)
if fsel is None: fsel = range(len(fpts))
# ---DEVHACK---
# Randomly sample the keypoints. (Except be sneaky)
elif fsel == 'rand':
# Get Relative Position
minxy = fpts.min(0)[0:2]
maxxy = fpts.max(0)[0:2]
rel_pos = (fpts[:,0]-minxy[0])/(maxxy[0]-minxy[0])
to_zero = 1 - np.abs(rel_pos - .5)/.5
pdf = (to_zero / to_zero.sum())
# Transform Relative Position to Probabilities
# making it more likely to pick a centerpoint
            fsel = np.random.choice(range(len(fpts)), size=88, replace=False, p=pdf)
# ---/DEVHACK---
# Plot ellipses
if ell_bit and len(fpts) > 0 and len(fsel) > 0:
ells = dm._get_fpt_ell_collection(fpts[fsel,:],
T_fpts,
ell_alpha,
color)
ax.add_collection(ells)
# Plot xy points
if xy_bit and len(fpts) > 0 and len(fsel) > 0:
ax.plot(fpts[fsel,0], fpts[fsel,1], 'o',\
markeredgecolor=color,\
markerfacecolor=color,\
transform=T_fpts,\
markersize=2)
# ===
if bbox_bit:
# Draw Bounding Rectangle in Image Coords
[rx,ry,rw,rh] = cm.cx2_roi[cx]
rxy = (rx,ry)
# Convert to Chip Coords if needbe
T_bbox = T_data if in_image_bit else\
Affine2D( np.linalg.inv(cm.cx2_T_chip2img(cx)) ) + T_data
bbox = Rectangle(rxy,rw,rh,transform=T_bbox)
# Visual Properties
bbox.set_fill(False)
bbox.set_edgecolor(color)
ax.add_patch(bbox)
# Draw Text Annotation
cid = cm.cx2_cid[cx]
name = cm.cx2_name(cx)
# Lower the value to .2 for the background color and set alpha=.7
rgb_textFG = [1,1,1]
hsv_textBG = colorsys.rgb_to_hsv(*color)[0:2]+(.2,)
rgb_textBG = colorsys.hsv_to_rgb(*hsv_textBG)+(.7,)
# Draw Orientation Backwards
degrees = 0 if not in_image_bit else -cm.cx2_theta[cx]*180/np.pi
txy = (0,rh) if not in_image_bit else (rx, ry+rh)
chip_text = 'name='+name+'\n'+'cid='+str(cid)
ax.text(txy[0]+1, txy[1]+1, chip_text,
horizontalalignment ='left',
verticalalignment ='top',
transform =T_data,
rotation =degrees,
color =rgb_textFG,
backgroundcolor =rgb_textBG)
return fsel
|
from django.db import models
from model_utils.models import TimeStampedModel
from companies.models import Company
class InterviewStage(TimeStampedModel):
name = models.CharField(max_length=100)
position = models.IntegerField(null=True)
tag = models.CharField(blank=True, max_length=15)
company = models.ForeignKey(Company, null=True)
class Meta:
ordering = ['position']
def __unicode__(self):
return self.name
def change_position(self, delta):
delta = int(delta)
if self.position == 1 and delta == -1:
return
try:
other_stage = InterviewStage.objects.get(
company=self.company,
position=self.position + delta
)
except InterviewStage.DoesNotExist:
# Should not happen
return
other_stage.position = self.position
other_stage.save()
self.position = self.position + delta
self.save()
def prepare_for_deletion(self):
stages_to_modify = InterviewStage.objects.filter(
company=self.company, position__gt=self.position
)
for stage in stages_to_modify:
stage.position -= 1
stage.save()
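# --- Illustrative usage sketch (not part of the original model) ---
# change_position swaps a stage with its neighbour and prepare_for_deletion
# closes the gap before deleting; the objects below are hypothetical fixtures.
#
#   stage = InterviewStage.objects.get(company=some_company, position=2)
#   stage.change_position(-1)      # swap with the stage currently at position 1
#   stage.change_position(+1)      # swap back
#   stage.prepare_for_deletion()   # shift later stages down by one
#   stage.delete()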
|
#login token for discord
token = 'Enter token here'
#If you don't have an api_key for LoL, you can get one at https://developer.riotgames.com
lol_api_key = 'Enter league of legends api key here'
region = 'Enter a region here (na, br, kr, etc.)'
#set to true if you want to use lolfantasy
enable_fantasy = False
#phantomjs directory (i.e. D:\\phantomjs-2.x.x\\bin\\phantomjs.exe)
directory='Enter phantomjs directory here'
|
import math
from datetime import datetime
from collections import defaultdict
import numpy as np
import h5py
import pandas as pd
from exetera.core import exporter, persistence, utils, session, fields
from exetera.core.persistence import DataStore
from exetera.processing.nat_medicine_model import nature_medicine_model_1
from exetera.processing.method_paper_model import method_paper_model
"""
Goal
* show whether mask wearing helps with exposure
* must separate out preventative / remedial action
* is the person sick and if so, when
* has the person been predicted as having covid and if so, when
* has the person had a positive covid test result and if so, when
* preventative
* healthy
* not healthy but not yet covid
* remedial
* having had covid
* must separate out confounding factors
* background risk
* non-health worker / non-covid health worker / covid health worker
Process
1. generate results for tested people
1. filter out people without tests
n. generate results for predicted people
"""
def ppe_use_and_travel(ds, src, tmp, start_timestamp):
logging = True
s_asmts = src['assessments']
if 'filtered_assessments' not in tmp.keys():
f_asmts = tmp.create_group('filtered_assessments')
cats = ds.get_reader(s_asmts['created_at'])
asmt_filter = cats[:] >= start_timestamp
ccs = ds.get_reader(s_asmts['country_code'])
asmt_filter = asmt_filter & (ccs[:] == b'GB')
symptom_keys = ('persistent_cough', 'fatigue', 'delirium', 'shortness_of_breath',
'fever', 'diarrhoea', 'abdominal_pain', 'chest_pain', 'hoarse_voice',
'skipped_meals', 'loss_of_smell')
mask_keys = ('mask_cloth_or_scarf', 'mask_surgical', 'mask_n95_ffp')
isolation_keys = ('isolation_healthcare_provider', 'isolation_little_interaction',
'isolation_lots_of_people')
other_keys = ('patient_id', )
symptom_thresholds = {s: 2 for s in symptom_keys}
symptom_thresholds.update({m: 2 for m in mask_keys})
symptom_thresholds['fatigue'] = 3
symptom_thresholds['shortness_of_breath'] = 3
for k in symptom_keys + mask_keys + isolation_keys + other_keys:
with utils.Timer("filtering {}".format(k)):
reader = ds.get_reader(s_asmts[k])
if k in mask_keys + symptom_keys:
values = np.where(reader[:] >= symptom_thresholds[k], 1, 0)
ds.get_numeric_writer(f_asmts, k, 'int8').write(
ds.apply_filter(asmt_filter, values))
hist = np.unique(reader[:], return_counts=True)
print(sorted(zip(hist[0], hist[1])))
hist = np.unique(values, return_counts=True)
print(sorted(zip(hist[0], hist[1])))
else:
reader.get_writer(f_asmts, k).write(ds.apply_filter(asmt_filter, reader))
print('filtered assessments:', np.count_nonzero(asmt_filter), len(asmt_filter))
#
#
# if 'filtered_assessment_predictions' not in tmp.keys():
# f_pred_asmts = tmp.create_group('filtered_assessment_predictions')
symptom_readers = dict()
for s in symptom_keys:
symptom_readers[s] = ds.get_reader(f_asmts[s])
predictions = ds.get_numeric_writer(f_asmts, 'prediction', 'float32')
method_paper_model(ds, symptom_readers, predictions)
predictions = ds.get_reader(f_asmts['prediction'])
print('predictions:', np.count_nonzero(predictions[:] > 0), len(predictions))
if 'patient_assessment_summaries' not in tmp.keys():
asmt_psum = tmp.create_group('patient_assessment_summaries')
pids = ds.get_reader(f_asmts['patient_id'])
mcos = ds.get_reader(f_asmts['mask_cloth_or_scarf'])
msurg = ds.get_reader(f_asmts['mask_surgical'])
m95 = ds.get_reader(f_asmts['mask_n95_ffp'])
with utils.Timer("generating patient_id spans"):
asmt_spans = ds.get_spans(field=pids[:])
for k in mask_keys:
with utils.Timer("getting per patient mask summary for {}".format(k)):
writer = ds.get_numeric_writer(asmt_psum, k, 'int8')
ds.apply_spans_max(asmt_spans, ds.get_reader(f_asmts[k])[:], writer)
print(sorted(utils.build_histogram(ds.get_reader(asmt_psum[k])[:])))
for k in isolation_keys:
with utils.Timer("getting per patient isolation summary for {}".format(k)):
writer = ds.get_numeric_writer(asmt_psum, k, 'int32')
ds.apply_spans_max(asmt_spans, ds.get_reader(f_asmts[k])[:], writer)
print(sorted(utils.build_histogram(ds.get_reader(asmt_psum[k])[:])))
with utils.Timer("getting prediction maxes for patients"):
p_predictions = predictions.get_writer(asmt_psum, 'prediction')
ds.apply_spans_max(asmt_spans, predictions, p_predictions)
p_predictions = ds.get_reader(asmt_psum['prediction'])
positives = p_predictions[:] > 0
print("max covid prediction:", np.count_nonzero(positives), len(positives))
with utils.Timer("getting patient ids from assessments"):
writer = pids.get_writer(asmt_psum, 'patient_id')
writer.write(pd.unique(pids[:]))
else:
asmt_psum = tmp['patient_assessment_summaries']
s_ptnts = src['patients']
print(s_ptnts.keys())
pdf = pd.DataFrame({'id': ds.get_reader(s_ptnts['id'])[:],
'hwwc': ds.get_reader(s_ptnts['health_worker_with_contact'])[:]})
adf = pd.DataFrame({'patient_id': ds.get_reader(asmt_psum['patient_id'])[:]})
jdf = pd.merge(left=adf, right=pdf, left_on='patient_id', right_on='id', how='left')
print(len(jdf['hwwc']))
class TestResults:
def __init__(self):
self.positive = 0
self.total = 0
def add(self, result):
if result:
self.positive += 1
self.total += 1
results = defaultdict(TestResults)
positives = ds.get_reader(asmt_psum['prediction'])[:]
positives = positives > 0
mask_0 = ds.get_reader(asmt_psum['mask_cloth_or_scarf'])[:]
mask_1 = ds.get_reader(asmt_psum['mask_surgical'])[:]
mask_2 = ds.get_reader(asmt_psum['mask_n95_ffp'])[:]
# mask = mask_0 | mask_1 | mask_2
mask = mask_0
print(np.unique(mask, return_counts=True))
isol_lots = ds.get_reader(asmt_psum['isolation_lots_of_people'])[:]
isol_lots_7 = np.where(isol_lots > 7, 7, isol_lots)
print(np.unique(isol_lots_7, return_counts=True))
print(len(mask), len(positives), len(isol_lots_7))
# isolation lots of users
for i_r in range(len(mask)):
results[(isol_lots_7[i_r], mask[i_r])].add(positives[i_r])
groupings = sorted(list((r[0], (r[1].positive, r[1].total)) for r in results.items()))
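# each printed row: (isolation_level, mask_flag) key, positive count, total count, positive rate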
for g in groupings:
print(g[0], g[1][0], g[1][1], g[1][0] / g[1][1])
def ppe_use_and_travel_2(ds, src, dest, start_ts):
ds = session.Session()
s_ptnts = src['patients']
s_asmts = src['assessments']
print(s_asmts.keys())
s_tests = src['tests']
if 'filtered_patients' not in dest.keys():
f_ptnts = dest.create_group('filtered_patients')
f_asmts = dest.create_group('filtered_assessments')
f_tests = dest.create_group('filtered_tests')
# calculate patient first positives
raw_p_ids = ds.get(s_ptnts['id']).data[:]
raw_p_acts = ds.get(s_ptnts['assessment_count']).data[:]
raw_a_pids = ds.get(s_asmts['patient_id']).data[:]
raw_t_pids = ds.get(s_tests['patient_id']).data[:]
# filter out anyone without assessments
patient_filter = raw_p_acts > 0
print("patient_filter:",
np.count_nonzero(patient_filter), np.count_nonzero(patient_filter == 0))
# filter patients
f_p_ids = ds.get(s_ptnts['id']).create_like(f_ptnts, 'id')
f_p_ids.data.write(ds.apply_filter(patient_filter, raw_p_ids))
# filter out any orphaned assessments
with utils.Timer("fk in pk"):
assessment_filter = persistence.foreign_key_is_in_primary_key(raw_p_ids, raw_a_pids)
print("assessment_filter:",
np.count_nonzero(assessment_filter), np.count_nonzero(assessment_filter == False))
f_a_pids = ds.get(s_asmts['patient_id']).create_like(f_asmts, 'patient_id')
f_a_pids.data.write(ds.apply_filter(assessment_filter, raw_a_pids))
for k in ('created_at', 'tested_covid_positive'):
field = ds.get(s_asmts[k]).create_like(f_asmts, k)
field.data.write(ds.apply_filter(assessment_filter, ds.get(s_asmts[k]).data[:]))
# filter out any orphaned tests
test_filter = persistence.foreign_key_is_in_primary_key(raw_p_ids, raw_t_pids)
print("test_filter:",
np.count_nonzero(test_filter), np.count_nonzero(test_filter == False))
f_t_pids = ds.get(s_tests['patient_id']).create_like(f_tests, 'patient_id')
f_t_pids.data.write(ds.apply_filter(test_filter, raw_t_pids))
else:
f_ptnts = dest['filtered_patients']
f_asmts = dest['filtered_assessments']
f_tests = dest['filtered_tests']
f_p_ids = ds.get(f_ptnts['id'])
f_a_pids = ds.get(f_asmts['patient_id'])
f_t_pids = ds.get(f_tests['patient_id'])
# calculate the shared set of indices for assessments / tests back to patients
with utils.Timer("get_shared_index"):
p_inds, a_pinds, t_pinds = ds.get_shared_index((f_p_ids, f_a_pids, f_t_pids))
print(max(p_inds.max(), a_pinds.max(), t_pinds.max()))
# now filter only assessments with positive test results
pos_asmt_tests = ds.get(f_asmts['tested_covid_positive']).data[:] == 3
print("old tests positive:",
np.count_nonzero(pos_asmt_tests), np.count_nonzero(pos_asmt_tests == False))
# now filter only tests with positive test results
s_asmts = src['assessments']
a_cats = ds.get(f_asmts['created_at'])
asmt_filter = a_cats.data[:] >= start_ts
print(np.count_nonzero(asmt_filter), len(asmt_filter))
raw_a_cats = ds.apply_filter(asmt_filter, a_cats.data[:])
a_days = np.zeros(len(raw_a_cats), dtype=np.int32)
start_dt = datetime.fromtimestamp(start_ts)
for i_r in range(len(raw_a_cats)):
a_days[i_r] = (datetime.fromtimestamp(raw_a_cats[i_r]) - start_dt).days
print(sorted(utils.build_histogram(a_days)))
if __name__ == '__main__':
datastore = DataStore()
src_file = '/home/ben/covid/ds_20200901_full.hdf5'
dest_file = '/home/ben/covid/ds_20200901_ppe.hdf5'
with h5py.File(src_file, 'r') as src_data:
with h5py.File(dest_file, 'w') as dest_data:
start_timestamp = datetime.timestamp(datetime(2020, 6, 12))
ppe_use_and_travel_2(datastore, src_data, dest_data, start_timestamp)
|
import unittest
import sklearn_extra
from nlpatl.models.clustering import SkLearnExtraClustering
class TestModelClusteringSkLearnExtra(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.train_features = [
[0, 0],
[0, 1],
[0, 2],
[0, 3],
[0, 4],
[0, 5],
[1, 0],
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
]
cls.train_labels = [0, 0, 1, 1, 0, 0, 2, 2, 1, 1, 2, 1]
def test_parameters(self):
clustering = SkLearnExtraClustering()
assert 8 == clustering.model.n_clusters, "Invalid when using default parameters"
model_config = {}
clustering = SkLearnExtraClustering(model_config=model_config)
assert 8 == clustering.model.n_clusters, "Invalid when passing emtpy parameters"
model_config = {"n_clusters": 4}
clustering = SkLearnExtraClustering(model_config=model_config)
assert 4 == clustering.model.n_clusters, "Invalid when passing parameter"
clustering = SkLearnExtraClustering(model_name="kmedoids")
assert (
type(clustering.model) is sklearn_extra.cluster._k_medoids.KMedoids
), "Unable to initialize KMedoids"
def test_cluster(self):
clustering = SkLearnExtraClustering()
clustering.train(self.train_features)
result = clustering.predict_proba(self.train_features)
num_actual_class = clustering.model.n_clusters
num_expected_class = len(set(result.groups))
assert (
num_actual_class == num_expected_class
), "{} expected clusters is different from {} actual clusters".format(
num_actual_class, num_expected_class
)
assert result.groups, "Missed groups attribute"
assert result.values is not None, "Missed values attribute"
assert result.indices is not None, "Missed indices attribute"
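# Sketch (not part of the original tests): typical standalone usage of the
# wrapper exercised above; the feature values here are arbitrary.
#   clustering = SkLearnExtraClustering(model_name="kmedoids", model_config={"n_clusters": 3})
#   clustering.train([[0, 0], [0, 1], [1, 0], [1, 1]])
#   result = clustering.predict_proba([[0, 0], [1, 1]])
#   result.groups, result.values, result.indices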
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-08-02 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_user_name'),
]
operations = [
migrations.AddField(
model_name='archer',
name='junior_masters_age',
field=models.CharField(blank=True, choices=[('', 'None'), ('U21', 'U21'), ('U18', 'U18'), ('U14', 'U14'), ('U12', 'U12')], default='', max_length=3),
),
]
|
from solvers.ea_solver import EASolver
from solvers.math import interpolate_signal, ls_fit
from solvers.tests.base_test_case import BaseTestCase
from solvers.tests.correction_models import linear_correction
class TestEASolver(BaseTestCase):
def setUp(self):
super().setUp()
self.solver = EASolver(x=self.x,
pure_components=self.pure_components,
correction_model=linear_correction,
rsme_threshold=0.1,
fit_function=ls_fit)
def test_no_x_axis_errors_should_pass(self) -> None:
self.run_test(self.mixture_signal)
def test_offset_error_should_pass(self) -> None:
x_distorted = self.x + 2
signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)
self.run_test(signal)
def test_slope_error_should_pass(self) -> None:
x_distorted = 1.01 * self.x
signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)
self.run_test(signal)
def test_slope_and_offset_error_should_pass(self) -> None:
x_distorted = 1.01 * self.x - 2
signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)
self.run_test(signal)
|
import numpy as np
import scipy.optimize
import scipy.special
from scipy.interpolate import approximate_taylor_polynomial
# -----------------------------------------------------------------------------
class StringPolynomial:
'''
Representation of a polynomial using a python string which specifies a numpy function,
and an integer giving the desired polynomial's degree.
'''
def __init__(self, funcstr, poly_deg):
'''
funcstr: (str) specification of function using "x" as the argument, e.g. "np.where(x<0, -1 ,np.where(x>0,1,0))"
The function should accept a numpy array as "x"
poly_deg: (int) degree of the polynomial to be used to approximate the specified function
'''
self.funcstr = funcstr
self.poly_deg = int(poly_deg)
try:
self.__call__(0.5)
except Exception as err:
raise ValueError(
f"Invalid function specifciation, failed to evaluate at x=0.5, err={err}")
def degree(self):
return self.poly_deg
def __call__(self, arg):
ret = eval(self.funcstr, globals(), {'x': arg})
return ret
def target(self, arg):
return self.__call__(arg)
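# Example (sketch, not part of the original module): wrapping a numpy expression
# string; the requested approximation degree (6) is an arbitrary illustration.
#   sp = StringPolynomial("np.sin(3*x)", 6)
#   sp(0.5)        # evaluates np.sin(3 * 0.5)
#   sp.degree()    # -> 6
#   sp.target(0.5) # same as sp(0.5)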
# -----------------------------------------------------------------------------
class TargetPolynomial(np.polynomial.Polynomial):
'''
Polynomial with ideal target
'''
def __init__(self, *args, target=None, scale=None, **kwargs):
'''
target = function which accepts argument and gives ideal response, e.g. lambda x: x**2
scale = metadata about scale of polynomial
'''
self.target = target
self.scale = scale
super().__init__(*args, **kwargs)
# -----------------------------------------------------------------------------
class PolyGenerator:
'''
Abstract base class for polynomial generators
'''
def __init__(self, verbose=True):
self.verbose = verbose
return
def help(self):
'''
return help text
'''
return "help text about the expected polynomial arguments"
def generate(self):
'''
return list of floats specifying the [const, a, a^2, ...] coefficients of the polynomial
'''
return [0, 0]
# -----------------------------------------------------------------------------
class PolyCosineTX(PolyGenerator):
def help(self):
return "Used for Hamiltonian simultion for time tau. Error is epsilon"
def generate(
self,
tau=10.,
epsilon=0.1,
return_coef=True,
ensure_bounded=True,
return_scale=False):
'''
Approximation to cos(tx) polynomial, using sums of Chebyshev
polynomials, from Optimal Hamiltonian Simulation by Quantum Signal
Processing by Low and Chuang,
https://arxiv.org/abs/1606.02685
ensure_bounded: True if polynomial should be normalized to be between
+/- 1
'''
r = scipy.optimize.fsolve(lambda r: (
np.e * np.abs(tau) / (2 * r))**r - (5 / 4) * epsilon, tau)[0]
print(r)
R = np.floor(r / 2).astype(int)
R = max(R, 1)
print(f"R={R}")
g = scipy.special.jv(0, tau) * np.polynomial.chebyshev.Chebyshev([1])
for k in range(1, R + 1):
gcoef = 2 * scipy.special.jv(2 * k, tau)
deg = 2 * k
g += (-1)**k * gcoef * \
np.polynomial.chebyshev.Chebyshev([0] * deg + [1])
if ensure_bounded:
scale = 0.5
g = scale * g
print(f"[PolyCosineTX] rescaling by {scale}.")
if return_coef:
pcoefs = np.polynomial.chebyshev.cheb2poly(g.coef)
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
return g
class PolySineTX(PolyGenerator):
def help(self):
return "Used for Hamiltonian simultion for time tau. Error is epsilon"
def generate(
self,
tau=10.,
epsilon=0.1,
return_coef=True,
ensure_bounded=True,
return_scale=False):
'''
Approximation to sin(tx) polynomial, using sums of Chebyshev
polynomials, from Optimal Hamiltonian Simulation by Quantum Signal
Processing by Low and Chuang,
https://arxiv.org/abs/1606.02685
ensure_bounded: True if polynomial should be normalized to be between
+/- 1
'''
r = scipy.optimize.fsolve(lambda r: (
np.e * np.abs(tau) / (2 * r))**r - (5 / 4) * epsilon, tau)[0]
print(r)
R = np.floor(r / 2).astype(int)
R = max(R, 1)
print(f"R={R}")
g = np.polynomial.chebyshev.Chebyshev([0])
for k in range(0, R + 1):
gcoef = 2 * scipy.special.jv(2 * k + 1, tau)
deg = 2 * k + 1
g += (-1)**k * gcoef * \
np.polynomial.chebyshev.Chebyshev([0] * deg + [1])
if ensure_bounded:
scale = 0.5
g = scale * g
print(f"[PolySineTX] rescaling by {scale}.")
if return_coef:
pcoefs = np.polynomial.chebyshev.cheb2poly(g.coef)
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
return g
# -----------------------------------------------------------------------------
class PolyOneOverX(PolyGenerator):
def help(self):
return "Region of validity is from 1/kappa to 1, and from -1/kappa to -1. Error is epsilon"
def generate(
self,
kappa=3,
epsilon=0.1,
return_coef=True,
ensure_bounded=True,
return_scale=False):
'''
Approximation to 1/x polynomial, using sums of Chebyshev polynomials,
from Quantum algorithm for systems of linear equations with exponentially
improved dependence on precision, by Childs, Kothari, and Somma,
https://arxiv.org/abs/1511.02306v2
Define region D_kappa to be from 1/kappa to 1, and from -1/kappa to -1. A good
approximation is desired only in this region.
ensure_bounded: True if polynomial should be normalized to be between +/- 1
'''
b = int(kappa**2 * np.log(kappa / epsilon))
j0 = int(np.sqrt(b * np.log(4 * b / epsilon)))
print(f"b={b}, j0={j0}")
g = np.polynomial.chebyshev.Chebyshev([0])
for j in range(j0 + 1):
gcoef = 0
for i in range(j + 1, b + 1):
gcoef += scipy.special.binom(2 * b, b + i) / 2**(2 * b)
deg = 2 * j + 1
g += (-1)**j * gcoef * \
np.polynomial.chebyshev.Chebyshev([0] * deg + [1])
g = 4 * g
if ensure_bounded:
res = scipy.optimize.minimize(g, (-0.1,), bounds=[(-0.8, 0.8)])
pmin = res.x
print(
f"[PolyOneOverX] minimum {g(pmin)} is at {pmin}: normalizing")
scale = 1 / abs(g(pmin))
if 0:
scale = scale * 0.9
else:
scale = scale * 0.5
print("[PolyOneOverX] bounding to 0.5")
g = scale * g
if return_coef:
if 1:
pcoefs = np.polynomial.chebyshev.cheb2poly(g.coef)
else:
pcoefs = g.coef
print(f"[pyqsp.PolyOneOverX] pcoefs={pcoefs}")
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
return g
class PolyOneOverXRect(PolyGenerator):
def help(self):
return "Region of validity is from 1/kappa to 1, and from -1/kappa to -1. Error is epsilon"
def generate(
self,
degree=6,
delta=2,
kappa=3,
epsilon=0.1,
ensure_bounded=True,
return_scale=False):
coefs_invert, scale1 = PolyOneOverX().generate(kappa=2 * kappa,
                                               epsilon=epsilon,
                                               ensure_bounded=ensure_bounded,
                                               return_scale=True)
coefs_rect, scale2 = PolyRect().generate(degree=degree,
                                         delta=delta,
                                         kappa=kappa,
                                         epsilon=epsilon,
                                         ensure_bounded=ensure_bounded,
                                         return_scale=True)
poly_invert = np.polynomial.Polynomial(coefs_invert)
poly_rect = np.polynomial.Polynomial(coefs_rect)
pcoefs = (poly_invert * poly_rect).coef
if return_scale:
return pcoefs, scale1 * scale2
else:
return pcoefs
# -----------------------------------------------------------------------------
class PolyTaylorSeries(PolyGenerator):
'''
Base class for PolySign and PolyThreshold
'''
def taylor_series(
self,
func,
degree,
ensure_bounded=True,
return_scale=False,
npts=100,
max_scale=0.5):
'''
Return numpy Polynomial approximation for func, constructed using
taylor series, of specified degree.
Evaluate approximation using mean absolute difference on npts points in
the domain from -1 to 1.
'''
the_poly = approximate_taylor_polynomial(func, 0, degree, 1)
the_poly = np.polynomial.Polynomial(the_poly.coef[::-1])
if ensure_bounded:
res = scipy.optimize.minimize(-the_poly, (0.1,), bounds=[(-1, 1)])
pmax = res.x
scale = 1 / abs(the_poly(pmax))
# use this for the new QuantumSignalProcessingWxPhases code, which
# employs np.polynomial.chebyshev.poly2cheb(pcoefs)
scale = scale * max_scale
print(f"[PolyTaylorSeries] max {scale} is at {pmax}: normalizing")
the_poly = scale * the_poly
adat = np.linspace(-1, 1, npts)
pdat = the_poly(adat)
edat = func(adat)
avg_err = abs(edat - pdat).mean()
print(
f"[PolyTaylorSeries] average error = {avg_err} in the domain [-1, 1] using degree {degree}")
if ensure_bounded and return_scale:
return the_poly, scale
else:
return the_poly
# -----------------------------------------------------------------------------
class PolySign(PolyTaylorSeries):
def help(self):
return "approximation to the sign function using erf(delta*a) ; given delta"
def generate(
self,
degree=7,
delta=2,
ensure_bounded=True,
return_scale=False):
'''
Approximation to sign function, using erf(delta * x)
'''
degree = int(degree)
print(f"[pyqsp.poly.PolySign] degree={degree}, delta={delta}")
if not (degree % 2):
raise Exception("[PolyErf] degree must be odd")
def erf_delta(x):
return scipy.special.erf(x * delta)
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
erf_delta,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
else:
the_poly = self.taylor_series(
erf_delta,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
pcoefs = the_poly.coef
# force even coefficients to be zero, since the polynomial must be odd
pcoefs[0::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return TargetPolynomial(pcoefs, target=lambda x: np.sign(x))
class PolyThreshold(PolyTaylorSeries):
def help(self):
return "approximation to a thresholding function at threshold 1/2, using linear combination of erf(delta * a); give degree and delta"
def generate(self,
degree=6,
delta=2,
ensure_bounded=True,
return_scale=False):
'''
Approximation to threshold function at a=1/2; use a bandpass built from two erf's
'''
degree = int(degree)
print(f"[pyqsp.poly.PolyThreshold] degree={degree}, delta={delta}")
if (degree % 2):
raise Exception("[PolyThreshold] degree must be even")
def erf_delta(x):
return scipy.special.erf(x * delta)
def threshold(x):
return (erf_delta(x + 0.5) - erf_delta(x - 0.5)) / 2
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
threshold,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
else:
the_poly = self.taylor_series(
threshold,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
class PolyPhaseEstimation(PolyTaylorSeries):
def help(self):
return "phase estimation polynomial given "
def generate(self,
degree=6,
delta=2,
ensure_bounded=True,
return_scale=False):
'''
Symmetric window function with transitions near a = +/- 1/sqrt(2), built from two erf's
'''
degree = int(degree)
print(f"[pyqsp.poly.PolyThreshold] degree={degree}, delta={delta}")
if (degree % 2):
raise Exception("[PolyThreshold] degree must be even")
def erf_delta(x):
return scipy.special.erf(x * delta)
def threshold(x):
return (-1 + erf_delta(1/np.sqrt(2) - x) + erf_delta(1/np.sqrt(2) + x))
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
threshold,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
else:
the_poly = self.taylor_series(
threshold,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
class PolyRect(PolyTaylorSeries):
def help(self):
return "approximation to a thresholding function at threshold 1/2, using linear combination of erf(delta * a); give degree and delta"
def generate(self,
degree=6,
delta=2,
kappa=3,
epsilon=0.1,
ensure_bounded=True,
return_scale=False):
'''
Approximation to a rect (window) function centered at a=0, built from two erf's
'''
degree = int(degree)
print(f"[pyqsp.poly.PolyThreshold] degree={degree}, delta={delta}")
if (degree % 2):
raise Exception("[PolyThreshold] degree must be even")
k = np.sqrt(2) / delta * np.sqrt(np.log(2 / (np.pi * epsilon**2)))
def erf_delta(x):
return scipy.special.erf(x * k)
def rect(x):
return 1 + (erf_delta(x - 3 / (4 * kappa)) +
erf_delta(-x - 3 / (4 * kappa))) / 2
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
rect,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
else:
the_poly = self.taylor_series(
rect,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=0.9)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
# -----------------------------------------------------------------------------
class PolyLinearAmplification(PolyTaylorSeries):
def help(self):
return "approximates x/(2*gamma) in region (-2*gamma, 2*gamma) capped to +/- 1 outside for some constant gamma"
def generate(self,
degree=7,
gamma=0.25,
kappa=10,
ensure_bounded=True,
return_scale=False):
'''
Approximation to the truncated linear function described in Low's thesis (2017)
'''
degree = int(degree)
print(
f"[pyqsp.poly.PolyLinearAmplification] degree={degree}, gamma={gamma}")
if (degree % 2) != 1:
raise Exception("[PolyLinearAmplification] degree must be odd")
def erf_delta(x):
return scipy.special.erf(x * kappa)
def rect(x):
return (erf_delta(x + 2 * gamma) - erf_delta(x - 2 * gamma)) / 2
def linear_amplification(x):
return x * rect(x) / (2 * gamma)
result = self.taylor_series(
linear_amplification,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=1.)
if ensure_bounded and return_scale:
the_poly, scale = result
else:
the_poly = result
pcoefs = the_poly.coef
# force even coefficients to be zero, since the polynomial must be odd
pcoefs[0::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
# -----------------------------------------------------------------------------
class PolyGibbs(PolyTaylorSeries):
'''
exponential decay polynomial
'''
def help(self):
return "approximation to exp(-beta*a) ; specify degree and beta"
def generate(self,
degree=6,
beta=2,
ensure_bounded=True,
return_scale=False):
degree = int(degree)
print(f"[pyqsp.poly.PolyGibbs] degree={degree}, beta={beta}")
if (degree % 2):
raise Exception("[PolyGibbs] degree must be even")
def gibbs(x):
return np.exp(-beta * abs(x))
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
gibbs,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=1)
else:
the_poly = self.taylor_series(
gibbs,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=1)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return TargetPolynomial(pcoefs, target=lambda x: gibbs(x))
# -----------------------------------------------------------------------------
class PolyEigenstateFiltering(PolyTaylorSeries):
'''
Lin and Tong's eigenstate filtering polynomial
'''
def help(self):
return "Lin and Tong's eigenstate filtering polynomial ; specify degree, delta, max_scale"
def generate(
self,
degree=6,
delta=0.2,
max_scale=0.9,
ensure_bounded=True,
return_scale=False):
degree = int(degree)
print(f"[pyqsp.poly.PolyEfilter] degree={degree}, delta={delta}")
if (degree % 2):
raise Exception("[PolyEfilter] degree must be even")
def cheb(x):
Tk = np.polynomial.chebyshev.Chebyshev([0] * degree + [1])
return Tk(-1 + 2 * (x**2 - delta**2) / (1 - delta**2))
scale = 1 / cheb(0)
def efpoly(x):
return scale * cheb(x)
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
efpoly,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=max_scale)
else:
the_poly = self.taylor_series(
efpoly,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=max_scale)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
# -----------------------------------------------------------------------------
class PolyRelu(PolyTaylorSeries):
'''
Relu function
'''
def help(self):
return "symmetric Relu function sigma(|a-delta|) = 0 if |a| < delta, else |a|-delta ; specify degree, delta"
def generate(
self,
degree=6,
delta=0.2,
max_scale=0.99,
ensure_bounded=True):
degree = int(degree)
print(f"[pyqsp.poly.PolyRelu] degree={degree}, delta={delta}")
if (degree % 2):
raise Exception("[PolyRelu] degree must be even")
def cdf(x):
return (1 + scipy.special.erf(x / np.sqrt(2))) / 2
def gelu(x):
return abs(x) * cdf(abs(x) - delta)
the_poly = self.taylor_series(
gelu,
degree,
ensure_bounded=ensure_bounded,
max_scale=max_scale)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
return pcoefs
class PolySoftPlus(PolyTaylorSeries):
'''
SoftPlus function
'''
def help(self):
return "symmetric softplus function sigma(|a-delta|) = 0 if |a| < delta, else |a| ; specify degree, delta"
def generate(
self,
degree=6,
delta=0.2,
kappa=1,
max_scale=0.90,
ensure_bounded=True,
return_scale=False):
degree = int(degree)
print(
f"[pyqsp.poly.PolySoftPlus] degree={degree}, delta={delta}, kappa={kappa}")
if (degree % 2):
raise Exception("[PolySoftPlus] degree must be even")
def func(x):
return np.log(1 + np.exp(kappa * (abs(x) - delta))) / kappa
if ensure_bounded and return_scale:
the_poly, scale = self.taylor_series(
func,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=max_scale)
else:
the_poly = self.taylor_series(
func,
degree,
ensure_bounded=ensure_bounded,
return_scale=return_scale,
max_scale=max_scale)
pcoefs = the_poly.coef
# force odd coefficients to be zero, since the polynomial must be even
pcoefs[1::2] = 0
if ensure_bounded and return_scale:
return pcoefs, scale
else:
return pcoefs
# -----------------------------------------------------------------------------
polynomial_generators = {'invert': PolyOneOverX,
'poly_sign': PolySign,
'poly_thresh': PolyThreshold,
'gibbs': PolyGibbs,
'efilter': PolyEigenstateFiltering,
'relu': PolyRelu,
'softplus': PolySoftPlus,
}
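# Example usage (sketch, not part of the original module): each generator is
# instantiated without arguments and asked for monomial-basis coefficients via
# generate(); the keyword values below are arbitrary illustrations.
#   pcoefs = polynomial_generators['invert']().generate(kappa=3, epsilon=0.1)
#   poly = np.polynomial.Polynomial(pcoefs)
#   poly(0.5)   # approximates a rescaled 1/x at x = 0.5 (valid for |x| >= 1/kappa)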
|
# -*- coding: utf-8 -*-
# import json
# import csv
import unittest
from com_lib.file_functions import delete_file
class Test(unittest.TestCase):
def test_clean_up(self):
files = [
"test_data_test_user.json",
"test_data_todos.json",
"test_data_users.json",
"test_data_group.json",
"test_data_group_user.json",
]
for f in files:
delete_file(f)
|
#coding: latin1
#< full
from algoritmia.problems.knapsack import branch_and_bound_knapsack3
v, w, W = [11, 16, 13, 1, 11], [3, 5, 6, 3, 6], 6
x, score = branch_and_bound_knapsack3(v, w, W)
print(x, score)
#> full
|
"""
Inputs --> 4 int values a, b, c and d that we will convert to str
a --> str --> a
b --> str --> b
c --> str --> c
d --> str --> d
Output --> 1 int value rounded to its nearest hundred
e --> int --> e
"""
# Inputs
a = str(input("\nEscribe el valor de a "))
b = str(input("Escribe el valor de b "))
c = str(input("Escribe el valor de c "))
d = str(input("Escribe el valor de d "))
# Black box
e = a + b + c + d
e = int(e)
if e <= (int(a) * 1000 + int(b)*100 + 50):
e = e - int(c + d)
else:
De = e - ((int(a)*1000)+ (int(b)*100))
e = e + 100 - De
# Output
print(f"\nEl resultado redondeado es {e}\n")
|
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import pickle
import torch
from GENRE.genre.trie import Trie
print("LOAD TRIE")
# load the prefix tree (trie)
with open("kilt_titles_trie_dict.pkl", "rb") as f:
trie = Trie.load_from_dict(pickle.load(f))
print(trie.get(""))
device = 2
gen_len = 10
model_checkpoint = "gpt2"
print("LOAD MODEL")
model = AutoModelForCausalLM.from_pretrained(model_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model.to(device)
print("GENERATE")
input_ids = tokenizer("I love cats", return_tensors='pt')
input_len = len(input_ids['input_ids'][0])
with torch.no_grad():
output = model.generate(
input_ids = input_ids['input_ids'].to(device),
max_length=input_len+gen_len,
num_beams=1,
prefix_allowed_tokens_fn=lambda batch_id, sent: trie.get(sent.tolist())
)
response = tokenizer.decode(output[0][input_len:])
print(response)
|
"""
Run this script to prepare the train.csv data for analysis in Caffe.
Afterwards, run this command to train the model:
../caffe/build/tools/caffe train -solver bnp_paribas/data/caffe/nonlinear_solver.prototxt
Based on http://nbviewer.jupyter.org/github/BVLC/caffe/blob/master/examples/02-brewing-logreg.ipynb
"""
import os
import h5py
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
# avoid scientic notation when printing matrices
np.set_printoptions(suppress=True)
# load data from CSV
train = pd.read_csv(
'./bnp_paribas/data/train.csv',
index_col=0)
test = pd.read_csv(
'./bnp_paribas/data/test.csv',
index_col=0)
# combine the datasets to apply the same feature engineering to both
all_data = pd.concat((train, test))
max_categories = 100
# feature v107 is a duplicate of v91
del all_data['v107']
# v71 and v75 can be combined
all_data.loc[:, 'v71'] = all_data['v71'].apply(lambda x: x if x else 'other')
# convert categorical columns into a series of binary dummy columns
for column in all_data.columns:
if str(all_data.dtypes[column]) == 'object':
if len(all_data[column].unique()) > max_categories:
# limit the number of dummies to `max_categories`
to_keep = list(all_data[column].value_counts()[:max_categories].index)
to_keep.append(np.nan)
# use a unique category for overflow, not NaN
all_data.loc[:, column] = all_data[column].apply(lambda x: x if x in to_keep else 'other')
all_data.loc[:, column] = all_data[column].astype('category')
dummies = pd.get_dummies(all_data[column], prefix=column, dummy_na=True)
all_data = pd.concat((all_data, dummies), axis=1)
del all_data[column]
# replace NaN with a number
# I'm using 0, as the caffe ReLU layer ensures coefficients
# are positive and the data is already regularized into the range 0-20.
filled_data = all_data.fillna(0)
# split back into Kaggle train / test sets
filled_train = filled_data.iloc[:len(train)]
filled_test = filled_data.iloc[len(train):]
# split the Kaggle test data into train/test data
X, Xt, y, yt = train_test_split(filled_train.iloc[:, 1:], filled_train.iloc[:, 0], test_size=0.1)
# before submitting results, train on the whole dataset
# X, y = filled_train.iloc[:, 1:], filled_train.iloc[:, 0]
# Xt, yt = filled_train.iloc[:, 1:], filled_train.iloc[:, 0]
dirname = os.path.abspath('./bnp_paribas/data')
train_filename = os.path.join(dirname, 'train.h5')
test_filename = os.path.join(dirname, 'test.h5')
comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
with h5py.File(train_filename, 'w') as f:
f.create_dataset('data', data=X, **comp_kwargs)
f.create_dataset('label', data=y.astype(np.float32), **comp_kwargs)
with h5py.File(test_filename, 'w') as f:
f.create_dataset('data', data=Xt, **comp_kwargs)
f.create_dataset('label', data=yt.astype(np.float32), **comp_kwargs)
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
f.write(train_filename + '\n')
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
f.write(test_filename + '\n')
# write out the feature engineered test dataset for use in the prediction step
filled_test.to_csv(os.path.join(dirname, 'test_augmented.csv'))
|
"""Tests for param_grid
"""
# Authors: Lyubomir Danov <->
# License: -
import pytest
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import ParameterGrid
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from ..param_grid import generate_param_grid
def get_test_case():
pipeline_steps = {
'preprocessor': {'skip': None},
'classifier': {
'svm': SVC(probability=True),
'rf': RandomForestClassifier()
}
}
params_dict = {
'skip': {},
'svm': {'C': [1, 10, 100],
'gamma': [.01, .1],
'kernel': ['rbf']},
'rf': {'n_estimators': [1, 10, 100],
'max_features': [1, 5, 10, 20]}
}
return pipeline_steps, params_dict
_exp_grid_out = [
{
'preprocessor': [None],
'classifier': [SVC(probability=True)],
'classifier__C': [1, 10, 100],
'classifier__gamma': [0.01, 0.1],
'classifier__kernel': ['rbf']
},
{
'preprocessor': [None],
'classifier': [RandomForestClassifier()],
'classifier__n_estimators': [1, 10, 100],
'classifier__max_features': [1, 5, 10, 20]
}
]
def test_generate_param_grid():
steps, pdict = get_test_case()
exp_params = _exp_grid_out
exp_names = list(steps.keys())
params, stepnames = generate_param_grid(steps, pdict)
assert len(params) == 2
assert len(stepnames) == len(exp_names)
assert isinstance(params[0], dict)
assert isinstance(params[1], dict)
assert stepnames == exp_names
for d1, d2 in zip(params, exp_params):
for key in d1.keys():
if key != "classifier":
assert d1[key] == d2[key]
else:
assert d1[key].__class__ == d2[key].__class__
for key, value in pdict['svm'].items():
if key == 'pipe_step_instance':
continue
else:
assert value == params[0]['classifier__' + key]
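# Sketch (not part of the original tests): the generated list of grids is meant
# to be consumed by sklearn, e.g. with a Pipeline whose step names match
# `stepnames`; the estimator choices below are illustrative only.
#   pipe = Pipeline([('preprocessor', None), ('classifier', SVC())])
#   candidates = list(ParameterGrid(params))     # expand to concrete settings
#   search = GridSearchCV(pipe, params, cv=3)    # a list of grids is accepted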
|
import torch
import torch.nn as nn
from pytorchBaselines.a2c_ppo_acktr.convgru_model import ConvGRU
from pytorchBaselines.a2c_ppo_acktr.distributions import (
Bernoulli,
Categorical,
DiagGaussian,
)
from pytorchBaselines.a2c_ppo_acktr.srnn_model import SRNN
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Policy(nn.Module):
def __init__(self, obs_shape, action_space, base=None, base_kwargs=None):
super(Policy, self).__init__()
if base_kwargs is None:
    base_kwargs = {}
# default both recurrent-base flags so act() / get_value() work for either base
self.srnn = False
self.convgru = False
if base == "srnn":
base = SRNN
self.base = base(obs_shape, base_kwargs)
self.srnn = True
elif base == "convgru":
base = ConvGRU
self.base = base(obs_shape, base_kwargs)
self.convgru = True
else:
raise NotImplementedError
if base == ConvGRU:
dist_input_sz = self.base.actor.fc2.out_features
else:
dist_input_sz = self.base.output_size
if action_space.__class__.__name__ == "Discrete":
num_outputs = action_space.n
self.dist = Categorical(dist_input_sz, num_outputs)
elif action_space.__class__.__name__ == "Box":
num_outputs = action_space.shape[0]
self.dist = DiagGaussian(dist_input_sz, num_outputs)
elif action_space.__class__.__name__ == "MultiBinary":
num_outputs = action_space.shape[0]
self.dist = Bernoulli(dist_input_sz, num_outputs)
else:
raise NotImplementedError
@property
def is_recurrent(self):
return self.base.is_recurrent
@property
def recurrent_hidden_state_size(self):
"""Size of rnn_hx."""
return self.base.recurrent_hidden_state_size
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def act(self, inputs, rnn_hxs, masks, deterministic=False):
if not hasattr(self, "srnn"):
self.srnn = False
if self.srnn:
value, actor_features, rnn_hxs = self.base(
inputs, rnn_hxs, masks, infer=True
)
elif self.convgru:
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
else:
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action, action_log_probs, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
if self.srnn:
value, _, _ = self.base(inputs, rnn_hxs, masks, infer=True)
elif self.convgru:
value, _, _ = self.base(inputs, rnn_hxs, masks)
return value
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
|
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import itertools
import os
from random import choice
import discord
from __main__ import send_cmd_help
from discord.ext import commands
from cogs.utils.dataIO import dataIO
PATH = os.path.join("data", "crdatae")
CLASH_ROYALE_JSON = os.path.join(PATH, "clashroyale.json")
def grouper(n, iterable, fillvalue=None):
"""Group lists into lists of items.
grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def random_discord_color():
"""Return random color as an integer."""
color = ''.join([choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
return discord.Color(value=color)
class BotEmoji:
"""Emojis available in bot."""
def __init__(self, bot):
self.bot = bot
def name(self, name):
"""Emoji by name."""
for server in self.bot.servers:
for emoji in server.emojis:
if emoji.name == name:
return '<:{}:{}>'.format(emoji.name, emoji.id)
return ''
def key(self, key):
"""Chest emojis by api key name or key.
name is used by this cog.
key is values returned by the api.
Use key only if name is not set
"""
if key in self.map:
name = self.map[key]
return self.name(name)
return ''
class ClashRoyale:
"""Clash Royale Data."""
instance = None
class __ClashRoyale:
"""Singleton."""
def __init__(self, *args, **kwargs):
"""Init."""
self.data = dataIO.load_json(CLASH_ROYALE_JSON)
def __init__(self, *args, **kwargs):
"""Init."""
if not ClashRoyale.instance:
ClashRoyale.instance = ClashRoyale.__ClashRoyale(*args, **kwargs)
else:
pass
def __getattr__(self, name):
return getattr(self.instance, name)
def card_elixir(self, card):
""""Elixir of a card."""
try:
return self.data["Cards"][card]["elixir"]
except KeyError:
return 0
class Card():
"""Clash Royale Card."""
def __init__(self, key=None, level=None):
"""Init.
Params
+ name (str). Key in the ClashRoyale.json
"""
self.key = key
self.level = level
@property
def elixir(self):
"""Elixir value."""
return ClashRoyale().card_elixir(self.key)
def emoji(self, be: BotEmoji):
"""Emoji representation of the card."""
if self.key is None:
return ''
name = self.key.replace('-', '')
return be.name(name)
class Deck():
"""Clash Royale Deck.
Contains 8 cards.
"""
def __init__(self, card_keys=None, card_levels=None, rank=0, usage=0):
"""Init.
Params
+ rank (int). Rank on the leaderboard.
+ cards []. List of card ids (keys in ClashRoyale.json).
+ card_levels []. List of card levels.
"""
self.rank = rank
self.usage = usage
self.cards = [Card(key=key) for key in card_keys]
if card_levels is not None:
kl_zip = zip(card_keys, card_levels)
self.cards = [Card(key=k, level=l) for k, l in kl_zip]
@property
def avg_elixir(self):
"""Average elixir of the deck."""
elixirs = [c.elixir for c in self.cards if c.elixir != 0]
return sum(elixirs) / len(elixirs)
@property
def avg_elixir_str(self):
"""Average elixir with format."""
return 'Average Elixir: {:.3}'.format(self.avg_elixir)
def emoji_repr(self, be: BotEmoji, show_levels=False):
"""Emoji representaion."""
out = []
for card in self.cards:
emoji = card.emoji(be)
level = card.level
level_str = ''
if show_levels and level is not None:
level_str = '`{:.<2}`'.format(level)
out.append('{}{}'.format(emoji, level_str))
return ''.join(out)
def __repr__(self):
return ' '.join([c.key for c in self.cards])
class CRDataEnhanced:
"""Clash Royale Data - Enchanced options.
Requires CRData cog to function.
"""
error_msg = {
"requires_crdata": (
"The CRData cog is not installed or loaded. "
"This cog cannot function without it."
)
}
def __init__(self, bot):
"""Init."""
self.bot = bot
self.be = BotEmoji(bot)
self.clashroyale = ClashRoyale().data
self.per_page = 10
@commands.group(pass_context=True, no_pm=True)
async def crdatae(self, ctx):
"""Clash Royale Real-Time Global 200 Leaderboard."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@crdatae.command(name="leaderboard", aliases=['lb'], pass_context=True, no_pm=True)
async def crdatae_leaderboard(self, ctx):
"""Leaderboard."""
crdata = self.bot.get_cog('CRData')
if crdata is None:
await self.bot.say(self.error_msg["requires_crdata"])
return
data = crdata.get_last_data()
# decks = data["decks"]
decks = []
for rank, deck in enumerate(data["decks"], 1):
# fix: bad data
if deck is not None:
cards = [crdata.sfid_to_id(card["key"]) for card in deck]
levels = [card["level"] for card in deck]
decks.append(Deck(card_keys=cards, card_levels=levels, rank=rank))
# embeds
per_page = self.per_page
decks_group = list(grouper(per_page, decks))
color = random_discord_color()
for em_id, decks in enumerate(decks_group):
em = self.embed_decks_leaderboard(
decks,
page=(em_id + 1),
title="Clash Royale: Global Top 200 Decks",
color=color,
footer_text="Data provided by http://starfi.re"
)
await self.bot.say(embed=em)
if em_id < len(decks_group) - 1:
show_next = await self.show_next_page(ctx)
if not show_next:
await self.bot.say("Search results aborted.")
break
def embed_decks_leaderboard(self, decks, **kwargs):
"""Show embed decks.
Params:
+ page. Current page.
+ per_page. Number of results per page.
+ All parameters supported by Discord Embeds.
"""
em = discord.Embed(**kwargs)
page = kwargs.get('page', 1)
per_page = kwargs.get('per_page', self.per_page)
show_usage = kwargs.get('show_usage', False)
footer_text = kwargs.get('footer_text', '')
for deck_id, deck in enumerate(decks):
if deck is not None:
usage_str = ''
if deck.usage and show_usage:
usage_str = '(Usage: {})'.format(deck.usage)
field_name = "Rank {} {}".format(deck.rank, usage_str)
field_value = '{}\n{}'.format(
deck.emoji_repr(self.be, show_levels=True),
deck.avg_elixir_str)
em.add_field(name=field_name, value=field_value)
em.set_footer(text=footer_text)
return em
@crdatae.command(name="search", pass_context=True, no_pm=True)
async def crdatae_search(self, ctx, *cards):
"""Search decks.
1. Include card(s) to search for
!crdatae search fb log
2. Exclude card(s) to search for (use - as prefix)
!crdatae search golem -lightning
3. Elixir range (add elixir=min-max)
!crdatae search hog elixir=0-3.2
e.g.: Find 3M Hog decks without battle ram under 4 elixir
!crdatae search 3m hog -br elixir=0-4
"""
if not len(cards):
await self.bot.say("You must enter at least one card.")
await send_cmd_help(ctx)
return
crdata = self.bot.get_cog('CRData')
if crdata is None:
await self.bot.say(self.error_msg["requires_crdata"])
return
found_decks = await crdata.search(ctx, *cards)
if found_decks is None:
await self.bot.say("Found 0 decks.")
return
if not len(found_decks):
await self.bot.say("Found 0 decks.")
return
decks = []
for fd in found_decks:
card_keys = [crdata.sfid_to_id(card["key"]) for card in fd["deck"]]
deck = Deck(card_keys=card_keys, rank=fd["ranks"][0], usage=fd["count"])
decks.append(deck)
per_page = self.per_page
decks_group = list(grouper(per_page, decks))
color = random_discord_color()
for page, decks_page in enumerate(decks_group):
em = self.embed_decks_search(
decks_page,
page=(page + 1),
title="Clash Royale: Global Top 200 Decks",
description="Found {} decks.".format(len(decks)),
color=color,
footer_text="Data provided by http://starfi.re",
show_usage=False
)
await self.bot.say(embed=em)
if page < len(decks_group) - 1:
show_next = await self.show_next_page(ctx)
if not show_next:
await self.bot.say("Search results aborted.")
break
def embed_decks_search(self, decks, **kwargs):
"""Show embed decks.
Params:
+ page. Current page.
+ per_page. Number of results per page.
+ All parameters supported by Discord Embeds.
"""
em = discord.Embed(**kwargs)
page = kwargs.get('page', 1)
per_page = kwargs.get('per_page', self.per_page)
show_usage = kwargs.get('show_usage', False)
footer_text = kwargs.get('footer_text', '')
for deck_id, deck in enumerate(decks):
if deck is not None:
result_number = per_page * (page - 1) + (deck_id + 1)
usage_str = ''
if deck.usage and show_usage:
usage_str = '(Usage: {})'.format(deck.usage)
field_name = "{}: Rank {} {}".format(result_number, deck.rank, usage_str)
field_value = '{}\n{}'.format(
deck.emoji_repr(self.be, show_levels=True),
deck.avg_elixir_str)
em.add_field(name=field_name, value=field_value)
em.set_footer(text=footer_text)
return em
async def show_next_page(self, ctx):
"""Results pagination."""
timeout = 30
await self.bot.say(
"Would you like to see more results? (y/n)")
answer = await self.bot.wait_for_message(
timeout=timeout,
author=ctx.message.author)
if answer is None:
return False
elif not len(answer.content):
return False
elif answer.content[0].lower() != 'y':
return False
return True
def card_elixir(self, card):
"""Return elixir of a card."""
elixir = 0
try:
elixir = self.clashroyale["Cards"][card]["elixir"]
except KeyError:
pass
return elixir
def setup(bot):
"""Setup bot."""
n = CRDataEnhanced(bot)
bot.add_cog(n)
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = environ.Path(__file__) - 3 # (ctrack/config/settings/base.py - 3 = ctrack/)
APPS_DIR = ROOT_DIR.path("ctrack")
# CACHES = {
# "default": {
# "BACKEND": "django.core.cache.backends.db.DatabaseCache",
# "LOCATION": "ctrack_cache_table",
# }
# }
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR("static"))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR.path("static"))]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR("media"))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR.path("templates"))],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"ctrack.utils.context_processors.settings_context",
],
},
}
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR.path("fixtures")),)
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Update the secret key to a value of your own before deploying the app.
SECRET_KEY = 'lldtg$9(wi49j_hpv8nnqlh!cj7kmbwq0$rj7vy(b(b30vlyzj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: App Engine's security features ensure that it is safe to
# have ALLOWED_HOSTS = ['*'] when the app is deployed. If you deploy a Django
# app not on App Engine, make sure to set an appropriate host here.
# See https://docs.djangoproject.com/en/2.1/ref/settings/
ALLOWED_HOSTS = ['*']
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
ROOT_DIR = environ.Path(__file__) - 3 # (ctrack/config/settings/base.py - 3 = ctrack/)
APPS_DIR = ROOT_DIR.path("ctrack")
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=True)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path(".env")))
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR("media"))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Matt Lemon""", "matt@matthewlemon.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR("static"))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR.path("static"))]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'allauth',
'allauth.account',
"allauth.socialaccount",
"ctrack.users.apps.UsersConfig",
"ctrack.organisations.apps.OrganisationsConfig",
"ctrack.caf.apps.CafConfig",
"ctrack.register.apps.RegisterConfig",
"ctrack.assessments.apps.AssessmentsConfig",
"ctrack.core.apps.CoreConfig",
"ctrack.compliance.apps.ComplianceConfig",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'config.middleware.LoginRequiredMiddleware'
]
LOGIN_REDIRECT_URL = "users:redirect"
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
LOGIN_URL = "/accounts/login"
LOGIN_EXEMPT_URLS = (
r"^account/login/$",
r"^account/logout/$"
)
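# ``config.middleware.LoginRequiredMiddleware`` itself is not part of this file; the
# sketch below is only an assumption about how such a middleware typically consumes
# LOGIN_EXEMPT_URLS and LOGIN_URL, not the actual implementation:
#
#   import re
#   from django.conf import settings
#   from django.shortcuts import redirect
#
#   class LoginRequiredMiddleware:
#       def __init__(self, get_response):
#           self.get_response = get_response
#           self.exempt = [re.compile(pattern) for pattern in settings.LOGIN_EXEMPT_URLS]
#
#       def __call__(self, request):
#           path = request.path_info.lstrip("/")
#           if request.user.is_authenticated or any(p.match(path) for p in self.exempt):
#               return self.get_response(request)
#           return redirect(settings.LOGIN_URL)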
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Install PyMySQL as mysqlclient/MySQLdb to use Django's mysqlclient adapter
# See https://docs.djangoproject.com/en/2.1/ref/databases/#mysql-db-api-drivers
# for more information
import pymysql # noqa: 402
pymysql.version_info = (1, 4, 6, 'final', 0)  # report a mysqlclient version recent enough to pass Django's backend version check
pymysql.install_as_MySQLdb()
# [START db_setup]
if os.getenv('GAE_APPLICATION', None):
# Running on production App Engine, so connect to Google Cloud SQL using
# the unix socket at /cloudsql/<your-cloudsql-connection string>
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '/cloudsql/ctrack-291710:europe-west2:ctrack-alt',
'USER': 'ctrack',
'PASSWORD': os.getenv('DATABASE_PASSWORD'),
'NAME': 'ctrack',
}
}
elif os.getenv("LOCAL_CLOUD_PROXY", None):
# Running Cloud Proxy Locally. Make sure it's running separately.
# This allows for us to drop and create the database, update migrations, etc.
# Normal development happens with SQLite as described below.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'USER': 'ctrack',
'PORT': '3306',
'PASSWORD': os.getenv('DATABASE_PASSWORD'),
'NAME': 'ctrack',
}
}
else:
# Running locally so connect to either a local MySQL instance or connect to
# Cloud SQL via the proxy. To start the proxy via command line:
#
# $ cloud_sql_proxy -instances=[INSTANCE_CONNECTION_NAME]=tcp:3306
#
# See https://cloud.google.com/sql/docs/mysql-connect-proxy
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(ROOT_DIR, 'ctrack.db'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'ctrack',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# [END db_setup]
# Use a local SQLite database when testing in CI systems
if os.getenv('TRAMPOLINE_CI', None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT_DIR, 'db.sqlite3')
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa: 501
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa: 501
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa: 501
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa: 501
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# STATIC_ROOT = 'static'
# STATIC_URL = '/static/'
|
# -*- coding: utf-8 -*-
"""Tests for eccemotus_lib.py."""
import unittest
import eccemotus.eccemotus_lib as eccemotus
class EccemotusTest(unittest.TestCase):
"""Tests for eccemotus library."""
def test_CreateGraph(self):
"""Tests graph creation."""
event = {
u'__container_type__': u'event',
u'__type__': u'AttributeContainer',
u'computer_name': u'REGISTRAR.internal.greendale.edu',
u'data_type': u'windows:evtx:record',
u'display_name': u'TSK:/Windows/System32/winevt/Logs/Security.evtx',
u'event_identifier': 4624,
u'event_level': 0,
u'filename': u'/Windows/System32/winevt/Logs/Security.evtx',
u'inode': 0,
u'message_identifier': 4624,
u'offset': 0,
u'parser': u'winevtx',
u'pathspec': {
u'location': u'/media/greendale_images/registrar.dd'},
u'record_number': 3803,
u'sha256_hash':
u'47387ab429ebbac1ae96162143783d1f5dab692f1311fc92ec212166347f9404',
u'source_name': u'Microsoft-Windows-Security-Auditing',
u'store_index': 5610,
u'store_number': 56,
u'strings': [
u'S-1-0-0', u'-', u'-', u'0x0000000000000000', u'S-1-5-7',
u'ANONYMOUS LOGON', u'NT AUTHORITY', u'0x0000000000094a1b', u'3',
u'NtLmSsp ', u'NTLM', u'STUDENT-PC1',
u'{00000000-0000-0000-0000-000000000000}', u'-', u'NTLM V1',
u'128', u'0x0000000000000000', u'-', u'192.168.1.11', u'49192'],
u'timestamp': 1440409600617570,
u'timestamp_desc': u'Content Modification Time',
u'username': u'-',
u'uuid': u'a85d856591d94678a555bda3d1efff54'
}
graph = eccemotus.GetGraph([event])
self.assertEqual(len(graph.nodes), 6)
self.assertEqual(len(graph.edges), 8)
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import Gaffer
import GafferUI
## The Layouts class provides a registry of named layouts for use
# in the ScriptWindow. To allow different gaffer applications to
# coexist happily in the same process (for instance to run both
# an asset management app and a shading app inside maya), separate
# sets of layouts are maintained on a per-application basis. Access
# to the layouts for a specific application is provided by the
# Layouts.acquire() method.
class Layouts( object ) :
## Typically acquire() should be used in preference
# to this constructor.
def __init__( self ) :
self.__namedLayouts = {}
self.__registeredEditors = []
## Acquires the set of layouts for the specified application.
@classmethod
def acquire( cls, applicationOrApplicationRoot ) :
if isinstance( applicationOrApplicationRoot, Gaffer.Application ) :
applicationRoot = applicationOrApplicationRoot.root()
else :
assert( isinstance( applicationOrApplicationRoot, Gaffer.ApplicationRoot ) )
applicationRoot = applicationOrApplicationRoot
try :
return applicationRoot.__layouts
except AttributeError :
pass
applicationRoot.__layouts = Layouts()
return applicationRoot.__layouts
## Serialises the passed Editor and stores it using the given name. This
# layout can then be recreated using the create() method below.
def add( self, name, editor ) :
if not isinstance( editor, basestring ) :
editor = repr( editor )
self.__namedLayouts[name] = editor
## Removes a layout previously stored with add().
def remove( self, name ) :
del self.__namedLayouts[name]
## Returns a list of the names of currently defined layouts
def names( self ) :
return self.__namedLayouts.keys()
## Recreates a previously stored layout for the specified script,
# returning it in the form of a CompoundEditor.
def create( self, name, scriptNode ) :
layout = self.__namedLayouts[name]
# first try to import the modules the layout needs
contextDict = { "scriptNode" : scriptNode }
imported = set()
classNameRegex = re.compile( "[a-zA-Z]*Gaffer[^(,]*\(" )
for className in classNameRegex.findall( layout ) :
moduleName = className.partition( "." )[0]
if moduleName not in imported :
exec( "import %s" % moduleName, contextDict, contextDict )
imported.add( moduleName )
return eval( layout, contextDict, contextDict )
## Saves all layouts whose name matches the optional regular expression into the file object
# specified. If the file is later evaluated during application startup, it will reregister
# the layouts with the application.
## \todo Remove this method and follow the model in Bookmarks.py, where user bookmarks
# are saved automatically. This wasn't possible when Layouts.py was first introduced,
# because at that point in time, the Layouts class didn't have access to an application.
def save( self, fileObject, nameRegex = None ) :
# decide what to write
namesToWrite = []
for name in self.names() :
			if nameRegex is None or nameRegex.match( name ) :
namesToWrite.append( name )
# write the necessary import statement and acquire the layouts
fileObject.write( "import GafferUI\n\n" )
fileObject.write( "layouts = GafferUI.Layouts.acquire( application )\n\n" )
# finally write out the layouts
for name in namesToWrite :
fileObject.write( "layouts.add( {0}, {1} )\n\n".format( repr( name ), repr( self.__namedLayouts[name] ) ) )
# tidy up by deleting the temporary variable, keeping the namespace clean for
# subsequently executed config files.
fileObject.write( "del layouts\n" )
## The Editor factory provides access to every single registered subclass of
# editor, but specific applications may wish to only provide a subset of those
# editors to the user. This method is used from config files to define the subset
# of editors to use in the application.
def registerEditor( self, editorName ) :
if editorName not in self.__registeredEditors :
self.__registeredEditors.append( editorName )
## Deregisters a previously registered editor, this makes it unavailable to the
# user when creating new layouts.
def deregisterEditor( self, editorName ) :
self.__registeredEditors.remove( editorName )
## Returns the names of all currently registered editors.
def registeredEditors( self ) :
return self.__registeredEditors
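# A minimal usage sketch (hypothetical; assumes it runs in a Gaffer config/startup file
# where an ``application`` variable is in scope, mirroring the files written by save()):
#
#   import GafferUI
#   layouts = GafferUI.Layouts.acquire( application )
#   layouts.add( "Minimal", "GafferUI.CompoundEditor( scriptNode )" )
#   editor = layouts.create( "Minimal", scriptNode )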
|
from .LtccPayments import LtccPayments
from .LtccPayments import load_ltcc_recipients
from .LtccRecipient import LtccRecipient
|
from ..constants import FORMAT_CHECKS
from ..post import HEADER_OPTIONAL_FIELD_TYPES, HEADER_REQUIRED_FIELD_TYPES
from ..postprocessor import KnowledgePostProcessor
class FormatChecks(KnowledgePostProcessor):
_registry_keys = [FORMAT_CHECKS]
def process(self, kp):
headers = kp.headers
for field, typ, _ in HEADER_REQUIRED_FIELD_TYPES:
assert field in headers, \
f"Required field `{field}` missing from headers."
for field, typ, _ in \
HEADER_REQUIRED_FIELD_TYPES + HEADER_OPTIONAL_FIELD_TYPES:
if field in headers:
header_field = headers[field]
assert isinstance(header_field, typ), \
(f"Value for field `{field}` is of type "
f"{type(header_field)}, and needs to be of type {typ}.")
|
''' Connection_validation protocol
'''
class Connection_validation:
'''
Connection_validation protocol object
'''
def __init__(self):
'''
Initialize a Connection_validation object
Keyword arguments:
self: object
'''
self.num_tasks = 100
self.task_populate_method = 'json_upload'
self.unit = 'body_xyz'
self.optional_properties = ['note', 'group', 'source']
self.allowable_filters = []
# self.no_assignment = True
self.required_task_props = []
self.task_insert_props = []
def parse_tasks(self, ipd):
'''
Given a tasks list, put it in a format we can use
self: object
ipd: input parameters
'''
if 'points' not in ipd:
return "connection_validation requires a task list"
elif 'body_id' not in ipd:
return "connection_validation protocol requires a body_id"
elif not isinstance(ipd['points'], (list)):
return "points payload must be an array of arrays"
ipd['tasks'] = dict()
if 'source' not in ipd and 'software' in ipd and ipd['software']:
ipd['source'] = ipd['software']
for pnt in ipd['points']:
name = '_'.join([str(i) for i in pnt])
if 'body_id' in ipd:
name = '_'.join([str(ipd['body_id']), name])
ipd['tasks'][name] = {}
return None
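# A minimal usage sketch with made-up values, showing the payload shape parse_tasks()
# expects and the task names it derives from it ('example_tool' is a hypothetical source):
if __name__ == '__main__':
    proto = Connection_validation()
    ipd = {'body_id': 1234,
           'points': [[100, 200, 300], [101, 201, 301]],
           'software': 'example_tool'}
    error = proto.parse_tasks(ipd)
    print(error)         # None on success
    print(ipd['tasks'])  # {'1234_100_200_300': {}, '1234_101_201_301': {}}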
|
from django.contrib import admin
from .models import DcmToBidsJson
from .models import Session
from .models import Subject
# Register your models here.
admin.site.register(Session)
admin.site.register(Subject)
admin.site.register(DcmToBidsJson)
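# If per-model customisation is needed later, the decorator form is the usual idiom
# (sketch only; the ModelAdmin option shown is a placeholder):
#
#   @admin.register(Session)
#   class SessionAdmin(admin.ModelAdmin):
#       list_display = ("id",)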
|
# ========================================================================= #
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================= #
from .standard import Standard
class ConvertingRunner(Standard):
"""
This class represents a standard model for running quantum circuits and adding in errors.
"""
def __init__(self, seed=None, simulator=None, converter=None):
"""
Args:
            seed: Seed for the underlying random number generator.
            simulator: Backend simulator used to execute circuits.
            converter: Optional object with a ``compile(circuit)`` method that is applied
                to each circuit before it is run.
"""
super().__init__(seed, simulator)
self.converter = converter
def run_circuit(self, state, circuit, give_output=False, removed_locations=None, gate_dict=None):
"""
Apply a ``QuantumCircuit`` directly to a state without output.
Args:
state:
circuit:
give_output:
removed_locations:
gate_dict:
Returns:
"""
# Note: The circuit could be a tick or a full circuit.
if self.converter:
new_circuit = self.converter.compile(circuit)
else:
new_circuit = circuit
result = super().run_circuit(state, new_circuit, give_output=give_output, removed_locations=removed_locations,
gate_dict=gate_dict)
return result
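# The ``converter`` only needs to expose a ``compile(circuit)`` method. A minimal
# pass-through sketch (hypothetical, not part of this package) illustrating that contract:
#
#   class IdentityConverter:
#       def compile(self, circuit):
#           return circuit
#
#   runner = ConvertingRunner(seed=0, converter=IdentityConverter())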
|
# Copyright 2019-2020 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eiffel activity canceled."""
import graphene
from ..base import EiffelObjectType
from ..lib.generic import json_schema_to_graphql, load
# pylint: disable=too-few-public-methods
class ActivityCanceled(EiffelObjectType):
"""Eiffel activity canceled object type."""
data = json_schema_to_graphql(
"ActivityCanceledData",
load("EiffelActivityCanceledEvent.json").get("data").get("properties"),
)
mongo = None
def __init__(self, mongo):
"""Initialize mongo instance."""
# pylint:disable=super-init-not-called
self.mongo = mongo
class ActivityCanceledConnection(graphene.Connection):
"""Activity canceled connection."""
class Meta:
"""Graphene meta data."""
node = ActivityCanceled
|
import discord
from discord.ext import commands
from discord.commands import Option, slash_command
import json
with open ('././config/guilds.json', 'r') as f:
data = json.load(f)
guilds = data['guilds']
class NitroView(discord.ui.View):
def __init__(self, msg: discord.Message, ctx: commands.Context):
super().__init__(timeout=30)
self.msg = msg
self.ctx = ctx
@discord.ui.button(label="Claim", style=discord.ButtonStyle.success, emoji="<:nitro:914110236707680286>")
async def button_callback(self, button: discord.ui.Button, interaction: discord.Interaction):
if interaction.user != self.ctx.author:
embed = discord.Embed(description=f"<:error:897382665781669908> You can't do that {interaction.user.mention}!", color=discord.Color.red())
return await self.ctx.send(embed=embed, delete_after=5)
button.label = "Claimed"
button.style = discord.ButtonStyle.danger
button.emoji = "<:nitro:914110236707680286>"
button.disabled = True
await interaction.response.send_message(content="https://imgur.com/NQinKJB", ephemeral=True)
embed = discord.Embed(description=f"***<:nitro:914110236707680286> {self.ctx.author.mention} claimed the nitro!***", color=discord.Color.nitro_pink())
embed.set_image(url="https://media.discordapp.net/attachments/886639021772648469/903535585992523796/unknown.png")
await self.msg.edit(embed=embed, view=self)
async def on_timeout(self):
for child in self.children:
if child.disabled:
return
for child in self.children:
child.disabled = True
embed = discord.Embed(description=f"**<:error:897382665781669908> Looks like either {self.ctx.author.mention} didn't wanna have it or {self.ctx.author.mention} went AFK**", color=discord.Color.red())
embed.set_image(url="https://media.discordapp.net/attachments/886639021772648469/903535585992523796/unknown.png")
await self.msg.edit(embed=embed, view=self)
class slashNitro(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(description="Generates a nitro link!", guild_ids=guilds)
async def nitro(self, ctx):
        interaction: discord.Interaction = ctx.interaction
embed = discord.Embed(description=f"**{ctx.author.mention} generated a nitro link!**", color=discord.Color.nitro_pink())
embed.set_image(url="https://media.discordapp.net/attachments/886639021772648469/903535585992523796/unknown.png")
await interaction.response.send_message(embed=embed)
message = await interaction.original_message()
await message.edit(embed=embed, view=NitroView(message, ctx))
def setup(bot):
bot.add_cog(slashNitro(bot))
|
import datetime
from asyncio.events import AbstractEventLoop
from typing import Generator, List, Union
from ..exceptions import VoyagerException
from .base import BaseResource
__all__ = [
'FireballResource',
]
class FireballRecord(object):
__slots__ = [
'_fc',
'_date',
'_lat',
'_lon',
'_lat_dir',
'_lon_dir',
'_alt',
'_vel',
'_energy',
'_impact_e',
'_vx',
'_vy',
'_vz',
]
_FIELDS = [
'date',
'lat',
'lon',
'lat-dir',
'lon-dir',
'alt',
'vel',
'energy',
'impact-e',
'vx',
'vy',
'vz',
]
_cache = {}
    def __init__(self, data: List[str], fields: List[str]) -> None:
        self._fc = self._FIELDS.copy()
        # Track unset fields on a copy so the class-level _FIELDS list is never mutated
        # (mutating it would break construction of subsequent records).
        missing = self._FIELDS.copy()
        for field, value in zip(fields, data):
            setattr(self, f"_{field.replace('-', '_')}", value)
            missing.remove(field)
        for unset in missing:
            setattr(self, f"_{unset.replace('-', '_')}", None)
    def __len__(self) -> int:
        # Number of fields populated from the input data.
        return sum(
            1 for field in self._fc
            if getattr(self, f"_{field.replace('-', '_')}") is not None
        )
@property
def date(self) -> Union[str, None]:
return self._date
@property
def datetime(self) -> Union[datetime.datetime, None]:
if not self._date:
return None
return datetime.datetime.strptime(self._date, "%Y-%m-%d %H:%M:%S")
@property
def lat(self) -> Union[float, None]:
if not self._lat:
return None
return float(self._lat)
@property
def latitude(self) -> Union[float, None]:
return self.lat
@property
def lon(self) -> Union[float, None]:
if not self._lon:
return None
return float(self._lon)
@property
def longitude(self) -> Union[float, None]:
return self.lon
@property
def lat_dir(self) -> Union[str, None]:
return self._lat_dir
@property
def latitude_dir(self) -> Union[str, None]:
return self.lat_dir
@property
def lon_dir(self) -> Union[str, None]:
return self._lon_dir
@property
def longitude_dir(self) -> Union[str, None]:
return self.lon_dir
@property
def alt(self) -> Union[float, None]:
if not self._alt:
return None
return float(self._alt)
@property
def altitude(self) -> Union[float, None]:
return self.alt
@property
def vel(self) -> Union[float, None]:
if not self._vel:
return None
return float(self._vel)
@property
def velocity(self) -> Union[float, None]:
return self.vel
@property
def energy(self) -> Union[float, None]:
if not self._energy:
return None
return float(self._energy)
@property
def impact_e(self) -> Union[float, None]:
if not self._impact_e:
return None
return float(self._impact_e)
@property
def impact_energy(self) -> Union[float, None]:
return self.impact_e
@property
def vx(self) -> Union[float, None]:
if not self._vx:
return None
return float(self._vx)
@property
def velocity_x(self) -> Union[float, None]:
return self.vx
@property
def vy(self) -> Union[float, None]:
if not self._vy:
return None
return float(self._vy)
@property
def velocity_y(self) -> Union[float, None]:
return self.vy
@property
def vz(self) -> Union[float, None]:
if not self._vz:
return None
        return float(self._vz)
@property
def velocity_z(self) -> Union[float, None]:
return self.vz
def _process_dict(self) -> dict:
return {field: getattr(self, f"_{field.replace('-', '_')}") for field in self._fc}
@property
def to_dict(self) -> dict:
if self not in self._cache:
self._cache[self] = self._process_dict()
return self._cache[self]
@classmethod
def from_dict(cls, data: dict) -> "FireballRecord":
if not all((key in cls._FIELDS for key in data)):
raise VoyagerException("Malformed input. Invalid key(s) supplied")
return cls([value for value in data.values()], [key for key in data])
class FireballResource(BaseResource):
__slots__ = [
'_signature',
'_count',
'_fields',
'_data',
]
_cache = {}
def __init__(self, data: dict,
loop: AbstractEventLoop = None) -> None:
super(FireballResource, self).__init__(data, loop=loop)
self._signature = data.get("signature")
self._count = data.get("count")
self._fields = data.get("fields")
self._data = data
def __len__(self) -> int:
return self.count
    def __iter__(self):
        # ``data`` may be None, a single FireballRecord, or a generator of records.
        data = self.data
        if data is None:
            return iter(())
        if isinstance(data, FireballRecord):
            return iter((data,))
        return data
@property
def signature(self) -> str:
return self._signature
@property
def source(self) -> str:
return self._signature.get("source")
@property
def version(self) -> str:
return self._signature.get("version")
@property
def count(self) -> int:
return int(self._count)
@property
def fields(self) -> List[str]:
return self._fields
    def _process_fb_data(self) -> Union[Generator[FireballRecord, None, None], FireballRecord, None]:
        # Return None for no data, a single record for one row, or a generator for many rows.
        if not (fb := self._data.get("data")):
            return None
        if len(fb) == 1:
            return FireballRecord(fb[0], self._fields)
        return (FireballRecord(values, self._fields) for values in fb)
@property
def data(self) -> Union[Generator[FireballRecord, None, None], FireballRecord, None]:
if self not in self._cache:
self._cache[self] = self._process_fb_data()
return self._cache[self]
@property
def to_dict(self) -> dict:
return self._data
@classmethod
def from_dict(cls, data: dict,
loop: AbstractEventLoop = None) -> "FireballResource":
return cls(data, loop=loop)
|
# Run with 'mpirun -n <N CPUs> python run_example.py'
from __future__ import division, print_function
import numpy as np
from parPDE import Simulator2D, LAPLACIAN
from BEC2D import BEC2D
nx_global = ny_global = 500
x_max_global = y_max_global = 10/np.sqrt(2)
simulator = Simulator2D(-x_max_global, x_max_global, -y_max_global, y_max_global, nx_global, ny_global,
periodic_x = True, periodic_y=True, operator_order=2)
bec2d = BEC2D(simulator, natural_units=True)
x = simulator.x
y = simulator.y
dx = simulator.dx
dy = simulator.dy
r = np.sqrt(x**2.0 + y**2.0)
V = 0.5 * r**2
def H(t, psi):
"""The Hamiltonian for single-component wavefunction psi. Returns the
kinetic term acting on psi and the local terms (not acting on psi)
separately."""
grad2psi = simulator.par_laplacian(psi)
H_local_lin = V
K_psi = -0.5*grad2psi
return K_psi, H_local_lin, 0
def groundstate_system(psi):
"""The system of equations Ax = b to be solved with sucessive
overrelaxation to find the groundstate. For us this is H*psi = mu*psi.
Here we compute b, the diagonal part of A, and the coefficients for
representing the nondiagonal part of A as a sum of operators to be
evaluated by the solver."""
A_diag = V
A_nondiag = -0.5*LAPLACIAN
b = psi
return A_diag, A_nondiag, b
if __name__ == '__main__':
# The initial Thomas-Fermi guess:
psi_0_1D = np.pi**(-0.25)*np.exp(-x**2/2)*np.ones(r.shape)/(np.sqrt(2*x_max_global))
psi_0 = 1/np.sqrt(np.pi)*np.exp(-r**2/2)
psi_1 = np.sqrt(2)*x/np.sqrt(np.pi)*np.exp(-r**2/2)
psi = 1/np.sqrt(2) * (psi_0 + psi_1)
psi = psi_0
sum_integral = np.abs(psi**2).sum()*dx*dy
print('Integral:', repr(sum_integral))
print('FD energy:', bec2d.compute_energy(0, psi, H))
grad2psi = simulator.fft_laplacian(psi)
E_psi = (-0.5*grad2psi + V*psi)
energy_density = (psi.conj()*E_psi).real
print('FFT energy:', energy_density.sum()*dx*dy)
err = np.abs((bec2d.compute_energy(0, psi, H) - 1.0)/1.0)
print('err:', err)
print('for comparison with paper:', 1.5*(1-1.5*err))
# import matplotlib.pyplot as plt
# plt.subplot(131)
# plt.imshow(psi_0, interpolation='nearest')
# plt.subplot(132)
# plt.imshow(psi_1, interpolation='nearest')
# plt.subplot(133)
# plt.imshow(psi, interpolation='nearest')
# plt.show()
import sys
sys.exit(0)
assert False
psi = np.ones(r.shape)
# Find the groundstate:
psi = bec2d.find_groundstate(groundstate_system, H, 1.0, psi, relaxation_parameter=1.0, convergence=1e-14,
output_interval=100, output_directory='groundstate', convergence_check_interval=10)
# psi is real so far, convert it to complex:
# psi = np.array(psi, dtype=complex)
# # Print some vortices, seeding the pseudorandom number generator so that
# # MPI processes all agree on where the vortices are:
# np.random.seed(42)
# for i in range(30):
# sign = np.sign(np.random.normal())
# x_vortex = np.random.normal(0, scale=R)
# y_vortex = np.random.normal(0, scale=R)
# psi[:] *= np.exp(sign * 1j*np.arctan2(x - y_vortex, y - x_vortex))
# psi_initial = psi.copy()
# METHOD = 'fourier'
# Smooth it a bit in imaginary time:
# for i in range(10):
# psi = bec2d.evolve(dt=0.01, t_final=1,
# H=H, psi=psi, mu=1, method='rk4', imaginary_time=True,
# output_interval=100, output_directory='smoothing', post_step_callback= lambda i, t, psi: bec2d.normalise(psi, 1))
# print(bec2d.compute_energy(0, psi, H))
# # And evolve it in time for 10ms:
# psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=10e-3,
# H=H, psi=psi, mu=mu, method='rk4', imaginary_time=False,
# output_interval=100, output_directory='evolution')
# gradx_psi_fourier = simulator.fft_gradx(psi_fourier)
# grady_psi_fourier = simulator.fft_grady(psi_fourier)
# jx_fourier = (-1j*psi_fourier.conj()*gradx_psi_fourier).real
# jy_fourier = (-1j*psi_fourier.conj()*grady_psi_fourier).real
# import matplotlib.pyplot as plt
# plt.subplot(211)
# plt.title('jx of psi')
# plt.imshow(jx_fourier.transpose(), origin='lower', interpolation='nearest')
# plt.subplot(212)
# plt.title('jy of psi')
# plt.imshow(jy_fourier.transpose(), origin='lower', interpolation='nearest')
# plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 28 08:38:15 2017
@author: jorgemauricio
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from scipy.interpolate import griddata as gd
#%% read csv
data1 = pd.read_table('data/d1.txt', sep=',')
data2 = pd.read_table('data/d2.txt', sep=',')
data3 = pd.read_table('data/d3.txt', sep=',')
data4 = pd.read_table('data/d4.txt', sep=',')
data5 = pd.read_table('data/d5.txt', sep=',')
#%% make one dataFrame
data = data1.filter(items=['Long', 'Lat','Rain'])
data['Rain2'] = data2['Rain']
data['Rain3'] = data3['Rain']
data['Rain4'] = data4['Rain']
data['Rain5'] = data5['Rain']
data['Acum'] = data['Rain'] + data['Rain2'] + data['Rain3'] + data['Rain4'] + data['Rain5']
#%% get values from Ags
data = data.loc[data['Lat'] > 21.0]
data = data.loc[data['Lat'] < 24.0]
data = data.loc[data['Long'] > -104.0]
data = data.loc[data['Long'] < -100.0]
#%% get x and y values
lons = np.array(data['Long'])
lats = np.array(data['Lat'])
#%% set up plot
plt.clf()
#fig = plt.figure(figsize=(48,24))
m = Basemap(projection='mill',llcrnrlat=21.3,urcrnrlat=23,llcrnrlon=-103.5,urcrnrlon=-101,resolution='h')
#%% generate lats, lons
x, y = m(lons,lats)
#%% number of cols and rows
numcols = len(x)
numrows = len(y)
#%% generate xi, yi
xi = np.linspace(x.min(), x.max(), numcols)
yi = np.linspace(y.min(), y.max(), numrows)
#%% generate meshgrid
xi, yi = np.meshgrid(xi,yi)
#%% generate zi
z = np.array(data['Rain'])
zi = gd((x,y), z, (xi,yi), method='linear')
#%% contour plot
cs = m.contourf(xi,yi,zi, zorder=4, alpha=0.5, cmap='RdPu')
#%% draw map details
m.drawcoastlines()
m.drawstates(linewidth=0.7)
m.drawcountries()
#m.drawmapscale(22, -103, 23, -102, 100, units='km', fontsize=14, yoffset=None, barstyle='fancy', labelstyle='simple', fillcolor1='w', fillcolor2='#000000',fontcolor='#000000', zorder=5)
#%% # add colour bar and title
cbar = m.colorbar(cs, location='right', pad="5%")
cbar.set_label('mm')
plt.title('Precipitación')
plt.savefig('maps/precipitacion.png', dpi=300, transparent=True)
plt.show()
|
from .abstract_get_api_test import AbstractGetApiTest
from .abstract_post_api_test import AbstractPostApiTest
|
import psycopg2
def drop_table(elephantsql_client, command):
'''Drops table included in the input command'''
cur = elephantsql_client.cursor()
try:
cur.execute(command)
elephantsql_client.commit()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
elephantsql_client.rollback()
cur.close()
return 1
cur.close()
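# A minimal usage sketch (placeholder credentials; assumes a reachable PostgreSQL/ElephantSQL instance):
if __name__ == '__main__':
    connection = psycopg2.connect(dbname='example_db', user='example_user',
                                  password='example_password', host='localhost', port=5432)
    drop_table(connection, 'DROP TABLE IF EXISTS example_table;')
    connection.close()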
|
# coding=utf-8
"""
update_vagrant_metadata is an Ansible module that allows for
updates to a single provider section of a Vagrant metadata.json
file describing a box.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from json import dump, load
from ansible.module_utils.basic import AnsibleModule
from semver import bump_major, bump_minor, bump_patch
DOCUMENTATION = '''
---
module: update_vagrant_metadata
short_description: Update Vagrant metadata.json Files
author: Steve Kuznetsov
options:
dest:
description:
- The location of the metadata.json file to change.
required: true
version_increment:
description:
- Which part of the box version to update.
required: true
choices: [ 'major', 'minor', 'patch', 'none' ]
provider:
description:
- The Vagrant provider for which to change data.
required: true
choices: [ 'libvirt', 'virtualbox', 'vmware_fusion' ]
checksum:
description:
- The value of the new image checksum.
required: true
serve_local:
description:
- From where to serve the box on the local host.
required: false
requirements:
- semver
'''
EXAMPLES = '''
# Update the libvirt checksum for the latest version in a metadata file
- update_vagrant_metadata:
dest: '/home/origin/.config/origin-ci-tool/vagrant/boxes/fedora/base/metadata.json'
version_increment: 'patch'
provider: 'libvirt'
checksum: '3e1fc0abbf772899adc95a3dda776120'
# Update the libvirt checksum for the latest version in a metadata file and serve the image locally
- update_vagrant_metadata:
dest: '/home/origin/.config/origin-ci-tool/vagrant/boxes/fedora/base/metadata.json'
version_increment: 'patch'
provider: 'libvirt'
checksum: '3e1fc0abbf772899adc95a3dda776120'
serve_local: '/home/origin/.config/origin-ci-tool/vagrant/boxes/fedora/base/fedora_base.qcow2'
'''
def main():
"""
Update a Vagrant metadata.json file to bump the box version
and record a new image checksum, optionally choosing to serve
the new image from the local host.
"""
module = AnsibleModule(
supports_check_mode=False,
argument_spec=dict(
dest=dict(
required=True,
default=None,
type='str',
),
version_increment=dict(
required=False,
default=None,
type='str',
choices=[
'major',
'minor',
'patch',
'none',
],
),
provider=dict(
required=True,
default=None,
type='str',
choices=[
'libvirt',
'virtualbox',
'vmware_fusion',
],
),
checksum=dict(
required=True,
default=None,
type='str',
),
serve_local=dict(
required=False,
default=None,
type='str',
),
),
)
metadata_path = module.params['dest']
version_increment = module.params['version_increment']
provider = module.params['provider']
checksum = module.params['checksum']
serve_local = module.params['serve_local']
with open(metadata_path) as metadata_file:
current_metadata = load(metadata_file)
new_version = update_metadata(current_metadata['versions'][0], version_increment, provider, checksum, serve_local)
del current_metadata['versions']
current_metadata['versions'] = [new_version]
with open(metadata_path, 'wb') as metadata_file:
dump(current_metadata, metadata_file, indent=2)
module.exit_json(
changed=True,
failed=False,
dest=metadata_path,
version_increment=version_increment,
provider=provider,
checksum=checksum,
serve_local=serve_local,
)
def update_metadata(metadata, version_increment, provider, checksum, serve_local):
"""
Update the Vagrant box metadata.
:param metadata: metadata to update
:param version_increment: how to increment the version
:param provider: for which provider to update the metadata
:param checksum: new checksum for the box data
:param serve_local: whether or not to serve the box locally
:return: updated metadata
:rtype: dict
"""
if version_increment == 'major':
metadata['version'] = bump_major(metadata['version'])
elif version_increment == 'minor':
metadata['version'] = bump_minor(metadata['version'])
elif version_increment == 'patch':
metadata['version'] = bump_patch(metadata['version'])
current_provider_data = None
for provider_data in metadata['providers']:
if provider_data['name'] == provider:
current_provider_data = provider_data
break
current_provider_data['checksum'] = checksum
if serve_local:
current_provider_data['url'] = serve_local
return metadata
if __name__ == '__main__':
main()
|
import logging, ipaddress
import sys
base_dir = "/usr/local/fworch"
importer_base_dir = base_dir + '/importer'
sys.path.append(importer_base_dir)
# sys.path.append(importer_base_dir + '/fortimanager5ff')
# import common, fwcommon
def normalize_nwobjects(full_config, config2import, import_id):
nw_objects = []
# 'obj_typ': obj_type, 'obj_ip': first_ip, 'obj_ip_end': last_ip,
# 'obj_member_refs': member_refs, 'obj_member_names': member_names}])
for obj_orig in full_config['network_objects']:
obj = {}
obj.update({ 'obj_typ': 'group' }) # setting default network obj type first
obj.update({'obj_name': obj_orig['name']})
if 'subnet' in obj_orig: # ipv4 object
ipa = ipaddress.ip_network(str(obj_orig['subnet'][0]) + '/' + str(obj_orig['subnet'][1]))
if ipa.num_addresses > 1:
obj.update({ 'obj_typ': 'network' })
else:
obj.update({ 'obj_typ': 'host' })
obj.update({ 'obj_ip': ipa.with_prefixlen })
elif 'ip6' in obj_orig: # ipv6 object
ipa = ipaddress.ip_network(str(obj_orig['ip6']).replace("\\", ""))
if ipa.num_addresses > 1:
obj.update({ 'obj_typ': 'network' })
else:
obj.update({ 'obj_typ': 'host' })
obj.update({ 'obj_ip': ipa.with_prefixlen })
if 'comment' in obj_orig:
obj.update({'obj_comment': obj_orig['comment']})
if 'color' in obj_orig and obj_orig['color']==0:
obj.update({'obj_color': 'black'})
# todo: deal with all other colors (will be currently ignored)
# we would need a list of fortinet color codes
obj.update({'obj_uid': obj_orig['uuid']})
obj.update({'control_id': import_id})
nw_objects.append(obj)
# todo: handle groups
# if 'list' in obj_orig:
# obj['obj_typ'] = 'group' })
config2import.update({'network_objects': nw_objects})
# for members of groups, the name of the member obj needs to be fetched separately (starting from API v1.?)
def resolve_nw_uid_to_name(uid, nw_objects):
# return name of nw_objects element where obj_uid = uid
for obj in nw_objects:
if obj['obj_uid'] == uid:
return obj['obj_name']
return 'ERROR: uid "' + uid + '" not found'
def add_member_names_for_nw_group(idx, nw_objects):
    import common  # needed for list_delimiter (see the commented-out import above)
    group = nw_objects.pop(idx)
if group['obj_member_refs'] == '' or group['obj_member_refs'] == None:
#member_names = None
#obj_member_refs = None
group['obj_member_names'] = None
group['obj_member_refs'] = None
else:
member_names = ''
obj_member_refs = group['obj_member_refs'].split(common.list_delimiter)
for ref in obj_member_refs:
member_name = resolve_nw_uid_to_name(ref, nw_objects)
member_names += member_name + common.list_delimiter
group['obj_member_names'] = member_names[:-1]
nw_objects.insert(idx, group)
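# A minimal, self-contained illustration of normalize_nwobjects() with made-up objects
# (not a real FortiManager export):
if __name__ == '__main__':
    sample_config = {'network_objects': [
        {'name': 'host_1', 'uuid': '11111111-aaaa-bbbb-cccc-000000000001',
         'subnet': ['10.0.0.1', '255.255.255.255'], 'color': 0, 'comment': 'test host'},
        {'name': 'net_1', 'uuid': '11111111-aaaa-bbbb-cccc-000000000002',
         'subnet': ['10.0.0.0', '255.255.255.0']},
    ]}
    normalized = {}
    normalize_nwobjects(sample_config, normalized, import_id=1)
    for obj in normalized['network_objects']:
        print(obj['obj_name'], obj['obj_typ'], obj.get('obj_ip'))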
|
# Patch the operators in maskrcnn_benchmark
import logging
logger = logging.getLogger(__name__)
try:
    import siammot.operator_patch.rpn_patch
except Exception:
    logger.info("Error patching RPN operator")
try:
    import siammot.operator_patch.fpn_patch
except Exception:
    logger.info("Error patching FPN operator")
logger.info("Operators from maskrcnn_benchmark are patched successfully!")
# Please don't patch additional operators here unless you are sure about the
# consequences, because doing so can have unintended side effects.
# Also, do not change the patching order of the operators above; otherwise the
# patching will fail even though the message above still reports that the
# operators were patched successfully.
|
# data validation
nome = str(input('Digite seu nome: '))
sexo = str(input('SEXO [M/F] ')).strip().upper()[0]
while sexo not in 'MF':
    sexo = str(input('Resposta invalida! tente novamente \n SEXO [M/F] ')).strip().upper()[0]
print('Sexo {} registrado com sucesso'.format(sexo))
idade = int(input('Digite sua idade: '))
print('Nome: {} \n Idade: {}\n Sexo: {}'.format(nome, idade, sexo))
|
l1 = list(range(100))
new_list = [n for n in l1 if n % 7 == 0]  # list comprehension: filter numbers divisible by 7
#print(l1)
#print(new_list)
print(sum(new_list))
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 31 22:35:08 2021
@author: LENOVO
"""
import pandas as pd
data = pd.read_csv('phpgNaXZe.csv')
data.head()
# Assign names to the columns
columnas = ['sbp','Tabaco','ldl','Adiposity','Familia','Tipo','Obesidad','Alcohol','Edad','chd']
data.columns=columnas
data.head()
# Inspect the data types
data.dtypes
# Count the null values
data.isnull().sum()
# Encode the Familia and chd columns as numeric values
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
data['Familia']=encoder.fit_transform(data['Familia'])
data['chd']=encoder.fit_transform(data['chd'])
data.head()
# Scale the values of the sbp column
from sklearn.preprocessing import MinMaxScaler
scale = MinMaxScaler(feature_range =(0,100))
data['sbp'] = scale.fit_transform(data['sbp'].values.reshape(-1,1))
data.head()
# Plot obesity against age
data.plot(x='Edad',y='Obesidad',kind='scatter',figsize =(10,5))
# Plot tobacco consumption against age
data.plot(x='Edad',y='Tabaco',kind='scatter',figsize =(10,5))
# Plot alcohol consumption against age
data.plot(x='Edad',y='Alcohol',kind='scatter',figsize =(10,5))
### MACHINE LEARNING ANALYSIS ###
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score
# Define the dependent and independent variables
y = data['chd']
X = data.drop('chd', axis =1)
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=1)
# Define the algorithm
algoritmo = svm.SVC(kernel ='linear')
# Train the algorithm
algoritmo.fit(X_train, y_train)
# Make a prediction
y_test_pred = algoritmo.predict(X_test)
# Compute the confusion matrix
print(confusion_matrix(y_test, y_test_pred))
# Compute the model's accuracy and precision
accuracy_score(y_test, y_test_pred)
precision_score(y_test, y_test_pred)
|
import torch
from torch import nn

from .base import BaseModel
class TransformerSeries(BaseModel):
"""
A Transformer for timeseries data.
    The standard Transformer encoder layer is based on the paper “Attention Is All You Need”.
It implements multi-headed self-attention. The TransformerEncoder stacks the encoder layer and
implements layer normalisation (optional). The decoder is replaced by a FNN, a step that has become
fashionable since the original paper.
"""
def __init__(self,
dataset: TimeSeriesDataset,
num_layers: int = config.MODEL["transformer"]["num_layers"],
attn_heads: int = config.MODEL["transformer"]["attn_heads"],
dropout: float = config.MODEL["dropout"],
):
"""
Args:
            dataset (TimeSeriesDataset): Dataset the model is built around; it determines the feature and output sizes.
num_layers (int, optional): The number of encoding layers. Defaults to 3.
attn_heads (int, optional): The number of attention heads at each layer. Defaults to 8.
dropout (float, optional): The dropout probability. Defaults to 0.
"""
super(TransformerSeries, self).__init__(dataset)
self.transformer_encoder = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=self.feature_size, nhead=attn_heads, dropout=dropout),
num_layers=num_layers,
norm=nn.LayerNorm(self.feature_size)
)
self.decoder = nn.Linear(self.feature_size, 2048)
self.final_layer = nn.Linear(2048, self.output_size)
self.init_weights()
def init_weights(self, initrange: float = 0.1):
"""Initiates weight variables. ~Uniform(-initrage, initrange)
Args:
initrange (float, optional): The initial weight range +/-. Defaults to 0.1.
"""
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
self.final_layer.bias.data.zero_()
self.final_layer.weight.data.uniform_(-initrange, initrange)
def _generate_square_subsequent_mask(self, size: int) -> torch.Tensor:
mask = (torch.triu(torch.ones(size, size, device=self.device)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, X: torch.Tensor) -> torch.Tensor:
"""
Args:
X (torch.Tensor): shape --> [input_size, batch, feature_size]
"""
assert len(X.shape) == 3, "Tensor must be of form [nsamples, batch, features]."
mask = self._generate_square_subsequent_mask(len(X))
output = self.transformer_encoder(X, mask)
output = self.decoder(output)
return self.final_layer(output)
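# A standalone illustration of the causal mask built by _generate_square_subsequent_mask
# (zeros on and below the diagonal, -inf above), shown as a sketch rather than executed
# at import time:
#
#   size = 4
#   mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
#   mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0)
#   # tensor([[0., -inf, -inf, -inf],
#   #         [0.,   0., -inf, -inf],
#   #         [0.,   0.,   0., -inf],
#   #         [0.,   0.,   0.,   0.]])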
|
from operator import add
import numpy as np
import digExtractionsClassifier.utility.functions as utility_functions
class ClassifyExtractions:
def __init__(self, model, classification_field, embeddings, context_range = 5, use_word_in_context = True, use_break_tokens = False):
self.model = model
self.classification_field = classification_field
self.embeddings = embeddings
self.context_range = context_range
self.use_word_in_context = use_word_in_context
self.use_break_tokens = use_break_tokens
def classify(self, tokens):
# tokens = map(lambda x:x.lower(),tokens)
for index, token in enumerate(tokens):
utility_functions.value_to_lower(token)
semantic_types = utility_functions.get_extractions_of_type(token, self.classification_field)
for semantic_type in semantic_types:
#There are extractions in the token of the same type
length = utility_functions.get_length_of_extraction(semantic_type)
context = utility_functions.get_context(tokens, index, length, self.context_range, self.use_word_in_context)
context_vector = utility_functions.get_vector_of_context(context, self.embeddings)
probability = self.get_classification_probability(context_vector)
self.append_probability(semantic_type, probability)
return tokens
def get_classification_probability(self, feature_vectors):
if(feature_vectors.ndim == 1):
# print "Reshaping the vector"
feature_vectors = feature_vectors.reshape(1, -1)
if('scaler' in self.model):
print "Scaling"
feature_vectors = self.model['scaler'].transform(feature_vectors)
if('normalizer' in self.model):
print "Normalizing"
feature_vectors = self.model['normalizer'].transform(feature_vectors)
if('k_best' in self.model):
feature_vectors = self.model['k_best'].transform(feature_vectors)
return self.model['model'].predict_proba(feature_vectors)[0][1]
def append_probability(self, semantic_type, probability):
semantic_type['probability'] = probability
|
import os
print("Hello World")
#var1 = input("Enter something please: ")
#print(var1)
var2 = open("File.txt", "r+")
print(var2)
print(var2.name)
#var2.write("Hello My Name Is Bob")
string1 = var2.read(10)
print(string1)
var2.close()
#os.rename("File.txt", "New Name.txt")
#os.remove("Filelocation and filename")
os.mkdir("New Folder")
|
import pytest # type: ignore
from redicalsearch import CreateFlags, GeoField, IndexExistsError, NumericField, TextField
pytestmark = [pytest.mark.integration, pytest.mark.asyncio]
async def test_new_index(redical):
assert True is await redical.ft.create(
'myindex',
TextField('line', TextField.SORTABLE),
TextField('play', TextField.NO_STEM),
NumericField('speech', NumericField.SORTABLE),
TextField('speaker', TextField.NO_STEM),
TextField('entry'), GeoField('location'),
flags=CreateFlags.NO_HIGHLIGHTS
)
# TODO: use `ft.info` to assert some stats
async def test_index_already_exists(redical):
assert True is await redical.ft.create(
'myindex',
TextField('line', TextField.SORTABLE),
TextField('play', TextField.NO_STEM),
NumericField('speech', NumericField.SORTABLE),
TextField('speaker', TextField.NO_STEM),
TextField('entry'), GeoField('location'),
flags=CreateFlags.NO_HIGHLIGHTS
)
with pytest.raises(IndexExistsError):
assert True is await redical.ft.create(
'myindex',
TextField('line', TextField.SORTABLE),
TextField('play', TextField.NO_STEM),
NumericField('speech', NumericField.SORTABLE),
TextField('speaker', TextField.NO_STEM),
TextField('entry'), GeoField('location'),
flags=CreateFlags.NO_HIGHLIGHTS
)
async def test_new_index_pipeline(redical):
async with redical as pipe:
fut1 = pipe.set('foo', 'bar')
fut2 = pipe.ft.create(
'myindex',
TextField('line', TextField.SORTABLE),
NumericField('speech', NumericField.SORTABLE),
)
fut3 = pipe.get('foo')
fut4 = pipe.ft.info('myindex')
assert True is await fut1
assert True is await fut2
assert 'bar' == await fut3
info = await fut4
assert 'myindex' == info.name
field_defs = dict(
line=dict(type='TEXT', options=['WEIGHT', '1', 'SORTABLE']),
speech=dict(type='NUMERIC', options=['SORTABLE']),
)
assert field_defs == info.field_defs
assert 0 == info.number_of_documents
assert 0 == info.number_of_terms
assert 0 == info.number_of_records
|
import serial
#Write the program name here
ProgName = b'TEST'
#Write the positions here
def Position_List():
positionNoFlag('P1', [426.393,-0.000,460.000,0.000,90.000,-0.000])
positionFlag('P2', [242.630,0.000,685.411,-0.000,52.760,-0.000],[6,0])
#ser = serial.Serial('COM4', 19200, timeout=0,stopbits=2, parity=serial.PARITY_EVEN, rtscts=1) # open serial port
#print(ser.name)
def writeFunction(cmd):
"""This function is responsible for sending the serial command"""
command = ('1;9;EDATA%s\r' % (cmd))
print(command)
"""command_to_byte = str.encode(command) #converts the string built in bytes to be transmitted in serial
ser.write(command_to_byte)"""
def definePosition(PosName, coords, overrideFlag, structureFlag):
"""This function writes the position to be saved, it can save the position with the structure flags or
without it"""
if overrideFlag:
posBuffer = '%s=(%.2f,%.2f,%.2f,%.2f,%.2f,%.2f)' %(PosName, coords[0], coords[1], coords[2], coords[3],
coords[4], coords[5])
else:
posBuffer = '%s=(%.2f,%.2f,%.2f,%.2f,%.2f,%.2f)(%d,%d)' %(PosName, coords[0], coords[1], coords[2], coords[3],
coords[4], coords[5], structureFlag[0], structureFlag[1])
writeFunction(posBuffer)
def positionNoFlag(PosName, coords):
"""This function writes the postion with no flags, on the format P1=(X,Y,Z,A,B,C)"""
definePosition(PosName, coords, True, [0,0])
def positionFlag(PosName, coords, structureFlag):
"""This function writes the position with the flag, on the Format P1=(X,Y,Z,A,B,C)(L1,L2)"""
definePosition(PosName, coords, False, structureFlag)
#sending to the controller
"""ser.write(b'1;1;CNTLON\r') #TURNS THE CONTROLLER ON
ser.write(b'1;1;SAVE\r') #INDICATES THE SAVING OF A NEW ITEM
ser.write(b'1;9;LOAD=%s.MB4\r' % ProgName) #CREATES THE PROJECT NAME"""
Position_List()
"""ser.write(b'1;1;SAVE\r')#FINSIH SAVING
ser.write(b'1;1;CNTLOFF\r')#TURNS THE CONTROLLER OFF"""
|
import json
def exportImagePath(filepath, listDownload, outpath):
imageIds = []
idsDescription = {}
idsLinks = {}
idsTitles = {}
with open(filepath) as f:
with open(listDownload, 'w') as fout:
with open(outpath, 'w') as fullout:
paths = []
for i, line in enumerate(f):
if i%2 == 0:
print('%2.2f'% (i/3672625.0*100.0), '\%', end='\r')
j = json.loads(line)
source = j['_source']
#imageIds.append(j['_id'])
#idsLinks[j['_id']] = source['imageLink']
#idsDescription[j['_id']] = source['description']
#idsTitles[j['_id']] = source['title']
fout.write(j['_id'] + '\t' + source['imageLink'] + '\n')
if not source['description'] is None:
fullout.write(j['_id'] +'\t'+source['description']+'\n')
if not source['title'] is None:
fullout.write(j['_id'] +'\t'+source['title']+'\n')
if __name__ == '__main__':
exportImagePath('/data/fr_shopping.json', 'imageList.txt', 'shoppingDataset.txt')
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#server
#Can only accept one client.
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import string
def acceptClient(sock):
client, address = sock.accept()
nickInfo = client.recv(1000) #assumes that NICK is the first message received
userInfo = client.recv(1000) #assumes that USER is the second message received
nick = nickInfo.split()[1]
userSplit = userInfo.split()
username = userSplit[1]
hostname = userSplit[2]
servername = userSplit[3]
realname = string.join(userSplit[4:], ' ')
entry = {'nick': nick, 'uname': username, 'address': address,
'hostname':hostname, 'servername':servername,
'realname':realname}
print entry
table[client] = entry
def receive(client):
print "Waiting for data"
data = client.recv(1000)
print data
return data
def createSocket():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((network, port))
sock.listen(1)
return sock
port = 6667
network = '127.0.0.1'
table = dict()
sock = createSocket()
print "Waiting for client to connect"
acceptClient(sock)
done = False
while not done:
keys = table.keys()
for one_key in keys:
data = receive(one_key)
if data == 'quit':
done = True
if 'JOIN' in data:
info = table[one_key]
one_key.send(':%s!n=%s@%s.%s JOIN %s' % (info['nick'], info['uname'],
info['hostname'],
info['servername'],
data[data.find('JOIN') + 5:]))
sock.close()
|
token = "1047594455:AAGGblu9FGRNgPkjMEcbX7I5BuwDHUBQqsI"
MODULE_NAME = "admin"
MESSAGE_AMOUNT = "People registered: "
MESSAGE_UNAUTHORIZED = "Unauthorized access attempt. Administrator was notified"
MESSAGE_SENT_EVERYBODY = "Message has been sent to everybody"
MESSAGE_SENT_PERSONAL = "Message has been sent"
MESSAGE_ABORTED = "Aborted"
MESSAGE_USER_NOT_FOUND = "User not found"
MESSAGE_EXCEPTION = "Exception occurred:\n"
MESSAGE_SCHEDULE_UPDATED = "Schedule is updated"
REQUEST_SPAM_MESSAGE = "With great power comes great responsibility!\nWhat do you want to spam everyone?"
REQUEST_PERSONAL_ALIAS = "Write telegram alias for personal message"
REQUEST_PERSONAL_MESSAGE = "Write your personal message to "
ADMIN_LIST = []
SUPERADMIN_LIST = []
|
import time
class Mpu6050:
'''
Installation:
sudo apt install python3-smbus
or
sudo apt-get install i2c-tools libi2c-dev python-dev python3-dev
git clone https://github.com/pimoroni/py-smbus.git
cd py-smbus/library
python setup.py build
sudo python setup.py install
pip install mpu6050-raspberrypi
'''
def __init__(self, addr=0x68, poll_delay=0.0166):
from mpu6050 import mpu6050
self.sensor = mpu6050(addr)
self.accel = { 'x' : 0., 'y' : 0., 'z' : 0. }
self.gyro = { 'x' : 0., 'y' : 0., 'z' : 0. }
self.temp = 0.
self.poll_delay = poll_delay
self.on = True
def update(self):
while self.on:
self.poll()
time.sleep(self.poll_delay)
def poll(self):
try:
self.accel, self.gyro, self.temp = self.sensor.get_all_data()
except:
print('failed to read imu!!')
def run_threaded(self):
return self.accel['x'], self.accel['y'], self.accel['z'], self.gyro['x'], self.gyro['y'], self.gyro['z'], self.temp
def run(self):
self.poll()
return self.accel['x'], self.accel['y'], self.accel['z'], self.gyro['x'], self.gyro['y'], self.gyro['z'], self.temp
def shutdown(self):
self.on = False
class Bno055Imu:
def __init__(self, serial_port='/dev/serial0', rst=18, poll_delay=0.0166):
from Adafruit_BNO055 import BNO055
print("BNO Constructor {} => rst = {}".format(serial_port, rst))
self.bno = BNO055.BNO055(serial_port=serial_port, rst=rst)
self.accel = {'x': 0., 'y': 0., 'z': 0.}
self.gyro = {'x': 0., 'y': 0., 'z': 0.}
self.temp = 0.
self.poll_delay = poll_delay
self.on = True
if not self.bno.begin():
raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
def update(self):
while self.on:
self.poll()
time.sleep(self.poll_delay)
def poll(self):
try:
accx, accy, accz = self.bno.read_accelerometer()
gyrx, gyry, gyrz = self.bno.read_gyroscope()
self.temp = self.bno.read_temp()
self.accel['x'] = accx
self.accel['y'] = accy
self.accel['z'] = accz
self.gyro['x'] = gyrx
self.gyro['y'] = gyry
self.gyro['z'] = gyrz
except:
print('failed to read imu!!')
def run_threaded(self):
return self.accel['x'], self.accel['y'], self.accel['z'], self.gyro['x'], self.gyro['y'], self.gyro[
'z'], self.temp
def run(self):
self.poll()
return self.accel['x'], self.accel['y'], self.accel['z'], self.gyro['x'], self.gyro['y'], self.gyro[
'z'], self.temp
def shutdown(self):
self.on = False
class SenseHatImu:
def __init__(self, poll_delay=0.0166):
from sense_hat import SenseHat
print("init sensehat")
self.sense = SenseHat()
        self.sense.set_imu_config(True, True, True)  # enable compass, gyroscope, and accelerometer
self.accel = {'x': 0., 'y': 0., 'z': 0.}
self.gyro = {'x': 0., 'y': 0., 'z': 0.}
self.temp = 0.
self.poll_delay = poll_delay
self.on = True
def poll(self):
try:
orientation = self.sense.get_orientation()
gyroscope = self.sense.get_gyroscope_raw()
#compass = self.sense.get_compass()
temperature = self.sense.get_temperature()
self.accel['x'] = orientation["roll"]
self.accel['y'] = orientation["pitch"]
self.accel['z'] = orientation["yaw"]
self.gyro['x'] = gyroscope["x"]
self.gyro['y'] = gyroscope["y"]
self.gyro['z'] = gyroscope["z"]
self.temp = temperature
except Exception as e:
print('failed to read imu!!')
print(e)
def update(self):
while self.on:
self.poll()
time.sleep(self.poll_delay)
    def run_threaded(self):
        return (self.accel['x'], self.accel['y'], self.accel['z'],
                self.gyro['x'], self.gyro['y'], self.gyro['z'], self.temp)
    def run(self):
        self.poll()
        return (self.accel['x'], self.accel['y'], self.accel['z'],
                self.gyro['x'], self.gyro['y'], self.gyro['z'], self.temp)
def shutdown(self):
self.on = False
if __name__ == "__main__":
    count = 0
    # imu = Mpu6050()
    # imu = Bno055Imu()
    imu = SenseHatImu()
    while count < 100:
        data = imu.run()
        print(data)
        time.sleep(0.1)
        count += 1
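# A minimal threaded-usage sketch: the update()/run_threaded() split suggests these
# parts are meant to be polled from a background thread (the names below are assumptions):
#   import threading
#   imu = Mpu6050()
#   t = threading.Thread(target=imu.update, daemon=True)
#   t.start()
#   ax, ay, az, gx, gy, gz, temp = imu.run_threaded()
#   imu.shutdown()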
|
import random
def start(input_number):
    if input_number in range(18, 21):
        print("My move: 21")
        print("You lost, playboy!")
        print("Enter a number from one to three to play again.")
    elif input_number == 21:
        print("You won, smarty!")
        print("Enter a number from one to three to play again.")
    elif input_number > 21:
        print("The game is 21, pal! Game over for you!")
        print("Enter a number from one to three to play again.")
    else:
        output_number = input_number + random.randrange(1, 4)
        if output_number == 21:
            print("My move: 21")
            print("You lost, playboy!")
        else:
            print("My move:", output_number)
print("Welcome to the Black Jack 3+ game!")
print("Enter a number from one to three to start the game.")
while True:
    input_number = int(input("Your turn, buddy: "))
    start(input_number)
|
import copy
import inspect
import operator
import warnings
from collections import OrderedDict
import inflection
from django.conf import settings
from django.db.models import Manager
from django.db.models.fields.related_descriptors import (
ManyToManyDescriptor,
ReverseManyToOneDescriptor
)
from django.utils import encoding
from django.utils.module_loading import import_string as import_class_from_dotted_path
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions
from rest_framework.exceptions import APIException
from .settings import json_api_settings
# Generic relation descriptor from django.contrib.contenttypes.
if 'django.contrib.contenttypes' not in settings.INSTALLED_APPS: # pragma: no cover
# Target application does not use contenttypes. Importing would cause errors.
ReverseGenericManyToOneDescriptor = object()
else:
from django.contrib.contenttypes.fields import ReverseGenericManyToOneDescriptor
def get_resource_name(context, expand_polymorphic_types=False):
"""
Return the name of a resource.
"""
from rest_framework_json_api.serializers import PolymorphicModelSerializer
view = context.get('view')
# Sanity check to make sure we have a view.
if not view:
return None
# Check to see if there is a status code and return early
# with the resource_name value of `errors`.
try:
code = str(view.response.status_code)
except (AttributeError, ValueError):
pass
else:
if code.startswith('4') or code.startswith('5'):
return 'errors'
try:
resource_name = getattr(view, 'resource_name')
except AttributeError:
try:
serializer = view.get_serializer_class()
if expand_polymorphic_types and issubclass(serializer, PolymorphicModelSerializer):
return serializer.get_polymorphic_types()
else:
return get_resource_type_from_serializer(serializer)
except AttributeError:
try:
resource_name = get_resource_type_from_model(view.model)
except AttributeError:
resource_name = view.__class__.__name__
if not isinstance(resource_name, str):
# The resource name is not a string - return as is
return resource_name
    # The name was calculated automatically from the view, so pluralize and format it.
resource_name = format_resource_type(resource_name)
return resource_name
def get_serializer_fields(serializer):
fields = None
if hasattr(serializer, 'child'):
fields = getattr(serializer.child, 'fields')
meta = getattr(serializer.child, 'Meta', None)
if hasattr(serializer, 'fields'):
fields = getattr(serializer, 'fields')
meta = getattr(serializer, 'Meta', None)
if fields is not None:
meta_fields = getattr(meta, 'meta_fields', {})
for field in meta_fields:
try:
fields.pop(field)
except KeyError:
pass
return fields
def format_field_names(obj, format_type=None):
"""
Takes a dict and returns it with formatted keys as set in `format_type`
or `JSON_API_FORMAT_FIELD_NAMES`
:format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'
"""
if format_type is None:
format_type = json_api_settings.FORMAT_FIELD_NAMES
if isinstance(obj, dict):
formatted = OrderedDict()
for key, value in obj.items():
key = format_value(key, format_type)
formatted[key] = value
return formatted
return obj
def _format_object(obj, format_type=None):
"""Depending on settings calls either `format_keys` or `format_field_names`"""
if json_api_settings.FORMAT_KEYS is not None:
return format_keys(obj, format_type)
return format_field_names(obj, format_type)
def format_keys(obj, format_type=None):
"""
.. warning::
`format_keys` function and `JSON_API_FORMAT_KEYS` setting are deprecated and will be
removed in the future.
Use `format_field_names` and `JSON_API_FORMAT_FIELD_NAMES` instead. Be aware that
`format_field_names` only formats keys and preserves value.
Takes either a dict or list and returns it with camelized keys only if
JSON_API_FORMAT_KEYS is set.
:format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'
"""
warnings.warn(
"`format_keys` function and `JSON_API_FORMAT_KEYS` setting are deprecated and will be "
"removed in the future. "
"Use `format_field_names` and `JSON_API_FORMAT_FIELD_NAMES` instead. Be aware that "
"`format_field_names` only formats keys and preserves value.",
DeprecationWarning
)
if format_type is None:
format_type = json_api_settings.FORMAT_KEYS
if format_type in ('dasherize', 'camelize', 'underscore', 'capitalize'):
if isinstance(obj, dict):
formatted = OrderedDict()
for key, value in obj.items():
if format_type == 'dasherize':
# inflection can't dasherize camelCase
key = inflection.underscore(key)
formatted[inflection.dasherize(key)] \
= format_keys(value, format_type)
elif format_type == 'camelize':
formatted[inflection.camelize(key, False)] \
= format_keys(value, format_type)
elif format_type == 'capitalize':
formatted[inflection.camelize(key)] \
= format_keys(value, format_type)
elif format_type == 'underscore':
formatted[inflection.underscore(key)] \
= format_keys(value, format_type)
return formatted
if isinstance(obj, list):
return [format_keys(item, format_type) for item in obj]
else:
return obj
else:
return obj
def format_value(value, format_type=None):
if format_type is None:
format_type = json_api_settings.format_type
if format_type == 'dasherize':
# inflection can't dasherize camelCase
value = inflection.underscore(value)
value = inflection.dasherize(value)
elif format_type == 'camelize':
value = inflection.camelize(value, False)
elif format_type == 'capitalize':
value = inflection.camelize(value)
elif format_type == 'underscore':
value = inflection.underscore(value)
return value
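# Illustrative examples of format_value (assuming inflection's default behaviour):
#   format_value('field_name', 'dasherize')   -> 'field-name'
#   format_value('field_name', 'camelize')    -> 'fieldName'
#   format_value('field_name', 'capitalize')  -> 'FieldName'
#   format_value('fieldName', 'underscore')   -> 'field_name'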
def format_relation_name(value, format_type=None):
"""
.. warning::
The 'format_relation_name' function has been renamed 'format_resource_type' and the
settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of
'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'
"""
warnings.warn(
"The 'format_relation_name' function has been renamed 'format_resource_type' and the "
"settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of "
"'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'",
DeprecationWarning
)
if format_type is None:
format_type = json_api_settings.FORMAT_RELATION_KEYS
pluralize = json_api_settings.PLURALIZE_RELATION_TYPE
return format_resource_type(value, format_type, pluralize)
def format_resource_type(value, format_type=None, pluralize=None):
if format_type is None:
format_type = json_api_settings.FORMAT_TYPES
if pluralize is None:
pluralize = json_api_settings.PLURALIZE_TYPES
if format_type:
# format_type will never be None here so we can use format_value
value = format_value(value, format_type)
return inflection.pluralize(value) if pluralize else value
def get_related_resource_type(relation):
try:
return get_resource_type_from_serializer(relation)
except AttributeError:
pass
relation_model = None
if hasattr(relation, '_meta'):
relation_model = relation._meta.model
elif hasattr(relation, 'model'):
# the model type was explicitly passed as a kwarg to ResourceRelatedField
relation_model = relation.model
elif hasattr(relation, 'get_queryset') and relation.get_queryset() is not None:
relation_model = relation.get_queryset().model
elif (
getattr(relation, 'many', False) and
hasattr(relation.child, 'Meta') and
hasattr(relation.child.Meta, 'model')):
# For ManyToMany relationships, get the model from the child
# serializer of the list serializer
relation_model = relation.child.Meta.model
else:
parent_serializer = relation.parent
parent_model = None
if hasattr(parent_serializer, 'Meta'):
parent_model = getattr(parent_serializer.Meta, 'model', None)
elif hasattr(parent_serializer, 'parent') and hasattr(parent_serializer.parent, 'Meta'):
parent_model = getattr(parent_serializer.parent.Meta, 'model', None)
if parent_model is not None:
if relation.source:
if relation.source != '*':
parent_model_relation = getattr(parent_model, relation.source)
else:
parent_model_relation = getattr(parent_model, relation.field_name)
else:
parent_model_relation = getattr(parent_model, parent_serializer.field_name)
parent_model_relation_type = type(parent_model_relation)
if parent_model_relation_type is ReverseManyToOneDescriptor:
relation_model = parent_model_relation.rel.related_model
elif parent_model_relation_type is ManyToManyDescriptor:
relation_model = parent_model_relation.field.remote_field.model
# In case we are in a reverse relation
if relation_model == parent_model:
relation_model = parent_model_relation.field.model
elif parent_model_relation_type is ReverseGenericManyToOneDescriptor:
relation_model = parent_model_relation.rel.model
elif hasattr(parent_model_relation, 'field'):
try:
relation_model = parent_model_relation.field.remote_field.model
except AttributeError:
relation_model = parent_model_relation.field.related.model
else:
return get_related_resource_type(parent_model_relation)
if relation_model is None:
raise APIException(_('Could not resolve resource type for relation %s' % relation))
return get_resource_type_from_model(relation_model)
def get_resource_type_from_model(model):
json_api_meta = getattr(model, 'JSONAPIMeta', None)
return getattr(
json_api_meta,
'resource_name',
format_resource_type(model.__name__))
def get_resource_type_from_queryset(qs):
return get_resource_type_from_model(qs.model)
def get_resource_type_from_instance(instance):
if hasattr(instance, '_meta'):
return get_resource_type_from_model(instance._meta.model)
def get_resource_type_from_manager(manager):
return get_resource_type_from_model(manager.model)
def get_resource_type_from_serializer(serializer):
json_api_meta = getattr(serializer, 'JSONAPIMeta', None)
meta = getattr(serializer, 'Meta', None)
if hasattr(json_api_meta, 'resource_name'):
return json_api_meta.resource_name
elif hasattr(meta, 'resource_name'):
return meta.resource_name
elif hasattr(meta, 'model'):
return get_resource_type_from_model(meta.model)
raise AttributeError()
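# Illustrative resolution order (hypothetical serializer; the names are assumptions):
#   class OrderSerializer(serializers.ModelSerializer):
#       class JSONAPIMeta:
#           resource_name = 'orders'   # checked first
#       class Meta:
#           model = Order              # Meta.resource_name is checked second,
#                                      # then get_resource_type_from_model(Order)
#   # AttributeError is raised only when none of the above are available.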
def get_included_resources(request, serializer=None):
""" Build a list of included resources. """
include_resources_param = request.query_params.get('include') if request else None
if include_resources_param:
return include_resources_param.split(',')
else:
return get_default_included_resources_from_serializer(serializer)
def get_default_included_resources_from_serializer(serializer):
meta = getattr(serializer, 'JSONAPIMeta', None)
if meta is None and getattr(serializer, 'many', False):
meta = getattr(serializer.child, 'JSONAPIMeta', None)
return list(getattr(meta, 'included_resources', []))
def get_included_serializers(serializer):
included_serializers = copy.copy(getattr(serializer, 'included_serializers', dict()))
for name, value in iter(included_serializers.items()):
if not isinstance(value, type):
if value == 'self':
included_serializers[name] = (
serializer if isinstance(serializer, type) else serializer.__class__
)
else:
included_serializers[name] = import_class_from_dotted_path(value)
return included_serializers
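# Illustrative example (hypothetical serializer; the names are assumptions):
#   class CommentSerializer(serializers.ModelSerializer):
#       included_serializers = {
#           'author': 'myapp.serializers.AuthorSerializer',  # dotted path, imported lazily
#           'parent': 'self',                                 # resolves to CommentSerializer itself
#       }
#   get_included_serializers(CommentSerializer)
#   # -> {'author': AuthorSerializer, 'parent': CommentSerializer}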
def get_relation_instance(resource_instance, source, serializer):
try:
relation_instance = operator.attrgetter(source)(resource_instance)
except AttributeError:
# if the field is not defined on the model then we check the serializer
# and if no value is there we skip over the field completely
serializer_method = getattr(serializer, source, None)
if serializer_method and hasattr(serializer_method, '__call__'):
relation_instance = serializer_method(resource_instance)
else:
return False, None
if isinstance(relation_instance, Manager):
relation_instance = relation_instance.all()
return True, relation_instance
class Hyperlink(str):
"""
A string like object that additionally has an associated name.
We use this for hyperlinked URLs that may render as a named link
in some contexts, or render as a plain URL in others.
Comes from Django REST framework 3.2
https://github.com/tomchristie/django-rest-framework
"""
def __new__(self, url, name):
ret = str.__new__(self, url)
ret.name = name
return ret
is_hyperlink = True
def format_drf_errors(response, context, exc):
errors = []
# handle generic errors. ValidationError('test') in a view for example
if isinstance(response.data, list):
for message in response.data:
errors.append({
'detail': message,
'source': {
'pointer': '/data',
},
'status': encoding.force_text(response.status_code),
})
# handle all errors thrown from serializers
else:
for field, error in response.data.items():
field = format_value(field)
pointer = '/data/attributes/{}'.format(field)
# see if they passed a dictionary to ValidationError manually
if isinstance(error, dict):
errors.append(error)
elif isinstance(error, str):
classes = inspect.getmembers(exceptions, inspect.isclass)
# DRF sets the `field` to 'detail' for its own exceptions
if isinstance(exc, tuple(x[1] for x in classes)):
pointer = '/data'
errors.append({
'detail': error,
'source': {
'pointer': pointer,
},
'status': encoding.force_text(response.status_code),
})
elif isinstance(error, list):
for message in error:
errors.append({
'detail': message,
'source': {
'pointer': pointer,
},
'status': encoding.force_text(response.status_code),
})
else:
errors.append({
'detail': error,
'source': {
'pointer': pointer,
},
'status': encoding.force_text(response.status_code),
})
context['view'].resource_name = 'errors'
response.data = errors
return response
def format_errors(data):
if len(data) > 1 and isinstance(data, list):
data.sort(key=lambda x: x.get('source', {}).get('pointer', ''))
return {'errors': data}
|
from pyspark import SparkContext, SparkConf
class Spark(object):
def __init__(self, app_name='', master='local', executor_memory='4g'):
self.app_name = app_name
self.master = master
self.executor_memory = executor_memory
self.sc = None
def context(self):
if not self.sc: self.initialize()
return self.sc
def initialize(self):
conf = SparkConf().setAppName(self.app_name)
conf.setMaster(self.master)
if self.master != 'local':
            conf.set("spark.executor.memory", self.executor_memory)
self.sc = SparkContext(conf=conf)
def stop(self):
if self.sc:
self.sc.stop()
self.sc = None
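# A minimal usage sketch (assumes a local Spark installation; the RDD pipeline is illustrative):
#   spark = Spark(app_name='word-count')
#   sc = spark.context()
#   counts = sc.parallelize(['a', 'b', 'a']).map(lambda w: (w, 1)).reduceByKey(lambda x, y: x + y)
#   print(counts.collect())   # e.g. [('a', 2), ('b', 1)]
#   spark.stop()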
|
# read in config
from __future__ import absolute_import, print_function, division
import configobj
import pkg_resources
import os
import validate
def check_user_dir(g, app_name='hfinder'):
"""
Check directories exist for saving apps/configs etc. Create if not.
"""
direc = os.path.expanduser('~/.' + app_name)
if not os.path.exists(direc):
try:
os.mkdir(direc)
except Exception as err:
g.clog.warn('Failed to make directory ' + str(err))
def load_config(g, app_name='hfinder', env_var='HCAM_FINDER_CONF'):
"""
Populate application level globals from config file
"""
configspec_file = pkg_resources.resource_filename('hcam_finder',
'data/configspec.ini')
# try and load config file.
# look in the following locations in order
# - HCAM_FINDER_CONF environment variable
# - ~/.hfinder directory
# - package resources
paths = []
if env_var in os.environ:
paths.append(os.environ[env_var])
paths.append(os.path.expanduser('~/.' + app_name))
resource_dir = pkg_resources.resource_filename('hcam_finder', 'data')
paths.append(resource_dir)
# now load config file
config = configobj.ConfigObj({}, configspec=configspec_file)
for loc in paths:
try:
with open(os.path.join(loc, "config")) as source:
config = configobj.ConfigObj(source, configspec=configspec_file)
break
except IOError:
pass
# validate ConfigObj, filling defaults from configspec if missing from config file
validator = validate.Validator()
result = config.validate(validator)
if result is not True:
g.clog.warn('Config file validation failed')
# now update globals with config
g.cpars.update(config)
def write_config(g, app_name='hfinder'):
"""
Dump application level globals to config file
"""
configspec_file = pkg_resources.resource_filename('hcam_finder',
'data/configspec.ini')
config = configobj.ConfigObj({}, configspec=configspec_file)
config.update(g.cpars)
config.filename = os.path.expanduser('~/.{}/config'.format(app_name))
    if not os.path.exists(config.filename):  # only write if no config file exists yet
try:
config.write()
except Exception as err:
g.clog.warn("Could not write config file:\n" + str(err))
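# A minimal sketch of the `g` container these helpers expect (inferred from usage above;
# the real application globals object may differ):
#   import logging
#   class _Globals:
#       def __init__(self):
#           self.clog = logging.getLogger('hfinder')  # must provide .warn()
#           self.cpars = {}                           # dict-like store of configuration parameters
#   g = _Globals()
#   check_user_dir(g)
#   load_config(g)
#   write_config(g)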
|