content stringlengths 5 1.05M |
|---|
from datetime import datetime
from threading import Thread
from time import sleep
import pytest
import retry
exception_message = 'testing exceptions'
def foo(bar):
    """Return *bar* unchanged; raise ArithmeticError for negative input."""
    if bar >= 0:
        return bar
    raise ArithmeticError(exception_message)
def test_success_criteria():
    """A success predicate that never passes raises MaximumRetriesExceeded."""
    decorated = retry.retry(success=lambda value: value > 0)(foo)
    with pytest.raises(retry.MaximumRetriesExceeded):
        decorated(0)
def test_exception_criteria():
    """Listed exceptions propagate once the retries are exhausted."""
    decorated = retry.retry(exceptions=(ArithmeticError,))(foo)
    with pytest.raises(ArithmeticError) as exc_info:
        decorated(-1)
    assert exception_message in str(exc_info.value)
def test_execution():
    """A successful run still executes and returns normally."""
    decorated = retry.retry(
        success=lambda value: value > 0,
        exceptions=(ArithmeticError,),
    )(foo)
    assert decorated(1) == 1
def test_interval():
    """Interval expected is the interval to complete an action"""
    def _success_interval(in_dict):
        # Mutates the shared dict so every retry sees the incremented count.
        in_dict['num'] += 1
        return in_dict['num']
    baz_with_interval = retry.retry(
        success=lambda x: x > 5, interval=1)(_success_interval)
    start = datetime.now()
    baz_with_interval({'num': 0})
    elapsed = datetime.now() - start
    # Success requires num > 5, i.e. six calls, so at least five 1-second
    # intervals must have elapsed between attempts.
    assert elapsed.seconds >= 5
def test_invalid_parameters():
    """Passing None for both exceptions and success is rejected."""
    with pytest.raises(TypeError):
        retry.retry(success=None, exceptions=None)(foo)
def test_unsuccessful_timeout():
    """A function that never succeeds trips MaximumTimeoutExceeded."""
    decorated = retry.retry(
        timeout=5, interval=1, success=lambda value: value > 0)(foo)
    with pytest.raises(retry.MaximumTimeoutExceeded):
        decorated(-1)
def test_successful_timeout():
    """A run that succeeds within the timeout does not raise."""
    def _bump(state):
        state['num'] += 1
        return state['num']
    try:
        decorated = retry.retry(
            timeout=10, interval=1, success=lambda count: count == 5)(_bump)
        decorated({'num': 0})
    except retry.MaximumTimeoutExceeded:
        pytest.fail('Expected the timeout not to be exceeded')
def test_disarm_signal_on_success():
    """Success with a timeout disarms signal"""
    _test_func = retry.retry(success=lambda x: True, timeout=1, interval=0.5)(foo)
    _test_func(1)
    # Sleep past the 1-second timeout: if the alarm signal were still armed
    # after the successful call, it would fire here and fail the test.
    sleep(1.2)
def test_successful_thread():
    """The decorated function keeps working when run inside a thread."""
    calls = []

    @retry.retry(timeout=1, success=lambda acc: len(acc) == 3)
    def append_once(acc):
        acc.append(0)
        return acc

    worker = Thread(target=append_once, args=[calls])
    worker.start()
    worker.join()
    assert 3 == len(calls)
def test_unsuccessful_thread():
    """Unsuccessful with function as thread, timed out"""
    retryed = []
    # Shadows the module-level foo on purpose; runs inside the worker thread
    # so the timeout mechanism is exercised off the main thread.
    def foo(retryed):
        @retry.retry(timeout=1, success=lambda x: False)
        def bar(retryed):
            sleep(0.2)
            retryed.append(0)
        with pytest.raises(retry.MaximumTimeoutExceeded):
            bar(retryed)
    t = Thread(target=foo, args=[retryed])
    t.start()
    t.join()
    # About five 0.2-second attempts fit in the 1-second timeout; the bounds
    # allow for scheduling jitter.
    assert 3 <= len(retryed) <= 5
|
from .amazon import COMPUTERS, PHOTO, Amazon
from .botnet import C2, CHORD, DEBRU, KADEM, LEET, P2P, Botnet
from .cite_seer import CiteSeer
from .coauthor import CS, PHYSICS, Coauthor
from .cora import Cora
from .ppi import PPI
from .pub_med import PubMed
from .qm9 import QM9, Qm9
# Public API of the datasets package, grouped by the submodule each name
# comes from (see the relative imports above).
__all__ = [
    # .cite_seer
    "CiteSeer",
    # .cora
    "Cora",
    # .ppi
    "PPI",
    # .pub_med
    "PubMed",
    # .qm9
    "QM9",
    "Qm9",
    # .amazon
    "Amazon",
    "COMPUTERS",
    "PHOTO",
    # .coauthor
    "Coauthor",
    "CS",
    "PHYSICS",
    # .botnet
    "Botnet",
    "CHORD",
    "DEBRU",
    "KADEM",
    "LEET",
    "C2",
    "P2P",
]
|
import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
TIMEOUT = 120
class TestCreateBase:
    """
    ******************************************************************
    The following cases are used to test `create_partition` function
    ******************************************************************
    """
    # NOTE: `connect`, `collection`, `id_collection` and `args` are pytest
    # fixtures supplied by the project's conftest (live Milvus connection).

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_a(self, connect, collection):
        '''
        target: test create partition, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(600)
    def test_create_partition_limit(self, connect, collection, args):
        '''
        target: test create partitions, check status returned
        method: call function: create_partition for 4097 times
        expected: exception raised
        '''
        threads_num = 8
        threads = []
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
        def create(connect, threads_num):
            # Each worker creates its share of max_partition_num partitions.
            for i in range(max_partition_num // threads_num):
                tag_tmp = gen_unique_str()
                connect.create_partition(collection, tag_tmp)
        for i in range(threads_num):
            # Each thread needs its own client connection.
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            t = threading.Thread(target=create, args=(m, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # The partition limit is now reached, so one more create must raise.
        tag_tmp = gen_unique_str()
        with pytest.raises(Exception) as e:
            connect.create_partition(collection, tag_tmp)

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_repeat(self, connect, collection):
        '''
        target: test create partition, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)
        try:
            connect.create_partition(collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "create partition failed: partition name = %s already exists" % default_tag
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_collection_not_existed(self, connect):
        '''
        target: test create partition, its owner collection name not existed in db, check status returned
        method: call function: create_partition
        expected: status not ok
        '''
        collection_name = gen_unique_str()
        try:
            connect.create_partition(collection_name, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "create partition failed: can't find collection: %s" % collection_name

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_name_name_None(self, connect, collection):
        '''
        target: test create partition, tag name set None, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        tag_name = None
        try:
            connect.create_partition(collection, tag_name)
        except Exception as e:
            # Client-side validation error: None is rejected before the RPC.
            assert e.args[0] == "`partition_name` value None is illegal"

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_different_partition_names(self, connect, collection):
        '''
        target: test create partition twice with different names
        method: call function: create_partition, and again
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)
        tag_name = gen_unique_str()
        connect.create_partition(collection, tag_name)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_insert_default(self, connect, id_collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        # No partition_name given: rows go to the default partition.
        result = connect.insert(id_collection, default_entities)
        assert len(result.primary_keys) == len(ids)

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_insert_with_tag(self, connect, id_collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == len(ids)

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status not ok
        '''
        tag_new = "tag_new"
        connect.create_partition(collection, default_tag)
        ids = [i for i in range(default_nb)]
        try:
            # Inserting into a partition that was never created must fail.
            connect.insert(collection, default_entities, partition_name=tag_new)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "partitionID of partitionName:%s can not be find" % tag_new

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_insert_same_tags(self, connect, id_collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        ids = [(i+default_nb) for i in range(default_nb)]
        new_result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(new_result.primary_keys) == default_nb
        # Flush so get_collection_stats reflects both inserts.
        connect.flush([id_collection])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == default_nb * 2

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
        '''
        target: test create two partitions, and insert vectors with the same tag to each collection, check status returned
        method: call function: create_partition
        expected: status ok, collection length is correct
        '''
        connect.create_partition(collection, default_tag)
        collection_new = gen_unique_str()
        connect.create_collection(collection_new, default_fields)
        connect.create_partition(collection_new, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        new_result = connect.insert(collection_new, default_entities, partition_name=default_tag)
        assert len(new_result.primary_keys) == default_nb
        connect.flush([collection, collection_new])
        # Same partition tag in two collections must not interfere.
        res = connect.get_collection_stats(collection)
        assert res["row_count"] == default_nb
        res = connect.get_collection_stats(collection_new)
        assert res["row_count"] == default_nb
class TestShowBase:
    """
    ******************************************************************
    The following cases are used to test `list_partitions` function
    ******************************************************************
    """
    @pytest.mark.tags(CaseLabel.L0)
    def test_list_partitions(self, connect, collection):
        '''
        target: test show partitions, check status and partitions returned
        method: create partition first, then call function: list_partitions
        expected: status ok, partition correct
        '''
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    @pytest.mark.tags(CaseLabel.L0)
    def test_list_partitions_no_partition(self, connect, collection):
        '''
        target: test show partitions with collection name, check status and partitions returned
        method: call function: list_partitions
        expected: status ok, partitions correct
        '''
        res = connect.list_partitions(collection)
        # A fresh collection only contains the built-in '_default' partition.
        assert compare_list_elements(res, ['_default'])

    @pytest.mark.tags(CaseLabel.L0)
    def test_show_multi_partitions(self, connect, collection):
        '''
        target: test show partitions, check status and partitions returned
        method: create partitions first, then call function: list_partitions
        expected: status ok, partitions correct
        '''
        tag_new = gen_unique_str()
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, tag_new)
        res = connect.list_partitions(collection)
        assert compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
    """
    ******************************************************************
    The following cases are used to test `has_partition` function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        # Parametrized fixture: yields each invalid partition-name string.
        yield request.param

    @pytest.mark.tags(CaseLabel.L0)
    def test_has_partition_a(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: create partition first, then call function: has_partition
        expected: status ok, result true
        '''
        connect.create_partition(collection, default_tag)
        res = connect.has_partition(collection, default_tag)
        logging.getLogger().info(res)
        assert res

    @pytest.mark.tags(CaseLabel.L0)
    def test_has_partition_multi_partitions(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: create partition first, then call function: has_partition
        expected: status ok, result true
        '''
        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
            connect.create_partition(collection, tag_name)
        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
            res = connect.has_partition(collection, tag_name)
            assert res

    @pytest.mark.tags(CaseLabel.L0)
    def test_has_partition_name_not_existed(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: then call function: has_partition, with tag not existed
        expected: status ok, result empty
        '''
        # No partition is created, so the lookup must be falsy, not an error.
        res = connect.has_partition(collection, default_tag)
        logging.getLogger().info(res)
        assert not res

    @pytest.mark.tags(CaseLabel.L0)
    def test_has_partition_collection_not_existed(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: then call function: has_partition, with collection not existed
        expected: status not ok
        '''
        collection_name = "not_existed_collection"
        try:
            connect.has_partition(collection_name, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "HasPartition failed: can't find collection: %s" % collection_name

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        '''
        target: test has partition, with invalid tag name, check status returned
        method: call function: has_partition
        expected: status ok
        '''
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.has_partition(collection, tag_name)
class TestDropBase:
    """
    ******************************************************************
    The following cases are used to test `drop_partition` function
    ******************************************************************
    """
    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_partition_a(self, connect, collection):
        '''
        target: test drop partition, check status and partition if existed
        method: create partitions first, then call function: drop_partition
        expected: status ok, no partitions in db
        '''
        connect.create_partition(collection, default_tag)
        res1 = connect.list_partitions(collection)
        assert default_tag in res1
        connect.drop_partition(collection, default_tag)
        res2 = connect.list_partitions(collection)
        assert default_tag not in res2

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_partition_name_not_existed(self, connect, collection):
        '''
        target: test drop partition, but tag not existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok
        '''
        connect.create_partition(collection, default_tag)
        new_tag = "new_tag"
        try:
            connect.drop_partition(collection, new_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: partition %s does not exist" % new_tag

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_partition_name_not_existed_A(self, connect, collection):
        '''
        target: test drop partition, but collection not existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok
        '''
        connect.create_partition(collection, default_tag)
        new_collection = gen_unique_str()
        try:
            connect.drop_partition(new_collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: can't find collection: %s" % new_collection

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_repeatedly(self, connect, collection):
        '''
        target: test drop partition twice, check status and partition if existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok, no partitions in db
        '''
        connect.create_partition(collection, default_tag)
        connect.drop_partition(collection, default_tag)
        # Give the server time to apply the drop before retrying it.
        time.sleep(2)
        try:
            connect.drop_partition(collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: partition %s does not exist" % default_tag
        tag_list = connect.list_partitions(collection)
        assert default_tag not in tag_list

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_partition_create(self, connect, collection):
        '''
        target: test drop partition, and create again, check status
        method: create partitions first, then call function: drop_partition, create_partition
        expected: status not ok, partition in db
        '''
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
        connect.drop_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), ['_default'])
        # Wait for the drop to settle before recreating the same name.
        time.sleep(2)
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
    """Invalid collection/partition name handling for partition APIs."""

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        # Parametrized fixture: yields each invalid partition-name string.
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        # Parametrized fixture: yields each invalid collection-name string.
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
        '''
        target: test drop partition, with invalid collection name, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        collection_name = get_collection_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.drop_partition(collection_name, default_tag)

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        '''
        target: test drop partition, with invalid tag name, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.drop_partition(collection, tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
        '''
        target: test show partitions, with invalid collection name, check status returned
        method: call function: list_partitions
        expected: status not ok
        '''
        collection_name = get_collection_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.list_partitions(collection_name)
class TestNewCase(object):
    """The built-in '_default' partition must never be droppable."""

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_default_partition_A(self, connect, collection):
        '''
        target: test drop partition of default, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        try:
            connect.drop_partition(collection, partition_name='_default')
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: default partition cannot be deleted"
        list_partition = connect.list_partitions(collection)
        assert '_default' in list_partition

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_default_partition_B(self, connect, collection):
        '''
        target: test drop partition of default, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        # Same as _A, but with another user partition present as well.
        connect.create_partition(collection, default_tag)
        try:
            connect.drop_partition(collection, partition_name='_default')
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: default partition cannot be deleted"
        list_partition = connect.list_partitions(collection)
        assert '_default' in list_partition
|
from .settings import BongSettings, DEFAULT_MESSAGE
from .metadata import VERSION, SUMMARY
import argparse
# Command-line parser.  Every duration flag writes to the same dest
# ('minutes'), so the last flag on the command line wins; the default is a
# 25-minute Pomodoro.
PARSER = argparse.ArgumentParser(description=SUMMARY)
PARSER.add_argument('-V', '--version', action='version',
                    version='%(prog)s {}'.format(VERSION),
                    help='show version')
PARSER.add_argument('-s', '--short-break', action='store_const', const=5,
                    dest='minutes', default=25,
                    help='time for a Pomodoro system short break')
PARSER.add_argument('-l', '--long-break', action='store_const',
                    const=15, dest='minutes',
                    help='time for a Pomodoro system long break')
PARSER.add_argument('-p', '--pomodoro', action='store_const',
                    const=25, dest='minutes',
                    help='time for a Pomodoro system single Pomodoro')
PARSER.add_argument('-t', '--time', action='store', type=int, dest='minutes',
                    help='timer length, in minutes')
PARSER.add_argument('-m', '--message', default=DEFAULT_MESSAGE,
                    help='message to display in the notifier')
def parse_args(args):
    """Parse command-line *args* into a BongSettings (time in seconds)."""
    namespace = PARSER.parse_args(args)
    return BongSettings(time=namespace.minutes * 60, message=namespace.message)
|
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.32.0"
class TinyAesCConan(ConanFile):
    """Conan recipe for kokke/tiny-AES-c: small portable AES128/192/256 in C."""
    name = "tiny-aes-c"
    license = "Unlicense"
    homepage = "https://github.com/kokke/tiny-AES-c"
    url = "https://github.com/conan-io/conan-center-index"
    description = "Small portable AES128/192/256 in C"
    topics = ("encryption", "crypto", "AES")
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        # AES128, AES192 or AES256
        "aes_block_size": ["aes128", "aes192", "aes256"],
        # enable AES encryption in CBC-mode of operation
        "cbc": [True, False],
        # enable the basic ECB 16-byte block algorithm
        "ecb": [True, False],
        # enable encryption in counter-mode
        "ctr": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "aes_block_size": "aes128",
        "cbc": True,
        "ecb": True,
        "ctr": True,
    }
    exports_sources = ["CMakeLists.txt"]
    generators = "cmake"
    # Cached CMake helper, built lazily by _configure_cmake.
    _cmake = None

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        return "build_subfolder"

    @property
    def _cflags(self):
        # Preprocessor definitions mirroring the selected options; reused both
        # for building (CMAKE_C_FLAGS) and for consumers (cpp_info.defines).
        return [
            "{}=1".format(str(self.options.aes_block_size).upper()),
            "CBC={}".format("1" if self.options.cbc else "0"),
            "ECB={}".format("1" if self.options.ecb else "0"),
            "CTR={}".format("1" if self.options.ctr else "0")
        ]

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            del self.options.fPIC
        # Plain C library: C++ standard/runtime settings do not apply.
        del self.settings.compiler.cppstd
        del self.settings.compiler.libcxx

    def validate(self):
        if not self.options.cbc and not self.options.ecb and not self.options.ctr:
            raise ConanInvalidConfiguration("Need to at least specify one of CBC, ECB or CTR modes")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "tiny-AES-c-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

    def _configure_cmake(self):
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["CMAKE_C_FLAGS"] = " ".join("-D{}".format(flag) for flag in self._cflags)
        self._cmake.configure(build_folder=self._build_subfolder)
        return self._cmake

    def build(self):
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        self.copy("unlicense.txt", dst="licenses", src=self._source_subfolder)
        self.copy(pattern="*.h", dst="include", src=self._source_subfolder)
        self.copy(pattern="*.hpp", dst="include", src=self._source_subfolder)
        self.copy(pattern="*.a", dst="lib", keep_path=False)
        self.copy(pattern="*.lib", dst="lib", keep_path=False)
        self.copy(pattern="*.dylib", dst="lib", keep_path=False)
        self.copy(pattern="*.so*", dst="lib", keep_path=False)
        self.copy(pattern="*.dll", dst="bin", keep_path=False)

    def package_info(self):
        self.cpp_info.libs = ["tiny-aes"]
        self.cpp_info.defines = self._cflags
|
from conftest import is_valid
import pytest
@pytest.mark.usefixtures("client_class", "empty_test_db")
class TestEmergenyContactsAuthorizations:
    """Authorization checks for the /api/emergencycontacts endpoints.

    NOTE(review): the class name misspells "Emergency"; it is kept unchanged
    so external references to the test id keep working.
    """

    def setup(self):
        # Single source of truth for the endpoint.  The original tests
        # shadowed this attribute with duplicate local literals.
        self.endpoint = "/api/emergencycontacts"

    def test_emergency_contacts_POST(self, auth_headers):
        """POST requires admin authorization."""
        new_contact = {
            "name": "Narcotics Anonymous",
            "description": "Cool description",
            "contact_numbers": [
                {"number": "503-291-9111", "numtype": "Call"},
                {"number": "503-555-3321", "numtype": "Text"},
            ],
        }
        response = self.client.post(
            self.endpoint, json=new_contact, headers=auth_headers["pm"]
        )
        assert is_valid(response, 401)  # UNAUTHORIZED - Admin Access Required
        response = self.client.post(self.endpoint, json=new_contact)
        assert is_valid(response, 401)  # UNAUTHORIZED - Missing Authorization Header
        assert response.json == {"message": "Missing authorization header"}

    def test_emergency_contacts_DELETE(self, auth_headers):
        """DELETE requires admin authorization."""
        contact_id = 1  # renamed from `id` to avoid shadowing the builtin
        response = self.client.delete(f"{self.endpoint}/{contact_id}")
        assert is_valid(response, 401)  # UNAUTHORIZED - Missing Authorization Header
        assert response.json == {"message": "Missing authorization header"}
        response = self.client.delete(
            f"{self.endpoint}/{contact_id}", headers=auth_headers["pm"]
        )
        assert is_valid(response, 401)  # UNAUTHORIZED - Admin Access Required
|
import pytest
from django.urls import reverse
from model_mommy import mommy
from gs_project.django_assertions import assert_contains
from apps.produto.models import Produto
# Fixture: a logged-in user for authenticated requests.
@pytest.fixture
def usuario_logado(db, django_user_model):
    usuario_model = mommy.make(django_user_model, first_name='fulano')
    return usuario_model
@pytest.fixture
def cliente_com_usuario_logado(usuario_logado, client):
    # Test client pre-authenticated as the fixture user.
    client.force_login(usuario_logado)
    return client
# Fixture: creates three dummy products.
@pytest.fixture
def produto(db):
    return mommy.make(Produto, 3)
# Fixture: response to the product-list page, fetched as the logged-in user.
@pytest.fixture
def resp(cliente_com_usuario_logado, produto):
    return cliente_com_usuario_logado.get(reverse('list_produto'))
# Checks that the product-list page exists.
def test_lista_disponivel(resp):
    assert resp.status_code == 200
# Verifies the products' data is rendered on the page.
def test_dados(resp, produto):
    for p in produto:
        assert_contains(resp, p.nome)
        assert_contains(resp, p.descricao)
# Verifies the edit link is rendered for every product.
def test_link_alterar(resp, produto):
    for p in produto:
        assert_contains(resp, reverse('edit_produto', kwargs={'pk': p.pk}))
# Verifies the delete link is rendered for every product.
def test_link_deletar(resp, produto):
    for p in produto:
        assert_contains(resp, reverse('delete_produto', kwargs={'pk': p.pk}))
# Verifies the create-product link is rendered.
def test_link_cadastrar(resp, produto):
    assert_contains(resp, reverse('create_produto'))
|
from flaskblog import create_app
# Application instance built via the factory pattern.
app = create_app()

if __name__=="__main__":
    # Development entry point only: debug mode and port 5500 are for local
    # use and must not be enabled in production.
    app.run(debug=True, port=5500)
|
'''
Created on Dec 13, 2011
@author: sean
'''
from meta.decompiler import decompile_func
from meta.asttools.visitors import Visitor
import ast
from meta.asttools.visitors.print_visitor import print_ast
import clyther as cly
import clyther.runtime as clrt
import opencl as cl
from clyther.array.utils import broadcast_shapes
# Helper: copy the source-location attributes of *node* so freshly built AST
# nodes carry valid lineno/col_offset values.
n = lambda node: {'lineno':node.lineno, 'col_offset': node.col_offset}
class BlitzVisitor(Visitor):
    """AST visitor (Python 2 / `meta` library) that rewrites a lambda so each
    terminal sub-expression becomes a fresh argument bound to its evaluated
    value, collected in ``self.locls``."""

    def __init__(self, filename, func_globals):
        self.filename = filename
        self.func_globals = func_globals
        self.locls = {}   # generated variable name -> evaluated value
        self.count = 0    # counter for unique variable names

    def new_var(self):
        # Fresh deterministic name: var001, var002, ...
        self.count += 1
        return 'var%03i' % self.count

    def visitLambda(self, node):
        # Rebuild the lambda with the generated variables (sorted for a
        # deterministic signature) as its argument list.
        body = self.visit(node.body)
        args = ast.arguments(args=[], vararg=None, kwarg=None, defaults=[])
        for var_id in sorted(self.locls.keys()):
            args.args.append(ast.Name(var_id, ast.Param(), **n(node)))
        return ast.Lambda(args, body, **n(node))

    def visitDefault(self, node):
        # Any node without a dedicated visitor is evaluated eagerly in the
        # original function's globals and replaced by a variable reference.
        codeobj = compile(ast.Expression(node), self.filename, 'eval')
        value = eval(codeobj, self.func_globals)
        var_id = self.new_var()
        self.locls[var_id] = value
        return ast.Name(var_id, ast.Load(), **n(node))

    def visitBinOp(self, node):
        # Recurse into both operands; the operator node is reused as-is.
        left = self.visit(node.left)
        right = self.visit(node.right)
        return ast.BinOp(left, node.op, right, **n(node))
blitzed_kernel_py_source = '''
def blitzed_kernel(function, out, {args}):
gid = clrt.get_global_id(0)
{arg_index}
out[gid] = function({arg_values})
'''
def create_n_arg_kernel(keys):
    """Build a CLyther kernel taking *keys* as elementwise array arguments.

    The kernel source is generated from ``blitzed_kernel_py_source``: each
    argument is indexed at the global id and the results are passed to
    ``function``.  The global work size is the size of the first argument.
    """
    args = ', '.join(key for key in keys)
    arg_values = ', '.join('%s_i' % key for key in keys)
    # The separator's trailing spaces must match the template's indentation.
    arg_index = '\n    '.join('%s_i = %s[gid]' % (arg, arg) for arg in keys)
    py_source = blitzed_kernel_py_source.format(args=args, arg_index=arg_index, arg_values=arg_values)
    locls = {}
    eval(compile(py_source, '', 'exec'), globals(), locls)
    blitzed_kernel = cly.kernel(locls['blitzed_kernel'])
    blitzed_kernel.global_work_size = eval(compile('lambda %s: [%s.size]' % (keys[0], keys[0]), '', 'eval'))
    return blitzed_kernel
def blitz(queue, func, out=None):
    '''
    lets get blitzed!

    Decompile *func* (a lambda), replace its terminal sub-expressions with
    device arrays, broadcast them to a common shape and launch an
    elementwise kernel writing into *out*.  Python 2 only (func_globals,
    func_closure, print statements).
    '''
    func_ast = decompile_func(func)
    func_globals = func.func_globals.copy()
    if func.func_closure:
        # Make closed-over cell values visible to the eager evaluator.
        func_globals.update({name:cell.cell_contents for name, cell in zip(func.func_code.co_freevars, func.func_closure)})
    blitzer = BlitzVisitor(func.func_code.co_filename, func_globals)
    blitzed = ast.Expression(blitzer.visit(func_ast))
    blitzed_code = compile(blitzed, func.func_code.co_filename, 'eval')
    blitzed_func = eval(blitzed_code)
    blitz_kernel = create_n_arg_kernel(sorted(blitzer.locls.keys()))
    args = {}
    for key, var in blitzer.locls.items():
        if not isinstance(var, cl.DeviceMemoryView):
            # Upload host values so every kernel argument lives on the device.
            var = cl.from_host(queue.context, var)
        args[key] = var
    # Broadcast all operands to a common shape before launching.
    shape = broadcast_shapes([var.shape for var in args.values()])
    print "shape", shape  # debug output (Python 2 print statement)
    for key, var in args.items():
        args[key] = cl.broadcast(var, shape)
    print "out, **args", out, args  # debug output
    blitz_kernel(queue, blitzed_func, out, **args)
    # print blitzed_func()
|
# coding: UTF-8
import getRumors
import createCSV
def getClickedRumors():
    """Fetch click events and return the clicked rumor ids, excluding events
    from blacklisted users and events with no replied rumor.

    Returns a list (with duplicates) of ``reply_rumor`` values.
    """
    clickedRumors = getRumors.getClick("2020-01-01", "2020-09-10", 2000)
    # Internal test accounts whose clicks must be ignored (set for O(1) lookup).
    blackList = {"Uc8d4d6282aeeced782be778716d845c3", "Udda3ab30820ab29ac22829a33c516392",
                 "Ua07336c0212267088e339e2d50a82ace", "Uf811de50a7725a63c181cf7fc8977ae7", "U081b991780b206019a393004eadbe039"}
    clickedRumorsClean = []
    for c in clickedRumors:
        # BUG FIX: the original used `is not "null"`, which compares object
        # identity against a string literal and is effectively always True.
        # Value equality (`!=`) is what was intended.
        if c['line_user_id'] not in blackList and c['reply_rumor'] != "null":
            clickedRumorsClean.append(c['reply_rumor'])
    return clickedRumorsClean
def returnIds(arr):
    """Return the "id" field of every element of *arr*, in order."""
    return [item["id"] for item in arr]
def main():
    """Build and export a CSV of rumors ranked by how often they were
    clicked, joining click counts with rumor contents."""
    rumors = getRumors.getRumors(
        "2020-01-01", "2020-09-10", 2000)  # id, content
    clickedRumorIds = getClickedRumors()
    # Count clicks per rumor id in a single pass. The original called
    # list.count() inside a loop and then de-duplicated manually, which
    # is O(n^2); Counter keeps first-seen order (dict ordering), so the
    # output order is unchanged.
    from collections import Counter
    counts = Counter(clickedRumorIds)
    # Join each counted id with its rumor content.
    arr3 = []
    for rumor_id, count in counts.items():
        for r in rumors:
            if str(r["id"]) == str(rumor_id):
                arr3.append(
                    {"id": r["id"], "content": r["content"], "count": count})
                break
    print(arr3)
    createCSV.main(arr3, "clickRumors")


main()
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from SciPy import SciPy
from numpy import allclose, arange, eye, linalg, ones
from scipy import linsolve, sparse
#######################################################################
class Matrix(SciPy):
    """Base VisTrails module wrapping a scipy sparse matrix in self.matrix."""

    def setSize(self, size):
        """Allocate the underlying matrix; subclasses override this."""
        pass

    def setMatrix(self, m):
        """Install an existing matrix object as the wrapped storage."""
        self.matrix = m

    def numElements(self):
        """Number of stored (non-zero) entries."""
        return self.matrix.getnnz()

    def maxNumElements(self):
        """Allocated capacity for non-zero entries."""
        return self.matrix.nzmax

    def rows(self):
        """Number of rows of the wrapped matrix."""
        return self.matrix.shape[0]

    def cols(self):
        """Number of columns of the wrapped matrix."""
        return self.matrix.shape[1]

    def Reals(self):
        """Return a new SparseMatrix holding the real parts."""
        result = SparseMatrix()
        result.matrix = self.matrix.copy()._real()
        return result

    def Imaginaries(self):
        """Return a new SparseMatrix holding the imaginary parts."""
        result = SparseMatrix()
        result.matrix = self.matrix.copy()._imag()
        return result

    def Conjugate(self):
        """Return a new SparseMatrix holding the complex conjugate."""
        result = SparseMatrix()
        result.matrix = self.matrix.conjugate().copy()
        return result

    def GetRow(self, i):
        """Return row i of the wrapped matrix."""
        return self.matrix.getrow(i)

    def GetCol(self, i):
        """Return column i of the wrapped matrix."""
        return self.matrix.getcol(i)
class SparseMatrix(Matrix):
    """Matrix backed by a CSC sparse matrix, initialized as an identity."""
    def setSize(self, size):
        """Allocate a size x size CSC matrix with ones on the diagonal."""
        self.matrix = sparse.csc_matrix((size, size))
        self.matrix.setdiag(ones(size))
class DenseMatrix(Matrix):
    """Matrix stored densely, initialized as a size x size identity."""
    def setSize(self, size):
        """Allocate a size x size identity and convert it to dense storage."""
        self.matrix = sparse.csc_matrix((size, size))
        self.matrix.setdiag(ones(size))
        # BUG FIX: todense() returns a new dense matrix and does not modify
        # self.matrix in place; the original discarded the result, leaving
        # the matrix sparse despite this class's name.
        self.matrix = self.matrix.todense()
class DOKMatrix(Matrix):
    """Matrix backed by a dictionary-of-keys sparse matrix (identity init)."""
    def setSize(self, size):
        """Allocate a size x size DOK matrix with ones on the diagonal."""
        self.matrix = sparse.dok_matrix((size, size))
        self.matrix.setdiag(ones(size))
class COOMatrix(Matrix):
    """Matrix backed by a coordinate-format sparse matrix (identity init)."""
    def __init__(self, mat):
        # Unlike SparseMatrix/DOKMatrix, this class takes the matrix at
        # construction time instead of via setSize/setMatrix.
        self.matrix=mat
    def setSize(self, size):
        """Allocate a size x size COO matrix with ones on the diagonal."""
        self.matrix = sparse.coo_matrix((size, size))
        self.matrix.setdiag(ones(size))
class CSRMatrix(Matrix):
    """Matrix backed by a compressed-sparse-row matrix (identity init)."""
    def __init__(self, mat):
        # Takes the wrapped matrix at construction time.
        self.matrix=mat
    def setSize(self, size):
        """Allocate a size x size CSR matrix with ones on the diagonal."""
        self.matrix = sparse.csr_matrix((size, size))
        self.matrix.setdiag(ones(size))
class LILMatrix(Matrix):
    """Matrix backed by a list-of-lists sparse matrix (identity init)."""
    def __init__(self, mat):
        # Takes the wrapped matrix at construction time.
        self.matrix=mat
    def setSize(self, size):
        """Allocate a size x size LIL matrix with ones on the diagonal."""
        self.matrix = sparse.lil_matrix((size, size))
        self.matrix.setdiag(ones(size))
#######################################################################
|
import requests
import bs4
import numpy as np
import pandas as pd
# Example Code
# Fetch one game record from the BoardGameGeek XML API and parse it.
game_id = 13
url = 'https://www.boardgamegeek.com/xmlapi/boardgame/' + str(game_id)
result = requests.get(url)
soup = bs4.BeautifulSoup(result.text, features='lxml')
# print(soup.find('name').text)
# Task Begins
# Explore the BGG API and see if you are able to find the following information about a game:
# Name
# Max and Min Players
# Play Time
# Game Description
# Some of the game mechanics
# Docs: https://boardgamegeek.com/wiki/page/BGG_XML_API&redirectedfrom=XML_API#
# All tag names present in the sample document.
# NOTE(review): `tags` is never used below -- kept from exploration.
tags = [tag.name for tag in soup.find_all()]
column_names = ['game_id',
                'name',
                'min_players',
                'max_players',
                'playing_time',
                'description',
                'game_mechanics']
rows = []
base_url = 'http://www.boardgamegeek.com/xmlapi/boardgame/'
# Build one row per game for ids 1..10.
for game_id in range(1, 11):
    url = base_url + str(game_id)
    response = requests.get(url)
    soup = bs4.BeautifulSoup(response.text, features='lxml')
    # NOTE(review): soup.find() returns None when a tag is missing, which
    # would raise AttributeError on .text below -- this assumes every game
    # record contains all of these tags. Confirm against the API.
    game_row = [
        game_id,
        soup.find('name').text,
        int(soup.find('minplayers').text),
        int(soup.find('maxplayers').text),
        int(soup.find('playingtime').text),
        soup.find('description').text,
        soup.find('boardgamemechanic').text
    ]
    rows.append(game_row)
    # print(rows[0])
# print('\nStart DF\n')
df = pd.DataFrame(data=rows, columns=column_names)
# print(df.head())
"""!
"""
import cv2
import imutils
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def _get_measures(gt, det, r):
"""!@brief
@note One ground truth point can be associated to
more than one predicted point.
"""
# Assertions
gt = gt[gt['frame'] == 1]
det = det[det['frame'] == 1]
# Prepare data
gt['count'] = 'FN'
det['count'] = 'FP'
# Find matchings and accumulate corresponding values
# @TODO
# Find a faster way to perform that.
for i_det, r_det in det.iterrows():
# Iterate in ground truth array
for i_gt, r_gt in gt.iterrows():
dist = np.sqrt((r_gt['x']-r_det['x'])**2 + (r_gt['y']-r_det['y'])**2)
if dist <= r:
gt['count'].at[i_gt] = 'TP'
det['count'].at[i_det] = 'TP'
break
# ===============================================
# Get countings
# ===============================================
try:
TP = gt.groupby('count').size().at['TP']
except:
TP = 0
try:
FN = gt.groupby('count').size().at['FN']
except:
FN = 0
try:
FP = det.groupby('count').size().at['FP']
except:
FP = 0
# Compute final measures
P = TP / (TP+FP)
R = TP / (TP+FN)
if P > 0 and R > 0:
F1 = (2*P*R) / (P+R)
else:
F1 = 0
return P, R, F1
def get_best_features(features=None,
                      threshold=0.9,
                      retained_value=0.1,
                      radius=5,
                      template=None,
                      mask_img=None,
                      gt=None):
    """!@brief Rank feature images by template-matching detection quality.

    For each feature image features[:, :, i]: normalize to 8-bit, match it
    against the patch cut out by ``template``, threshold and mask the
    correlation map, extract contour centroids as detections, and score
    them with _get_measures(). Features are then selected by descending
    normalized F1 until ``retained_value`` of the cumulative F1 mass is
    covered, and finally filtered to F1 > 0.1.

    @param features       3D array (H x W x n_features) of feature images.
    @param threshold      Correlation threshold applied to the TM map.
    @param retained_value Fraction of the cumulative F1 mass to retain.
    @param radius         Matching radius passed to _get_measures().
    @param template       Patch coordinates [x0, y0, x1, y1].
    @param mask_img       8-bit mask ANDed with the thresholded map.
    @param gt             Ground-truth DataFrame with 'frame', 'x', 'y'.

    @return
        List of best feature indexes.
    """
    # Assertions
    assert features is not None
    assert template is not None
    assert gt is not None
    # Empty measures array
    v_P = np.array([])
    v_R = np.array([])
    v_F = np.array([])
    # Apply TM on all the feature images
    # ==================================
    for i in range(features.shape[2]):
        # import timeit
        # start = timeit.timeit()
        # print('Processing frame', i)
        # matchTemplate needs an 8-bit image, so normalize each feature.
        feature_img = cv2.normalize(src=features[:, :, i],
                                    dst=None,
                                    alpha=0,
                                    beta=255,
                                    norm_type=cv2.NORM_MINMAX,
                                    dtype=cv2.CV_8U)
        # Create template image
        tmp = feature_img[template[1]:template[3],
                          template[0]:template[2]]  # [y0:y1, x0:x1]
        # Apply template matching
        tm_map = cv2.matchTemplate(feature_img, tmp, cv2.TM_CCOEFF_NORMED)
        # Adjust resulting map by adding borders (matchTemplate's output is
        # smaller than the input by the template size minus one).
        w, h = tmp.shape[::-1]
        left_right_border = int((w-1)/2)
        top_down_border = int((h-1)/2)
        tm_map = cv2.copyMakeBorder(src=tm_map,
                                    top=top_down_border,
                                    bottom=top_down_border,
                                    left=left_right_border,
                                    right=left_right_border,
                                    borderType=cv2.BORDER_CONSTANT,
                                    value=0)
        # Apply thresholding to resulting map
        binary = cv2.threshold(tm_map, threshold, 255, cv2.THRESH_BINARY)[1].astype(np.uint8)
        # Apply mask to the resulting map image
        binary = cv2.bitwise_and(binary, binary, mask=mask_img)
        # Get points from each binary object in the resulting image
        cnts = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # Array of detections
        det = []
        # Loop over the contours
        for c in cnts:
            # Compute the center of the contour
            M = cv2.moments(c)
            if M["m00"] > 0.0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                # Degenerate contour: fall back to its first vertex.
                cX = c[0][0][0]
                cY = c[0][0][1]
            # Add centroid value in det array
            dict_cnt = {'frame': 1, 'x': cX, 'y': cY}
            det.append(dict_cnt)
        # Get resulting measures
        df_gt = gt[gt['frame'] == 1]
        df_det = pd.DataFrame(det)
        P, R, F = _get_measures(df_gt, df_det, radius)
        try:
            v_P = np.append(v_P, P)
            v_R = np.append(v_R, R)
            v_F = np.append(v_F, F)
        except:
            v_P = P
            v_R = R
            v_F = F
    # Get indexes according to the retained measure value: normalize F1,
    # sort descending, and keep features until the cumulative normalized
    # F1 reaches retained_value.
    stand_v_F = v_F / (np.sum(v_F) + np.finfo(float).eps)
    idx_sorted = np.argsort(stand_v_F)[::-1]
    cum_sorted_v_F = np.cumsum(stand_v_F[idx_sorted])
    selected_v_F = idx_sorted[np.where(cum_sorted_v_F <= retained_value)]
    # Add a threshold for v_F: drop selected features with F1 <= 0.1.
    sub = np.where(v_F[selected_v_F] > 0.1)[0]
    sub_v_F = selected_v_F[sub]
    return sub_v_F
def template_matching(image=None,
                      templates=(),
                      scale_fusion='sum',
                      tmpl_fusion='sum'):
    """!@brief Multi-template, multi-scale TM_CCOEFF_NORMED matching.

    Each template is given as a pyramid (iterable of scaled versions).
    The correlation maps of all scales of one template are fused, and the
    per-template maps are then fused into the final result.

    @param image        Input image (required).
    @param templates    Iterable of template pyramids (non-empty).
    @param scale_fusion Fusion strategy across pyramid scales ('sum' only).
    @param tmpl_fusion  Fusion strategy across templates ('sum' only).
    @return float32 fused correlation map, or None on an unknown strategy.

    # ===========================================================
    # TEMPLATE MATCHING
    #
    # @NOTE: Other methods for TM:
    # [ ] TM_SQDIFF (accept a mask template)
    # [ ] TM_SQDIFF_NORMED
    # [ ] TM_CCORR
    # [ ] TM_CCORR_NORMED (accept a mask template)
    # [ ] TM_CCOEFF
    # [x] TM_CCOEFF_NORMED
    #
    # Return a map between [-1, 1]
    #   -1: max anti-correlation
    #    0: not correlated
    #    1: max correlation
    #
    # @See: https://vovkos.github.io/doxyrest-showcase/opencv/sphinx_rtd_theme/page_tutorial_template_matching.html
    #
    # ============================================================
    """
    # Assertions. NOTE: the default for `templates` was changed from the
    # mutable `[]` to `()`; the empty default was unusable anyway because
    # of this assertion, so callers are unaffected.
    assert image is not None
    assert len(templates) > 0
    # Normalize input image to 8-bit for matchTemplate.
    input_img = cv2.normalize(src=image,
                              dst=None,
                              alpha=0,
                              beta=255,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_8U)
    # Fusion map accumulated over all templates.
    map_fusion_tmpl = np.zeros_like(input_img, dtype=np.float32)
    # Loop for multiple templates
    # ===========================
    for tmpl_pyramid in templates:
        # Fusion map accumulated over this template's pyramid scales.
        map_fusion_pyr = np.zeros_like(input_img, dtype=np.float32)
        # Loop for mutiple scales in pyramid
        # ==================================
        for tmpl in tmpl_pyramid:
            # Apply Template Matching
            tm_map = cv2.matchTemplate(input_img, tmpl, cv2.TM_CCOEFF_NORMED)
            # Adjust resulting map by adding borders (matchTemplate output
            # is smaller than the input by the template size minus one).
            w, h = tmpl.shape[::-1]
            left_right_border = int((w-1)/2)
            top_down_border = int((h-1)/2)
            tm_map = cv2.copyMakeBorder(src=tm_map,
                                        top=top_down_border,
                                        bottom=top_down_border,
                                        left=left_right_border,
                                        right=left_right_border,
                                        borderType=cv2.BORDER_CONSTANT,
                                        value=0)
            # ================================================
            # FUSION SCALES (PYRAMID LEVELS)
            #
            # @NOTE: Another approach could be used to fusion
            # pyramid level results: sum, max value, or
            # pixel-wise multiplication.
            # ================================================
            if scale_fusion == 'sum':
                map_fusion_pyr = cv2.add(tm_map, map_fusion_pyr)
            else:
                # BUG FIX: corrected the "worng" typo in this user-facing
                # error message.
                print("Error: wrong fusion map strategy.")
                return None
        # ================================================
        # FUSION TEMPLATES
        #
        # @NOTE: Another approach could be used to fusion
        # template results: sum, max value, or
        # pixel-wise multiplication.
        # ================================================
        if tmpl_fusion == 'sum':
            map_fusion_tmpl = cv2.add(map_fusion_pyr, map_fusion_tmpl)
        else:
            # BUG FIX: same "worng" typo corrected here.
            print("Error: wrong fusion map strategy.")
            return None
    return map_fusion_tmpl
def build_pyramid(img=None, n_levels=3):
    """!@brief Build a Gaussian resolution pyramid with ``n_levels`` levels.

    Levels are ordered from the largest (upsampled) image down to the
    smallest (downsampled) one, and every level is finally resized so that
    both of its dimensions are odd.

    @param img      Input image (required).
    @param n_levels Number of levels, in the range [1, 4].
    @return List of level images (largest first), or None on a bad count.
    """
    # Assertions
    assert img is not None
    assert 0 < n_levels < 5

    def _up(image):
        # One pyrUp step: double both dimensions.
        return cv2.pyrUp(image, dstsize=(int(image.shape[1]*2), int(image.shape[0]*2)))

    def _down(image):
        # One pyrDown step: halve both dimensions.
        return cv2.pyrDown(image, dstsize=(int(image.shape[1]/2), int(image.shape[0]/2)))

    if n_levels == 1:
        # 0: original
        out_pyramid = [img]
    elif n_levels == 2:
        # 0: up_2, 1: original
        out_pyramid = [_up(img), img]
    elif n_levels == 3:
        # 0: up_2, 1: original, 2: down_2
        out_pyramid = [_up(img), img, _down(img)]
    elif n_levels == 4:
        # 0: up_4, 1: up_2, 2: original, 3: down_2
        out_pyramid = [_up(_up(img)), _up(img), img, _down(img)]
    else:
        # Unreachable given the assertion above; kept for safety.
        print("Number of pyramid levels much lower/bigger, try a number in the range [1,4].")
        return None

    # Resize any even-sized level up to the next odd size.
    for i, level in enumerate(out_pyramid):
        rows = level.shape[0]
        cols = level.shape[1]
        needs_resize = False
        if rows % 2 == 0:
            rows = rows + 1
            needs_resize = True
        if cols % 2 == 0:
            cols = cols + 1
            needs_resize = True
        if needs_resize:
            out_pyramid[i] = cv2.resize(level, dsize=(cols, rows))
    return out_pyramid
# iniexportfile
#########################################################################################################
# Imports
from configparser import ConfigParser as __ConfigParser
from ..error import SfcparseError
# Exception for Module
# Raised by iniexportfile() on invalid data or file errors; __module__ is
# overridden so tracebacks report the library's public module name.
class IniExportFile(SfcparseError): __module__ = SfcparseError.set_module_name()
#########################################################################################################
# Export ini file
# Create hollow reference name for "ini_data" to denote ini data needs to be exported for hinting exports
class __dummy_ini:
    """Not meant to be used"""
    # Hollow placeholder namespace: exists only so iniexportfile() can
    # annotate its ``data`` parameter as ``__dummy_ini.ini_data`` for
    # editor hinting without exposing a real type.
    class ini_data:
        """Not meant to be used"""
def iniexportfile(filename: str, data: __dummy_ini.ini_data) -> None:
    """
    Exports a new file from a ini data (ConfigParser) obj

    Enter new filename as str. Pass ini data for output to file

    [Example Use]

    iniexportfile('path/to/filename.ini', data)

    This is using the native configparser library shipped with the python standard library. Using ConfigParser method.
    For more information on the configparser library, visit: https://docs.python.org/3/library/configparser.html

    Raises IniExportFile when ``data`` is not a ConfigParser, or when
    writing the file fails.
    """
    # Plain string: the original used an f-string with no placeholders.
    __err_msg_parser = "Invalid data to export, type, or nothing specified"

    # Validate before touching the filesystem.
    if not isinstance(data, __ConfigParser):
        raise IniExportFile(__err_msg_parser, f'\nFILE: "(unknown)" \nDATA: {data}')

    try:
        with open(filename, 'w') as f:
            data.write(f)
    # TypeError/ValueError (bad data) share one handler instead of the
    # original's two identical except clauses; FileNotFoundError (bad
    # path) reports the file context only.
    except (TypeError, ValueError) as __err_msg:
        raise IniExportFile(__err_msg, f'\nFILE: "(unknown)" \nDATA: {data}')
    except FileNotFoundError as __err_msg:
        raise IniExportFile(__err_msg, f'\nFILE: "(unknown)"')
|
import os
import shutil
def GetFiles(extension):
    """Return the names of files in the current directory matching
    ``*.extension`` (extension given without the leading dot)."""
    import glob
    # glob.glob already returns a list; the original wrapped it in a
    # redundant identity comprehension.
    return glob.glob('*.{}'.format(extension))
def docx_opr(file_name):
    """Return (word_count, is_clean) for a .docx file.

    NOTE(review): the second value comes from ProfanityFilter.is_clean(),
    i.e. it is True when the text contains NO profanity -- despite the
    local variable being named ``profane``. Callers test ``not profane``
    to mean "contains profanity".
    """
    import docx2txt
    from profanityfilter import ProfanityFilter
    pf = ProfanityFilter()
    # Extract the document's plain text and count whitespace-separated words.
    document = docx2txt.process(file_name)
    words = document.split()
    # True when the document is clean (see note above about the name).
    profane = pf.is_clean(document)
    return(len(words), profane)
# Scan every immediate subdirectory for .docx files and move the ones
# that are disqualified (more than 300 words, or containing profanity)
# into a 'Disqualified' subfolder inside that directory.
folders = [name for name in os.listdir(".") if os.path.isdir(name)]
for folder in folders:
    # print(folder)
    os.chdir(folder)
    try:
        os.mkdir('Disqualified')
    except FileExistsError:
        # Folder already exists from a previous run; nothing to do.
        pass
    file_list = GetFiles('docx')
    for file in file_list:
        # print(file)
        wordlen, profane = docx_opr(file)
        # docx_opr's second value is True when the text is CLEAN, so
        # `not profane` here actually means "contains profanity".
        if (wordlen > 300) or not profane:
            shutil.move(file, 'Disqualified/' + file)
    # Return to the parent before processing the next folder.
    os.chdir("..")
|
"""
Process backend
===============
Run jobs using a process backend.
"""
import sys
import uuid
from subprocess import Popen, PIPE
import threading
# import logging
import random
from ..workflow import get_workflow
# from ..logger import log
from .scheduler import Scheduler
# from .protect import CatchExceptions
from .hybrid import hybrid_threaded_worker
from ..lib import (pull, push, Connection, object_name, EndOfQueue, FlushQueue)
from .messages import (EndOfWork)
from .remote.io import (JSONObjectReader, JSONObjectWriter)
def process_worker(registry, verbose=False, jobdirs=False,
                   init=None, finish=None, status=True):
    """Process worker.

    Start one external pilot-job Python process and return a Connection
    for exchanging jobs and results with it over its stdin/stdout (JSON
    objects serialised via ``registry``).

    :param registry: zero-argument callable returning a serialisation
        registry; its object name is passed to the child on the command line.
    :param verbose: pass ``-verbose`` to the child process.
    :param jobdirs: pass ``-jobdirs`` to the child process.
    :param init: optional function, passed by object name, run in the
        child before other jobs.
    :param finish: optional function, passed by object name, run when the
        child shuts down.
    :param status: when False, pass ``-nostatus`` to the child.
    """
    name = "process-" + str(uuid.uuid4())
    # Use the interpreter from the current prefix so the child shares
    # this environment.
    cmd = [sys.prefix + "/bin/python", "-m", "noodles.pilot_job",
           "-name", name, "-registry", object_name(registry)]
    if verbose:
        cmd.append("-verbose")
    if jobdirs:
        cmd.append("-jobdirs")
    if not status:
        cmd.append("-nostatus")
    if init:
        cmd.extend(["-init", object_name(init)])
    if finish:
        cmd.extend(["-finish", object_name(finish)])

    remote = Popen(
        cmd,
        stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)

    def read_stderr():
        """Read stderr of remote process and sends lines to logger."""
        for line in remote.stderr:
            print(name + ": " + line.rstrip())

    # Daemon thread: must not keep the interpreter alive if the child hangs.
    stderr_reader_thread = threading.Thread(target=read_stderr, daemon=True)
    stderr_reader_thread.start()

    @push
    def send_job():
        """Coroutine, sends jobs to remote worker over standard input."""
        reg = registry()
        sink = JSONObjectWriter(reg, remote.stdin)
        while True:
            msg = yield
            if msg is EndOfQueue:
                # Tell the child no more work is coming, then wait for it
                # (and its stderr reader) to finish before returning.
                try:
                    sink.send(EndOfWork)
                except StopIteration:
                    pass
                remote.wait()
                stderr_reader_thread.join()
                return
            if msg is FlushQueue:
                # Nothing buffered here; flush is a no-op.
                continue
            sink.send(msg)

    @pull
    def get_result():
        """Generator, reading results from process standard output."""
        reg = registry()
        yield from JSONObjectReader(reg, remote.stdout)

    return Connection(get_result, send_job)
def run_process(workflow, *, n_processes, registry,
                verbose=False, jobdirs=False,
                init=None, finish=None, deref=False):
    """Run the workflow using a number of new python processes. Use this
    runner to test the workflow in a situation where data serialisation
    is needed.

    :param workflow:
        The workflow.
    :type workflow: `Workflow` or `PromisedObject`

    :param n_processes:
        Number of processes to start.

    :param registry:
        The serial registry.

    :param verbose:
        Request verbose output on worker side

    :param jobdirs:
        Create a new directory for each job to prevent filename collision.(NYI)

    :param init:
        An init function that needs to be run in each process before other jobs
        can be run. This should be a scheduled function returning True on
        success.

    :param finish:
        A function that wraps up when the worker closes down.

    :param deref:
        Set this to True to pass the result through one more encoding and
        decoding step with object dereferencing turned on.
    :type deref: bool

    :returns: the result of evaluating the workflow
    :rtype: any
    """
    # Spawn one external worker process per requested slot.
    workers = {}
    for i in range(n_processes):
        new_worker = process_worker(registry, verbose, jobdirs, init, finish)
        workers['worker {0:2}'.format(i)] = new_worker

    worker_names = list(workers.keys())

    def random_selector(_):
        """Selects a worker to send a job to at random."""
        return random.choice(worker_names)

    # Distribute jobs over the pool and run the scheduling loop to completion.
    master_worker = hybrid_threaded_worker(random_selector, workers)
    result = Scheduler().run(master_worker, get_workflow(workflow))

    # Ask every worker to shut down cleanly.
    for worker in workers.values():
        try:
            worker.sink().send(EndOfQueue)
        except StopIteration:
            pass
        # w.aux.join()

    if deref:
        return registry().dereference(result, host='localhost')
    else:
        return result
|
import matplotlib.pyplot as plt
from numpy import pi, arccos, cos, sin, sqrt, log, log10, exp
from numpy import linspace, meshgrid, where, ndarray, array, float32, float64
from slezanbear import sbmap
from threshold import th
# Mean Earth radius in metres.
radearth = 6371 * 1e3

# Central angle (radians) between two (lat, lng) points given in degrees,
# via the spherical law of cosines; multiply by radearth to get metres.
dist = lambda f1,l1,f2,l2: arccos( \
    cos(f1 * (pi / 180)) * cos(f2 * (pi / 180)) \
    * cos((l1 - l2) * (pi / 180)) \
    + sin(f1 * (pi / 180)) * sin(f2 * (pi / 180)))

# Test-case bounding box (degrees) and grid-size exponent (grids are 2**nord).
latlim = 50.75, 51.05
lnglim = 16.55, 16.85
nord = 5

# GDAL-style affine geotransform [x0, dx, 0, y0, 0, dy] for a lat/lng box.
# NOTE(review): appears unused below -- latlng() builds its own transform.
mkgt = lambda lat,lng: array([lng[0], lng[1] - lng[0], 0,
                              lat[0], 0, lat[1] - lat[0]],
                             dtype = float32, order = 'F')
#-------------------------------------------------------------------------------
def latlng(latlim, lnglim, nx, ny):
    """Build coordinate grids plus a GDAL-style geotransform for a box.

    Returns (lat, lng, gt): lat and lng are (ny, nx) meshgrids spanning the
    given (min, max) limits, and gt = [x0, dx, 0, y0, 0, dy] as a Fortran-
    ordered float32 array.
    """
    lat_axis = linspace(latlim[0], latlim[1], ny)
    lng_axis = linspace(lnglim[0], lnglim[1], nx)
    lat, lng = meshgrid(lat_axis, lng_axis, indexing = 'ij')
    # Pixel sizes of the regular grid.
    dx = (lnglim[1] - lnglim[0]) / (nx - 1)
    dy = (latlim[1] - latlim[0]) / (ny - 1)
    gt = array([lnglim[0], dx, 0, latlim[0], 0, dy],
               dtype = float32, order = 'F')
    return lat, lng, gt
#-------------------------------------------------------------------------------
# Gaussian profile helper.
# NOTE(review): appears unused below -- mountain() inlines the same formula.
gauss = lambda x,w: exp(-0.5 * x**2 / w**2)

# Terrain grid at the finer resolution (2**(nord+1) points per side).
n = 2**(nord + 1)
lat, lng, gth = latlng(latlim, lnglim, n, n)
# Distances (metres) from two reference points used to shape plateaus.
r1 = radearth * dist(50.860, 16.740, lat, lng)
r2 = radearth * dist(50.875, 16.703, lat, lng)
# Gaussian "mountain" of height h and width w (metres) centred at (f, l).
mountain = lambda f,l,h,w: h * exp(- 0.5 * radearth**2 * dist(f,l,lat,lng)**2 / w**2)
# Synthetic terrain height map (Fortran-ordered float32 for the sbmap call).
hterr = array(200 + 300 * th(3000 - r1, 1200) + 400 * th(1800 - r2, 800) \
        - 300 * (lat - 50.90) - 30 * (lng - 16.70) \
        + mountain(50.951, 16.700, 500, 500) \
        + mountain(50.951, 16.710, 500, 200) \
        + mountain(50.955, 16.720, 400, 400) \
        + mountain(50.955, 16.725, 300, 600) \
        + mountain(50.960, 16.730, 400, 500) \
        + mountain(50.891, 16.655, 500, 1300),
        copy = True, dtype = float32, order = 'F')
#-------------------------------------------------------------------------------
# Source intensity map: value 99 within 1 km of the given point, 0 elsewhere.
n = 2**(nord + 1)
lat, lng, gti = latlng(latlim, lnglim, n, n)
r = radearth * dist(50.917, 16.720, lat, lng)
I0 = array(where(r < 1e3 , 99, 0),
        copy = True, dtype = float32, order = 'F')
#-------------------------------------------------------------------------------
# Output grids at the coarser 2**nord resolution; sbmap fills I1 and I2.
n = 2**nord
lat, lng, gt = latlng(latlim, lnglim, n, n)
I1 = ndarray((n,n), dtype = float64, order = 'F')
I2 = ndarray((n,n), dtype = float64, order = 'F')
#-------------------------------------------------------------------------------
# Run the model (Fortran extension); each grid is passed with its own
# dimensions and geotransform.
# NOTE(review): sbmap's exact semantics assumed from argument names -- confirm.
sbmap( I0, I0.shape[0], I0.shape[1], gti,
       hterr, hterr.shape[0], hterr.shape[1], gth,
       I1, I2, I1.shape[0], I1.shape[1], gt)
# Four-panel summary figure: terrain, source intensity, and model outputs.
fig, axes = plt.subplots(2, 2, figsize = (13,11))
# Top-left: synthetic terrain height map.
p = axes[0,0].imshow(hterr, interpolation = 'none', cmap = 'BrBG_r',
    extent = [lnglim[0], lnglim[1], latlim[0], latlim[1]], origin = 'lower')
plt.colorbar(p, ax = axes[0,0])
# Top-right: input source intensity map.
p = axes[0,1].imshow(I0, interpolation = 'none', cmap = 'hot',
    extent = [lnglim[0], lnglim[1], latlim[0], latlim[1]], origin = 'lower')
plt.colorbar(p, ax = axes[0,1])
# Bottom-left: square root of the second model output.
p = axes[1,0].imshow(sqrt(I2), interpolation = 'none', cmap = 'inferno',
    extent = [lnglim[0], lnglim[1], latlim[0], latlim[1]], origin = 'lower')
plt.colorbar(p, ax = axes[1,0])
# Bottom-right: relative difference between the two model outputs.
p = axes[1,1].imshow(I2 / I1 - 1, interpolation = 'none', cmap = 'inferno',
    extent = [lnglim[0], lnglim[1], latlim[0], latlim[1]], origin = 'lower', vmax = 0)
plt.colorbar(p, ax = axes[1,1])
# BUG FIX: `print 'x'` is Python-2-only syntax; the parenthesized form
# prints the same single string under both Python 2 and Python 3.
print('/tmp/testcase1.png')
plt.savefig('/tmp/testcase1.png')
# plt.show()
|
# pylint: disable=C0103
"""
This module contains vector control for PMSM drives.
"""
# %%
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from helpers import abc2complex, complex2abc
from control.common import PWM
# %%
class VectorCtrl:
    """
    This class interconnects the subsystems of the PMSM control system and
    provides the interface to the solver.
    """

    def __init__(self, pars, speed_ctrl, current_ref, current_ctrl, datalog):
        """
        Instantiate the classes.
        """
        self.p = pars.p  # number of pole pairs
        self.current_ctrl = current_ctrl
        self.speed_ctrl = speed_ctrl
        self.current_ref = current_ref
        self.pwm = PWM(pars)
        self.datalog = datalog

    def __call__(self, w_m_ref, w_M, theta_M, i_s_abc, u_dc):
        """
        Main control loop.

        Parameters
        ----------
        w_m_ref : float
            Rotor speed reference (in electrical rad/s).
        w_M : float
            Rotor speed (in mechanical rad/s).
        theta_M : float
            Rotor angle (in mechanical rad).
        i_s_abc : ndarray, shape (3,)
            Phase currents.
        u_dc : float
            DC-bus voltage.

        Returns
        -------
        d_abc_ref : ndarray, shape (3,)
            Duty ratio references.
        T_s : float
            Sampling period.
        """
        # Get the states
        u_s = self.pwm.realized_voltage
        # Electrical speed/angle from the measured mechanical quantities.
        w_m = self.p*w_M
        theta_m = np.mod(self.p*theta_M, 2*np.pi)

        # Space vector and coordinate transformation (to rotor coordinates)
        i_s = np.exp(-1j*theta_m)*abc2complex(i_s_abc)

        # Outputs: speed loop -> current reference -> current loop -> PWM.
        tau_M_ref, tau_L = self.speed_ctrl.output(w_m_ref/self.p, w_M)
        i_s_ref, tau_M = self.current_ref.output(tau_M_ref)
        u_s_ref, e = self.current_ctrl.output(i_s_ref, i_s)
        d_abc_ref, u_s_ref_lim = self.pwm.output(u_s_ref, u_dc, theta_m, w_m)

        # Update all the states (order matters: after all outputs are known).
        self.speed_ctrl.update(tau_M, tau_L)
        self.current_ref.update(tau_M, u_s_ref, u_dc)
        self.current_ctrl.update(e, u_s_ref, u_s_ref_lim, w_m)
        self.pwm.update(u_s_ref_lim)

        # Data logging (0 fills the flux-estimate slot: no observer here).
        self.datalog.save([i_s_ref, i_s, u_s, 0, w_m_ref, w_m, theta_m, u_dc,
                           tau_M, self.pwm.T_s])

        return d_abc_ref, self.pwm.T_s
# %%
class CurrentCtrl2DOFPI:
    """
    2DOF PI current controller.

    Implements the continuous-time complex-vector design corresponding to
    (13) in the paper "Flux-linkage-based current control of saturated
    synchronous motors": https://doi.org/10.1109/TIA.2019.291925
    This design can equivalently be presented as a 2DOF PI controller. For
    better performance at high speeds with low sampling frequencies, the
    discrete-time design in (18) is recommended.
    """

    def __init__(self, pars):
        """
        Parameters
        ----------
        pars : data object
            Controller parameters (T_s, L_d, L_q, alpha_c).
        """
        self.T_s = pars.T_s
        self.L_d = pars.L_d
        self.L_q = pars.L_q
        self.alpha_c = pars.alpha_c
        # Integral state (complex voltage).
        self.u_i = 0

    def output(self, i_s_ref, i_s):
        """
        Computes the unlimited voltage reference.

        Parameters
        ----------
        i_s_ref : complex
            Current reference.
        i_s : complex
            Measured current.

        Returns
        -------
        u_s_ref : complex
            Unlimited voltage reference.
        e : complex
            Error signal (scaled, corresponds to the stator flux linkage).
        """
        # Map both currents to flux linkages; the PM flux cancels out in
        # the reference/feedback difference.
        psi_s_ref = self.L_d*i_s_ref.real + 1j*self.L_q*i_s_ref.imag
        psi_s = self.L_d*i_s.real + 1j*self.L_q*i_s.imag
        # Reference-feedforward and feedback gains.
        gain_t = self.alpha_c
        gain_fb = 2*self.alpha_c
        e = psi_s_ref - psi_s
        u_s_ref = gain_t*psi_s_ref - gain_fb*psi_s + self.u_i
        return u_s_ref, e

    def update(self, e, u_s_ref, u_s_ref_lim, w_m):
        """
        Updates the integral state, including the back-calculation term
        (u_s_ref_lim - u_s_ref)/k_t computed from the limited reference.

        Parameters
        ----------
        e : complex
            Error signal (scaled, corresponds to the stator flux linkage).
        u_s_ref : complex
            Unlimited voltage reference.
        u_s_ref_lim : complex
            Limited voltage reference.
        w_m : float
            Angular rotor speed.
        """
        gain_t = self.alpha_c
        gain_i = self.alpha_c*(self.alpha_c + 1j*w_m)
        self.u_i += self.T_s*gain_i*(e + (u_s_ref_lim - u_s_ref)/gain_t)

    def __str__(self):
        desc = ('2DOF PI current control:\n'
                ' alpha_c=2*pi*{:.1f}')
        return desc.format(self.alpha_c/(2*np.pi))
# %%
class SensorlessVectorCtrl:
    """
    This class interconnects the subsystems of the PMSM control system and
    provides the interface to the solver.
    """

    def __init__(self, pars, speed_ctrl, current_ref, current_ctrl, observer,
                 datalog):
        """
        Instantiate the classes.
        """
        self.p = pars.p  # number of pole pairs
        self.current_ctrl = current_ctrl
        self.speed_ctrl = speed_ctrl
        self.current_ref = current_ref
        self.observer = observer
        self.pwm = PWM(pars)
        self.datalog = datalog

    def __call__(self, w_m_ref, i_s_abc, u_dc):
        """
        Main control loop.

        Parameters
        ----------
        w_m_ref : float
            Rotor speed reference (in electrical rad/s).
        i_s_abc : ndarray, shape (3,)
            Phase currents.
        u_dc : float
            DC-bus voltage.

        Returns
        -------
        d_abc_ref : ndarray, shape (3,)
            Duty ratio references.
        T_s : float
            Sampling period.
        """
        # Get the states: speed, angle and flux come from the observer
        # instead of measurements (sensorless operation).
        u_s = self.pwm.realized_voltage
        w_m = self.observer.w_m
        theta_m = self.observer.theta_m
        psi_s = self.observer.psi_s

        # Space vector and coordinate transformation (to estimated rotor
        # coordinates)
        i_s = np.exp(-1j*theta_m)*abc2complex(i_s_abc)

        # Outputs: speed loop -> current reference -> current loop -> PWM.
        tau_M_ref, tau_L = self.speed_ctrl.output(w_m_ref/self.p, w_m/self.p)
        i_s_ref, tau_M = self.current_ref.output(tau_M_ref)
        u_s_ref = self.current_ctrl.output(i_s_ref, i_s, psi_s, w_m)
        d_abc_ref = self.pwm(u_s_ref, u_dc, theta_m, w_m)

        # Update all the states (after all outputs are computed).
        self.speed_ctrl.update(tau_M, tau_L)
        self.observer.update(u_s, i_s)
        self.current_ref.update(tau_M, u_s_ref, u_dc)

        # Data logging
        self.datalog.save([i_s_ref, i_s, u_s, psi_s, w_m_ref, w_m, theta_m,
                           u_dc, tau_M, self.pwm.T_s])

        return d_abc_ref, self.pwm.T_s
# %%
class CurrentRef:
    """
    Current reference computation with integral field weakening.

    The method resembles "Analytical design and autotuning of adaptive
    flux-weakening voltage regulation loop in IPMSM drives with accurate
    torque regulation": https://doi.org/10.1109/TIA.2019.2942807
    Instead of the PI controller of the paper, a simpler integral
    controller with a constant gain is used; the resulting operating-point-
    dependent closed-loop pole could be derived using (12) of the paper.
    The MTPV limit is also used.
    """

    def __init__(self, pars):
        """
        Parameters
        ----------
        pars : data object
            Controller parameters, including the MTPA/MTPV lookup
            callables i_sd_mtpa and i_sq_mtpv.
        """
        self.T_s = pars.T_s
        self.i_s_max = pars.i_s_max
        self.p = pars.p
        self.L_d = pars.L_d
        self.L_q = pars.L_q
        self.psi_f = pars.psi_f
        self.i_sd_mtpa = pars.i_sd_mtpa
        self.i_sq_mtpv = pars.i_sq_mtpv
        # d-axis reference starts at the zero-torque MTPA value.
        self.i_sd_ref = self.i_sd_mtpa(0)
        # Field-weakening integrator gain.
        self.k = pars.alpha_fw/(pars.w_nom*self.L_d)

    def output(self, tau_M_ref):
        """
        Compute the stator current reference.

        Parameters
        ----------
        tau_M_ref : float
            Torque reference.

        Returns
        -------
        i_s_ref : complex
            Stator current reference.
        tau_M : float
            Limited torque reference.
        """
        # Torque-producing flux at the present d-axis reference.
        psi_t = self.psi_f + (self.L_d - self.L_q)*self.i_sd_ref
        i_sq_ref = tau_M_ref/(1.5*self.p*psi_t) if psi_t != 0 else 0

        # q-axis limit: the maximum-current circle, tightened by the MTPV
        # limit when the lookup provides one (it may return a falsy value).
        i_sq_max = np.sqrt(self.i_s_max**2 - self.i_sd_ref**2)
        mtpv_limit = self.i_sq_mtpv(self.i_sd_ref)
        if mtpv_limit:
            i_sq_max = np.min([i_sq_max, mtpv_limit])
        if np.abs(i_sq_ref) > i_sq_max:
            i_sq_ref = np.sign(i_sq_ref)*i_sq_max

        # Current reference and the torque it actually realizes (fed back
        # to the speed controller).
        i_s_ref = self.i_sd_ref + 1j*i_sq_ref
        tau_M = 1.5*self.p*psi_t*i_sq_ref
        return i_s_ref, tau_M

    def update(self, tau_M, u_s_ref, u_dc):
        """
        Field-weakening based on the unlimited reference voltage.

        Parameters
        ----------
        tau_M : float
            Limited torque reference.
        u_s_ref : complex
            Unlimited stator voltage reference.
        u_dc : float
            DC-bus voltage.
        """
        # Integrate the available voltage margin into the d-axis reference.
        u_s_max = u_dc/np.sqrt(3)
        self.i_sd_ref += self.T_s*self.k*(u_s_max - np.abs(u_s_ref))

        # Clamp between the torque-dependent MTPA value and the negative
        # current limit.
        i_sd_mtpa = self.i_sd_mtpa(np.abs(tau_M))
        if self.i_sd_ref > i_sd_mtpa:
            self.i_sd_ref = i_sd_mtpa
        elif self.i_sd_ref < -self.i_s_max:
            self.i_sd_ref = -self.i_s_max

    def __str__(self):
        desc = ('Current reference computation and field weakening:\n'
                ' i_s_max={:.1f}')
        return desc.format(self.i_s_max)
# %%
class CurrentCtrl:
    """
    State-feedback current controller with reference feedforward and
    without integral action.
    """

    def __init__(self, pars):
        """
        Parameters
        ----------
        pars : data object
            Controller parameters (alpha_c, L_d, L_q, R_s).
        """
        self.alpha_c = pars.alpha_c
        self.L_d = pars.L_d
        self.L_q = pars.L_q
        self.R_s = pars.R_s

    def output(self, i_s_ref, i_s, psi_s, w_m):
        """
        State-feedback current controller.

        Parameters
        ----------
        i_s_ref : complex
            Stator current reference.
        i_s : complex
            Stator current.
        psi_s : complex
            Stator flux linkage (calculated in the upper level).
        w_m : float
            Rotor speed (in electrical rad/s).

        Returns
        -------
        u_s_ref : complex
            Voltage reference.
        """
        delta_i = i_s_ref - i_s
        # Map the current error to a flux-linkage error.
        flux_err = self.L_d*delta_i.real + 1j*self.L_q*delta_i.imag
        # Resistive drop + back-emf feedforward + proportional correction.
        return self.R_s*i_s + 1j*w_m*psi_s + self.alpha_c*flux_err

    def update(self, *_):
        """
        No states, nothing to update. This method is just for compatibility.
        """

    def __str__(self):
        desc = ('State-feedback current control (without integral action):\n'
                ' alpha_c=2*pi*{:.1f}')
        return desc.format(self.alpha_c/(2*np.pi))
# %%
class SensorlessObserver:
    """
    A sensorless observer corresponding to the paper "Observers for sensorless
    synchronous motor drives: Framework for design and analysis":

        https://doi.org/10.1109/TIA.2018.2858753

    The observer gain decouples the electrical and mechanical dynamics and
    allows placing the poles of the corresponding linearized estimation
    error dynamics.
    """

    def __init__(self, pars):
        """
        Parameters
        ----------
        pars : data object
            Controller parameters.
        """
        self.T_s = pars.T_s
        self.R_s = pars.R_s
        self.L_d = pars.L_d
        self.L_q = pars.L_q
        self.psi_f = pars.psi_f
        # Speed/position estimation gains from the design bandwidth w_o:
        # k_p = 2*w_o, k_i = w_o**2.
        self.k_p = 2*pars.w_o
        self.k_i = pars.w_o**2
        self.b_p = .5*pars.R_s*(pars.L_d + pars.L_q)/(pars.L_d*pars.L_q)
        self.zeta_inf = .7
        # Initial states: angle, speed, and flux (set to the PM flux).
        self.theta_m, self.w_m, self.psi_s = 0, 0, pars.psi_f

    def update(self, u_s, i_s):
        """
        Update the states for the next sampling period.

        Parameters
        ----------
        u_s : complex
            Stator voltage in estimated rotor coordinates.
        i_s : complex
            Stator current in estimated rotor coordinates.
        """
        # Auxiliary flux (12)
        psi_a = self.psi_s - self.L_q*i_s.real - 1j*self.L_d*i_s.imag
        # Estimation error (6)
        e = self.L_d*i_s.real + 1j*self.L_q*i_s.imag + self.psi_f - self.psi_s
        # Pole locations are chosen according to (36), with c = w_m**2
        # and w_inf = inf, and the gain corresponding to (30) is used
        k = self.b_p + 2*self.zeta_inf*np.abs(self.w_m)
        psi_a_sqr = np.abs(psi_a)**2
        if psi_a_sqr > 0:
            # Correction voltage
            v = k*psi_a*np.real(psi_a*np.conj(e))/psi_a_sqr
            # Error signal (10)
            eps = np.imag(psi_a*np.conj(e))/psi_a_sqr
        else:
            # Degenerate case (zero auxiliary flux): skip the correction.
            v, eps = 0, 0
        # Speed estimation (9)
        w_m = self.k_p*eps + self.w_m
        # Update the states
        self.psi_s += self.T_s*(u_s - self.R_s*i_s - 1j*w_m*self.psi_s + v)
        self.w_m += self.T_s*self.k_i*eps
        self.theta_m += self.T_s*w_m
        self.theta_m = np.mod(self.theta_m, 2*np.pi)  # Limit to [0, 2*pi]

    def __str__(self):
        desc = ('Sensorless observer:\n'
                ' w_o=2*pi*{:.1f}')
        return desc.format(.5*self.k_p/(2*np.pi))
# %%
class Datalogger:
    """
    This class contains a data logger.

    Controller samples are appended once per sampling period with save(),
    converted to ndarrays by post_process(), and visualized with the plot
    methods.
    """
    def __init__(self):
        """
        Initialize the attributes.
        """
        self.t = []          # discrete time instants
        self.i_s_ref = []    # stator current reference (complex)
        self.i_s = []        # stator current (complex)
        self.u_s = []        # stator voltage reference (complex)
        self.psi_s = []      # stator flux estimate (complex)
        self.w_m_ref = []    # rotor speed reference
        self.w_m = []        # rotor speed estimate
        self.theta_m = []    # rotor angle estimate
        self.u_dc = []       # DC-bus voltage
        self.tau_M = []      # torque reference
        # Stator-frame voltage/current; filled in by post_process()
        self.u_ss, self.i_ss = 0j, 0j
    def save(self, data):
        """
        Saves the solution.

        Parameters
        ----------
        data : tuple
            Latest controller quantities in the order (i_s_ref, i_s, u_s,
            psi_s, w_m_ref, w_m, theta_m, u_dc, tau_M, T_s).
        """
        (i_s_ref, i_s, u_s, psi_s, w_m_ref, w_m, theta_m,
         u_dc, tau_M, T_s) = data
        try:
            # New time instant is the previous one advanced by T_s
            t_new = self.t[-1] + T_s
        except IndexError:
            t_new = 0   # At the first step t = []
        self.t.extend([t_new])
        self.i_s_ref.extend([i_s_ref])
        self.i_s.extend([i_s])
        self.u_s.extend([u_s])
        self.psi_s.extend([psi_s])
        self.w_m_ref.extend([w_m_ref])
        self.w_m.extend([w_m])
        self.theta_m.extend([theta_m])
        self.u_dc.extend([u_dc])
        self.tau_M.extend([tau_M])
    def post_process(self):
        """
        Transforms the lists to the ndarray format and post-process them.
        """
        self.i_s_ref = np.asarray(self.i_s_ref)
        self.i_s = np.asarray(self.i_s)
        self.u_s = np.asarray(self.u_s)
        self.psi_s = np.asarray(self.psi_s)
        self.w_m_ref = np.asarray(self.w_m_ref)
        self.w_m = np.asarray(self.w_m)
        self.theta_m = np.asarray(self.theta_m)
        self.u_dc = np.asarray(self.u_dc)
        self.tau_M = np.asarray(self.tau_M)
        # Rotate rotor-frame quantities to stator coordinates
        self.u_ss = np.exp(1j*self.theta_m)*self.u_s
        self.i_ss = np.exp(1j*self.theta_m)*self.i_s
    def plot(self, mdl, base):
        """
        Plots example figures.

        Parameters
        ----------
        mdl : object
            Continuous-time solution.
        base : object
            Base values.
        """
        data = mdl.datalog  # Continuous-time data
        t_range = (0, self.t[-1])   # Time span
        # Plotting parameters
        plt.rcParams['axes.prop_cycle'] = cycler(color='brgcmyk')
        plt.rcParams['lines.linewidth'] = 1.
        plt.rcParams['axes.grid'] = True
        plt.rcParams.update({"text.usetex": False})
        fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, figsize=(8, 10))
        # Subplot 1: speeds (reference, estimate, continuous-time actual)
        ax1.step(self.t, self.w_m_ref/base.w, '--', where='post')
        ax1.step(self.t, self.w_m/base.w, where='post')
        ax1.plot(data.t, data.w_m/base.w)
        ax1.legend([r'$\omega_\mathrm{m,ref}$',
                    r'$\hat \omega_\mathrm{m}$',
                    r'$\omega_\mathrm{m}$'])
        ax1.set_xlim(t_range)
        ax1.set_xticklabels([])
        ax1.set_ylabel('Speed (p.u.)')
        # Subplot 2: torques
        ax2.plot(data.t, data.tau_L/base.tau, '--')
        ax2.plot(data.t, data.tau_M/base.tau)
        ax2.step(self.t, self.tau_M/base.tau)   # Limited torque reference
        ax2.set_xlim(t_range)
        ax2.legend([r'$\tau_\mathrm{L}$', r'$\tau_\mathrm{M}$',
                    r'$\tau_\mathrm{M,ref}$'])
        ax2.set_ylabel('Torque (p.u.)')
        ax2.set_xticklabels([])
        # Subplot 3: dq current components and their references
        ax3.step(self.t, self.i_s_ref.real/base.i, '--', where='post')
        ax3.step(self.t, self.i_s.real/base.i, where='post')
        ax3.step(self.t, self.i_s_ref.imag/base.i, '--', where='post')
        ax3.step(self.t, self.i_s.imag/base.i, where='post')
        ax3.set_ylabel('Current (p.u.)')
        ax3.legend([r'$i_\mathrm{sd,ref}$', r'$i_\mathrm{sd}$',
                    r'$i_\mathrm{sq,ref}$', r'$i_\mathrm{sq}$'])
        ax3.set_xlim(t_range)
        ax3.set_xticklabels([])
        # Subplot 4: voltage magnitude vs. available DC-bus voltage
        ax4.step(self.t, np.abs(self.u_s)/base.u, where='post')
        ax4.step(self.t, self.u_dc/np.sqrt(3)/base.u, '--', where='post')
        ax4.set_ylabel('Voltage (p.u.)')
        ax4.set_xlim(t_range)
        ax4.set_ylim(0, 1.2)
        ax4.legend([r'$u_\mathrm{s}$', r'$u_\mathrm{dc}/\sqrt{3}$'])
        ax4.set_xticklabels([])
        # Subplot 5: flux magnitudes (actual and estimate)
        ax5.plot(data.t, np.abs(data.psi_s)/base.psi)
        ax5.step(self.t, np.abs(self.psi_s)/base.psi, '--', where='post')
        ax5.set_xlim(t_range)
        ax5.set_ylim(0, 1.2)
        ax5.legend([r'$\psi_\mathrm{s}$', r'$\hat\psi_\mathrm{s}$'])
        ax5.set_ylabel('Flux (p.u.)')
        ax5.set_xlabel('Time (s)')
        fig.align_ylabels()
        plt.tight_layout()
        plt.show()
    def plot_latex(self, mdl, base):
        """
        Plots example figures using LaTeX in a format suitable for two-column
        articles. This method requires that LaTeX is installed.

        Parameters
        ----------
        mdl : object
            Continuous-time solution.
        base : object
            Base values.
        """
        data = mdl.datalog  # Continuous-time data
        t_range = (0, self.t[-1])   # Time span
        # Plotting parameters
        plt.rcParams['axes.prop_cycle'] = cycler(color='brgcmyk')
        plt.rcParams['lines.linewidth'] = 1.
        plt.rcParams['axes.grid'] = True
        plt.rcParams.update({"text.usetex": True,
                             "font.family": "serif",
                             "font.sans-serif": ["Computer Modern Roman"]})
        fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, figsize=(3, 7.5))
        # Same layout as plot(), but with fewer curves per axis
        ax1.step(self.t, self.w_m_ref/base.w, '--', where='post')
        ax1.plot(data.t, data.w_m/base.w)
        ax1.legend([r'$\omega_\mathrm{m,ref}$',
                    r'$\omega_\mathrm{m}$'])
        ax1.set_xlim(t_range)
        ax1.set_ylim(-1.2, 1.2)
        ax1.set_xticklabels([])
        ax1.set_ylabel('Speed (p.u.)')
        ax2.plot(data.t, data.tau_L/base.tau, '--')
        ax2.plot(data.t, data.tau_M/base.tau)
        ax2.set_xlim(t_range)
        ax2.set_ylim(-.2, 1.5)
        ax2.legend([r'$\tau_\mathrm{L}$', r'$\tau_\mathrm{M}$'])
        ax2.set_ylabel('Torque (p.u.)')
        ax2.set_xticklabels([])
        ax3.step(self.t, self.i_s.real/base.i, where='post')
        ax3.step(self.t, self.i_s.imag/base.i, where='post')
        ax3.set_ylabel('Current (p.u.)')
        ax3.legend([r'$i_\mathrm{sd}$', r'$i_\mathrm{sq}$'])
        ax3.set_xlim(t_range)
        ax3.set_ylim(-1.5, 1.5)
        ax3.set_xticklabels([])
        ax4.step(self.t, np.abs(self.u_s)/base.u, where='post')
        ax4.step(self.t, self.u_dc/np.sqrt(3)/base.u, '--', where='post')
        ax4.set_ylabel('Voltage (p.u.)')
        ax4.set_xlim(t_range)
        ax4.set_ylim(0, 1.2)
        ax4.legend([r'$u_\mathrm{s}$', r'$u_\mathrm{dc}/\sqrt{3}$'])
        ax4.set_xticklabels([])
        ax5.plot(data.t, np.abs(data.psi_s)/base.psi)
        ax5.step(self.t, np.abs(self.psi_s)/base.psi, '--', where='post')
        ax5.set_xlim(t_range)
        ax5.set_ylim(0, 1.2)
        ax5.legend([r'$\psi_\mathrm{s}$', r'$\hat\psi_\mathrm{s}$'])
        ax5.set_ylabel('Flux (p.u.)')
        ax5.set_xlabel('Time (s)')
        fig.align_ylabels()
        plt.tight_layout()
        plt.show()
        # plt.savefig('fig.pdf')
    def plot_extra(self, mdl, base):
        """
        Plots extra waveforms if the PWM is enabled or if the DC-bus dynamics
        are modeled.

        Parameters
        ----------
        mdl : object
            Continuous-time solution.
        base : object
            Base values.
        """
        # Continuous-time data
        data = mdl.datalog
        # Time span (zoomed window)
        t_zoom = (.9, .925)
        # Plotting parameters
        plt.rcParams['axes.prop_cycle'] = cycler(color='brgcmyk')
        plt.rcParams['lines.linewidth'] = 1.
        plt.rcParams.update({"text.usetex": False})
        if mdl.pwm is not None:
            # Plots a zoomed view of voltages and currents
            fig1, (ax1, ax2) = plt.subplots(2, 1)
            ax1.plot(data.t, data.u_ss.real/base.u)
            ax1.plot(self.t, self.u_ss.real/base.u)
            ax1.set_xlim(t_zoom)
            ax1.set_ylim(-1.5, 1.5)
            ax1.legend([r'$u_\mathrm{sa}$', r'$\hat u_\mathrm{sa}$'])
            ax1.set_ylabel('Voltage (p.u.)')
            ax1.set_xticklabels([])
            ax2.plot(data.t,
                     complex2abc(data.i_s*np.exp(1j*data.theta_m)).T/base.i)
            ax2.step(self.t, self.i_ss.real/base.i, where='post')
            ax2.set_xlim(t_zoom)
            ax2.legend([r'$i_\mathrm{sa}$', r'$i_\mathrm{sb}$',
                        r'$i_\mathrm{sc}$'])
            ax2.set_ylabel('Current (p.u.)')
            ax2.set_xlabel('Time (s)')
            fig1.align_ylabels()
        # Plots the DC bus and grid-side variables (if data exists)
        try:
            data.i_L
        except AttributeError:
            data.i_L = None
        if data.i_L is not None:
            fig2, (ax1, ax2) = plt.subplots(2, 1)
            ax1.plot(data.t, data.u_di/base.u)
            ax1.plot(data.t, data.u_dc/base.u)
            ax1.plot(data.t, complex2abc(data.u_g).T/base.u)
            ax1.set_xlim(t_zoom)
            ax1.set_ylim(-1.5, 2)
            ax1.set_xticklabels([])
            ax1.legend([r'$u_\mathrm{di}$',
                        r'$u_\mathrm{dc}$',
                        r'$u_\mathrm{ga}$'])
            ax1.set_ylabel('Voltage (p.u.)')
            ax2.plot(data.t, data.i_L/base.i)
            ax2.plot(data.t, data.i_dc/base.i)
            ax2.plot(data.t, data.i_g.real/base.i)
            ax2.set_xlim(t_zoom)
            ax2.legend([r'$i_\mathrm{L}$',
                        r'$i_\mathrm{dc}$',
                        r'$i_\mathrm{ga}$'])
            ax2.set_ylabel('Current (p.u.)')
            ax2.set_xlabel('Time (s)')
            fig2.align_ylabels()
        plt.tight_layout()
        plt.show()
|
import matplotlib.pyplot as plt
import pandas as pd
def plot_history(train_acc, val_acc, train_loss, val_loss, figsize=(10, 5),
                 dpi=300, acc_path='accuracy.png', loss_path='loss.png'):
    """Save accuracy and loss training curves as two figures.

    Parameters
    ----------
    train_acc, val_acc : sequence of float
        Per-epoch training/validation accuracy.
    train_loss, val_loss : sequence of float
        Per-epoch training/validation loss.
    figsize : tuple
        Figure size passed to matplotlib.
    dpi : int
        Resolution for the saved images.
    acc_path, loss_path : str
        Output file paths for the accuracy and loss figures.
    """
    acc_fig = plt.figure(figsize=figsize)
    plt.plot(train_acc, label='train')
    plt.plot(val_acc, label='validation')
    plt.title("Model Accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("Epoch")
    plt.legend()  # without a legend the two curves were indistinguishable
    plt.savefig(acc_path, dpi=dpi)
    plt.close(acc_fig)  # release the figure: this is called once per run in a loop
    print(f"[INFO] Accuracy Figure saved to: {acc_path}")
    loss_fig = plt.figure(figsize=figsize)
    plt.plot(train_loss, label='train')
    plt.plot(val_loss, label='validation')
    plt.title("Model Loss")
    plt.ylabel("Loss")
    plt.xlabel("Epoch")
    plt.legend()
    plt.savefig(loss_path, dpi=dpi)
    plt.close(loss_fig)
    print(f"[INFO] Loss Figure saved to: {loss_path}")
def plot_loss_from_logs(csv_lists):
    """Extract metric columns from each training CSV log and save one
    accuracy figure and one loss figure per run."""
    print(f"[INFO] Extracting values from csv files...")
    for run_index, log_path in enumerate(csv_lists):
        print(f"[INFO] Getting the values of file {log_path}")
        history = pd.read_csv(log_path)
        # for now save each run independently
        plot_history(history['accuracy'], history['val_accuracy'],
                     history['loss'], history['val_loss'],
                     figsize=(10, 5), dpi=300,
                     acc_path=f'accuracy_{run_index}.png',
                     loss_path=f'loss_{run_index}.png')
if __name__ == '__main__':
    import os

    # Root folder containing one sub-directory per training run
    base_path = "/content/drive/MyDrive/FFTCustom"
    csv_lists = []
    for run_dir in os.listdir(base_path):
        run_dir_fullpath = os.path.join(base_path, run_dir)
        # Skip stray files: only run directories contain a log.csv
        if not os.path.isdir(run_dir_fullpath):
            continue
        csv_lists.append(os.path.join(run_dir_fullpath, "log.csv"))
    plot_loss_from_logs(csv_lists=csv_lists)
|
'''Create a program that reads an integer and tells whether it is EVEN or ODD.'''
print('---------------'*2)
print('Digite número inteiro...')
# Read the integer to classify (raises ValueError on non-numeric input)
num = int(input('Diremos se é PAR ou ÍMPAR: '))
print(' -------------------- ')
# An integer is even exactly when it is divisible by 2
if num % 2 == 0:
    print('Você digitou um número PAR!')
else:
    print('O número digitado é ÍMPAR!')
print('---------------'*2)
#!/usr/bin/env python
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
# Performs benchmark and append data to //website/data.json.
# If //website/data.json doesn't exist, this script tries to import it from gh-pages branch.
# To view the results locally run ./tools/http_server.py and visit
# http://localhost:4545/website
import os
import sys
import util
import time
import subprocess
MB = 1024 * 1024  # bytes per megabyte
ADDR = "127.0.0.1:4544"  # host:port where the deno echo server listens
def cat(deno_exe, megs):
    """Time `deno cat` piping *megs* MB of /dev/zero through head.

    Returns the elapsed wall-clock time in seconds.
    """
    size = megs * MB
    start = time.time()
    cmd = deno_exe + " tests/cat.ts /dev/zero | head -c %s " % size
    print cmd
    subprocess.check_output(cmd, shell=True)
    end = time.time()
    return end - start
def tcp(deno_exe, megs):
    """Time sending *megs* MB of zeros to the deno TCP echo server via nc.

    Starts tests/echo_server.ts in the background and always kills it.
    Returns the elapsed wall-clock time in seconds.
    """
    size = megs * MB
    # Run deno echo server in the background.
    echo_server = subprocess.Popen(
        [deno_exe, "--allow-net", "tests/echo_server.ts", ADDR])
    time.sleep(5)  # wait for deno to wake up. TODO racy.
    try:
        start = time.time()
        # "host:port" -> "host port" for the nc command line
        cmd = ("head -c %s /dev/zero " % size) + "| nc " + ADDR.replace(
            ":", " ")
        print cmd
        subprocess.check_output(cmd, shell=True)
        end = time.time()
        return end - start
    finally:
        echo_server.kill()
if __name__ == '__main__':
deno_exe = sys.argv[1]
megs = int(sys.argv[2])
if not deno_exe or not megs:
print "Usage ./tools/throughput_benchmark.py out/debug/deno 100"
sys.exit(1)
secs = tcp(sys.argv[1], megs)
print secs, "seconds"
|
from . import source, effect, encode, playback, save
|
#!/usr/bin/python3
# Copyright 2017, Mengxiao Lin <linmx0130@gmail.com>
import mxnet as mx
from mxnet import nd
from .config import cfg
import numpy as np
import cv2
# Model utils
def bbox_transform(anchor, bbox):
    """Encode ground-truth boxes relative to anchor boxes.

    Both *anchor* and *bbox* are (N, 4) NDArrays of [x0, y0, x1, y1]
    corners. Returns an (N, 4) NDArray of regression targets in the order
    [log(gw/w), log(gh/h), dcx/w, dcy/h].
    """
    # Anchor widths/heights and centers
    w = anchor[:, 2] - anchor[:, 0]
    h = anchor[:, 3] - anchor[:, 1]
    cx = (anchor[:, 0] + anchor[:, 2]) / 2.0
    cy = (anchor[:, 1] + anchor[:, 3]) / 2.0
    # Ground-truth widths/heights and centers
    g_w = bbox[:, 2] - bbox[:, 0]
    g_h = bbox[:, 3] - bbox[:, 1]
    g_cx = (bbox[:, 0] + bbox[:, 2]) / 2.0
    g_cy = (bbox[:, 1] + bbox[:, 3]) / 2.0
    # Log-space size ratios and normalized center offsets
    g_w = mx.ndarray.log(g_w / w)
    g_h = mx.ndarray.log(g_h / h)
    g_cx = (g_cx - cx) / w
    g_cy = (g_cy - cy) / h
    return mx.ndarray.concatenate([
        g_w.reshape((-1, 1)),
        g_h.reshape((-1, 1)),
        g_cx.reshape((-1, 1)),
        g_cy.reshape((-1, 1))], axis=1)
def bbox_inverse_transform(anchor, bbox):
    """Decode regression deltas back into corner boxes.

    *anchor* is an (N, 4) NDArray of [x0, y0, x1, y1]; *bbox* holds the
    deltas [log(gw/w), log(gh/h), dcx/w, dcy/h] as produced by
    bbox_transform. Returns an (N, 4) NDArray of [x0, y0, x1, y1].
    """
    # Anchor widths/heights and centers
    w = anchor[:, 2] - anchor[:, 0]
    h = anchor[:, 3] - anchor[:, 1]
    cx = (anchor[:, 0] + anchor[:, 2]) / 2.0
    cy = (anchor[:, 1] + anchor[:, 3]) / 2.0
    # Undo the log-space sizes and normalized offsets
    g_w = mx.ndarray.exp(bbox[:, 0]) * w
    g_h = mx.ndarray.exp(bbox[:, 1]) * h
    g_cx = bbox[:, 2] * w + cx
    g_cy = bbox[:, 3] * h + cy
    # Center/size -> corners
    g_x1 = g_cx - g_w / 2
    g_y1 = g_cy - g_h / 2
    g_x2 = g_cx + g_w / 2
    g_y2 = g_cy + g_h / 2
    return mx.ndarray.concatenate([
        g_x1.reshape((-1, 1)),
        g_y1.reshape((-1, 1)),
        g_x2.reshape((-1, 1)),
        g_y2.reshape((-1, 1))], axis=1)
def _get_area(bbox:mx.nd.NDArray):
    """Return the area of each box in *bbox* (N, 4), clamping negative
    widths/heights to zero so degenerate boxes contribute zero area."""
    zeros = mx.nd.zeros_like(bbox[:, 0])
    # max over a stacked pair implements an element-wise clamp at 0
    width = mx.nd.max(nd.stack(bbox[:, 2] - bbox[:, 0], zeros), axis=0)
    height = mx.nd.max(nd.stack(bbox[:, 3] - bbox[:, 1], zeros), axis=0)
    return width * height
def bbox_overlaps(anchors:mx.nd.NDArray, gt:mx.nd.NDArray):
    """
    Get IoU of the anchors and ground truth bounding boxes.
    The shape of anchors and gt should be (N, 4) and (M, 4)
    So the shape of return value is (N, M)
    """
    ret = []
    for i in range(gt.shape[0]):
        # Broadcast the i-th ground-truth box against every anchor
        cgt = gt[i].reshape((1, 4)).broadcast_to(anchors.shape)
        # Intersection rectangle corners
        x0 = nd.max(nd.stack(anchors[:,0], cgt[:,0]), axis=0)
        y0 = nd.max(nd.stack(anchors[:,1], cgt[:,1]), axis=0)
        x1 = nd.min(nd.stack(anchors[:,2], cgt[:,2]), axis=0)
        y1 = nd.min(nd.stack(anchors[:,3], cgt[:,3]), axis=0)
        inter = _get_area(nd.concatenate([x0.reshape((-1, 1)),
                                          y0.reshape((-1, 1)),
                                          x1.reshape((-1, 1)),
                                          y1.reshape((-1, 1))], axis=1))
        # Union area = area(A) + area(B) - intersection
        outer = _get_area(anchors) + _get_area(cgt) - inter
        iou = inter / outer
        ret.append(iou.reshape((-1, 1)))
    ret=nd.concatenate(ret, axis=1)
    return ret
def bbox_clip(bbox:mx.nd.NDArray, height, width):
    """Clamp boxes in place to the image bounds [0, width-1] x [0, height-1].

    Mutates and returns *bbox* (N, 4) of [x0, y0, x1, y1].
    """
    zeros_t = mx.nd.zeros(bbox[:, 0].shape, ctx=bbox.context)
    bbox[:, 0] = mx.nd.maximum(bbox[:, 0], zeros_t)
    bbox[:, 1] = mx.nd.maximum(bbox[:, 1], zeros_t)
    bbox[:, 2] = mx.nd.minimum(bbox[:, 2], zeros_t + width - 1)
    bbox[:, 3] = mx.nd.minimum(bbox[:, 3], zeros_t + height - 1)
    return bbox
#
# Data argumentation and normalization
#
def random_flip(data, label):
    """Horizontally flip an image and its boxes with probability 0.5.

    Parameters
    ----------
    data : ndarray, shape (C, H, W)
        Image in channel-first layout.
    label : ndarray, shape (N, >=4)
        Boxes with x-coordinates in columns 0 and 2 (mutated in place
        when a flip occurs).

    Returns
    -------
    (data, label) tuple, flipped or unchanged.
    """
    # Compare against 0.5: uniform() is drawn from [0, 1), so the previous
    # "> 0.0" threshold made the flip fire on (almost) every call.
    if np.random.uniform() > 0.5:
        c, h, w = data.shape
        data = np.flip(data, axis=2)
        # Mirror the box x-coordinates; swap so x0 stays the left edge
        x0 = label[:, 0].copy()
        x1 = label[:, 2].copy()
        label[:, 0] = w - x1
        label[:, 2] = w - x0
    return data, label
def imagenetNormalize(img):
    """Scale a channel-first image from [0, 255] to [0, 1] and normalize
    with the ImageNet per-channel mean/std. Returns an mx NDArray."""
    mean = mx.nd.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = mx.nd.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    img = mx.nd.array(img / 255)
    img = mx.image.color_normalize(img, mean, std)
    return img
def img_resize(img):
    """Resize *img* (H, W, C) so its shorter side equals cfg.resize_short_size.

    Returns the resized image and the scale factor that was applied.
    """
    h, w, c = img.shape
    # Aligning the shorter side covers both branches of the original
    # h > w / h <= w distinction in one expression.
    scale = cfg.resize_short_size / min(h, w)
    resized = cv2.resize(img, (int(w * scale), int(h * scale)))
    return resized, scale
def random_square_crop(img, label):
    """Crop a random square (side = shorter dimension) from a CHW image.

    Parameters
    ----------
    img : ndarray, shape (C, H, W)
    label : ndarray, shape (N, >=4)
        Boxes [x0, y0, x1, y1, ...]; shifted in place to the crop origin.

    Returns
    -------
    (img, label) tuple with the cropped image.
    """
    c, h, w = img.shape
    if h == w:
        # Already square: np.random.randint(0, 0) would raise ValueError,
        # so return the input unchanged.
        return img, label
    if h > w:
        # Crop along the height; shift box y-coordinates
        y = np.random.randint(0, h - w)
        img = img[:, y: y + w, :]
        label[:, 1] -= y
        label[:, 3] -= y
    else:
        # Crop along the width; shift box x-coordinates
        x = np.random.randint(0, w - h)
        img = img[:, :, x: x + h]
        label[:, 0] -= x
        label[:, 2] -= x
    return img, label
def select_class_generator(class_id):
    """Return a transform keeping only boxes whose class (column 4) equals
    *class_id*.

    The returned callable takes (img, label) and gives back the unchanged
    image plus the matching rows stacked into an array.
    """
    def select_class(img, label):
        matches = [item for item in label if item[4] == class_id]
        if not matches:
            # np.stack([]) raises ValueError; return an empty (0, ncols)
            # label instead so downstream code can handle "no boxes".
            return img, np.zeros((0,) + tuple(np.shape(label)[1:]))
        return img, np.stack(matches)
    return select_class
def softmax_celoss_with_ignore(F, label, ignore_label):
    """Softmax cross-entropy over logits *F*, averaged over non-ignored rows.

    # NOTE(review): rows whose label equals ignore_label get an all-zero
    # one-hot row (and hence zero loss) only if ignore_label lies outside
    # [0, num_classes) -- confirm against callers.
    """
    output = mx.nd.log_softmax(F)
    # Build a one-hot matrix column by column
    label_matrix = mx.nd.zeros(output.shape, ctx=output.context)
    for i in range(label_matrix.shape[1]):
        label_matrix[:, i] = (label==i)
    ignore_unit = (label == ignore_label)
    loss = -mx.nd.sum(output * label_matrix, axis=1)
    # Average over the rows that are not ignored
    return mx.nd.sum(loss) / (output.shape[0] - mx.nd.sum(ignore_unit))
|
from typing import Optional
from pincer import Client, command
from pincer.objects import TextChannel, MessageContext, InteractionFlags
from app.classes.drink_list import DrinkList
from app.exceptions import DrinkAlreadyExists, DrinkNotFound, BotError
# NOTE(review): hard-coded Discord IDs -- presumably the channel holding the
# persistent drink-list message and the message edited in place by DrinkList;
# confirm against the target guild.
CHANNEL_ID = 888531559861321738
MESSAGE_ID = 888535656542928906
class DrinksCog:
    """Cog managing a drink-count list persisted in a Discord message.

    Each slash command mutates the DrinkList and replies (ephemerally)
    with the updated flattened list.
    """
    def __init__(self, client):
        """Link to bot instance."""
        self.name = 'Gestion Boisson'
        self.client = client
        # Resolved in on_ready, once the guild cache is available
        self.channel: Optional[TextChannel] = None
        self.drink_list: Optional[DrinkList] = None
    @Client.event
    async def on_ready(self):
        # Bind the cog to the pinned drink-list message so commands can edit it
        self.channel = self.client.guild.get_channel(CHANNEL_ID)
        self.drink_list = DrinkList(
            await self.channel.fetch_message(MESSAGE_ID)
        )
    @command(
        name="create",
        description="Créer une nouvelle boisson",
    )
    async def create_command(self, drink):
        """Create a new drink entry and echo the updated list."""
        await self.drink_list.create(drink)
        return (
            f"Boisson ajouté!\n"
            f">>> {self.drink_list.flatten()}",
            InteractionFlags.EPHEMERAL
        )
    @command(
        name="add",
        description="Incrémente le compteur d' une boisson"
    )
    async def add_command(self, drink, n=1):
        """Increment a drink's counter by *n* (default 1)."""
        await self.drink_list.add(drink, n)
        return (
            f"`{drink}`x`{n}` Ajouté!\n"
            f">>> {self.drink_list.flatten()}",
            InteractionFlags.EPHEMERAL
        )
    @command(
        name="delete",
        description="Retire un boisson de la liste"
    )
    async def delete_command(self, drink):
        """Remove a drink entirely from the list."""
        await self.drink_list.delete(drink)
        return (
            f"`{drink}` Supprimé!\n"
            f">>> {self.drink_list.flatten()}",
            InteractionFlags.EPHEMERAL
        )
    @command(
        name="remove",
        description="Décrémente le compteur d' une boisson"
    )
    async def remove_command(self, drink, n=1):
        """Decrement a drink's counter by *n* (default 1)."""
        await self.drink_list.remove(drink, n)
        return (
            f"`{drink}` `x`{n} Retiré!\n"
            f">>> {self.drink_list.flatten()}",
            InteractionFlags.EPHEMERAL
        )
    @command(
        name="desc-1",
        description="Met à jour la description de la liste de boissons."
    )
    async def set_drink_list_description(self, ctx, *, message):
        """Persist a new list description to disk and refresh the message."""
        await ctx.message.delete()
        with open(
            "assets/drinklist_description.txt",
            'w', encoding='utf-8'
        ) as f:
            f.write(message)
        await self.drink_list.update()
        return (
            f"Description mise à jour\n>>> {message}",
            InteractionFlags.EPHEMERAL
        )
# Conventional cog entry point -- the loader presumably imports `setup`
setup = DrinksCog
|
from django.shortcuts import render, redirect
from django.views import View, generic
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.views import LoginView
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.models import Group
from .forms import CafeManagerCreationForm
from kaffeefinder.apps.core.mixins import AnonymousMixin
class SignUpView(AnonymousMixin, View):
    """Regular user sign-up: renders the form on GET; on POST creates the
    user, logs them in and redirects to the cafe list."""

    def get(self, request):
        return render(request, "accounts/signup.html")

    def post(self, request):
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            messages.success(request, "شما با موفقیت جساب حور را ساختید")
            return redirect("cafes:list")
        # Validation errors are surfaced through the messages framework;
        # the stray debugging print(form.errors) was removed.
        messages.error(request, form.errors)
        return redirect("accounts:signup")
class NewLoginView(LoginView):
    """Django's built-in LoginView, pointed at the project's login template."""
    template_name = "accounts/login.html"
# class NewLoginView(View):
# def get(self, request, *args, **kwargs):
# return render(request, "accounts/login.html")
# def post(self, request, *args, **kwargs):
# username = request.POST.get("username")
# passowrd = request.POST.get("passowrd")
# if username and passowrd:
# user = authenticate(username=username, password=passowrd)
# if user:
# login(request, user)
# messages.success(request, "باموفقیت وارد شدید")
# return redirect("cafes:list")
# else:
# messages.success(request, "کاربری با این مشخصات وجود ندارد!")
# return redirect("accounts:login")
# else:
# messages.warning(request, "نام کاربری یا رمزعبور نمیتواند خالی باشد")
# return redirect("accounts:login")
# a different view for cafe managers to sign up
# which redirects to a view to create their cafe
class SignUpAsCafeManager(AnonymousMixin, generic.FormView):
    # NOTE(review): this FormView is immediately shadowed by the View-based
    # class of the same name defined right below it, so as written it is
    # dead code. Decide which implementation should survive.
    template_name = "accounts/signup.html"
    form_class = CafeManagerCreationForm
    success_url = '/cafes/add/'

    def form_valid(self, form):
        # Log the freshly created manager in before redirecting to success_url
        user = form.save()
        login(self.request, user)
        return super().form_valid(form)
class SignUpAsCafeManager(AnonymousMixin, View):
    """Cafe-manager sign-up: like SignUpView, but also adds the new user to
    the "cafe manager" group and redirects to the cafe-creation page."""

    def get(self, request):
        return render(request, "accounts/signup.html", context={"manager": True})

    def post(self, request):
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Grant the manager role; the group is created on first use
            mnger_gp, _ = Group.objects.get_or_create(name="cafe manager")
            mnger_gp.user_set.add(user)
            user.save()
            login(request, user)
            messages.success(request, "شما با موفقیت جساب حور را ساختید")
            return redirect("cafes:add_cafe")
        # Errors reach the user via messages; the debugging
        # print(form.errors) was removed.
        messages.error(request, form.errors)
        return redirect("accounts:signup")
class LogoutView(View):
    """Logs the current user out on either GET or POST.

    Unauthenticated visitors get a warning instead. The original post()
    checked `self.request.is_authenticated` -- an attribute that exists on
    the *user*, not the request -- which raised AttributeError; both verbs
    now share one correct implementation.
    """

    def _logout_and_redirect(self, request):
        # Shared GET/POST behavior: only authenticated users may log out.
        if request.user.is_authenticated:
            logout(request)
            messages.warning(request, "شما از حساب کاربری خود خارج شدید")
            return redirect("pages:index")
        messages.warning(request, "شما به این مسیر دسترسی ندارید")
        return redirect("pages:index")

    def get(self, request):
        return self._logout_and_redirect(request)

    def post(self, request):
        return self._logout_and_redirect(request)
|
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import inspect
import math
import os
import shutil
import sys
import tarfile
from collections import OrderedDict
if sys.version_info[0] < 3:
# noinspection PyUnresolvedReferences,PyCompatibility
from cStringIO import StringIO
else:
# noinspection PyUnresolvedReferences,PyCompatibility
from io import StringIO
if sys.version_info[0] < 3:
# noinspection PyUnresolvedReferences,PyCompatibility
from Queue import Queue
else:
# noinspection PyUnresolvedReferences,PyCompatibility
from queue import Queue
if sys.version_info[0] < 3:
import unicodedata
from .types import *
# ANSI escape codes used to colorize error/warning output on TTYs
_RedText = "\x1b[31m"
_ResetStyle = "\x1b[0m"
# Sticky flag set by print_error(); read via had_error()/raise_if_had_error()
_had_error = False
# Module-level streams so tools/tests can redirect the converter's output
error_file = sys.stderr
warning_file = sys.stderr
info_file = sys.stdout
def had_error():
    """Return True if print_error() has been called since the last reset."""
    return _had_error


def reset_error():
    """Clear the sticky module-level error flag."""
    global _had_error
    _had_error = False


class ConversionException(Exception):
    """Raised by raise_if_had_error() when errors were reported."""
    pass


def raise_if_had_error(listing=""):
    """Raise ConversionException (after resetting the flag) if any error was
    printed; optionally print *listing* first to aid debugging."""
    if had_error():
        reset_error()
        if listing:
            print_info("Listing code before raise:")
            print_info(listing)
        raise ConversionException("nn_lang_converter: There were errors!")
def print_error(msg):
    """Write an error to error_file (red when it is a TTY) and set the
    sticky error flag."""
    global _had_error
    if error_file.isatty():
        error_file.write("%sError: %s%s\n" % (_RedText, msg, _ResetStyle))
    else:
        error_file.write("Error: %s\n" % (msg,))
    error_file.flush()
    _had_error = True


def print_warning(msg):
    """Write a warning to warning_file (red when it is a TTY)."""
    if warning_file.isatty():
        warning_file.write("%sWarning: %s%s\n" % (_RedText, msg, _ResetStyle))
    else:
        warning_file.write("Warning: %s\n" % (msg,))
    warning_file.flush()


def print_info(msg, end='\n'):
    """Write an informational message to info_file."""
    info_file.write(msg + end)
    info_file.flush()
def add_line_numbers(s):
    """Return *s* with every line prefixed by its 1-based number ("1: ...")."""
    numbered = ['{}: {}'.format(number, line)
                for number, line in enumerate(s.split('\n'), 1)]
    return '\n'.join(numbered)
# Sentinel: return REMOVE from the callback of recursive_transform() to drop
# the corresponding element from the rebuilt structure.
REMOVE = object()


def recursive_transform(data, fun):
    """Return a copy of a nested structure with *fun* applied to every leaf.

    Exact dict/OrderedDict/list/tuple instances are rebuilt with the same
    type (subclasses are treated as leaves); leaves for which fun() returns
    REMOVE are omitted from the result.
    """
    if type(data) is dict or type(data) is OrderedDict:
        data2 = type(data)()
        for k, v in data.items():
            w = recursive_transform(v, fun)
            if w is not REMOVE:
                data2[k] = w
    elif type(data) is list or type(data) is tuple:
        data2 = []
        for v in data:
            w = recursive_transform(v, fun)
            if w is not REMOVE:
                data2.append(w)
        data2 = type(data)(data2)
    else:
        data2 = fun(data)
    return data2
def recursive_visit(data, fun):
    """Call *fun* on every leaf of a nested dict/list/tuple structure.

    Only exact dict/OrderedDict/list/tuple instances are recursed into
    (subclasses count as leaves); containers themselves are never passed
    to *fun*.
    """
    if type(data) in (dict, OrderedDict):
        for value in data.values():
            recursive_visit(value, fun)
    elif type(data) in (list, tuple):
        for value in data:
            recursive_visit(value, fun)
    else:
        fun(data)
def recursive_contains_by_pointer(haystack, needle):
    """Return True if *needle* occurs (compared by identity, not equality)
    as a leaf anywhere inside the nested structure *haystack*."""
    # Single-element list instead of nonlocal: the module supports Python 2
    is_found = [False]
    def visitor(x):
        if x is needle:
            is_found[0] = True
    recursive_visit(haystack, visitor)
    return is_found[0]
def zip_inverse(output_count, arr):
    """Unzip a list of tuples into *output_count* lists.

    An empty *arr* still yields *output_count* empty lists so callers can
    unpack the expected number of outputs.
    """
    if not arr:
        return tuple([] for _ in range(output_count))
    return tuple(list(column) for column in zip(*arr))
def flatten(x):
    """Return all leaves of a nested list/tuple structure as a flat list."""
    result = []
    _flatten(x, result)
    return result


def _flatten(x, out_arr):
    # Append leaves directly; recurse into lists and tuples.
    if not isinstance(x, (list, tuple)):
        out_arr.append(x)
        return
    for element in x:
        _flatten(element, out_arr)
def to_list(x):
    """Convert a list/tuple/numpy array to a plain Python list.

    Lists are returned as-is (not copied); tuples and ndarrays are
    converted.

    Raises
    ------
    TypeError
        For unsupported types. (TypeError subclasses the bare Exception
        raised previously, so existing handlers keep working.)
    """
    import numpy as np

    if isinstance(x, list):
        return x
    if isinstance(x, tuple):
        return list(x)
    if isinstance(x, np.ndarray):
        return x.tolist()
    raise TypeError("Cannot convert {} to list".format(x.__class__.__name__))
def unique(arr, key=None):
    """Return the items of *arr* with duplicates removed, preserving order.

    Two items are duplicates when key(item) compares equal; the first
    occurrence wins. By default items are compared directly.
    """
    if key is None:
        def key(item):
            return item
    seen = set()
    result = []
    for item in arr:
        k = key(item)
        if k in seen:
            continue
        seen.add(k)
        result.append(item)
    return result
def has_greater_than(arr, x):
    """Return True if any leaf of the nested structure *arr* is > *x*."""
    has = [False]
    def visit(y):
        if y > x:
            has[0] = True
    recursive_visit(arr, visit)
    return has[0]


def has_not_equal(arr, x):
    """Return True if any leaf of the nested structure *arr* is != *x*."""
    has = [False]
    def visit(y):
        if y != x:
            has[0] = True
    recursive_visit(arr, visit)
    return has[0]


def has_greater_than_0(arr):
    """True if any leaf is > 0."""
    return has_greater_than(arr, 0)


def has_greater_than_1(arr):
    """True if any leaf is > 1."""
    return has_greater_than(arr, 1)


def has_not_equal_0(arr):
    """True if any leaf differs from 0."""
    return has_not_equal(arr, 0)


def has_not_equal_1(arr):
    """True if any leaf differs from 1."""
    return has_not_equal(arr, 1)
def int_log2(i):
    """Return log2(i) for exact powers of two.

    Returns
    -------
    int
        log2(i) if i is a positive power of two, -1 if i == 0, and -2
        otherwise (negative values and non-powers of two). Unlike the
        previous 32-iteration loop, powers of two >= 2**32 are handled.
    """
    if i == 0:
        return -1
    # A positive power of two has exactly one bit set
    if i > 0 and (i & (i - 1)) == 0:
        return i.bit_length() - 1
    return -2
def get_inverse_permutation(perm):
    """Return the inverse permutation q of *perm*, i.e. q[perm[i]] == i."""
    # Sorting the positions by their target index yields the inverse mapping
    return [pos for pos, _target in sorted(enumerate(perm), key=lambda t: t[1])]
def apply_permutation(list_, perm):
    """Reorder *list_* so that output element i is list_[perm[i]]."""
    assert len(list_) == len(perm)
    return [list_[source] for source in perm]
def apply_permutation_to_axis(axis, perm):
    """Return the new position of *axis* after applying *perm* (the index
    of *axis* within the permutation)."""
    return perm.index(axis)


def apply_permutation_to_axes(axes, perm):
    """Map every axis in *axes* to its new position under *perm*."""
    return [perm.index(axis) for axis in axes]
def _unsqueeze_shape2(shape, axes, i, n):
    # Recursive helper: walk output positions 0..n-1, emitting a 1 at every
    # position listed in axes and consuming one input dimension otherwise.
    return ([] if i == n
            else ([1] + _unsqueeze_shape2(shape, axes, i + 1, n) if i in axes
                  else [shape[0]] + _unsqueeze_shape2(shape[1:], axes, i + 1, n)))


# as in nnef (axes correspond to output dims)
def apply_unsqueeze_shape(shape, axes):
    """Insert singleton dimensions into *shape* at the output positions
    listed in *axes*."""
    return _unsqueeze_shape2(shape, axes, 0, len(shape) + len(axes))
def apply_squeeze_shape(shape, axes, can_squeeze_non_one=False):
    """Drop the dimensions listed in *axes* from *shape*.

    Unless *can_squeeze_non_one* is set, every squeezed dimension must
    equal 1 (checked with assert).
    """
    if not can_squeeze_non_one:
        for axis in axes:
            # Only validate axes that index into shape, matching the
            # original range(len(shape)) bounds
            if 0 <= axis < len(shape):
                assert shape[axis] == 1
    return [dim for position, dim in enumerate(shape) if position not in axes]
def without(iterable, x):
    """Return *iterable* as a list with the first occurrence of *x* removed
    (the list is unchanged if *x* is absent)."""
    result = list(iterable)
    try:
        result.remove(x)
    except ValueError:
        pass
    return result
def get_functions(prefix="", module=None):
    """Return the functions of *module* whose names start with *prefix*,
    ordered by their position in the source file.

    If *module* is None, the caller's own module is used (determined via
    stack inspection).
    """
    if module is None:
        caller_frame = inspect.stack()[1]
        module = inspect.getmodule(caller_frame[0])
    return sorted(
        [
            obj
            for name, obj in inspect.getmembers(module)
            if inspect.isfunction(obj) and name.startswith(prefix)
        ],
        # Sort by source line number so definition order is preserved
        key=lambda f: inspect.getsourcelines(f)[1]
    )
def try_tf_dtype_to_np(tf_dtype):
    """Return the numpy dtype equivalent of a TF dtype, or None if the
    dtype has no numpy representation."""
    import tensorflow as tf
    tf_dtype = tf.as_dtype(tf_dtype).base_dtype
    if tf_dtype.is_numpy_compatible:
        return tf_dtype.as_numpy_dtype
    return None


# Lazily built mapping numpy dtype -> NNEF dtype name; see _np_load_dtype_table()
_nnef_dtype_by_np_dtype = None


def _np_load_dtype_table():
    """Build the numpy->NNEF dtype table on first call (numpy imported lazily)."""
    import numpy as np
    global _nnef_dtype_by_np_dtype
    if _nnef_dtype_by_np_dtype is None:
        _nnef_dtype_by_np_dtype = {
            np.dtype(np.float16): 'scalar',
            np.dtype(np.float32): 'scalar',
            np.dtype(np.float64): 'scalar',
            np.dtype(np.int8): 'integer',
            np.dtype(np.uint8): 'integer',
            np.dtype(np.int16): 'integer',
            np.dtype(np.uint16): 'integer',
            np.dtype(np.int32): 'integer',
            np.dtype(np.uint32): 'integer',
            np.dtype(np.int64): 'integer',
            np.dtype(np.uint64): 'integer',
            np.dtype(np.bool_): 'logical'
        }


def is_np_dtype_exportable_to_nnef(dtype):
    """Return True if *dtype* maps to an NNEF dtype."""
    import numpy as np
    _np_load_dtype_table()
    return np.dtype(dtype) in _nnef_dtype_by_np_dtype


def try_np_dtype_to_nnef(dtype):
    """Return the NNEF dtype name ('scalar'/'integer'/'logical') for
    *dtype*, or None when it has no NNEF equivalent."""
    import numpy as np
    _np_load_dtype_table()
    return _nnef_dtype_by_np_dtype.get(np.dtype(dtype))


def are_np_dtypes_compatible_in_nnef(t1, t2):
    """Return True if both numpy dtypes map to the same NNEF dtype."""
    t1 = try_np_dtype_to_nnef(t1)
    t2 = try_np_dtype_to_nnef(t2)
    return t1 is not None and t2 is not None and t1 == t2
def nnef_dtype_to_tf(nnef_dtype):
    """Map an NNEF dtype name ('integer'/'scalar'/'logical') to a TF dtype.

    Raises KeyError for unknown names.
    """
    import tensorflow as tf
    return {
        "integer": tf.int32,
        "scalar": tf.float32,
        "logical": tf.bool
    }[nnef_dtype]
def tf_type_of_python_scalar(x):
    """Map a Python scalar (bool/float/int) to the corresponding TF dtype.

    bool must be tested before int: isinstance(True, int) is True, so the
    previous ordering classified booleans as tf.int32.
    """
    import tensorflow as tf
    if isinstance(x, bool):
        return tf.bool
    elif isinstance(x, float):
        return tf.float32
    elif isinstance(x, int):
        return tf.int32
    else:
        assert False
def write_nnef_tensor(filename, tensor):
    """Write *tensor* to *filename* in NNEF binary format, creating any
    missing parent directories.

    A bare filename has an empty dirname; guard it, because makedirs('')
    raises (os.path.exists('') is always False).
    """
    import nnef
    directory = os.path.dirname(filename)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    with open(filename, "wb") as file:
        nnef.write_tensor(file, tensor, version=(1, 0))
def read_nnef_tensor(filename):
    """Read and return the tensor stored in NNEF binary format at *filename*.

    Only the tensor itself is returned; read_tensor's second tuple element
    is discarded.
    """
    import nnef
    with open(filename, "rb") as file:
        return nnef.read_tensor(file)[0]
def tf_call_silently(fun, *args):
    """Call fun(*args) with TF_CPP_MIN_LOG_LEVEL=3 (TensorFlow C++ logging
    suppressed) and return the result.

    The previous value of the environment variable is restored -- and a
    variable that did not exist is removed -- even when *fun* raises; the
    original implementation leaked the override on exceptions.
    """
    old_value = os.environ.get('TF_CPP_MIN_LOG_LEVEL')
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    try:
        return fun(*args)
    finally:
        if old_value is None:
            del os.environ['TF_CPP_MIN_LOG_LEVEL']
        else:
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = old_value
def tf_has_cuda_gpu():
    """Return True if TensorFlow can see a CUDA GPU (TF logging suppressed)."""
    import tensorflow as tf
    return tf_call_silently(tf.test.is_gpu_available, True)


def _tf_is_constant(tensor):
    """Return True if no Variable/Placeholder feeds *tensor* transitively."""
    # for the rank operator this is not good logic, it can have a placeholder as input but it is still constant
    if "Variable" in tensor.op.node_def.op or "Placeholder" in tensor.op.node_def.op:
        return False
    for t in tensor.op.inputs:
        if not _tf_is_constant(t):
            return False
    return True


def _tf_evaluate_if_constant(tensor):
    """Evaluate *tensor* in a fresh session if it is constant, else None.

    NOTE(review): uses TF1 tf.Session -- incompatible with TF2 eager mode.
    """
    import tensorflow as tf
    if not _tf_is_constant(tensor):
        return None
    with tf.Session() as sess:
        return sess.run(tensor)


def tf_constant_value(tensor, evaluate_if_needed=True):
    """Best-effort static value of *tensor*.

    First tries tf.contrib.util.constant_value (TF1-only API); when that
    fails and *evaluate_if_needed* is set, falls back to evaluating the
    constant subgraph in a session.
    """
    import tensorflow as tf
    res = tf.contrib.util.constant_value(tensor)
    if not evaluate_if_needed or res is not None:
        return res
    return tf_call_silently(_tf_evaluate_if_constant, tensor)
def tf_version_greater_equal(major, minor):
    """Return True if the installed TensorFlow version is >= major.minor.

    The version string "X.Y.Z..." is parsed positionally; the original
    implementation bound it to a local named `str`, shadowing the builtin.
    """
    import tensorflow as tf
    version = tf.__version__
    first_dot = version.index('.')
    second_dot = version.index('.', first_dot + 1)
    return (int(version[:first_dot]),
            int(version[first_dot + 1:second_dot])) >= (major, minor)
def tf_print_graph(output_tensors):
    """Print the transitive producer graph of *output_tensors* to info_file.

    Accepts a single tensor, a list of tensors or a dict of tensors.
    """
    import tensorflow as tf
    if isinstance(output_tensors, tf.Tensor):
        output_tensors = [output_tensors]
    if isinstance(output_tensors, dict):
        output_tensors = list(output_tensors.values())
    print_info("{}".format(output_tensors))
    visited = set()
    indent = ""
    _tf_print_graph2(output_tensors, visited, indent)


def _tf_print_graph2(tensors, visited, indent):
    # Helper: print every tensor of *tensors* once, at the given indent
    for tensor in tensors:
        _tf_print_graph3(tensor, visited, indent)


def _tf_print_graph3(tensor, visited, indent):
    # Helper: print one tensor line, then recurse into its op's inputs
    if tensor in visited:
        return
    visited.add(tensor)
    input_names = [i.name for i in tensor.op.inputs]
    const_val = tf_constant_value(tensor)
    if const_val is not None:
        # Only show small constant values inline
        if const_val.size > 5:
            const_val = "value_known"
        else:
            const_val = "value_known: {}".format(const_val)
    print_info("{}{} = {}({}) {} shape: {}".format(
        indent,
        tensor.name,
        tensor.op.node_def.op,
        ", ".join(input_names),
        "" if const_val is None else const_val,
        tensor.shape
    ))
    _tf_print_graph2(tensor.op.inputs, visited, indent + "  ")


def _tf_get_inputs():
    """Return the default graph's Variable/Placeholder output tensors,
    sorted by name."""
    import tensorflow as tf
    tensors = []
    for op in tf.get_default_graph().get_operations():
        if "Variable" in op.node_def.op or "Placeholder" in op.node_def.op:
            tensors.append(op.outputs[0])
    return sorted(tensors, key=lambda t: t.name)
def to_id(s):
    """Sanitize *s* into an identifier-like string: every character that is
    neither alphanumeric nor an underscore becomes '_'."""
    return ''.join(ch if ch.isalnum() or ch == '_' else '_' for ch in s)


def to_id_without_number(s):
    """Like to_id, but first strips a trailing ':N' tensor-output suffix."""
    return to_id(s.partition(':')[0])


def get_short_name(name):
    """Return the last path component of a tensor/op name without its ':N'
    output index: "scope/op:0" -> "op"."""
    base = name.partition(':')[0]
    return base.rsplit('/', 1)[-1]
def normalize_str_upper(str_or_none):
    """Upper-case *str_or_none*, decoding bytes as UTF-8 first.
    None passes through unchanged."""
    if str_or_none is None:
        return None
    text = str_or_none.decode('utf-8') if isinstance(str_or_none, bytes) else str_or_none
    return text.upper()
def tf_with_gradients(net_fun):
    """Wrap a network-builder function so it also emits gradients.

    Returns a function that builds the net, then returns a dict mapping
    'outputN' to each output tensor and 'grad_<input id>' to the gradient of
    the differentiable outputs w.r.t. each Variable/Placeholder input.
    """
    import tensorflow as tf
    def f():
        outputs = net_fun()
        if isinstance(outputs, (list, tuple)):
            outputs = list(outputs)
        else:
            outputs = [outputs]
        inputs = _tf_get_inputs()
        # We can test with other grad_ys too
        # grad_ys = [tf.constant(value=2.0, dtype=tf.float32, shape=o.shape) for o in outputs]
        grad_ys = None
        # Only float/int outputs are differentiated; other dtypes (e.g. bool)
        # are excluded from ys.
        ys = [y for y in outputs if y.dtype.name.startswith("float") or y.dtype.name.startswith("int")]
        gradients = [g for g in tf.gradients(ys=ys, xs=inputs, grad_ys=grad_ys) if g not in outputs]
        # name[:-2] strips the ':0' output suffix before sanitizing the name.
        items = [("output{}".format(i), o) for i, o in enumerate(outputs)]
        items += [("grad_{}".format(to_id(i.name[:-2])), g) for i, g in zip(inputs, gradients) if None not in [i, g]]
        return dict(unique(items, key=lambda item: item[1]))
    # Preserve the wrapped function's name so callers can identify the net.
    f.__name__ = net_fun.__name__
    return f
def is_nhwc(op):
    """True when op's 'data_format' argument is absent or does not start with
    'NC' (case-insensitive), i.e. the op uses a channels-last layout."""
    fmt = op.args.get('data_format')
    if fmt is None:
        return True
    if isinstance(fmt, bytes):
        fmt = fmt.decode('utf-8')
    return not fmt.upper().startswith("NC")
def shape_nhwc_to_nchw(shape):
    """Reorder an NHWC shape list to NCHW (channels move next to batch)."""
    batch, spatial, channels = shape[:1], shape[1:-1], shape[-1:]
    return batch + channels + spatial


def shape_nchw_to_nhwc(shape):
    """Reorder an NCHW shape list to NHWC (channels move to the end)."""
    batch, channels, spatial = shape[:1], shape[1:2], shape[2:]
    return batch + spatial + channels


def shape_hwcn_to_nchw(shape):
    """Reorder an HWCN filter shape to NCHW."""
    spatial, channels, batch = shape[:-2], shape[-2:-1], shape[-1:]
    return batch + channels + spatial


def shape_hwcm_to_nchw(shape):
    """Convert an HWCM filter shape to NCHW, treating the last two dims as
    (C, M): the C*M product becomes dim 0, with a singleton second dim."""
    c, m = shape[-2], shape[-1]
    return [c * m, 1] + shape[:-2]


def shape_nchw_to_hwcn(shape):
    """Reorder an NCHW filter shape to HWCN."""
    batch, channels, spatial = shape[:1], shape[1:2], shape[2:]
    return spatial + channels + batch


def shape_nchw_to_hwcm(shape, input_channels):
    """Convert an NCHW filter shape to HWCM, splitting dim 0 into
    (input_channels, multiplier)."""
    multiplier = shape[0] // input_channels
    return shape[2:] + [input_channels, multiplier]


def transpose_axes_nhwc_to_nchw(rank):
    """Axis permutation turning an NHWC-ordered tensor of *rank* dims into NCHW."""
    return shape_nhwc_to_nchw(list(range(rank)))


def transpose_axes_nchw_to_nhwc(rank):
    """Axis permutation turning an NCHW-ordered tensor of *rank* dims into NHWC."""
    return shape_nchw_to_nhwc(list(range(rank)))


def transpose_axes_hwcn_to_nchw(rank):
    """Axis permutation turning an HWCN-ordered tensor of *rank* dims into NCHW."""
    return shape_hwcn_to_nchw(list(range(rank)))


def transpose_axes_nchw_to_hwcn(rank):
    """Axis permutation turning an NCHW-ordered tensor of *rank* dims into HWCN."""
    return shape_nchw_to_hwcn(list(range(rank)))
def reorder_axes(axes, perm):
    """Map each axis index in *axes* to its position within permutation *perm*."""
    return [perm.index(axis) for axis in axes]


def is_list_of_consecutive_numbers(_list):
    """True when *_list* is empty, a singleton, or an unbroken ascending run
    (n, n+1, n+2, ...)."""
    return len(_list) <= 1 or _list == list(range(_list[0], _list[0] + len(_list)))


def is_reorderable_squeeze(squeeze_axes, perm):
    """True when the squeeze axes, re-expressed in *perm* order and sorted,
    remain consecutive (so the squeeze can be moved across the transpose)."""
    remapped = sorted(reorder_axes(squeeze_axes, perm))
    return is_list_of_consecutive_numbers(remapped)
def np_apply_transforms(arr, trafos):
    """Apply a sequence of layout transforms to numpy array *arr*.

    Each entry of *trafos* is a pair whose first element names the transform:
    ("transpose", axes), ("unsqueeze", axes), ("squeeze", axes) or
    ("reshape", shape). An unknown kind is a programming error (assertion).
    A falsy *trafos* (None or empty) returns *arr* unchanged.

    Fix: `import numpy as np` used to sit inside the loop body and was
    re-executed on every iteration; it is now hoisted above the loop.
    """
    import numpy as np
    for trafo in (trafos or []):
        kind = trafo[0]
        if kind == "transpose":
            arr = np.transpose(arr, axes=trafo[1])
        elif kind == "unsqueeze":
            arr = np.reshape(arr, newshape=apply_unsqueeze_shape(list(arr.shape), trafo[1]))
        elif kind == "squeeze":
            arr = np.reshape(arr, newshape=apply_squeeze_shape(list(arr.shape), trafo[1]))
        elif kind == "reshape":
            arr = np.reshape(arr, newshape=trafo[1])
        else:
            assert False, "unknown transform: {}".format(kind)
    return arr
def starts_with(list_, prefix):
    """True when *list_* begins with the exact sequence *prefix*."""
    n = len(prefix)
    return len(list_) >= n and list_[:n] == prefix


def ends_with(list_, ending):
    """True when *list_* ends with the exact sequence *ending*."""
    n = len(ending)
    return len(list_) >= n and list_[-n:] == ending


def can_broadcast_from_left(list_, prefix):
    """True when *prefix* broadcasts against the leading dims of *list_*:
    each prefix dim must equal the corresponding dim or be 1."""
    if len(list_) < len(prefix):
        return False
    return all(p in (1, d) for p, d in zip(prefix, list_))


def can_broadcast_from_right(list_, ending):
    """True when *ending* broadcasts against the trailing dims of *list_*:
    each ending dim must equal the corresponding dim (right-aligned) or be 1."""
    if len(list_) < len(ending):
        return False
    return all(e in (1, d) for e, d in zip(reversed(ending), reversed(list_)))
def ensure_dir(path, clear=False):
    """Guarantee that *path* exists as a directory.

    Creates it (with parents) when missing. When it already exists:
    raise if it is not a directory; wipe and recreate it when clear=True;
    otherwise leave it untouched.
    """
    if not os.path.exists(path):
        os.makedirs(path)
        return
    if not os.path.isdir(path):
        raise Exception("{} is not a directory".format(path))
    if clear:
        shutil.rmtree(path)
        os.makedirs(path)
def without_slash(path):
    """Drop trailing '/' characters from *path*, but never shorten it below a
    single character (so '/' and '///' both yield '/')."""
    trimmed = path.rstrip('/')
    return trimmed if trimmed else path[:1]


def without_file_name(path):
    """Return the directory part of *path* (everything before the final
    component; '' when there is no directory part)."""
    head, _tail = os.path.split(path)
    return head
class _OrderedDictMaker(object):
    """Builds an OrderedDict from slice syntax:
    ordered_dict_maker[key1:value1, key2:value2, ...]."""
    def __getitem__(self, keys):
        # A single key:value pair arrives as one slice, not a tuple of slices.
        items = keys if isinstance(keys, tuple) else (keys,)
        assert all(isinstance(item, slice) for item in items)
        return OrderedDict((item.start, item.stop) for item in items)
ordered_dict_maker = _OrderedDictMaker()
def nnef_dilated_filter_size_element(filter_size, dilation):
    """Effective receptive extent of a filter with *filter_size* taps at the
    given dilation."""
    return (filter_size - 1) * dilation + 1


def nnef_auto_padding_element(upscaled_size, downscaled_size, filter_size, stride, dilation):
    """(front, back) padding for one spatial dim so that a strided, dilated
    filter maps upscaled_size down to downscaled_size; when the total padding
    is odd, the back side gets the extra unit."""
    total = ((downscaled_size - 1) * stride
             + nnef_dilated_filter_size_element(filter_size, dilation)
             - upscaled_size)
    return int(math.floor(total / 2)), int(math.ceil(total / 2))


# uses spatial sizes
def nnef_auto_padding(upscaled_size, downscaled_size, filter_size, stride, dilation):
    """Per-dimension (front, back) auto-padding; all argument lists must have
    the same rank (a mismatch is reported via print_error)."""
    ranks = [len(upscaled_size), len(downscaled_size), len(filter_size), len(stride), len(dilation)]
    if len(unique(ranks)) != 1:
        print_error("nnef_auto_padding: different ranks")
    dims = zip(upscaled_size, downscaled_size, filter_size, stride, dilation)
    return [nnef_auto_padding_element(X, x, f, s, d) for X, x, f, s, d in dims]
def count(iterable):
    """Return how many elements of *iterable* are truthy.

    Idiom fix: replaces the manual counter loop with a generator expression
    summed at C speed; works for any iterable, including one-shot iterators.
    """
    return sum(1 for element in iterable if element)
def silence(fun, *args, **kwargs):
    """Run fun(*args, **kwargs) with stdout captured instead of printed.

    Returns fun's result — unless the captured output contains 'error' or
    'not implemented' (case-insensitive), in which case an Exception carrying
    the full output is raised, even when fun itself returned normally.
    """
    saved_stdout = sys.stdout
    sys.stdout = buffer = StringIO()
    try:
        return fun(*args, **kwargs)
    finally:
        captured = buffer.getvalue()
        buffer.close()
        sys.stdout = saved_stdout
        lowered = captured.lower()
        if "not implemented" in lowered or "error" in lowered:
            raise Exception("Error in output:\n" + captured)
def compare_activation_dirs_np(dirname1, dirname2, verbose=False, tf_hack_fun_name=""):
    """Compare two directories of NNEF tensor dumps file-by-file.

    Returns True when every file present in both directories matches within
    tolerance; mismatches and files unique to one side are reported via
    print_warning/print_error.
    """
    import numpy as np
    print_info("DIFF {} {}".format(dirname1, dirname2))
    fnlist1 = sorted(os.listdir(dirname1))
    fnlist2 = sorted(os.listdir(dirname2))
    fnset1 = set(fnlist1)
    fnset2 = set(fnlist2)
    fns = [fn for fn in fnlist1 if fn not in fnset2]
    if fns:
        print_warning("files only present in left dir: " + ", ".join(fns))
    fns = [fn for fn in fnlist2 if fn not in fnset1]
    if fns:
        print_warning("files only present in right dir: " + ", ".join(fns))
    fns = [fn for fn in fnlist1 if fn in fnset2]
    good = True
    for fn in fns:
        arr1 = read_nnef_tensor('{}/{}'.format(dirname1, fn))
        arr2 = read_nnef_tensor('{}/{}'.format(dirname2, fn))
        # Tolerate shapes that differ only by a prefix relationship
        # (e.g. trailing singleton dims) by reshaping the left array.
        can_reshape = (starts_with(arr1.shape, arr2.shape) or starts_with(arr2.shape, arr1.shape))
        if can_reshape:
            arr1 = np.reshape(arr1, arr2.shape)
        if arr1.dtype != arr2.dtype:
            print_warning("different dtypes: {} {}".format(arr1.dtype.name, arr2.dtype.name))
            if not are_np_dtypes_compatible_in_nnef(arr1.dtype, arr2.dtype):
                print_error("incompatible dtypes: {} {}".format(arr1.dtype.name, arr2.dtype.name))
                good = False
        elif arr1.shape != arr2.shape:
            # NOTE: shapes are only compared when the dtypes matched above.
            print_error("{} shape error {} {}".format(fn, arr1.shape, arr2.shape))
            good = False
        else:
            if arr1.dtype == np.dtype(np.bool_):
                differences = (arr1 != arr2).astype(np.int8).sum(dtype=np.int64)
                if differences > 0: # Maybe too strict
                    print_error("bool tensors different at {} places".format(differences))
                    good = False
            else:
                # Relative error measured against both operands (with an epsilon
                # floor to avoid division by zero), plus the absolute max diff.
                max_error_rate1 = np.max(np.abs(arr2 - arr1) / np.maximum(np.abs(arr1), 1e-32))
                max_error_rate2 = np.max(np.abs(arr2 - arr1) / np.maximum(np.abs(arr2), 1e-32))
                max_error_rate = max(max_error_rate1, max_error_rate2)
                max_diff = np.max(np.abs(arr2 - arr1))
                max_value = min(np.max(np.abs(arr1)), np.max(np.abs(arr2)))
                if max_value < 1e-5:
                    print_info("{} max value: {}".format(fn, max_value))
                # Nets known to accumulate more float error get a looser threshold.
                problematic_functions = [
                    "big_test_inception_v2",
                    "test_optimizer11_with_gradients",
                    "test_optimizer_no_io_transpose_with_gradients"
                ]
                err_thresh = 1e-4 if tf_hack_fun_name in problematic_functions else 1e-5
                # Fail only when BOTH the relative and the absolute error exceed
                # the threshold; either one alone is tolerated.
                if max_error_rate > err_thresh and max_diff > err_thresh:
                    print_error("{} max error rate: {} max diff: {}".format(fn, max_error_rate, max_diff))
                    # print(arr1.flat[:10], '\n', arr2.flat[:10], file=sys.stderr, flush=True)
                    good = False
                elif max_diff != 0 or verbose:
                    print_info("{} max error rate: {} max diff: {}".format(fn, max_error_rate, max_diff))
    if good:
        print_info("Activations were (almost) the same.")
    return good
def get_function_without_decorators(func, _orig_name=None):
    """Walk a decorated function's closure chain to find the undecorated function.

    Searches the closure cells for an object whose __name__ matches the
    original name (which functools.wraps-style decorators preserve). Returns
    *func* itself when it has no closure, or None when nothing matches
    anywhere in the chain.
    """
    target_name = func.__name__ if _orig_name is None else _orig_name
    cells = getattr(func, "__closure__", None)
    if not cells:
        return func
    for candidate in (cell.cell_contents for cell in cells):
        if getattr(candidate, "__name__", None) == target_name:
            return candidate
        if getattr(candidate, "__closure__", None):
            unwrapped = get_function_without_decorators(candidate, target_name)
            if unwrapped:
                return unwrapped
    return None
def get_qualified_name(func, undecorate=True):
    """Return 'module.name' for *func*, unwrapping decorators first unless
    undecorate=False."""
    target = get_function_without_decorators(func) if undecorate else func
    return "{}.{}".format(target.__module__, target.__name__)


def get_qualified_names(functions, undecorate=True):
    """Qualified 'module.name' strings for each function in *functions*."""
    return [get_qualified_name(function, undecorate) for function in functions]


def without_nones(list_):
    """Copy of *list_* with None entries dropped; other falsy values are kept."""
    return [item for item in list_ if item is not None]
def ensure_not_unicode_in_python2(s):
    # type: (AnyStr)->str
    """On Python 2, coerce a `unicode` value to an ASCII-normalized byte
    string; on Python 3 (and for non-unicode inputs) return *s* unchanged."""
    if sys.version_info[0] >= 3:
        return s
    if isinstance(s, unicode):  # noqa: F821 -- `unicode` only exists on Python 2
        return unicodedata.normalize('NFKD', s).encode('ascii', 'replace')
    return s
def pad_right(list_, min_rank, pad_value):
    """Return a copy of *list_* extended with *pad_value* up to length
    *min_rank*; lists already long enough are returned unshortened."""
    padding_needed = max(0, min_rank - len(list_))
    return list_ + [pad_value] * padding_needed
def tgz_compress(dir_name, file_name):
    """Pack the immediate contents of *dir_name* into the gzip'd tar archive
    *file_name*, storing entries under their bare names (no directory prefix)."""
    with tarfile.open(file_name, 'w:gz') as tar:
        for entry in os.listdir(dir_name):
            tar.add(dir_name + '/' + entry, arcname=entry)


def tgz_extract(file_name, dir_name):
    """Unpack the gzip'd tar archive *file_name* into directory *dir_name*."""
    with tarfile.open(file_name, 'r:gz') as tar:
        tar.extractall(dir_name)
|
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from .auditanalytics import *
from .brain import *
from .extractalpha import *
from .fraudfactors import *
from .newconstructs import *
from .kavout import *
from .precisionalpha import *
from .stocktwits import *
from .valuengine import *
from .wallstreethorizon import *
|
from ..models import User
from .resource import ResourceSerializer
import smartchef.serializers.household as HouseholdSerializer
from django.contrib.auth.hashers import make_password
from django.db.transaction import atomic
from rest_framework import serializers
class UserSerializer(ResourceSerializer):
    """Serializer for User resources; household membership is exposed read-only."""
    # Households render as a list of their ids; membership itself is managed
    # elsewhere (read_only), not through this serializer.
    households = serializers.SlugRelatedField(
        slug_field='id', many=True, read_only=True)
    # Override update method to set the user's password
    def update(self, instance, validated_data):
        # Hash the raw password before it reaches the model; otherwise the
        # plaintext password would be stored in the database.
        if 'password' in validated_data:
            validated_data['password'] = make_password(
                validated_data['password'])
        return super(UserSerializer, self).update(instance, validated_data)
    class Meta:
        model = User
        fields = ('id', 'firstName', 'lastName',
                  'createdAt', 'updatedAt', 'password', 'email', 'households')
        # The password (hash) must never be serialized back to clients.
        extra_kwargs = {
            'password': {'write_only': True},
        }
class CreateUserSerializer(UserSerializer):
    """User-creation serializer: hashes the password and bootstraps a default
    household owned by (and containing) the new user."""
    # Override create method to set the user's password
    @atomic
    def create(self, validated_data):
        # @atomic: user + household creation succeed or fail together.
        validated_data['password'] = make_password(validated_data['password'])
        # NOTE(review): super(UserSerializer, self) skips UserSerializer in the
        # MRO and resolves to ResourceSerializer.create. Since UserSerializer
        # defines no create(), this currently behaves the same as
        # super(CreateUserSerializer, self) -- confirm the skip is intentional.
        createdUser: User = super(UserSerializer, self).create(validated_data)
        # Create household for user with him as the owner and add him to the users list
        household_data = {
            'name': f"{createdUser.firstName}'s Haushalt",
            'owner': createdUser.id,
            'users': [createdUser.id]
        }
        household_serializer = HouseholdSerializer.HouseholdSerializer(
            data=household_data)
        household_serializer.is_valid(raise_exception=True)
        household_serializer.save()
        return createdUser
    class Meta:
        model = User
        # Unlike UserSerializer.Meta, 'households' is omitted here: the default
        # household is created inside create() above.
        fields = ('id', 'firstName', 'lastName',
                  'createdAt', 'updatedAt', 'password', 'email')
        extra_kwargs = {
            'password': {'write_only': True},
        }
class UpdateUserSerializer(ResourceSerializer):
    """Serializer for user updates: only name and password are writable.

    NOTE(review): extends ResourceSerializer directly, so unlike
    UserSerializer.update it does not hash 'password' itself -- confirm the
    hashing happens elsewhere before save.
    """
    class Meta:
        model = User
        fields = ('id', 'firstName', 'lastName', 'password')
        # Never serialize the password back to clients.
        extra_kwargs = {
            'password': {'write_only': True}
        }
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import pandas as pd
import pytest
from InnerEye.Common import common_util
from InnerEye.Common.common_util import BEST_EPOCH_FOLDER_NAME
from InnerEye.Common.fixed_paths import DEFAULT_AML_UPLOAD_DIR
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.ML.baselines_util import ComparisonBaseline, get_comparison_baselines, perform_score_comparisons
from InnerEye.ML.common import ModelExecutionMode
from Tests.ML.util import get_default_azure_config
from Tests.AfterTraining.test_after_training import get_most_recent_run_id
@pytest.mark.skipif(common_util.is_windows(), reason="Loading tk sometimes fails on Windows")
def test_perform_score_comparisons() -> None:
    """perform_score_comparisons on synthetic data: the baseline's Dice scores
    are 0.01 higher per patient, so the report should mark the current run as
    WORSE and produce exactly one comparison plot."""
    dataset_df = pd.DataFrame()
    dataset_df['subject'] = list(range(10))
    dataset_df['seriesId'] = [f"s{i}" for i in range(10)]
    dataset_df['institutionId'] = ["xyz"] * 10
    metrics_df = pd.DataFrame()
    metrics_df['Patient'] = list(range(10))
    metrics_df['Structure'] = ['appendix'] * 10
    metrics_df['Dice'] = [0.5 + i * 0.02 for i in range(10)]
    # Baseline scores: uniformly 0.01 better than the current run.
    comparison_metrics_df = pd.DataFrame()
    comparison_metrics_df['Patient'] = list(range(10))
    comparison_metrics_df['Structure'] = ['appendix'] * 10
    comparison_metrics_df['Dice'] = [0.51 + i * 0.02 for i in range(10)]
    comparison_name = "DefaultName"
    comparison_run_rec_id = "DefaultRunRecId"
    baseline = ComparisonBaseline(comparison_name, dataset_df, comparison_metrics_df, comparison_run_rec_id)
    result = perform_score_comparisons(dataset_df, metrics_df, [baseline])
    assert result.did_comparisons
    # NOTE(review): the 5-line count and header strings mirror the current
    # Wilcoxon report formatting in baselines_util -- update them together.
    assert len(result.wilcoxon_lines) == 5
    assert result.wilcoxon_lines[0] == f"Run 1: {comparison_name}"
    assert result.wilcoxon_lines[1] == "Run 2: CURRENT"
    assert result.wilcoxon_lines[3].find("WORSE") > 0
    assert list(result.plots.keys()) == [f"{comparison_name}_vs_CURRENT"]
@pytest.mark.after_training_single_run
def test_get_comparison_data(test_output_dirs: OutputFolderForTests) -> None:
    """Fetch comparison baselines from the most recent AzureML run and check
    the single requested baseline comes back under its given name. Requires a
    completed training run (hence the after_training_single_run marker)."""
    azure_config = get_default_azure_config()
    comparison_name = "DefaultName"
    # Path inside the run's outputs: <upload dir>/<best epoch>/<Test split>.
    comparison_path = get_most_recent_run_id() + \
        f"/{DEFAULT_AML_UPLOAD_DIR}/{BEST_EPOCH_FOLDER_NAME}/{ModelExecutionMode.TEST.value}"
    baselines = get_comparison_baselines(test_output_dirs.root_dir,
                                         azure_config, [(comparison_name, comparison_path)])
    assert len(baselines) == 1
    assert baselines[0].name == comparison_name
|
class Config(object):
    """Base configuration shared by all environments."""
    # NOTE(review): hard-coded, trivially guessable secret key committed to
    # source -- load it from the environment in real deployments.
    SECRET_KEY = '123456'
class DevelopmentConfig(Config):
    """Development environment configuration."""
    DEBUG = True  # enable debug mode
    # Database settings
    # SQLAlchemy connection URL
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:mysql@localhost:3306/falsk_text1?charset=utf8'
    # Disable modification tracking of database objects
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # SMTP server configuration below
    MAIL_SERVER = 'smtp.126.com'  # mail server host name or IP address
    MAIL_PORT = 25  # mail server port
    MAIL_USE_TLS = True  # enable transport layer security
    # Note: this enables TLS (transport layer security), not SSL -- hence port 25
    MAIL_USERNAME = 'yanjianglong@126.com'  # your mail account user name
    # NOTE(review): credentials committed in source; move them to environment variables.
    MAIL_PASSWORD = 'q1w2e3'  # mail account password (the provider's authorization code!)
class ProductionConfig(Config):
    """Production environment configuration."""
    DEBUG = False  # debug mode disabled in production
# Environment name -> config class, used when creating the application.
APPCONFIG = {
    'development': DevelopmentConfig,
    'production': ProductionConfig
}
|
# -*- coding: utf-8 -*-
"""Convert fiber position result from 3DTIMON model to VTK."""
import sys
import meshio
import numpy as np
if __name__ == "__main__":
    # Input *.unv path is the last CLI argument; the output keeps the same
    # name with a .vtk extension.
    in_filename = sys.argv[-1]
    out_filename = in_filename.replace(".unv", ".vtk")
    print("Converting %s" % in_filename)
    with open(in_filename) as infile:
        nodes = []   # node coordinates, in file order (ids implicit by position)
        lines = []   # 2-node line elements as 0-based node index pairs
        nodeid = 0   # NOTE(review): never used -- candidate for removal
        while True:
            line = infile.readline()
            if not line:
                break
            # This is a quick and dirty implementation and not feasible for
            # general *.unv files
            if line.split()[-1] == "7":
                # Record ending in "7": the next line holds x y z coordinates
                # of one node -- presumably specific to 3DTIMON output; confirm
                # against the actual files before generalizing.
                x, y, z = infile.readline().split()
                nodes.append([float(x), float(y), float(z)])
            elif line.split()[-1] == "2":
                # Record ending in "2": skip one line, then read a two-node
                # element; node numbers are converted from 1-based to 0-based.
                infile.readline()
                n1, n2 = infile.readline().split()
                lines.append([int(n1) - 1, int(n2) - 1])
            else:
                # Anything else: consume the record's data line and move on.
                infile.readline()
    points = np.array(nodes)
    cells = [("line", np.array(lines))]
    meshio.write_points_cells(
        out_filename,
        points,
        cells,
    )
|
class HipsSkyMaps():
    """Lookup of HiPS sky-map survey identifiers by archive name."""
    # Known archive name -> HiPS survey identifier.
    __sky_map_url = {
        "CFHT": "CDS/P/CFHTLS/W/Color/ugi"
    }

    @classmethod
    def getMap(cls, archive):
        """Return the HiPS identifier for *archive*, or None when unknown.

        Fix: the previous staticmethod instantiated the class on every call
        just to reach the class attribute; a classmethod reads it directly,
        and dict.get replaces the membership-test-then-index pattern.
        Still callable as HipsSkyMaps.getMap(name), as before.
        """
        return cls.__sky_map_url.get(archive)
import numpy as np
import os,sys
import math
import time
# Working directory where generated problem instances (city arrays) are stored.
work_path='./work/'
def cross(s1, s2):
    """Order crossover for tours: keep the first half of *s1*, then append
    *s2*'s genes (its second half first, then its first half), skipping any
    gene already present. Returns the child as a numpy array."""
    half = len(s1) // 2
    child = [s1[i] for i in range(half)]
    scan_order = list(range(half, len(s1))) + list(range(half))
    for idx in scan_order:
        gene = s2[idx]
        if gene not in child:
            child.append(gene)
    return np.array(child)
def mutation(s):
    """Swap two distinct random positions of *s* in place and return it."""
    i, j = np.random.permutation(len(s))[:2]
    s[i], s[j] = s[j], s[i]
    return s


def cross_and_mutation(s1, s2):
    """Produce one child: crossover of the two parents, then a random swap."""
    return mutation(cross(s1, s2))
def next_generation(solutions,matrix):
    """Breed every ordered pair of distinct parents, then keep the best
    len(solutions) individuals (parents included) ranked by tour cost."""
    length=len(solutions)
    tmp=[]
    for i in range(length):
        for j in range(length):
            if i!=j:
                tmp.append(cross_and_mutation(solutions[i],solutions[j]))
    tmp=np.array(tmp)
    #print("tmp.shape",tmp.shape)
    #print(solutions.shape)
    # Elitist selection: parents compete with children, population size stays constant.
    solutions=np.vstack((solutions,tmp))
    sorted_solutions=sorted(solutions,key=lambda x:cost(x,matrix))[:length]
    return np.array(sorted_solutions)
def iteration(solutions, matrix, max_iteration=100, max_time=180):
    """Evolve *solutions* until the generation cap or the wall-clock budget
    (seconds) is hit, whichever comes first.

    Returns the best individual found (solutions come back cost-sorted from
    next_generation, so index 0 is the minimum-cost tour).
    Fix: the loop counter was named `iter`, shadowing the builtin.
    """
    start = time.time()
    elapsed = 0
    generation = 0
    while generation < max_iteration and elapsed < max_time:
        solutions = next_generation(solutions, matrix)
        generation += 1
        elapsed = time.time() - start
    return solutions[0]
def create_city(low=0, up=90):
    """One random (x, y) coordinate pair drawn uniformly from [low, up)."""
    return np.random.uniform(low, up, size=2)


def create_cities(num=100):
    """Random city coordinates of shape (num, 2), uniform over [0, 90)."""
    return np.random.uniform(0, 90, size=(num, 2))
def save_data(num=100):
    """Generate *num* random cities and store them as cities.npy under work_path.

    Fix: the old code called os.listdir(work_path) before the directory was
    guaranteed to exist (FileNotFoundError on first run) and then checked for
    the literal name 'work' *inside* ./work/ rather than checking whether the
    directory itself exists.
    """
    cities = create_cities(num)
    if not os.path.isdir(work_path):
        os.makedirs(work_path)
    np.save(work_path + 'cities.npy', cities)


def load_data(filepath):
    """Load a previously saved city-coordinate array from *filepath*."""
    return np.load(filepath)
def dist(city1, city2):
    """Great-circle distance in kilometres between two (lon, lat) degree points.

    Spherical law of cosines, in the classic "degrees * 60 * 1.1515" form:
    the central angle (in degrees) times 60 gives nautical-mile minutes,
    * 1.1515 converts to statute miles, * 1.609344 to kilometres.

    Fixes: the longitude difference `theta` was previously passed to cos() in
    degrees while both latitudes were converted with math.radians(), mixing
    units and producing wrong distances; it is now converted too. The cosine
    is also clamped to [-1, 1] so float round-off (e.g. for identical points)
    cannot push acos outside its domain.
    """
    theta = city1[0] - city2[0]
    lat1 = math.radians(city1[1])
    lat2 = math.radians(city2[1])
    cos_angle = (math.sin(lat1) * math.sin(lat2)
                 + math.cos(lat1) * math.cos(lat2) * math.cos(math.radians(theta)))
    cos_angle = max(-1.0, min(1.0, cos_angle))
    angle_deg = math.degrees(math.acos(cos_angle))
    return angle_deg * 60 * 1.1515 * 1.609344


def distance_matrix(cities):
    """Symmetric pairwise distance matrix for *cities*, as nested lists."""
    num = len(cities)
    matrix = np.zeros((num, num))
    for i in range(num):
        for j in range(i + 1, num):
            matrix[i][j] = matrix[j][i] = dist(cities[i], cities[j])
    return matrix.tolist()
def cost(solution, matrix):
    """Total length of the closed tour that visits cities in *solution* order
    and returns to the start."""
    route = list(solution) + [solution[0]]
    return sum(matrix[a][b] for a, b in zip(route, route[1:]))
def random_solution(num=100):
    """A random tour: a permutation of range(num), as a plain Python list."""
    return np.random.permutation(num).tolist()


def init_solutions(num=10, cities=100):
    """Build *num* independent random tours over *cities* cities."""
    return [random_solution(cities) for _ in range(num)]
def mutation_1(solution):
    """Reverse a random sub-segment of *solution* in place (2-opt style move)
    and return the same list."""
    n = len(solution)
    start = np.random.randint(0, n - 2)
    end = np.random.randint(start + 1, n)
    solution[start:end] = solution[start:end][::-1]
    return solution


def mutation_2(solution):
    """Rotate a random sub-segment of *solution* left by one position in place
    (its first element is re-inserted at the segment's end) and return it."""
    n = len(solution)
    start = np.random.randint(0, n - 2)
    end = np.random.randint(start + 1, n)
    moved = solution[start]
    solution[start:end - 1] = solution[start + 1:end]
    solution[end - 1] = moved
    return solution
#-----used for create a new TSP------------#
if __name__=="__main__":
    # Standalone entry point: generate a fresh 100-city problem instance and
    # persist it for the solver to pick up.
    # NOTE(review): assumes ./work/ already exists -- np.save fails otherwise.
    #tos.clean_work_path(work_path)
    cities=create_cities(100)
    np.save(work_path+'cities.npy',cities)
    # file_name='new_job.txt'
    # with open(file_name,'w') as ff:
    #     ff.write('cities.npy')
    #     hdfs_set_file('./',work_path,file_name)
|
import matplotlib.pyplot as plt
import numpy as np
import math
def ParityCheckMatrix(k):
    """Build the parity-check matrix H for a simple product-style code.

    For k message bits with m = floor(sqrt(k)), the code length is
    n = k + 2*m + 1 and H has n - k rows:
      * rows 0..m-1 each check a contiguous block of m+1 positions,
      * rows m..2m-1 each check positions i, i+(m+1), i+2(m+1), ...,
      * the final row checks every position (overall parity).
    Returns a float numpy array of shape (n - k, n).

    Fix: the original bound its result to a local also named
    `ParityCheckMatrix`, shadowing the function itself; locals are renamed
    (H, offset, row_len) and the final all-ones row uses a slice assignment.
    """
    m = int(math.sqrt(k))
    row_len = m + 1
    n = int(k + 2 * m + 1)
    H = np.zeros((n - k, n))
    # Block checks: row i covers [offset, offset + m + 1).
    offset = 0
    for i in range(m):
        for j in range(offset, offset + row_len):
            H[i][j] = 1
        offset += row_len
    # Strided checks: row m+i covers every (m+1)-th position starting at i.
    for i in range(m, 2 * m):
        for j in range(i - m, n, row_len):
            H[i][j] = 1
    # Overall parity: last row checks all n positions.
    H[n - k - 1, :] = 1
    return H
# Demo: build and display the parity-check matrix for k = 4 message bits.
z=ParityCheckMatrix(4)
print(z)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 28 22:01:18 2017
@author: Nadiar
"""
#secretWord = 'apple'
#lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores ("_ ") that
      represents what letters in secretWord have been guessed so far.

    Rewritten iteratively: the recursive version built O(n^2) intermediate
    strings and raised IndexError on an empty secretWord; this version joins
    in one pass and returns "" for an empty word.
    '''
    return ''.join(c if c in lettersGuessed else "_ " for c in secretWord)
#print(getGuessedWord(secretWord, lettersGuessed)) |
import csv
import dataclasses
import decimal
import os
import tempfile
import zipfile
from urllib.request import urlretrieve
@dataclasses.dataclass
class Wgs84Coordinates:
    """A WGS84 position in decimal degrees."""
    longitude: decimal.Decimal
    latitude: decimal.Decimal


@dataclasses.dataclass
class Lv95Coordinates:
    """A Swiss LV95 position (E/N coordinates, in metres)."""
    E: decimal.Decimal
    N: decimal.Decimal


@dataclasses.dataclass
class Location:
    """A named locality together with canton, municipality and LV95 position."""
    official_name: str
    canton: str
    municipality: str
    coordinates: Lv95Coordinates


def lv95_to_wgs84(lv95_coordinates: Lv95Coordinates) -> Wgs84Coordinates:
    """Convert Swiss LV95 coordinates to approximate WGS84 degrees.

    Implements swisstopo's polynomial approximation, see
    https://www.swisstopo.admin.ch/content/swisstopo-internet/fr/topics/survey/reference-systems/switzerland/_jcr_content/contentPar/tabs/items/dokumente_publikatio/tabPar/downloadlist/downloadItems/516_1459343097192.download/ch1903wgs84_f.pdf
    """
    D = decimal.Decimal
    # Offsets from the projection centre (Bern), expressed in 1000 km units.
    y = (lv95_coordinates.E - 2600000) / 1000000
    x = (lv95_coordinates.N - 1200000) / 1000000
    lon_poly = (D("2.6779094")
                + D("4.728982") * y
                + D("0.791484") * y * x
                + D("0.1306") * y * x ** 2
                - D("0.0436") * y ** 3)
    lat_poly = (D("16.9023892")
                + D("3.238272") * x
                - D("0.270978") * y ** 2
                - D("0.002528") * x ** 2
                - D("0.0447") * y ** 2 * x
                - D("0.0140") * x ** 3)
    # The polynomials yield values in units of 10000''; * 100 / 36 -> degrees.
    return Wgs84Coordinates(
        longitude=lon_poly * 100 / 36,
        latitude=lat_poly * 100 / 36,
    )
class ZipcodesDatabase:
    """
    Database of swiss zipcodes.
    The main methods are :meth:`get_locations` and :meth:`get_location`,
    that will download the zipcodes database if necessary, parse it and return
    a `Location` instance.
    Use it like this:
    >>> zd = ZipcodesDatabase('/tmp/zipcodes')
    >>> zd.get_location(1003)  # doctest: +ELLIPSIS
    Location(official_name='Lausanne', canton='VD', municipality='Lausanne', coordinates=...)
    The CSV file has the following fields, in this order:
    Ortschaftsname nom officiel de la localité
    PLZ code postal (NPA) à quatre chiffres,
    compris entre 1000 et 9999
    Zusatzziffer La valeur des chiffres supplémentaires est comprise entre
    0 et 99. Combinés à l'attribut PLZ, ils donnent
    naissance au NPA6.
    Gemeindename nom de la commune principale de la localité
    BFS-Nr numéro de la commune principale de la localité
    Kantonskürzel abréviation du canton dans lequel la localité se trouve
    majoritairement
    E la coordonnée Est indique la position d’un point
    quelconque au sein du périmètre de la localité ou du code
    postal.
    N la coordonnée Nord indique la position d’un point
    quelconque au sein du périmètre de la localité ou du code
    postal.
    """
    DOWNLOAD_URL = "http://data.geo.admin.ch/ch.swisstopo-vd.ortschaftenverzeichnis_plz/PLZO_CSV_LV95.zip" # NOQA
    def __init__(self, file_path):
        """
        ``file_path`` is the path to the CSV file containing the zipcodes. You
        can put an inexistent file here, in which case the file will be
        downloaded from the internets.
        """
        self.file_path = file_path
        # Lazily-built cache: {zipcode (int): Location}; filled by get_locations().
        self.zipcode_mapping = {}
    def download(self, overwrite=True):
        """
        Download the zipcodes CSV file. If ``overwrite`` is set to False, the
        file won't be downloaded if it already exists.
        """
        if overwrite or not os.path.exists(self.file_path):
            # Download to a temp file first, then extract the CSV out of the
            # ZIP archive into the final location.
            _, f = tempfile.mkstemp()
            try:
                urlretrieve(self.DOWNLOAD_URL, f)
                extract_csv(f, self.file_path)
            finally:
                os.remove(f)
    def get_locations(self):
        """
        Return the zipcode mapping as a ``{zipcode: Location}`` dict.
        The zipcodes file will be downloaded if necessary.
        """
        if self.zipcode_mapping:
            return self.zipcode_mapping
        self.download(overwrite=False)
        zipcode_mapping = {}
        with open(self.file_path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=";")
            # Skip header
            next(csv_reader)
            # NOTE: keyed by the 4-digit PLZ only; when several rows share a
            # zipcode, the last row read wins.
            for line in csv_reader:
                zipcode_mapping[int(line[1])] = Location(
                    official_name=line[0],
                    canton=line[5],
                    municipality=line[3],
                    coordinates=Lv95Coordinates(
                        E=decimal.Decimal(line[6]), N=decimal.Decimal(line[7])
                    ),
                )
        self.zipcode_mapping = zipcode_mapping
        return self.zipcode_mapping
    def get_location(self, zipcode):
        """
        Return the location of the given zipcode. Raises :class:`KeyError`
        if the zipcode doesn't exist in the mapping.
        """
        return self.get_locations()[zipcode]
    def get_zipcodes_for_municipality(self, municipality):
        """
        Return the list of zipcodes belonging to the given municipality name.
        """
        zipcodes = [
            zipcode
            for zipcode, location in self.get_locations().items()
            if location.municipality == municipality
        ]
        return zipcodes
    def get_zipcodes_for_canton(self, canton):
        """
        Return the list of zipcodes for the given canton code.
        """
        zipcodes = [
            zipcode
            for zipcode, location in self.get_locations().items()
            if location.canton == canton
        ]
        return zipcodes
    def get_cantons(self):
        """
        Return the list of unique cantons, sorted by name.
        """
        return sorted(
            list(set([location.canton for location in self.get_locations().values()]))
        )
    def get_municipalities(self):
        """
        Return the list of unique municipalities, sorted by name.
        """
        return sorted(
            list(
                set(
                    [
                        location.municipality
                        for location in self.get_locations().values()
                    ]
                )
            )
        )
def extract_csv(zip_path, destination):
    """
    Extract the first CSV file found in the given ``zip_path`` ZIP file to the
    ``destination`` file. Raises :class:`LookupError` if no CSV file can be
    found in the ZIP.
    """
    with zipfile.ZipFile(zip_path) as archive:
        csv_member = next(
            (name for name in archive.namelist() if name.endswith(".csv")), None
        )
        if not csv_member:
            raise LookupError("Couldn't find any CSV file in the archive")
        with archive.open(csv_member) as src, open(destination, "wb") as dst:
            dst.write(src.read())
|
# Generated by Django 2.0.6 on 2018-08-12 12:16
from django.db import migrations
class Migration(migrations.Migration):
    """Appears to be a merge migration: it depends on the two divergent
    0005/0006 branches so Django has a single leaf, and performs no schema
    changes itself (empty operations)."""
    dependencies = [
        ('DublinBusTest', '0005_busrouteinfojoined_weatherforecast'),
        ('DublinBusTest', '0006_busrouteinfojoined'),
    ]
    operations = [
    ]
|
"""
This test script is adopted from:
https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
"""
import pkgutil
import types
import importlib
import warnings
import scipy
def check_dir(module, module_name=None):
    """Returns a mapping of all objects with the wrong __module__ attribute."""
    if module_name is None:
        module_name = module.__name__
    mismatched = {}
    for attr_name in dir(module):
        obj = getattr(module, attr_name)
        has_identity = hasattr(obj, '__module__') and hasattr(obj, '__name__')
        if has_identity and obj.__module__ != module_name:
            mismatched[attr_name] = '{}.{}'.format(obj.__module__, obj.__name__)
    return mismatched
def test_scipy_namespace():
    """Every object reachable from the top-level scipy namespace must report a
    matching __module__, except the explicitly allow-listed entries."""
    # None of these objects are publicly documented to be part of the main
    # SciPy namespace (some are useful though, others need to be cleaned up)
    undocumented = {
        'LowLevelCallable': 'scipy._lib._ccallback.LowLevelCallable'
    }
    # We override dir to not show these members
    allowlist = undocumented
    bad_results = check_dir(scipy)
    # pytest gives better error messages with the builtin assert than with
    # assert_equal
    assert bad_results == allowlist
def test_dir_testing():
    """Assert that output of dir has only one "testing/tester"
    attribute without duplicate"""
    # A duplicate in dir() would mean the same attribute name is exposed twice.
    assert len(dir(scipy)) == len(set(dir(scipy)))
# Historically SciPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
PUBLIC_MODULES = ["scipy." + s for s in [
"cluster",
"cluster.vq",
"cluster.hierarchy",
"constants",
"fft",
"fftpack",
"integrate",
"interpolate",
"io",
"io.arff",
"io.matlab",
"io.wavfile",
"linalg",
"linalg.blas",
"linalg.cython_blas",
"linalg.lapack",
"linalg.cython_lapack",
"linalg.interpolative",
"misc",
"ndimage",
"odr",
"optimize",
"signal",
"signal.windows",
"sparse",
"sparse.linalg",
"sparse.csgraph",
"spatial",
"spatial.distance",
"spatial.transform",
"special",
"stats",
"stats.distributions",
"stats.mstats",
"stats.qmc",
]]
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
# of underscores) but should not be used. For many of those modules the
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
# These private modules support will be removed in SciPy v2.0.0
PRIVATE_BUT_PRESENT_MODULES = [
'scipy.constants.codata',
'scipy.constants.constants',
'scipy.fftpack.basic',
'scipy.fftpack.convolve',
'scipy.fftpack.helper',
'scipy.fftpack.pseudo_diffs',
'scipy.fftpack.realtransforms',
'scipy.integrate.dop',
'scipy.integrate.lsoda',
'scipy.integrate.vode',
'scipy.interpolate.dfitpack',
'scipy.interpolate.fitpack',
'scipy.interpolate.fitpack2',
'scipy.interpolate.interpnd',
'scipy.interpolate.interpolate',
'scipy.interpolate.ndgriddata',
'scipy.interpolate.polyint',
'scipy.interpolate.rbf',
'scipy.io.arff.arffread',
'scipy.io.harwell_boeing',
'scipy.io.idl',
'scipy.io.mmio',
'scipy.io.netcdf',
'scipy.linalg.basic',
'scipy.linalg.decomp',
'scipy.linalg.decomp_cholesky',
'scipy.linalg.decomp_lu',
'scipy.linalg.decomp_qr',
'scipy.linalg.decomp_schur',
'scipy.linalg.decomp_svd',
'scipy.linalg.flinalg',
'scipy.linalg.matfuncs',
'scipy.linalg.misc',
'scipy.linalg.special_matrices',
'scipy.misc.common',
'scipy.misc.doccer',
'scipy.ndimage.filters',
'scipy.ndimage.fourier',
'scipy.ndimage.interpolation',
'scipy.ndimage.measurements',
'scipy.ndimage.morphology',
'scipy.odr.models',
'scipy.odr.odrpack',
'scipy.optimize.cobyla',
'scipy.optimize.cython_optimize',
'scipy.optimize.lbfgsb',
'scipy.optimize.linesearch',
'scipy.optimize.minpack',
'scipy.optimize.minpack2',
'scipy.optimize.moduleTNC',
'scipy.optimize.nonlin',
'scipy.optimize.optimize',
'scipy.optimize.slsqp',
'scipy.optimize.tnc',
'scipy.optimize.zeros',
'scipy.signal.bsplines',
'scipy.signal.filter_design',
'scipy.signal.fir_filter_design',
'scipy.signal.lti_conversion',
'scipy.signal.ltisys',
'scipy.signal.signaltools',
'scipy.signal.spectral',
'scipy.signal.spline',
'scipy.signal.waveforms',
'scipy.signal.wavelets',
'scipy.signal.windows.windows',
'scipy.sparse.base',
'scipy.sparse.bsr',
'scipy.sparse.compressed',
'scipy.sparse.construct',
'scipy.sparse.coo',
'scipy.sparse.csc',
'scipy.sparse.csr',
'scipy.sparse.data',
'scipy.sparse.dia',
'scipy.sparse.dok',
'scipy.sparse.extract',
'scipy.sparse.lil',
'scipy.sparse.linalg.dsolve',
'scipy.sparse.linalg.eigen',
'scipy.sparse.linalg.interface',
'scipy.sparse.linalg.isolve',
'scipy.sparse.linalg.matfuncs',
'scipy.sparse.sparsetools',
'scipy.sparse.sputils',
'scipy.spatial.ckdtree',
'scipy.spatial.kdtree',
'scipy.spatial.qhull',
'scipy.spatial.transform.rotation',
'scipy.special.add_newdocs',
'scipy.special.basic',
'scipy.special.cython_special',
'scipy.special.orthogonal',
'scipy.special.sf_error',
'scipy.special.specfun',
'scipy.special.spfun_stats',
'scipy.stats.biasedurn',
'scipy.stats.contingency',
'scipy.stats.kde',
'scipy.stats.morestats',
'scipy.stats.mstats_basic',
'scipy.stats.mstats_extras',
'scipy.stats.mvn',
'scipy.stats.statlib',
'scipy.stats.stats',
]
def is_unexpected(name):
    """Check if this needs to be considered."""
    # Private modules, test packages and setup files are never flagged.
    if any(marker in name for marker in ('._', '.tests', '.setup')):
        return False
    # Anything already classified as public or known-private is expected.
    return name not in PUBLIC_MODULES and name not in PRIVATE_BUT_PRESENT_MODULES
# Modules that exist on disk but are deliberately exempt from the
# public-module check (build/test plumbing, not part of the documented API).
SKIP_LIST = [
    'scipy.conftest',
    'scipy.version',
]
def test_all_modules_are_expected():
    """
    Test that we don't add anything that looks like a new public module by
    accident. Check is based on filenames.
    """
    unexpected = [
        modname
        for _, modname, ispkg in pkgutil.walk_packages(
            path=scipy.__path__, prefix=scipy.__name__ + '.', onerror=None)
        if is_unexpected(modname) and modname not in SKIP_LIST
    ]
    # Any hit is a brand-new name.  If it is intentional, add it to
    # PUBLIC_MODULES; otherwise give the module a leading underscore.  We do
    # not expect PRIVATE_BUT_PRESENT_MODULES to ever grow.
    if unexpected:
        raise AssertionError(f'Found unexpected modules: {unexpected}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
# (These look like names re-exported into the scipy namespace from numpy —
# verify before removing any entry.)
SKIP_LIST_2 = [
    'scipy.char',
    'scipy.rec',
    'scipy.emath',
    'scipy.math',
    'scipy.random',
    'scipy.ctypeslib',
    'scipy.ma'
]
def test_all_modules_are_expected_2():
    """
    Method checking all objects. The pkgutil-based method in
    `test_all_modules_are_expected` does not catch imports into a namespace,
    only filenames.
    """
    def find_unexpected_members(mod_name):
        """Collect sub-modules reachable as public attributes of mod_name."""
        module = importlib.import_module(mod_name)
        # Respect an explicit __all__ when present, else fall back to dir().
        objnames = module.__all__ if hasattr(module, '__all__') else dir(module)
        found = []
        for objname in objnames:
            if objname.startswith('_'):
                continue
            fullobjname = mod_name + '.' + objname
            if (isinstance(getattr(module, objname), types.ModuleType)
                    and is_unexpected(fullobjname)
                    and fullobjname not in SKIP_LIST_2):
                found.append(fullobjname)
        return found

    unexpected_members = find_unexpected_members("scipy")
    for modname in PUBLIC_MODULES:
        unexpected_members.extend(find_unexpected_members(modname))
    if unexpected_members:
        raise AssertionError("Found unexpected object(s) that look like "
                             "modules: {}".format(unexpected_members))
def test_api_importable():
    """
    Check that all submodules listed higher up in this file can be imported

    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
    simply need to be removed from the list (deprecation may or may not be
    needed - apply common sense).
    """
    def check_importable(module_name):
        """Return True when the module imports without error."""
        try:
            importlib.import_module(module_name)
        except (ImportError, AttributeError):
            return False
        return True

    module_names = [name for name in PUBLIC_MODULES
                    if not check_importable(name)]
    if module_names:
        raise AssertionError("Modules in the public API that cannot be "
                             "imported: {}".format(module_names))

    # Private-but-present modules may emit deprecation/import warnings;
    # record them so the import attempt itself stays quiet.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', category=DeprecationWarning)
        warnings.filterwarnings('always', category=ImportWarning)
        for module_name in PRIVATE_BUT_PRESENT_MODULES:
            if not check_importable(module_name):
                module_names.append(module_name)
    if module_names:
        raise AssertionError("Modules that are not really public but looked "
                             "public and can not be imported: "
                             "{}".format(module_names))
|
"""
resource_descriptor_models.py
==============================
"""
from sqlalchemy import ARRAY, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from literature.database.base import Base
class ResourceDescriptorPageModel(Base):
    """ORM row for a named page (URL) belonging to a resource descriptor."""
    __tablename__ = "resource_descriptor_pages"
    # Surrogate primary key.
    resource_descriptor_pages_id = Column(
        Integer,
        primary_key=True,
        autoincrement=True
    )
    # Display name of the page; not unique across descriptors.
    name = Column(
        String,
        unique=False,
        nullable=False
    )
    # Page URL (presumably a template; confirm with callers).
    url = Column(
        String,
        unique=False,
        nullable=False
    )
    # Owning descriptor; rows are removed when the parent is deleted.
    resource_descriptor_id = Column(
        Integer,
        ForeignKey("resource_descriptors.resource_descriptor_id",
                   ondelete="CASCADE"),
        index=True
    )
    # Back-reference to the parent ResourceDescriptorModel.
    resource_descriptor = relationship(
        "ResourceDescriptorModel",
        back_populates="pages"
    )
class ResourceDescriptorModel(Base):
    """ORM row describing an external resource (database/site) and its pages."""
    __tablename__ = "resource_descriptors"
    # Surrogate primary key.
    resource_descriptor_id = Column(
        Integer,
        primary_key=True,
        autoincrement=True
    )
    # Child pages are eagerly loaded and fully owned: deleting the
    # descriptor (or orphaning a page) deletes the page rows.
    pages = relationship(
        "ResourceDescriptorPageModel",
        lazy="joined",
        back_populates="resource_descriptor",
        cascade="all, delete, delete-orphan"
    )
    # Unique short prefix identifying the database (e.g. a CURIE prefix —
    # confirm against data).
    db_prefix = Column(
        String,
        nullable=False,
        unique=True
    )
    # Human-readable name of the resource.
    name = Column(
        String(),
        unique=False,
        nullable=True
    )
    # Alternative prefixes/names for the same resource.
    aliases = Column(
        ARRAY(String()),
        nullable=True
    )
    # Example global identifier for documentation purposes.
    example_gid = Column(
        String,
        nullable=True
    )
    # Regex/pattern that valid global identifiers must match.
    gid_pattern = Column(
        String,
        nullable=True
    )
    # URL used when no specific page applies.
    default_url = Column(
        String(),
        unique=False,
        nullable=True
    )
|
# -*- coding: utf-8 -*-
"""
Click Parameter Types - URL
"""
from click import ParamType
from validators import url as url_validator
class UrlParam(ParamType):
    """Validate that a click parameter is a URL.

    Examples:
        'https://example.com/?test=test'
    """

    name = 'URL'

    def convert(self, value: str, param, context) -> str:
        """Validate *value* as a URL and return it unchanged.

        Arguments:
            value (str): The URL
            param: The click parameter object (unused; required by the API)
            context: The click context (unused; required by the API)

        Returns:
            str: The validated URL, unmodified.

        Raises:
            click.UsageError: raised via ``self.fail`` when validation fails.
        """
        # Coerce to str in case click hands us a non-string value.
        url = str(value)
        # self.fail() raises, so reaching the return means the URL is valid.
        if not url_validator(url):
            # !r already adds quotes around the value; the previous message
            # wrapped it in a second, literal pair of quotes.
            self.fail(f'Could not validate {value!r} as a URL')
        return url
|
__author__ = 'max'
from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
from torch.nn import Parameter
from macow.flows.flow import Flow
from macow.flows.actnorm import ActNorm2dFlow
from macow.flows.conv import Conv1x1Flow
from macow.flows.nice import NICE
from macow.utils import squeeze2d, unsqueeze2d, split2d, unsplit2d
class Prior(Flow):
    """
    prior for multi-scale architecture

    Pipeline: ActNorm2d -> invertible 1x1 convolution -> NICE coupling.
    Every method returns ``(output, log-determinant)`` where the logdet is
    accumulated across the three sub-flows.
    """
    def __init__(self, in_channels, hidden_channels=None, s_channels=None, scale=True, inverse=False, factor=2):
        super(Prior, self).__init__(inverse)
        self.actnorm = ActNorm2dFlow(in_channels, inverse=inverse)
        self.conv1x1 = Conv1x1Flow(in_channels, inverse=inverse)
        self.nice = NICE(in_channels, hidden_channels=hidden_channels, s_channels=s_channels, scale=scale, inverse=inverse, factor=factor)
        # Expose the coupling split size so callers can size the latent halves.
        self.z1_channels = self.nice.z1_channels

    def sync(self):
        # Keep the cached inverse of the 1x1 convolution weight up to date.
        self.conv1x1.sync()

    @overrides
    def forward(self, input: torch.Tensor, s=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply actnorm -> conv1x1 -> NICE; return output and summed logdet."""
        out, logdet_accum = self.actnorm.forward(input)
        out, logdet = self.conv1x1.forward(out)
        logdet_accum = logdet_accum + logdet
        out, logdet = self.nice.forward(out, s=s)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    # Fix: @overrides was missing here although forward/init carry it and the
    # sibling GlowStep decorates all three methods.
    @overrides
    def backward(self, input: torch.Tensor, s=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Invert forward: NICE -> conv1x1 -> actnorm, in reverse order."""
        out, logdet_accum = self.nice.backward(input, s=s)
        out, logdet = self.conv1x1.backward(out)
        logdet_accum = logdet_accum + logdet
        out, logdet = self.actnorm.backward(out)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    @overrides
    def init(self, data, s=None, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Data-dependent initialization pass; mirrors forward()."""
        out, logdet_accum = self.actnorm.init(data, init_scale=init_scale)
        out, logdet = self.conv1x1.init(out, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        out, logdet = self.nice.init(out, s=s, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum
class GlowStep(Flow):
    """
    A step of Glow. A Conv1x1 followed with a NICE

    Pipeline: ActNorm2d -> invertible 1x1 convolution -> NICE coupling.
    """
    # NOTE(review): the `slice` parameter shadows the builtin of the same
    # name; it is forwarded verbatim to NICE.
    def __init__(self, in_channels, hidden_channels=512, s_channels=0, scale=True, inverse=False,
                 coupling_type='conv', slice=None, heads=1, pos_enc=True, dropout=0.0):
        super(GlowStep, self).__init__(inverse)
        self.actnorm = ActNorm2dFlow(in_channels, inverse=inverse)
        self.conv1x1 = Conv1x1Flow(in_channels, inverse=inverse)
        self.coupling = NICE(in_channels, hidden_channels=hidden_channels, s_channels=s_channels,
                             scale=scale, inverse=inverse, type=coupling_type, slice=slice, heads=heads, pos_enc=pos_enc, dropout=dropout)

    def sync(self):
        # Keep the cached inverse of the 1x1 convolution weight up to date.
        self.conv1x1.sync()

    @overrides
    def forward(self, input: torch.Tensor, s=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply actnorm -> conv1x1 -> coupling; return (output, summed logdet)."""
        out, logdet_accum = self.actnorm.forward(input)
        out, logdet = self.conv1x1.forward(out)
        logdet_accum = logdet_accum + logdet
        out, logdet = self.coupling.forward(out, s=s)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    @overrides
    def backward(self, input: torch.Tensor, s=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Invert forward: coupling -> conv1x1 -> actnorm."""
        out, logdet_accum = self.coupling.backward(input, s=s)
        out, logdet = self.conv1x1.backward(out)
        logdet_accum = logdet_accum + logdet
        out, logdet = self.actnorm.backward(out)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    @overrides
    def init(self, data, s=None, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Data-dependent initialization pass; mirrors forward()."""
        out, logdet_accum = self.actnorm.init(data, init_scale=init_scale)
        out, logdet = self.conv1x1.init(out, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        out, logdet = self.coupling.init(out, s=s, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum
class GlowTopBlock(Flow):
    """
    Glow Block (squeeze at beginning)

    Top level of the multi-scale hierarchy: a plain stack of GlowSteps with
    no split at the end (the caller performs the squeeze).
    """
    def __init__(self, num_steps, in_channels, scale=True, inverse=False):
        super(GlowTopBlock, self).__init__(inverse)
        steps = [GlowStep(in_channels, scale=scale, inverse=inverse) for _ in range(num_steps)]
        self.steps = nn.ModuleList(steps)

    def sync(self):
        # Propagate cached-weight synchronisation to every step.
        for step in self.steps:
            step.sync()

    @overrides
    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the steps in order; return (output, summed logdet)."""
        out = input
        # [batch]
        logdet_accum = input.new_zeros(input.size(0))
        for step in self.steps:
            out, logdet = step.forward(out)
            logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    @overrides
    def backward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Invert forward by running the steps in reverse order."""
        logdet_accum = input.new_zeros(input.size(0))
        out = input
        for step in reversed(self.steps):
            out, logdet = step.backward(out)
            logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    @overrides
    def init(self, data, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Data-dependent initialization pass; mirrors forward()."""
        out = data
        # [batch]
        logdet_accum = data.new_zeros(data.size(0))
        for step in self.steps:
            out, logdet = step.init(out, init_scale=init_scale)
            logdet_accum = logdet_accum + logdet
        return out, logdet_accum
class GlowInternalBlock(Flow):
    """
    Glow Internal Block (squeeze at beginning and split at end)

    A stack of GlowSteps followed by a Prior; the caller squeezes before and
    splits after this block in the multi-scale hierarchy.
    """
    def __init__(self, num_steps, in_channels, scale=True, inverse=False):
        super(GlowInternalBlock, self).__init__(inverse)
        steps = [GlowStep(in_channels, scale=scale, inverse=inverse) for _ in range(num_steps)]
        self.steps = nn.ModuleList(steps)
        # NOTE(review): the prior is constructed with inverse=True regardless
        # of this block's own `inverse` flag — preserved as-is, but confirm
        # against the rest of the macow codebase.
        self.prior = Prior(in_channels, scale=scale, inverse=True)

    def sync(self):
        # Propagate cached-weight synchronisation to every sub-flow.
        for step in self.steps:
            step.sync()
        self.prior.sync()

    @overrides
    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run all steps then the prior; return (output, summed logdet)."""
        out = input
        # [batch]
        logdet_accum = input.new_zeros(input.size(0))
        for step in self.steps:
            out, logdet = step.forward(out)
            logdet_accum = logdet_accum + logdet
        out, logdet = self.prior.forward(out)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    @overrides
    def backward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Invert forward: prior first, then the steps in reverse order."""
        # [batch]
        out, logdet_accum = self.prior.backward(input)
        for step in reversed(self.steps):
            out, logdet = step.backward(out)
            logdet_accum = logdet_accum + logdet
        return out, logdet_accum

    # Fix: the return annotation previously claimed
    # Tuple[Tuple[Tensor, Tensor], Tensor]; the method returns
    # (output, logdet) exactly like forward().
    @overrides
    def init(self, data, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Data-dependent initialization pass; mirrors forward()."""
        out = data
        # [batch]
        logdet_accum = data.new_zeros(data.size(0))
        for step in self.steps:
            out, logdet = step.init(out, init_scale=init_scale)
            logdet_accum = logdet_accum + logdet
        out, logdet = self.prior.init(out, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        return out, logdet_accum
class Glow(Flow):
    """
    Glow

    Multi-scale normalizing flow: `levels` blocks, each preceded by a 2x2
    squeeze; internal blocks split off half their channels, which are
    re-attached (unsplit + unsqueeze) at the end so the output has the same
    shape as the input.
    """
    def __init__(self, levels, num_steps, in_channels, scale=True, inverse=False):
        super(Glow, self).__init__(inverse)
        assert levels > 1, 'Glow should have at least 2 levels.'
        assert levels == len(num_steps)
        blocks = []
        self.levels = levels
        for level in range(levels):
            if level == levels - 1:
                # Last (top) level: squeeze quadruples channels, no split.
                in_channels = in_channels * 4
                macow_block = GlowTopBlock(num_steps[level], in_channels, scale=scale, inverse=inverse)
                blocks.append(macow_block)
            else:
                # Internal level: squeeze quadruples channels, the split
                # afterwards halves them for the next level.
                in_channels = in_channels * 4
                macow_block = GlowInternalBlock(num_steps[level], in_channels, scale=scale, inverse=inverse)
                blocks.append(macow_block)
                in_channels = in_channels // 2
        self.blocks = nn.ModuleList(blocks)

    def sync(self):
        # Propagate cached-weight synchronisation to every block.
        for block in self.blocks:
            block.sync()

    @overrides
    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode input through all levels; return (output, summed logdet)."""
        logdet_accum = input.new_zeros(input.size(0))
        out = input
        outputs = []
        for i, block in enumerate(self.blocks):
            out = squeeze2d(out, factor=2)
            out, logdet = block.forward(out)
            logdet_accum = logdet_accum + logdet
            if isinstance(block, GlowInternalBlock):
                # Split off half the channels; they bypass deeper levels.
                out1, out2 = split2d(out, out.size(1) // 2)
                outputs.append(out2)
                out = out1
        # Reassemble: undo the final squeeze, then re-attach each split half
        # in reverse order, unsqueezing once per level.
        out = unsqueeze2d(out, factor=2)
        for _ in range(self.levels - 1):
            out2 = outputs.pop()
            out = unsqueeze2d(unsplit2d([out, out2]), factor=2)
        assert len(outputs) == 0
        return out, logdet_accum

    @overrides
    def backward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Invert forward: re-derive the split halves, then run blocks in reverse."""
        outputs = []
        out = squeeze2d(input, factor=2)
        for _ in range(self.levels - 1):
            out1, out2 = split2d(out, out.size(1) // 2)
            outputs.append(out2)
            out = squeeze2d(out1, factor=2)
        logdet_accum = input.new_zeros(input.size(0))
        for i, block in enumerate(reversed(self.blocks)):
            if isinstance(block, GlowInternalBlock):
                out2 = outputs.pop()
                out = unsplit2d([out, out2])
            out, logdet = block.backward(out)
            logdet_accum = logdet_accum + logdet
            out = unsqueeze2d(out, factor=2)
        assert len(outputs) == 0
        return out, logdet_accum

    @overrides
    def init(self, data, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Data-dependent initialization pass; mirrors forward()."""
        logdet_accum = data.new_zeros(data.size(0))
        out = data
        outputs = []
        for i, block in enumerate(self.blocks):
            out = squeeze2d(out, factor=2)
            out, logdet = block.init(out, init_scale=init_scale)
            logdet_accum = logdet_accum + logdet
            if isinstance(block, GlowInternalBlock):
                out1, out2 = split2d(out, out.size(1) // 2)
                outputs.append(out2)
                out = out1
        out = unsqueeze2d(out, factor=2)
        for _ in range(self.levels - 1):
            out2 = outputs.pop()
            out = unsqueeze2d(unsplit2d([out, out2]), factor=2)
        assert len(outputs) == 0
        return out, logdet_accum

    @classmethod
    def from_params(cls, params: Dict) -> "Glow":
        """Alternate constructor from a parameter dictionary."""
        return Glow(**params)

# Register this flow under the name 'glow' in the Flow registry.
Glow.register('glow')
|
import requests
import logging
import json
import boto3
from botocore.exceptions import ClientError
from urllib2 import build_opener, HTTPHandler, Request
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
    """CloudFormation custom-resource handler (Python 2).

    Ensures two Splunk HTTP Event Collector tokens exist (one with
    indexer acknowledgement, one without), then reports both token values
    back to CloudFormation via sendResponse().
    """
    endpoint = event["ResourceProperties"]["SplunkHttpEventCollectorManagementURL"]
    splunk_user = event["ResourceProperties"]["SplunkUser"]
    splunk_password = event["ResourceProperties"]["SplunkPassword"]
    # List tokens and remove from token list if we already have them
    # NOTE(review): verify=False disables TLS certificate verification on
    # every request here — confirm this is acceptable for the deployment.
    response = requests.get(endpoint + '/services/data/inputs/http?output_mode=json', verify=False,
                            auth=(splunk_user, splunk_password))
    json_data = json.loads(response.text)
    indexer_no_ack_token_name = 'generated-indexer-no-ack'
    indexer_ack_token_name = 'generated-indexer-ack'
    token_names = [indexer_no_ack_token_name, indexer_ack_token_name]
    for token_data in json_data["entry"]:
        # Entry names come back as "http://<name>"; strip the scheme prefix.
        token_name = token_data["name"].split("http://")[1]
        if token_name in token_names:
            token_names.remove(token_name)
    # Create tokens that don't already exist
    for token_name in token_names:
        print "Creating token: " + token_name
        data = [
            ('name', token_name),
        ]
        response = requests.post(endpoint + '/services/data/inputs/http',
                                 data=data, verify=False, auth=(splunk_user, splunk_password))
        data = [
            ('enabled', '1')
        ]
        # useACK overrides for firehose sourcetypes
        if (token_name == indexer_ack_token_name):
            data.append(('useACK', "1"))
        # Enable the token (and set useACK for the ack variant).
        response = requests.post(
            endpoint + '/services/data/inputs/http/' + token_name, data=data,
            verify=False, auth=(splunk_user, splunk_password))
    # Grab all tokens (included newly created ones - if any)
    response = requests.get(endpoint + '/services/data/inputs/http?output_mode=json', verify=False,
                            auth=(splunk_user, splunk_password))
    json_data = json.loads(response.text)
    HEC_tokens = {}
    token_names = [indexer_no_ack_token_name, indexer_ack_token_name]
    for token_data in json_data["entry"]:
        if (token_data["name"].split("http://")[1] in token_names):
            HEC_tokens[token_data["name"].split("http://")[1]] = token_data["content"]["token"]
    # Report both token values back to CloudFormation.
    sendResponse(event, context, "SUCCESS", {"Message": "Splunk HEC configuration successful!",
                                             "IndexerAckHECToken": HEC_tokens["generated-indexer-ack"],
                                             "IndexerNoAckHECToken": HEC_tokens["generated-indexer-no-ack"]})
def sendResponse(event, context, responseStatus, responseData):
    """PUT a CloudFormation custom-resource response to the signed ResponseURL.

    responseStatus is "SUCCESS" or "FAILED"; responseData becomes the
    resource's Data attributes.
    """
    responseBody = json.dumps({
        "Status": responseStatus,
        "Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event['StackId'],
        "RequestId": event['RequestId'],
        "LogicalResourceId": event['LogicalResourceId'],
        "Data": responseData
    })
    logger.info('ResponseURL: {}'.format(event['ResponseURL']))
    logger.info('ResponseBody: {}'.format(responseBody))
    # CloudFormation expects a raw PUT with an empty Content-Type.
    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=responseBody)
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(responseBody))
    request.get_method = lambda: 'PUT'
    response = opener.open(request)
    print("Status code: {}".format(response.getcode()))
    print("Status message: {}".format(response.msg))
# -*- coding: utf-8 -*-
"""Test the TcEx Batch Module."""
import pytest
from ..tcex_init import tcex
# pylint: disable=R0201,W0201
class TestInflect:
    """Test the TcEx Batch Module."""

    def setup_class(self):
        """Configure setup before all tests."""

    @pytest.mark.parametrize(
        'string,result',
        [
            # ('Adversary', 'Adversaries'),
            ('adversary', 'adversaries'),
            ('Campaign', 'Campaigns'),
            ('Document', 'Documents'),
            ('Email', 'Emails'),
            ('Event', 'Events'),
            ('Incident', 'Incidents'),
            ('Intrusion Set', 'Intrusion Sets'),
            ('Report', 'Reports'),
            ('Signature', 'Signatures'),
            ('Task', 'Tasks'),
            ('Threat', 'Threats'),
        ],
    )
    def test_inflect(self, string, result):
        """Verify inflect pluralization of ThreatConnect type names."""
        plural_string = tcex.utils.inflect.plural(string)
        assert plural_string == result, 'String {} != {}'.format(plural_string, result)
|
"""Modules for building and running look-ahead indicators and label generators."""
from vectorbt.labels.enums import *
from vectorbt.labels.generators import (
FMEAN,
FSTD,
FMIN,
FMAX,
FIXLB,
MEANLB,
LEXLB,
TRENDLB,
BOLB
)
# Public re-exports of the label generator classes imported above.
__all__ = [
    'FMEAN',
    'FSTD',
    'FMIN',
    'FMAX',
    'FIXLB',
    'MEANLB',
    'LEXLB',
    'TRENDLB',
    'BOLB'
]

# Hide the re-exported names from pdoc-generated documentation.
__pdoc__ = {k: False for k in __all__}
|
import json
import sys
import warnings
from nltk.tokenize.punkt import PunktSentenceTokenizer
import numpy as np
from pytorch_pretrained_bert import BertTokenizer
from pytorch_pretrained_bert.modeling import BertConfig
from pytorch_pretrained_bert.optimization import BertAdam
from sklearn.metrics import f1_score
import torch
import torch.nn as nn
from torchtext import data as torchdata
from tqdm import tqdm
from BERTModel import BERTModel
sys.path.append("..")
from utils import utils
from utils.utils import get_weights, save_model
# Load run-time hyperparameters and paths once at import time.
with open('config.json', 'r') as f:
    config = json.load(f)
# fine-tuning model
def train(model, criterion, optimiser, train_iterator, vocab):
    """Fine-tune *model* for config['num_epochs'] epochs over train_iterator.

    The BERT encoder is frozen for the whole run; only the head is trained.
    Progress (loss / accuracy / macro-F1) is shown on a tqdm bar.
    """
    model.train()
    model.freeze_bert_encoder()
    total_correct = 0
    total_batches = len(train_iterator.data()) // train_iterator.batch_size
    # NOTE(review): these accumulate across ALL epochs, so the F1 shown on
    # the progress bar mixes predictions from earlier epochs — confirm
    # whether that is intended.
    model_predictions = []
    true_labels = []
    for epoch in range(config['num_epochs']):
        train_loss = 0
        pbar = tqdm(total=total_batches)
        epoch_predictions = 0
        epoch_correct = 0
        for i, batch in enumerate(train_iterator):
            # logits = model(input_ids, segment_ids, input_mask)
            segment_ids, input_mask = extract_features(batch.text, vocab)
            predictions = model(batch.text, segment_ids, input_mask) # forward pass
            loss = criterion(predictions, batch.label)
            train_loss += loss.item()
            # Predicted class = argmax over the logits of each example.
            label_pred = [np.argmax(p) for p in predictions.cpu().detach().numpy()]
            true_labels = true_labels + batch.label.cpu().detach().tolist()
            model_predictions = model_predictions + label_pred
            for p, tp in zip(label_pred, batch.label.cpu().detach().tolist()):
                epoch_predictions += 1
                if p == tp:
                    total_correct += 1
                    epoch_correct += 1
            pbar.set_description(
                f'{epoch + 1}/{config["num_epochs"]} ' +
                f'Loss: {train_loss / (i + 1):.7f} ' +
                f'Acc: {epoch_correct / epoch_predictions:.7f} ' +
                f'F1: {f1_score(true_labels, model_predictions, average="macro"):.7f} ' +
                f'Total correct {total_correct} out of {len(model_predictions)}'
            )
            # Backward and optimize
            optimiser.zero_grad()
            loss.backward()
            optimiser.step()
            pbar.update(1)
        # if epoch + 1 == config['freeze_after']:
        #     model.freeze_bert_encoder()
        # if epoch == config['unfreeze_after_epoch']:
        #     model.unfreeze_bert_encoder()
def test(model, test_iterator, vocab):
    """Evaluate *model* on test_iterator and print accuracy / macro-F1."""
    global num_classes
    model.eval()
    print('Testing model ...')
    total_correct = 0
    total_batches = len(test_iterator.data()) // test_iterator.batch_size
    true_labels = []
    model_predictions = []
    true_predictions = []
    for i, batch in enumerate(test_iterator):
        segment_ids, input_mask = extract_features(batch.text, vocab)
        predictions = model(batch.text, segment_ids, input_mask) # forward pass
        label_pred = [np.argmax(p) for p in predictions.cpu().detach().numpy()]
        true_labels = true_labels + batch.label.cpu().detach().tolist()
        model_predictions = model_predictions + label_pred
        for p, tp in zip(label_pred, batch.label.cpu().detach().tolist()):
            if p == tp:
                total_correct += 1
                true_predictions.append(p)
    # NOTE(review): the accuracy denominator uses len(batch) of the LAST
    # batch times the batch count; if the final batch is short this
    # over/under-counts — len(model_predictions) would be exact.
    print(
        f'\n\n\nAcc: {total_correct / (len(batch) * (i + 1)):.7f} ' +
        f'F1: {f1_score(true_labels, model_predictions, average="macro"):.7f} ' +
        f'Total correct {total_correct} out of {len(model_predictions)}' +
        f'Correct by classes: {[true_predictions.count(c) for c in list(range(num_classes))]} /' +
        f'{[true_labels.count(c) for c in list(range(num_classes))]}\n'
    )
# Word-piece tokenizer for the configured BERT checkpoint, plus a Punkt
# sentence splitter used by tokenize() to insert [SEP] markers.
tokenizer = BertTokenizer.from_pretrained(config['bert_model'], do_lower_case=True, max_len=config['max_seq_length'])
sentence_tokenizer = PunktSentenceTokenizer()
def tokenize(text):
    """Convert *text* to BERT tokens: [CLS] + wordpieces with [SEP] markers.

    When config['one_seq'] is false the text is split into sentences and a
    ' [SEP] ' marker is appended after each one; otherwise a single marker
    is appended to the whole text.
    """
    if config['one_seq']:
        text += ' [SEP] '
    else:
        sentences = sentence_tokenizer.tokenize(text)
        text = ''.join(sentence + ' [SEP] ' for sentence in sentences)
    return ['[CLS]'] + tokenizer.tokenize(text)
def extract_features(batch, vocab):
    """Build segment-id and attention-mask tensors for a batch of token ids.

    Every example gets segment id 0 and mask 1 for each position (the
    iterator pads to a fixed length, so the rows are rectangular).
    Returns (segment_ids, input_mask) tensors on the global `device`.
    """
    segment_rows = [[0] * len(example) for example in batch]
    mask_rows = [[1] * len(example) for example in batch]
    return (torch.tensor(segment_rows).to(device),
            torch.tensor(mask_rows).to(device))
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=config['lower_case'])
    # Uncased checkpoints require lower-cased input; force the flag on.
    if not config['lower_case'] and 'uncased' in config['bert_model']:
        warnings.warn('Using uncased bert model should be lower casting characters.')
        config['lower_case'] = True
    # Field definitions: TEXT runs our tokenize() above, LABEL is an int id.
    TEXT = torchdata.Field(tokenize=tokenize, sequential=True, lower=config['lower_case'], batch_first=True,
                           fix_length=config['max_seq_length'])
    LABEL = torchdata.Field(use_vocab=False, sequential=False, preprocessing=lambda x: int(x), is_target=True)
    train_dataset, test_dataset = torchdata.TabularDataset.splits(path=config['dataset_path'],
                                                                  train=config['dataset_train'],
                                                                  test=config['dataset_test'],
                                                                  format='tsv',
                                                                  fields=[('label', LABEL), ('text', TEXT)])
    train_iterator = torchdata.BucketIterator(train_dataset, batch_size=config['batch_size'],
                                              sort_key=lambda x: len(x.text),
                                              device=device,
                                              sort_within_batch=False)
    test_iterator = torchdata.BucketIterator(test_dataset, batch_size=config['test_batch_size'],
                                             sort_key=lambda x: len(x.text),
                                             device=device,
                                             sort_within_batch=False)
    TEXT.build_vocab(train_dataset)
    LABEL.build_vocab(train_dataset)
    # Class weights to counter label imbalance in the loss.
    num_classes, weights = get_weights([e.label for e in train_dataset.examples], config)
    bert_config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12,
                             num_attention_heads=12, intermediate_size=3072)
    bert_model = BERTModel(bert_config, num_classes, config['bert_model']).to(device)
    print(f'Model has {utils.count_parameters(bert_model)} trainable parameters')
    if config['load_model']:
        bert_model.load_state_dict(torch.load(config['checkpoint']))
    if config['optimiser'] == 'adam':
        optimiser = torch.optim.Adam(bert_model.parameters(), lr=config['learning_rate'])
    elif config['optimiser'] == 'bert_adam':
        optimiser = BertAdam(bert_model.parameters(), lr=config['learning_rate'], warmup=0.1,
                             t_total=int(len(train_dataset) / config['batch_size'] / 1) * config['num_epochs'])
    else:
        raise NotImplementedError('Optimiser should be set as either "adam" or "bert_adam".')
    criterion = nn.CrossEntropyLoss(weight=torch.as_tensor(weights, device=device).float())
    if not config['do_train'] and not config['do_test']:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if config['do_train']:
        train(bert_model, criterion, optimiser, train_iterator, TEXT.vocab)
    if config['do_test']:
        test(bert_model, test_iterator, TEXT.vocab)
    if config['save_model']:
        save_model('modelBert.ckpt', bert_model)
|
"""Demonstrates the Flyweight mixin in caixa.metaclasses"""
from typing import Optional, Any
from caixa.metaclasses import Flyweight
def describe(label: str = 'object', obj: Optional[Any] = None) -> str:
    """Return a one-line description of *obj*, prefixed with *label*.

    Fix: the *label* parameter was accepted but never used, although every
    caller in the demos passes a meaningful variable name; it is now
    included in the output.
    """
    return f"{label}: id={id(obj)} type={type(obj)} object={obj}"
class A(str, metaclass=Flyweight):
    """str subclass managed by the Flyweight metaclass.

    Presumably equal constructor arguments yield one shared instance — the
    demos below print ids to illustrate this; confirm in caixa.metaclasses.
    """
    pass
class B(A):
    """Subclass of A; inherits the Flyweight metaclass through A."""
    pass
class C(metaclass=Flyweight):
    """Plain (non-str) Flyweight participant with two keyword fields."""
    # Class-level defaults; shadowed by instance attributes in __init__.
    foo: Optional[str] = None
    bar: Optional[int] = None

    def __init__(self, foo: Optional[str] = "junk", bar: Optional[int] = None) -> None:
        self.foo = foo
        self.bar = bar

    def __str__(self) -> str:
        return f"C(foo='{self.foo}',bar={self.bar})"
def demo_simple():
    """Describe the class A itself and two equal-valued A instances."""
    instances = [A('foo'), A('foo')]
    print(describe('A', A))
    for label, obj in zip(('a1', 'a2'), instances):
        print(describe(label, obj))
def demo_inherit():
    """Construct equal-valued pairs of B and print their identities."""
    for label_a, label_b, value in (('b1', 'b2', 'ook'), ('b3', 'b4', 'foo')):
        obj_a = B(value)
        obj_b = B(value)
        print(describe(label_a, obj_a))
        print(describe(label_b, obj_b))
def demo_other():
    """Show positional vs keyword construction of C with equal arguments."""
    first = C('woo', 3)
    second = C('woo', 3)
    third = C(foo='woo', bar=3)
    for label, obj in (('c1', first), ('c2', second), ('c3', third)):
        print(describe(label, obj))
def main():
    """Run every Flyweight demonstration in order."""
    for demo in (demo_simple, demo_inherit, demo_other):
        demo()
if __name__ == '__main__':
    main()

# Dead code removed: a trailing triple-quoted string held commented-out
# alternative declarations of B and C (with an explicit Flyweight metaclass).
|
def gen_primes():
    """Generate an infinite, increasing sequence of prime numbers.

    Incremental sieve of Eratosthenes: each known composite maps to the
    primes that witness its compositeness, so the sieve only grows with the
    primes seen so far instead of being run forward to a fixed bound.
    """
    witnesses = {}  # composite -> list of primes dividing it
    candidate = 2   # running integer checked for primality
    while True:
        factors = witnesses.pop(candidate, None)
        if factors is None:
            # No witness recorded: candidate is a new prime.  Emit it and
            # schedule its square, the first multiple not already covered
            # by a smaller prime.
            yield candidate
            witnesses[candidate * candidate] = [candidate]
        else:
            # Composite: advance each witness to its next multiple so it is
            # ready for larger numbers; the current entry is no longer needed
            # (already removed by pop above).
            for prime in factors:
                witnesses.setdefault(prime + candidate, []).append(prime)
        candidate += 1
# take the number as input from user upto which prime numbers are to be generated
n = int (input ("Enter upper limit: "))
print ("List of prime numbers upto %d: " % (n))
# call the generator function to yield prime numbers upto n values
# (the generator is infinite, so stop as soon as a prime exceeds n)
for value in gen_primes ():
    if value > n:
        break
    print (value, end = " ")
# final newline after the space-separated list
print ("")
import os
import reframe as rfm
import reframe.utility.sanity as sn
class Cp2kCheck(rfm.RunOnlyRegressionTest):
    """Base regression check for CP2K running the H2O-256 benchmark.

    Subclasses select the target partitions, task geometry and performance
    references for CPU vs GPU runs.
    """
    def __init__(self, check_name, check_descr):
        super().__init__(check_name, os.path.dirname(__file__))
        self.descr = check_descr
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.executable = 'cp2k.psmp'
        self.executable_opts = ['H2O-256.inp']
        # Total energy extracted from the last FORCE_EVAL line of stdout.
        energy = sn.extractsingle(r'\s+ENERGY\| Total FORCE_EVAL \( QS \) '
                                  r'energy \(a\.u\.\):\s+(?P<energy>\S+)',
                                  self.stdout, 'energy', float, item=-1)
        # The run is sane when the energy is within 1e-4 of this reference
        # and exactly 10 MD steps were reported.
        energy_reference = -4404.2323
        energy_diff = sn.abs(energy-energy_reference)
        self.sanity_patterns = sn.all([
            sn.assert_found(r'PROGRAM STOPPED IN', self.stdout),
            sn.assert_eq(sn.count(sn.extractall(
                r'(?P<step_count>STEP NUM)',
                self.stdout, 'step_count')), 10),
            sn.assert_lt(energy_diff, 1e-4)
        ])
        # Timing figure taken from CP2K's summary line.
        self.perf_patterns = {
            'perf': sn.extractsingle(r'^ CP2K(\s+[\d\.]+){4}\s+(?P<perf>\S+)',
                                     self.stdout, 'perf', float)
        }
        self.maintainers = ['LM']
        self.tags = {'scs'}
        # Performance misses are reported but do not fail the check.
        self.strict_check = False
        self.modules = ['CP2K']
        self.extra_resources = {
            'switches': {
                'num_switches': 1
            }
        }
@rfm.parameterized_test(['prod'], ['maint'])
class Cp2kCpuCheck(Cp2kCheck):
    """CPU (multicore partition) variant of the CP2K check."""
    def __init__(self, variant):
        super().__init__('cp2k_cpu_%s_check' % variant,
                         'CP2K check CPU')
        self.valid_systems = ['daint:mc', 'dom:mc']
        self.num_gpus_per_node = 0
        # dom is the smaller system; use fewer total ranks there.
        if self.current_system.name == 'dom':
            self.num_tasks = 216
        else:
            self.num_tasks = 576
        self.num_tasks_per_node = 36
        # Maintenance and production runs carry different perf references.
        if variant == 'maint':
            self.tags |= {'maintenance'}
            self.reference = {
                'dom:mc': {
                    'perf': (182.6, None, 0.05)
                },
                'daint:mc': {
                    'perf': (141.0, None, 0.05)
                },
            }
        else:
            self.tags |= {'production'}
            self.reference = {
                'dom:mc': {
                    'perf': (174.5, None, 0.05)
                },
                'daint:mc': {
                    'perf': (113.0, None, 0.25)
                },
            }
@rfm.parameterized_test(['prod'], ['maint'])
class Cp2kGpuCheck(Cp2kCheck):
    """GPU partition variant of the CP2K check (one GPU per node, CUDA MPS)."""
    def __init__(self, variant):
        super().__init__('cp2k_gpu_%s_check' % variant,
                         'CP2K check GPU')
        self.valid_systems = ['daint:gpu', 'dom:gpu']
        # Enable the CUDA Multi-Process Service so 12 ranks share one GPU.
        self.variables = {'CRAY_CUDA_MPS': '1'}
        self.modules = ['CP2K']
        self.num_gpus_per_node = 1
        # dom is the smaller system; use fewer total ranks there.
        if self.current_system.name == 'dom':
            self.num_tasks = 72
        else:
            self.num_tasks = 192
        self.num_tasks_per_node = 12
        # Maintenance and production runs carry different perf references.
        if variant == 'maint':
            self.tags |= {'maintenance'}
            self.reference = {
                'dom:gpu': {
                    'perf': (251.8, None, 0.15)
                },
                'daint:gpu': {
                    'perf': (222.6, None, 0.05)
                },
            }
        else:
            self.tags |= {'production'}
            self.reference = {
                'dom:gpu': {
                    'perf': (240.0, None, 0.05)
                },
                'daint:gpu': {
                    'perf': (222.6, None, 0.05)
                },
            }
|
"Main module to be run for training the model."
import json
import torch
import torch.nn as nn
from absl import app
from torch.utils.data import DataLoader, ConcatDataset
import utils.dataset as dataset
import utils.train_utils as train_utils
from model.vqa_model import VQAModel, ModelParams
from train import train
from utils.flags import FLAGS
def main(_):
    """Entry point: build datasets, model and configs, then run training.

    Reads all settings from absl FLAGS and the JSON file at
    FLAGS.data_params_path; requires CUDA (model is moved to .cuda()).
    """
    train_utils.create_dir(FLAGS.save_folder)
    logger = train_utils.get_logger("VQA", FLAGS.save_folder)
    # Seed both CPU and GPU RNGs for reproducibility.
    torch.manual_seed(FLAGS.seed)
    torch.cuda.manual_seed(FLAGS.seed)
    torch.backends.cudnn.benchmark = True
    data_params = json.load(open(FLAGS.data_params_path))
    dictionary = dataset.Dictionary.load_from_file(FLAGS.dictionary_path)
    # Optimisation schedule and checkpointing knobs, all flag-driven.
    train_configs = train_utils.TrainingConfigs(
        start_epoch=FLAGS.start_epoch,
        number_of_epochs=FLAGS.number_of_epochs,
        batch_size=FLAGS.batch_size,
        base_learning_rate=FLAGS.base_learning_rate,
        warmup_length=FLAGS.warmup_length,
        warmup_factor=FLAGS.warmup_factor,
        lr_decay_factor=FLAGS.lr_decay_factor,
        lr_decay_start=FLAGS.lr_decay_start,
        decay_step=FLAGS.decay_step,
        save_score_threshold=FLAGS.save_score_threshold,
        save_step=FLAGS.save_step,
        grad_clip=FLAGS.grad_clip,
    )
    # Architecture hyperparameters; dimensions come from the data params file.
    model_params = ModelParams(
        add_self_attention=FLAGS.add_self_attention,
        fusion_method=FLAGS.fusion_method,
        question_sequence_length=dataset.MAX_QUES_SEQ_LEN,
        number_of_objects=dataset.NO_OBJECTS,
        word_embedding_dimension=data_params["word_feat_dimension"],
        object_embedding_dimension=data_params["image_feat_dimension"],
        vocabulary_size=data_params["vocabulary_size"],
        num_ans_candidates=data_params["number_of_answer_candidiates"],
    )
    logger.info("Model params:\t%s\n", model_params)
    model = VQAModel(
        glove_path=FLAGS.glove_path,
        model_params=model_params,
        hidden_dimension=FLAGS.hidden_dimension,
    ).cuda()
    model = nn.DataParallel(model).cuda()
    train_dset = dataset.VQAFeatureDataset(
        name="train",
        dictionary=dictionary,
    )
    eval_dset = dataset.VQAFeatureDataset("val", dictionary)
    # Optionally train on train+val combined; then there is no eval loader.
    if FLAGS.use_train_and_val:
        train_dset = ConcatDataset([train_dset, eval_dset])
        eval_loader = None
    else:
        eval_loader = DataLoader(
            eval_dset, FLAGS.batch_size, shuffle=True, num_workers=1
        )
    train_loader = DataLoader(
        train_dset,
        train_configs.batch_size,
        shuffle=True,
        num_workers=1,
    )
    train(
        model,
        train_configs,
        train_loader,
        eval_loader,
        FLAGS.save_folder,
        FLAGS.final_save_name,
        FLAGS.snapshot_path,
        logger,
    )
if __name__ == "__main__":
app.run(main)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Dremio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bearer auth for Nessie client."""
from requests import models
from requests.auth import AuthBase
class TokenAuth(AuthBase):
    """Requests auth hook that attaches a bearer token to every request."""

    def __init__(self, token: str) -> None:
        """Store the bearer token to be sent with each request."""
        self._token = token

    def __call__(self, r: models.PreparedRequest) -> models.PreparedRequest:
        """Attach the Authorization header and hand the request back."""
        header_value = "Bearer {}".format(self._token)
        r.headers["Authorization"] = header_value
        return r
|
# For each test case: read n (unused), a whitespace-separated number list, and
# a target; report the 1-based position of the target's LAST occurrence.
for j in range(int(input())):
    _ = int(input("enter n"))
    x = input("enter numbers with space\n").split()
    a = 0
    p = input("enter searching No\n")
    t = -1  # last matching index; -1 guards the empty-list case
    for i in range(len(x)):
        if x[i] == p:
            a += 1
            t = i
    # BUG FIX: the original tested `a == 1`, so a value occurring more than
    # once was reported as "not present"; any positive count means present.
    if a >= 1:
        print("\npresent", t + 1)
    else:
        # BUG FIX: the original printed `i + 1` here, which is just the last
        # loop index and carries no meaning for an absent value.
        print("\nnot present")
|
#!/usr/bin/env python
import rospy
from hide_and_seek_navigation.msg import listmsg
from std_msgs.msg import String
def planner():
    """Publish a fixed navigation goal (x=0.5, y=0.5, yaw=90) on the
    'move_base_goal' topic at 10 Hz until ROS shutdown."""
    pub = rospy.Publisher('move_base_goal', listmsg, queue_size=5)
    rospy.init_node('planner')
    rate = rospy.Rate(10)  # 10 Hz publish loop
    while not rospy.is_shutdown():
        msg = listmsg()
        msg.x = 0.5
        msg.y = 0.5
        msg.yaw = 90  # presumably degrees -- confirm the consumer's expected unit
        pub.publish(msg)
        rate.sleep()
if __name__ == '__main__':
    try:
        planner()
    except rospy.ROSInterruptException:
        # Raised by rate.sleep() on node shutdown (e.g. Ctrl-C); exit quietly.
        pass
import logging
import os
import redis
import sqlalchemy
from flask import Blueprint, jsonify, request
from web3 import HTTPProvider, Web3
from src.models import Block
from src.utils import helpers
from src.utils.db_session import get_db_read_replica
from src.utils.config import shared_config
from src.utils.redis_constants import latest_block_redis_key, latest_block_hash_redis_key
logger = logging.getLogger(__name__)
bp = Blueprint("health_check", __name__)
# Module-level clients, created once at import time.
web3endpoint = helpers.get_web3_endpoint(shared_config)
web3 = Web3(HTTPProvider(web3endpoint))
redis_url = shared_config["redis"]["url"]
# NOTE(review): this rebinds the imported `redis` MODULE name to a client
# instance; any later use of the module API in this file would break.
redis = redis.Redis.from_url(url=redis_url)
disc_prov_version = helpers.get_discovery_provider_version()
# Maximum allowed lag (in blocks) between chain head and the indexed DB head.
HEALTHY_BLOCK_DIFF = 100
#### INTERNAL FUNCTIONS ####
# Returns DB block state & diff
def _get_db_block_state(latest_blocknum, latest_blockhash):
    """Build a health dict comparing the chain head (passed in) against the
    latest block indexed in the DB, plus the git SHA of this deployment."""
    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Fetch latest block from DB.
        # `== True` is intentional: SQLAlchemy needs the comparison operator
        # to build the SQL expression; `is True` would not work here.
        db_block_query = session.query(Block).filter(Block.is_current == True).all()
        assert len(db_block_query) == 1, "Expected SINGLE row marked as current"
        health_results = {
            "web": {
                "blocknumber": latest_blocknum,
                "blockhash": latest_blockhash,
            },
            "db": helpers.model_to_dictionary(db_block_query[0]),
            "git": os.getenv("GIT_SHA"),
        }
        # How far the indexer lags the chain head; abs() guards against the
        # DB momentarily being ahead (e.g. around reorgs).
        block_difference = abs(
            health_results["web"]["blocknumber"] - health_results["db"]["number"]
        )
        health_results["block_difference"] = block_difference
        health_results["maximum_healthy_block_difference"] = HEALTHY_BLOCK_DIFF
        return health_results
# Returns number of and info on open db connections
def _get_db_conn_state():
    """Return the open-connection count and per-connection info from Postgres.

    On query failure this returns a plain error dict. BUG FIX: the original
    returned `jsonify(...), 500` -- a Flask Response tuple -- but the caller
    embeds this function's result inside a larger dict that is itself passed
    to jsonify, and a Response object is not JSON-serializable.
    """
    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Query number of open DB connections.
        num_connections = session.execute(
            sqlalchemy.text("select sum(numbackends) from pg_stat_database;")
        ).fetchall()
        if not (num_connections and num_connections[0][0]):
            return {"error": "pg_stat_database query failed"}
        num_connections = num_connections[0][0]
        # Query per-connection info (only sessions with a known state).
        connection_info = session.execute(
            sqlalchemy.text("select datname, state, query, wait_event_type, wait_event from pg_stat_activity where state is not null;")
        ).fetchall()
        connection_info = [dict(row) for row in connection_info]
    return {"open_connections": num_connections, "connection_info": connection_info}
#### ROUTES ####
@bp.route("/version", methods=["GET"])
def version():
    """Return the discovery provider version info as JSON."""
    payload = jsonify(disc_prov_version)
    return payload, 200
# Consume cached latest block from redis
@bp.route("/health_check", methods=["GET"])
def health_check():
    """Health check comparing the chain head (redis cache, falling back to
    web3) against the latest DB-indexed block.

    Returns HTTP 500 when the indexer lags the chain by more than
    HEALTHY_BLOCK_DIFF blocks. Pass ?verbose=true to include DB connection
    statistics in the payload.
    """
    # can extend this in future to include ganache connectivity, how recently a block
    # has been added (ex. if it's been more than 30 minutes since last block), etc.
    latest_block_num = None
    latest_block_hash = None
    stored_latest_block_num = redis.get(latest_block_redis_key)
    if stored_latest_block_num is not None:
        latest_block_num = int(stored_latest_block_num)
    stored_latest_blockhash = redis.get(latest_block_hash_redis_key)
    if stored_latest_blockhash is not None:
        latest_block_hash = stored_latest_blockhash.decode("utf-8")
    if latest_block_num is None or latest_block_hash is None:
        # Cache miss: query the web3 provider directly.
        latest_block = web3.eth.getBlock("latest", True)
        latest_block_num = latest_block.number
        # BUG FIX: convert the HexBytes hash to a hex string, as /block_check
        # already does; a raw HexBytes value is not JSON-serializable and was
        # inconsistent with the redis-cached (string) path above.
        latest_block_hash = latest_block.hash.hex()
    health_results = _get_db_block_state(latest_block_num, latest_block_hash)
    verbose = request.args.get("verbose", type=str) == 'true'
    if verbose:
        # DB connections check
        health_results["db_connections"] = _get_db_conn_state()
    if health_results["block_difference"] > HEALTHY_BLOCK_DIFF:
        return jsonify(health_results), 500
    return jsonify(health_results), 200
# Query latest block from web3 provider
@bp.route("/block_check", methods=["GET"])
def block_check():
    """Like /health_check, but always queries the web3 provider directly."""
    head = web3.eth.getBlock("latest", True)
    health_results = _get_db_block_state(head.number, head.hash.hex())
    lagging = health_results["block_difference"] > HEALTHY_BLOCK_DIFF
    status = 500 if lagging else 200
    return jsonify(health_results), status
|
'''Exercise 1: write a function that counts the frequency of each word in a
text file and stores the counts in a dictionary keyed by the word.'''

def contar_frequencias(texto):
    """Return a dict mapping each word of *texto* to its frequency.

    The text is lowercased and stripped of commas and periods before being
    split on whitespace.
    """
    texto = texto.lower().replace(',', '').replace('.', '')
    contagem = {}
    for palavra in texto.split():
        # Single O(n) pass; the original called list.count() once per word,
        # which is O(n^2) overall.
        contagem[palavra] = contagem.get(palavra, 0) + 1
    return contagem

if __name__ == '__main__':
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(r'C:\Users\maria\Desktop\POO-2\Terceira Lista\exercicio_1.txt') as file:
        dicionario = contar_frequencias(file.read())
    print(dicionario)
    print(f'Tamanho do dicionário: {len(dicionario)}')
|
import pandas as pd
# Column-name constants for the LA Times place-level dataset.
DATE = 'date'
COUNTY = 'county'
NAME = 'name'
ID = 'id'
POPULATION = 'population'
CONFIRMED_CASES = 'confirmed_cases'
NEW_CASES = 'new_cases'
NEW_CASES_7DAY, NEW_CASES_14DAY = [f'new_cases_{x}day' for x in (7, 14)]
CASE_RATE_7DAY, CASE_RATE_14DAY = [f'case_rate_{x}day' for x in (7, 14)]
df = pd.read_csv('sources/latimes-place-totals.csv',
                 parse_dates=[DATE],
                 infer_datetime_format=True)
STR_COL = ['id', 'name', 'county', 'note']
df[STR_COL] = df[STR_COL].convert_dtypes()
# Sort by date first so each place's rows come out date-ordered, which the
# per-place diff()/rolling() calls below rely on.
df.sort_values([DATE, COUNTY, NAME, ID], inplace=True)
df.reset_index(drop=True, inplace=True)
# Per place: daily new cases, rolling totals, and per-100k case rates.
for id_ in df[ID].unique():
    id_mask = df[ID] == id_
    df.loc[id_mask, NEW_CASES] = df.loc[id_mask, CONFIRMED_CASES].diff()
    df.loc[id_mask, NEW_CASES_7DAY] = df.loc[id_mask,
                                             NEW_CASES].rolling(7).sum()
    # NOTE(review): the 14-day total is halved here, presumably to make it
    # directly comparable to the 7-day figure -- confirm intent.
    df.loc[id_mask,
           NEW_CASES_14DAY] = df.loc[id_mask, NEW_CASES].rolling(14).sum() / 2
    df.loc[id_mask, CASE_RATE_7DAY] = (
        (df.loc[id_mask, NEW_CASES_7DAY] / df.loc[id_mask, POPULATION]) *
        100_000).round(1)
    df.loc[id_mask, CASE_RATE_14DAY] = (
        (df.loc[id_mask, NEW_CASES_14DAY] / df.loc[id_mask, POPULATION]) *
        100_000).round(1)
if __name__ == '__main__':
    df.to_pickle('data/latimes-places-ts.pickle')
|
#!/usr/bin/env python
# coding: utf-8
import codecs
import sys
import sklearn as sk
import pandas as pd
import numpy as np
import math
from sklearn import preprocessing
from sklearn.decomposition import PCA
from src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf
from src.pca.metodo_potencia_deflation import power_iteration
from src.pca.metodo_potencia_deflation import power_deflation
def PCA_from_sklearn(X):
    """PCA via scikit-learn.

    A first full PCA measures the explained-variance spectrum; the smallest
    number of components whose cumulative explained variance reaches 80% is
    then used for the final fit.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    pca : the fitted sklearn PCA object.
    varianza_explicada : explained variance ratio per component.
    componentesprincipales : the transformed data (principal components).
    val_sing : singular values per component.
    pca.components_ : principal axes (directions of maximum variance).
    eigenvalues : eigenvalues of the covariance matrix.
    """
    X = pd.DataFrame(X)
    n_components = len(X.columns)
    pca_1 = PCA(n_components, svd_solver='full')
    pca_1.fit_transform(X)
    var_exp = pca_1.explained_variance_ratio_
    # Smallest component count whose cumulative explained variance >= 80%
    # (the original comment said 60%, but the code threshold is 0.8).
    var_acumulada = var_exp.cumsum()
    n_componentes = (var_acumulada < 0.8).sum() + 1
    pca = PCA(n_componentes, svd_solver='full')
    componentesprincipales = pca.fit_transform(X)
    varianza_explicada = pca.explained_variance_ratio_
    eigenvalues = pca.explained_variance_
    val_sing = pca.singular_values_
    return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues
def PCA_from_SVD(A):
    """PCA from numpy's SVD.

    Parameters
    ----------
    A : array-like, shape (n_samples, n_features)
        Data matrix (rows are observations; a DataFrame is accepted).

    Returns
    -------
    valores_singulares : singular values of the retained components.
    componentes : component coefficient rows (right singular vectors).
    Z : the transformed data (principal components).
    varianza_explicada : explained variance ratio per retained component.

    The number of components kept is the smallest count whose cumulative
    explained variance reaches 80% (the original comment said 60%, but the
    code threshold is 0.8).
    """
    # Center the data.
    A = np.array(A)
    A_centered = A - A.mean(axis=0)
    U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)
    valores_singulares = S
    componentes = Vt
    # Project the centered data onto the principal axes.
    Z = A_centered @ np.transpose(Vt)
    varianza_explicada = S**2 / np.sum(S**2)
    # Automatic component count from cumulative explained variance.
    varianza_acumulada = varianza_explicada.cumsum()
    num_componentes = (varianza_acumulada < 0.8).sum() + 1
    return (valores_singulares[:num_componentes],
            componentes[:num_componentes],
            Z[:, :num_componentes],
            varianza_explicada[:num_componentes])
def PCA_from_SVD_jacobi(A):
    """
    PCA from an SVD computed by the team's one-sided Jacobi routine.

    Parameters: A, the data matrix (a DataFrame is accepted).
    Returns: singular values, component coefficients, transformed data (Z)
    and explained variance for the retained components; the number of
    components kept is the smallest count whose cumulative explained
    variance reaches 80% (the original comment said 60%, but the code
    threshold is 0.8).

    NOTE(review): `svd_jacobi_aprox` is neither imported nor defined in this
    module -- calling this function as-is raises NameError. Confirm where
    that routine lives and import it.
    """
    # Center the data.
    A = np.array(A) # convert to a numpy array in case a DataFrame was passed
    A_centered = A - A.mean(axis=0)
    # Compute the SVD with the team's Jacobi approximation
    # (tolerance 1e-12, at most 500 sweeps).
    U, S, Vt = svd_jacobi_aprox(A_centered,1e-12,500)
    # Singular values.
    valores_singulares = S
    # Component coefficients (right singular vectors).
    componentes = ((Vt))
    # Transformed data (principal components).
    Z = A_centered@np.transpose(Vt)
    # Explained variance ratio.
    varianza_explicada = S**2/np.sum(S**2)
    # Automatic component count from cumulative explained variance (80%).
    n = A.shape[1] # number of columns (unused)
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1
    # Return 4 objects, truncated to the retained components.
    return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]
def PCA_from_QR_vf(data, niter=450):
    """PCA from the eigenvectors of the covariance matrix via the QR algorithm.

    Parameters
    ----------
    data : array-like, shape (n_samples, n_features)
    niter : int, maximum number of QR iterations.

    Returns
    -------
    E : eigenvalues of the retained components.
    componentes : component coefficients (eigenvectors of the covariance matrix).
    Z : the transformed data (principal components).
    varianza_explicada : explained variance ratio per retained component.

    The number of components kept is the smallest count whose cumulative
    explained variance reaches 80% (the original comment said 60%, but the
    code threshold is 0.8). Depends on eigenvectores_eigenvalores_QR_vf.
    """
    A = np.array(data)
    # Center the data once and reuse it (the original recomputed A - mean
    # a second time when building the covariance matrix).
    mean_vec = np.mean(A, axis=0)
    datos_centrados = A - mean_vec
    # Sample covariance matrix.
    C = datos_centrados.T @ datos_centrados / (A.shape[0] - 1)
    E, Q = eigenvectores_eigenvalores_QR_vf(C, niter)
    componentes = Q.T
    Z = datos_centrados @ Q
    varianza_explicada = E / np.sum(E)
    # Automatic component count from cumulative explained variance.
    varianza_acumulada = varianza_explicada.cumsum()
    num_componentes = (varianza_acumulada < 0.8).sum() + 1
    return E[:num_componentes], componentes[:num_componentes], Z[:, :num_componentes], varianza_explicada[:num_componentes]
def PCA_from_potencia(X):
    """PCA via the power method with Hotelling deflation.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    evalues_pow : eigenvalues of the retained components.
    evectors_pow.T rows : the corresponding eigenvectors.
    Z : the transformed data (principal components).
    varianza_explicada : explained variance ratio per retained component.

    The number of components kept is the smallest count whose cumulative
    explained variance reaches 80%. Depends on power_deflation.
    """
    A = np.array(X)
    # Center the data.
    mean_vec = np.mean(A, axis=0)
    datos_centrados = A - mean_vec
    # BUG FIX: the covariance matrix must be built from the CENTERED data;
    # the original used the raw X, which skews the components whenever the
    # data are not already centered (and made centering above pointless).
    cov = datos_centrados.T @ datos_centrados / A.shape[0]
    # Power method with deflation (max 2000 iterations).
    evalues_pow, evectors_pow = power_deflation(cov, 2000)
    varianza_explicada = evalues_pow / np.sum(evalues_pow)
    Z = datos_centrados @ evectors_pow
    # Automatic component count from cumulative explained variance (80%).
    varianza_acumulada = varianza_explicada.cumsum()
    num_componentes = (varianza_acumulada < 0.8).sum() + 1
    return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:, :num_componentes], varianza_explicada[:num_componentes]
import torch
import numpy as np
import yaml, pickle, os, math, logging
from random import choice, randint, sample
from Audio import Audio_Prep
def Calc_RMS(audio):
    """Root-mean-square amplitude along the last axis."""
    mean_power = np.mean(audio ** 2, axis=-1)
    return np.sqrt(mean_power)
class Dataset(torch.utils.data.Dataset):
    """Pairs each speech clip found under wav_paths with a random noise clip."""

    def __init__(self, wav_paths, noise_paths, sample_rate):
        super(Dataset, self).__init__()
        self.sample_Rate = sample_rate
        self.files = self._collect_wavs(wav_paths)
        self.noises = self._collect_wavs(noise_paths)

    @staticmethod
    def _collect_wavs(paths):
        # Recursively gather every .wav file under each of the given roots.
        found = []
        for path in paths:
            for root, _, files in os.walk(path):
                found.extend(
                    os.path.join(root, file)
                    for file in files
                    if os.path.splitext(file)[1].lower() == '.wav')
        return found

    def __getitem__(self, idx):
        """Return (speech waveform, randomly chosen noise waveform)."""
        audio = Audio_Prep(self.files[idx], sample_rate= self.sample_Rate)
        noise = Audio_Prep(choice(self.noises), sample_rate= self.sample_Rate)
        return audio, noise

    def __len__(self):
        return len(self.files)
class Inference_Dataset(torch.utils.data.Dataset):
    """Wraps (label, path) pairs; yields (noisy waveform, label)."""

    def __init__(self, patterns, sample_rate):
        super(Inference_Dataset, self).__init__()
        self.patterns = patterns
        self.sample_Rate = sample_rate

    def __getitem__(self, idx):
        label, file = self.patterns[idx]
        waveform = Audio_Prep(file, sample_rate= self.sample_Rate)
        return waveform, label

    def __len__(self):
        return len(self.patterns)
class Collater:
    """Builds (clean, noise, noisy) training triples from (audio, noise) pairs.

    For each pair, cuts `samples` windows of `wav_length` samples, mixes the
    noise into the audio at a random SNR drawn from [-5, +15] dB, and rescales
    any mixture that clips.
    """

    def __init__(self, wav_length, samples):
        self.wav_Length = wav_length
        self.samples = samples

    def __call__(self, batch):
        audios, noises, noisies = [], [], []
        for audio, noise in batch:
            # Need at least twice the window so random offsets have room.
            if any(x.shape[0] < self.wav_Length * 2 for x in [audio, noise]):
                continue
            audio_Offsets = sample(range(0, audio.shape[0] - self.wav_Length), self.samples)
            noise_Offsets = sample(range(0, noise.shape[0] - self.wav_Length), self.samples)
            for audio_Offset, noise_Offset in zip(audio_Offsets, noise_Offsets):
                # BUG FIX: the original retry loops re-evaluated the SAME
                # offset 100 times, so a too-quiet window could never improve.
                # Redraw the offset on each failed attempt instead.
                for _ in range(100):
                    audio_Sample = audio[audio_Offset:audio_Offset + self.wav_Length]
                    audio_RMS = Calc_RMS(audio_Sample)
                    if audio_RMS > 0.01:
                        break
                    audio_Offset = randint(0, audio.shape[0] - self.wav_Length - 1)
                for _ in range(100):
                    noise_Sample = noise[noise_Offset:noise_Offset + self.wav_Length]
                    noise_RMS = Calc_RMS(noise_Sample)
                    if noise_RMS > 0.01:
                        break
                    noise_Offset = randint(0, noise.shape[0] - self.wav_Length - 1)
                # Give up on windows that stayed silent after all retries.
                if any(x < 0.01 for x in [audio_RMS, noise_RMS]):
                    continue
                # Scale noise so the SNR is uniform in [-5, +15] dB.
                alpha = audio_RMS / noise_RMS / 10 ** (np.random.uniform(-5.0, 15.0) / 20)
                # BUG FIX: the slices above are views into the caller's
                # arrays; the original's in-place `*=`/`/=` mutated the
                # source audio/noise and corrupted later windows. Use
                # out-of-place operations so the inputs stay intact.
                noise_Sample = noise_Sample * alpha
                noisy = audio_Sample + noise_Sample
                max_Noisy = np.max(np.abs(noisy))
                if max_Noisy > 1.0:
                    # Rescale all three consistently to avoid clipping.
                    scale = max_Noisy * 1.01 + 1e-7
                    audio_Sample = audio_Sample / scale
                    noise_Sample = noise_Sample / scale
                    noisy = noisy / scale
                audios.append(audio_Sample)
                noises.append(noise_Sample)
                noisies.append(noisy)
        audios = torch.FloatTensor(audios)    # [Batch, Time]
        noises = torch.FloatTensor(noises)    # [Batch, Time]
        noisies = torch.FloatTensor(noisies)  # [Batch, Time]
        return audios, noises, noisies
class Inference_Collater:
    """Zero-pads a batch of noisy waveforms to a common length that is a
    multiple of `reduction` (the model's time-downsampling factor)."""

    def __init__(self, reduction):
        self.reduction = reduction

    def __call__(self, batch):
        noisies, labels = zip(*batch)
        lengths = [waveform.shape[0] for waveform in noisies]
        # Round the longest clip up to the nearest multiple of `reduction`.
        padded_length = math.ceil(max(lengths) / float(self.reduction)) * self.reduction
        padded = [np.pad(waveform, [0, padded_length - waveform.shape[0]])
                  for waveform in noisies]
        return torch.FloatTensor(padded), lengths, labels  # [Batch, Time]
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 16 09:54:40 2021
@author: Marijn Venderbosch
Computes trap depth from eq. 1.6 PhD thesis Ludovic Brossard
"""
#%% Imports
from scipy.constants import Boltzmann, c, pi, hbar, Planck, atomic_mass
import numpy as np
#%% Variables
mRb = 87 * atomic_mass  # mass of Rb-87, kg
# Dipole trap
power = 1.2e-3  # W (i.e. 1.2 mW; the original comment said mW but the value is in SI watts)
waist = 0.8e-6  # m
rayleigh = 3.6e-6  # m (Rayleigh range)
wavelength = 820e-9  # m (trap light)
# D1 Rubidium
linewidth_D1 = 2 * pi * 5.7e6  # rad/s
transition_wavelength_D1 = 795e-9  # m
# D2 Rubidium
linewidth_D2 = 2 * pi * 6e6  # rad/s
transition_wavelength_D2 = 780e-9  # m
# Functions
def intensity(power, waist):
    """Peak intensity of a Gaussian beam with the given power and waist."""
    beam_area = pi * waist ** 2
    return 2 * power / beam_area
def detuning(wavelength, transition_wavelength):
    """Angular-frequency detuning of the trap light from a transition."""
    inverse_wavelength_diff = 1 / wavelength - 1 / transition_wavelength
    return 2 * pi * c * inverse_wavelength_diff
def saturation_intensity(linewidth, transition_wavelength):
    """Saturation intensity of a transition with the given linewidth."""
    numerator = 2 * pi ** 2 * hbar * c * linewidth
    return numerator / (3 * transition_wavelength ** 3)
def dipole_potential(detuning_D1,
                     detuning_D2,
                     transition_wavelength_D1,
                     transition_wavelength_D2):
    """Ground-state dipole potential, eq. 1.6 of Brossard's PhD thesis
    (Browaeys group, 2020).

    The 1/3 and 2/3 weights come from the degeneracy and Clebsch-Gordan
    factors for linear polarization (matrix elements are the same for all
    transitions). Relies on module-level linewidth_D1/D2, saturation_D1/D2,
    power and waist; the transition_wavelength parameters are accepted but
    unused, matching the original signature.
    """
    d1_term = linewidth_D1**2 / (3 * saturation_D1 * detuning_D1)
    d2_term = 2 * linewidth_D2**2 / (3 * saturation_D2 * detuning_D2)
    return hbar / 8 * (d1_term + d2_term) * intensity(power, waist)
def trap_frequency_radial(waist, mass, potential):
    """Radial trap frequency (rad/s); `potential` must be negative (attractive)."""
    omega_squared = -4 * potential / (mass * waist ** 2)
    return np.sqrt(omega_squared)
def trap_frequency_axial(waist, mass, potential):
    # NOTE(review): `waist` is accepted but unused -- the body reads the
    # module-level `rayleigh` instead. The parameter was likely meant to be
    # the Rayleigh range; confirm before changing the signature.
    return np.sqrt(-2 * potential / (mass * rayleigh**2))
#%% Executing functions and print result
detuning_D1 = detuning(wavelength, transition_wavelength_D1)
detuning_D2 = detuning(wavelength, transition_wavelength_D2)
saturation_D1 = saturation_intensity(linewidth_D1, transition_wavelength_D1)
saturation_D2 = saturation_intensity(linewidth_D2, transition_wavelength_D2)
# NOTE(review): this call rebinds the name `dipole_potential` from the
# function to its numeric result, so the function cannot be called again.
dipole_potential = dipole_potential(detuning_D1,
                                    detuning_D2,
                                    transition_wavelength_D1,
                                    transition_wavelength_D2)
# Trap depth in millikelvin (negative potential -> positive depth).
potential_depth_mK = round(-dipole_potential / Boltzmann * 1e3, 2)
print("Trap depth is: " + str(potential_depth_mK) + " mK")
# Trap depth expressed in MHz via E = h * f.
potential_depth_MHz = round(-dipole_potential / Planck * 1e-6, 1)
print("Trap depth (Hz) is: " + str(potential_depth_MHz) + "MHz")
radial_trap_frequency = trap_frequency_radial(waist, mRb, dipole_potential)
radial_trap_frequency_kHz = round(radial_trap_frequency * 1e-3 / (2 * np.pi))
axial_trap_frequency = trap_frequency_axial(waist, mRb, dipole_potential)
axial_trap_frequency_kHz = round(axial_trap_frequency * 1e-3 / (2 * np.pi))
# NOTE(review): "Raxial" below is presumably a typo for "Radial"; left
# unchanged since this update touches comments only.
print("Raxial, axial trap frequency are: " +
      str(radial_trap_frequency_kHz) +
      ", and " +
      str(axial_trap_frequency_kHz) +
      " (kHz * 2pi)")
|
'''
nbgrader APIs for vserver
Similar to https://github.com/jupyter/nbgrader/issues/659
Authentication
To make things easy, we are simply putting the user id in HTTP GET
parameter or POST data using key `user`.
For example: /api/courses?user=Eric
'''
import os, json, operator
from app import request
from helper import (json_success, error_catcher, json_files_pack,
json_files_unpack, strftime, strptime, get_user,
find_course, find_assignment, find_course_user,
find_student_submissions, find_student_latest_submission,
find_student_submission, JsonError, app_get, app_post,
check_course_instructor, check_course_user)
from database.database import *
@app_get('/api/courses')
def list_courses(db):
    '''
    GET /api/courses
    List every course the user is taking or teaching (anyone)
    '''
    user = get_user(db)
    teaching_ids = {course.id for course in user.teaching}
    taking_ids = {course.id for course in user.taking}
    return json_success(courses=sorted(teaching_ids | taking_ids))
@app_post('/api/course/<course_id>')
def add_course(db, course_id):
    '''
    POST /api/course/<course_id>
    Add a course (anyone)
    '''
    user = get_user(db)
    existing = db.query(Course).filter(Course.id == course_id).one_or_none()
    if existing:
        raise JsonError('Course already exists')
    db.add(Course(course_id, user))
    db.commit()
    return json_success()
@app_get('/api/assignments/<course_id>')
def list_assignments(db, course_id):
    '''
    GET /api/assignments/<course_id>
    List all assignments for a course (students+instructors)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_user(db, course, user)
    return json_success(
        assignments=[assignment.id for assignment in course.assignments])
@app_get('/api/assignment/<course_id>/<assignment_id>')
def download_assignment(db, course_id, assignment_id):
    '''
    GET /api/assignment/<course_id>/<assignment_id>
    Download a copy of an assignment (students+instructors)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_user(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    # list_only=true returns metadata without the file contents.
    list_only = request.args.get('list_only', 'false') == 'true'
    packed = json_files_pack(assignment.files, list_only)
    return json_success(files=packed)
@app_post('/api/assignment/<course_id>/<assignment_id>')
def release_assignment(db, course_id, assignment_id):
    '''
    POST /api/assignment/<course_id>/<assignment_id>
    Release an assignment (instructors only)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_instructor(db, course, user)
    duplicate = db.query(Assignment).filter(
        Assignment.id == assignment_id,
        Assignment.course == course).one_or_none()
    if duplicate:
        raise JsonError('Assignment already exists')
    assignment = Assignment(assignment_id, course)
    json_files_unpack(request.form.get('files'), assignment.files)
    db.commit()
    return json_success()
@app_get('/api/submissions/<course_id>/<assignment_id>')
def list_submissions(db, course_id, assignment_id):
    '''
    GET /api/submissions/<course_id>/<assignment_id>
    List all submissions for an assignment from all students
    (instructors only)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_instructor(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    submissions = [
        {
            'student_id': submission.student.id,
            'timestamp': strftime(submission.timestamp),
            # TODO: "notebooks": [],
        }
        for submission in assignment.submissions
    ]
    return json_success(submissions=submissions)
@app_get('/api/submissions/<course_id>/<assignment_id>/<student_id>')
def list_student_submission(db, course_id, assignment_id, student_id):
    '''
    GET /api/submissions/<course_id>/<assignment_id>/<student_id>
    List all submissions for an assignment from a particular student
    (instructors+students, students restricted to their own submissions)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    # Students may only list their own submissions.
    if user.id != student_id:
        check_course_instructor(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    student = find_course_user(db, course, student_id)
    submissions = [
        {
            'student_id': entry.student.id,
            'timestamp': strftime(entry.timestamp),
            # TODO: "notebooks": [],
        }
        for entry in find_student_submissions(db, assignment, student)
    ]
    return json_success(submissions=submissions)
@app_post('/api/submission/<course_id>/<assignment_id>')
def submit_assignment(db, course_id, assignment_id):
    '''
    POST /api/submission/<course_id>/<assignment_id>
    Submit a copy of an assignment (students+instructors)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_user(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    new_submission = Submission(user, assignment)
    json_files_unpack(request.form.get('files'), new_submission.files)
    db.commit()
    return json_success()
@app_get('/api/submission/<course_id>/<assignment_id>/<student_id>')
def download_submission(db, course_id, assignment_id, student_id):
    '''
    GET /api/submission/<course_id>/<assignment_id>/<student_id>
    Download a student's latest submitted assignment (instructors only)
    TODO: maybe allow student to see their own submissions?
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_instructor(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    student = find_course_user(db, course, student_id)
    submission = find_student_latest_submission(db, assignment, student)
    # list_only=true returns metadata without the file contents.
    list_only = request.args.get('list_only', 'false') == 'true'
    return json_success(
        files=json_files_pack(submission.files, list_only),
        timestamp=strftime(submission.timestamp))
@app_post('/api/feedback/<course_id>/<assignment_id>/<student_id>')
def upload_feedback(db, course_id, assignment_id, student_id):
    '''
    POST /api/feedback/<course_id>/<assignment_id>/<student_id>
    Upload feedback on a student's assignment (instructors only)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    check_course_instructor(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    student = find_course_user(db, course, student_id)
    if 'timestamp' not in request.form:
        raise JsonError('Please supply timestamp')
    submitted_at = strptime(request.form.get('timestamp'))
    submission = find_student_submission(db, assignment, student, submitted_at)
    # Replace any previous feedback wholesale.
    submission.feedbacks.clear()
    # TODO: does this automatically remove the files?
    json_files_unpack(request.form.get('files'), submission.feedbacks)
    db.commit()
    return json_success()
@app_get('/api/feedback/<course_id>/<assignment_id>/<student_id>')
def download_feedback(db, course_id, assignment_id, student_id):
    '''
    GET /api/feedback/<course_id>/<assignment_id>/<student_id>
    Download feedback on a student's assignment
    (instructors+students, students restricted to their own submissions)
    '''
    user = get_user(db)
    course = find_course(db, course_id)
    # Students may only fetch feedback on their own work.
    if user.id != student_id:
        check_course_instructor(db, course, user)
    assignment = find_assignment(db, course, assignment_id)
    student = find_course_user(db, course, student_id)
    if 'timestamp' not in request.args:
        raise JsonError('Please supply timestamp')
    requested_time = strptime(request.args.get('timestamp'))
    submission = find_student_submission(db, assignment, student, requested_time)
    # list_only=true returns metadata without the file contents.
    list_only = request.args.get('list_only', 'false') == 'true'
    return json_success(files=json_files_pack(submission.feedbacks, list_only),
                        timestamp=strftime(submission.timestamp))
|
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
import properties
from ..base_model import BaseModel
from .dispensary import Dispensary
class MenuItem(BaseModel):
    """Menu items for dispensaries."""
    # Display name of the item.
    name = properties.String(
        'Name of the item.',
    )
    # Category; one of the five supported choices below.
    type = properties.StringChoice(
        'Type of item.',
        choices=['strain', 'flower', 'extract', 'edible', 'product'],
    )
    # The underlying strain/extract/edible/product object.
    item = properties.Property(
        'The strain, extract, edible, or product.',
    )
    # Flat price (per the description: unset for strains and extracts).
    price = properties.Float(
        'The price for the item. This is not set for strains and extracts.',
    )
    # Per-weight price tiers (per the descriptions: unset for edibles/products).
    price_half_gram = properties.Float(
        'Price for one half gram of the item. This is not set for edibles '
        'and products.',
    )
    price_gram = properties.Float(
        'Price for one gram of this item. This is not set for edibles and '
        'products.',
    )
    price_eighth = properties.Float(
        'Price for one eighth ounce of this item. This is not set for '
        'edibles and products.',
    )
    price_quarter = properties.Float(
        'Price for one quarter ounce of this item. This is not set for '
        'edibles and products.',
    )
    price_half_ounce = properties.Float(
        'Price for one half ounce of this item. This is not set for '
        'edibles and products.',
    )
    price_ounce = properties.Float(
        'Price for one ounce of this item. This is not set for '
        'edibles and products.',
    )
class MenuItemSummary(MenuItem):
    """Menu item summary for when it is known what the menu item is."""
    # Dispensary at which this menu item is offered.
    location = properties.Instance(
        'Object containing information about the location this item is at.',
        instance_class=Dispensary,
    )
|
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import os
import numpy as np
from tqdm import tqdm
import glob
from PIL import Image
from torchvision import transforms
from torchvision.models import resnet50
import random
'''
1. 写一个dataset √
2. load他并且所有数据提取特征 √
3. 保存特征和标签txt √
4. t-sne
'''
class MyModel(nn.Module):
    """ResNet-50 backbone with the final fc layer removed, used as a feature
    extractor (output is the flattened pooled feature map)."""

    def __init__(self):
        super(MyModel, self).__init__()
        backbone = resnet50(pretrained = False)
        self.net = nn.Sequential()
        # Copy every child module except the classification head.
        for name, module in backbone.named_children():
            if name != "fc":
                self.net.add_module(name, module)
        self.net.eval()

    def forward(self, x):
        batch = x.size(0)
        features = self.net(x)
        return features.view(batch, -1)
def glob2(pattern1, pattern2):
    """Return the matches of both glob patterns combined into one list."""
    return glob.glob(pattern1) + glob.glob(pattern2)
class T_SNEDataset(Dataset):
    """Randomly samples face crops for t-SNE feature extraction, balanced
    50/50 between live ("Live_Person") and the spoof categories in sub_dirs."""
    def __init__(self, root = "/ssd/xingduan/BCTC_ALL/Data",sub_dirs = ["2D_Plane","2D_Plane_Mask", "3D_Head_Model_Silicone", "3D_Head_Model_Wax", "Half_Mask"]):
        self.root = root
        self.sub_dirs = sub_dirs
        # Positive samples: RGB and IR frames of live persons.
        self.pos_filelist = glob2("{}/{}/*_rgb.jpg".format(root, "Live_Person"), "{}/{}/*_ir.jpg".format(root, "Live_Person"))
        # Negative samples: every spoof category listed in sub_dirs.
        self.neg_filelist = []
        for sub_dir in sub_dirs:
            self.neg_filelist.extend(glob2("{}/{}/*_rgb.jpg".format(root, sub_dir), "{}/{}/*_ir.jpg".format(root, sub_dir)))
        self.transform = transforms.Compose([
            transforms.Resize((224,224)),
            transforms.ToTensor()
        ])
    def __getitem__(self, idx):
        # Note: idx is ignored -- a random positive or negative sample is
        # drawn with probability 0.5 each.
        p = random.randint(0,1)
        if p == 0:
            q = random.randint(0, len(self.pos_filelist) - 1)
            l = self.pos_filelist[q].split()
        else:
            q = random.randint(0, len(self.neg_filelist) -1 )
            l = self.neg_filelist[q].split()
        # NOTE(review): .split() on a path then [0] only works while paths
        # contain no whitespace -- confirm this is intentional.
        img_path = l[0]
        img = Image.open(os.path.join(self.root, img_path)).convert("RGB")
        img_w, img_h = img.size
        ymin,ymax,xmin,xmax = 92, 188, 42, 138 # fixed crop of the whole face
        img = img.crop([xmin,ymin,xmax,ymax])
        img = self.transform(img)
        if "Live_Person" in img_path:
            label = 1 # positive (live) sample
        else:
            label = 0  # negative (spoof) sample
        return img , torch.tensor(label, dtype = torch.long)
    def __len__(self):
        return len(self.pos_filelist) + len(self.neg_filelist)
if __name__ == "__main__":
da = T_SNEDataset()
model = MyModel()
model.load_state_dict(torch.load("./ckpt/149.pth"))
model.eval()
model.cuda(9)
features, labels = [], []
with torch.no_grad():
for _ in tqdm(range(1000)):
idx = random.randint(0, len(da) -1)
img, label = da[idx]
img = img.cuda(9)
label = label.cuda(9)
img = img.unsqueeze(0)
label = label.unsqueeze(0)
feat = model(img)
features.append(feat.data.cpu())
labels.append(label.data.cpu())
features = torch.cat(features, dim = 0).numpy()
labels = torch.cat(labels, dim = 0).numpy()
np.savetxt("x.txt",features)
np.savetxt("labels.txt", labels)
|
import csv
from os import path
# Clean a Batmon CSV log: keep the first "Day" header row and every
# subsequent data row, writing the result to BatmonLogCleaned.csv.
file_path = input("Enter The file path: ")
active = True
while active:
    # Parenthesize the extension test. The original
    # `exists(p) and p.endswith('CSV') or p.endswith('csv')` bound as
    # `(exists and 'CSV') or 'csv'`, so a non-existent *.csv path slipped
    # through the check and crashed on open().
    if path.exists(file_path) and (file_path.endswith('CSV') or file_path.endswith('csv')):
        print('File will be ready @ BatmonLogCleaned.csv in root folder')
        with open(file_path, 'r+') as inp, open('BatmonLogCleaned.csv', 'w+') as out:
            cleaned_file = csv.writer(out)
            input_file = csv.reader(inp)
            # Skip the first line; the default stops an empty file from
            # raising StopIteration at top level.
            next(input_file, None)
            row_starter = "Day"
            headerCount = 0
            for row in input_file:
                # Guard `row` so blank CSV lines do not raise IndexError.
                if row and row[0] == row_starter and headerCount == 0:
                    headerCount += 1
                    cleaned_file.writerow(row)
                if row and row[0] != row_starter and headerCount >= 1:
                    cleaned_file.writerow(row)
        break
    else:
        print('File does not exist or invalid file type exiting program')
        active = False
|
# Note this assumes undirected and unweighted right now TMC
import sys
import networkx
import random
random.seed(1234)
class Tab2GMLPlugin:
    """Convert a two-column tab-separated edge list into a GML graph.

    Assumes the graph is undirected and unweighted (see module note).
    """

    def input(self, file):
        """Read `file` (one edge per line: node1<TAB>node2) into self.G.

        Insertion order of nodes and edges is preserved, matching the
        original tuple-based implementation, but membership tests now use
        sets so loading is O(n) instead of O(n^2). The file handle is
        closed deterministically (the original leaked it).
        """
        nodes = []
        node_seen = set()
        edges = []
        edge_seen = set()
        with open(file, 'r') as tabfile:
            for line in tabfile:
                elements = line.split("\t")
                node1 = elements[0].strip()
                node2 = elements[1].strip()
                if node1 not in node_seen:
                    node_seen.add(node1)
                    nodes.append(node1)
                if node2 not in node_seen:
                    node_seen.add(node2)
                    nodes.append(node2)
                edge = (node1, node2)
                if edge not in edge_seen:
                    edge_seen.add(edge)
                    edges.append(edge)
        # OrderedGraph keeps node/edge insertion order in the output.
        self.G = networkx.OrderedGraph()
        self.G.add_nodes_from(nodes)
        self.G.add_edges_from(edges)

    def run(self):
        """No processing step for this plugin."""
        pass

    def output(self, file):
        """Write the graph built by input() to `file` in GML format."""
        networkx.write_gml(self.G, file)
|
from __future__ import print_function
import sys
# Call like this: ngram -ppl <(sed 's/ //g' contextWordCartProd ) -lm train.lm -debug 1 | python ./thisscript
class switch(object):
    """Minimal C-style switch/case helper.

    Usage:
        for case in switch(value):
            if case(1): ...; break
            if case(2): ...; break

    Once a case matches, `fall` is set, so every later case() call also
    returns True (C-like fall-through) until a `break` leaves the loop.
    """

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # Fix: a bare `raise StopIteration` inside a generator is turned
        # into RuntimeError since PEP 479 (Python 3.7+); `return` is the
        # correct way to end the generator.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
lookupFile = 'context.lookup'  # NOTE(review): unused in this script; kept for compatibility
contextDir = 'contexts'        # NOTE(review): unused in this script; kept for compatibility
contextRef = {}                # NOTE(review): unused in this script; kept for compatibility
ngram = ''
# Initialized so case(0) cannot hit an unbound name on malformed input.
prob = '0'
phase = 1
# Phase machine over `ngram -debug 1` output: 1 = n-gram line,
# 2 = per-token probability lines, 3 = "N j K" counts, 4 = sentence
# perplexity, 0 = blank separator (emit "ngram<TAB>prob").
for lineNr, line in enumerate(sys.stdin):
    line = line.rstrip()
    for case in switch(phase):
        if case(1):  # n-gram line
            ngram = ' '.join(line.split())
            phase = 2
            break
        if case(2):  # n-gram probability lines
            if line.startswith('\tp('):
                try:
                    prob = line.split('gram] ')[1].split(' [ ')[0]
                except IndexError:
                    # Fix: the original assigned int 0 here, which made
                    # `ngram + '\t' + prob` raise TypeError in case(0).
                    prob = '0'
                break
            else:
                phase = 3  # no break: fall through into case(3)
        if case(3):  # numbers N j K
            phase = 4
            break
        if case(4):  # sentence perplexity
            phase = 0
            break
        if case(0):  # empty line -> emit the collected result
            phase = 1
            output = ngram + '\t' + prob
            print(output, end='\n')
            break
|
import unittest
from problems.problem46 import Trie
class Test(unittest.TestCase):

    def test(self):
        """Insert/search/startsWith round-trip for problem 46's Trie."""
        words = Trie()
        words.insert('apple')
        # The full word is found; the bare prefix is not a stored word ...
        self.assertTrue(words.search('apple'))
        self.assertFalse(words.search('app'))
        self.assertTrue(words.startsWith('app'))
        # ... until it is inserted explicitly.
        words.insert('app')
        self.assertTrue(words.search('app'))
|
#!/usr/bin/env python
import argparse
import struct
import sys
import copy
import ipdb
import rospy
from std_msgs.msg import (
Empty,
Header,
Int64
)
import copy
from baxter_core_msgs.msg import EndpointState
from sensor_msgs.msg import JointState
from geometry_msgs.msg import WrenchStamped
from smach_based_introspection_framework.msg import (
Tag_MultiModal, tactile_static
)
from smach_based_introspection_framework.srv import (
State_Switch,
State_SwitchResponse
)
hmm_state = None
def state_switch_handle(req):
    """Service callback for /hmm_state_switch.

    Stores the requested state in the module-level `hmm_state` (read by
    the publishing loop in main) and acknowledges the switch.
    """
    global hmm_state
    response = State_SwitchResponse()
    response.finish.data = True
    hmm_state = req.state
    rospy.loginfo("tag is changed to %d" %req.state)
    return response
def main():
    """Publish the current HMM state tag on /tag_multimodal at 100 Hz.

    Also exposes the /hmm_state_switch service, whose callback updates the
    module-level `hmm_state` that is published here.
    """
    global hmm_state
    # The original also declared shared_endpoint_state / shared_joint_state /
    # shared_wrench_stamped as globals, but they are never read or written in
    # this function, so those declarations were removed.
    hmm_state = 0
    publishing_rate = 100  # Hz
    rospy.init_node("topic_multimodal", anonymous=True)
    rospy.loginfo("tag_multimodal_topic_and_service.py starts")
    pub = rospy.Publisher("/tag_multimodal",Tag_MultiModal, queue_size=10)
    state_switch = rospy.Service('/hmm_state_switch', State_Switch, state_switch_handle)
    r = rospy.Rate(publishing_rate)
    while not rospy.is_shutdown():
        tag_multimodal = Tag_MultiModal()
        tag_multimodal.tag = hmm_state
        tag_multimodal.header = Header()
        tag_multimodal.header.stamp = rospy.Time.now()
        pub.publish(tag_multimodal)
        try:
            r.sleep()
        except rospy.exceptions.ROSInterruptException:
            # Node shutdown during sleep: exit the publishing loop cleanly.
            break
    rospy.loginfo("tag_multimodal_topic_and_service.py exits")
if __name__ == '__main__':
    # Script entry point: start the tag publisher node.
    main()
|
# pylint: disable=unused-argument
from dagster import Failure, InputDefinition, Output, OutputDefinition, pipeline, solid
conditional = True
@solid(output_defs=[OutputDefinition(int, "a", is_required=False)])
def my_solid(context):
    # Output "a" is declared optional because it is only yielded when the
    # module-level `conditional` flag is set.
    if conditional:
        yield Output(1, "a")
@solid(
    output_defs=[
        OutputDefinition(int, "a", is_required=False),
        OutputDefinition(int, "b", is_required=False),
    ]
)
def branching_solid(context):
    # Emits exactly one of its two optional outputs per run, depending on
    # the module-level `conditional` flag.
    if conditional:
        yield Output(1, "a")
    else:
        yield Output(2, "b")
@solid(input_defs=[InputDefinition("inp", int)])
def path_1(context, inp):
    # Downstream consumer of output "a"; a no-op used to demonstrate branching.
    pass
@solid(input_defs=[InputDefinition("inp", int)])
def path_2(context, inp):
    # Downstream consumer of output "b"; a no-op used to demonstrate branching.
    pass
@pipeline
def my_pipeline():
    # Fix: the original unpacked two outputs from my_solid(), which declares
    # only the single optional output "a". branching_solid declares both "a"
    # and "b" and emits exactly one of them, so only the matching downstream
    # path executes.
    a, b = branching_solid()
    path_1(a)
    path_2(b)
def do_dangerous_thing():
    # Always raises; stands in for an operation that can fail.
    raise Exception("my_exception")
class MyException(Exception):
    """Example domain-specific exception caught by exception_handling_solid."""
    pass
@solid
def exception_handling_solid(context):
    try:
        do_dangerous_thing()
    except MyException as e:
        # Re-raise as a dagster Failure, chaining the original exception.
        # NOTE: metadata_entries=[...] is a docs placeholder (literal
        # Ellipsis), not runnable metadata.
        raise Failure("Failure description", metadata_entries=[...]) from e
# Or in Python 2
# six.raise_from(Failure("Failure description"), metadata_entries=[...], e)
|
import pandas as pd
def consolidate_home_ownership_values(val):
    """Normalize a home-ownership label.

    Maps the raw value "HaveMortgage" to "Home Mortgage"; every other value
    is returned unchanged.

    Bug fixed: the original called str.replace (which returns a new string)
    without using the result and fell off the end of the `if` branch, so
    "HaveMortgage" was silently mapped to None.
    """
    if val == "HaveMortgage":
        return "Home Mortgage"
    return val
# TODO - find a more generic way
def consolidate_purpose_values(val):
    """Map raw snake_case purpose labels to their Title Case equivalents.

    Unknown values pass through unchanged.
    """
    titled = {
        "other": "Other",
        "major_purchase": "Major Purchase",
        "small_business": "Small Business",
        "renewable_energy": "Renewable Energy",
        "wedding": "Wedding",
        "vacation": "Vacation",
        "moving": "Moving",
    }
    return titled.get(val, val)
def replace_col_space_with_underscore(df: pd.DataFrame) -> pd.DataFrame:
    """Rename columns in place so spaces become underscores; return df."""
    renamed = []
    for name in df.columns:
        renamed.append(name.replace(' ', '_'))
    df.columns = renamed
    return df
def replace_space_in_data(df: pd.DataFrame) -> pd.DataFrame:
    """Stringify every cell and replace spaces with underscores (in place)."""
    def underscore(cell):
        return str(cell).replace(" ", "_")
    for column in df.columns:
        df[column] = df[column].apply(underscore)
    return df
def replace_unsupported_chars(df: pd.DataFrame) -> pd.DataFrame:
    """Replace '>', '<', '+' in column names with 'gt', 'lt', 'plus' (in place)."""
    def sanitize(name):
        for bad, good in ((">", "gt"), ("<", "lt"), ("+", "plus")):
            name = name.replace(bad, good)
        return name
    df.columns = [sanitize(c) for c in df.columns]
    return df
if __name__ == "__main__":
train_data = pd.read_csv("../data/raw_bank_loan_status.csv")
train_data_cleaned = replace_col_space_with_underscore(train_data)
# Handle nan values in y and X
train_data_cleaned.dropna(axis=0, subset=["Loan_Status"], how="any", inplace=True)
# drop not useful features
train_data_cleaned.drop(["Loan_ID", "Customer_ID", "Tax_Liens"], axis=1, inplace=True)
consolidate_home_ownership = train_data_cleaned.Home_Ownership.apply(consolidate_home_ownership_values)
train_data_cleaned["Home_Ownership"] = consolidate_home_ownership
consolidate_purpose = train_data_cleaned.Purpose.apply(consolidate_purpose_values)
train_data_cleaned["Purpose"] = consolidate_purpose
train_data_cleaned = replace_space_in_data(train_data_cleaned)
train_data_cleaned = replace_unsupported_chars(train_data_cleaned)
train_data_cleaned.to_csv("../data/bank_loan_status.csv")
|
#!/usr/bin/env python
"""
Created by: Lee Bergstrand
Description: A program that extracts the protein annotations from a fasta file and searches these
annotations using HMMsearch and an HMM file. It then stores hits along with organism
information (gathered from a csv file) in a sqlite3 database.
Requirements: - This software requires the Biopython module: http://biopython.org/wiki/Download
- This software requires HMMER 3.1 or later.
- This software requires sqlLite3 or later.
"""
# Imports & Setup:
import argparse
import sqlite3
from hmm_parser import *
from lib import *
from os import path
from multiprocessing import cpu_count
cpu_num = cpu_count() # Gets the number of processor cores for HMMER.
# ==========
# Functions:
# ==========
# ----------------------------------------------------------------------------------------
def main(args, processors):
    """Run HMM searches on the input FASTA and store results in sqlite3.

    :param args: parsed argparse namespace (in_file, in_csv, hmm_models,
                 database, processes).
    :param processors: number of CPU cores available for hmmsearch.

    Exits with status 1 on sqlite3 errors or a missing database file.
    """
    # `sys.exit` is used below, but `sys` was never imported explicitly in
    # this file (it may or may not leak through the star imports); import it
    # locally so the error paths cannot raise NameError.
    import sys

    input_file_path = str(args.in_file[0])
    organism_csv_path = str(args.in_csv[0])
    hmm_model_paths = list(args.hmm_models)
    database_path = str(args.database[0])
    user_processes = int(args.processes[0])

    # Only use the user-specified process count if it is less than the
    # number of cpu cores. Use all cpu cores by default.
    if 0 < user_processes < processors:
        processors = user_processes

    print('\nHMMER-DB')
    print('====================================================')
    check_extensions(input_file_path, organism_csv_path, hmm_model_paths, database_path)
    print('')

    sequence_record_list = extract_sequence_records(input_file_path, 'fasta')
    fasta_string = generate_fasta_string(sequence_record_list)
    fasta_dict = generate_fasta_dict(sequence_record_list)

    # Organism rows in the CSV are keyed by the FASTA file name (sans extension).
    organism_data_dict = extract_csv_dict(organism_csv_path)
    organism_file_name = path.basename(input_file_path).split('.')[0]
    organism_data = organism_data_dict[organism_file_name]
    organism_accession = organism_data[0]
    print('')

    # Accumulate hits and protein rows across every HMM model before any
    # database writes, so the sqlite3 transaction is a single batch.
    hmm_hit_list = []
    protein_data_list = []
    for hmm_path in hmm_model_paths:
        hits_to_add, proteins_to_add = run_hmm_search(hmm_path, fasta_string, fasta_dict,
                                                      organism_accession, processors)
        hmm_hit_list.extend(hits_to_add)
        protein_data_list.extend(proteins_to_add)

    # To account for sqlite3 table lock on write, the timeout
    # is delayed in proportion to the number of CPUs used.
    timeout_for_parallelism = 225 * processors
    if path.isfile(database_path):
        try:
            print('')
            hmm_db = sqlite3.connect(database_path, timeout=timeout_for_parallelism)
            print(">> Opening sqlite3 file: " + database_path)
            cursor = hmm_db.cursor()
            print(">> Inserting organism info...")
            insert_organism_info(cursor, organism_data)
            print(">> Inserting cached protein data...")
            insert_proteins(cursor, protein_data_list)
            print(">> Inserting cached HMM hit data...")
            insert_hits(cursor, hmm_hit_list)
            hmm_db.commit()
            hmm_db.close()
        except sqlite3.Error as error:
            print("sqlite3 Error: " + str(error))
            print("The program will be aborted.")
            sys.exit(1)
    else:
        print("Failed to open " + database_path)
        sys.exit(1)
    print("\n>> Done!")
# ------------------------------------------------------------------------------------
def run_hmm_search(hmm_path, fasta_string, fasta_dict, organism_accession, processes):
    """
    Runs the HMM search using hmmsearch.
    :param hmm_path: Path to the HMM search file.
    :param fasta_string: A FASTA string containing proteins to be searched.
    :param fasta_dict: A dict of FASTA strings containing proteins to be searched keyed by sequence ID.
    :param organism_accession: The accession of the organism.
    :param processes: The number of threads to use when running hmmsearch.
    :return: list of HMM hit objects and list of lists of protein data.
    """
    print('')
    # The HMM's name is taken from its file name (without extension).
    hmm_name = path.basename(hmm_path).split('.')[0]
    hmm_length = get_hmm_length(hmm_path)
    print('>> Running hmmsearch on ' + str(processes) + ' CPUs...')
    hmm_results_string = hmm_search(fasta_string, hmm_path, processes)
    hmm_hit_list = parse_hmmsearch_results(hmm_results_string, hmm_name, hmm_length)
    print('>> Filtering HMM hits...')
    # Drop low-quality hits before collecting their protein records.
    filtered_hmm_hit_list = filter_hmm_hit_list(hmm_hit_list)
    protein_data_list = get_hit_protein_data(filtered_hmm_hit_list, fasta_dict, organism_accession)
    print('>> Caching HMM hit and subject proteins data...')
    return filtered_hmm_hit_list, protein_data_list
# ----------------------------------------------------------------------------------------
if __name__ == '__main__':
    # CLI entry point: parse arguments, validate that every required input
    # was supplied, then run the pipeline (or print help and exit).
    descriptor = """
    A program that extracts the protein annotations from a fasta file and searches these
    annotations using HMMsearch and an HMM file. It then stores hits along with organism
    information (gathered from a csv file) in a sqlite3 database.
    """
    parser = argparse.ArgumentParser(description=descriptor)
    parser.add_argument('-i', '--in_file', metavar='FASTA', nargs=1, help='''
    The input FASTA file containing protein sequences (Created by GenbankToFASTAandOrganismTableRow.py).''')
    parser.add_argument('-c', '--in_csv', metavar='CSV', nargs=1, help='''
    The CSV file containing the information of all input organism (Created by GenbankToFASTAandOrganismTableRow.py).''')
    parser.add_argument('-m', '--hmm_models', metavar='HMM', nargs='+', help='''
    The HMM model files representing proteins''')
    parser.add_argument('-d', '--database', metavar='DATABASE', nargs=1, help='''
    The input sqlite3 database for which the organism info and HMM results are writen to.''')
    parser.add_argument('-p', '--processes', metavar='PROCESSES', nargs=1, default=[cpu_num], help='''
    Number of parallel processes to be used by hmmsearch.''')
    cli_args = parser.parse_args()

    # At minimum we require all CLI inputs.
    proceed = True
    if cli_args.in_file is None:
        print("Error: Missing input FASTA file path...")
        proceed = False
    if cli_args.in_csv is None:
        print("Error: Missing CSV file path...")
        proceed = False
    if cli_args.hmm_models is None:
        print("Error: Missing HMM file paths...")
        proceed = False
    if cli_args.database is None:
        print("Error: Missing sqlite3 database path...")
        proceed = False
    if proceed:
        main(cli_args, cpu_num)
    else:
        # One or more required arguments missing: show usage instead.
        print("")
        parser.print_help()
        print("")
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse Transformer modules."""
from absl import logging
from flax import nn
import jax.numpy as jnp
from lra_benchmarks.models.layers import common_layers
from lra_benchmarks.models.sparse_transformer import sparse_attention
class SparseTransformerBlock(nn.Module):
  """Sparse Transformer layer (https://arxiv.org/pdf/1904.10509.pdf)."""

  def apply(self,
            inputs,
            qkv_dim,
            mlp_dim,
            num_heads,
            attention_patterns,
            dtype=jnp.float32,
            inputs_segmentation=None,
            padding_mask=None,
            dropout_rate=0.1,
            attention_dropout_rate=0.1,
            deterministic=False,
            use_cls_token=False):
    """Applies the SparseTransformerBlock module.

    All Sparse Transformer attention patterns (both encoder and decoder) are
    causal. To apply the sparse attention pattern reported in the paper
    on the EnWik8 data set:
      attention_patterns = [
          sparse_attention.Fixed1Pattern(block_size=128),
          sparse_attention.Fixed2Pattern(block_size=128, c=32)
      ].

    Args:
      inputs: input data of size `[bs, seq_len, features]`.
      qkv_dim: dimension of the query/key/value.
      mlp_dim: dimension of the mlp on top of attention block.
      num_heads: number of attention heads.
      attention_patterns: list of sparse attention patterns to apply.
      dtype: the dtype of the computation (default: float32).
      inputs_segmentation: input segmentation info for packed examples.
      padding_mask: bool, mask padding tokens.
      dropout_rate: dropout rate
      attention_dropout_rate: dropout rate for attention weights
      deterministic: if true, apply dropout else don't.
      use_cls_token: using cls token or not.

    Returns:
      output of shape `[bs, seq_len, mlp_dim]`.
    """
    assert inputs.ndim == 3
    # Pre-LayerNorm self-attention sub-block with a residual connection.
    x = nn.LayerNorm(inputs)
    x = sparse_attention.SparseSelfAttention(
        x,
        num_heads=num_heads,
        qkv_features=qkv_dim,
        attention_patterns=attention_patterns,
        dtype=dtype,
        segmentation=inputs_segmentation,
        padding_mask=padding_mask,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        bias=False,
        broadcast_dropout=False,
        dropout_rate=attention_dropout_rate,
        deterministic=deterministic,
        use_cls_token=use_cls_token)
    x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
    x = x + inputs
    # Feed-forward sub-block, also pre-norm with a residual connection.
    y = nn.LayerNorm(x)
    y = common_layers.MlpBlock(
        y,
        mlp_dim=mlp_dim,
        dtype=dtype,
        dropout_rate=dropout_rate,
        deterministic=deterministic)
    return x + y
class SparseTransformerEncoder(nn.Module):
  """Sparse Transformer Model Encoder.

  The attention pattern for encoding is causal.
  """

  def apply(self,
            inputs,
            vocab_size,
            attention_patterns,
            inputs_positions=None,
            inputs_segmentation=None,
            shared_embedding=None,
            use_bfloat16=False,
            emb_dim=512,
            num_heads=8,
            dtype=jnp.float32,
            num_layers=6,
            qkv_dim=512,
            mlp_dim=2048,
            max_len=512,
            train=True,
            dropout_rate=0.1,
            attention_dropout_rate=0.1,
            learn_pos_emb=False,
            classifier=False,
            classifier_pool='CLS',
            num_classes=10):
    """Applies model on the inputs.

    Args:
      inputs: input data.
      vocab_size: size of the vocabulary.
      attention_patterns: list of sparse attention patterns to use.
      inputs_positions: input subsequence positions for packed examples.
      inputs_segmentation: input segmentation info for packed examples.
      shared_embedding: a shared embedding layer to use.
      use_bfloat16: bool: whether use bfloat16.
      emb_dim: dimension of embedding
      num_heads: number of heads
      dtype: the dtype of the computation (default: float32)
      num_layers: number of layers
      qkv_dim: dimension of the query/key/value
      mlp_dim: dimension of the mlp on top of attention block
      max_len: maximum length.
      train: if it is training,
      dropout_rate: dropout rate
      attention_dropout_rate: dropout rate for attention weights
      learn_pos_emb: boolean, if learn the positional embedding or use the
        sinusoidal positional embedding.
      classifier: boolean, for classification mode (output N-class logits)
      classifier_pool: str, supports "MEAN", "MAX" pooling.
      num_classes: int, number of classification classes.

    Returns:
      output of the encoder or logits if classifier_mode is true.
    """
    assert inputs.ndim == 2  # (batch, len)

    # Padding Masks: token id 0 is treated as padding.
    src_padding_mask = (inputs > 0)[..., None]

    use_cls_token = False
    if classifier_pool == 'CLS':
      use_cls_token = True
      logging.info('Setting use cls token to true')

    # Input Embedding
    if shared_embedding is None:
      input_embed = nn.Embed.partial(
          num_embeddings=vocab_size,
          features=emb_dim,
          embedding_init=nn.initializers.normal(stddev=1.0))
    else:
      input_embed = shared_embedding
    x = inputs.astype('int32')
    x = input_embed(x)

    if classifier and classifier_pool == 'CLS':
      # Prepend a learned CLS token to every sequence and grow max_len and
      # the padding mask by one position to match.
      cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros)
      cls = jnp.tile(cls, [x.shape[0], 1, 1])
      x = jnp.concatenate([cls, x], axis=1)
      max_len += 1
      src_padding_mask = jnp.concatenate(
          [src_padding_mask[:, :1], src_padding_mask], axis=1)

    pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None
    x = common_layers.AddPositionEmbs(
        x,
        inputs_positions=inputs_positions,
        posemb_init=pe_init,
        max_len=max_len,
        name='posembed_input')
    x = nn.dropout(x, rate=dropout_rate, deterministic=not train)

    if use_bfloat16:
      # Cast activations to bfloat16 and compute the stack in that dtype.
      x = x.astype(jnp.bfloat16)
      dtype = jnp.bfloat16
    else:
      dtype = jnp.float32

    # Input Encoder
    for lyr in range(num_layers):
      x = SparseTransformerBlock(
          x,
          qkv_dim=qkv_dim,
          mlp_dim=mlp_dim,
          num_heads=num_heads,
          attention_patterns=attention_patterns,
          dtype=dtype,
          inputs_segmentation=inputs_segmentation,
          padding_mask=src_padding_mask,
          dropout_rate=dropout_rate,
          attention_dropout_rate=attention_dropout_rate,
          deterministic=not train,
          name=f'encoderblock_{lyr}',
          use_cls_token=use_cls_token)
    encoded = nn.LayerNorm(x, dtype=dtype, name='encoder_norm')

    if classifier:
      encoded = common_layers.classifier_head(
          encoded, num_classes, mlp_dim, pooling_mode=classifier_pool)
    return encoded
class SparseTransformerDualEncoder(nn.Module):
  """Sparse Transformer Model for Matching (dual encoding) tasks."""

  def apply(self,
            inputs1,
            inputs2,
            attention_patterns,
            vocab_size=None,
            inputs1_positions=None,
            inputs2_positions=None,
            inputs1_segmentation=None,
            inputs2_segmentation=None,
            use_bfloat16=False,
            emb_dim=512,
            num_heads=8,
            num_layers=6,
            qkv_dim=512,
            mlp_dim=2048,
            max_len=2048,
            train=False,
            dropout_rate=0.1,
            attention_dropout_rate=0.1,
            classifier=True,
            classifier_pool='CLS',
            num_classes=2,
            interaction=None):
    """Applies Transformer model on text similarity.

    A deliberate choice to distinguish this from NLI because
    we may want to do different things to the model later. Dual Encoding
    mode enforces that we do not do cross attention between pairs.

    Args:
      inputs1: input data.
      inputs2: target data.
      attention_patterns: attention patterns.
      vocab_size: size of the input vocabulary.
      inputs1_positions: input subsequence positions for packed examples.
      inputs2_positions: target subsequence positions for packed examples.
      inputs1_segmentation: input segmentation info for packed examples.
      inputs2_segmentation: target segmentation info for packed examples.
      use_bfloat16: bool: whether use bfloat16.
      emb_dim: dimension of embedding.
      num_heads: number of heads.
      num_layers: number of layers.
      qkv_dim: dimension of the query/key/value.
      mlp_dim: dimension of the mlp on top of attention block.
      max_len: maximum length.
      train: whether it is training.
      dropout_rate: dropout rate.
      attention_dropout_rate: dropout rate for attention weights.
      classifier: boolean, to use classifier.
      classifier_pool: str, supports "MEAN", "MAX" pooling.
      num_classes: int, number of classification classes.
      interaction: str

    Returns:
      output of a transformer decoder.
    """
    # One shared encoder tower processes both inputs (siamese encoding).
    # NOTE(review): inputs1_positions / inputs1_segmentation are baked into
    # the shared encoder and therefore also applied to inputs2 — confirm
    # this is intended for packed examples.
    encoder = SparseTransformerEncoder.shared(
        attention_patterns=attention_patterns,
        inputs_positions=inputs1_positions,
        inputs_segmentation=inputs1_segmentation,
        vocab_size=vocab_size,
        use_bfloat16=use_bfloat16,
        emb_dim=emb_dim,
        num_heads=num_heads,
        num_layers=num_layers,
        qkv_dim=qkv_dim,
        mlp_dim=mlp_dim,
        max_len=max_len,
        train=train,
        dropout_rate=dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        name='encoder')
    inputs1_encoded = encoder(inputs1)
    inputs2_encoded = encoder(inputs2)
    # Pool both towers and combine them into classification logits.
    encoded = common_layers.classifier_head_dual(
        inputs1_encoded,
        inputs2_encoded,
        num_classes,
        mlp_dim,
        pooling_mode=classifier_pool,
        interaction=interaction)
    return encoded
class SparseTransformerDecoder(nn.Module):
  """Sparse Transformer Decoder."""

  def apply(self,
            inputs,
            vocab_size,
            attention_patterns,
            emb_dim=512,
            num_heads=8,
            dtype=jnp.float32,
            num_layers=6,
            qkv_dim=512,
            mlp_dim=2048,
            max_len=2048,
            train=False,
            shift=True,
            dropout_rate=0.1,
            attention_dropout_rate=0.1):
    """Applies Sparse Transformer model on the inputs.

    Args:
      inputs: input data
      vocab_size: size of the vocabulary
      attention_patterns: list of attention patterns to use.
      emb_dim: dimension of embedding
      num_heads: number of heads
      dtype: the dtype of the computation (default: float32)
      num_layers: number of layers
      qkv_dim: dimension of the query/key/value
      mlp_dim: dimension of the mlp on top of attention block
      max_len: maximum length.
      train: bool: if model is training.
      shift: bool: if we right-shift input - this is only disabled for fast,
        looped single-token autoregressive decoding.
      dropout_rate: dropout rate
      attention_dropout_rate: dropout rate for attention weights

    Returns:
      output of a transformer decoder.
    """
    # NOTE(review): the padding mask is computed from the *unshifted*
    # inputs, before common_layers.shift_right below — confirm intended.
    padding_mask = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)[..., None]
    assert inputs.ndim == 2  # (batch, len)
    x = inputs
    if shift:
      # Right-shift targets for teacher forcing; disabled for incremental
      # single-token decoding (see `shift` docstring above).
      x = common_layers.shift_right(x)
    x = x.astype('int32')
    x = common_layers.Embed(
        x, num_embeddings=vocab_size, features=emb_dim, name='embed')
    x = common_layers.AddPositionEmbs(
        x,
        max_len=max_len,
        posemb_init=common_layers.sinusoidal_init(max_len=max_len),
        cache=None)
    x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
    for _ in range(num_layers):
      x = SparseTransformerBlock(
          x,
          qkv_dim=qkv_dim,
          mlp_dim=mlp_dim,
          num_heads=num_heads,
          attention_patterns=attention_patterns,
          padding_mask=padding_mask,
          dropout_rate=dropout_rate,
          attention_dropout_rate=attention_dropout_rate,
          deterministic=not train,
          cache=None,
      )
    x = nn.LayerNorm(x)
    # Project back to vocabulary logits.
    logits = nn.Dense(
        x,
        vocab_size,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6))
    return logits
|
from unittest import TestCase
from unittest.mock import patch, Mock
from llc1_document_api import blueprints
class TestBlueprints(TestCase):

    @patch('llc1_document_api.blueprints.general')
    @patch('llc1_document_api.blueprints.generate')
    def test_register_blueprints(self, generate_mock, general_mock):
        """Should register the expected blueprints."""
        fake_app = Mock()
        fake_app.register_blueprint = Mock()

        blueprints.register_blueprints(fake_app)

        # Both blueprints must have been registered (order irrelevant).
        for expected in (generate_mock, general_mock):
            fake_app.register_blueprint.assert_any_call(expected)
|
import uuid
import ndjson
import pytest
import requests
from labelbox.schema.annotation_import import AnnotationImportState, MEAPredictionImport
"""
- Here we only want to check that the uploads are calling the validation
- Then with unit tests we can check the types of errors raised
"""
def check_running_state(req, name, url=None):
    """Assert that a freshly created annotation import is RUNNING.

    Checks the name, the absence of error/status files, and (when `url`
    is given) the input file URL.
    """
    assert req.state == AnnotationImportState.RUNNING
    assert req.name == name
    assert req.error_file_url is None
    assert req.status_file_url is None
    if url is not None:
        assert req.input_file_url == url
def test_create_from_url(model_run):
    """Predictions can be imported directly from a hosted ndjson URL."""
    import_name = str(uuid.uuid4())
    predictions_url = "https://storage.googleapis.com/labelbox-public-bucket/predictions_test_v2.ndjson"
    annotation_import = model_run.add_predictions(name=import_name,
                                                  predictions=predictions_url)
    assert annotation_import.model_run_id == model_run.uid
    check_running_state(annotation_import, import_name, predictions_url)
def test_create_from_objects(model_run, object_predictions):
    """Predictions can be imported from in-memory prediction objects."""
    import_name = str(uuid.uuid4())
    annotation_import = model_run.add_predictions(
        name=import_name, predictions=object_predictions)
    assert annotation_import.model_run_id == model_run.uid
    check_running_state(annotation_import, import_name)
    # Uploaded file must round-trip the submitted predictions exactly.
    assert_file_content(annotation_import.input_file_url, object_predictions)
def test_create_from_local_file(tmp_path, model_run, object_predictions):
    """Predictions can be imported from a local ndjson file."""
    import_name = str(uuid.uuid4())
    local_path = tmp_path / f"{import_name}.ndjson"
    with local_path.open("w") as fh:
        ndjson.dump(object_predictions, fh)
    annotation_import = model_run.add_predictions(name=import_name,
                                                  predictions=str(local_path))
    assert annotation_import.model_run_id == model_run.uid
    check_running_state(annotation_import, import_name)
    # Uploaded file must round-trip the submitted predictions exactly.
    assert_file_content(annotation_import.input_file_url, object_predictions)
def test_get(client, model_run):
    """A running import can be looked up again by model-run id + name."""
    import_name = str(uuid.uuid4())
    predictions_url = "https://storage.googleapis.com/labelbox-public-bucket/predictions_test_v2.ndjson"
    model_run.add_predictions(name=import_name, predictions=predictions_url)
    annotation_import = MEAPredictionImport.from_name(
        client, model_run_id=model_run.uid, name=import_name)
    assert annotation_import.model_run_id == model_run.uid
    check_running_state(annotation_import, import_name, predictions_url)
@pytest.mark.slow
def test_wait_till_done(model_run_predictions, model_run):
    # End-to-end: submit predictions, block until the import finishes, then
    # verify inputs/errors/statuses reported by the service match what we sent.
    name = str(uuid.uuid4())
    annotation_import = model_run.add_predictions(
        name=name, predictions=model_run_predictions)

    assert len(annotation_import.inputs) == len(model_run_predictions)
    annotation_import.wait_until_done()
    assert annotation_import.state == AnnotationImportState.FINISHED
    # Check that the status files are being returned as expected
    assert len(annotation_import.errors) == 0
    assert len(annotation_import.inputs) == len(model_run_predictions)
    # Inputs echoed back must carry the same uuids as the submitted predictions.
    input_uuids = [
        input_annot['uuid'] for input_annot in annotation_import.inputs
    ]
    inference_uuids = [pred['uuid'] for pred in model_run_predictions]
    assert set(input_uuids) == set(inference_uuids)
    assert len(annotation_import.statuses) == len(model_run_predictions)
    for status in annotation_import.statuses:
        assert status['status'] == 'SUCCESS'
    # Statuses must cover exactly the same uuids as the inputs.
    status_uuids = [
        input_annot['uuid'] for input_annot in annotation_import.statuses
    ]
    assert set(input_uuids) == set(status_uuids)
def assert_file_content(url: str, predictions):
    """Download `url` and check its body equals `predictions` as ndjson."""
    downloaded = requests.get(url).text
    assert downloaded == ndjson.dumps(predictions)
|
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
sys.path.append(os.path.pardir)
from module.averagemeter import AverageMeter
from module.function import *
def validate(args, val_loader, model, criterion, normalizer, test=False, trans=False):
    """Evaluate `model` on `val_loader` and report loss plus task metrics.

    For args.task == 'regression' the average MAE is returned; otherwise the
    average AUC. When `test` is True, per-sample predictions are written to
    ../result/test_results.csv and (for regression) a parity plot of
    prediction vs. observation is shown. `trans` only changes the plot title.
    Note: the loop variable `input` shadows the builtin; kept as-is to match
    the dataloader's (input, target, cif_ids) tuple convention.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    if args.task == 'regression':
        mae_errors = AverageMeter()
    else:
        accuracies = AverageMeter()
        precisions = AverageMeter()
        recalls = AverageMeter()
        fscores = AverageMeter()
        auc_scores = AverageMeter()
    if test:
        # Accumulators for the parity plot and the CSV dump.
        graph_output = torch.Tensor()
        graph_target = torch.Tensor()
        test_targets = []
        test_preds = []
        test_cif_ids = []

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target, batch_cif_ids) in enumerate(val_loader):
        # Legacy Variable wrappers retained for older-PyTorch compatibility;
        # torch.no_grad() already disables autograd here.
        if args.cuda:
            with torch.no_grad():
                input_var = (Variable(input[0].cuda(non_blocking=True)),
                             Variable(input[1].cuda(non_blocking=True)),
                             input[2].cuda(non_blocking=True),
                             [crys_idx.cuda(non_blocking=True) for crys_idx in input[3]])
        else:
            with torch.no_grad():
                input_var = (Variable(input[0]),
                             Variable(input[1]),
                             input[2],
                             input[3])
        if args.task == 'regression':
            # Regression targets are normalized; predictions are denormalized
            # again below before computing MAE.
            target_normed = normalizer.norm(target)
        else:
            target_normed = target.view(-1).long()
        if args.cuda:
            with torch.no_grad():
                target_var = Variable(target_normed.cuda(non_blocking=True))
        else:
            with torch.no_grad():
                target_var = Variable(target_normed)

        # compute output
        output = model(*input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        if args.task == 'regression':
            mae_error = mae(normalizer.denorm(output.data.cpu()), target)
            losses.update(loss.data.cpu().item(), target.size(0))
            mae_errors.update(mae_error, target.size(0))
            if test:
                test_pred = normalizer.denorm(output.data.cpu())
                test_target = target
                test_preds += test_pred.view(-1).tolist()
                test_targets += test_target.view(-1).tolist()
                test_cif_ids += batch_cif_ids
        else:
            accuracy, precision, recall, fscore, auc_score = \
                class_eval(output.data.cpu(), target)
            losses.update(loss.data.cpu().item(), target.size(0))
            accuracies.update(accuracy, target.size(0))
            precisions.update(precision, target.size(0))
            recalls.update(recall, target.size(0))
            fscores.update(fscore, target.size(0))
            auc_scores.update(auc_score, target.size(0))
            if test:
                # exp() converts log-probabilities to class probabilities;
                # column 1 is the positive-class score (binary only).
                test_pred = torch.exp(output.data.cpu())
                test_target = target
                assert test_pred.shape[1] == 2
                test_preds += test_pred[:, 1].tolist()
                test_targets += test_target.view(-1).tolist()
                test_cif_ids += batch_cif_ids

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            if args.task == 'regression':
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'MAE {mae_errors.val:.3f} ({mae_errors.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    mae_errors=mae_errors))
            else:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Accu {accu.val:.3f} ({accu.avg:.3f})\t'
                      'Precision {prec.val:.3f} ({prec.avg:.3f})\t'
                      'Recall {recall.val:.3f} ({recall.avg:.3f})\t'
                      'F1 {f1.val:.3f} ({f1.avg:.3f})\t'
                      'AUC {auc.val:.3f} ({auc.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    accu=accuracies, prec=precisions, recall=recalls,
                    f1=fscores, auc=auc_scores))
        if test:
            # NOTE(review): concatenation along dim=1 (columns) rather than
            # dim=0 (batches), and the GPU/CPU device of output vs. the
            # accumulators is not reconciled here — confirm intended.
            graph_output = torch.cat((graph_output, normalizer.denorm(output.detach())), dim=1)
            graph_target = torch.cat((graph_target, target), dim=1)
    if test:
        # graph: parity plot of predictions (y) against observations (x).
        x = graph_output.numpy()
        t = graph_target.numpy()
        n_max = max(np.max(x), np.max(t))
        n_min = min(np.min(x), np.min(t))
        a = np.linspace(n_min - abs(n_max), n_max + abs(n_max))
        b = a  # y = x reference line
        plt.rcParams["font.family"] = "Times New Roman"
        plt.plot(a, b, color = 'blue')
        plt.scatter(t, x, marker = ".", color = 'red', edgecolors = 'black')
        plt.xlim(n_min - abs(n_min) , n_max + abs(n_min))
        plt.ylim(n_min - abs(n_min) , n_max + abs(n_min))
        if trans:
            title = "Prediction by CGCNN with Transfer Learning"
        else:
            title = "Predicion by CGCNN"
        plt.title(title)
        plt.xlabel("observation")
        plt.ylabel("prediction")
        plt.show()
        star_label = '**'  # '**' marks test-set results in the summary line
        import csv  # local import: csv only needed when dumping test results
        with open('../result/test_results.csv', 'w') as f:
            writer = csv.writer(f)
            for cif_id, target, pred in zip(test_cif_ids, test_targets,
                                            test_preds):
                writer.writerow((cif_id, target, pred))
    else:
        star_label = '*'
    if args.task == 'regression':
        print(' {star} MAE {mae_errors.avg:.3f}'.format(star=star_label,
                                                        mae_errors=mae_errors))
        return mae_errors.avg
    else:
        print(' {star} AUC {auc.avg:.3f}'.format(star=star_label,
                                                 auc=auc_scores))
        return auc_scores.avg
|
import os
from pychecker.check.extra_dep_detection import detect_extra_deps
from pychecker.check.local_comp_detection import detect_local_comp_detection
from pychecker.check.incomp_feature_detection import detect_incomp_feature_usage
from pychecker.check.no_avl_resource_detection import detect_no_avl_resource_pkg
from pychecker.check.common import parse_custom_modules
from pychecker.utils import read_object_from_file
from pychecker.check.experiment.exp_config import IN_THE_LAB_ROOT
class Test:
    """Base harness that evaluates one pychecker detector over the
    in-the-lab dataset and reports accuracy / precision / recall / F1.

    Subclasses override ``test_item()`` to invoke their detector on a
    single project directory.
    """

    def __init__(self, func):
        print("\033[0m", "-"*10, self.__class__.__name__, "-"*10)
        self.root = IN_THE_LAB_ROOT
        # Maps each detector function to its ground-truth column index
        # in dataset-lab.csv.
        self.mapping = {detect_incomp_feature_usage: 1,
                        detect_extra_deps: 2,
                        detect_local_comp_detection: 3,
                        detect_no_avl_resource_pkg: 4}
        self.func = func
        self.data = self.prepare_data()
        self.result_mat = list()  # [(expected_answer, my_answer), ...]

    def prepare_data(self):
        """Read dataset-lab.csv and return {project: expected_bool} for
        this detector's answer column."""
        meta_path = os.path.join(self.root, "dataset-lab.csv")
        with open(meta_path) as f:
            metadata = f.readlines()[1:]  # skip the header row
        data = dict()
        col = self.mapping[self.func]
        for row in metadata:
            row = row.strip()
            parts = row.split(",")
            project, answer = parts[0], parts[col]
            data[project] = answer == "1"
        return data

    def test(self, start=None, end=None):
        """Run ``test_item()`` on projects [start:end] (defaults: all),
        printing a colored pass/fail line per project, then the stats."""
        if not start:
            start = 0
        if not end:
            end = len(self.data.items())
        for ind, (project, answer) in enumerate(list(self.data.items())[start:end]):
            my_answer = self.test_item(project)
            result = my_answer == answer
            color = "\033[32m" if result else "\033[31m"
            print(color, f"{ind:2}", project, result)
            self.result_mat.append((answer, my_answer))
        self.statistic()

    def statistic(self):
        """Print the confusion matrix and derived metrics for result_mat.

        Fix: TN and FN were previously swapped (an expected-True row with a
        wrong prediction is a *false negative*, not a true negative), which
        made the printed confusion matrix wrong; recall now uses the
        intended tp/(tp+fn) form.
        """
        tp, fp, tn, fn = 0, 0, 0, 0
        correct = 0
        for expected, predicted in self.result_mat:
            match = expected == predicted
            if expected and match:
                tp += 1
            elif expected:
                fn += 1  # expected positive, predicted negative
            elif match:
                tn += 1  # expected negative, predicted negative
            else:
                fp += 1  # expected negative, predicted positive
            correct += 1 if match else 0
        acc = correct/len(self.result_mat)
        pre = tp/(tp+fp) if tp+fp != 0 else 0
        rec = tp/(tp+fn) if tp+fn != 0 else 0
        f1 = 2*pre*rec/(pre+rec) if pre+rec != 0 else 0
        color = "\033[0m"
        print(color)
        print("         \t True \t False")
        print(" Positive\t", tp, " \t", fp)
        print(" Negative\t", tn, " \t", fn)
        print(" acc:", acc, "\n pre:", pre, "\n rec:", rec, "\n f1-score:", f1)

    def test_item(self, project):
        """Overridden by subclasses; returns the detector's verdict for *project*."""
        return True
class LocalCompTest(Test):
    """Evaluates local-component detection on each lab project."""

    def __init__(self):
        super().__init__(detect_local_comp_detection)

    def test_item(self, project):
        # Run the detector directly against the project's setup.py.
        return detect_local_comp_detection(os.path.join(self.root, project, "setup.py"))
class ExtraDepTest(Test):
    """Evaluates extra-dependency detection on each lab project."""

    def __init__(self):
        super().__init__(detect_extra_deps)

    def test_item(self, project):
        setup_path = os.path.join(self.root, project, "setup.py")
        # Modules shipped inside the project itself must not count as deps.
        custom = parse_custom_modules(os.path.dirname(setup_path))
        return detect_extra_deps(setup_path, custom_modules=custom)
class IncompTest(Test):
    """Evaluates incompatible-feature detection using precomputed
    per-project compatibility information."""

    def __init__(self):
        super().__init__(detect_incomp_feature_usage)
        # Compatibility data is loaded once and shared across all items.
        self.comp_info = read_object_from_file(
            os.path.join(self.root, "compatibility.json"))

    def test_item(self, project):
        setup_path = os.path.join(self.root, project, "setup.py")
        return detect_incomp_feature_usage(
            setup_path,
            self.comp_info[project],
            parse_custom_modules(os.path.dirname(setup_path)),
        )
class NoAvlTest(Test):
    """Evaluates no-available-resource detection; project directories are
    named '<pkg>-<version>'."""

    def __init__(self):
        super().__init__(detect_no_avl_resource_pkg)

    def test_item(self, project):
        # Split on the final dash only: package names may contain dashes.
        pkg, _, ver = project.rpartition("-")
        return detect_no_avl_resource_pkg(pkg, ver)
def generate_result(tests, output_path):
    """Write one CSV row per project: the project name followed by each
    test's predicted label (1/0), in the order the tests are given.

    All tests are assumed to share the same project order as tests[0].data.
    """
    rows = []
    for ind, project in enumerate(tests[0].data.keys()):
        predictions = [str(int(t.result_mat[ind][1])) for t in tests]
        rows.append(",".join([project] + predictions) + "\n")
    with open(output_path, "w") as f:
        f.writelines(rows)
if __name__ == '__main__':
    # Run the detector evaluations. Each .test() prints one colored
    # pass/fail line per project, then the confusion-matrix statistics.
    testA = LocalCompTest()
    testA.test()
    # testB = ExtraDepTest()
    # testB.test()
    testC = IncompTest()
    testC.test()
    # testD = NoAvlTest()
    # testD.test()
    # generate_result([testC, testA, testD], "./result-lab.csv")
#! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from StringIO import StringIO
from quex.input.regular_expression.exception import *
from quex.blackboard import setup
# Disable the buffer- and path-limit codes so they cannot collide with
# characters used by the test patterns.
setup.buffer_limit_code = -1
setup.path_limit_code = -1
import quex.engine.state_machine.index as sm_index
import quex.input.regular_expression.engine as regex
import quex.engine.state_machine.construction.ambiguous_post_context as ambiguous_post_context
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.algebra.reverse as reverse
# HWUT harness handshake: print the test title and exit when queried.
if "--hwut-info" in sys.argv:
    print "Pseudo Ambigous Post Condition: Mounting"
    sys.exit(0)
def test(RE_Core, RE_PostCondition):
    """Parse the core and post-condition patterns, mount the
    (pseudo-ambiguous) post condition onto the core state machine, and
    print the resulting machines so the HWUT harness can diff the output.

    Note: Python 2 code (print statements, `except E, x` syntax).
    """
    string_stream_Core = StringIO(RE_Core)
    string_stream_PostCondition = StringIO(RE_PostCondition)
    # reset the index, so that things get a little less 'historic'
    try:
        core_sm = regex.do(string_stream_Core, {}).extract_sm()
    except RegularExpressionException, x:
        # Parse failure in the core pattern: report and skip this case.
        print "Core Pattern:\n" + repr(x)
        return
    try:
        post_context_sm = regex.do(string_stream_PostCondition, {}).extract_sm()
    except RegularExpressionException, x:
        print "Post Condition Pattern:\n" + repr(x)
        return
    print "---------------------------------------------------------"
    print "core pattern =", RE_Core
    print "post condition pattern =", RE_PostCondition
    backward_search_sm = ambiguous_post_context.mount(core_sm, post_context_sm)
    backward_search_sm = reverse.do(backward_search_sm)
    # .mount() does no NFA-to-DFA transformation, so beautify (determinize) here.
    core_sm = beautifier.do(core_sm)
    print "ambigous post condition =", core_sm
    print "backward detector =", backward_search_sm
# Test cases: (core pattern, post-condition pattern) pairs that exercise
# the pseudo-ambiguous post-condition mounting path.
test('"xy"+', '((ab)+|xy)')
test('"xz"+', '[a-z]{2}')
test('"xyz"+', '"xyz"')
test("(a)+", "ab")
test("(.)+a", "(.)+")
# test('"xz"+', '"xz"+')
# test('"xyz"+', '("abc")|(("x"|"X")[a-z]{1}("z"|"Z"))')
# test('("abc"+|"xyz")+', '("abc")|(("x"|"X")[a-z]{1}("z"|"Z"))')
# test('(("xyz")+hello)+', '"xyz"hello')
|
import os
import json
import requests
import logging
import asyncio
import discord
# Resolve paths relative to this module so the bot can be launched from any CWD.
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load bot credentials and role configuration.
with open(dir_path+'/client_config.json', 'r') as f:
    config = json.load(f)
TOKEN = config['TOKEN']  # Discord bot token
SERVER_ID = config['GUILD']  # guild (server) id the bot manages
ROLES = config['ROLES']  # role definitions keyed by role name
# Cached role ids; rewritten at runtime if the roles have to be created.
HOST_ROLE_ID = config['ROLES']['HOST']['id']
CALLER_ROLE_ID = config['ROLES']['CALLER']['id']
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s'
)
def get_roles_from_server():
    """Fetch the guild's role list from the Discord REST API.

    Returns the parsed JSON payload (a list of role objects).
    """
    endpoint = 'https://discordapp.com/api/guilds/' + str(SERVER_ID) + '/roles'
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (compatible; Rigor/1.0.0; http://rigor.com)',
        'Authorization': 'Bot ' + TOKEN,
    }
    reply = requests.get(endpoint, headers=request_headers)
    return json.loads(reply.text)
def find_roles():
    """Check the server for the configured HOST and CALLER roles.

    Returns an empty dict when both roles exist; otherwise a dict mapping
    'HOST'/'CALLER' to whether each was found.
    """
    found = {'HOST': False, 'CALLER': False}
    for role in get_roles_from_server():
        if role['id'] == str(ROLES['HOST']['id']):
            found['HOST'] = True
        elif role['id'] == str(ROLES['CALLER']['id']):
            found['CALLER'] = True
    if found['HOST'] and found['CALLER']:
        return {}
    return found
async def role_check(bot):
    """Ensure the configured roles exist on the server, creating any that
    are missing."""
    logging.info("Checking if roles are available")
    missing = find_roles()
    if not missing:
        logging.info("All required roles are available")
        return
    logging.info("Roles are missing!")
    await create_missing_roles(missing, bot)
async def create_missing_roles(missing_roles, bot):
    """Create each role flagged False in *missing_roles* on the guild and
    persist the new role ids to the config file.

    missing_roles: dict of {'HOST': bool, 'CALLER': bool} found-flags as
    returned by find_roles().
    """
    logging.info("Creating roles...")
    # Here is where we'll handle creating anything that's missing.
    print(missing_roles)  # NOTE(review): debug prints; consider logging.debug
    for role in missing_roles:
        print(missing_roles[role])
        if not missing_roles[role]:
            role_name = config['ROLES'][role]['name']
            logging.info("Creating Role: " + role_name)
            if role == 'HOST':
                # NOTE(review): `perms` is built but never passed to
                # create_role(), so the new role gets default permissions —
                # confirm whether this overwrite set should be applied.
                perms = discord.PermissionOverwrite(
                    connect=True,
                    speak=True,
                    mute_members=True,
                    deafen_members=True,
                    move_members=True,
                    use_voice_activation=True,
                    priority_speaker=True,
                    read_messages=True
                )
                new_role = await bot.get_guild(config['GUILD']).create_role(name=role_name)
                # Cache the freshly created role id at module level.
                global HOST_ROLE_ID
                HOST_ROLE_ID = new_role.id
            else:
                # NOTE(review): same as above — `perms` is unused.
                perms = discord.PermissionOverwrite(
                    connect=True,
                    speak=True,
                    mute_members=False,
                    deafen_members=False,
                    move_members=False,
                    use_voice_activation=True,
                    priority_speaker=False,
                    read_messages=True
                )
                new_role = await bot.get_guild(config['GUILD']).create_role(name=role_name)
                global CALLER_ROLE_ID
                CALLER_ROLE_ID = new_role.id
    update_config_file_role_ids()
    return
def update_config_file_role_ids():
    """Persist the (possibly newly created) role ids back to the config file.

    Fix: this previously wrote to 'config.json' relative to the current
    working directory, while the module loads its configuration from
    dir_path + '/client_config.json' — so saved role ids were never read
    back on the next start. Write to the same file that was loaded.
    """
    config['ROLES']['HOST']['id'] = HOST_ROLE_ID
    config['ROLES']['CALLER']['id'] = CALLER_ROLE_ID
    with open(dir_path + '/client_config.json', "w") as jsonFile:
        json.dump(config, jsonFile)
    return
# if __name__ == '__main__':
# print(find_roles([str(458818918169968640), str(519697126557745153)]))
|
from django.shortcuts import redirect
from django.shortcuts import render
#
|
'''
Cost of balloons
You are conducting a contest at your college. This contest consists of two problems and participants. You know the problem that a candidate will solve during the contest.
You provide a balloon to a participant after he or she solves a problem. There are only green and purple-colored balloons available in a market. Each problem must have a balloon associated with it as a prize for solving that specific problem. You can distribute balloons to each participant by performing the following operation:
Use green-colored balloons for the first problem and purple-colored balloons for the second problem
Use purple-colored balloons for the first problem and green-colored balloons for the second problem
You are given the cost of each balloon and problems that each participant solve. Your task is to print the minimum price that you have to pay while purchasing balloons.
Input format
First line: that denotes the number of test cases ()
For each test case:
First line: Cost of green and purple-colored balloons
Second line: that denotes the number of participants ()
Next lines: Contain the status of users. For example, if the value of the integer in the row is , then it depicts that the participant has not solved the problem. Similarly, if the value of the integer in the row is , then it depicts that the participant has solved the problem.
Output format
For each test case, print the minimum cost that you have to pay to purchase balloons.
SAMPLE INPUT
2
9 6
10
1 1
1 1
0 1
0 0
0 1
0 0
0 1
0 1
1 1
0 0
1 9
10
0 1
0 0
0 0
0 1
1 0
0 1
0 1
0 0
0 1
0 0
SAMPLE OUTPUT
69
14
'''
# For each test case, every participant needs a balloon per solved problem.
# Either color assignment is allowed (green/purple for problems 1/2, or the
# reverse), so pay whichever total is cheaper.
for _ in range(int(input())):
    G, P = map(int, input().split())
    n = int(input())
    first_solved = 0
    second_solved = 0
    for _ in range(n):
        x, y = map(int, input().split())
        first_solved += x
        second_solved += y
    cost_a = first_solved * G + second_solved * P
    cost_b = first_solved * P + second_solved * G
    print(min(cost_a, cost_b))
|
from .mnist import *
from .svhn import *
from .usps import *
|
# Blueprint for the user-profile ("personal center") module; its routes are
# mounted under the /user prefix.
from flask import Blueprint
profile_blu = Blueprint("profile",__name__,url_prefix="/user")
# Imported last so view functions can register on profile_blu without a
# circular import.
from . import views
from configuration_loader.mergers.base import Base
class DefaultMerger(Base):
    """Merger that recursively folds one dictionary into another."""

    def merge(self, dict_a, dict_b, path=None):
        """Merge *dict_b* into *dict_a* in place and return *dict_a*.

        Nested dicts are merged recursively; for any other conflicting
        value, dict_b's value wins. *path* carries the key trail through
        the recursion.
        """
        if path is None:
            path = []
        for key, incoming in dict_b.items():
            if key not in dict_a:
                dict_a[key] = incoming
            elif isinstance(dict_a[key], dict) and isinstance(incoming, dict):
                self.merge(dict_a[key], incoming, path + [str(key)])
            elif dict_a[key] != incoming:
                dict_a[key] = incoming
        return dict_a
|
from datetime import datetime
from random import random
from time import sleep

import sqlalchemy

from cwlab.database.connector import db
from cwlab.database.sqlalchemy.models import User, Exec, Job, Run
class JobManager():
    """CRUD facade for Job / Run / Exec records backed by the shared
    SQLAlchemy session.

    Operations that can hit transient connection errors retry with an
    increasing, jittered backoff; the final failure raises an
    AssertionError ("Could not connect to database.").

    Fixes relative to the previous version:
    - ``sleep``/``random`` were used in the retry paths but never imported
      (NameError on any retry) — now imported at module level.
    - ``update()`` fell through after a successful commit and committed once
      per retry slot; it now returns on success.
    - the ``load_*`` retry loops re-executed their query on every retry slot
      even after success; they now return immediately.
    - ``set_exec_ended`` used ``time_finished=datetime.now()`` as a default,
      which is evaluated once at class-definition time; ``None`` now means
      "now at call time".
    """

    def create_job(
        self,
        job_name,
        username,
        wf_target
    ):
        """Insert a new Job row and return its database id."""
        job = Job(
            job_name=job_name,
            username=username,
            wf_target=wf_target
        )
        self.store(job)
        return job.id

    def create_runs(
        self,
        run_names,
        job_name,
    ):
        """Insert one Run row per name for *job_name*, committing once at the end."""
        for run_name in run_names:
            run = Run(
                run_name=run_name,
                job_name=job_name,
            )
            self.store(run, do_not_update=True)
        self.update()

    def create_exec(
        self,
        job_name,
        run_name,
        wf_target,
        run_input,
        out_dir,
        global_temp_dir,
        log,
        status,
        err_message,
        retry_count,
        time_started,
        time_finished,
        timeout_limit,
        pid,
        username,
        exec_profile,
        exec_profile_name,
        add_exec_info,
        user_email,
        access_token
    ):
        """Insert a new Exec row (custom status starts unset / "grey") and
        return its database id."""
        exec_ = Exec(
            job_name=job_name,
            run_name=run_name,
            wf_target=wf_target,
            run_input=run_input,
            out_dir=out_dir,
            global_temp_dir=global_temp_dir,
            log=log,
            status=status,
            custom_status=None,
            custom_status_color="grey",
            err_message=err_message,
            retry_count=retry_count,
            time_started=time_started,
            time_finished=time_finished,
            timeout_limit=timeout_limit,
            pid=pid,
            username=username,
            exec_profile=exec_profile,
            exec_profile_name=exec_profile_name,
            add_exec_info=add_exec_info,
            user_email=user_email,
            access_token=access_token
        )
        self.store(exec_)
        return exec_.id

    def update(self):
        """Commit the session, retrying transient failures with backoff."""
        retry_delays = [1, 4]
        for retry_delay in retry_delays:
            try:
                db.session.commit()
                return  # fix: stop after a successful commit
            except Exception:
                # On the last slot, surface the failure instead of sleeping.
                assert retry_delay != retry_delays[-1], "Could not connect to database."
                sleep(retry_delay + retry_delay*random())

    def store(self, obj, do_not_update=False):
        """Add *obj* to the session; commit immediately unless do_not_update."""
        db.session.add(obj)
        if not do_not_update:
            self.update()

    def get_running_runs_names(self, job_name, run_names):
        """Return the subset of *run_names* whose latest Exec looks active.

        NOTE(review): a run counts as "running" when its latest Exec has no
        finish time OR its status equals "finished" — the latter looks
        inverted but is kept as-is to preserve behavior; confirm with callers.
        """
        already_running_runs = []
        for run_name in run_names:
            execs_request = self.get_execs_db_query_(job_name, run_name).distinct()
            if execs_request.count() > 0:
                # Pick the Exec with the highest id (the latest attempt).
                latest_id = max(e.id for e in execs_request)
                run_info = execs_request.filter(Exec.id == latest_id).first()
                if run_info.time_finished is None or run_info.status == "finished":
                    already_running_runs.append(run_name)
        return already_running_runs

    def get_execs_db_query_(self, job_name, run_name):
        """Internal helper: build the Exec query for (job_name, run_name).

        Should not be used outside of this class.
        """
        retry_delays = [1, 4]
        for retry_delay in retry_delays:
            try:
                return db.session.query(Exec).filter(Exec.job_name==job_name, Exec.run_name==run_name)
            except Exception:
                assert retry_delay != retry_delays[-1], "Could not connect to database."
                sleep(retry_delay + retry_delay*random())

    def get_exec(self, job_name, run_name):
        """Return the latest Exec for the run, or None when there is none."""
        execs = self.get_execs_db_query_(job_name, run_name).distinct().all()
        if not execs:
            return None
        return max(execs, key=lambda e: e.id)

    def get_exec_info(self, job_name, run_name):
        """Return a summary dict for the run's latest Exec, or None."""
        exec_ = self.get_exec(job_name, run_name)
        if exec_ is None:
            return None
        return {
            "pid": exec_.pid,
            "status": exec_.status,
            "custom_status": exec_.custom_status,
            "custom_status_color": exec_.custom_status_color,
            "time_started": exec_.time_started,
            "time_finished": exec_.time_finished,
            "exec_profile": exec_.exec_profile_name,
            "retry_count": exec_.retry_count
        }

    def load_run_by_name(self, job_name, run_name):
        """Return the Run row for (job_name, run_name), or None."""
        retry_delays = [1, 4]
        for retry_delay in retry_delays:
            try:
                db_request = db.session.query(Run).filter(Run.run_name == run_name, Run.job_name == job_name)
                if db_request.count() == 0:
                    return None
                return db_request.first()  # fix: return on success
            except Exception:
                assert retry_delay != retry_delays[-1], "Could not connect to database."
                sleep(retry_delay + retry_delay*random())

    def load_all_runs_by_job_name(self, job_name):
        """Return all Run rows belonging to *job_name*."""
        retry_delays = [1, 4]
        for retry_delay in retry_delays:
            try:
                return db.session.query(Run).filter(Run.job_name == job_name).all()
            except Exception:
                assert retry_delay != retry_delays[-1], "Could not connect to database."
                sleep(retry_delay + retry_delay*random())

    def load_job_by_name(self, job_name):
        """Return the Job row named *job_name*, or None."""
        retry_delays = [1, 4]
        for retry_delay in retry_delays:
            try:
                db_request = db.session.query(Job).filter(Job.job_name == job_name)
                if db_request.count() == 0:
                    return None
                return db_request.first()  # fix: return on success
            except Exception:
                assert retry_delay != retry_delays[-1], "Could not connect to database."
                sleep(retry_delay + retry_delay*random())

    def load_jobs_for_user(self, username):
        """Return all Job rows owned by *username*."""
        retry_delays = [1, 4]
        for retry_delay in retry_delays:
            try:
                return db.session.query(Job).filter(Job.username == username).all()
            except Exception:
                assert retry_delay != retry_delays[-1], "Could not connect to database."
                sleep(retry_delay + retry_delay*random())

    def delete_run(self, job_name, run_name):
        """Delete a run and all of its Exec rows."""
        self.get_execs_db_query_(job_name, run_name).delete(synchronize_session=False)
        db.session.delete(self.load_run_by_name(job_name, run_name))
        self.update()

    def delete_job(self, job_name):
        """Delete a job and all of its runs (Exec rows are not touched here)."""
        db.session.delete(self.load_job_by_name(job_name))
        for run in self.load_all_runs_by_job_name(job_name):
            db.session.delete(run)
        self.update()

    def get_jobs_info_for_user(self, username):
        """Return [{'job_name': ..., 'wf_target': ...}] for the user's jobs."""
        jobs = self.load_jobs_for_user(username)
        return [{"job_name": job.job_name, "wf_target": job.wf_target} for job in jobs]

    def get_run_names(self, job_name):
        """Return the names of all runs belonging to *job_name*."""
        runs = self.load_all_runs_by_job_name(job_name)
        return [run.run_name for run in runs]

    def set_exec_ended(self, job_name, run_name, status, pid=-1, time_finished=None):
        """Mark the latest Exec of the run as ended.

        time_finished: None means "now" (evaluated at call time — fixes the
        previous mutable-default-style bug where datetime.now() was frozen
        at class-definition time).
        """
        if time_finished is None:
            time_finished = datetime.now()
        exec_ = self.get_exec(job_name, run_name)
        exec_.status = status
        exec_.pid = pid
        exec_.time_finished = time_finished
        self.store(exec_)

    def delete_exec(self, job_name, run_name):
        """Delete all Exec rows for the run."""
        self.get_execs_db_query_(job_name, run_name).delete(synchronize_session=False)
        self.update()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import imperial.model.mapper.mapeador as mapeador
# NOTE(review): runs on import as well as direct execution; consider an
# `if __name__ == '__main__':` guard if this module is ever imported.
mapeador.main()
"""Boilerplate settings parsing"""
# pragma: no cover
from mitol.common.envs import get_string
MITOL_DIGITAL_CREDENTIALS_VERIFY_SERVICE_BASE_URL = get_string(
name="MITOL_DIGITAL_CREDENTIALS_VERIFY_SERVICE_BASE_URL",
description="Base URL for sing-and-verify service to call for digital credentials",
required=False,
)
MITOL_DIGITAL_CREDENTIALS_HMAC_SECRET = get_string(
name="MITOL_DIGITAL_CREDENTIALS_HMAC_SECRET",
description="HMAC secret to sign digital credentials requests with",
required=False,
)
MITOL_DIGITAL_CREDENTIALS_DEEP_LINK_URL = get_string(
name="MITOL_DIGITAL_CREDENTIALS_DEEP_LINK_URL",
default=None,
description="URL at which to deep link the learner to for the digital credentials wallet",
required=False,
)
MITOL_DIGITAL_CREDENTIALS_AUTH_TYPE = get_string(
name="MITOL_DIGITAL_CREDENTIALS_AUTH_TYPE",
default=None,
description="Auth type that is passed to the digital credentials wallet app",
required=False,
)
|
from mpl_toolkits.mplot3d import Axes3D
import pylab
import numpy
def _produce_axis(low, high, bins):
""" This method produces an array that represents the axis between low and
high with bins.
Args:
low (float): Low edge of the axis
high (float): High edge of the axis
bins (int): Number of bins
"""
return [low + x * (high - low) / bins for x in range(bins)]
def plot_projection(spectra, dimension):
    """ Plot the spectra as projected onto the dimension.

    For example dimension == 0 will plot the spectra as projected onto the
    energy dimension.

    Args:
      spectra (:class:`echidna.core.spectra`): The spectra to plot.
      dimension (int): The dimension to project the spectra onto
        (0 = energy, 1 = radius, 2 = time).

    NOTE(review): a dimension outside {0, 1, 2} leaves `x`/`width` unbound
    and raises UnboundLocalError below — confirm callers only pass 0-2.
    """
    figure = pylab.figure()
    axis = figure.add_subplot(1, 1, 1)
    if dimension == 0:
        x = _produce_axis(spectra._energy_low, spectra._energy_high, spectra._energy_bins)
        width = spectra._energy_width
        pylab.xlabel("Energy [MeV]")
    elif dimension == 1:
        x = _produce_axis(spectra._radial_low, spectra._radial_high, spectra._radial_bins)
        width = spectra._radial_width
        pylab.xlabel("Radius [mm]")
    elif dimension == 2:
        x = _produce_axis(spectra._time_low, spectra._time_high, spectra._time_bins)
        width = spectra._time_width
        pylab.xlabel("Time [yr]")
    pylab.ylabel("Count per %f bin" % width)
    data = spectra.project(dimension)
    axis.bar(x, data, width=width)
    pylab.show()
def plot_surface(spectra, dimension):
    """ Plot the spectra with the dimension projected out.

    For example dimension == 0 will plot the spectra as projected onto the
    radial and time dimensions i.e. not energy.

    Args:
      spectra (:class:`echidna.core.spectra`): The spectra to plot.
      dimension (int): The dimension to project out
        (0 = energy, 1 = radius, 2 = time).

    NOTE(review): a dimension outside {0, 1, 2} leaves `x`/`y`/`data`
    unbound and raises below — confirm callers only pass 0-2.
    """
    figure = pylab.figure()
    axis = figure.add_subplot(111, projection='3d')
    if dimension == 0:
        x = _produce_axis(spectra._radial_low, spectra._radial_high, spectra._radial_bins)
        y = _produce_axis(spectra._energy_low, spectra._energy_high, spectra._energy_bins)
        data = spectra.surface(2)
        axis.set_xlabel("Radius [mm]")
        axis.set_ylabel("Energy [MeV]")
    elif dimension == 1:
        x = _produce_axis(spectra._time_low, spectra._time_high, spectra._time_bins)
        y = _produce_axis(spectra._energy_low, spectra._energy_high, spectra._energy_bins)
        data = spectra.surface(1)
        axis.set_xlabel("Time [yr]")
        axis.set_ylabel("Energy [MeV]")
    elif dimension == 2:
        x = _produce_axis(spectra._time_low, spectra._time_high, spectra._time_bins)
        y = _produce_axis(spectra._radial_low, spectra._radial_high, spectra._radial_bins)
        data = spectra.surface(0)
        axis.set_xlabel("Time [yr]")
        axis.set_ylabel("Radius [mm]")
    axis.set_zlabel("Count per bin")
    X, Y = numpy.meshgrid(x, y)  # `plot_surface` expects `x` and `y` data to be 2D
    axis.plot_surface(X, Y, data)
    pylab.show()
|
import re
import requests
from bs4 import BeautifulSoup
import urllib
def getHTMLText(url):
    """Fetch *url* and return the body decoded with the apparent encoding.

    Returns:
        str | None: page text, or None on any request failure
        (timeout, HTTP error status, connection error).
    """
    try:
        r = requests.get(url, timeout = 30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Fix: the bare `except:` also swallowed KeyboardInterrupt /
        # SystemExit; narrow to request-related failures, keep None contract.
        return None
# Scrape an answer for a question from the web (via so.com search + baike pages).
def pachong_answer(question):
    """Search so.com for *question*, follow the first encyclopedia ("baike")
    result, and return its first paragraph of text.

    Falls back to an alternate result-card selector when the primary one is
    absent; returns a canned Chinese message on failure.

    NOTE(review): the nested bare `except:` blocks hide all errors (including
    network failures and None from getHTMLText) behind the fallback paths.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
    }
    url = 'https://www.so.com/s?ie=utf-8&fr=none&src=360sou_newhome&q={}'.format(urllib.parse.quote(question))
    try:
        try:
            # Primary path: "more detail" link on the search results page.
            demo = getHTMLText(url)
            soup = BeautifulSoup(demo, 'html.parser')
            baike_url = soup.find_all('a', attrs={'class': "mh-more-detail"})
            if len(baike_url) == 0:
                baike_url = soup.find_all('a', attrs={'class': "detail"})
            baike_url = baike_url[0]['href']
            baike_demo = requests.get(baike_url, headers=headers).content.decode('utf-8')
            baike_soup = BeautifulSoup(baike_demo, 'html.parser')
            # First paragraph of the encyclopedia entry card.
            answer = baike_soup.find_all('div', attrs={'class': "entry-card-content"})[0].p
            answer = ''.join(answer.find_all(text=True))
            if len(answer) == 0:
                return '换个问题呗'
            return answer
        except:
            # Fallback path: plain "detail" link and a different card layout.
            demo = getHTMLText(url)
            soup = BeautifulSoup(demo, 'html.parser')
            baike_url = soup.find_all('a', attrs={'class': "detail"})
            if len(baike_url) == 0:
                return '请换一种提问方式'
            baike_url = baike_url[0]['href']
            baike_demo = requests.get(baike_url, headers=headers).content.decode('utf-8')
            baike_soup = BeautifulSoup(baike_demo, 'html.parser')
            answer = baike_soup.find_all('div', attrs={'class': "card_content"})[0]
            answer = ''.join(answer.find_all(text=True))
            if len(answer) == 0:
                return '换个问题呗'
            return answer
    except:
        return '我不会哎'
# print(pachong_answer('刘备'))
# print(len(None))
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\university\university_commands.py
# Compiled at: 2020-07-31 03:14:26
# Size of source mod 2**32: 21942 bytes
from protocolbuffers import Consts_pb2
from event_testing import test_events
from event_testing.resolver import SingleSimResolver
from server_commands.argument_helpers import TunableInstanceParam, get_optional_target, OptionalSimInfoParam
from server_commands.household_commands import household_split
from sims.loan_tuning import LoanTunables, LoanType
from sims.university.university_enums import EnrollmentStatus, Grade, UniversityHousingKickOutReason
from sims.university.university_housing_tuning import UniversityHousingTuning
from sims.university.university_telemetry import UniversityTelemetry
from sims.university.university_tuning import University
from sims4.common import Pack
from sims4.resources import Types
from sims4.tuning.tunable import TunableList, TunableTuple, TunableReference, Tunable
from situations.situation_guest_list import SituationGuestList
import build_buy, services, sims4.commands
class UniversityCommandTuning:
    """Tuning container for the university cheat/live commands (decompiled)."""

    # Every (prestige, honors) permutation mapped to the degree traits it
    # awards; consumed when granting degrees via commands.
    DEGREE_TRAITS = TunableList(description='\n        A list of all possible combinations of degrees, where each list\n        is assigned a specific prestige and honors permutation.\n        ',
      tunable=TunableTuple(description='\n        A tuple of prestige and honors booleans, and the associated list\n        of degree traits.\n        ',
      prestige=Tunable(description='\n        The prestige type (Prestige or No Prestige) of this degree\n        list.\n        ',
      tunable_type=bool,
      default=False),
      honors=Tunable(description='\n        The honors type (Honors or No Honors) of this degree list.\n        ',
      tunable_type=bool,
      default=False),
      traits=TunableList(description='\n        The list of degree traits for this prestige and honors \n        permutation.\n        ',
      tunable=TunableReference(description='\n        The degree trait.\n        ',
      manager=(services.get_instance_manager(Types.TRAIT)),
      pack_safe=True))))
def get_target_household_id_for_zone(zone_id, account):
    """Return the household id owning *zone_id*; when the zone is unowned
    (id missing or 0), create an empty household for *account* and return
    its id."""
    household_id = services.get_persistence_service().get_household_id_from_zone_id(zone_id)
    if household_id is None or household_id == 0:
        new_household = services.household_manager().create_household(account, starting_funds=0)
        household_id = new_household.id
    return household_id
@sims4.commands.Command('university.enroll', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def enroll(major: TunableInstanceParam(sims4.resources.Types.UNIVERSITY_MAJOR), university: TunableInstanceParam(sims4.resources.Types.UNIVERSITY), opt_sim: OptionalSimInfoParam=None, classes: int=3, elective: TunableInstanceParam(sims4.resources.Types.UNIVERSITY_COURSE_DATA)=None, tuition_cost: int=0, total_scholarship_taken: int=0, is_using_loan: bool=False, destination_zone_id: int=None, _connection=None):
    """Enroll the target Sim at *university* in *major*: pay (or finance)
    tuition, apply scholarships, and optionally relocate the Sim to the
    university housing lot at *destination_zone_id*.
    """
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        sims4.commands.output('No valid target for university.enroll', _connection)
        return False
    else:
        degree_tracker = sim_info.degree_tracker
        if degree_tracker is None:
            sims4.commands.output('Sim: {} has no degree tracker in university.enroll'.format(sim_info), _connection)
            return False
        electives = () if elective is None else (elective,)
        degree_tracker.enroll(major, university, classes, courses=electives)
        if is_using_loan:
            # Finance tuition by converting the cost into university-loan debt.
            LoanTunables.add_debt(sim_info, LoanTunables.get_loan_amount(tuition_cost, LoanType.UNIVERSITY))
        else:
            sim_info.household.funds.try_remove(tuition_cost, Consts_pb2.FUNDS_TUITION_COST, sim_info)
        UniversityTelemetry.send_university_tuition_telemetry(sim_info, tuition_cost, is_using_loan)
        degree_tracker.handle_scholarships_after_enrollment(total_scholarship_taken)
        UniversityTelemetry.send_university_housing_telemetry(destination_zone_id)
        if destination_zone_id is None:
            # No housing change requested.
            return True
        home_zone_id = sim_info.household.home_zone_id
        if home_zone_id == destination_zone_id:
            degree_tracker.on_enroll_in_same_housing()
            return True
        venue_manager = services.get_instance_manager(sims4.resources.Types.VENUE)
        venue = venue_manager.get(build_buy.get_current_venue(home_zone_id))
        if venue.is_university_housing:
            # Sim already lives in (other) university housing: move via kick-out flow.
            sim_info.degree_tracker.set_kickout_info(destination_zone_id, UniversityHousingKickOutReason.MOVED)
            return True
        target_household_id = 0
        if destination_zone_id != 0:
            account = services.client_manager().get(_connection).account
            target_household_id = get_target_household_id_for_zone(destination_zone_id, account)
        # Split the Sim out of the current household into the destination one.
        household_split(sourceHouseholdId=(sim_info.household.id), targetHouseholdId=target_household_id,
          cancelable=False,
          allow_sim_transfer=False,
          selected_sim_ids=[
         sim_info.sim_id],
          destination_zone_id=destination_zone_id)
        return True
@sims4.commands.Command('university.accept_all_degrees', pack=(Pack.EP08))
def accept_all_degrees(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Cheat: mark every degree at every university as accepted for the
    target Sim."""
    target_info = get_optional_target(opt_sim, _connection, target_type=OptionalSimInfoParam)
    if target_info is None:
        sims4.commands.output('Failed to find SimInfo.', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.accept_all_degrees'.format(target_info), _connection)
        return False
    # Accept any (university, degree) pair not already accepted.
    for uni in University.ALL_UNIVERSITIES:
        for degree in University.ALL_DEGREES:
            if not tracker.is_accepted_degree(uni, degree):
                tracker.set_accepted_degree(uni, degree)
    return True
@sims4.commands.Command('university.show_brochure', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def show_brochure(university: TunableInstanceParam(sims4.resources.Types.UNIVERSITY), opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Apply the university's brochure loot to the target Sim (shows the
    brochure UI)."""
    target_info = get_optional_target(opt_sim, _connection, target_type=OptionalSimInfoParam)
    if target_info is None:
        sims4.commands.output('Failed to find SimInfo.', _connection)
        return False
    university.brochure_loot.apply_to_resolver(SingleSimResolver(target_info))
    return True
@sims4.commands.Command('university.show_enrollment_dialog', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def show_enrollment_dialog(opt_sim: OptionalSimInfoParam=None, is_reenrollment: bool=False, _connection=None):
    """Open the enrollment (or re-enrollment) dialog for the target Sim."""
    target_info = get_optional_target(opt_sim, _connection, target_type=OptionalSimInfoParam)
    if target_info is None:
        sims4.commands.output('Failed to find SimInfo.', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker.'.format(target_info), _connection)
        return False
    tracker.generate_enrollment_information(is_reenrollment=is_reenrollment)
    return True
@sims4.commands.Command('university.cancel_enrollment_dialog', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def cancel_enrollment_dialog(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Notify the target Sim's degree tracker that the enrollment dialog
    was cancelled."""
    target_info = get_optional_target(opt_sim, _connection, target_type=OptionalSimInfoParam)
    if target_info is None:
        sims4.commands.output('Failed to find SimInfo.', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker.'.format(target_info), _connection)
        return False
    tracker.on_cancel_enrollment_dialog()
    return True
@sims4.commands.Command('university.create_kick_out_situation', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def create_kick_out_situation(kick_out_reason: UniversityHousingKickOutReason, sim_id: int=None, additional_sim_ids: []=None, university_housing_destination_zone_id: int=0, _connection=None):
    """Start the university-housing kick-out situation for *sim_id*
    (defaults to the active Sim), optionally moving them to
    *university_housing_destination_zone_id*.
    """
    if sim_id is None:
        # No explicit target: kick out the currently active Sim.
        active_sim = services.get_active_sim()
        sim_id = active_sim.sim_id
    guest_list = SituationGuestList(invite_only=True, host_sim_id=sim_id)
    services.get_zone_situation_manager().create_situation((UniversityHousingTuning.UNIVERSITY_HOUSING_KICK_OUT_SITUATION), guest_list=guest_list,
      scoring_enabled=False,
      kick_out_reason=kick_out_reason,
      additional_sim_ids=additional_sim_ids,
      university_housing_destination_zone_id=university_housing_destination_zone_id)
@sims4.commands.Command('university.dropout', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def dropout(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Show the dropout confirmation dialog for the target Sim."""
    target_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if target_info is None:
        sims4.commands.output('No valid target for university.dropout', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.dropout'.format(target_info), _connection)
        return False
    tracker.show_dropout_dialog()
    return True
@sims4.commands.Command('university.withdraw', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def withdraw(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Withdraw the target Sim from university, then revalidate who may
    live in university housing."""
    target_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if target_info is None:
        sims4.commands.output('No valid target for university.withdraw', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.withdraw'.format(target_info), _connection)
        return False
    tracker.withdraw()
    services.venue_service().validate_university_housing_household_sims()
    return True
@sims4.commands.Command('university.complete_course', pack=(Pack.EP08))
def complete_course(course: TunableInstanceParam(sims4.resources.Types.UNIVERSITY_COURSE_DATA), course_score: int=100, opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Force-complete the given course for the targeted Sim with the given score."""
    target_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if target_info is None:
        sims4.commands.output('No valid target for university.complete_course', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.complete_course'.format(target_info), _connection)
        return False
    # Iterate a snapshot: complete_course() mutates the tracker's courses.
    for guid, info in list(tracker._course_infos.items()):
        if info.course_data is not course:
            continue
        info.lectures = tracker.COURSE_LECTURE_COUNT
        info.final_requirement_completed = True
        tracker.course_infos[guid] = info
        tracker.complete_course(guid, course_score)
        return True
    sims4.commands.output('Sim is not currently enrolled in course {}'.format(course), _connection)
    return False
@sims4.commands.Command('university.finish_term', pack=(Pack.EP08))
def finish_term(course_score: int=100, opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Complete every unfinished course with the given score and end the term."""
    target_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if target_info is None:
        sims4.commands.output('No valid target for university.finish_term', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.finish_term'.format(target_info), _connection)
        return False
    status = tracker.get_enrollment_status()
    if status not in (EnrollmentStatus.ENROLLED, EnrollmentStatus.PROBATION):
        sims4.commands.output('The Sim is not currently enrolled in a university term.', _connection)
        return False
    # Iterate a snapshot: complete_course() mutates the tracker's courses.
    for guid, info in list(tracker._course_infos.items()):
        if info.final_grade == Grade.UNKNOWN:
            info.lectures = tracker.COURSE_LECTURE_COUNT
            info.final_requirement_completed = True
            tracker.course_infos[guid] = info
            tracker.complete_course(guid, course_score)
    tracker.complete_term()
    return True
@sims4.commands.Command('university.graduate', pack=(Pack.EP08))
def graduate(gpa: float=4.0, opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Drop all enrolled courses and graduate the targeted Sim with the given GPA.

    Returns True on success, False when no valid target/tracker/enrollment exists.
    """
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        sims4.commands.output('No valid target for university.graduate', _connection)
        return False
    degree_tracker = sim_info.degree_tracker
    if degree_tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.graduate'.format(sim_info), _connection)
        return False
    if degree_tracker.get_enrollment_status() == EnrollmentStatus.NONE:
        sims4.commands.output('The Sim is not currently in a degree program.', _connection)
        return False
    degree_tracker.drop_enrolled_courses()
    degree_tracker.graduate(gpa=gpa)
    # Consistency fix: sibling commands return True on success; this one
    # previously fell through and returned None.
    return True
@sims4.commands.Command('university.grade_report', pack=(Pack.EP08))
def grade_report(course: TunableInstanceParam(sims4.resources.Types.UNIVERSITY_COURSE_DATA), opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Show the grade report for the given course on the targeted Sim.

    Returns True when the report was requested, False otherwise.
    """
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        sims4.commands.output('No valid target for university.grade_report', _connection)
        return False
    degree_tracker = sim_info.degree_tracker
    if degree_tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.grade_report'.format(sim_info), _connection)
        return False
    for course_guid, course_info in degree_tracker._course_infos.items():
        if course_info.course_data is course:
            degree_tracker.get_grade_report(course_guid)
            return True
    # Consistency fix: previously fell through and returned None silently when
    # the course was not found; report it like university.complete_course does.
    sims4.commands.output('Sim is not currently enrolled in course {}'.format(course), _connection)
    return False
@sims4.commands.Command('university.degree_info', pack=(Pack.EP08))
def degree_info(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Dump the targeted Sim's degree-tracker state to the command output."""
    target_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if target_info is None:
        sims4.commands.output('No valid target for university.degree_info', _connection)
        return False
    tracker = target_info.degree_tracker
    if tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker in university.degree_info'.format(target_info), _connection)
        return False
    def _emit(text):
        sims4.commands.output(text, _connection)
    major = tracker.get_major()
    university = tracker.get_university()
    day = tracker.get_current_day_of_term()
    past_courses = tracker.get_previous_courses()
    _emit('Major: {}'.format(major.__name__ if major else 'None'))
    _emit('University: {}'.format(university.__name__ if university else 'None'))
    _emit('GPA: {}'.format(tracker.get_gpa()))
    _emit('Enrollment Status: {}'.format(tracker.get_enrollment_status().name))
    _emit('Current day of term: {}'.format(day if day else 'None'))
    _emit('Previous Courses: {}'.format('' if past_courses else 'None'))
    for past_course in past_courses:
        _emit(' {}'.format(past_course.__name__))
    return True
@sims4.commands.Command('university.end_kickout_situation', pack=(Pack.EP08))
def end_kickout_situation(_connection=None):
    # Fire the HouseholdSplitPanelClosed test event; presumably listeners treat
    # it as the end of the kick-out flow — confirm against situation tuning.
    services.get_event_manager().process_event(test_events.TestEvent.HouseholdSplitPanelClosed)
@sims4.commands.Command('university.clear_scholarships', pack=(Pack.EP08), command_type=(sims4.commands.CommandType.Live))
def clear_scholarships(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Clear all scholarships on the targeted Sim's degree tracker.

    Returns True on success, False when no valid target/tracker exists.
    """
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        sims4.commands.output('No valid target for clear scholarship command.', _connection)
        return False
    degree_tracker = sim_info.degree_tracker
    if degree_tracker is None:
        sims4.commands.output('Sim: {} has no degree tracker.'.format(sim_info), _connection)
        return False
    degree_tracker.clear_scholarships()
    # Consistency fix: sibling commands return True on success; this one
    # previously returned None.
    return True
@sims4.commands.Command('university.award_all_degrees', pack=(Pack.EP08))
def award_all_degrees(prestige: bool=False, honors: bool=False, opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Grant every tuned degree trait matching the prestige/honors combination."""
    target_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if target_info is None:
        sims4.commands.output('No valid target for university.award_all_degrees.', _connection)
        return False
    for tuning_entry in UniversityCommandTuning.DEGREE_TRAITS:
        if tuning_entry.prestige != prestige or tuning_entry.honors != honors:
            continue
        for degree_trait in tuning_entry.traits:
            target_info.add_trait(degree_trait)
        return True
    return False
"""
The module :mod:`~tsfresh.transformers` contains several transformers which can be used inside a sklearn pipeline.
"""
from tsfresh.transformers.feature_augmenter import FeatureAugmenter
from tsfresh.transformers.feature_selector import FeatureSelector
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
from tsfresh.transformers.per_column_imputer import PerColumnImputer
|
from datetime import datetime
from pydantic import BaseModel, Field
from logcord.models.message import Message
class MessageLog(BaseModel):
    """The model schema for a message log."""
    # Identifier of this log.
    id: str
    # Creation timestamp; default_factory is evaluated per instance.
    # NOTE(review): datetime.utcnow returns a *naive* datetime and is deprecated
    # since Python 3.12 — consider datetime.now(timezone.utc), but confirm
    # consumers first since that changes serialized values.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    # Ordered messages captured in this log.
    messages: list[Message]
|
import pytest
from unittest.mock import sentinel, Mock
import bokeh.palettes
import pandas as pd
import pandas.testing as pdt
import datetime as dt
import numpy as np
import glob
import forest.drivers
from forest.drivers import earth_networks
# Two raw Earth Networks flash records used as a parsing fixture.
# NOTE(review): field meanings (flash type, timestamp, latitude, longitude, ...)
# are inferred from the assertions in test_earth_networks — confirm against the
# Earth Networks feed documentation.
LINES = [
    "1,20190417T000001.440,+02.7514400,+031.9206400,-000001778,000,15635,007,001",
    "1,20190417T000001.093,+02.6388400,+031.9008800,+000002524,000,14931,007,012"
]
def test_earth_networks(tmpdir):
    """Loader should parse a raw Earth Networks file into a dataframe."""
    path = str(tmpdir / "sample.txt")
    with open(path, "w") as stream:
        # BUGFIX: writelines() adds no separators and the LINES entries carry
        # no trailing "\n", so both records previously ended up on one line.
        stream.write("\n".join(LINES))
    loader = earth_networks.Loader()
    frame = loader.load([path])
    result = frame.iloc[0]
    atol = 0.000001
    if isinstance(result["date"], dt.datetime):
        # Pandas <0.25.x
        assert result["date"] == dt.datetime(2019, 4, 17, 0, 0, 1, 440000)
    else:
        # Pandas 1.0.x
        assert result["date"] == np.datetime64('2019-04-17T00:00:01.440000000')
    assert result["flash_type"] == "IC"
    assert abs(result["latitude"] - 2.75144) < atol
    assert abs(result["longitude"] - 31.92064) < atol
def test_dataset():
    """get_dataset should build an earth_networks Dataset."""
    result = forest.drivers.get_dataset("earth_networks")
    assert isinstance(result, forest.drivers.earth_networks.Dataset)
def get_navigator(settings):
    """Helper: build a Navigator from driver settings."""
    return forest.drivers.get_dataset("earth_networks", settings).navigator()
def test_dataset_navigator():
    """A dataset configured with a glob pattern should yield a Navigator."""
    result = get_navigator({"pattern": "*.txt"})
    assert isinstance(result, forest.drivers.earth_networks.Navigator)
def test_navigator_variables():
    """The navigator should advertise all six derived lightning variables."""
    expected = {
        "Strike density (cloud-ground)",
        "Strike density (intra-cloud)",
        "Strike density (total)",
        "Time since flash (cloud-ground)",
        "Time since flash (intra-cloud)",
        "Time since flash (total)",
    }
    navigator = earth_networks.Navigator([])
    assert set(navigator.variables(None)) == expected
def test_view_render_density():
    """Strike-density rendering should select the 8-color Spectral palette."""
    # BUGFIX: Mock() has no `specs` keyword — the original typo merely created
    # an attribute named `specs`, so the spec was never enforced.
    locator = Mock(spec=["find"])
    loader = Mock(spec=["load"])
    loader.load.return_value = pd.DataFrame({
        "flash_type": [],
        "longitude": [],
        "latitude": [],
    })
    view = earth_networks.View(loader, locator)
    view.render({
        "variable": "Strike density (cloud-ground)",
        "valid_time": "1970-01-01T00:00:00Z"
    })
    expect = bokeh.palettes.all_palettes["Spectral"][8]
    assert view.color_mappers["image"].palette == expect
def test_view_render_time_since_flash():
    """Time-since-flash rendering should select the 8-color RdGy palette."""
    # BUGFIX: Mock() has no `specs` keyword — the original typo merely created
    # an attribute named `specs`, so the spec was never enforced.
    locator = Mock(spec=["find"])
    loader = Mock(spec=["load"])
    loader.load.return_value = pd.DataFrame({
        "date": [],
        "flash_type": [],
        "longitude": [],
        "latitude": [],
    })
    view = earth_networks.View(loader, locator)
    view.render({
        "variable": "Time since flash (cloud-ground)",
        "valid_time": "1970-01-01T00:00:00Z"
    })
    expect = bokeh.palettes.all_palettes["RdGy"][8]
    assert view.color_mappers["image"].palette == expect
# Each variable family has its own hover layout: time-since-flash rows show an
# elapsed-time column, strike-density rows show the value with units.
@pytest.mark.parametrize("variable, expect", [
    pytest.param("Time since flash (intra-cloud)", [
        ('Variable', '@variable'),
        ('Time window', '@window{00:00:00}'),
        ('Period start', '@date{%Y-%m-%d %H:%M:%S}'),
        ("Since start", "@image{00:00:00}")
    ], id="time since flash"),
    pytest.param("Strike density (cloud-ground)", [
        ('Variable', '@variable'),
        ('Time window', '@window{00:00:00}'),
        ('Period start', '@date{%Y-%m-%d %H:%M:%S}'),
        ('Value', '@image @units'),
    ], id="strike density"),
])
def test_view_tooltips(variable, expect):
    """View.tooltips should return the tooltip rows matching the variable."""
    assert earth_networks.View.tooltips(variable) == expect
# Formatters pair with the tooltips above: time-since-flash additionally
# formats the @image column as a numeral.
@pytest.mark.parametrize("variable, expect", [
    pytest.param("Time since flash (intra-cloud)", {
        '@date': 'datetime',
        '@window': 'numeral',
        '@image': 'numeral'
    }, id="time since flash"),
    pytest.param("Strike density (cloud-ground)", {
        '@date': 'datetime',
        '@window': 'numeral'
    }, id="strike density"),
])
def test_view_formatters(variable, expect):
    """View.formatters should return hover formatters matching the variable."""
    assert earth_networks.View.formatters(variable) == expect
def test_view_since_flash():
    """since_flash should return seconds elapsed from the period start."""
    view = earth_networks.View(Mock(), Mock())
    flashes = ["2020-01-01T00:00:00Z", "2020-01-01T01:00:00Z"]
    start = "2020-01-01T00:00:00Z"
    actual = view.since_flash(flashes, start)
    pdt.assert_series_equal(actual, pd.Series([0., 3600.]))
|
import string
import secrets
from config import TOKENS_FILE
def generate_tokens(limit=10000000):
    """Generate random 7-character tokens and dump them in the token file.

    Args:
        limit (int, optional): number of tokens to generate. Defaults to 10000000.
    """
    alphabet = string.ascii_lowercase  # Digits and uppercase letters can be added here
    # Build every line first and flush with a single writelines(): far fewer
    # write calls than writing line-by-line.
    # (`_` replaces the inner genexpr variable that shadowed the outer counter.)
    tokens = [
        "".join(secrets.choice(alphabet) for _ in range(7)) + "\n"
        for _ in range(limit)
    ]
    with open(TOKENS_FILE, "w") as file:
        file.writelines(tokens)
    print("Tokens generated.")
# Script entry point: generate the default number of tokens.
if __name__ == "__main__":
    generate_tokens()
|
from __future__ import annotations
import pathlib
import shutil
import pdfkit
import pymdownx # noqa: F401
from abc import abstractmethod
from typing import Any, cast
from jinja2 import Environment, DictLoader, StrictUndefined
from markdown import markdown
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from logging import Logger, getLogger
from statements_manager.src.params_maker.lang_to_class import lang_to_class
from statements_manager.src.variables_converter import VariablesConverter
from statements_manager.template import template_html, template_pdf_options
logger = getLogger(__name__) # type: Logger
class ReplaceSampleFormatExpr(Preprocessor):
    """Rewrite every *opening* bare ``` fence so sample blocks carry the
    input-format class; closing fences and annotated fences are untouched."""

    def run(self, lines):
        fence_count = 0
        result = []
        for raw_line in lines:
            stripped = raw_line.strip()
            if not stripped.startswith("```"):
                result.append(raw_line)
                continue
            # Even fence_count means this fence opens a code block.
            if stripped == "```" and fence_count % 2 == 0:
                result.append("``` { .input-format .input-format }")
            else:
                result.append(raw_line)
            fence_count += 1
        assert fence_count % 2 == 0
        return result
class ReplaceSampleFormatExprExtension(Extension):
    """Markdown extension that installs ReplaceSampleFormatExpr as a preprocessor."""
    def extendMarkdown(self, md):
        # Priority 999 so the fence rewrite runs before other preprocessors.
        md.preprocessors.register(
            ReplaceSampleFormatExpr(md), "replace_sample_format", 999
        )
class BaseManager:
    """Base class for statement renderers.

    Loads a problem statement, substitutes template variables, optionally
    generates a params file, and writes HTML / PDF / Markdown output.
    Subclasses provide get_contents() to fetch the raw statement source.
    """
    def __init__(self, problem_attr):
        # Directory the tool was launched from (kept for subclasses).
        self._cwd = pathlib.Path.cwd()
        # Per-problem settings: id, statement_path, output_path, output_ext, ...
        self.problem_attr = problem_attr # type: dict[str, Any]
        # When False, run_problem() skips this problem entirely.
        self.state = True
    @abstractmethod
    def get_contents(self, statement_path: pathlib.Path) -> str:
        """Return the raw statement text found at statement_path."""
        pass
    def replace_vars(self, html: str) -> str:
        """Substitute {@ ... } expressions (constraints / samples) in the statement."""
        vars_manager = VariablesConverter(self.problem_attr)
        # Custom delimiters so ordinary TeX/Markdown braces survive untouched;
        # StrictUndefined makes a missing variable fail loudly.
        env = Environment(
            variable_start_string="{@",
            variable_end_string="}",
            loader=DictLoader({"task": html}),
            undefined=StrictUndefined,
        )
        template = env.get_template("task")
        replaced_html = template.render(
            constraints=vars_manager["constraints"],
            samples=vars_manager["samples"],
        )
        return replaced_html
    def apply_template(self, html: str) -> str:
        """Embed rendered statement HTML into the page-level HTML template."""
        env = Environment(
            variable_start_string="{@",
            variable_end_string="}",
            loader=DictLoader({"template": template_html}),
        )
        replaced_html = env.get_template("template").render(task={"statements": html})
        return replaced_html
    def save_html(self, html: str, output_path: pathlib.Path):
        """Write the given HTML text to output_path."""
        with open(output_path, "w") as f:
            f.write(html)
    def run_problem(self) -> str:
        """Render this problem to the configured output format.

        Returns the statement HTML when output_ext == "pdf" (used later by
        run_problemset to build a combined PDF), otherwise an empty string.

        Raises:
            KeyError: when statement_path is not configured.
            ValueError: on an unsupported output_ext.
        """
        if not self.state:
            logger.info(f"skipped [problem id: {self.problem_attr['id']}]")
            return ""
        logger.info(f"rendering [problem id: {self.problem_attr['id']}]")
        # get contents (main text)
        if "statement_path" not in self.problem_attr:
            logger.error("statement_path is not set")
            raise KeyError("statement_path is not set")
        contents = self.get_contents(pathlib.Path(self.problem_attr["statement_path"]))
        contents = self.replace_vars(contents)
        # create params
        logger.info("create params file")
        if "params_path" in self.problem_attr and "constraints" in self.problem_attr:
            # The params file language is chosen from the file extension.
            ext = pathlib.Path(self.problem_attr["params_path"]).suffix # type: str
            if ext in lang_to_class:
                params_maker = lang_to_class[ext](
                    self.problem_attr["constraints"],
                    self.problem_attr["params_path"],
                ) # type: Any
                params_maker.run()
            else:
                logger.warning(
                    f"skip creating params: no language config which matches '{ext}'"
                )
        elif "constraints" not in self.problem_attr:
            logger.warning("skip creating params: constraints are not set")
        else:
            logger.warning("skip creating params: params_path is not set")
        # make output directory
        output_path = self.problem_attr["output_path"]
        if output_path.exists():
            logger.warning(f"output directory '{output_path}' already exists.")
        else:
            output_path.mkdir()
        # copy assets (related toml: problem)
        if "assets_path" in self.problem_attr:
            assets_src_path = pathlib.Path(self.problem_attr["assets_path"])
            assets_dst_path = output_path / pathlib.Path("assets")
            if assets_src_path.exists():
                logger.info("copy assets file")
                if assets_dst_path.exists():
                    logger.warning(
                        f"assets directory '{assets_dst_path}' already exists."
                    )
                shutil.copytree(assets_src_path, assets_dst_path, dirs_exist_ok=True)
            else:
                logger.warning(
                    f"assets_path '{self.problem_attr['assets_path']}' does not exist."
                )
        output_ext = self.problem_attr["output_ext"]
        result_html = ""
        logger.info(f"saving replaced {output_ext}")
        if output_ext == "html":
            # convert: markdown -> html
            replace_sample_format = ReplaceSampleFormatExprExtension()
            html = markdown(
                contents,
                extensions=[
                    replace_sample_format,
                    "md_in_html",
                    "tables",
                    "markdown.extensions.fenced_code",
                ],
            )
            html = self.apply_template(html)
            # `output_path` is re-bound from directory to target file here.
            output_path = output_path / pathlib.Path(self.problem_attr["id"] + ".html")
            self.save_html(html, output_path)
        elif output_ext == "pdf":
            # convert: markdown -> html
            replace_sample_format = ReplaceSampleFormatExprExtension()
            # pathconverter rewrites relative asset paths so wkhtmltopdf can
            # resolve them from the output directory.
            html = markdown(
                contents,
                extensions=[
                    replace_sample_format,
                    "md_in_html",
                    "tables",
                    "markdown.extensions.fenced_code",
                    "pymdownx.pathconverter",
                ],
                extension_configs={
                    "pymdownx.pathconverter": {
                        "absolute": True,
                        "base_path": output_path.resolve(),
                    }
                },
            )
            # Keep the pre-template HTML so a problemset PDF can be assembled.
            result_html = html
            html = self.apply_template(html)
            output_path = output_path / pathlib.Path(self.problem_attr["id"] + ".pdf")
            # javascript-delay gives MathJax time to typeset before rendering.
            wait_second = (
                int(cast(int, template_pdf_options["javascript-delay"])) // 1000
            )
            logger.info(f"please wait... ({wait_second} sec or greater)")
            pdfkit.from_string(html, output_path, options=template_pdf_options)
        elif output_ext == "md":
            output_path = output_path / pathlib.Path(self.problem_attr["id"] + ".md")
            with open(output_path, "w") as f:
                f.write(contents)
        else:
            logger.error(f"invalid extension '{output_ext}'")
            raise ValueError(f"invalid extension '{output_ext}'")
        return result_html
    def run_problemset(self, problemset_html: str, output_path: pathlib.Path) -> None:
        """Render the combined problemset PDF from the concatenated per-problem HTML."""
        # Do nothing unless we were run in PDF output mode.
        if len(problemset_html) == 0:
            return
        logger.info("create problemset pdf")
        html = self.apply_template(problemset_html)
        if output_path.exists():
            logger.warning(f"output directory '{output_path}' already exists.")
        else:
            output_path.mkdir()
        output_path = output_path / pathlib.Path("problemset.pdf")
        # Larger delay: the combined document needs more typesetting time.
        template_pdf_options["javascript-delay"] = 10000
        wait_second = int(cast(int, template_pdf_options["javascript-delay"])) // 1000
        logger.info(f"please wait... ({wait_second} sec or greater)")
        pdfkit.from_string(html, output_path, options=template_pdf_options)
|
# Copyright 2019 FairwindsOps Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from .provider import HelmProvider
from .command import HelmCommand
import re
import logging
def get_helm_client(helm_arguments, client_version=None, helm_provider=HelmProvider):
    """Build a helm client, auto-detecting the version when none is given.

    Args:
        helm_arguments (list): default arguments for the HelmClient instance
        client_version (string): which client class to use (only "3" supported)
        helm_provider: provider class used to run helm commands
    Returns:
        Helm3Client
    Raises:
        HelmClientException
    """
    try:
        if client_version is None:
            logging.debug('Helm version not declared, detecting version...')
            client = Helm3Client(default_helm_arguments=helm_arguments, provider=helm_provider)
            logging.debug('Checking for Helm 3 client')
            detected = client.version
            logging.info('Found Helm Version {}'.format(detected))
            return client
        if client_version == "3":
            return Helm3Client(default_helm_arguments=helm_arguments, provider=helm_provider)
        raise HelmClientException("Unsupported version explicitly specified: client_version={}".format(client_version))
    except HelmClientException as e:
        logging.error(e)
        raise HelmClientException('Could not detect helm version')
    except Exception as e:
        logging.error(e)
        raise HelmClientException('Received an unexpected exception in the helm detection, please see the debug output for details.')
class HelmClient(ABC):
    """Abstract base for version-specific helm clients.

    Runs helm commands through a provider, prepending a set of default
    arguments and optionally filtering out flags that are not valid for
    every helm subcommand.
    """
    repository_header_regex = re.compile(r'^NAME\s+URL$')
    @abstractmethod
    def version_regex(self):
        """Regex that extracts the semantic version from `helm version` output."""
        pass
    @abstractmethod
    def global_helm_flags(self):
        """Flag names that every helm subcommand accepts."""
        pass
    def __init__(self, default_helm_arguments=None, provider=HelmProvider):
        # BUGFIX: the default was a shared mutable list ([]); None is
        # normalized to [] by _validate_default_helm_args, so behavior for
        # callers is unchanged.
        self._default_helm_arguments = self._validate_default_helm_args(default_helm_arguments)
        self._provider = provider
    @property
    def default_helm_arguments(self):
        """The default helm arguments for all commands run through the client."""
        return self._default_helm_arguments
    @property
    def cache(self):
        """Path of the helm repository cache (resolved by the subclass)."""
        try:
            return self._cache
        except Exception as e:
            logging.error("Error determining repository cache location. Cannot proceed")
            raise e
    @default_helm_arguments.setter
    def default_helm_arguments(self, value):
        """Setter of the default helm arguments to override"""
        self._default_helm_arguments = value
    def execute(self, command, arguments=None, filter_non_global_flags=False, plugin=None):
        """Run the command with the help of the provider.

        Args:
            command (string): helm subcommand to run
            arguments (list): extra arguments appended after the defaults
            filter_non_global_flags (bool): drop default arguments that are
                not valid for every subcommand
            plugin (string): when set, run `<plugin> <command> ...` instead
        Returns:
            HelmCmdResponse
        Raises:
            HelmClientException: when the underlying command fails
        """
        # BUGFIX: avoid a mutable default argument; None behaves as [].
        if arguments is None:
            arguments = []
        default_args = list(self.default_helm_arguments)
        if filter_non_global_flags:
            self._clean_non_global_flags(default_args)
        arguments = default_args + list(arguments)
        # If we need to run wrapped in a plugin, then put that command first, always
        if plugin:
            arguments = [command] + arguments
            command = plugin
        helm_command = HelmCommand(
            command=command,
            arguments=arguments,
        )
        response = self._provider.execute(helm_command)
        if response.succeeded:
            return response
        else:
            err = HelmClientException('Command Failed with output below:\nSTDOUT: {}\nSTDERR: {}\nCOMMAND: {}'.format(
                response.stdout, response.stderr, response.command))
            logging.debug(err)
            raise err
    @property
    def repositories(self):
        """Names of the repositories configured on the helm client."""
        logging.debug("Listing repositories configured on helm client")
        repository_names = []
        try:
            raw_repositories = self.execute('repo', ['list'], filter_non_global_flags=True).stdout
        except HelmClientException:
            logging.warning("Error getting repositories from client, maybe none have been initialized?")
            return repository_names
        for line in raw_repositories.splitlines():
            # Try to filter out the header line as a viable repo name
            if HelmClient.repository_header_regex.match(str(line)):
                continue
            # If the line is blank
            if not line:
                continue
            repository_names.append(line.split()[0])
        return repository_names
    def check_helm_command(self):
        """Return True when a plain `helm help` succeeds."""
        return self.execute("help", [], filter_non_global_flags=True).succeeded
    def upgrade(self, args, install=True, plugin=None):
        """Run `helm upgrade` (with --install by default)."""
        if install:
            arguments = ['--install'] + args
        else:
            arguments = args
        return self.execute("upgrade", arguments, plugin=plugin)
    def template(self, args, plugin=None):
        """Run `helm template`."""
        return self.execute("template", args, plugin=plugin)
    def get_manifest(self, args, plugin=None):
        """Run `helm get manifest`."""
        return self.execute("get manifest", args, plugin=plugin)
    def rollback(self, release):
        raise NotImplementedError(
            """This is known bad. If you see this error then you are likely implementing the solution :)"""
        )
    def dependency_update(self, chart_path):
        """Function to update chart dependencies"""
        return self.execute('dependency', ['update', chart_path], filter_non_global_flags=True)
    def repo_update(self):
        """Function to update all the repositories"""
        return self.execute('repo', ['update'], filter_non_global_flags=True)
    def repo_add(self, name, url):
        """Function add repositories to helm via command line"""
        return self.execute('repo', ['add', name, url], filter_non_global_flags=True)
    @classmethod
    def _clean_non_global_flags(cls, list_of_args):
        """Remove every non-global flag from list_of_args **in place**.

        (The previous docstring claimed a copy was returned; callers such as
        execute() actually rely on the in-place mutation.)
        """
        # Filtering out non-global helm flags -- this is to try and support
        # setting all-encompassing flags like `tiller-namespace` but avoiding
        # passing subcommand specific flags to commands that don't support
        # them.
        # Example: `helm upgrade --install --recreate-pods ...` but we don't
        # want to run `helm repo add --recreate-pods repo-name ...`
        #
        # TODO: This is a slow implementation but it's fine for cli (presumably)
        #
        # BUGFIX: iterate over a snapshot of the list. Removing items from the
        # list being iterated skips the element that follows each removal, so
        # consecutive non-global flags were only partially filtered.
        for arg in list(list_of_args):
            logging.debug('Processing {} argument'.format(arg))
            known_global = False
            for valid in cls.global_helm_flags:
                if re.findall(r"--{}(\s|$)+".format(valid), arg):
                    known_global = True
                    break # break out of loop and stop searching for valids for this one argument
            if known_global:
                logging.debug('This argument {} was found in valid arguments: {}, keeping in list.'.format(arg, ' '.join(cls.global_helm_flags)))
            else:
                list_of_args.remove(arg)
                logging.debug('This argument {} was not found in valid arguments: {}, removing from list.'.format(arg, ' '.join(cls.global_helm_flags)))
    @staticmethod
    def _validate_default_helm_args(helm_args):
        """Normalize/validate default helm arguments; None becomes []."""
        # Allow class to be instantiated with default_helm_arguments to be None
        if helm_args is None:
            helm_args = []
        # Validate that we're providing an iterator for default helm args
        # also check for type string, python3 strings contain __iter__
        if not hasattr(helm_args, '__iter__') or isinstance(helm_args, str):
            logging.error("This class is being instantiated without an iterator for default_helm_args.")
            raise ValueError('default_helm_arguments needs to be an iterator')
        return helm_args
    @abstractmethod
    def version(self):
        """The detected helm binary version string."""
        pass
class HelmClientException(Exception):
    """Raised when a helm command fails or no usable client can be built."""
class HelmVersionException(Exception):
    """Raised for helm version related errors."""
class Helm3Client(HelmClient):
    """Concrete helm client for Helm 3 binaries."""
    version_regex = re.compile(r'v([0-9\.]+)([\-,\+][a-zA-Z]+)(\+g[0-9a-f]+)?')
    global_helm_flags = ['debug', 'home', 'host', 'kube-context', 'kubeconfig']
    @property
    def version(self):
        """The detected helm binary version string."""
        return self._get_version()
    @property
    def _cache(self):
        """Repository cache path parsed from `helm env` output."""
        response = self.execute("env", filter_non_global_flags=True)
        matches = [
            env_line for env_line in response.stdout.splitlines()
            if 'HELM_REPOSITORY_CACHE' in env_line
        ]
        # An IndexError here (no matching line) surfaces through the
        # base-class `cache` property, as before.
        return matches[0].split('=')[1].replace('"', '')
    def _get_version(self):
        """Run `helm version --short` and extract the version number."""
        raw = self.execute("version", arguments=['--short'], filter_non_global_flags=True)
        found = self._find_version(raw.stdout)
        if found is not None:
            return found
        raise HelmClientException(
            """Could not find version!! Could the helm response format have changed?
                STDOUT: {}
                STDERR: {}
                COMMAND: {}""".format(raw.stdout, raw.stderr, raw.command)
        )
    @staticmethod
    def _find_version(raw_version):
        """Return the version captured by version_regex, or None."""
        found = Helm3Client.version_regex.search(str(raw_version))
        return found.group(1) if found else None
|
numbers = [1, 2, 3, [4, 5, 6], [7, 8, 9]]

def type_function(l):
    """Return how many elements of *l* are themselves lists."""
    # isinstance is the idiomatic type check (also accepts list subclasses),
    # and sum over a generator replaces the manual append-then-len loop.
    return sum(1 for item in l if isinstance(item, list))

print(type_function(numbers))
import os
import numpy as np
def filter_points(points: np.ndarray, eps: float):
"""
Removes points that are within `eps` distance of each other.
# Arguments
points (np.ndarray): point array to filter
eps (float): remove adjacent points within this distance of each other
# Returns
Filtered points
"""
from scipy.spatial.distance import cdist
mask = np.ones(np.shape(points)[0], dtype=bool)
for (i, p) in enumerate(points):
if mask[i]:
dst = cdist(points, [p])
mask[np.argwhere((dst > 0.0) & (dst < eps))] = False
return points[mask]
def cleanup(files, silent=True):
    """Delete the given file(s), optionally reporting failures.

    Args:
        files: a single path or a list of paths to remove.
        silent (bool): when False, print a message for each file that could
            not be removed. Never raises.
    """
    if not isinstance(files, list):
        files = [files]
    for file in files:
        try:
            os.remove(file)
        except Exception as e:
            if not silent:
                # BUGFIX: exceptions have no `.message` attribute in Python 3;
                # formatting the exception itself yields the readable text.
                print(f"Could not remove {file}: {e}")
def line_connectivity(nodes: np.ndarray, connect_ends: bool = False):
    """
    Simple function to define a closed or open polyline for a set of
    nodes. Assumes adjacency in array == implicit connection.
    That is, requires a clockwise- or counter-clockwise set of nodes.

    Returns an (N-1) x 2 (open) or N x 2 (closed) integer array of
    1-based node index pairs.
    """
    delta = 0 if connect_ends else -1
    size = np.shape(nodes)[0]
    # BUGFIX: `np.int` was removed in NumPy 1.24 (deprecated since 1.20);
    # the builtin `int` is the documented replacement.
    connectivity = np.empty((size + delta, 2), dtype=int)
    for i in range(size - 1):
        connectivity[i] = np.array((i + 1, i + 2))
    if connect_ends:
        # Close the loop: last segment joins the final node back to the first.
        connectivity[-1] = np.array((size, 1))
    return connectivity
def write_line(
    boundary,
    outfile: str,
    connections=None,
    material_id=None,
    node_atts: dict = None,
    cell_atts: dict = None,
):
    """Write a 2D polyline (nodes + line cells) to an AVS-UCD style file.

    Args:
        boundary: (N, 2+) array of node coordinates (x, y used).
        outfile (str): destination file path.
        connections: (M, 2) array of 1-based node index pairs, or None.
        material_id: per-cell material IDs; defaults to 1 for every cell.
        node_atts (dict): per-node attribute arrays keyed by name.
        cell_atts (dict): per-cell attribute arrays keyed by name.
    """
    nnodes = np.shape(boundary)[0]
    nlines = np.shape(connections)[0] if connections is not None else 0
    natts = len(node_atts.keys()) if node_atts is not None else 0
    catts = len(cell_atts.keys()) if cell_atts is not None else 0
    if material_id is not None:
        assert (
            np.shape(material_id)[0] >= nlines
        ), "Mismatch count between material ID and cells"
    with open(outfile, "w") as f:
        # Header: node count, cell count, node-att count, cell-att count.
        f.write("{} {} {} {} 0\n".format(nnodes, nlines, natts, catts))
        for i in range(nnodes):
            f.write("{} {} {} 0.0\n".format(i + 1, boundary[i][0], boundary[i][1]))
        for i in range(nlines):
            mat_id = material_id[i] if material_id is not None else 1
            f.write(
                "{} {} line {} {}\n".format(
                    i + 1, mat_id, connections[i][0], connections[i][1]
                )
            )
        if natts:
            for key in node_atts.keys():
                assert np.shape(node_atts[key])[0] >= nnodes, (
                    "Length of node attribute %s does not match length of points array"
                    % key
                )
            # 00007 1 1 1 1 1 1 1
            f.write(str(natts) + " 1" * natts + "\n")
            # imt1, integer
            # itp1, integer
            # BUGFIX: the conditional previously bound to the whole
            # concatenation (`a + b if cond else c`), so non-integer
            # attributes were written as a bare "real" with the name dropped.
            _t = "\n".join(
                [
                    key + ", " + ("integer" if node_atts[key].dtype == int else "real")
                    for key in node_atts.keys()
                ]
            )
            f.write(_t + "\n")
            for i in range(nnodes):
                _att_str = "%d" % (i + 1)
                for key in node_atts.keys():
                    _att_str += " " + str(node_atts[key][i])
                _att_str += "\n"
                f.write(_att_str)
        if catts:
            for key in cell_atts.keys():
                assert np.shape(cell_atts[key])[0] >= nlines, (
                    "Length of cell attribute %s does not match length of elem array"
                    % key
                )
            # 00007 1 1 1 1 1 1 1
            f.write(str(catts) + " 1" * catts + "\n")
            # imt1, integer
            # itp1, integer
            # BUGFIX: same ternary-precedence fix as for node attributes.
            _t = "\n".join(
                [
                    key + ", " + ("integer" if cell_atts[key].dtype == int else "real")
                    for key in cell_atts.keys()
                ]
            )
            f.write(_t + "\n")
            for i in range(nlines):
                _att_str = "%d" % (i + 1)
                for key in cell_atts.keys():
                    _att_str += " " + str(cell_atts[key][i])
                _att_str += "\n"
                f.write(_att_str)
            f.write("\n")
class Infiles:
    """Namespace of LaGriT command-file ("infile") templates.

    The string attributes hold LaGriT scripts containing placeholder tokens
    (PARAM_A, PARAM_B, PARAM_A2, PARAM_B2, CMO_PRISM) that callers are
    expected to substitute before handing the script to LaGriT.
    """
    # NOTE(review): no `self` parameter and no @staticmethod decorator —
    # this is only callable via the class (Infiles._surf_mesh_backup(...));
    # confirm that is the intended usage.
    def _surf_mesh_backup(in_name, out_name, skip_sort=False):
        """Return a LaGriT infile that extracts the external surface mesh
        of the prism mesh read from *in_name* and dumps it to *out_name*.

        Unless *skip_sort* is True, elements are first sorted by their
        median coordinates so the output ordering is reproducible.
        """
        # Driver for producing a surface mesh from
        # a prism mesh
        if skip_sort:
            infile = """read/avs/{0}/mo1
resetpts/itp
extract/surfmesh/1,0,0/mo2/mo1/external
dump/avs/{1}/mo2
finish""".format(
                in_name, out_name
            )
            return infile
        infile = """read/avs/{0}/mo1
resetpts/itp
createpts/median
sort/mo1/index/ascending/ikey/itetclr zmed ymed xmed
reorder/mo1/ikey
cmo/DELATT/mo1/ikey
cmo/DELATT/mo1/xmed
cmo/DELATT/mo1/ymed
cmo/DELATT/mo1/zmed
cmo/DELATT/mo1/ikey
extract/surfmesh/1,0,0/mo2/mo1/external
dump/avs/{1}/mo2
finish
""".format(
            in_name, out_name
        )
        return infile
    # user_function
    # Turns a computed distance field into the x_four sizing attribute via
    # multiply-by-PARAM_A then add-PARAM_B.
    distance_field = """cmo/DELATT/mo_pts/dfield
compute / distance_field / mo_pts / mo_line_work / dfield
math/multiply/mo_pts/x_four/1,0,0/mo_pts/dfield/PARAM_A/
math/add/mo_pts/x_four/1,0,0/mo_pts/x_four/PARAM_B/
cmo/copyatt/mo_pts/mo_pts/fac_n/x_four
finish
"""
    # user_function2
    # Same transformation as distance_field but using the secondary
    # PARAM_A2/PARAM_B2 placeholder pair.
    distance_field_2 = """cmo/DELATT/mo_pts/dfield
compute / distance_field / mo_pts / mo_line_work / dfield
math/multiply/mo_pts/x_four/1,0,0/mo_pts/dfield/PARAM_A2/
math/add/mo_pts/x_four/1,0,0/mo_pts/x_four/PARAM_B2/
cmo/copyatt/mo_pts/mo_pts/fac_n/x_four
finish
"""
    # infile_get_facesets3
    # Builds the three default facesets of a prism mesh (bottom=1, top=2,
    # sides=3) and dumps one AVS file per faceset.
    get_facesets3 = """# get default facesets bottom, top, sides
# FIX so MO has same numbering as exodus mesh
# use sort to order element blocks as exodus will order
# if this is not done, lagrit faceset numbers will not
# correlate to exodus faceset numbers
# itetclr must be ordered correctly
# sort based on element itetclr number and median location
# save median points to check they are inside mesh
cmo status CMO_PRISM brief
cmo select CMO_PRISM
createpts / median
sort / CMO_PRISM / index / ascending / ikey / itetclr xmed ymed zmed
reorder / CMO_PRISM / ikey
cmo / DELATT / CMO_PRISM / ikey
# get outside surface mesh
extract/surfmesh/1,0,0/motmp_s/CMO_PRISM/external
cmo select motmp_s
#################################################
# BEGIN facesets based on layer and river surface
# Default value for all sides is 3
cmo /setatt/ motmp_s / itetclr 3
# bottom
cmo select motmp_s
pset/p1/attribute/layertyp/1,0,0/-1/eq
eltset/e1/exclusive/pset/get/p1
cmo/setatt/motmp_s/itetclr eltset,get,e1 1
cmo/copy/mo_tmp1/motmp_s
cmo/DELATT/mo_tmp1/itetclr0
cmo/DELATT/mo_tmp1/itetclr1
cmo/DELATT/mo_tmp1/facecol
cmo/DELATT/mo_tmp1/idface0
cmo/DELATT/mo_tmp1/idelem0
eltset/eall/itetclr/ge/0
eltset/edel/not eall e1
rmpoint/element/eltset get edel
rmpoint/compress
dump/avs2/fs1_bottom.avs/mo_tmp1/0 0 0 2
# top
cmo/delete/mo_tmp1
cmo select motmp_s
pset/p2/attribute/layertyp/1,0,0/-2/eq
eltset/e2/exclusive/pset/get/p2
cmo/setatt/motmp_s/itetclr eltset,get,e2 2
cmo/copy/mo_tmp1/motmp_s
cmo/DELATT/mo_tmp1/itetclr0
cmo/DELATT/mo_tmp1/itetclr1
cmo/DELATT/mo_tmp1/facecol
cmo/DELATT/mo_tmp1/idface0
cmo/DELATT/mo_tmp1/idelem0
eltset/eall/itetclr/ge/0
eltset/edel/not eall e2
rmpoint/element/eltset get edel
rmpoint/compress
dump/avs2/fs2_top.avs/mo_tmp1/0 0 0 2
dump gmv tmp_top.gmv mo_tmp1
cmo/delete/mo_tmp1
# sides - all sides, no direction
cmo select motmp_s
cmo/copy/mo_tmp1/motmp_s
cmo/DELATT/mo_tmp1/itetclr0
cmo/DELATT/mo_tmp1/itetclr1
cmo/DELATT/mo_tmp1/facecol
cmo/DELATT/mo_tmp1/idface0
cmo/DELATT/mo_tmp1/idelem0
eltset/edel/ itetclr lt 3
rmpoint/element/eltset get edel
rmpoint/compress
dump/avs2/fs3_sides_all.avs/mo_tmp1/0 0 0 2
dump gmv tmp_sides.gmv mo_tmp1
cmo/delete/mo_tmp1
###################################
# At this point mesh facesets are set for default
# bottom=1, top=2, sides=3
# fs1_bottom.avs fs2_top.avs fs3_sides_all.avs
finish
"""
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPool2D
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
# Initial variables
# 28x28 grayscale digit images loaded from a MNIST-style CSV; 10 classes (0-9).
img_rows, img_cols = 28, 28
num_classes = 10
train_file = "./input/digits/train.csv"
raw_data = pd.read_csv(train_file)
# Model counting
# Read-then-increment a persistent run counter so each training run saves its
# model/evaluation files under a unique index.
model_count = 0
with open('./evaluations/model_count.txt', 'r') as count_file:
    model_count = int(count_file.read())
with open('./evaluations/model_count.txt', 'w') as count_file:
    count_file.write(str(model_count+1))
# Preparing the data
# First CSV column is the label; the remaining 784 columns are pixel values.
y = keras.utils.to_categorical(raw_data.label, num_classes)
num_images = raw_data.shape[0]
x_as_array = raw_data.values[:,1:]
x_shaped_array = x_as_array.reshape(num_images, img_rows, img_cols, 1)
# Scale pixel intensities from [0, 255] into [0, 1].
x = x_shaped_array / 255
# 70/30 split into train and holdout, then the holdout is split again into
# validation (70% of it) and test (30% of it).
x_train, x_val_, y_train, y_val_ = train_test_split(
    x, y, test_size=0.3)
x_val, x_test, y_val, y_test = train_test_split(
    x_val_, y_val_, test_size=0.3)
# The model
# Two conv/pool/dropout stages followed by a dense classifier head.
model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(img_rows, img_cols, 1)))
model.add(MaxPool2D(pool_size=(2,2),strides=(1,1)))
model.add(Dropout(0.2))
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2),strides=(1,1)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
# Data augmentation: random zoom, shifts and rotation of the training images.
generator = ImageDataGenerator(zoom_range = 0.2,
                               height_shift_range = 0.2,
                               width_shift_range = 0.2,
                               rotation_range = 15)
# Annealer
# Halve the learning rate when validation accuracy plateaus for 3 epochs.
# NOTE(review): the 'val_acc'/'acc' history keys match older Keras releases;
# newer versions use 'val_accuracy'/'accuracy' — confirm the installed version.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
# Training the model
# NOTE(review): fit_generator is deprecated in newer Keras (model.fit accepts
# generators directly) — confirm the installed version still provides it.
history = model.fit_generator(generator.flow(x_train,y_train,batch_size=75),
                              epochs=5,
                              steps_per_epoch = 350,
                              verbose = 1,
                              validation_data = generator.flow(x_val,y_val,batch_size=63),
                              validation_steps = 200,
                              callbacks=[learning_rate_reduction])
# Final evaluation on the held-out test split (no augmentation).
scores = model.evaluate(x_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# Saving the model
# Architecture goes to JSON, weights to HDF5, both keyed by model_count.
model_json = model.to_json()
with open("./model/model_digit" + str(model_count) + ".json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("./model/model_digit" + str(model_count) + ".h5")
# Saving accuracies
# Per-epoch history plus the final test accuracy, keyed by model_count.
with open('./evaluations/model' + str(model_count) + '.txt', 'w') as accuracy_file:
    accuracy_file.write('Accuracies:\n')
    accuracy_file.write(str(history.history['acc']) + '\n')
    accuracy_file.write('Accuracies from Validation:\n')
    accuracy_file.write(str(history.history['val_acc']) + '\n')
    accuracy_file.write('Losses:\n')
    accuracy_file.write(str(history.history['loss']) + '\n')
    accuracy_file.write('Losses from Validation:\n')
    accuracy_file.write(str(history.history['val_loss']))
    accuracy_file.write('\nmodel' + str(model_count) + ': ' + str(scores[1]))
# Plotting accuracy history
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('accuracies')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','validation'])
plt.show()
# Plotting loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('losses')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','validation'])
plt.show()
|
from __future__ import print_function, division, absolute_import
import unittest
import matplotlib
matplotlib.use('Agg')
import numpy as np
from openmdao.api import Problem, Group, pyOptSparseDriver, ScipyOptimizeDriver, DirectSolver
from openmdao.utils.assert_utils import assert_rel_error
from dymos import Phase, GaussLobatto, Radau, RungeKutta
from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
SHOW_PLOTS = True
class TestTimeseriesOutput(unittest.TestCase):
    """Checks that a dymos phase's timeseries outputs reproduce the source
    time/state/control/design-parameter values for the brachistochrone
    problem under different transcriptions."""

    def test_timeseries_gl(self):
        """Gauss-Lobatto transcription: timeseries must match time,
        time_phase, states (at state-input and collocation nodes),
        controls, and design parameters."""
        p = Problem(model=Group())
        p.driver = ScipyOptimizeDriver()
        p.driver.options['dynamic_simul_derivs'] = True
        phase = Phase(ode_class=BrachistochroneODE,
                      transcription=GaussLobatto(num_segments=8, order=3, compressed=True))
        p.model.add_subsystem('phase0', phase)
        phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
        phase.set_state_options('x', fix_initial=True, fix_final=True)
        phase.set_state_options('y', fix_initial=True, fix_final=True)
        phase.set_state_options('v', fix_initial=True, fix_final=False)
        phase.add_control('theta', continuity=True, rate_continuity=True,
                          units='deg', lower=0.01, upper=179.9)
        phase.add_design_parameter('g', units='m/s**2', opt=False, val=9.80665)
        # Minimize time at the end of the phase
        phase.add_objective('time_phase', loc='final', scaler=10)
        p.model.linear_solver = DirectSolver()
        p.setup(check=True)
        # Initial guesses: linear interpolation between boundary values.
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 2.0
        p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')
        p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')
        p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100], nodes='control_input')
        p['phase0.design_parameters:g'] = 9.80665
        p.run_driver()
        # Node-index subsets used to pick matching rows out of the timeseries.
        gd = phase.options['transcription'].grid_data
        state_input_idxs = gd.subset_node_indices['state_input']
        control_input_idxs = gd.subset_node_indices['control_input']
        col_idxs = gd.subset_node_indices['col']
        assert_rel_error(self,
                         p.get_val('phase0.time'),
                         p.get_val('phase0.timeseries.time')[:, 0])
        assert_rel_error(self,
                         p.get_val('phase0.time_phase'),
                         p.get_val('phase0.timeseries.time_phase')[:, 0])
        for state in ('x', 'y', 'v'):
            assert_rel_error(self,
                             p.get_val('phase0.states:{0}'.format(state)),
                             p.get_val('phase0.timeseries.states:'
                                       '{0}'.format(state))[state_input_idxs])
            # GL also records interpolated state values at collocation nodes.
            assert_rel_error(self,
                             p.get_val('phase0.state_interp.state_col:{0}'.format(state)),
                             p.get_val('phase0.timeseries.states:'
                                       '{0}'.format(state))[col_idxs])
        for control in ('theta',):
            assert_rel_error(self,
                             p.get_val('phase0.controls:{0}'.format(control)),
                             p.get_val('phase0.timeseries.controls:'
                                       '{0}'.format(control))[control_input_idxs])
        for dp in ('g',):
            # Design parameters are constant over the phase, so every
            # timeseries node must hold the same value.
            for i in range(gd.subset_num_nodes['all']):
                assert_rel_error(self,
                                 p.get_val('phase0.design_parameters:{0}'.format(dp))[0, :],
                                 p.get_val('phase0.timeseries.design_parameters:{0}'.format(dp))[i])

    def test_timeseries_radau(self):
        """Radau transcription: same checks as the GL case, minus the
        collocation-node state comparison."""
        p = Problem(model=Group())
        p.driver = ScipyOptimizeDriver()
        p.driver.options['dynamic_simul_derivs'] = True
        phase = Phase(ode_class=BrachistochroneODE,
                      transcription=Radau(num_segments=8, order=3, compressed=True))
        p.model.add_subsystem('phase0', phase)
        phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
        phase.set_state_options('x', fix_initial=True, fix_final=True)
        phase.set_state_options('y', fix_initial=True, fix_final=True)
        phase.set_state_options('v', fix_initial=True, fix_final=False)
        phase.add_control('theta', continuity=True, rate_continuity=True,
                          units='deg', lower=0.01, upper=179.9)
        phase.add_design_parameter('g', units='m/s**2', opt=False, val=9.80665)
        # Minimize time at the end of the phase
        phase.add_objective('time_phase', loc='final', scaler=10)
        p.model.options['assembled_jac_type'] = 'csc'
        p.model.linear_solver = DirectSolver()
        p.setup(check=True)
        # Initial guesses: linear interpolation between boundary values.
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 2.0
        p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')
        p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')
        p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100], nodes='control_input')
        p['phase0.design_parameters:g'] = 9.80665
        p.run_driver()
        gd = phase.options['transcription'].grid_data
        state_input_idxs = gd.subset_node_indices['state_input']
        control_input_idxs = gd.subset_node_indices['control_input']
        assert_rel_error(self,
                         p.get_val('phase0.time'),
                         p.get_val('phase0.timeseries.time')[:, 0])
        assert_rel_error(self,
                         p.get_val('phase0.time_phase'),
                         p.get_val('phase0.timeseries.time_phase')[:, 0])
        for state in ('x', 'y', 'v'):
            assert_rel_error(self,
                             p.get_val('phase0.states:{0}'.format(state)),
                             p.get_val('phase0.timeseries.states:'
                                       '{0}'.format(state))[state_input_idxs])
        for control in ('theta',):
            assert_rel_error(self,
                             p.get_val('phase0.controls:{0}'.format(control)),
                             p.get_val('phase0.timeseries.controls:'
                                       '{0}'.format(control))[control_input_idxs])
        for dp in ('g',):
            for i in range(gd.subset_num_nodes['all']):
                assert_rel_error(self,
                                 p.get_val('phase0.design_parameters:{0}'.format(dp))[0, :],
                                 p.get_val('phase0.timeseries.design_parameters:'
                                           '{0}'.format(dp))[i])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# coding=utf-8
# Author: Diego González Chávez
# email : diegogch@cbpf.br / diego.gonzalez.chavez@gmail.com
#
# This class controls the:
# Temperature controler
# LakeShore 311
#
# TODO:
# Make documentation
from .instruments_base import InstrumentBase as _InstrumentBase
__all__ = ['LakeShore_311']
class LakeShore_311(_InstrumentBase):
    """GPIB/VISA driver for a LakeShore instrument.

    NOTE(review): the file header says this controls a LakeShore 311
    temperature controller, but _IDN is set to 'LakeShore 475' and the
    only query (RDGFIELD?) reads a field value — confirm which instrument
    this driver actually targets before relying on the class name.
    """
    def __init__(self,
                 GPIB_Address=12, GPIB_Device=0,
                 ResourceName=None, logFile=None):
        # Build a VISA GPIB resource name unless one is supplied explicitly.
        if ResourceName is None:
            ResourceName = 'GPIB%d::%d::INSTR' % (GPIB_Device, GPIB_Address)
        super().__init__(ResourceName, logFile)
        self._IDN = 'LakeShore 475'
        # Messages are LF-terminated on both write and read.
        self.VI.write_termination = self.VI.LF
        self.VI.read_termination = self.VI.LF
        # Clear status registers, then reset to power-on defaults.
        self.write('*CLS')
        self.write('*RST')
    @property
    def field(self):
        '''Field value read from the instrument (RDGFIELD? query), as float.'''
        return self.query_float('RDGFIELD?')
|
import os
import datetime
import httplib
import json
import logging
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
# Destination bucket for exports; read at import time, so the environment
# variable must be set in the app configuration before deployment.
GCS_BUCKET=os.environ['GCS_BUCKET']
class Export(webapp2.RequestHandler):
    """Handler that starts a managed Cloud Datastore export of this app's
    entities into the GCS bucket named by GCS_BUCKET.

    The export runs asynchronously on Google's side; this handler only
    initiates it and mirrors the Datastore Admin API's HTTP status.
    """
    def get(self):
        # OAuth token scoped for the Datastore Admin API.
        access_token, _ = app_identity.get_access_token(
            'https://www.googleapis.com/auth/datastore')
        app_id = app_identity.get_application_id()
        # Timestamp namespaces each export under its own GCS prefix.
        timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        output_url_prefix = "gs://" + GCS_BUCKET
        # comment this out because at the moment we do not want to use a URL param to run this service,
        # output_url_prefix = self.request.get('output_url_prefix')
        assert output_url_prefix and output_url_prefix.startswith('gs://')
        if '/' not in output_url_prefix[5:]:
            # Only a bucket name has been provided - no prefix or trailing slash
            output_url_prefix += '/' + timestamp
        else:
            # NOTE(review): this branch assumes the configured prefix already
            # ends with '/'; otherwise the timestamp is glued onto the last
            # path segment — confirm GCS_BUCKET's expected format.
            output_url_prefix += timestamp
        # Restrict the export to kinds/namespaces given as repeated query
        # parameters; empty lists mean "export everything".
        entity_filter = {
            'kinds': self.request.get_all('kind'),
            'namespace_ids': self.request.get_all('namespace_id')
        }
        request = {
            'project_id': app_id,
            'output_url_prefix': output_url_prefix,
            'entity_filter': entity_filter
        }
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + access_token
        }
        url = 'https://datastore.googleapis.com/v1/projects/%s:export' % app_id
        try:
            result = urlfetch.fetch(
                url=url,
                payload=json.dumps(request),
                method=urlfetch.POST,
                deadline=60,
                headers=headers)
            # Log at a severity matching the API outcome.
            if result.status_code == httplib.OK:
                logging.info(result.content)
            elif result.status_code >= 500:
                logging.error(result.content)
            else:
                logging.warning(result.content)
            # Propagate the Datastore Admin API's status to our response.
            self.response.status_int = result.status_code
        except urlfetch.Error:
            logging.exception('Failed to initiate export.')
            self.response.status_int = httplib.INTERNAL_SERVER_ERROR
# WSGI application routing the export endpoint; typically invoked by cron.
app = webapp2.WSGIApplication(
    [
        ('/cloud-datastore-export', Export),
    ], debug=True)
|
import re
def preprocess(data, lang):
    """Validate the optional ``evr`` field of a package-description dict.

    Parameters
    ----------
    data : dict
        Package description; may contain ``evr`` and ``pkgname`` keys.
    lang : object
        Unused; kept for interface compatibility with callers.

    Returns
    -------
    dict
        The same ``data`` object, unchanged.

    Raises
    ------
    RuntimeError
        If ``evr`` is set (truthy) but not in ``epoch:version-release`` form.
    """
    if "evr" in data:
        evr = data["evr"]
        # NOTE(review): re.match anchors only at the start of the string, so
        # trailing text after the release segment is tolerated — confirm
        # whether strict (fullmatch) validation was intended.
        if evr and not re.match(r'\d:\d[\d\w+.]*-\d[\d\w+.]*', evr, 0):
            # Use .get() so a package missing "pkgname" still reports the
            # real violation instead of masking it with a KeyError raised
            # while formatting the error message.
            raise RuntimeError(
                "ERROR: input violation: evr key should be in "
                "epoch:version-release format, but package {0} has set "
                "evr to {1}".format(data.get("pkgname", "<unknown>"), evr))
    return data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.