hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92d378f87edbde25754367021abfde6825961d05 | 2,478 | py | Python | {{cookiecutter.project_name}}/core/config.py | bergran/fast-api-project-template | ad29b3f6d37bb3653efff66cf0d8c76ce6015bb5 | [
"MIT"
] | 50 | 2019-06-25T23:30:35.000Z | 2022-02-14T14:12:41.000Z | {{cookiecutter.project_name}}/core/config.py | bergran/fast-api-project-template | ad29b3f6d37bb3653efff66cf0d8c76ce6015bb5 | [
"MIT"
] | 2 | 2019-05-22T15:28:12.000Z | 2020-03-15T23:12:28.000Z | {{cookiecutter.project_name}}/core/config.py | bergran/fast-api-project-template | ad29b3f6d37bb3653efff66cf0d8c76ce6015bb5 | [
"MIT"
] | 8 | 2019-12-24T17:36:48.000Z | 2022-03-01T09:47:11.000Z | import os
from datetime import timedelta
# ~~~~~ PATH ~~~~~
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ~~~~~ TEST ~~~~~
TEST_RUN = getenv_boolean('TEST_RUN', False)
# ~~~~~ API ~~~~~
# ~~~~~ SECRET ~~~~~
SECRET_KEY = os.getenv('SECRET_KEY', 'cuerno de unicornio :D')
if not SECRET_KEY:
SECRET_KEY = os.urandom(32)
# ~~~~~ APPS ~~~~~
APPS = [
'health_check',
'token',
'hello_world'
]
# ~~~~~ JWT ~~~~~
JWT_EXPIRATION_DELTA = timedelta(hours=int(os.getenv('ACCESS_TOKEN_EXPIRE_MINUTES', 10))) # in hours
JWT_REFRESH_EXPIRATION_DELTA = timedelta(hours=int(os.getenv('JWT_REFRESH_EXPIRATION_DELTA', 10))) # in hours
JWT_AUTH_HEADER_PREFIX = os.getenv('JWT_AUTH_HEADER_PREFIX', 'JWT')
JWT_SECRET_KEY = SECRET_KEY
# ~~~~~ CORS ~~~~~
BACKEND_CORS_ORIGINS = os.getenv(
'BACKEND_CORS_ORIGINS'
) # a string of origins separated by commas, e.g: 'http://localhost, http://localhost:4200, http://localhost:3000
# ~~~~~ APP ~~~~~
PROJECT_NAME = os.getenv('PROJECT_NAME', 'Fastapi')
# ~~~~~ EMAIL ~~~~~
SENTRY_DSN = os.getenv('SENTRY_DSN')
SMTP_TLS = getenv_boolean('SMTP_TLS', True)
SMTP_PORT = None
_SMTP_PORT = os.getenv('SMTP_PORT')
if _SMTP_PORT is not None:
SMTP_PORT = int(_SMTP_PORT)
SMTP_HOST = os.getenv('SMTP_HOST')
SMTP_USER = os.getenv('SMTP_USER')
SMTP_PASSWORD = os.getenv('SMTP_PASSWORD')
EMAILS_FROM_EMAIL = os.getenv('EMAILS_FROM_EMAIL')
EMAILS_FROM_NAME = PROJECT_NAME
EMAIL_RESET_TOKEN_EXPIRE_HOURS = 48
EMAIL_TEMPLATES_DIR = '/app/app/email-templates/build'
EMAILS_ENABLED = SMTP_HOST and SMTP_PORT and EMAILS_FROM_EMAIL
# ~~~~~ DATA_BASE ~~~~~
DATABASES = {
'type': os.environ.get('type', 'postgresql'),
'database': os.environ.get('database', 'fastapi'),
'username': os.environ.get('username', 'myproject'),
'password': os.environ.get('password', 'myproject'),
'host': os.environ.get('host', 'localhost'),
'port': os.environ.get('port', 5432)
}
# ~~~~~ OAUTH 2 ~~~~~
SCOPES = {
'read': 'Read',
'write': 'Write'
}
| 23.6 | 114 | 0.642857 | import os
from datetime import timedelta
class Config:
    """Static configuration templates.

    Appears unused by the module-level settings below — presumably a
    scaffold for per-environment overrides; TODO confirm against callers.
    """

    # Overrides intended for test runs (separate test database name).
    TEST = {
        'database': 'test_default',
    }
    # Template listing the expected database-connection keys, all empty.
    _DATABASES = {
        'type': '',
        'username': '',
        'password': '',
        'host': '',
        'port': '',
        'database': '',
    }
def getenv_boolean(var_name, default_value=False):
    """Interpret the environment variable *var_name* as a boolean.

    Returns True when the variable is set to 'TRUE' or '1' (any letter
    case), False when it is set to anything else, and *default_value*
    when the variable is not set at all.
    """
    raw = os.getenv(var_name)
    if raw is None:
        return default_value
    return raw.upper() in ('TRUE', '1')
# ~~~~~ PATH ~~~~~
# Project root: two directories above this config module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# ~~~~~ TEST ~~~~~
# True when the suite runs with TEST_RUN set to 'TRUE'/'1'.
TEST_RUN = getenv_boolean('TEST_RUN', False)

# ~~~~~ API ~~~~~

# ~~~~~ SECRET ~~~~~
SECRET_KEY = os.getenv('SECRET_KEY', 'cuerno de unicornio :D')
if not SECRET_KEY:
    # NOTE(review): dead branch — the getenv default above is a non-empty
    # string, so SECRET_KEY is always truthy and this fallback never runs.
    SECRET_KEY = os.urandom(32)

# ~~~~~ APPS ~~~~~
# Application packages mounted by the project.
APPS = [
    'health_check',
    'token',
    'hello_world'
]

# ~~~~~ JWT ~~~~~
# NOTE(review): the env var is named ...EXPIRE_MINUTES but its value is fed
# to timedelta(hours=...) — confirm the intended unit.
JWT_EXPIRATION_DELTA = timedelta(hours=int(os.getenv('ACCESS_TOKEN_EXPIRE_MINUTES', 10)))  # in hours
JWT_REFRESH_EXPIRATION_DELTA = timedelta(hours=int(os.getenv('JWT_REFRESH_EXPIRATION_DELTA', 10)))  # in hours
JWT_AUTH_HEADER_PREFIX = os.getenv('JWT_AUTH_HEADER_PREFIX', 'JWT')
JWT_SECRET_KEY = SECRET_KEY

# ~~~~~ CORS ~~~~~
BACKEND_CORS_ORIGINS = os.getenv(
    'BACKEND_CORS_ORIGINS'
)  # a string of origins separated by commas, e.g: 'http://localhost, http://localhost:4200, http://localhost:3000

# ~~~~~ APP ~~~~~
PROJECT_NAME = os.getenv('PROJECT_NAME', 'Fastapi')

# ~~~~~ EMAIL ~~~~~
SENTRY_DSN = os.getenv('SENTRY_DSN')
SMTP_TLS = getenv_boolean('SMTP_TLS', True)
# SMTP_PORT stays None unless the env var is present (then parsed as int).
SMTP_PORT = None
_SMTP_PORT = os.getenv('SMTP_PORT')
if _SMTP_PORT is not None:
    SMTP_PORT = int(_SMTP_PORT)
SMTP_HOST = os.getenv('SMTP_HOST')
SMTP_USER = os.getenv('SMTP_USER')
SMTP_PASSWORD = os.getenv('SMTP_PASSWORD')
EMAILS_FROM_EMAIL = os.getenv('EMAILS_FROM_EMAIL')
EMAILS_FROM_NAME = PROJECT_NAME
EMAIL_RESET_TOKEN_EXPIRE_HOURS = 48
EMAIL_TEMPLATES_DIR = '/app/app/email-templates/build'
# Outgoing mail only works when host, port and sender address are all set.
EMAILS_ENABLED = SMTP_HOST and SMTP_PORT and EMAILS_FROM_EMAIL

# ~~~~~ DATA_BASE ~~~~~
# NOTE(review): very generic lowercase env var names ('type', 'host', ...)
# risk colliding with unrelated environment variables — consider prefixing.
DATABASES = {
    'type': os.environ.get('type', 'postgresql'),
    'database': os.environ.get('database', 'fastapi'),
    'username': os.environ.get('username', 'myproject'),
    'password': os.environ.get('password', 'myproject'),
    'host': os.environ.get('host', 'localhost'),
    'port': os.environ.get('port', 5432)
}

# ~~~~~ OAUTH 2 ~~~~~
# OAuth2 scope identifiers mapped to their human-readable descriptions.
SCOPES = {
    'read': 'Read',
    'write': 'Write'
}
| 192 | 205 | 46 |
9c74e994614dc050aa7649990b47821bd6d075cd | 3,444 | py | Python | venv/lib/python3.6/site-packages/skvideo/tests/test_pattern.py | mesquitadev/grpc | 747660f2ed4e62e30999741f4359793192158cad | [
"MIT"
] | 615 | 2015-12-11T23:27:21.000Z | 2022-03-30T22:51:49.000Z | skvideo/tests/test_pattern.py | lidq92/scikit-video | 7e3e22ef8f8fabb75629f631747981b139e96a2b | [
"BSD-3-Clause"
] | 124 | 2016-04-26T17:30:40.000Z | 2022-03-15T13:00:07.000Z | skvideo/tests/test_pattern.py | lidq92/scikit-video | 7e3e22ef8f8fabb75629f631747981b139e96a2b | [
"BSD-3-Clause"
] | 150 | 2015-12-09T02:53:52.000Z | 2022-03-27T11:52:33.000Z | import skvideo.io
import skvideo.utils
import numpy as np
import os
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
@unittest.skipIf(not skvideo._HAS_FFMPEG, "FFmpeg required for this test.")
@unittest.skipIf(not skvideo._HAS_AVCONV, "LibAV required for this test.")
@unittest.skipIf(not skvideo._HAS_FFMPEG, "FFmpeg required for this test.")
@unittest.skipIf(not skvideo._HAS_AVCONV, "LibAV required for this test.")
| 36.638298 | 127 | 0.728513 | import skvideo.io
import skvideo.utils
import numpy as np
import os
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
def pattern_sinusoid(backend):
    """Round-trip a sinusoidal test pattern through raw video I/O.

    Writes a 100x100 sinusoidal luminance pattern, reads it back, re-saves
    and re-reads it, then checks that (a) the float-to-pixel conversion
    error stays below 1 MSE and (b) the save/load cycle is lossless.

    :param backend: skvideo I/O backend to exercise ('ffmpeg' or 'libav').
    """
    # write out a sine wave
    sinusoid1d = np.zeros((100, 100))
    for i in range(100):
        sinusoid1d[i, :] = 127*np.sin(2 * np.pi * i / 100) + 128
    # BUGFIX: the requested backend was previously ignored (the default
    # backend was always used); pass it through like pattern_noise does.
    skvideo.io.vwrite("sinusoid1d.yuv", sinusoid1d, backend=backend)

    # load it and resave it to check the pipeline for drift
    videoData1 = skvideo.io.vread("sinusoid1d.yuv", width=100, height=100, backend=backend)
    skvideo.io.vwrite("sinusoid1d_resaved.yuv", videoData1, backend=backend)
    videoData2 = skvideo.io.vread("sinusoid1d_resaved.yuv", width=100, height=100, backend=backend)

    # check slices
    sinusoidDataOriginal = np.array(sinusoid1d[:, 1])
    sinusoidDataVideo1 = skvideo.utils.rgb2gray(videoData1[0])[0, :, 1, 0]
    sinusoidDataVideo2 = skvideo.utils.rgb2gray(videoData2[0])[0, :, 1, 0]

    # check that the mean squared error is within 1 pixel
    floattopixel_mse = np.mean((sinusoidDataOriginal-sinusoidDataVideo1)**2)
    assert floattopixel_mse < 1, "Possible conversion error between floating point and raw video. MSE=%f" % (floattopixel_mse,)

    # check that saving and loading a loaded file is identical
    pixeltopixel_mse = np.mean((sinusoidDataVideo1-sinusoidDataVideo2)**2)
    assert pixeltopixel_mse == 0, "Creeping error inside vread/vwrite."

    os.remove("sinusoid1d.yuv")
    os.remove("sinusoid1d_resaved.yuv")
def pattern_noise(backend):
    """Round-trip a seeded random-noise pattern through the given skvideo
    backend and verify that raw video writes/reads introduce no drift."""
    np.random.seed(1)

    # Deterministic 100x100 noise frame with pinned extreme pixel values.
    noise = np.random.random((100, 100))*255
    noise[0, 0] = 0
    noise[0, 1] = 1
    noise[0, 2] = 255
    skvideo.io.vwrite("randomNoisePattern.yuv", noise, backend=backend)

    # Read the pattern back, re-save it, and read the copy to expose drift.
    first_pass = skvideo.io.vread("randomNoisePattern.yuv", width=100, height=100, backend=backend)
    skvideo.io.vwrite("randomNoisePattern_resaved.yuv", first_pass, backend=backend)
    second_pass = skvideo.io.vread("randomNoisePattern_resaved.yuv", width=100, height=100, backend=backend)

    # Comparable grayscale planes of the original and both round trips.
    reference = np.array(noise)
    gray_first = skvideo.utils.rgb2gray(first_pass[0])[0, :, :, 0]
    gray_second = skvideo.utils.rgb2gray(second_pass[0])[0, :, :, 0]

    # Float -> pixel quantization must stay below 1 MSE.
    floattopixel_mse = np.mean((reference-gray_first)**2)
    assert floattopixel_mse < 1, "Possible conversion error between floating point and raw video. MSE=%f" % (floattopixel_mse,)

    # A save/load cycle of already-quantized data must be lossless.
    pixeltopixel_mse = np.mean((gray_first-gray_second)**2)
    assert pixeltopixel_mse == 0, "Creeping error inside vread/vwrite."

    os.remove("randomNoisePattern.yuv")
    os.remove("randomNoisePattern_resaved.yuv")
@unittest.skipIf(not skvideo._HAS_FFMPEG, "FFmpeg required for this test.")
def test_sinusoid_ffmpeg():
    """Sinusoid round-trip using the FFmpeg backend."""
    pattern_sinusoid('ffmpeg')
@unittest.skipIf(not skvideo._HAS_AVCONV, "LibAV required for this test.")
def test_sinusoid_libav():
    """Sinusoid round-trip using the LibAV backend."""
    pattern_sinusoid('libav')
@unittest.skipIf(not skvideo._HAS_FFMPEG, "FFmpeg required for this test.")
def test_noisepattern_ffmpeg():
    """Noise-pattern round-trip using the FFmpeg backend."""
    pattern_noise('ffmpeg')
@unittest.skipIf(not skvideo._HAS_AVCONV, "LibAV required for this test.")
def test_noisepattern_libav():
    """Noise-pattern round-trip using the LibAV backend."""
    pattern_noise('libav')
| 2,830 | 0 | 134 |
0296ea9a20b9d23d9b0da58ff6672cddca9edbbd | 1,105 | py | Python | setup.py | nicolemon/snote | 1675b231f2d9d0b29d7cb0d16c78759e2bb282ad | [
"MIT"
] | null | null | null | setup.py | nicolemon/snote | 1675b231f2d9d0b29d7cb0d16c78759e2bb282ad | [
"MIT"
] | null | null | null | setup.py | nicolemon/snote | 1675b231f2d9d0b29d7cb0d16c78759e2bb282ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
setup(
name = "snotebook",
version = "0.2.2",
author = "Nicole A Montano",
author_email = "n@nicolemon.com",
description = ("A rudimentary CLI to write and organize text"),
license = "MIT",
keywords = "cli commandline terminal notes python",
url = "https://github.com/nicolemon/snote",
packages=['snote'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
],
extras_require={
'test': ['pytest'],
},
entry_points={
'console_scripts': [
'snote=snote:main',
],
},
)
| 28.333333 | 79 | 0.61629 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file.

    Used to pull the README into ``long_description`` without hard-coding
    an absolute path. Opens with an explicit UTF-8 encoding and a context
    manager so the file handle is closed deterministically (the original
    left the handle for the garbage collector to reclaim).
    """
    with open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8') as f:
        return f.read()
setup(
name = "snotebook",
version = "0.2.2",
author = "Nicole A Montano",
author_email = "n@nicolemon.com",
description = ("A rudimentary CLI to write and organize text"),
license = "MIT",
keywords = "cli commandline terminal notes python",
url = "https://github.com/nicolemon/snote",
packages=['snote'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
],
extras_require={
'test': ['pytest'],
},
entry_points={
'console_scripts': [
'snote=snote:main',
],
},
)
| 66 | 0 | 22 |
4cd1c56e98758beeccff3138145a6eb5e037e4cc | 153 | py | Python | Chapter1/aijack/src/aijack/attack/inversion/__init__.py | PacktPublishing/Designing-Models-for-Responsible-AI | 36b60f1e3e9db8b3d2db3ace873dbdee1b076b74 | [
"MIT"
] | null | null | null | Chapter1/aijack/src/aijack/attack/inversion/__init__.py | PacktPublishing/Designing-Models-for-Responsible-AI | 36b60f1e3e9db8b3d2db3ace873dbdee1b076b74 | [
"MIT"
] | null | null | null | Chapter1/aijack/src/aijack/attack/inversion/__init__.py | PacktPublishing/Designing-Models-for-Responsible-AI | 36b60f1e3e9db8b3d2db3ace873dbdee1b076b74 | [
"MIT"
] | 2 | 2022-01-17T07:28:22.000Z | 2022-01-30T00:12:53.000Z | from .gan_attack import GAN_Attack # noqa: F401
from .generator_attack import Generator_Attack # noqa: F401
from .mi_face import MI_FACE # noqa: F401
| 38.25 | 60 | 0.784314 | from .gan_attack import GAN_Attack # noqa: F401
from .generator_attack import Generator_Attack # noqa: F401
from .mi_face import MI_FACE # noqa: F401
| 0 | 0 | 0 |
24120a7385611a54050a48b990b1f813946d663a | 3,090 | py | Python | test/test_prototype_builtin_datasets.py | futurelife2016/vision | bbd9ff8fb936846aa0412996abab19b563677e5b | [
"BSD-3-Clause"
] | 1 | 2022-01-06T01:58:01.000Z | 2022-01-06T01:58:01.000Z | test/test_prototype_builtin_datasets.py | futurelife2016/vision | bbd9ff8fb936846aa0412996abab19b563677e5b | [
"BSD-3-Clause"
] | null | null | null | test/test_prototype_builtin_datasets.py | futurelife2016/vision | bbd9ff8fb936846aa0412996abab19b563677e5b | [
"BSD-3-Clause"
] | null | null | null | import functools
import io
import builtin_dataset_mocks
import pytest
from torchdata.datapipes.iter import IterDataPipe
from torchvision.prototype import datasets
from torchvision.prototype.utils._internal import sequence_to_str
_loaders = []
_datasets = []
# TODO: this can be replaced by torchvision.prototype.datasets.list() as soon as all builtin datasets are supported
TMP = [
"mnist",
"fashionmnist",
"kmnist",
"emnist",
"qmnist",
"cifar10",
"cifar100",
"caltech256",
"caltech101",
"imagenet",
]
for name in TMP:
loader = functools.partial(builtin_dataset_mocks.load, name)
_loaders.append(pytest.param(loader, id=name))
info = datasets.info(name)
_datasets.extend(
[
pytest.param(*loader(**config), id=f"{name}-{'-'.join([str(value) for value in config.values()])}")
for config in info._configs
]
)
loaders = pytest.mark.parametrize("loader", _loaders)
builtin_datasets = pytest.mark.parametrize(("dataset", "mock_info"), _datasets)
| 30.9 | 120 | 0.630097 | import functools
import io
import builtin_dataset_mocks
import pytest
from torchdata.datapipes.iter import IterDataPipe
from torchvision.prototype import datasets
from torchvision.prototype.utils._internal import sequence_to_str
_loaders = []
_datasets = []
# TODO: this can be replaced by torchvision.prototype.datasets.list() as soon as all builtin datasets are supported
TMP = [
"mnist",
"fashionmnist",
"kmnist",
"emnist",
"qmnist",
"cifar10",
"cifar100",
"caltech256",
"caltech101",
"imagenet",
]
for name in TMP:
loader = functools.partial(builtin_dataset_mocks.load, name)
_loaders.append(pytest.param(loader, id=name))
info = datasets.info(name)
_datasets.extend(
[
pytest.param(*loader(**config), id=f"{name}-{'-'.join([str(value) for value in config.values()])}")
for config in info._configs
]
)
loaders = pytest.mark.parametrize("loader", _loaders)
builtin_datasets = pytest.mark.parametrize(("dataset", "mock_info"), _datasets)
class TestCommon:
    """Smoke tests parametrized over every builtin prototype dataset config."""

    @builtin_datasets
    def test_smoke(self, dataset, mock_info):
        """Loading a dataset must yield an IterDataPipe."""
        if not isinstance(dataset, IterDataPipe):
            raise AssertionError(f"Loading the dataset should return an IterDataPipe, but got {type(dataset)} instead.")

    @builtin_datasets
    def test_sample(self, dataset, mock_info):
        """Drawing one sample must succeed and produce a non-empty dict."""
        try:
            sample = next(iter(dataset))
        except Exception as error:
            raise AssertionError("Drawing a sample raised the error above.") from error

        if not isinstance(sample, dict):
            raise AssertionError(f"Samples should be dictionaries, but got {type(sample)} instead.")

        if not sample:
            raise AssertionError("Sample dictionary is empty.")

    @builtin_datasets
    def test_num_samples(self, dataset, mock_info):
        """Iterating the dataset must produce exactly the mocked sample count."""
        num_samples = 0
        for _ in dataset:
            num_samples += 1

        assert num_samples == mock_info["num_samples"]

    @builtin_datasets
    def test_decoding(self, dataset, mock_info):
        """No sample value may remain an undecoded file handle (io.IOBase)."""
        undecoded_features = {key for key, value in next(iter(dataset)).items() if isinstance(value, io.IOBase)}
        if undecoded_features:
            raise AssertionError(
                f"The values of key(s) "
                f"{sequence_to_str(sorted(undecoded_features), separate_last='and ')} were not decoded."
            )
class TestQMNIST:
    """QMNIST-specific checks on top of the common dataset tests."""

    @pytest.mark.parametrize(
        "dataset",
        [
            pytest.param(builtin_dataset_mocks.load("qmnist", split=split)[0], id=split)
            for split in ("train", "test", "test10k", "test50k", "nist")
        ],
    )
    def test_extra_label(self, dataset):
        """Every QMNIST split must expose the extra label fields with the right types."""
        sample = next(iter(dataset))
        # 'expected_type' rather than 'type' avoids shadowing the builtin.
        for key, expected_type in (
            ("nist_hsf_series", int),
            ("nist_writer_id", int),
            ("digit_index", int),
            ("nist_label", int),
            ("global_digit_index", int),
            ("duplicate", bool),
            ("unused", bool),
        ):
            assert key in sample and isinstance(sample[key], expected_type)
| 1,545 | 451 | 46 |
8c2f080d9fcd298f38ae23d4ca3d6c6f5f47d144 | 350 | py | Python | mytube/users/urls.py | ashowlsky/mytube_c | 122d75d7dcd23ed0240448e5db5ca130266d26a2 | [
"MIT"
] | null | null | null | mytube/users/urls.py | ashowlsky/mytube_c | 122d75d7dcd23ed0240448e5db5ca130266d26a2 | [
"MIT"
] | null | null | null | mytube/users/urls.py | ashowlsky/mytube_c | 122d75d7dcd23ed0240448e5db5ca130266d26a2 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path("signup/", views.SignUp.as_view(), name="signup"),
path("manageuserprofile/", views.manage_user_profile, name="manageuserprofile"),
path("followingprocessor/", views.follow, name="followuser"),
path("unfollowingprocessor/", views.unfollow, name='unfollowuser'),
] | 38.888889 | 84 | 0.725714 | from django.urls import path
from . import views
urlpatterns = [
path("signup/", views.SignUp.as_view(), name="signup"),
path("manageuserprofile/", views.manage_user_profile, name="manageuserprofile"),
path("followingprocessor/", views.follow, name="followuser"),
path("unfollowingprocessor/", views.unfollow, name='unfollowuser'),
] | 0 | 0 | 0 |
0eabfec7534df36ecbaff54e4b32dbc587d639ec | 939 | py | Python | actions/lib/formatters.py | copartit/stackstorm-st2 | 0fc8c16f4e6ca5efec639db7fb34f40e881e889e | [
"Apache-2.0"
] | 5 | 2017-08-22T09:06:50.000Z | 2022-03-06T08:32:07.000Z | actions/lib/formatters.py | copartit/stackstorm-st2 | 0fc8c16f4e6ca5efec639db7fb34f40e881e889e | [
"Apache-2.0"
] | 16 | 2017-05-27T00:35:42.000Z | 2021-02-09T20:31:36.000Z | actions/lib/formatters.py | copartit/stackstorm-st2 | 0fc8c16f4e6ca5efec639db7fb34f40e881e889e | [
"Apache-2.0"
] | 15 | 2017-06-20T01:12:35.000Z | 2021-04-26T08:25:06.000Z | def format_client_list_result(result, exclude_attributes=None):
"""
Format an API client list return which contains a list of objects.
:param exclude_attributes: Optional list of attributes to exclude from the item.
:type exclude_attributes: ``list``
:rtype: ``list`` of ``dict``
"""
formatted = []
for item in result:
value = item.to_dict(exclude_attributes=exclude_attributes)
formatted.append(value)
return formatted
| 27.617647 | 95 | 0.707135 | def format_client_list_result(result, exclude_attributes=None):
"""
Format an API client list return which contains a list of objects.
:param exclude_attributes: Optional list of attributes to exclude from the item.
:type exclude_attributes: ``list``
:rtype: ``list`` of ``dict``
"""
formatted = []
for item in result:
value = item.to_dict(exclude_attributes=exclude_attributes)
formatted.append(value)
return formatted
def format_result(item):
    """Return ``item.to_dict()`` for a truthy item, otherwise ``None``."""
    if not item:
        return None
    return item.to_dict()
def format_rule_update_result(result, exclude_attributes):
    """Format a single rule object, dropping *exclude_attributes* keys.

    Equivalent to running the object through ``format_client_list_result``
    as a one-element list and taking the single formatted dict back out.
    """
    return result.to_dict(exclude_attributes=exclude_attributes)
def format_rule_result(rule, exclude):
    """Normalize a rule-API response into a ``(success, payload)`` pair.

    A ``None`` or string *rule* is treated as an upstream error and returned
    as ``(False, rule)``; any other object is formatted via
    ``format_rule_update_result`` and returned as ``(True, formatted)``.
    """
    failed = rule is None or isinstance(rule, str)
    if failed:
        # error happened upstream; pass the message (or None) through
        return False, rule
    # all good here
    return True, format_rule_update_result(rule, exclude)
| 391 | 0 | 69 |
c85988c0fef5aa16b9d25da3dad266c0ca3dd484 | 78 | py | Python | dynamicserialize/dstypes/com/__init__.py | srcarter3/python-awips | d981062662968cf3fb105e8e23d955950ae2497e | [
"BSD-3-Clause"
] | 33 | 2016-03-17T01:21:18.000Z | 2022-02-08T10:41:06.000Z | dynamicserialize/dstypes/com/__init__.py | srcarter3/python-awips | d981062662968cf3fb105e8e23d955950ae2497e | [
"BSD-3-Clause"
] | 15 | 2016-04-19T16:34:08.000Z | 2020-09-09T19:57:54.000Z | dynamicserialize/dstypes/com/__init__.py | Unidata/python-awips | 8459aa756816e5a45d2e5bea534d23d5b1dd1690 | [
"BSD-3-Clause"
] | 20 | 2016-03-12T01:46:58.000Z | 2022-02-08T06:53:22.000Z |
__all__ = [
'raytheon',
'vividsolutions'
]
| 13 | 28 | 0.371795 |
__all__ = [
'raytheon',
'vividsolutions'
]
| 0 | 0 | 0 |
d6a31e9f3e5dcf4723aa4e0a39dc88503f8e8ae7 | 6,746 | py | Python | tests/test_locus.py | MrTomRod/gene_loci_comparison | 8d1ce9c1229a83c9e316d92cae3c02a2cbb48eba | [
"MIT"
] | 1 | 2022-02-04T13:27:55.000Z | 2022-02-04T13:27:55.000Z | tests/test_locus.py | MrTomRod/gene-loci-comparison | 8d1ce9c1229a83c9e316d92cae3c02a2cbb48eba | [
"MIT"
] | 1 | 2021-12-15T11:14:58.000Z | 2021-12-15T11:14:58.000Z | tests/test_locus.py | MrTomRod/gene-loci-comparison | 8d1ce9c1229a83c9e316d92cae3c02a2cbb48eba | [
"MIT"
] | null | null | null | import os
from unittest import TestCase
from gene_loci_comparison import Locus
import matplotlib
import matplotlib.pyplot as plt
from bokeh.plotting import output_file, show, save
matplotlib.rcParams['font.family'] = "PT Sans Narrow"
save_plots = False
assert os.path.isfile('tests/test_loci.py'), f'Please set working directory to git root!'
pgap_file = 'tests/data/PGAP/FAM3257.gbk'
prokka_file = 'tests/data/Prokka/FAM3257.gbk'
new_prokka_file = 'tests/data/Prokka/Lbombicola_ESL0228.gbk'
bad_first_gene_file = 'tests/data/FirstGene/REFERENCE.gbk'
yeast_file = 'tests/data/yeast/R64-3-1.gbk'
| 30.251121 | 92 | 0.629558 | import os
from unittest import TestCase
from gene_loci_comparison import Locus
import matplotlib
import matplotlib.pyplot as plt
from bokeh.plotting import output_file, show, save
matplotlib.rcParams['font.family'] = "PT Sans Narrow"
save_plots = False
assert os.path.isfile('tests/test_loci.py'), f'Please set working directory to git root!'
pgap_file = 'tests/data/PGAP/FAM3257.gbk'
prokka_file = 'tests/data/Prokka/FAM3257.gbk'
new_prokka_file = 'tests/data/Prokka/Lbombicola_ESL0228.gbk'
bad_first_gene_file = 'tests/data/FirstGene/REFERENCE.gbk'
yeast_file = 'tests/data/yeast/R64-3-1.gbk'
class TestLocus(TestCase):
    """Exercises gene_loci_comparison.Locus loading, colouring, relabelling
    and the matplotlib/bokeh plotting back ends against the bundled GenBank
    fixtures. Plots are shown interactively unless ``save_plots`` is True,
    in which case they are written under tests/output/locus/.
    """

    def test_edge_genes(self):
        """Loci at file/contig boundaries must load without error."""
        Locus(gbk_file=prokka_file, locus_tag='FAM3257_00001')  # at the start of the file
        Locus(gbk_file=prokka_file, locus_tag='FAM3257_03189')  # at the end of the file
        Locus(gbk_file=prokka_file, locus_tag='FAM3257_00099')  # at the end of scf1

    def test_nonexistent_gene(self):
        """An unknown locus tag must raise KeyError."""
        with self.assertRaises(KeyError):
            Locus(gbk_file=prokka_file, locus_tag='FAM3257_00000')

    def test_pgap(self):
        """Plot a locus from a PGAP-annotated GenBank file."""
        locus_tag = 'FAM3257_000993'
        locus = Locus(gbk_file=pgap_file, locus_tag=locus_tag)
        locus_tags = locus.locus_tags()
        print('locus_tags:', locus_tags)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_pgap.svg', format='svg')
        else:
            plt.show()

    def test_prokka(self):
        """Plot a locus from a Prokka-annotated GenBank file."""
        locus_tag = 'FAM3257_00934'
        locus = Locus(gbk_file=prokka_file, locus_tag=locus_tag)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_prokka.svg', format='svg')
        else:
            plt.show()

    def test_new_prokka(self):
        """Plot a locus from a newer Prokka GenBank file layout."""
        locus_tag = 'Lbombicola_ESL0228_00004'
        locus = Locus(gbk_file=new_prokka_file, locus_tag=locus_tag)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_new_prokka.svg', format='svg')
        else:
            plt.show()

    def test_gc(self):
        """Plot the GC-content track for a locus."""
        locus_tag = 'FAM3257_000993'
        locus = Locus(gbk_file=pgap_file, locus_tag=locus_tag)
        locus_tags = locus.locus_tags()
        print('locus_tags:', locus_tags)
        ax, _ = locus.plot_gc(figure_width=12, window_bp=20)
        if save_plots:
            plt.savefig('tests/output/locus/test_gc.svg', format='svg')
        else:
            plt.show()

    def test_to_string(self):
        """Render a locus plot directly to an SVG string."""
        locus_tag = 'FAM3257_000993'
        locus = Locus(gbk_file=pgap_file, locus_tag=locus_tag)
        svg_string = locus.plot_to_string(figure_width=12)
        with open('tests/output/locus/test_to_string.svg', 'w') as f:
            f.write(svg_string)

    def test_scf_end(self):
        """Plot a locus located at the end of a scaffold."""
        locus_tag = 'FAM3257_00098'
        locus = Locus(gbk_file=prokka_file, locus_tag=locus_tag)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_scf_end.svg', format='svg')
        else:
            plt.show()

    def test_scf_start(self):
        """Plot a locus located at the start of a scaffold."""
        locus_tag = 'FAM3257_00001'
        locus = Locus(gbk_file=prokka_file, locus_tag=locus_tag)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_scf_start.svg', format='svg')
        else:
            plt.show()

    def test_scf_start_bad_first_gene(self):
        """Plot near a scaffold start whose first gene annotation is problematic."""
        locus = Locus(gbk_file=bad_first_gene_file, locus_tag='REFERENCE.1_000003')
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_scf_start.svg', format='svg')
        else:
            plt.show()

    def test_custom_colors(self):
        """Colorize genes of a locus with a custom locus-tag -> color map."""
        locus_tag = 'FAM3257_001019'
        locus = Locus(gbk_file=pgap_file, locus_tag=locus_tag)
        locus_tags = locus.locus_tags()
        print('locus_tags:', locus_tags)
        locus_to_color = dict(
            FAM3257_001014='#1271c3',
            FAM3257_001015='#3171c3',
            FAM3257_001016='#5d71c3',
            FAM3257_001017='#9371c3',
            FAM3257_001018='#b171c3',
            FAM3257_001019='#cb71c3',
            FAM3257_001020='#ea71c3',
            FAM3257_001021='#fd71c3',
            # FAM3257_001021='#fd71c3'  # last gene: white (default color)
        )
        locus.colorize(locus_to_color)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_custom_colors.svg', format='svg')
        else:
            plt.show()

    def test_custom_locus_tags(self):
        """Relabel genes of a locus with a custom locus-tag -> label map."""
        locus_tag = 'FAM3257_001019'
        locus = Locus(gbk_file=pgap_file, locus_tag=locus_tag)
        locus_tags = locus.locus_tags()
        print('locus_tags:', locus_tags)
        locus_to_new_name_dict = dict(
            FAM3257_001014='FAM3257_001014 | OG1',
            # FAM3257_001015='FAM3257_001015 | OG0',
            FAM3257_001016='FAM3257_001016 | OG4',
            FAM3257_001017='FAM3257_001017 | OG6',
            FAM3257_001018='FAM3257_001018 | OG8',
            FAM3257_001019='FAM3257_001019 \n OG5',  # NOTE: newlines (\n) do not work well.
            FAM3257_001020='FAM3257_001020 | OG7',
            FAM3257_001021='FAM3257_001021 | OG2',
            FAM3257_001022='FAM3257_001022 | OG2',
        )
        locus = locus.rename_labels(locus_to_new_name_dict)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_custom_locus_tags.svg', format='svg')
        else:
            plt.show()

    def test_bokeh(self):
        """Render an interactive bokeh plot for a locus."""
        locus_tag = 'FAM3257_001020'
        locus = Locus(gbk_file=pgap_file, locus_tag=locus_tag, span=10000)
        bokeh = locus.plot_bokeh(figure_width=12, figure_height='auto', viewspan=3000)
        if save_plots:
            output_file(filename='tests/output/locus/test_single_locus_pgap.html', )
            save(bokeh)
        else:
            show(bokeh)

    def test_yeast(self):
        """Plot a locus from the yeast (R64-3-1) reference annotation."""
        locus_tag = 'R64-3-1_00340'
        locus = Locus(gbk_file=yeast_file, locus_tag=locus_tag, span=5000)
        locus_tags = locus.locus_tags()
        print('locus_tags:', locus_tags)
        ax, _ = locus.plot(figure_width=12)
        ax.figure.tight_layout()
        if save_plots:
            plt.savefig('tests/output/locus/test_yeast.svg', format='svg')
        else:
            plt.show()
| 5,741 | 5 | 400 |
0cfa0ff7449ae8596c8341b9e3c83a943cc5644f | 6,050 | py | Python | tests/test_dataset_uploader.py | mtlynch/sia_load_tester | f4e2785e6dbceb1cf9c912ccb2fad49617102afb | [
"MIT"
] | 6 | 2018-03-01T04:06:50.000Z | 2020-07-28T12:28:28.000Z | tests/test_dataset_uploader.py | mtlynch/sia_load_tester | f4e2785e6dbceb1cf9c912ccb2fad49617102afb | [
"MIT"
] | 40 | 2018-02-09T00:41:41.000Z | 2018-04-20T04:02:57.000Z | tests/test_dataset_uploader.py | mtlynch/sia_load_tester | f4e2785e6dbceb1cf9c912ccb2fad49617102afb | [
"MIT"
] | null | null | null | import threading
import unittest
import mock
from sia_load_tester import jobs
from sia_load_tester import dataset_uploader
from sia_load_tester import sia_client as sc
from sia_load_tester import upload_queue
| 41.156463 | 80 | 0.534215 | import threading
import unittest
import mock
from sia_load_tester import jobs
from sia_load_tester import dataset_uploader
from sia_load_tester import sia_client as sc
from sia_load_tester import upload_queue
class DatasetUploaderTest(unittest.TestCase):
def setUp(self):
self.mock_sia_api_impl = mock.Mock()
mock_sleep_fn = mock.Mock()
self.mock_sia_client = sc.SiaClient(self.mock_sia_api_impl,
mock_sleep_fn)
self.mock_condition_waiter = mock.Mock()
self.exit_event = threading.Event()
def test_blocks_until_all_uploads_complete(self):
upload_jobs = [
jobs.Job(local_path='/dummy-path/1.txt', sia_path='1.txt'),
jobs.Job(local_path='/dummy-path/2.txt', sia_path='2.txt'),
jobs.Job(local_path='/dummy-path/3.txt', sia_path='3.txt'),
]
self.mock_sia_api_impl.get_renter_files.return_value = {
u'files': [
{
u'siapath': u'1.txt',
u'localpath': u'/dummy-path/1.txt',
u'uploadprogress': 15,
},
{
u'siapath': u'2.txt',
u'localpath': u'/dummy-path/2.txt',
u'uploadprogress': 18,
},
{
u'siapath': u'3.txt',
u'localpath': u'/dummy-path/3.txt',
u'uploadprogress': 19,
},
]
}
queue = upload_queue.from_upload_jobs_and_sia_client(
upload_jobs, self.mock_sia_client)
uploader = dataset_uploader.DatasetUploader(queue, self.mock_sia_client,
self.mock_condition_waiter,
self.exit_event)
uploader.upload()
self.assertFalse(self.mock_sia_api_impl.set_renter_upload.called)
self.assertEqual(1, self.mock_condition_waiter.
wait_for_all_uploads_to_complete.call_count)
self.assertTrue(self.exit_event.is_set())
def test_does_not_start_new_uploads_when_too_many_uploads_are_in_progress(
self):
upload_jobs = [
jobs.Job(local_path='/dummy-path/1.txt', sia_path='1.txt'),
jobs.Job(local_path='/dummy-path/2.txt', sia_path='2.txt'),
jobs.Job(local_path='/dummy-path/3.txt', sia_path='3.txt'),
jobs.Job(local_path='/dummy-path/4.txt', sia_path='4.txt'),
jobs.Job(local_path='/dummy-path/5.txt', sia_path='5.txt'),
jobs.Job(local_path='/dummy-path/6.txt', sia_path='6.txt'),
jobs.Job(local_path='/dummy-path/7.txt', sia_path='7.txt'),
]
self.mock_sia_api_impl.get_renter_files.return_value = {
u'files': [
{
u'siapath': u'1.txt',
u'localpath': u'/dummy-path/1.txt',
u'uploadprogress': 15,
},
{
u'siapath': u'2.txt',
u'localpath': u'/dummy-path/2.txt',
u'uploadprogress': 18,
},
{
u'siapath': u'3.txt',
u'localpath': u'/dummy-path/3.txt',
u'uploadprogress': 19,
},
{
u'siapath': u'4.txt',
u'localpath': u'/dummy-path/4.txt',
u'uploadprogress': 16,
},
{
u'siapath': u'5.txt',
u'localpath': u'/dummy-path/5.txt',
u'uploadprogress': 5,
},
]
}
self.mock_sia_api_impl.set_renter_upload.return_value = True
queue = upload_queue.from_upload_jobs_and_sia_client(
upload_jobs, self.mock_sia_client)
uploader = dataset_uploader.DatasetUploader(queue, self.mock_sia_client,
self.mock_condition_waiter,
self.exit_event)
uploader.upload()
self.mock_sia_api_impl.set_renter_upload.assert_has_calls([
mock.call('6.txt', source='/dummy-path/6.txt'),
mock.call('7.txt', source='/dummy-path/7.txt')
])
self.assertEqual(
2,
self.mock_condition_waiter.wait_for_available_upload_slot.call_count
)
self.assertEqual(1, self.mock_condition_waiter.
wait_for_all_uploads_to_complete.call_count)
self.assertTrue(self.exit_event.is_set())
    def test_swallows_exceptions_instead_of_interrupting_upload(self):
        """An upload error on one job must not abort the whole run.

        The first call to set_renter_upload raises; the uploader is
        expected to keep going and still process the remaining job.
        """
        upload_jobs = [
            jobs.Job(local_path='/dummy-path/a.txt', sia_path='a.txt'),
            jobs.Job(local_path='/dummy-path/b.txt', sia_path='b.txt'),
        ]
        # No files on Sia yet, so both jobs need uploading.
        self.mock_sia_api_impl.get_renter_files.return_value = {u'files': None}
        # First upload attempt fails, second succeeds.
        # NOTE(review): only two side effects are configured, yet four calls
        # are asserted below — calls three and four will raise StopIteration,
        # which the uploader apparently also swallows; confirm against
        # dataset_uploader's retry/error handling.
        self.mock_sia_api_impl.set_renter_upload.side_effect = [
            ValueError('dummy upload error'), True
        ]
        queue = upload_queue.from_upload_jobs_and_sia_client(
            upload_jobs, self.mock_sia_client)
        uploader = dataset_uploader.DatasetUploader(queue, self.mock_sia_client,
                                                    self.mock_condition_waiter,
                                                    self.exit_event)
        uploader.upload()
        # NOTE(review): 'a.txt' appears three times, presumably because the
        # failed job is re-queued and retried — verify the retry policy.
        self.mock_sia_api_impl.set_renter_upload.assert_has_calls([
            mock.call('a.txt', source='/dummy-path/a.txt'),
            mock.call('b.txt', source='/dummy-path/b.txt'),
            mock.call('a.txt', source='/dummy-path/a.txt'),
            mock.call('a.txt', source='/dummy-path/a.txt'),
        ])
        self.assertEqual(1, self.mock_condition_waiter.
                         wait_for_all_uploads_to_complete.call_count)
        self.assertTrue(self.exit_event.is_set())
| 5,683 | 24 | 131 |
448454e0c717962da8492bcdab015d3d4cd96397 | 4,177 | py | Python | deepmechanics/tests/test_utilities.py | FernandezErbes/deepmechanics | 175ce4dd9be82bbbc94921fd262cf4519ae17890 | [
"MIT"
] | null | null | null | deepmechanics/tests/test_utilities.py | FernandezErbes/deepmechanics | 175ce4dd9be82bbbc94921fd262cf4519ae17890 | [
"MIT"
] | null | null | null | deepmechanics/tests/test_utilities.py | FernandezErbes/deepmechanics | 175ce4dd9be82bbbc94921fd262cf4519ae17890 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import grad
from torch.functional import unique
from deepmechanics import utilities
import unittest
| 38.321101 | 91 | 0.586545 | import torch
from torch.autograd import grad
from torch.functional import unique
from deepmechanics import utilities
import unittest
class TestIntegration(unittest.TestCase):
def test_make_array_unique(self):
non_unique_array = [1, 2, 1, 2, 3, 4, 4, 2, 1]
unique_array = [1, 2, 3, 4]
self.assertEqual(utilities.make_array_unique(non_unique_array), unique_array)
def test_get_derivative(self):
xs = torch.linspace(0, 10, 11, requires_grad=True, dtype=torch.float64).view(-1, 1)
ys = xs**3
first_derivative = utilities.get_derivative(ys, xs, 1)
second_derivative = utilities.get_derivative(ys, xs, 2)
for x, dy, ddy in zip(xs, first_derivative, second_derivative):
x = x.detach().numpy()[0]
dy = dy.detach().numpy()[0]
ddy = ddy.detach().numpy()[0]
self.assertAlmostEqual(dy, 3 * x**2)
self.assertAlmostEqual(ddy, 6 * x)
def test_tensorize_1d(self):
# scalar
x = 12
x = utilities.tensorize_1d(x)
self.assertTrue(isinstance(x, torch.Tensor))
self.assertEqual(list(x.size()), [1])
self.assertAlmostEqual(x.detach().numpy()[0], x)
# list
x = [1, 2, 5, 9]
x = utilities.tensorize_1d(x)
self.assertTrue(isinstance(x, torch.Tensor))
self.assertEqual(list(x.size()), [4, 1])
self.assertAlmostEqual(x.detach().numpy()[0][0], x[0])
self.assertAlmostEqual(x.detach().numpy()[1][0], x[1])
self.assertAlmostEqual(x.detach().numpy()[2][0], x[2])
self.assertAlmostEqual(x.detach().numpy()[3][0], x[3])
def test_tensorize_2d(self):
# scalar
x = 12
y = 24
xy = utilities.tensorize_2d(x, y)
self.assertTrue(isinstance(xy, torch.Tensor))
self.assertEqual(list(xy.size()), [2])
self.assertAlmostEqual(xy.detach().numpy()[0], x)
self.assertAlmostEqual(xy.detach().numpy()[1], y)
# list
x = [1, 2, 5, 9]
y = [11, 12, 15, 19]
xy = utilities.tensorize_2d(x, y)
self.assertTrue(isinstance(xy, torch.Tensor))
self.assertEqual(list(xy.size()), [4, 2])
self.assertAlmostEqual(xy.detach().numpy()[0][0], x[0])
self.assertAlmostEqual(xy.detach().numpy()[1][0], x[1])
self.assertAlmostEqual(xy.detach().numpy()[2][0], x[2])
self.assertAlmostEqual(xy.detach().numpy()[3][0], x[3])
self.assertAlmostEqual(xy.detach().numpy()[0][1], y[0])
self.assertAlmostEqual(xy.detach().numpy()[1][1], y[1])
self.assertAlmostEqual(xy.detach().numpy()[2][1], y[2])
self.assertAlmostEqual(xy.detach().numpy()[3][1], y[3])
def test_tensorize_3d(self):
# scalar
x = 12
y = 24
z = 36
xyz = utilities.tensorize_3d(x, y, z)
self.assertTrue(isinstance(xyz, torch.Tensor))
self.assertEqual(list(xyz.size()), [3])
self.assertAlmostEqual(xyz.detach().numpy()[0], x)
self.assertAlmostEqual(xyz.detach().numpy()[1], y)
self.assertAlmostEqual(xyz.detach().numpy()[2], z)
# list
x = [1, 2, 5, 9]
y = [11, 12, 15, 19]
z = [21, 22, 25, 29]
xyz = utilities.tensorize_3d(x, y, z)
self.assertTrue(isinstance(xyz, torch.Tensor))
self.assertEqual(list(xyz.size()), [4, 3])
self.assertAlmostEqual(xyz.detach().numpy()[0][0], x[0])
self.assertAlmostEqual(xyz.detach().numpy()[1][0], x[1])
self.assertAlmostEqual(xyz.detach().numpy()[2][0], x[2])
self.assertAlmostEqual(xyz.detach().numpy()[3][0], x[3])
self.assertAlmostEqual(xyz.detach().numpy()[0][1], y[0])
self.assertAlmostEqual(xyz.detach().numpy()[1][1], y[1])
self.assertAlmostEqual(xyz.detach().numpy()[2][1], y[2])
self.assertAlmostEqual(xyz.detach().numpy()[3][1], y[3])
self.assertAlmostEqual(xyz.detach().numpy()[0][2], z[0])
self.assertAlmostEqual(xyz.detach().numpy()[1][2], z[1])
self.assertAlmostEqual(xyz.detach().numpy()[2][2], z[2])
self.assertAlmostEqual(xyz.detach().numpy()[3][2], z[3])
| 3,865 | 20 | 158 |
3ad9d8f9dc0fb1da49d7f93f4c0f5e0bea3e201f | 15,153 | py | Python | archive/2021-03-7/tools/cx_analysis/build.py | CambridgeSemiticsLab/BH_time_collocations | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | 5 | 2019-06-19T19:42:21.000Z | 2021-04-20T22:43:45.000Z | archive/2021-03-7/tools/cx_analysis/build.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | 2 | 2020-02-25T10:19:40.000Z | 2020-03-13T15:29:01.000Z | archive/2021-03-7/tools/cx_analysis/build.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | null | null | null | """
Classes used to identify and build Construction objects
"""
import collections
import copy
import networkx as nx
from positions import Dummy, Positions, PositionsTF, Walker
from debugging import Debugger
from .cx import Construction
class CXbuilder(object):
    """Identifies and builds constructions using Text-Fabric nodes."""

    def __init__(self):
        """Initialize CXbuilder, giving methods for CX detection."""
        # cache matched constructions for backreferences
        self.cache = collections.defaultdict(
            lambda: collections.defaultdict()
        )
        # NB: objects below should be overwritten
        # and configured for the particular cxs needed
        self.cxs = tuple()
        self.yieldsto = {}
        # for drip-bucket categories
        self.dripbucket = tuple()

    def cxcache(self, element, name, method):
        """Get cx from cache, or run the detection method."""
        try:
            return self.cache[element][name]
        except KeyError:
            return method(element)

    def test_result(self, test, *cases):
        """Return the result of a test as a new Construction object."""
        # return last test
        if test:
            cx = Construction(
                match=test[-1],
                cases=cases,
                **test[-1]
            )
            self.cache[cx.element][cx.name] = cx
            return cx
        else:
            return Construction(cases=cases, **cases[0])

    def test(self, *cases):
        """Populate Construction obj based on a case's all-Truth value.

        The last-matching case will be used to populate
        a Construction object. This allows more complex
        cases to take precedence over simpler ones.

        Args:
            cases: an arbitrary number of dictionaries,
                each of which contains a string key that
                describes the test and a test that evals
                to a Boolean.

        Returns:
            a populated or blank Construction object
        """
        def get_roles(case):
            # roles are optional; default to a single truthy entry so
            # patterns without explicit roles still pass the all() check
            # (fix: this helper was missing, making test() raise NameError)
            roles = case.get('roles', {'': True})
            return roles

        # find cases where all cnds == True
        test = [
            case for case in cases
            if all(case['conds'].values())
            and all(get_roles(case).values())
        ]
        return self.test_result(test, *cases)

    def findall(self, element):
        """Runs analysis for all constructions with an element.

        Returns a list of matched Construction objects.
        """
        results = []
        # add cxs from this builder
        for funct in self.cxs:
            cx = funct(element)
            if cx:
                results.append(cx)
        return results

    def sortbyslot(self, cxlist):
        """Sort constructions by order of contained slots."""
        sort = sorted(
            ((sorted(cx.slots), cx) for cx in cxlist),
            key=lambda k: k[0]
        )
        return [cx[-1] for cx in sort]

    def clusterCXs(self, cxlist):
        """Cluster constructions which overlap in their slots/roles.

        Overlapping constructions form a graph wherein the constructions
        are nodes and the overlaps are edges. This algorithm retrieves all
        interconnected constructions. It does so with a recursive check
        for overlapping slot sets. Merging the slot sets produces new
        overlaps. The algorithm passes over all constructions until no
        further overlaps are detected.

        Args:
            cxlist: list of Construction objects

        Returns:
            list of lists, where each embedded list
            is a cluster of overlapping constructions.
        """
        clusters = []
        cxlist = [i for i in cxlist]  # operate on copy
        # iterate until no more intersections found
        thiscluster = [cxlist.pop(0)]
        theseslots = set(s for s in thiscluster[0].slots)
        # loop continues as it snowballs and picks up slots
        # loop stops when a complete loop produces no other matches
        while cxlist:
            matched = False  # whether loop was successful
            for cx in cxlist:
                if theseslots & set(cx.slots):
                    thiscluster.append(cx)
                    theseslots |= set(cx.slots)
                    matched = True
            # cxlist shrinks; when empty, it stops loop
            cxlist = [
                cx for cx in cxlist
                if cx not in thiscluster
            ]
            # assemble loop
            if not matched:
                clusters.append(thiscluster)
                thiscluster = [cxlist.pop(0)]
                theseslots = set(s for s in thiscluster[0].slots)
        # add last cluster
        clusters.append(thiscluster)
        return clusters

    def yields(self, cx1, cx2):
        """Determine whether to submit cx1 to cx2."""
        # determine which yields dict to use
        # yielding can be configured generally
        # or specific to a pattern and its rules
        yieldsto = cx1.__dict__.get('yieldsto', self.yieldsto)
        # get name or class yields
        cx1yields = yieldsto.get(
            cx1.name,
            yieldsto.get(cx1.kind, set())
        )
        # test yields
        if type(cx1yields) == set:
            return bool({cx2.name, cx2.kind} & cx1yields)
        elif type(cx1yields) == bool:
            return cx1yields

    def interslots(self, cx1, cx2):
        """Get the intersecting slots of two CXs.

        Return as sorted tuple.
        """
        return tuple(sorted(
            set(cx1.slots) & set(cx2.slots)
        ))

    def slots2node(self, cx, slots):
        """Get a CX node from a tuple of slots."""
        return_node = None  # return last matching node
        for node in nx.bfs_tree(cx.graph, cx):
            if cx.getslots(node) == slots and type(node) != int:
                return_node = node
        return return_node

    def intersect_node(self, cx1, cx2):
        """Get node from cx1 with slots common with cx2."""
        intersect = self.interslots(cx1, cx2)
        return self.slots2node(cx1, intersect)

    def weaveCX(self, cxlist, debug=False):
        """Weave overlapping constructions into a single construction.

        Overlapping constructions form a graph in which the constructions
        are nodes and slot overlaps are edges, meaning they function
        together as one unit. Moving from right to left (Hebrew order),
        each construction consumes and subsumes the next one it overlaps
        with, producing a single construction with embedding. E.g. for
        "to the dog":

            Preposition: ('prep', to), ('obj', the)
            Definite:    ('art', the), ('noun', dog)

        the shared slot "the" expands the Preposition's 'obj' role into
        the whole Definite construction:

            Preposition: ('prep', to),
                         ('obj', Definite: ('art', the), ('noun', dog))

        Args:
            cxlist: constructions pre-sorted by word order; the list is
                consumed during iteration
            debug: an option to display debugging messages

        Prerequisites:
            self.yieldsto: maps a yielding construction name/kind to the
                set of construction names/kinds it should be subsumed
                under regardless of word order (e.g. quantifier chains
                yielding to their semantically prominent head noun).

        Returns:
            a single composed Construction
        """
        db = Debugger(debug)
        db.say(f'\nReceived cxlist {cxlist}', 0)
        # compile all cxs to here
        root = copy.deepcopy(cxlist.pop(0))
        db.say(f'Beginning analysis with {root}')
        # begin matching and remapping
        while cxlist:
            # get next cx
            ncx = copy.deepcopy(cxlist.pop(0))
            # find root node with slots intersecting next cx
            db.say(f'comparing {root} with {ncx}', 1)
            node = self.intersect_node(root, ncx)
            db.say(f'intersect is at {node}')
            # remove cxs covered by larger version
            if root in ncx:
                db.say(f'root {root} in ncx {ncx}...replacing root with ncx')
                root = ncx
            # update yielded nodes
            elif self.yields(node, ncx):
                db.say(f'{node} being yielded to {ncx}')
                # get top-most yielding node
                path = nx.shortest_path(root.graph, root, node)
                while path and self.yields(path[-1], ncx):
                    node = path.pop(-1)
                db.say(f'top-yielding node is {node}', 2)
                # update ncx graph
                db.say(f'comparing {ncx} with {node}')
                ncxnode = self.intersect_node(ncx, node)
                db.say(f'intersect is at {ncxnode}')
                ncx.updategraph(ncxnode, node)
                db.say(f'ncx updated to {ncx}')
                # update root graph or remap root to ncx
                if root != node:
                    rnode = self.intersect_node(root, ncx)
                    db.say(f'replacing node {rnode} in root {root} with {ncx}')
                    root.updategraph(rnode, ncx)
                else:
                    # switch root and ncx
                    db.say(f'switching {root} with {ncx}')
                    root = ncx
            # update all non-yielding nodes
            else:
                db.say(f'\tupdating {node} in root with {ncx}')
                root.updategraph(node, ncx)
        return root

    def analyzestretch(self, stretch, duplicate=False, debug=False):
        """Analyze an entire stretch of a linguistic unit.

        Applies construction tests for every constituent
        and merges all overlapping constructions into a
        single construction.

        Args:
            stretch: an iterable containing elements that
                are tested by construction tests to build
                Construction objects. e.g. stretch might be
                a list of TF word nodes.
            duplicate: whether to keep a copy of an analyzed
                element alongside its matches
            debug: option to display debugging messages

        Returns:
            list of merged constructions
        """
        db = Debugger(debug)
        # match elements to constructions based on tests
        rawcxs = []
        covered = set()
        for element in stretch:
            matches = self.findall(element)
            if matches:
                rawcxs.extend(matches)
                covered |= set(
                    el for cx in matches
                    for el in cx.graph
                )
            # keep copy of the cx
            if duplicate:
                rawcxs.append(element)
                covered.add(element)
        # apply drip-bucket categories
        for element in set(stretch) - covered:
            for funct in self.dripbucket:
                dripcx = funct(element)
                if dripcx:
                    rawcxs.append(dripcx)
        db.say(f'rawcxs found: {rawcxs}...')
        # return empty results
        if not rawcxs:
            db.say(f'!no cx pattern matches! returning []')
            return []
        # cluster and sort matched constructions
        clsort = [
            self.sortbyslot(cxlist)
            for cxlist in self.clusterCXs(rawcxs)
        ]
        db.say(f'cxs clustered into: {clsort}...')
        db.say(f'Beginning weaveCX method...')
        # merge overlapping constructions
        cxs = [
            self.weaveCX(cluster, debug=debug)
            for cluster in clsort
        ]
        return self.sortbyslot(cxs)
class CXbuilderTF(CXbuilder):
    """Build Constructions with TF (Text-Fabric) integration."""

    def __init__(self, tf, **kwargs):
        """Cache Text-Fabric API handles and the default search context.

        Fix: this __init__ was missing, so getP/getWk raised
        AttributeError on self.tf / self.F / self.L / self.context.

        Args:
            tf: a loaded Text-Fabric instance exposing ``tf.api``.
            **kwargs: optional ``context``, the object-type string used
                to delimit searches (defaults to 'timephrase').
        """
        # set up TF data for tests
        self.tf = tf
        # F/E/T/L are the standard Text-Fabric accessors
        # (features, edges, text, locality)
        self.F, self.E, self.T, self.L = tf.api.F, tf.api.E, tf.api.T, tf.api.L
        self.context = kwargs.get('context', 'timephrase')
        # set up CXbuilder (cache, cxs, yieldsto, dripbucket)
        CXbuilder.__init__(self)

    def getP(self, node, context=None):
        """Get Positions object for a TF node.

        Return Dummy object if not node.
        """
        context = context or self.context
        if not node:
            # NOTE(review): returns the Dummy class itself here, while
            # getWk returns an instance — confirm the asymmetry is wanted
            return Dummy
        return PositionsTF(node, context, self.tf.api).get

    def getWk(self, node, context=None):
        """Get Walker object for a TF word node.

        Return Dummy object if not node.
        """
        if not node:
            return Dummy()
        # format tf things to send
        thisotype = self.F.otype.v(node)
        get_context = context or self.context
        # containing context node, then all nodes of this type within it
        context = self.L.u(node, get_context)[0]
        positions = self.L.d(context, thisotype)
        return Walker(node, positions)
| 34.995381 | 79 | 0.539563 | """
Classes used to identify and build Construction objects
"""
import collections
import copy
import networkx as nx
from positions import Dummy, Positions, PositionsTF, Walker
from debugging import Debugger
from .cx import Construction
class CXbuilder(object):
"""Identifies and builds constructions using Text-Fabric nodes."""
def __init__(self):
"""Initialize CXbuilder, giving methods for CX detection."""
# cache matched constructions for backreferences
self.cache = collections.defaultdict(
lambda: collections.defaultdict()
)
# NB: objects below should be overwritten
# and configured for the particular cxs needed
self.cxs = tuple()
self.yieldsto = {}
# for drip-bucket categories
self.dripbucket = tuple()
def cxcache(self, element, name, method):
"""Get cx from cache or run."""
try:
return self.cache[element][name]
except KeyError:
return method(element)
def test_result(self, test, *cases):
"""Return the result of a test as a new Construction object"""
# return last test
if test:
cx = Construction(
match=test[-1],
cases=cases,
**test[-1]
)
self.cache[cx.element][cx.name] = cx
return cx
else:
return Construction(cases=cases, **cases[0])
def test(self, *cases):
"""Populate Construction obj based on a cases's all Truth value.
The last-matching case will be used to populate
a Construction object. This allows more complex
cases to take precedence over simpler ones.
Args:
cases: an arbitrary number of dictionaries,
each of which contains a string key that
describes the test and a test that evals
to a Boolean.
Returns:
a populated or blank Construction object
"""
# find cases where all cnds == True
def get_roles(case):
# get roles dict
roles = case.get('roles', {'':True})
return roles
test = [
case for case in cases
if all(case['conds'].values())
and all(get_roles(case).values())
]
return self.test_result(test, *cases)
def findall(self, element):
"""Runs analysis for all constructions with an element.
Returns as dict with test:result as key:value.
"""
results = []
# add cxs from this builder
for funct in self.cxs:
cx = funct(element)
if cx:
results.append(cx)
return results
def sortbyslot(self, cxlist):
"""Sort constructions by order of contained slots."""
sort = sorted(
((sorted(cx.slots), cx) for cx in cxlist),
key=lambda k: k[0]
)
return [cx[-1] for cx in sort]
def clusterCXs(self, cxlist):
"""Cluster constructions which overlap in their slots/roles.
Overlapping constructions form a graph wherein the constructions
are nodes and the overlaps are edges. This algorithm retrieves all
interconnected constructions. It does so with a recursive check
for overlapping slot sets. Merging the slot sets produces new
overlaps. The algorithm passes over all constructions until no
further overlaps are detected.
Args:
cxlist: list of Construction objects
Returns:
list of lists, where each embedded list
is a cluster of overlapping constructions.
"""
clusters = []
cxlist = [i for i in cxlist] # operate on copy
# iterate until no more intersections found
thiscluster = [cxlist.pop(0)]
theseslots = set(s for s in thiscluster[0].slots)
# loop continues as it snowballs and picks up slots
# loop stops when a complete loop produces no other matches
while cxlist:
matched = False # whether loop was successful
for cx in cxlist:
if theseslots & set(cx.slots):
thiscluster.append(cx)
theseslots |= set(cx.slots)
matched = True
# cxlist shrinks; when empty, it stops loop
cxlist = [
cx for cx in cxlist
if cx not in thiscluster
]
# assemble loop
if not matched:
clusters.append(thiscluster)
thiscluster = [cxlist.pop(0)]
theseslots = set(s for s in thiscluster[0].slots)
# add last cluster
clusters.append(thiscluster)
return clusters
def yields(self, cx1, cx2):
"""Determine whether to submit cx1 to cx2."""
# determine which yields dict to use
# yielding can be configured generally
# or specific to a pattern and its rules
yieldsto = cx1.__dict__.get('yieldsto', self.yieldsto)
# get name or class yields
cx1yields = yieldsto.get(
cx1.name,
yieldsto.get(cx1.kind, set())
)
# test yields
if type(cx1yields) == set:
return bool({cx2.name, cx2.kind} & cx1yields)
elif type(cx1yields) == bool:
return cx1yields
def interslots(self, cx1, cx2):
"""Get the intersecting slots of two CXs
Return as sorted tuple.
"""
return tuple(sorted(
set(cx1.slots) & set(cx2.slots)
))
def slots2node(self, cx, slots):
"""Get a CX node from a tuple of slots."""
return_node = None # return last matching node
for node in nx.bfs_tree(cx.graph, cx):
if cx.getslots(node) == slots and type(node) != int:
return_node = node
return return_node
def intersect_node(self, cx1, cx2):
"""Get node from cx1 with slots common with cx2."""
intersect = self.interslots(cx1, cx2)
return self.slots2node(cx1, intersect)
def weaveCX(self, cxlist, debug=False):
"""Weave together constructions on their intersections.
Overlapping constructions form a graph wherein constructions
are nodes and the overlaps are edges. The graph indicates
that the constructions function together as one single unit.
weaveCX combines all constructions into a single one. Moving
from right-to-left (Hebrew), the function consumes and subsumes
subsequent constructions to previous ones. The result is a
single unit with embedding based on the order of consumption.
Roles in previous constructions are thus expanded into the
constructions of their subsequent constituents.
For instance, take the following phrase in English:
> "to the dog"
Say a CXbuilder object contains basic noun patterns and can
recognize the following contained constructions:
> cx Preposition: ('prep', to), ('obj', the),
> cx Definite: ('art', the), ('noun', dog)
When the words of the constructions are compared, an overlap
can be seen:
> cx Preposition: to the
> cx Definite: the dog
The overlap in this case is "the". The overlap suggests that
the slot filled by "the" in the Preposition construction
should be expanded. This can be done by remapping the role
filled by "the" alone to the subsequent Definite construction.
This results in embedding:
> cx Preposition: ('prep', to),
('obj', cx Definite: ('art', the),
('noun', dog))
weaveCX accomplishes this by calling the updaterole method native
to Construction objects. The end result is a list of merged
constructions that contain embedding.
Args:
cxlist: a list of constructions pre-sorted for word order;
the list shrinks throughout recursive iteration until
the job is finished
cx: a construction object to begin/continue analysis on
debug: an option to display debugging messages for when
things go wrong 🤪
Prerequisites:
self.yieldsto: A dictionary in CXbuilder that tells weaveCX
to subsume one construction into another regardless of
word order. Key is name of submissive construction, value
is a set of dominating constructions. Important for, e.g.,
cases of quantification where a head-noun might be preceded
by a chain of quantifiers but should still be at the top of
the structure since it is more semantically prominent.
Returns:
a list of composed constructions
"""
db = Debugger(debug)
db.say(f'\nReceived cxlist {cxlist}', 0)
# compile all cxs to here
root = copy.deepcopy(cxlist.pop(0))
db.say(f'Beginning analysis with {root}')
# begin matching and remapping
while cxlist:
# get next cx
ncx = copy.deepcopy(cxlist.pop(0))
# find root node with slots intersecting next cx
db.say(f'comparing {root} with {ncx}', 1)
node = self.intersect_node(root, ncx)
db.say(f'intersect is at {node}')
# remove cxs covered by larger version
if root in ncx:
db.say(f'root {root} in ncx {ncx}...replacing root with ncx')
root = ncx
# update yielded nodes
elif self.yields(node, ncx):
db.say(f'{node} being yielded to {ncx}')
# get top-most yielding node
path = nx.shortest_path(root.graph, root, node)
while path and self.yields(path[-1], ncx):
node = path.pop(-1)
db.say(f'top-yielding node is {node}', 2)
# update ncx graph
db.say(f'comparing {ncx} with {node}')
ncxnode = self.intersect_node(ncx, node)
db.say(f'intersect is at {ncxnode}')
ncx.updategraph(ncxnode, node)
db.say(f'ncx updated to {ncx}')
# update root graph or remap root to ncx
if root != node:
rnode = self.intersect_node(root, ncx)
db.say(f'replacing node {rnode} in root {root} with {ncx}')
root.updategraph(rnode, ncx)
else:
# switch root and ncx
db.say(f'switching {root} with {ncx}')
root = ncx
# update all non-yielding nodes
else:
db.say(f'\tupdating {node} in root with {ncx}')
root.updategraph(node, ncx)
return root
def analyzestretch(self, stretch, duplicate=False, debug=False):
"""Analyze an entire stretch of a linguistic unit.
Applies construction tests for every constituent
and merges all overlapping constructions into a
single construction.
Args:
stretch: an iterable containing elements that
are tested by construction tests to build
Construction objects. e.g. stretch might be
a list of TF word nodes.
duplicate: whether to keep a copy of an analyzed
cx
debug: option to display debuggin messages
Returns:
list of merged constructions
"""
db = Debugger(debug)
# match elements to constructions based on tests
rawcxs = []
covered = set()
for element in stretch:
matches = self.findall(element)
if matches:
rawcxs.extend(matches)
covered |= set(
el for cx in matches
for el in cx.graph
)
# keep copy of the cx
if duplicate:
rawcxs.append(element)
covered.add(element)
# apply drip-bucket categories
for element in set(stretch) - covered:
for funct in self.dripbucket:
dripcx = funct(element)
if dripcx:
rawcxs.append(dripcx)
db.say(f'rawcxs found: {rawcxs}...')
# return empty results
if not rawcxs:
db.say(f'!no cx pattern matches! returning []')
return []
# cluster and sort matched constructions
clsort = [
self.sortbyslot(cxlist)
for cxlist in self.clusterCXs(rawcxs)
]
db.say(f'cxs clustered into: {clsort}...')
db.say(f'Beginning weaveCX method...')
# merge overlapping constructions
cxs = [
self.weaveCX(cluster, debug=debug)
for cluster in clsort
]
return self.sortbyslot(cxs)
class CXbuilderTF(CXbuilder):
    """Build Constructions with TF integration."""
    def __init__(self, tf, **kwargs):
        """Cache Text-Fabric API handles and the default search context."""
        # set up TF data for tests
        self.tf = tf
        # F/E/T/L are the standard Text-Fabric accessors
        # (features, edges, text, locality)
        self.F, self.E, self.T, self.L = tf.api.F, tf.api.E, tf.api.T, tf.api.L
        # object-type string used to delimit searches
        self.context = kwargs.get('context', 'timephrase')
        # set up CXbuilder
        CXbuilder.__init__(self)
    def getP(self, node, context=None):
        """Get Positions object for a TF node.
        Return Dummy object if not node.
        """
        context = context or self.context
        if not node:
            # NOTE(review): returns the Dummy class itself here, while
            # getWk below returns an instance — confirm this asymmetry
            # is intentional.
            return Dummy
        return PositionsTF(node, context, self.tf.api).get
    def getWk(self, node, context=None):
        """Get Walker object for a TF word node.
        Return Dummy object if not node.
        """
        if not node:
            return Dummy()
        # format tf things to send
        thisotype = self.F.otype.v(node)
        get_context = context or self.context
        # containing context node, then all nodes of this type within it
        context = self.L.u(node, get_context)[0]
        positions = self.L.d(context, thisotype)
        return Walker(node, positions)
| 387 | 0 | 61 |
a095b4723d3bbf47f2b2321f64d5c79c88632863 | 7,241 | py | Python | 2020/cassava-leaf-disease-classification/train.py | kn25ha01/kaggle-competitions | fce44d6758c4757a7d0a0a6b00d756ff26a97d3f | [
"MIT"
] | null | null | null | 2020/cassava-leaf-disease-classification/train.py | kn25ha01/kaggle-competitions | fce44d6758c4757a7d0a0a6b00d756ff26a97d3f | [
"MIT"
] | null | null | null | 2020/cassava-leaf-disease-classification/train.py | kn25ha01/kaggle-competitions | fce44d6758c4757a7d0a0a6b00d756ff26a97d3f | [
"MIT"
] | null | null | null | import os
import re
import argparse
import datetime
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from classifier import MultiClassifierModel
from classifier import BinaryClassifierModel
from factory import get_dataset, get_model
from util.seed import set_seed
from util.yml import get_config
from logging import getLogger, DEBUG, INFO, StreamHandler, FileHandler, Formatter
logger = getLogger(__name__)
# TODO: eventually train on the full data without a validation split.
# NOTE: the per-fold "fold0" subdirectory is unwanted, so workdir is overridden to the base directory.
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and launch training.
    # NOTE(review): parse_args is not defined in this excerpt — it is
    # presumably defined elsewhere in the module; confirm before running.
    args = parse_args()
    main(args)
| 36.570707 | 114 | 0.679188 | import os
import re
import argparse
import datetime
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from classifier import MultiClassifierModel
from classifier import BinaryClassifierModel
from factory import get_dataset, get_model
from util.seed import set_seed
from util.yml import get_config
from logging import getLogger, DEBUG, INFO, StreamHandler, FileHandler, Formatter
logger = getLogger(__name__)
# TODO: eventually train on the full data without a validation split.
# NOTE: the per-fold "fold0" subdirectory is unwanted, so workdir is overridden to the base directory.
def train_model01_1(args, model_config, dataset_config, train_config, logger=None):
    """Train the multi-class classifier (model01_1) on fold 0 of a stratified split.

    Removes dead code from the original: a ``workdir`` assignment that was
    immediately overwritten, and commented-out class-weighting code that
    referenced ``self`` inside a plain function. Behavior is unchanged.

    Args:
        args: parsed CLI namespace; uses ``args.workdir`` and ``args.seed``.
        model_config: config dict passed to ``get_model``.
        dataset_config: must contain ``train_csv``; passed to ``get_dataset``.
        train_config: must contain ``folds``, ``batch_size`` and ``epochs``.
        logger: optional logger forwarded to factories and the model wrapper.
    """
    workdir_base = args.workdir
    df = pd.read_csv(dataset_config['train_csv'])
    skf = StratifiedKFold(n_splits=train_config['folds'],
                          shuffle=True, random_state=args.seed)
    for fold, (train_index, valid_index) in enumerate(skf.split(df, df['label'])):
        print('=' * 20, f'fold {fold}', '=' * 20)
        # everything is written to the base directory — the per-fold
        # subdirectory was deliberately dropped (only fold 0 is trained)
        workdir = workdir_base
        os.makedirs(workdir, exist_ok=True)
        train_df = df.loc[train_index].reset_index(drop=True)
        valid_df = df.loc[valid_index].reset_index(drop=True)
        train_dataset = get_dataset(train_df, dataset_config, 'train', logger)
        valid_dataset = get_dataset(valid_df, dataset_config, 'valid', logger)
        # class weighting is currently disabled for the multi-class model
        loss_weight = None
        model = get_model(model_config, logger)
        mcModel = MultiClassifierModel(workdir, model, logger)
        mcModel.fit(train_dataset, valid_dataset,
                    train_config['batch_size'], train_config['epochs'],
                    loss_weight)
        # only fold0
        break
def train_model02_1(args, model_config, dataset_config, train_config, logger=None):
    """Train the binary classifier (model02_1) on fold 0 of a stratified split.

    Labels 1, 2 and 4 are collapsed into class 0 and label 3 becomes
    class 1, so the task is "class 3 vs. the rest".

    Args:
        args: parsed CLI namespace; uses ``args.workdir`` and ``args.seed``.
        model_config: config dict passed to ``get_model``.
        dataset_config: must contain ``train_csv``; passed to ``get_dataset``.
        train_config: must contain ``folds``, ``batch_size`` and ``epochs``.
        logger: optional logger forwarded to factories and the model wrapper.
    """
    workdir_base = args.workdir
    df = pd.read_csv(dataset_config['train_csv'])
    #df = df[:200]
    skf = StratifiedKFold(n_splits=train_config['folds'], shuffle=True, random_state=args.seed)
    for fold, (train_index, valid_index) in enumerate(skf.split(df, df['label'])):
        print('=' * 20, f'fold {fold}', '=' * 20)
        # the per-fold path is computed but immediately overridden: all
        # output goes to the base directory (only fold 0 is trained)
        workdir = os.path.join(workdir_base, f'fold{fold}')
        workdir = workdir_base
        os.makedirs(workdir, exist_ok=True)
        train_df = df.loc[train_index].reset_index(drop=True)
        # binarize: {1, 2, 4} -> 0, {3} -> 1 (label 0 stays 0)
        train_df['label'] = train_df['label'].replace([1,2,4,3], [0,0,0,1])
        valid_df = df.loc[valid_index].reset_index(drop=True)
        valid_df['label'] = valid_df['label'].replace([1,2,4,3], [0,0,0,1])
        train_dataset = get_dataset(train_df, dataset_config, 'train', logger)
        valid_dataset = get_dataset(valid_df, dataset_config, 'valid', logger)
        # inverse-frequency class weights, normalized by the rarest class
        loss_weight = train_df['label'].value_counts().reset_index().sort_values('index')['label'].to_numpy()
        loss_weight = loss_weight.min() / loss_weight
        if logger:
            logger.debug(f'loss weight : {loss_weight}')
        # NOTE(review): loss_weight is computed and logged but never passed
        # to fit() (unlike train_model01_1) — confirm whether
        # BinaryClassifierModel.fit is meant to receive it.
        model = get_model(model_config, logger)
        bcModel = BinaryClassifierModel(workdir, model, logger)
        bcModel.fit(train_dataset, valid_dataset, train_config['batch_size'], train_config['epochs'])
        # only fold0
        break
def train_model02_2(args, model_config, dataset_config, train_config, logger=None):
    """Train a 4-class classifier (model02_2) on fold 0, excluding label 3.

    Rows whose label is 3 are dropped and label 4 is remapped to 3, so the
    remaining classes are contiguous: 0, 1, 2, 3 (new 3 = original 4).

    Args:
        args: parsed CLI namespace; uses ``args.workdir`` and ``args.seed``.
        model_config: config dict passed to ``get_model``.
        dataset_config: must contain ``train_csv``; passed to ``get_dataset``.
        train_config: must contain ``folds``, ``batch_size`` and ``epochs``.
        logger: optional logger forwarded to factories and the model wrapper.
    """
    workdir_base = args.workdir
    df = pd.read_csv(dataset_config['train_csv'])
    #df = df[:200]
    skf = StratifiedKFold(n_splits=train_config['folds'], shuffle=True, random_state=args.seed)
    for fold, (train_index, valid_index) in enumerate(skf.split(df, df['label'])):
        print('=' * 20, f'fold {fold}', '=' * 20)
        # the per-fold path is computed but immediately overridden: all
        # output goes to the base directory (only fold 0 is trained)
        workdir = os.path.join(workdir_base, f'fold{fold}')
        workdir = workdir_base
        os.makedirs(workdir, exist_ok=True)
        train_df = df.loc[train_index].reset_index(drop=True)
        # drop class 3 and compact the label space: 4 -> 3
        train_df = train_df[train_df['label']!=3].reset_index(drop=True)
        train_df['label'] = train_df['label'].replace(4, 3)
        valid_df = df.loc[valid_index].reset_index(drop=True)
        valid_df = valid_df[valid_df['label']!=3].reset_index(drop=True)
        valid_df['label'] = valid_df['label'].replace(4, 3)
        train_dataset = get_dataset(train_df, dataset_config, 'train', logger)
        valid_dataset = get_dataset(valid_df, dataset_config, 'valid', logger)
        # class weighting disabled for this variant
        loss_weight = None
        model = get_model(model_config, logger)
        mcModel = MultiClassifierModel(workdir, model, logger)
        mcModel.fit(train_dataset, valid_dataset, train_config['batch_size'], train_config['epochs'], loss_weight)
        # only fold0
        break
def main(args):
    """Entry point: build the work directory, configure logging, load the YAML
    config and dispatch to the ``train_*`` function named by the config path.

    NOTE(review): this relies on a fixed directory layout of ``args.yml`` —
    after splitting on '/' and '.', ``split[3]`` must be the train-function
    suffix (e.g. 'model02_1') and ``split[4]`` the config name. TODO confirm
    the expected path shape, e.g. './yml/<...>/<model02_1>/<config>.yml'.
    """
    # make work directory
    split = re.split('[/.]', args.yml)
    args.workdir = os.path.join(args.workdir, split[3])
    args.workdir = os.path.join(args.workdir, split[4])
    # Timestamped leaf directory so repeated runs never collide.
    args.workdir = os.path.join(args.workdir, get_date())
    os.makedirs(args.workdir, exist_ok=True)
    # set logger
    set_logger(args.workdir, args.level)
    # set seed
    set_seed(args.seed, logger)
    # get config
    yml = os.path.join(args.yml)
    config = get_config(yml, logger)
    model_config = config['model']
    dataset_config = config['dataset']
    train_config = config['train']
    # train: look up e.g. 'train_model02_1' in this module's globals and call it.
    train_model = globals().get('train_' + split[3])
    train_model(args, model_config, dataset_config, train_config, logger)
def get_date():
    """Return the current JST (UTC+9) time formatted as 'YYYYMMDD_HHMMSS'."""
    jst = datetime.timezone(datetime.timedelta(hours=9))
    now = datetime.datetime.now(jst)
    return now.strftime('%Y%m%d_%H%M%S')
def date_converter(*args):
    """logging.Formatter time converter: current JST time as a struct_time.

    The positional arguments supplied by logging (a timestamp) are ignored;
    the converter always reports "now" in UTC+9.
    """
    jst = datetime.timezone(datetime.timedelta(hours=9))
    return datetime.datetime.now(jst).timetuple()
def set_logger(workdir, level):
    """Attach a DEBUG file handler and a ``level`` stream handler to the module logger.

    Args:
        workdir: directory in which 'logger.log' is created.
        level: log level for the console handler and for the logger itself.
    """
    # make file handler (always DEBUG so the file keeps everything)
    filename = os.path.join(workdir, 'logger.log')
    f_formatter = Formatter('%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s')
    f_handler = FileHandler(filename)
    f_handler.setLevel(DEBUG)
    f_handler.setFormatter(f_formatter)
    # make stream handler
    s_formatter = Formatter('%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s')
    # Render console timestamps via the JST converter regardless of host timezone.
    s_formatter.converter = date_converter
    s_handler = StreamHandler()
    s_handler.setLevel(level)
    s_handler.setFormatter(s_formatter)
    # set logger
    logger.setLevel(level)
    logger.addHandler(f_handler)
    logger.addHandler(s_handler)
    # log
    logger.info(f'work dir : {workdir}')
    # BUG fix: the original f-string had no placeholder and logged the literal
    # text '(unknown)'; log the actual log-file path that was just computed.
    logger.info(f'log file : {filename}')
    logger.info(f'log level : {level}')
def parse_args():
    """Parse command-line options: RNG seed, output directory, log level and config path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=20200219)
    parser.add_argument('--workdir', type=str, default='./output')
    parser.add_argument('--level', type=str, default='DEBUG')
    parser.add_argument('--yml', type=str, default='')
    return parser.parse_args()
# Script entry point: parse CLI arguments and run the selected training routine.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 6,500 | 0 | 184 |
8f37b7ea9345ad4a07e5d162b73c9e9e1546fac1 | 718 | py | Python | Lec 4.py | MatthewMeisner/IA-241-GitHub | 3a79ce628443707e7a27402499e68b694f4889db | [
"MIT"
] | null | null | null | Lec 4.py | MatthewMeisner/IA-241-GitHub | 3a79ce628443707e7a27402499e68b694f4889db | [
"MIT"
] | null | null | null | Lec 4.py | MatthewMeisner/IA-241-GitHub | 3a79ce628443707e7a27402499e68b694f4889db | [
"MIT"
] | null | null | null | '''
tuple: is immutable once created (cannot append)
'''
#my_tuple = 'a','b','c','d','e'
#print(my_tuple)
#my_second_tuple = ('a','b','c','d','e')
#print(my_second_tuple)
#not_a_tuple = ('a')
#print( type(not_a_tuple) )
#a_tuple = ('a',)
#print( type( a_tuple) )
#print(my_tuple[1])
#print(my_second_tuple[1:3])
'''
Dictionary is a collection of key value pairs
with JSON files, treat as dictionary
'''
my_car = {
'color':'red',
'maker': 'toyota',
'year':2015
}
print(my_car.get('color'))
#or
print(my_car['maker'])
print(my_car.items())
print(my_car.values())
print(my_car.keys())
#adding new entry in dictionary
my_car['model']='corola'
print(my_car['model'])
print('year' in my_car) | 15.608696 | 48 | 0.639276 | '''
tuple: is immutable once created (cannot append)
'''
#my_tuple = 'a','b','c','d','e'
#print(my_tuple)
#my_second_tuple = ('a','b','c','d','e')
#print(my_second_tuple)
#not_a_tuple = ('a')
#print( type(not_a_tuple) )
#a_tuple = ('a',)
#print( type( a_tuple) )
#print(my_tuple[1])
#print(my_second_tuple[1:3])
'''
Dictionary is a collection of key value pairs
with JSON files, treat as dictionary
'''
my_car = {
'color':'red',
'maker': 'toyota',
'year':2015
}
print(my_car.get('color'))
#or
print(my_car['maker'])
print(my_car.items())
print(my_car.values())
print(my_car.keys())
#adding new entry in dictionary
my_car['model']='corola'
print(my_car['model'])
print('year' in my_car) | 0 | 0 | 0 |
a74971906e16c07d3e48cf75bf080d993341ba11 | 289 | py | Python | dict/fruits.py | janbodnar/Python-Course | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | [
"BSD-2-Clause"
] | 13 | 2017-08-22T12:26:07.000Z | 2021-07-29T16:13:50.000Z | dict/fruits.py | janbodnar/Python-Course | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | [
"BSD-2-Clause"
] | 1 | 2021-02-08T10:24:33.000Z | 2021-02-08T10:24:33.000Z | dict/fruits.py | janbodnar/Python-Course | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | [
"BSD-2-Clause"
] | 17 | 2018-08-13T11:10:33.000Z | 2021-07-29T16:14:02.000Z | #!/usr/bin/python
# fruits.py
basket = ('oranges', 'pears', 'apples', 'bananas')
fruits = {}.fromkeys(basket, 0)
print(fruits)
fruits['oranges'] = 12
fruits['pears'] = 8
fruits['apples'] = 4
print(fruits.setdefault('oranges', 11))
print(fruits.setdefault('kiwis', 11))
print(fruits)
| 16.055556 | 50 | 0.66436 | #!/usr/bin/python
# fruits.py
basket = ('oranges', 'pears', 'apples', 'bananas')
fruits = {}.fromkeys(basket, 0)
print(fruits)
fruits['oranges'] = 12
fruits['pears'] = 8
fruits['apples'] = 4
print(fruits.setdefault('oranges', 11))
print(fruits.setdefault('kiwis', 11))
print(fruits)
| 0 | 0 | 0 |
af5bd0c3c5f3f076337fd31d02be68d3eec14301 | 711 | py | Python | mcts/ucb.py | Nimor111/pazaak-python | 0a3e39230c401e7d9f9d030b2279e583d70f3340 | [
"Apache-2.0"
] | 1 | 2021-09-18T05:50:46.000Z | 2021-09-18T05:50:46.000Z | mcts/ucb.py | Nimor111/pazaak-python | 0a3e39230c401e7d9f9d030b2279e583d70f3340 | [
"Apache-2.0"
] | 3 | 2021-10-05T23:24:53.000Z | 2022-02-18T03:19:56.000Z | mcts/ucb.py | Nimor111/pazaak-python | 0a3e39230c401e7d9f9d030b2279e583d70f3340 | [
"Apache-2.0"
] | 2 | 2019-07-08T08:36:22.000Z | 2021-09-18T05:50:48.000Z | import math
from mcts.node import Node
| 22.935484 | 83 | 0.64135 | import math
from mcts.node import Node
def best_ucb(node: Node):
best = node
max_ucb = 0.0
for child in node.children:
current_ucb = calculate_ucb_value(
child.state.win_score,
child.state.visit_score,
child.parent.state.visit_score
)
if current_ucb >= max_ucb:
max_ucb = current_ucb
best = child
return best
def calculate_ucb_value(win_score: int, visit_score: int, parent_visit_score: int):
if visit_score == 0:
return float('inf')
win_ratio = win_score / visit_score
exploitation = math.sqrt(math.log(parent_visit_score) / visit_score)
return win_ratio + 1.41 * exploitation
| 623 | 0 | 46 |
60327ca20fd7e4e78757ad9a4f34fc090dfc6f81 | 920 | py | Python | Others/tenka1/tenka1-2018-beginner/c.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/tenka1/tenka1-2018-beginner/c.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/tenka1/tenka1-2018-beginner/c.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| 23 | 63 | 0.423913 | # -*- coding: utf-8 -*-
def main():
n = int(input())
a = sorted([int(input()) for _ in range(n)], reverse=True)
ans = 0
center = n // 2
# See:
# http://drken1215.hatenablog.com/entry/2018/10/28/222800
# KeyInsight
# a > b < c > d < e ...
# a < b > c < d > e ...の形で決め打ちしてよい
# それぞれの係数に着目する
if n % 2 == 0:
ans += sum(a[:center - 1]) * 2
ans += a[center - 1]
ans -= a[center]
ans -= sum(a[center + 1:]) * 2
else:
ans1 = 0
ans1 += sum(a[:center - 1]) * 2
ans1 += a[center - 1]
ans1 += a[center]
ans1 -= sum(a[center + 1:]) * 2
ans2 = 0
ans2 += sum(a[:center]) * 2
ans2 -= a[center]
ans2 -= a[center + 1]
ans2 -= sum(a[center + 2:]) * 2
ans = max(ans1, ans2)
print(ans)
if __name__ == '__main__':
main()
| 870 | 0 | 25 |
be7c7f87c186571c6e0843d3d4ae53ea3ca3e819 | 32 | py | Python | imagetagger/venv/lib/python3.7/hmac.py | wrigsa/ImageTagger | d8a7ab7d004eeda8deae423eccc7236c4accab47 | [
"MIT"
] | 8 | 2020-04-09T13:37:16.000Z | 2021-11-22T01:26:02.000Z | env/lib/python3.7/hmac.py | karenhao/Office-365-Flask-App-kudos | 95500ec55169f49eb281fff9905ad0b7f94d6fa3 | [
"MIT"
] | 23 | 2019-01-19T08:54:48.000Z | 2022-03-11T23:39:37.000Z | env/lib/python3.7/hmac.py | karenhao/Office-365-Flask-App-kudos | 95500ec55169f49eb281fff9905ad0b7f94d6fa3 | [
"MIT"
] | 5 | 2021-01-12T07:03:31.000Z | 2021-09-27T16:30:24.000Z | /anaconda3/lib/python3.7/hmac.py | 32 | 32 | 0.8125 | /anaconda3/lib/python3.7/hmac.py | 0 | 0 | 0 |
6a596eb1e7e7fe587078d89bead26e5ae9ae9b29 | 16,224 | py | Python | space_api/proto/server_pb2_grpc.py | AliabbasMerchant/space-api-python | e5f047d567540d503be7fe72e82f2b198e48b5f9 | [
"Apache-2.0"
] | 8 | 2019-04-02T06:06:45.000Z | 2019-11-12T16:53:26.000Z | space_api/proto/server_pb2_grpc.py | SaiprasadDuduka/space-api-python | 278ad650fa5579089a7ff465dbe74ec5469940ae | [
"Apache-2.0"
] | 28 | 2019-03-25T11:35:07.000Z | 2020-05-11T05:10:00.000Z | space_api/proto/server_pb2_grpc.py | SaiprasadDuduka/space-api-python | 278ad650fa5579089a7ff465dbe74ec5469940ae | [
"Apache-2.0"
] | 4 | 2019-03-22T17:09:22.000Z | 2019-10-24T17:10:43.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from space_api.proto import server_pb2 as server__pb2
| 41.922481 | 80 | 0.73872 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from space_api.proto import server_pb2 as server__pb2
class SpaceCloudStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/proto.SpaceCloud/Create',
request_serializer=server__pb2.CreateRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Read = channel.unary_unary(
'/proto.SpaceCloud/Read',
request_serializer=server__pb2.ReadRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Update = channel.unary_unary(
'/proto.SpaceCloud/Update',
request_serializer=server__pb2.UpdateRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Delete = channel.unary_unary(
'/proto.SpaceCloud/Delete',
request_serializer=server__pb2.DeleteRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Aggregate = channel.unary_unary(
'/proto.SpaceCloud/Aggregate',
request_serializer=server__pb2.AggregateRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Batch = channel.unary_unary(
'/proto.SpaceCloud/Batch',
request_serializer=server__pb2.BatchRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Call = channel.unary_unary(
'/proto.SpaceCloud/Call',
request_serializer=server__pb2.FunctionsRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.RealTime = channel.stream_stream(
'/proto.SpaceCloud/RealTime',
request_serializer=server__pb2.RealTimeRequest.SerializeToString,
response_deserializer=server__pb2.RealTimeResponse.FromString,
)
self.Service = channel.stream_stream(
'/proto.SpaceCloud/Service',
request_serializer=server__pb2.FunctionsPayload.SerializeToString,
response_deserializer=server__pb2.FunctionsPayload.FromString,
)
self.Profile = channel.unary_unary(
'/proto.SpaceCloud/Profile',
request_serializer=server__pb2.ProfileRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.Profiles = channel.unary_unary(
'/proto.SpaceCloud/Profiles',
request_serializer=server__pb2.ProfilesRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.EditProfile = channel.unary_unary(
'/proto.SpaceCloud/EditProfile',
request_serializer=server__pb2.EditProfileRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.SignIn = channel.unary_unary(
'/proto.SpaceCloud/SignIn',
request_serializer=server__pb2.SignInRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.SignUp = channel.unary_unary(
'/proto.SpaceCloud/SignUp',
request_serializer=server__pb2.SignUpRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.CreateFolder = channel.unary_unary(
'/proto.SpaceCloud/CreateFolder',
request_serializer=server__pb2.CreateFolderRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.ListFiles = channel.unary_unary(
'/proto.SpaceCloud/ListFiles',
request_serializer=server__pb2.ListFilesRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.DeleteFile = channel.unary_unary(
'/proto.SpaceCloud/DeleteFile',
request_serializer=server__pb2.DeleteFileRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.UploadFile = channel.stream_unary(
'/proto.SpaceCloud/UploadFile',
request_serializer=server__pb2.UploadFileRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.DownloadFile = channel.unary_stream(
'/proto.SpaceCloud/DownloadFile',
request_serializer=server__pb2.DownloadFileRequest.SerializeToString,
response_deserializer=server__pb2.FilePayload.FromString,
)
self.PubsubPublish = channel.unary_unary(
'/proto.SpaceCloud/PubsubPublish',
request_serializer=server__pb2.PubsubPublishRequest.SerializeToString,
response_deserializer=server__pb2.Response.FromString,
)
self.PubsubSubscribe = channel.stream_stream(
'/proto.SpaceCloud/PubsubSubscribe',
request_serializer=server__pb2.PubsubSubscribeRequest.SerializeToString,
response_deserializer=server__pb2.PubsubMsgResponse.FromString,
)
class SpaceCloudServicer(object):
# missing associated documentation comment in .proto file
pass
def Create(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Read(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Aggregate(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Batch(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Call(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RealTime(self, request_iterator, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Service(self, request_iterator, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Profile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Profiles(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EditProfile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SignIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SignUp(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateFolder(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFiles(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadFile(self, request_iterator, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DownloadFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PubsubPublish(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PubsubSubscribe(self, request_iterator, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SpaceCloudServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=server__pb2.CreateRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Read': grpc.unary_unary_rpc_method_handler(
servicer.Read,
request_deserializer=server__pb2.ReadRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=server__pb2.UpdateRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=server__pb2.DeleteRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Aggregate': grpc.unary_unary_rpc_method_handler(
servicer.Aggregate,
request_deserializer=server__pb2.AggregateRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Batch': grpc.unary_unary_rpc_method_handler(
servicer.Batch,
request_deserializer=server__pb2.BatchRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Call': grpc.unary_unary_rpc_method_handler(
servicer.Call,
request_deserializer=server__pb2.FunctionsRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'RealTime': grpc.stream_stream_rpc_method_handler(
servicer.RealTime,
request_deserializer=server__pb2.RealTimeRequest.FromString,
response_serializer=server__pb2.RealTimeResponse.SerializeToString,
),
'Service': grpc.stream_stream_rpc_method_handler(
servicer.Service,
request_deserializer=server__pb2.FunctionsPayload.FromString,
response_serializer=server__pb2.FunctionsPayload.SerializeToString,
),
'Profile': grpc.unary_unary_rpc_method_handler(
servicer.Profile,
request_deserializer=server__pb2.ProfileRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'Profiles': grpc.unary_unary_rpc_method_handler(
servicer.Profiles,
request_deserializer=server__pb2.ProfilesRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'EditProfile': grpc.unary_unary_rpc_method_handler(
servicer.EditProfile,
request_deserializer=server__pb2.EditProfileRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'SignIn': grpc.unary_unary_rpc_method_handler(
servicer.SignIn,
request_deserializer=server__pb2.SignInRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'SignUp': grpc.unary_unary_rpc_method_handler(
servicer.SignUp,
request_deserializer=server__pb2.SignUpRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'CreateFolder': grpc.unary_unary_rpc_method_handler(
servicer.CreateFolder,
request_deserializer=server__pb2.CreateFolderRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'ListFiles': grpc.unary_unary_rpc_method_handler(
servicer.ListFiles,
request_deserializer=server__pb2.ListFilesRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'DeleteFile': grpc.unary_unary_rpc_method_handler(
servicer.DeleteFile,
request_deserializer=server__pb2.DeleteFileRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'UploadFile': grpc.stream_unary_rpc_method_handler(
servicer.UploadFile,
request_deserializer=server__pb2.UploadFileRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'DownloadFile': grpc.unary_stream_rpc_method_handler(
servicer.DownloadFile,
request_deserializer=server__pb2.DownloadFileRequest.FromString,
response_serializer=server__pb2.FilePayload.SerializeToString,
),
'PubsubPublish': grpc.unary_unary_rpc_method_handler(
servicer.PubsubPublish,
request_deserializer=server__pb2.PubsubPublishRequest.FromString,
response_serializer=server__pb2.Response.SerializeToString,
),
'PubsubSubscribe': grpc.stream_stream_rpc_method_handler(
servicer.PubsubSubscribe,
request_deserializer=server__pb2.PubsubSubscribeRequest.FromString,
response_serializer=server__pb2.PubsubMsgResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'proto.SpaceCloud', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 10,427 | 5,588 | 69 |
f66aa9bd730be5a644b15b3cf2b8f915765ff9fd | 19,083 | py | Python | gluoncv/model_zoo/action_recognition/i3d_inceptionv1.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 5,447 | 2018-04-25T18:02:51.000Z | 2022-03-31T00:59:49.000Z | gluoncv/model_zoo/action_recognition/i3d_inceptionv1.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,566 | 2018-04-25T21:14:04.000Z | 2022-03-31T06:42:42.000Z | gluoncv/model_zoo/action_recognition/i3d_inceptionv1.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,345 | 2018-04-25T18:44:13.000Z | 2022-03-30T19:32:53.000Z | # pylint: disable=line-too-long,too-many-lines,missing-docstring,arguments-differ,unused-argument
__all__ = ['I3D_InceptionV1', 'i3d_inceptionv1_kinetics400']
from mxnet import nd
from mxnet import init
from mxnet.context import cpu
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
from mxnet.gluon.nn import BatchNorm
from mxnet.gluon.contrib.nn import HybridConcurrent
from gluoncv.model_zoo.googlenet import googlenet
class I3D_InceptionV1(HybridBlock):
r"""Inception v1 model from
`"Going Deeper with Convolutions"
<https://arxiv.org/abs/1409.4842>`_ paper.
Inflated 3D model (I3D) from
`"Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset"
<https://arxiv.org/abs/1705.07750>`_ paper.
Slight differences between this implementation and the original implementation due to padding.
Parameters
----------
nclass : int
Number of classes in the training dataset.
pretrained : bool or str.
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True.
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
dropout_ratio : float, default is 0.5.
The dropout rate of a dropout layer.
The larger the value, the more strength to prevent overfitting.
num_segments : int, default is 1.
Number of segments used to evenly divide a video.
num_crop : int, default is 1.
Number of crops used during evaluation, choices are 1, 3 or 10.
feat_ext : bool.
Whether to extract features before dense classification layer or
do a complete forward pass.
init_std : float, default is 0.001.
Standard deviation value when initialize the dense layers.
ctx : Context, default CPU.
The context in which to load the pretrained weights.
partial_bn : bool, default False.
Freeze all batch normalization layers during training except the first layer.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
"""
def i3d_inceptionv1_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
ctx=cpu(), root='~/.mxnet/models', use_tsn=False,
num_segments=1, num_crop=1, partial_bn=False,
feat_ext=False, **kwargs):
r"""Inception v1 model trained on Kinetics400 dataset from
`"Going Deeper with Convolutions"
<https://arxiv.org/abs/1409.4842>`_ paper.
Inflated 3D model (I3D) from
`"Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset"
<https://arxiv.org/abs/1705.07750>`_ paper.
Parameters
----------
nclass : int.
Number of categories in the dataset.
pretrained : bool or str.
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True.
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
ctx : Context, default CPU.
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
num_segments : int, default is 1.
Number of segments used to evenly divide a video.
num_crop : int, default is 1.
Number of crops used during evaluation, choices are 1, 3 or 10.
partial_bn : bool, default False.
Freeze all batch normalization layers during training except the first layer.
feat_ext : bool.
Whether to extract features before dense classification layer or
do a complete forward pass.
"""
model = I3D_InceptionV1(nclass=nclass,
partial_bn=partial_bn,
pretrained=pretrained,
pretrained_base=pretrained_base,
feat_ext=feat_ext,
num_segments=num_segments,
num_crop=num_crop,
dropout_ratio=0.5,
init_std=0.01,
ctx=ctx,
**kwargs)
if pretrained:
from ..model_store import get_model_file
model.load_parameters(get_model_file('i3d_inceptionv1_kinetics400',
tag=pretrained, root=root), ctx=ctx)
from ...data import Kinetics400Attr
attrib = Kinetics400Attr()
model.classes = attrib.classes
model.collect_params().reset_ctx(ctx)
return model
| 49.695313 | 160 | 0.590892 | # pylint: disable=line-too-long,too-many-lines,missing-docstring,arguments-differ,unused-argument
__all__ = ['I3D_InceptionV1', 'i3d_inceptionv1_kinetics400']
from mxnet import nd
from mxnet import init
from mxnet.context import cpu
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
from mxnet.gluon.nn import BatchNorm
from mxnet.gluon.contrib.nn import HybridConcurrent
from gluoncv.model_zoo.googlenet import googlenet
def _make_basic_conv(in_channels, channels, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
out = nn.HybridSequential(prefix='')
out.add(nn.Conv3D(in_channels=in_channels, channels=channels, use_bias=False, **kwargs))
out.add(norm_layer(in_channels=channels, epsilon=0.001, **({} if norm_kwargs is None else norm_kwargs)))
out.add(nn.Activation('relu'))
return out
def _make_branch(use_pool, norm_layer, norm_kwargs, *conv_settings):
out = nn.HybridSequential(prefix='')
if use_pool == 'avg':
out.add(nn.AvgPool3D(pool_size=3, strides=1, padding=1))
elif use_pool == 'max':
out.add(nn.MaxPool3D(pool_size=3, strides=1, padding=1))
setting_names = ['in_channels', 'channels', 'kernel_size', 'strides', 'padding']
for setting in conv_settings:
kwargs = {}
for i, value in enumerate(setting):
if value is not None:
if setting_names[i] == 'in_channels':
in_channels = value
elif setting_names[i] == 'channels':
channels = value
else:
kwargs[setting_names[i]] = value
out.add(_make_basic_conv(in_channels, channels, norm_layer, norm_kwargs, **kwargs))
return out
def _make_Mixed_3a(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
out = HybridConcurrent(axis=1, prefix=prefix)
with out.name_scope():
out.add(_make_branch(None, norm_layer, norm_kwargs,
(in_channels, 64, 1, None, None)))
out.add(_make_branch(None, norm_layer, norm_kwargs,
(in_channels, 96, 1, None, None),
(96, 128, 3, None, 1)))
out.add(_make_branch(None, norm_layer, norm_kwargs,
(in_channels, 16, 1, None, None),
(16, 32, 3, None, 1)))
out.add(_make_branch('max', norm_layer, norm_kwargs,
(in_channels, pool_features, 1, None, None)))
return out
def _make_Mixed_3b(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
out = HybridConcurrent(axis=1, prefix=prefix)
with out.name_scope():
out.add(_make_branch(None, norm_layer, norm_kwargs,
(in_channels, 128, 1, None, None)))
out.add(_make_branch(None, norm_layer, norm_kwargs,
(in_channels, 128, 1, None, None),
(128, 192, 3, None, 1)))
out.add(_make_branch(None, norm_layer, norm_kwargs,
(in_channels, 32, 1, None, None),
(32, 96, 3, None, 1)))
out.add(_make_branch('max', norm_layer, norm_kwargs,
(in_channels, pool_features, 1, None, None)))
return out
def _make_Mixed_4a(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_4a" block: 1x1x1 (192), 1x1x1->3x3x3 (96->208),
    1x1x1->3x3x3 (16->48) and max-pool->1x1x1 (pool_features) branches
    concatenated along the channel axis.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 96, 1, None, None),
                             (96, 208, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 16, 1, None, None),
                             (16, 48, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
def _make_Mixed_4b(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_4b" block: 1x1x1 (160), 1x1x1->3x3x3 (112->224),
    1x1x1->3x3x3 (24->64) and max-pool->1x1x1 (pool_features) branches
    concatenated along the channel axis.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 160, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 112, 1, None, None),
                             (112, 224, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 24, 1, None, None),
                             (24, 64, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
def _make_Mixed_4c(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_4c" block: 1x1x1 (128), 1x1x1->3x3x3 (128->256),
    1x1x1->3x3x3 (24->64) and max-pool->1x1x1 (pool_features) branches
    concatenated along the channel axis.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 128, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 128, 1, None, None),
                             (128, 256, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 24, 1, None, None),
                             (24, 64, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
def _make_Mixed_4d(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_4d" block: 1x1x1 (112), 1x1x1->3x3x3 (144->288),
    1x1x1->3x3x3 (32->64) and max-pool->1x1x1 (pool_features) branches
    concatenated along the channel axis.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 112, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 144, 1, None, None),
                             (144, 288, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 32, 1, None, None),
                             (32, 64, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
def _make_Mixed_4e(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_4e" block: 1x1x1 (256), 1x1x1->3x3x3 (160->320),
    1x1x1->3x3x3 (32->128) and max-pool->1x1x1 (pool_features) branches
    concatenated along the channel axis.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 256, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 160, 1, None, None),
                             (160, 320, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 32, 1, None, None),
                             (32, 128, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
def _make_Mixed_5a(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_5a" block.

    Branch widths (256 / 160->320 / 32->128 / pool_features) are identical to
    ``_make_Mixed_4e``; the two builders are kept separate so that each stage
    gets its own parameter prefix.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 256, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 160, 1, None, None),
                             (160, 320, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 32, 1, None, None),
                             (32, 128, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
def _make_Mixed_5b(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    """Inception "Mixed_5b" block: 1x1x1 (384), 1x1x1->3x3x3 (192->384),
    1x1x1->3x3x3 (48->128) and max-pool->1x1x1 (pool_features) branches
    concatenated along the channel axis.
    """
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 384, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None),
                             (192, 384, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 48, 1, None, None),
                             (48, 128, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
class I3D_InceptionV1(HybridBlock):
    r"""Inception v1 model from
    `"Going Deeper with Convolutions"
    <https://arxiv.org/abs/1409.4842>`_ paper.

    Inflated 3D model (I3D) from
    `"Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset"
    <https://arxiv.org/abs/1705.07750>`_ paper.

    Slight differences between this implementation and the original implementation due to padding.

    Parameters
    ----------
    nclass : int
        Number of classes in the training dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    dropout_ratio : float, default is 0.5.
        The dropout rate of a dropout layer.
        The larger the value, the more strength to prevent overfitting.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    init_std : float, default is 0.01.
        Standard deviation value when initialize the dense layers.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    partial_bn : bool, default False.
        Freeze all batch normalization layers during training except the first layer.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    def __init__(self, nclass=1000, pretrained=False, pretrained_base=True,
                 num_segments=1, num_crop=1, feat_ext=False,
                 dropout_ratio=0.5, init_std=0.01, partial_bn=False,
                 ctx=None, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(I3D_InceptionV1, self).__init__(**kwargs)
        self.num_segments = num_segments
        self.num_crop = num_crop
        self.feat_dim = 1024  # channel count after Mixed_5b + global pooling
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        self.feat_ext = feat_ext

        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')

            # Stem: 7x7x7/2 conv + spatial-only max pooling.
            self.features.add(_make_basic_conv(in_channels=3, channels=64, kernel_size=7, strides=2, padding=3, norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(nn.MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding=(0, 1, 1)))

            # partial_bn: freeze BN statistics for every layer created from
            # here on (only the stem BN above keeps updating its statistics).
            if partial_bn:
                if norm_kwargs is not None:
                    norm_kwargs['use_global_stats'] = True
                else:
                    norm_kwargs = {}
                    norm_kwargs['use_global_stats'] = True

            self.features.add(_make_basic_conv(in_channels=64, channels=64, kernel_size=1, norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(_make_basic_conv(in_channels=64, channels=192, kernel_size=3, padding=(1, 1, 1), norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(nn.MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding=(0, 1, 1)))

            # Inception stages; the first argument of each mixed block is the
            # channel count produced by the previous stage.
            self.features.add(_make_Mixed_3a(192, 32, 'Mixed_3a_', norm_layer, norm_kwargs))
            self.features.add(_make_Mixed_3b(256, 64, 'Mixed_3b_', norm_layer, norm_kwargs))
            self.features.add(nn.MaxPool3D(pool_size=3, strides=(2, 2, 2), padding=(1, 1, 1)))
            self.features.add(_make_Mixed_4a(480, 64, 'Mixed_4a_', norm_layer, norm_kwargs))
            self.features.add(_make_Mixed_4b(512, 64, 'Mixed_4b_', norm_layer, norm_kwargs))
            self.features.add(_make_Mixed_4c(512, 64, 'Mixed_4c_', norm_layer, norm_kwargs))
            self.features.add(_make_Mixed_4d(512, 64, 'Mixed_4d_', norm_layer, norm_kwargs))
            self.features.add(_make_Mixed_4e(528, 128, 'Mixed_4e_', norm_layer, norm_kwargs))
            self.features.add(nn.MaxPool3D(pool_size=2, strides=(2, 2, 2)))
            self.features.add(_make_Mixed_5a(832, 128, 'Mixed_5a_', norm_layer, norm_kwargs))
            self.features.add(_make_Mixed_5b(832, 128, 'Mixed_5b_', norm_layer, norm_kwargs))
            self.features.add(nn.GlobalAvgPool3D())

            # Classification head: dropout + dense output layer.
            self.head = nn.HybridSequential(prefix='')
            self.head.add(nn.Dropout(rate=self.dropout_ratio))
            self.output = nn.Dense(units=nclass, in_units=self.feat_dim, weight_initializer=init.Normal(sigma=self.init_std))
            self.head.add(self.output)

        self.features.initialize(ctx=ctx)
        self.head.initialize(ctx=ctx)

        # Inflate the 2D GoogLeNet weights into this 3D network: parameters of
        # both models are matched by enumeration order, conv kernels are
        # repeated along the new temporal axis (and divided by its length so
        # activations keep the same scale), batchnorm parameters are copied
        # verbatim and dense layers are left randomly initialized.
        if pretrained_base and not pretrained:
            inceptionv1_2d = googlenet(pretrained=True)
            weights2d = inceptionv1_2d.collect_params()
            weights3d = self.collect_params()
            assert len(weights2d.keys()) == len(weights3d.keys()), 'Number of parameters should be same.'

            dict2d = {}
            for key_id, key_name in enumerate(weights2d.keys()):
                dict2d[key_id] = key_name

            dict3d = {}
            for key_id, key_name in enumerate(weights3d.keys()):
                dict3d[key_id] = key_name

            dict_transform = {}
            for key_id, key_name in dict3d.items():
                dict_transform[dict2d[key_id]] = key_name

            cnt = 0
            for key2d, key3d in dict_transform.items():
                if 'conv' in key3d:
                    temporal_dim = weights3d[key3d].shape[2]
                    temporal_2d = nd.expand_dims(weights2d[key2d].data(), axis=2)
                    inflated_2d = nd.broadcast_to(temporal_2d, shape=[0, 0, temporal_dim, 0, 0]) / temporal_dim
                    assert inflated_2d.shape == weights3d[key3d].shape, 'the shape of %s and %s does not match. ' % (key2d, key3d)
                    weights3d[key3d].set_data(inflated_2d)
                    cnt += 1
                    print('%s is done with shape: ' % (key3d), weights3d[key3d].shape)
                if 'batchnorm' in key3d:
                    assert weights2d[key2d].shape == weights3d[key3d].shape, 'the shape of %s and %s does not match. ' % (key2d, key3d)
                    weights3d[key3d].set_data(weights2d[key2d].data())
                    cnt += 1
                    print('%s is done with shape: ' % (key3d), weights3d[key3d].shape)
                if 'dense' in key3d:
                    cnt += 1
                    print('%s is skipped with shape: ' % (key3d), weights3d[key3d].shape)

            assert cnt == len(weights2d.keys()), 'Not all parameters have been ported, check the initialization.'

    def hybrid_forward(self, F, x):
        """Forward pass: features -> segment averaging -> (optional) head."""
        x = self.features(x)
        # Global pooling leaves singleton T/H/W axes; drop them.
        x = F.squeeze(x, axis=(2, 3, 4))
        # segmental consensus: average the per-segment/per-crop features.
        x = F.reshape(x, shape=(-1, self.num_segments * self.num_crop, self.feat_dim))
        x = F.mean(x, axis=1)

        if self.feat_ext:
            return x

        x = self.head(x)
        return x
def i3d_inceptionv1_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                ctx=cpu(), root='~/.mxnet/models', use_tsn=False,
                                num_segments=1, num_crop=1, partial_bn=False,
                                feat_ext=False, **kwargs):
    r"""Inception v1 model trained on Kinetics400 dataset from
    `"Going Deeper with Convolutions"
    <https://arxiv.org/abs/1409.4842>`_ paper.

    Inflated 3D model (I3D) from
    `"Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset"
    <https://arxiv.org/abs/1705.07750>`_ paper.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    partial_bn : bool, default False.
        Freeze all batch normalization layers during training except the first layer.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    # NOTE(review): `use_tsn` is accepted but never referenced in this
    # function body - confirm whether it should influence construction.
    model = I3D_InceptionV1(nclass=nclass,
                            partial_bn=partial_bn,
                            pretrained=pretrained,
                            pretrained_base=pretrained_base,
                            feat_ext=feat_ext,
                            num_segments=num_segments,
                            num_crop=num_crop,
                            dropout_ratio=0.5,
                            init_std=0.01,
                            ctx=ctx,
                            **kwargs)

    if pretrained:
        # Fetch the published checkpoint and attach the Kinetics400 class
        # names so predictions can be mapped back to labels.
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('i3d_inceptionv1_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)

    return model
| 13,559 | 0 | 306 |
caa1e1e25e6e9f3701a1b9d1373b00bdbfded305 | 1,844 | py | Python | dags/twitter/get_users_info.py | Joaoluislins/algotrader | 94060531c64322203ce59e390836f1ecd25b4955 | [
"MIT"
] | null | null | null | dags/twitter/get_users_info.py | Joaoluislins/algotrader | 94060531c64322203ce59e390836f1ecd25b4955 | [
"MIT"
] | null | null | null | dags/twitter/get_users_info.py | Joaoluislins/algotrader | 94060531c64322203ce59e390836f1ecd25b4955 | [
"MIT"
] | null | null | null | from twitter.twitter_api import TwitterAPI
import logging
import requests
from dotenv import load_dotenv
from os import getenv
load_dotenv('/opt/airflow/aws_twi_env/.env')
# Creating a specific class to interact with get_users_id endpoint.
logger = logging.getLogger(__name__)
logging.basicConfig(level = logging.INFO)
# Specify the usernames that you want to lookup below
# You can enter up to 100 comma-separated values.
# User fields are adjustable, options include:
# created_at, description, entities, id, location, name,
# pinned_tweet_id, profile_image_url, protected,
# public_metrics, url, username, verified, and withheld
| 34.792453 | 99 | 0.706074 | from twitter.twitter_api import TwitterAPI
import logging
import requests
from dotenv import load_dotenv
from os import getenv
load_dotenv('/opt/airflow/aws_twi_env/.env')
# Creating a specific class to interact with get_users_id endpoint.
logger = logging.getLogger(__name__)
logging.basicConfig(level = logging.INFO)
class GetUsersInfo(TwitterAPI):
    """Client for the Twitter ``users/by`` lookup endpoint.

    Builds a request for up to 100 comma-separated usernames and returns the
    matching user objects.  Requested user fields are ``description``,
    ``created_at`` and ``id``.
    """

    def __init__(self, names: list, **kwargs) -> None:
        """Store usernames and endpoint configuration.

        Parameters
        ----------
        names : list
            Twitter handles to look up (the API accepts up to 100).
        """
        self.names = names
        self.usernames = "usernames={}".format(','.join(self.names))
        self.type_of_endpoint = 'users/by?{}&{}'  # Specific to get users_id
        # FIX: drop the stray chained local assignments the original had
        # (``self.user_fields = user_fields = ...``) - the locals were unused.
        self.user_fields = "user.fields=description,created_at,id"
        self.bearer_token = getenv('BEARER_TOKEN')
        super().__init__(**kwargs)

    def _get_endpoint(self) -> str:
        """Return the fully formatted request URL.

        The f-string first substitutes ``base_endpoint``/``type_of_endpoint``;
        the ``{}`` placeholders inside ``type_of_endpoint`` are then filled by
        ``.format()`` with the usernames and user-field query parameters.
        """
        return f"{self.base_endpoint}/{self.type_of_endpoint}".format(self.usernames, self.user_fields)

    def connect_to_endpoint(self, url):
        """GET ``url`` with bearer authentication and return the JSON payload.

        Raises
        ------
        Exception
            If the API responds with a non-200 status code.
        """
        response = requests.request("GET", url, auth=self.bearer_oauth)
        # FIX: use the module logger instead of a bare debug print().
        logger.info("endpoint responded with status code %s", response.status_code)
        if response.status_code != 200:
            raise Exception(
                "Request returned an error: {} {}".format(
                    response.status_code, response.text
                )
            )
        return response.json()

    def get_data(self, **kwargs) -> dict:
        """Fetch and return the user info for the configured usernames."""
        url = self._get_endpoint()
        logger.info(f"getting data from endpoint: {url}")
        response = self.connect_to_endpoint(url)
        return response
6d7961e4e1da6c70f304d9854e653e727f2a6efe | 3,548 | py | Python | infogain/artefact/annotation.py | Kieran-Bacon/InfoGain | 621ccd111d474f96f0ba19a8972821becea0c5db | [
"Apache-2.0"
] | 1 | 2019-10-14T00:49:04.000Z | 2019-10-14T00:49:04.000Z | infogain/artefact/annotation.py | Kieran-Bacon/InfoGain | 621ccd111d474f96f0ba19a8972821becea0c5db | [
"Apache-2.0"
] | 2 | 2018-06-12T12:46:35.000Z | 2019-02-22T10:52:15.000Z | infogain/artefact/annotation.py | Kieran-Bacon/InfoGain | 621ccd111d474f96f0ba19a8972821becea0c5db | [
"Apache-2.0"
] | null | null | null | import weakref
from .entity import Entity
| 30.586207 | 104 | 0.625141 | import weakref
from .entity import Entity
class Annotation:
    """A proposed, named relationship between a domain and a target entity.

    An annotation may carry a prediction (``classification`` plus
    ``confidence``) and, once adopted by an owning document, the character
    spans of the text surrounding the relation.
    """

    # Possible classification outcomes for an annotation.
    POSITIVE = 1
    INSUFFICIENT = 0
    NEGATIVE = -1

    # Human readable names for the outcomes, used by __repr__.
    _CLASSMAPPER = {
        POSITIVE: "POSITIVE",
        INSUFFICIENT: "INSUFFICIENT",
        NEGATIVE: "NEGATIVE"
    }

    def __init__(
        self,
        domain: Entity,
        name: str,
        target: Entity,
        *,
        classification: int = None,
        confidence: float = 1.
    ):
        self.domain = domain
        self._name = name
        self.target = target
        self.confidence = confidence

        # Route through the property setter only when a value was given, so
        # an unclassified annotation is representable as None.
        if classification is None:
            self._classification = None
        else:
            self.classification = classification

        self._contextOwner = None
        self._context = None
        self._embedding = None

    def __repr__(self):
        prediction = ''
        if self.classification is not None:
            prediction = " {} {:.0%}".format(self._CLASSMAPPER[self.classification], self.confidence)

        return "<Annotation: {} {} {}{}>".format(self.domain, self.name, self.target, prediction)

    @property
    def domain(self):
        """Entity acting as the subject of the relation."""
        return self._domain

    @domain.setter
    def domain(self, entity):
        if isinstance(entity, Entity):
            self._domain = entity
        else:
            raise ValueError("Annotation domain must be an Entity not '{}'".format(type(entity)))

    @property
    def name(self):
        """Name of the relation (read-only)."""
        return self._name

    @property
    def target(self):
        """Entity acting as the object of the relation."""
        return self._target

    @target.setter
    def target(self, entity):
        if isinstance(entity, Entity):
            self._target = entity
        else:
            raise ValueError("Annotation target must be an Entity not '{}'".format(type(entity)))

    @property
    def confidence(self):
        """Prediction confidence in [0, 1]."""
        return self._confidence

    @confidence.setter
    def confidence(self, value):
        # FIX: accept any real number in [0, 1] (the original rejected the
        # ints 0 and 1 even though the default is 1.); bool is excluded so
        # True/False are not silently treated as confidences.
        if isinstance(value, (int, float)) and not isinstance(value, bool) and 0. <= value <= 1.:
            self._confidence = float(value)
        else:
            raise ValueError("Invalid confidence set on annotation '{}'".format(value))

    @property
    def classification(self):
        """One of POSITIVE / INSUFFICIENT / NEGATIVE, or None if unset."""
        return self._classification

    @classification.setter
    def classification(self, classification: int):
        for classtype in (self.POSITIVE, self.INSUFFICIENT, self.NEGATIVE):
            if classification == classtype:
                self._classification = classtype
                break
        else:
            # FIX: report the value the caller provided - the original
            # formatted the loop variable, which always ended up as NEGATIVE.
            raise TypeError("Provided classification class was not a valid type '{}'".format(classification))

    @property
    def _owner(self):
        """Dereferenced owning document, or None when unowned/collected."""
        return self._contextOwner() if self._contextOwner is not None else None

    @_owner.setter
    def _owner(self, owner: weakref.ref):
        # Adopting a new owner invalidates any previously derived context.
        self._contextOwner = owner
        self._context = None
        self._embedding = None

    @property
    def context(self):
        """The (before, during, after) text spans from the owner's content."""
        if self._owner is None:
            return None
        return (
            self._owner.content[slice(*self._context[0])].strip(),
            self._owner.content[slice(*self._context[1])].strip(),
            self._owner.content[slice(*self._context[2])].strip()
        )

    @context.setter
    def context(self, context: ((int, int))):
        if self._context is not None:
            raise ValueError("Cannot edit context of an annotation - determined by the owning document")
        self._context = context

    @property
    def embedding(self):
        """Numeric embedding of the context, or None when unset."""
        return self._embedding

    @embedding.setter
    def embedding(self, embeddings: ([int])):
        if self._context is None:
            raise RuntimeError("Cannot set embeddings for annotation context as context is not set")
        self._embedding = embeddings
| 2,592 | 890 | 23 |
0352c5b61ced5185479bf7de6e1d37bfbb45eb57 | 410 | py | Python | 05-neural-network-weather-forecast/model_archi.py | alitourani/computational-intelligence-class-9801 | 1d32fc4cd86156b7651a6497956d007655ef1688 | [
"MIT"
] | 4 | 2019-10-11T08:07:15.000Z | 2020-01-20T19:07:58.000Z | 05-neural-network-weather-forecast/model_archi.py | alitourani/computational-intelligence-class-9801 | 1d32fc4cd86156b7651a6497956d007655ef1688 | [
"MIT"
] | 1 | 2022-03-02T07:02:21.000Z | 2022-03-02T07:02:21.000Z | 05-neural-network-weather-forecast/model_archi.py | alitourani/computational-intelligence-class-9801 | 1d32fc4cd86156b7651a6497956d007655ef1688 | [
"MIT"
] | 34 | 2019-12-17T14:42:39.000Z | 2020-05-14T08:08:19.000Z | import keras
from keras.models import Sequential
from keras.layers import Dense
| 24.117647 | 63 | 0.629268 | import keras
from keras.models import Sequential
from keras.layers import Dense
class model_archi:
    """Factory for a small fully-connected Keras classifier."""

    def __init__(self):
        pass

    @staticmethod
    def build(input, classes):
        """Return an uncompiled ``Sequential`` dense network.

        FIX: the original ``build`` had neither ``self`` nor
        ``@staticmethod``, so calling it on an instance mis-bound the first
        argument; ``model_archi.build(...)`` class-level calls are unchanged.

        Parameters
        ----------
        input : int
            Number of input features (``input_dim`` of the first layer).
        classes : int
            Number of output classes (width of the softmax layer).
        """
        # 16 -> 12 -> classes dense stack with ReLU hidden activations.
        model = Sequential()
        model.add(Dense(16, input_dim=input, activation='relu'))
        model.add(Dense(12, activation='relu'))
        model.add(Dense(classes, activation='softmax'))
        return model
| 249 | -3 | 79 |
c18f654c05cd47c5118a08ee7b95714385e80941 | 2,016 | py | Python | tests/algorithms/test_models.py | astaolaf/glenoidplanefitting | e8e8c7c1a15784a9404da046c08c48fe6216d5ee | [
"BSD-3-Clause"
] | null | null | null | tests/algorithms/test_models.py | astaolaf/glenoidplanefitting | e8e8c7c1a15784a9404da046c08c48fe6216d5ee | [
"BSD-3-Clause"
] | 8 | 2021-07-21T13:01:31.000Z | 2021-08-12T11:00:02.000Z | tests/algorithms/test_models.py | SciKit-Surgery/glenoidplanefitting | 3932183a38c94858232ed663f1a6dc88c2ee452b | [
"BSD-3-Clause"
] | 1 | 2021-07-21T13:53:59.000Z | 2021-07-21T13:53:59.000Z | """
Unit tests for the Friedman module
"""
import math
import numpy as np
import vtk
import glenoidplanefitting.algorithms.models as mdl
def test_make_plane_model():
    """
    Tests that make_plane_model returns a plane centred on the
    plane centre with the correct normal vector
    """
    plane_centre = [1.0, 3.0, 5.0]
    plane_normal = [7.0, 11.0, 13.0]
    plane_size = 200.0
    plane_resolution = 20
    plane = mdl.make_plane_model(plane_centre, plane_normal,
                                 plane_resolution, plane_size)
    assert isinstance(plane, vtk.vtkPlaneSource)  # pylint:disable=no-member
    assert np.array_equal(np.array(plane.GetCenter()),
                          np.array(plane_centre))
    # GetNormal() is normalised, so scale it back up by the requested
    # normal's magnitude before comparing direction and length.
    denormalised_normal = np.linalg.norm(np.array(plane_normal)) \
        * np.array(plane.GetNormal())
    assert np.allclose(denormalised_normal, np.array(plane_normal))
    assert plane.GetXResolution() == plane_resolution
    assert plane.GetYResolution() == plane_resolution
    # Distance between Point1 and Point2 should be the diagonal of a
    # plane_size x plane_size square.
    actual_plane_size = np.linalg.norm(np.array(plane.GetPoint1()) -
                                       np.array(plane.GetPoint2()))
    expected_plane_size = math.sqrt(2 * (plane_size * plane_size))
    assert math.isclose(actual_plane_size, expected_plane_size)
def test_friedman_model():
    """Check the Friedman model is a vtkLineSource spanning the two input points."""
    start, end = (2.0, 3.0, 5.0), (7.0, 11.0, 13.0)
    friedman_line = mdl.make_friedman_model(start, end)
    assert isinstance(friedman_line, vtk.vtkLineSource)  # pylint:disable=no-member
    assert (friedman_line.GetPoint1(), friedman_line.GetPoint2()) == (start, end)
def test_vault_model():
    """Check the vault model is a vtkLineSource spanning the two input points."""
    start, end = (1.0, 1.0, 2.0), (3.0, 5.0, 8.0)
    vault_line = mdl.make_vault_model(start, end)
    assert isinstance(vault_line, vtk.vtkLineSource)  # pylint:disable=no-member
    assert (vault_line.GetPoint1(), vault_line.GetPoint2()) == (start, end)
| 30.089552 | 74 | 0.66369 | """
Unit tests for the Friedman module
"""
import math
import numpy as np
import vtk
import glenoidplanefitting.algorithms.models as mdl
def test_make_plane_model():
    """
    Tests that make_plane_model returns a plane centred on the
    plane centre with the correct normal vector
    """
    plane_centre = [1.0, 3.0, 5.0]
    plane_normal = [7.0, 11.0, 13.0]
    plane_size = 200.0
    plane_resolution = 20
    plane = mdl.make_plane_model(plane_centre, plane_normal,
                                 plane_resolution, plane_size)
    assert isinstance(plane, vtk.vtkPlaneSource)  # pylint:disable=no-member
    assert np.array_equal(np.array(plane.GetCenter()),
                          np.array(plane_centre))
    # GetNormal() is normalised, so rescale by the requested normal's
    # magnitude before comparing direction and length.
    denormalised_normal = np.linalg.norm(np.array(plane_normal)) \
        * np.array(plane.GetNormal())
    assert np.allclose(denormalised_normal, np.array(plane_normal))
    assert plane.GetXResolution() == plane_resolution
    assert plane.GetYResolution() == plane_resolution
    # Point1->Point2 distance should equal the diagonal of a
    # plane_size x plane_size square.
    actual_plane_size = np.linalg.norm(np.array(plane.GetPoint1()) -
                                       np.array(plane.GetPoint2()))
    expected_plane_size = math.sqrt(2 * (plane_size * plane_size))
    assert math.isclose(actual_plane_size, expected_plane_size)
def test_friedman_model():
    """Tests that make Friedman model returns the appropriate line"""
    point1 = (2.0, 3.0, 5.0)
    point2 = (7.0, 11.0, 13.0)
    line = mdl.make_friedman_model(point1, point2)
    # The model should be a VTK line source anchored exactly at the inputs.
    assert isinstance(line, vtk.vtkLineSource)  # pylint:disable=no-member
    assert line.GetPoint1() == point1
    assert line.GetPoint2() == point2
def test_vault_model():
    """Tests that make vault model returns the appropriate line"""
    point1 = (1.0, 1.0, 2.0)
    point2 = (3.0, 5.0, 8.0)
    line = mdl.make_vault_model(point1, point2)
    # The model should be a VTK line source anchored exactly at the inputs.
    assert isinstance(line, vtk.vtkLineSource)  # pylint:disable=no-member
    assert line.GetPoint1() == point1
    assert line.GetPoint2() == point2
| 0 | 0 | 0 |
8a7112d43443d063dbf8104575b1273ae3f096a9 | 24,594 | py | Python | arviz/tests/base_tests/test_stats.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | 1 | 2019-10-12T21:53:11.000Z | 2019-10-12T21:53:11.000Z | arviz/tests/base_tests/test_stats.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | arviz/tests/base_tests/test_stats.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=redefined-outer-name, no-member
from copy import deepcopy
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from scipy.stats import linregress
from xarray import DataArray, Dataset
from ...data import concat, convert_to_inference_data, from_dict, load_arviz_data
from ...rcparams import rcParams
from ...stats import (
apply_test_function,
compare,
ess,
hdi,
loo,
loo_pit,
psislw,
r2_score,
summary,
waic,
)
from ...stats.stats import _gpinv
from ...stats.stats_utils import get_log_likelihood
from ..helpers import check_multiple_attrs, multidim_models # pylint: disable=unused-import
rcParams["data.load"] = "eager"
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("multidim", [True, False])
@pytest.mark.parametrize("ic", ["loo", "waic"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("ic", ["loo", "waic"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("var_names_expected", ((None, 10), ("mu", 1), (["mu", "tau"], 2)))
# Column names expected in summary() output; the first four are the "stats"
# subset and the remainder the "diagnostics" subset (see the parametrization
# that slices METRICS_NAMES[:4] / METRICS_NAMES[4:]).
METRICS_NAMES = [
    "mean",
    "sd",
    "hdi_3%",
    "hdi_97%",
    "mcse_mean",
    "mcse_sd",
    "ess_mean",
    "ess_sd",
    "ess_bulk",
    "ess_tail",
    "r_hat",
]
@pytest.mark.parametrize(
"params",
(("all", METRICS_NAMES), ("stats", METRICS_NAMES[:4]), ("diagnostics", METRICS_NAMES[4:])),
)
@pytest.mark.parametrize("fmt", ["wide", "long", "xarray"])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("origin", [0, 1, 2, 3])
@pytest.mark.parametrize(
"stat_funcs", [[np.var], {"var": np.var, "var2": lambda x: np.var(x) ** 2}]
)
@pytest.mark.parametrize("fmt", [1, "bad_fmt"])
@pytest.mark.parametrize("order", [1, "bad_order"])
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("multidim", (True, False))
def test_waic(centered_eight, multidim_models, scale, multidim):
    """Test widely applicable information criterion calculation."""
    # waic() must return a result on every scale; the pointwise variant must
    # additionally expose the per-observation "waic_i" values.
    if multidim:
        assert waic(multidim_models.model_1, scale=scale) is not None
        waic_pointwise = waic(multidim_models.model_1, pointwise=True, scale=scale)
    else:
        assert waic(centered_eight, scale=scale) is not None
        waic_pointwise = waic(centered_eight, pointwise=True, scale=scale)
    assert waic_pointwise is not None
    assert "waic_i" in waic_pointwise
def test_waic_bad(centered_eight):
    """Test that waic raises when log likelihood data is missing."""
    centered_eight = deepcopy(centered_eight)
    del centered_eight.sample_stats["log_likelihood"]
    with pytest.raises(TypeError):
        waic(centered_eight)

    # Removing the whole sample_stats group must fail the same way.
    del centered_eight.sample_stats
    with pytest.raises(TypeError):
        waic(centered_eight)
def test_waic_bad_scale(centered_eight):
    """Test that waic rejects an unrecognised scale value with a TypeError."""
    with pytest.raises(TypeError):
        waic(centered_eight, scale="bad_value")
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("multidim", (True, False))
def test_loo(centered_eight, multidim_models, scale, multidim):
    """Test approximate leave one out criterion calculation"""
    # pointwise=True must additionally expose the per-observation ELPD
    # values, the Pareto shape diagnostics and the scale used.
    if multidim:
        assert loo(multidim_models.model_1, scale=scale) is not None
        loo_pointwise = loo(multidim_models.model_1, pointwise=True, scale=scale)
    else:
        assert loo(centered_eight, scale=scale) is not None
        loo_pointwise = loo(centered_eight, pointwise=True, scale=scale)
    assert loo_pointwise is not None
    assert "loo_i" in loo_pointwise
    assert "pareto_k" in loo_pointwise
    assert "loo_scale" in loo_pointwise
def test_loo_bad_scale(centered_eight):
    """Test loo with bad scale value."""
    with pytest.raises(TypeError):
        loo(centered_eight, scale="bad_scale")
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("probs", [True, False])
@pytest.mark.parametrize("kappa", [-1, -0.5, 1e-30, 0.5, 1])
@pytest.mark.parametrize("sigma", [0, 2])
@pytest.mark.parametrize("func", [loo, waic])
@pytest.mark.parametrize(
"args",
[
{"y": "obs"},
{"y": "obs", "y_hat": "obs"},
{"y": "arr", "y_hat": "obs"},
{"y": "obs", "y_hat": "arr"},
{"y": "arr", "y_hat": "arr"},
{"y": "obs", "y_hat": "obs", "log_weights": "arr"},
{"y": "arr", "y_hat": "obs", "log_weights": "arr"},
{"y": "obs", "y_hat": "arr", "log_weights": "arr"},
{"idata": False},
],
)
@pytest.mark.parametrize(
"args",
[
{"y": "y"},
{"y": "y", "y_hat": "y"},
{"y": "arr", "y_hat": "y"},
{"y": "y", "y_hat": "arr"},
{"y": "arr", "y_hat": "arr"},
{"y": "y", "y_hat": "y", "log_weights": "arr"},
{"y": "arr", "y_hat": "y", "log_weights": "arr"},
{"y": "y", "y_hat": "arr", "log_weights": "arr"},
{"idata": False},
],
)
@pytest.mark.parametrize("input_type", ["idataarray", "idatanone_ystr", "yarr_yhatnone"])
def test_loo_pit_bad_input(centered_eight, input_type):
    """Test incompatible input combinations."""
    arr = np.random.random((8, 200))
    if input_type == "idataarray":
        # idata must be InferenceData or None, never a raw array.
        with pytest.raises(ValueError, match=r"type InferenceData or None"):
            loo_pit(idata=arr, y="obs")
    elif input_type == "idatanone_ystr":
        # Without idata, all of y/y_hat/log_weights must be arrays.
        with pytest.raises(ValueError, match=r"all 3.+must be array or DataArray"):
            loo_pit(idata=None, y="obs")
    elif input_type == "yarr_yhatnone":
        # y_hat may only be omitted when y is a variable-name string.
        with pytest.raises(ValueError, match=r"y_hat.+None.+y.+str"):
            loo_pit(idata=centered_eight, y=arr, y_hat=None)
@pytest.mark.parametrize("arg", ["y", "y_hat", "log_weights"])
def test_loo_pit_bad_input_type(centered_eight, arg):
    """Test wrong input type (not None, str nor DataArray)."""
    kwargs = {"y": "obs", "y_hat": "obs", "log_weights": None}
    kwargs[arg] = 2  # use int instead of array-like
    with pytest.raises(ValueError, match="not {}".format(type(2))):
        loo_pit(idata=centered_eight, **kwargs)
@pytest.mark.parametrize("incompatibility", ["y-y_hat1", "y-y_hat2", "y_hat-log_weights"])
def test_loo_pit_bad_input_shape(incompatibility):
    """Test shape incompatibilities."""
    y = np.random.random(8)
    y_hat = np.random.random((8, 200))
    log_weights = np.random.random((8, 200))
    if incompatibility == "y-y_hat1":
        # y_hat must have exactly one extra (sample) dimension over y.
        with pytest.raises(ValueError, match="1 more dimension"):
            loo_pit(y=y, y_hat=y_hat[None, :], log_weights=log_weights)
    elif incompatibility == "y-y_hat2":
        # Leading dimensions of y and y_hat must agree.
        with pytest.raises(ValueError, match="y has shape"):
            loo_pit(y=y, y_hat=y_hat[1:3, :], log_weights=log_weights)
    elif incompatibility == "y_hat-log_weights":
        # y_hat and log_weights must match exactly.
        with pytest.raises(ValueError, match="must have the same shape"):
            loo_pit(y=y, y_hat=y_hat[:, :100], log_weights=log_weights)
@pytest.mark.parametrize("pointwise", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"kwargs",
[
{},
{"group": "posterior_predictive", "var_names": {"posterior_predictive": "obs"}},
{"group": "observed_data", "var_names": {"both": "obs"}, "out_data_shape": "shape"},
{"var_names": {"both": "obs", "posterior": ["theta", "mu"]}},
{"group": "observed_data", "out_name_data": "T_name"},
],
)
def test_apply_test_function(centered_eight, pointwise, inplace, kwargs):
"""Test some usual call cases of apply_test_function"""
centered_eight = deepcopy(centered_eight)
group = kwargs.get("group", "both")
var_names = kwargs.get("var_names", None)
out_data_shape = kwargs.get("out_data_shape", None)
out_pp_shape = kwargs.get("out_pp_shape", None)
out_name_data = kwargs.get("out_name_data", "T")
if out_data_shape == "shape":
out_data_shape = (8,) if pointwise else ()
if out_pp_shape == "shape":
out_pp_shape = (4, 500, 8) if pointwise else (4, 500)
idata = deepcopy(centered_eight)
idata_out = apply_test_function(
idata,
lambda y, theta: np.mean(y),
group=group,
var_names=var_names,
pointwise=pointwise,
out_name_data=out_name_data,
out_data_shape=out_data_shape,
out_pp_shape=out_pp_shape,
)
if inplace:
assert idata is idata_out
if group == "both":
test_dict = {"observed_data": ["T"], "posterior_predictive": ["T"]}
else:
test_dict = {group: [kwargs.get("out_name_data", "T")]}
fails = check_multiple_attrs(test_dict, idata_out)
assert not fails
def test_apply_test_function_bad_group(centered_eight):
    """Passing an unknown group name must raise a ValueError."""

    def identity(y, theta):
        return y

    with pytest.raises(ValueError, match="Invalid group argument"):
        apply_test_function(centered_eight, identity, group="bad_group")
def test_apply_test_function_missing_group():
    """group="both" requires a posterior_predictive group to be present.

    An InferenceData holding only posterior and observed_data groups must be
    rejected with an informative ValueError.
    """
    posterior_draws = {"a": np.random.random((4, 500, 30))}
    observations = {"y": np.random.random(30)}
    data = from_dict(posterior=posterior_draws, observed_data=observations)
    with pytest.raises(ValueError, match="must have posterior_predictive"):
        apply_test_function(data, lambda y, theta: np.mean, group="both")
def test_apply_test_function_should_overwrite_error(centered_eight):
    """Writing the statistic over an existing variable without overwrite fails."""

    def identity(y, theta):
        return y

    with pytest.raises(ValueError, match="Should overwrite"):
        apply_test_function(centered_eight, identity, out_name_data="obs")
| 36.872564 | 98 | 0.678783 | # pylint: disable=redefined-outer-name, no-member
from copy import deepcopy
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from scipy.stats import linregress
from xarray import DataArray, Dataset
from ...data import concat, convert_to_inference_data, from_dict, load_arviz_data
from ...rcparams import rcParams
from ...stats import (
apply_test_function,
compare,
ess,
hdi,
loo,
loo_pit,
psislw,
r2_score,
summary,
waic,
)
from ...stats.stats import _gpinv
from ...stats.stats_utils import get_log_likelihood
from ..helpers import check_multiple_attrs, multidim_models # pylint: disable=unused-import
rcParams["data.load"] = "eager"
@pytest.fixture(scope="session")
def centered_eight():
centered_eight = load_arviz_data("centered_eight")
return centered_eight
@pytest.fixture(scope="session")
def non_centered_eight():
non_centered_eight = load_arviz_data("non_centered_eight")
return non_centered_eight
def test_hdp():
normal_sample = np.random.randn(5000000)
interval = hdi(normal_sample)
assert_array_almost_equal(interval, [-1.88, 1.88], 2)
def test_hdp_2darray():
normal_sample = np.random.randn(12000, 5)
result = hdi(normal_sample)
assert result.shape == (5, 2)
def test_hdi_multidimension():
normal_sample = np.random.randn(12000, 10, 3)
result = hdi(normal_sample)
assert result.shape == (3, 2)
def test_hdi_idata(centered_eight):
data = centered_eight.posterior
result = hdi(data)
assert isinstance(result, Dataset)
assert dict(result.dims) == {"school": 8, "hdi": 2}
result = hdi(data, input_core_dims=[["chain"]])
assert isinstance(result, Dataset)
assert result.dims == {"draw": 500, "hdi": 2, "school": 8}
def test_hdi_idata_varnames(centered_eight):
data = centered_eight.posterior
result = hdi(data, var_names=["mu", "theta"])
assert isinstance(result, Dataset)
assert result.dims == {"hdi": 2, "school": 8}
assert list(result.data_vars.keys()) == ["mu", "theta"]
def test_hdi_idata_group(centered_eight):
result_posterior = hdi(centered_eight, group="posterior", var_names="mu")
result_prior = hdi(centered_eight, group="prior", var_names="mu")
assert result_prior.dims == {"hdi": 2}
range_posterior = result_posterior.mu.values[1] - result_posterior.mu.values[0]
range_prior = result_prior.mu.values[1] - result_prior.mu.values[0]
assert range_posterior < range_prior
def test_hdi_coords(centered_eight):
data = centered_eight.posterior
result = hdi(data, coords={"chain": [0, 1, 3]}, input_core_dims=[["draw"]])
assert_array_equal(result.coords["chain"], [0, 1, 3])
def test_hdi_multimodal():
normal_sample = np.concatenate(
(np.random.normal(-4, 1, 2500000), np.random.normal(2, 0.5, 2500000))
)
intervals = hdi(normal_sample, multimodal=True)
assert_array_almost_equal(intervals, [[-5.8, -2.2], [0.9, 3.1]], 1)
def test_hdi_circular():
normal_sample = np.random.vonmises(np.pi, 1, 5000000)
interval = hdi(normal_sample, circular=True)
assert_array_almost_equal(interval, [0.6, -0.6], 1)
def test_hdi_bad_ci():
normal_sample = np.random.randn(10)
with pytest.raises(ValueError):
hdi(normal_sample, hdi_prob=2)
def test_hdi_skipna():
normal_sample = np.random.randn(500)
interval = hdi(normal_sample[10:])
normal_sample[:10] = np.nan
interval_ = hdi(normal_sample, skipna=True)
assert_array_almost_equal(interval, interval_)
def test_r2_score():
    """Bayesian R2 should roughly agree with the OLS r-value squared."""
    x = np.linspace(0, 1, 100)
    y = np.random.normal(x, 1)
    res = linregress(x, y)
    # NOTE(review): the trailing positional ``2`` binds to assert_allclose's
    # *rtol* parameter, i.e. a 200% relative tolerance, which makes this check
    # nearly vacuous -- presumably ``decimal=2`` of assert_array_almost_equal
    # was intended; confirm before tightening.
    assert_allclose(res.rvalue ** 2, r2_score(y, res.intercept + res.slope * x).r2, 2)
def test_r2_score_multivariate():
x = np.linspace(0, 1, 100)
y = np.random.normal(x, 1)
res = linregress(x, y)
y_multivariate = np.c_[y, y]
y_multivariate_pred = np.c_[res.intercept + res.slope * x, res.intercept + res.slope * x]
assert not np.isnan(r2_score(y_multivariate, y_multivariate_pred).r2)
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("multidim", [True, False])
def test_compare_same(centered_eight, multidim_models, method, multidim):
if multidim:
data_dict = {"first": multidim_models.model_1, "second": multidim_models.model_1}
else:
data_dict = {"first": centered_eight, "second": centered_eight}
weight = compare(data_dict, method=method)["weight"]
assert_allclose(weight[0], weight[1])
assert_allclose(np.sum(weight), 1.0)
def test_compare_unknown_ic_and_method(centered_eight, non_centered_eight):
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
with pytest.raises(NotImplementedError):
compare(model_dict, ic="Unknown", method="stacking")
with pytest.raises(ValueError):
compare(model_dict, ic="loo", method="Unknown")
@pytest.mark.parametrize("ic", ["loo", "waic"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
def test_compare_different(centered_eight, non_centered_eight, ic, method, scale):
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
weight = compare(model_dict, ic=ic, method=method, scale=scale)["weight"]
assert weight["non_centered"] >= weight["centered"]
assert_allclose(np.sum(weight), 1.0)
@pytest.mark.parametrize("ic", ["loo", "waic"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
def test_compare_different_multidim(multidim_models, ic, method):
model_dict = {"model_1": multidim_models.model_1, "model_2": multidim_models.model_2}
weight = compare(model_dict, ic=ic, method=method)["weight"]
# this should hold because the same seed is always used
assert weight["model_1"] >= weight["model_2"]
assert_allclose(np.sum(weight), 1.0)
def test_compare_different_size(centered_eight, non_centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior = centered_eight.posterior.drop("Choate", "school")
centered_eight.sample_stats = centered_eight.sample_stats.drop("Choate", "school")
centered_eight.posterior_predictive = centered_eight.posterior_predictive.drop(
"Choate", "school"
)
centered_eight.prior = centered_eight.prior.drop("Choate", "school")
centered_eight.observed_data = centered_eight.observed_data.drop("Choate", "school")
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
with pytest.raises(ValueError):
compare(model_dict, ic="waic", method="stacking")
@pytest.mark.parametrize("var_names_expected", ((None, 10), ("mu", 1), (["mu", "tau"], 2)))
def test_summary_var_names(centered_eight, var_names_expected):
var_names, expected = var_names_expected
summary_df = summary(centered_eight, var_names=var_names)
assert len(summary_df.index) == expected
METRICS_NAMES = [
"mean",
"sd",
"hdi_3%",
"hdi_97%",
"mcse_mean",
"mcse_sd",
"ess_mean",
"ess_sd",
"ess_bulk",
"ess_tail",
"r_hat",
]
@pytest.mark.parametrize(
"params",
(("all", METRICS_NAMES), ("stats", METRICS_NAMES[:4]), ("diagnostics", METRICS_NAMES[4:])),
)
def test_summary_kind(centered_eight, params):
kind, metrics_names_ = params
summary_df = summary(centered_eight, kind=kind)
assert_array_equal(summary_df.columns, metrics_names_)
@pytest.mark.parametrize("fmt", ["wide", "long", "xarray"])
def test_summary_fmt(centered_eight, fmt):
assert summary(centered_eight, fmt=fmt) is not None
@pytest.mark.parametrize("order", ["C", "F"])
def test_summary_unpack_order(order):
data = from_dict({"a": np.random.randn(4, 100, 4, 5, 3)})
az_summary = summary(data, order=order, fmt="wide")
assert az_summary is not None
if order != "F":
first_index = 4
second_index = 5
third_index = 3
else:
first_index = 3
second_index = 5
third_index = 4
column_order = []
for idx1 in range(first_index):
for idx2 in range(second_index):
for idx3 in range(third_index):
if order != "F":
column_order.append("a[{},{},{}]".format(idx1, idx2, idx3))
else:
column_order.append("a[{},{},{}]".format(idx3, idx2, idx1))
for col1, col2 in zip(list(az_summary.index), column_order):
assert col1 == col2
@pytest.mark.parametrize("origin", [0, 1, 2, 3])
def test_summary_index_origin(origin):
data = from_dict({"a": np.random.randn(2, 50, 10)})
az_summary = summary(data, index_origin=origin, fmt="wide")
assert az_summary is not None
for i, col in enumerate(list(az_summary.index)):
assert col == "a[{}]".format(i + origin)
@pytest.mark.parametrize(
"stat_funcs", [[np.var], {"var": np.var, "var2": lambda x: np.var(x) ** 2}]
)
def test_summary_stat_func(centered_eight, stat_funcs):
arviz_summary = summary(centered_eight, stat_funcs=stat_funcs)
assert arviz_summary is not None
assert hasattr(arviz_summary, "var")
def test_summary_nan(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior.theta[:, :, 0] = np.nan
summary_xarray = summary(centered_eight)
assert summary_xarray is not None
assert summary_xarray.loc["theta[0]"].isnull().all()
assert (
summary_xarray.loc[[ix for ix in summary_xarray.index if ix != "theta[0]"]]
.notnull()
.all()
.all()
)
def test_summary_skip_nan(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior.theta[:, :10, 1] = np.nan
summary_xarray = summary(centered_eight)
theta_1 = summary_xarray.loc["theta[1]"].isnull()
assert summary_xarray is not None
assert ~theta_1[:4].all()
assert theta_1[4:].all()
@pytest.mark.parametrize("fmt", [1, "bad_fmt"])
def test_summary_bad_fmt(centered_eight, fmt):
with pytest.raises(TypeError):
summary(centered_eight, fmt=fmt)
@pytest.mark.parametrize("order", [1, "bad_order"])
def test_summary_bad_unpack_order(centered_eight, order):
with pytest.raises(TypeError):
summary(centered_eight, order=order)
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("multidim", (True, False))
def test_waic(centered_eight, multidim_models, scale, multidim):
"""Test widely available information criterion calculation"""
if multidim:
assert waic(multidim_models.model_1, scale=scale) is not None
waic_pointwise = waic(multidim_models.model_1, pointwise=True, scale=scale)
else:
assert waic(centered_eight, scale=scale) is not None
waic_pointwise = waic(centered_eight, pointwise=True, scale=scale)
assert waic_pointwise is not None
assert "waic_i" in waic_pointwise
def test_waic_bad(centered_eight):
"""Test widely available information criterion calculation"""
centered_eight = deepcopy(centered_eight)
del centered_eight.sample_stats["log_likelihood"]
with pytest.raises(TypeError):
waic(centered_eight)
del centered_eight.sample_stats
with pytest.raises(TypeError):
waic(centered_eight)
def test_waic_bad_scale(centered_eight):
"""Test widely available information criterion calculation with bad scale."""
with pytest.raises(TypeError):
waic(centered_eight, scale="bad_value")
def test_waic_warning(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.sample_stats["log_likelihood"][:, :250, 1] = 10
with pytest.warns(UserWarning):
assert waic(centered_eight, pointwise=True) is not None
# this should throw a warning, but due to numerical issues it fails
centered_eight.sample_stats["log_likelihood"][:, :, :] = 0
with pytest.warns(UserWarning):
assert waic(centered_eight, pointwise=True) is not None
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
def test_waic_print(centered_eight, scale):
waic_data = waic(centered_eight, scale=scale).__repr__()
waic_pointwise = waic(centered_eight, scale=scale, pointwise=True).__repr__()
assert waic_data is not None
assert waic_pointwise is not None
assert waic_data == waic_pointwise
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("multidim", (True, False))
def test_loo(centered_eight, multidim_models, scale, multidim):
"""Test approximate leave one out criterion calculation"""
if multidim:
assert loo(multidim_models.model_1, scale=scale) is not None
loo_pointwise = loo(multidim_models.model_1, pointwise=True, scale=scale)
else:
assert loo(centered_eight, scale=scale) is not None
loo_pointwise = loo(centered_eight, pointwise=True, scale=scale)
assert loo_pointwise is not None
assert "loo_i" in loo_pointwise
assert "pareto_k" in loo_pointwise
assert "loo_scale" in loo_pointwise
def test_loo_one_chain(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior = centered_eight.posterior.drop([1, 2, 3], "chain")
centered_eight.sample_stats = centered_eight.sample_stats.drop([1, 2, 3], "chain")
assert loo(centered_eight) is not None
def test_loo_bad(centered_eight):
with pytest.raises(TypeError):
loo(np.random.randn(2, 10))
centered_eight = deepcopy(centered_eight)
del centered_eight.sample_stats["log_likelihood"]
with pytest.raises(TypeError):
loo(centered_eight)
def test_loo_bad_scale(centered_eight):
"""Test loo with bad scale value."""
with pytest.raises(TypeError):
loo(centered_eight, scale="bad_scale")
def test_loo_bad_no_posterior_reff(centered_eight):
loo(centered_eight, reff=None)
centered_eight = deepcopy(centered_eight)
del centered_eight.posterior
with pytest.raises(TypeError):
loo(centered_eight, reff=None)
loo(centered_eight, reff=0.7)
def test_loo_warning(centered_eight):
centered_eight = deepcopy(centered_eight)
# make one of the khats infinity
centered_eight.sample_stats["log_likelihood"][:, :, 1] = 10
with pytest.warns(UserWarning) as records:
assert loo(centered_eight, pointwise=True) is not None
assert any("Estimated shape parameter" in str(record.message) for record in records)
# make all of the khats infinity
centered_eight.sample_stats["log_likelihood"][:, :, :] = 1
with pytest.warns(UserWarning) as records:
assert loo(centered_eight, pointwise=True) is not None
assert any("Estimated shape parameter" in str(record.message) for record in records)
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
def test_loo_print(centered_eight, scale):
loo_data = loo(centered_eight, scale=scale).__repr__()
loo_pointwise = loo(centered_eight, scale=scale, pointwise=True).__repr__()
assert loo_data is not None
assert loo_pointwise is not None
assert len(loo_data) < len(loo_pointwise)
def test_psislw(centered_eight):
pareto_k = loo(centered_eight, pointwise=True, reff=0.7)["pareto_k"]
log_likelihood = get_log_likelihood(centered_eight)
log_likelihood = log_likelihood.stack(sample=("chain", "draw"))
assert_allclose(pareto_k, psislw(-log_likelihood, 0.7)[1])
@pytest.mark.parametrize("probs", [True, False])
@pytest.mark.parametrize("kappa", [-1, -0.5, 1e-30, 0.5, 1])
@pytest.mark.parametrize("sigma", [0, 2])
def test_gpinv(probs, kappa, sigma):
if probs:
probs = np.array([0.1, 0.1, 0.1, 0.2, 0.3])
else:
probs = np.array([-0.1, 0.1, 0.1, 0.2, 0.3])
assert len(_gpinv(probs, kappa, sigma)) == len(probs)
@pytest.mark.parametrize("func", [loo, waic])
def test_multidimensional_log_likelihood(func):
llm = np.random.rand(4, 23, 15, 2)
ll1 = llm.reshape(4, 23, 15 * 2)
statsm = Dataset(dict(log_likelihood=DataArray(llm, dims=["chain", "draw", "a", "b"])))
stats1 = Dataset(dict(log_likelihood=DataArray(ll1, dims=["chain", "draw", "v"])))
post = Dataset(dict(mu=DataArray(np.random.rand(4, 23, 2), dims=["chain", "draw", "v"])))
dsm = convert_to_inference_data(statsm, group="sample_stats")
ds1 = convert_to_inference_data(stats1, group="sample_stats")
dsp = convert_to_inference_data(post, group="posterior")
dsm = concat(dsp, dsm)
ds1 = concat(dsp, ds1)
frm = func(dsm)
fr1 = func(ds1)
assert (fr1 == frm).all()
assert_array_almost_equal(frm[:4], fr1[:4])
@pytest.mark.parametrize(
"args",
[
{"y": "obs"},
{"y": "obs", "y_hat": "obs"},
{"y": "arr", "y_hat": "obs"},
{"y": "obs", "y_hat": "arr"},
{"y": "arr", "y_hat": "arr"},
{"y": "obs", "y_hat": "obs", "log_weights": "arr"},
{"y": "arr", "y_hat": "obs", "log_weights": "arr"},
{"y": "obs", "y_hat": "arr", "log_weights": "arr"},
{"idata": False},
],
)
def test_loo_pit(centered_eight, args):
y = args.get("y", None)
y_hat = args.get("y_hat", None)
log_weights = args.get("log_weights", None)
y_arr = centered_eight.observed_data.obs
y_hat_arr = centered_eight.posterior_predictive.obs.stack(sample=("chain", "draw"))
log_like = get_log_likelihood(centered_eight).stack(sample=("chain", "draw"))
n_samples = len(log_like.sample)
ess_p = ess(centered_eight.posterior, method="mean")
reff = np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples
log_weights_arr = psislw(-log_like, reff=reff)[0]
if args.get("idata", True):
if y == "arr":
y = y_arr
if y_hat == "arr":
y_hat = y_hat_arr
if log_weights == "arr":
log_weights = log_weights_arr
loo_pit_data = loo_pit(idata=centered_eight, y=y, y_hat=y_hat, log_weights=log_weights)
else:
loo_pit_data = loo_pit(idata=None, y=y_arr, y_hat=y_hat_arr, log_weights=log_weights_arr)
assert np.all((loo_pit_data >= 0) & (loo_pit_data <= 1))
@pytest.mark.parametrize(
"args",
[
{"y": "y"},
{"y": "y", "y_hat": "y"},
{"y": "arr", "y_hat": "y"},
{"y": "y", "y_hat": "arr"},
{"y": "arr", "y_hat": "arr"},
{"y": "y", "y_hat": "y", "log_weights": "arr"},
{"y": "arr", "y_hat": "y", "log_weights": "arr"},
{"y": "y", "y_hat": "arr", "log_weights": "arr"},
{"idata": False},
],
)
def test_loo_pit_multidim(multidim_models, args):
y = args.get("y", None)
y_hat = args.get("y_hat", None)
log_weights = args.get("log_weights", None)
idata = multidim_models.model_1
y_arr = idata.observed_data.y
y_hat_arr = idata.posterior_predictive.y.stack(sample=("chain", "draw"))
log_like = get_log_likelihood(idata).stack(sample=("chain", "draw"))
n_samples = len(log_like.sample)
ess_p = ess(idata.posterior, method="mean")
reff = np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples
log_weights_arr = psislw(-log_like, reff=reff)[0]
if args.get("idata", True):
if y == "arr":
y = y_arr
if y_hat == "arr":
y_hat = y_hat_arr
if log_weights == "arr":
log_weights = log_weights_arr
loo_pit_data = loo_pit(idata=idata, y=y, y_hat=y_hat, log_weights=log_weights)
else:
loo_pit_data = loo_pit(idata=None, y=y_arr, y_hat=y_hat_arr, log_weights=log_weights_arr)
assert np.all((loo_pit_data >= 0) & (loo_pit_data <= 1))
@pytest.mark.parametrize("input_type", ["idataarray", "idatanone_ystr", "yarr_yhatnone"])
def test_loo_pit_bad_input(centered_eight, input_type):
"""Test incompatible input combinations."""
arr = np.random.random((8, 200))
if input_type == "idataarray":
with pytest.raises(ValueError, match=r"type InferenceData or None"):
loo_pit(idata=arr, y="obs")
elif input_type == "idatanone_ystr":
with pytest.raises(ValueError, match=r"all 3.+must be array or DataArray"):
loo_pit(idata=None, y="obs")
elif input_type == "yarr_yhatnone":
with pytest.raises(ValueError, match=r"y_hat.+None.+y.+str"):
loo_pit(idata=centered_eight, y=arr, y_hat=None)
@pytest.mark.parametrize("arg", ["y", "y_hat", "log_weights"])
def test_loo_pit_bad_input_type(centered_eight, arg):
"""Test wrong input type (not None, str not DataArray."""
kwargs = {"y": "obs", "y_hat": "obs", "log_weights": None}
kwargs[arg] = 2 # use int instead of array-like
with pytest.raises(ValueError, match="not {}".format(type(2))):
loo_pit(idata=centered_eight, **kwargs)
@pytest.mark.parametrize("incompatibility", ["y-y_hat1", "y-y_hat2", "y_hat-log_weights"])
def test_loo_pit_bad_input_shape(incompatibility):
"""Test shape incompatiblities."""
y = np.random.random(8)
y_hat = np.random.random((8, 200))
log_weights = np.random.random((8, 200))
if incompatibility == "y-y_hat1":
with pytest.raises(ValueError, match="1 more dimension"):
loo_pit(y=y, y_hat=y_hat[None, :], log_weights=log_weights)
elif incompatibility == "y-y_hat2":
with pytest.raises(ValueError, match="y has shape"):
loo_pit(y=y, y_hat=y_hat[1:3, :], log_weights=log_weights)
elif incompatibility == "y_hat-log_weights":
with pytest.raises(ValueError, match="must have the same shape"):
loo_pit(y=y, y_hat=y_hat[:, :100], log_weights=log_weights)
@pytest.mark.parametrize("pointwise", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"kwargs",
[
{},
{"group": "posterior_predictive", "var_names": {"posterior_predictive": "obs"}},
{"group": "observed_data", "var_names": {"both": "obs"}, "out_data_shape": "shape"},
{"var_names": {"both": "obs", "posterior": ["theta", "mu"]}},
{"group": "observed_data", "out_name_data": "T_name"},
],
)
def test_apply_test_function(centered_eight, pointwise, inplace, kwargs):
"""Test some usual call cases of apply_test_function"""
centered_eight = deepcopy(centered_eight)
group = kwargs.get("group", "both")
var_names = kwargs.get("var_names", None)
out_data_shape = kwargs.get("out_data_shape", None)
out_pp_shape = kwargs.get("out_pp_shape", None)
out_name_data = kwargs.get("out_name_data", "T")
if out_data_shape == "shape":
out_data_shape = (8,) if pointwise else ()
if out_pp_shape == "shape":
out_pp_shape = (4, 500, 8) if pointwise else (4, 500)
idata = deepcopy(centered_eight)
idata_out = apply_test_function(
idata,
lambda y, theta: np.mean(y),
group=group,
var_names=var_names,
pointwise=pointwise,
out_name_data=out_name_data,
out_data_shape=out_data_shape,
out_pp_shape=out_pp_shape,
)
if inplace:
assert idata is idata_out
if group == "both":
test_dict = {"observed_data": ["T"], "posterior_predictive": ["T"]}
else:
test_dict = {group: [kwargs.get("out_name_data", "T")]}
fails = check_multiple_attrs(test_dict, idata_out)
assert not fails
def test_apply_test_function_bad_group(centered_eight):
"""Test error when group is an invalid name."""
with pytest.raises(ValueError, match="Invalid group argument"):
apply_test_function(centered_eight, lambda y, theta: y, group="bad_group")
def test_apply_test_function_missing_group():
"""Test error when InferenceData object is missing a required group.
The function cannot work if group="both" but InferenceData object has no
posterior_predictive group.
"""
idata = from_dict(
posterior={"a": np.random.random((4, 500, 30))}, observed_data={"y": np.random.random(30)}
)
with pytest.raises(ValueError, match="must have posterior_predictive"):
apply_test_function(idata, lambda y, theta: np.mean, group="both")
def test_apply_test_function_should_overwrite_error(centered_eight):
"""Test error when overwrite=False but out_name is already a present variable."""
with pytest.raises(ValueError, match="Should overwrite"):
apply_test_function(centered_eight, lambda y, theta: y, out_name_data="obs")
| 13,469 | 0 | 947 |
55beeb78688b3a220c50a5e1f1a6c0b1fe5f7973 | 3,961 | py | Python | cirrocumulus/diff_exp.py | jkanche/cirrocumulus | 242a178fd15c26cea2f5949b62f7a38fe756087d | [
"BSD-3-Clause"
] | null | null | null | cirrocumulus/diff_exp.py | jkanche/cirrocumulus | 242a178fd15c26cea2f5949b62f7a38fe756087d | [
"BSD-3-Clause"
] | 1 | 2021-04-13T14:52:39.000Z | 2021-04-13T15:53:34.000Z | cirrocumulus/diff_exp.py | jkanche/cirrocumulus | 242a178fd15c26cea2f5949b62f7a38fe756087d | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def _ecdf(x):
    '''No-frills empirical CDF used in ``fdrcorrection``.

    Only ``len(x)`` is used: the result is the plotting positions
    ``[1/n, 2/n, ..., 1.0]``.  The caller is expected to pass the already
    sorted p-values so that position ``i/n`` matches the i-th order statistic.
    '''
    nobs = len(x)
    return np.arange(1, nobs + 1) / float(nobs)
def fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False):
    '''P-value correction for false discovery rate.

    Covers Benjamini/Hochberg for independent or positively correlated tests
    ('indep') and Benjamini/Yekutieli for general or negatively correlated
    tests ('negcorr').  Method names can be abbreviated to their first letter.

    Parameters
    ----------
    pvals : array_like
        Set of p-values of the individual tests.
    alpha : float
        Error rate.  NOTE: currently unused -- this trimmed-down port returns
        only the corrected p-values and never computes a rejection mask, so
        ``alpha`` is kept purely for signature compatibility with
        ``statsmodels.stats.multitest.fdrcorrection``.
    method : {'indep', 'negcorr'}
        Dependence assumption, see above.
    is_sorted : bool
        If True, ``pvals`` is assumed sorted ascending and the result is
        returned in that order; otherwise the result matches the input order.

    Returns
    -------
    pvalue-corrected : ndarray
        P-values adjusted for multiple hypothesis testing to limit FDR.
        (Unlike the statsmodels original, no ``rejected`` boolean array is
        returned.)
    '''
    pvals = np.asarray(pvals)
    if not is_sorted:
        pvals_sortind = np.argsort(pvals)
        pvals_sorted = np.take(pvals, pvals_sortind)
    else:
        pvals_sorted = pvals  # alias; caller guarantees ascending order
    if method in ['i', 'indep', 'p', 'poscorr']:
        # Benjamini/Hochberg: compare against the ECDF positions i/m.
        ecdffactor = _ecdf(pvals_sorted)
    elif method in ['n', 'negcorr']:
        # Benjamini/Yekutieli: shrink the ECDF by c(m) = sum_{i<=m} 1/i.
        cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))  # corrected this
        ecdffactor = _ecdf(pvals_sorted) / cm
    ## elif method in ['n', 'negcorr']:
    ##     cm = np.sum(np.arange(len(pvals)))
    ##     ecdffactor = ecdf(pvals_sorted)/cm
    else:
        raise ValueError('only indep and negcorr implemented')
    pvals_corrected_raw = pvals_sorted / ecdffactor
    # Step-up procedure: enforce monotonicity from the largest p-value down.
    pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
    del pvals_corrected_raw
    pvals_corrected[pvals_corrected > 1] = 1  # clip to the valid range
    if not is_sorted:
        # Undo the argsort so the output lines up with the input order.
        pvals_corrected_ = np.empty_like(pvals_corrected)
        pvals_corrected_[pvals_sortind] = pvals_corrected
        del pvals_corrected
        return pvals_corrected_
    else:
        return pvals_corrected
| 32.467213 | 103 | 0.627872 | import numpy as np
def _ecdf(x):
'''no frills empirical cdf used in fdrcorrection
'''
nobs = len(x)
return np.arange(1, nobs + 1) / float(nobs)
def fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False):
    '''P-value correction for false discovery rate.

    Covers Benjamini/Hochberg for independent or positively correlated tests
    ('indep') and Benjamini/Yekutieli for general or negatively correlated
    tests ('negcorr').  Method names can be abbreviated to their first letter.

    Parameters
    ----------
    pvals : array_like
        Set of p-values of the individual tests.
    alpha : float
        Error rate.  Unused: this trimmed-down port returns only the
        corrected p-values and never computes a rejection mask; the parameter
        is kept for signature compatibility with
        ``statsmodels.stats.multitest.fdrcorrection``.
    method : {'indep', 'negcorr'}
        Dependence assumption, see above.
    is_sorted : bool
        If True, ``pvals`` is assumed sorted ascending and the result is
        returned in that order; otherwise the result matches the input order.

    Returns
    -------
    ndarray
        P-values adjusted for multiple hypothesis testing to limit FDR.
        (Unlike the statsmodels original, no ``rejected`` array is returned;
        the previous docstring promising one was wrong.)

    Raises
    ------
    ValueError
        If ``method`` is not one of the recognised names.
    '''
    pvals = np.asarray(pvals)
    if is_sorted:
        sort_index = None
        pvals_sorted = pvals
    else:
        sort_index = np.argsort(pvals)
        pvals_sorted = np.take(pvals, sort_index)

    if method in ('i', 'indep', 'p', 'poscorr'):
        dependence_factor = 1.0
    elif method in ('n', 'negcorr'):
        # Benjamini/Yekutieli correction c(m) = sum_{i<=m} 1/i for general
        # dependence between the tests.
        dependence_factor = np.sum(1.0 / np.arange(1, len(pvals_sorted) + 1))
    else:
        raise ValueError('only indep and negcorr implemented')

    nobs = len(pvals_sorted)
    # ECDF plotting positions i/m, shrunk by c(m) for the 'negcorr' method.
    ecdffactor = np.arange(1, nobs + 1) / float(nobs) / dependence_factor
    # Step-up procedure: scale, enforce monotonicity from the largest p-value
    # downwards, then clip to the valid [0, 1] range.
    pvals_corrected = np.minimum.accumulate((pvals_sorted / ecdffactor)[::-1])[::-1]
    pvals_corrected[pvals_corrected > 1] = 1
    if sort_index is None:
        return pvals_corrected
    # Undo the argsort so the output lines up with the input order.
    pvals_corrected_ = np.empty_like(pvals_corrected)
    pvals_corrected_[sort_index] = pvals_corrected
    return pvals_corrected_
def diff_exp(X, mask):
    """Two-sided Welch's t-test of the rows selected by ``mask`` vs the rest.

    Parameters
    ----------
    X : scipy sparse matrix
        Observations x features matrix (uses ``.power``, ``.getnnz`` and
        ``.A1``, so a scipy sparse matrix is expected -- presumably
        cells x genes; confirm against callers).
    mask : ndarray of bool
        Row selector for condition 1; ``~mask`` selects condition 2.

    Returns
    -------
    dict of per-column ndarrays:
        ``WAD``                -- weighted average difference score
        ``mean1``, ``mean2``   -- per-condition column means
        ``percent1``, ``percent2`` -- percent of nonzero entries per condition
        ``tscore``             -- Welch t statistic (0 where the pooled
                                  variance estimate is 0)
        ``pvals``              -- two-sided p-values (1 where the pooled
                                  variance estimate is 0)
    """
    import scipy.stats as ss

    mat_cond1 = X[mask]
    mat_cond2 = X[~mask]
    n1 = mat_cond1.shape[0]
    n2 = mat_cond2.shape[0]
    mean1 = mat_cond1.mean(axis=0).A1
    mean2 = mat_cond2.mean(axis=0).A1
    # Unbiased variances via sums of squares: (sum(x^2) - n*mean^2) / (n - 1).
    psum1 = mat_cond1.power(2).sum(axis=0).A1
    s1sqr = (psum1 - n1 * (mean1 ** 2)) / (n1 - 1)
    psum2 = mat_cond2.power(2).sum(axis=0).A1
    s2sqr = (psum2 - n2 * (mean2 ** 2)) / (n2 - 1)
    percent1 = (mat_cond1.getnnz(axis=0) / n1 * 100.0).astype(np.float32)
    percent2 = (mat_cond2.getnnz(axis=0) / n2 * 100.0).astype(np.float32)

    pvals = np.full(X.shape[1], 1.0)
    # Bug fix: was np.full(n, 0), an *integer* array that truncated every
    # assigned t statistic to its integer part.
    tscores = np.zeros(X.shape[1])
    var_est = s1sqr / n1 + s2sqr / n2
    idx = var_est > 0.0
    if idx.sum() > 0:
        tscore = (mean1[idx] - mean2[idx]) / np.sqrt(var_est[idx])
        # Welch-Satterthwaite degrees of freedom.
        v = (var_est[idx] ** 2) / (
            (s1sqr[idx] / n1) ** 2 / (n1 - 1) + (s2sqr[idx] / n2) ** 2 / (n2 - 1)
        )
        pvals[idx] = ss.t.sf(np.fabs(tscore), v) * 2.0  # two-sided
        tscores[idx] = tscore
    log_fold_change = mean1 - mean2
    # Weighted average difference (WAD): up-weight columns with a high average
    # level, with weights rescaled to (0, 1] across columns.
    x_avg = (mean1 + mean2) / 2
    x_max = x_avg.max()
    x_min = x_avg.min() - 0.001  # avoid divide by zero when all averages equal
    weights = (x_avg - x_min) / (x_max - x_min)
    WAD = log_fold_change * weights
    # Bug fix: return the full-length float array ``tscores`` (the original
    # returned the local ``tscore``, which only covered the positive-variance
    # columns and raised NameError when there were none).  ``pvals`` was
    # computed but silently dropped; expose it as an additional key.
    return dict(
        WAD=WAD,
        mean1=mean1,
        mean2=mean2,
        percent1=percent1,
        percent2=percent2,
        tscore=tscores,
        pvals=pvals,
    )
717b6f46c8af07b9fae5dda17e2facb49fa6ae44 | 9,994 | py | Python | tps/problems/urls.py | akmohtashami/tps-web | 9dab3ffe97c21f658be30ce2f2711dd93e4ba60f | [
"MIT"
] | 5 | 2019-02-26T06:10:43.000Z | 2021-07-24T17:11:45.000Z | tps/problems/urls.py | akmohtashami/tps-web | 9dab3ffe97c21f658be30ce2f2711dd93e4ba60f | [
"MIT"
] | 3 | 2019-08-15T13:56:03.000Z | 2021-06-10T18:43:16.000Z | tps/problems/urls.py | jonathanirvings/tps-web | 46519347d4fc8bdced9b5bceb6cdee5ea4e508f2 | [
"MIT"
] | 2 | 2018-12-28T13:12:59.000Z | 2020-12-25T18:42:13.000Z | from django.conf import settings
from django.conf.urls import url
from .views import *
branch_mode_urls = [
url(r'^merge_request/create/$', CreateMergeRequest.as_view(), name="create_merge_request"),
url(r'^merge_request/list/$', MergeRequestList.as_view(), name="merge_requests_list"),
url(r'^merge_request/(?P<merge_request_id>\d+)/$', MergeRequestDiscussionView.as_view(), name="merge_request"),
url(r'^merge_request/(?P<merge_request_id>\d+)/$', MergeRequestDiscussionView.as_view(), name="merge_request_discussion"),
url(r'^merge_request/(?P<merge_request_id>\d+)/changes/$', MergeRequestChangesView.as_view(), name="merge_request_changes"),
url(r'^merge_request/(?P<merge_request_id>\d+)/reopen/$', MergeRequestReopenView.as_view(), name="merge_request_reopen"),
url(r'^merge_request/(?P<merge_request_id>\d+)/follow/$', FollowMergeRequestView.as_view(), name="merge_request_follow"),
url(r'^merge_request/(?P<merge_request_id>\d+)/unfollow/$', UnfollowMergeRequestView.as_view(), name="merge_request_unfollow"),
url(r'^branch/list/$', BranchesListView.as_view(), name="branches_list"),
url(r'^branch/create/$', CreateBranchView.as_view(), name="create_branch"),
url(r'^delete/$', DeleteBranchView.as_view(), name="delete_branch"),
]
problem_urls = ([
url(r'^analysis/$', AnalysisView.as_view(), name="analysis"),
url(r'^analysis/generate/$', AnalysisGenerateView.as_view(), name="analysis_generate"),
url(r'^analysis/analyze/$', AnalyzeView.as_view(), name="analyze"),
url(r'^export/$', ExportView.as_view(), name="export"),
url(r'export/(?P<export_id>\d+)/download/$', ExportDownloadView.as_view(), name="export_download"),
url(r'export/(?P<export_id>\d+)/start/$', ExportPackageStarterView.as_view(), name="export_start"),
url(r'statement/$', EditStatement.as_view(), name="statement"),
url(r'statement/(?P<attachment_id>.+)$', DownloadStatementAttachment.as_view(), name="statement"),
url(r'^history/$', HistoryView.as_view(), name="history"),
url(r'^diff/(?P<other_slug>\w{1,40})/$', DiffView.as_view(), name="diff"),
url(r'^$', Overview.as_view(), name="overview"),
url(r'^discussions/$', DiscussionsListView.as_view(), name="discussions"),
url(r'^discussion/add/$', DiscussionAddView.as_view(), name="add_discussion"),
url(r'^discussion/(?P<discussion_id>\d+)/comments$', CommentListView.as_view(), name="comments"),
url(r'^invocations/$', InvocationsListView.as_view(), name="invocations"),
url(r'^invocation/add/$', InvocationAddView.as_view(), name="add_invocation"),
url(r'^invocation/(?P<invocation_id>\d+)/run/$', InvocationRunView.as_view(), name="run_invocation"),
url(r'^invocation/(?P<invocation_id>\d+)/clone/$', InvocationCloneView.as_view(), name="clone_invocation"),
url(r'^invocation/(?P<invocation_id>\d+)/view/$', InvocationDetailsView.as_view(), name="view_invocation"),
url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/$', InvocationResultView.as_view(), name="view_invocation_result"),
url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/download/output/$', InvocationOutputDownloadView.as_view(), name="download_output"),
url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/download/input/$', InvocationInputDownloadView.as_view(), name="download_input"),
url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/download/answer/$', InvocationAnswerDownloadView.as_view(), name="download_answer"),
url(r'^resource/add/$', ResourceAddView.as_view(), name="add_resource"),
url(r'^resource/(?P<resource_id>\d+)/edit/$', ResourceEditView.as_view(), name="edit_resource"),
url(r'^resource/(?P<object_id>\d+)/delete/$', ResourceDeleteView.as_view(), name="delete_resource"),
url(r'^resource/(?P<object_id>\d+)/download/$', ResourceDownloadView.as_view(), name="download_resource"),
url(r'^solutions/$', SolutionsListView.as_view(), name="solutions"),
url(r'^solution/add/$', SolutionAddView.as_view(), name="add_solution"),
url(r'^solution/(?P<solution_id>.+)/edit/$', SolutionEditView.as_view(), name="edit_solution"),
url(r'^solution/(?P<solution_id>.+)/delete/$', SolutionDeleteView, name="delete_solution"),
url(r'^solution/(?P<solution_id>.+)/source/$', SolutionShowSourceView.as_view(), name="solution_source"),
url(r'^solution/(?P<solution_id>.+)/download/$', SolutionDownloadView.as_view(), name="download_solution"),
url(r'^graders/$', GradersListView.as_view(), name="graders"),
url(r'^grader/add/$', GraderAddView.as_view(), name="add_grader"),
url(r'^grader/(?P<grader_id>.+)/edit/$', GraderEditView.as_view(), name="edit_grader"),
url(r'^grader/(?P<grader_id>.+)/delete/$', GraderDeleteView, name="delete_grader"),
url(r'^grader/(?P<grader_id>.+)/source/$', GraderShowSourceView.as_view(), name="grader_source"),
url(r'^grader/(?P<grader_id>.+)/download/$', GraderDownloadView.as_view(), name="download_grader"),
url(r'^testcases/$', TestCasesListView.as_view(), name="testcases"),
url(r'^testcase/add/$', TestCaseAddView.as_view(), name="add_testcase"),
url(r'^testcase/(?P<testcase_id>.+)/edit/$', TestCaseEditView.as_view(), name="edit_testcase"),
url(r'^testcase/(?P<testcase_id>.+)/delete/$', TestCaseDeleteView, name="delete_testcase"),
url(r'^testcase/(?P<testcase_id>.+)/input/$', TestCaseInputDownloadView.as_view(), name="testcase_input"),
url(r'^testcase/(?P<testcase_id>.+)/output/$', TestCaseOutputDownloadView.as_view(), name="testcase_output"),
url(r'^testcase/(?P<testcase_id>.+)/generate/$', TestCaseGenerateView.as_view(), name="generate_testcase"),
url(r'^testcase/generate/all/$', TestCaseGenerateView.as_view(), name="generate_testcase"),
url(r'^testcase/(?P<testcase_id>.+)/details/$', TestCaseDetailsView.as_view(), name="testcase_details"),
url(r'^subtasks/$', SubtasksListView.as_view(), name="subtasks"),
url(r'^subtask/add/$', SubtaskAddView.as_view(), name="add_subtask"),
url(r'^subtask/(?P<subtask_id>.+)/details/$', SubtaskDetailsView.as_view(), name="subtask_details"),
url(r'^subtask/(?P<subtask_id>.+)/delete/$', SubtaskDeleteView, name="delete_subtask"),
url(r'^subtask/(?P<subtask_id>.+)/edit/$', SubtaskEditView.as_view(), name="edit_subtask"),
url(r'^validators/$', ValidatorsListView.as_view(), name="validators"),
url(r'^validator/(?P<validator_id>.+)/edit/$', ValidatorEditView.as_view(), name="edit_validator"),
url(r'^validator/(?P<validator_id>.+)/delete/$', ValidatorDeleteView, name="delete_validator"),
url(r'^validator/(?P<validator_id>.+)/source/$', ValidatorShowSourceView.as_view(), name="validator_source"),
url(r'^validator/add/$', ValidatorAddView.as_view(), name="add_validator"),
url(r'^validator/(?P<validator_id>.+)/download/$', ValidatorDownloadView.as_view(), name="download_validator"),
url(r'^generators/$', GeneratorsListView.as_view(), name="generators"),
url(r'^generator/(?P<generator_id>.+)/edit/$', GeneratorEditView.as_view(), name="edit_generator"),
url(r'^generator/(?P<generator_id>.+)/delete/$', GeneratorDeleteView, name="delete_generator"),
url(r'^generator/(?P<generator_id>.+)/source/$', GeneratorShowSourceView.as_view(), name="generator_source"),
url(r'^generator/add/$', GeneratorAddView.as_view(), name="add_generator"),
url(r'^generator/(?P<generator_id>.+)/generate-testcases/$', GeneratorEnableView.as_view(),
name="enable_generator"),
url(r'^generator/(?P<generator_id>.+)/delete-testcases/$', GeneratorDisableView.as_view(),
name="disable_generator"),
url(r'^checkers/$', CheckerListView.as_view(), name="checkers"),
url(r'^checker/add/$$', CheckerAddView.as_view(), name="add_checker"),
url(r'^checker/(?P<checker_id>.+)/activate/$$', CheckerActivateView.as_view(), name="activate_checker"),
url(r'^checker/(?P<checker_id>.+)/delete/$$', CheckerDeleteView, name="delete_checker"),
url(r'^checker/(?P<checker_id>.+)/edit/$$', CheckerEditView.as_view(), name="edit_checker"),
url(r'^checker/(?P<checker_id>.+)/source/$$', CheckerShowSourceView.as_view(), name="checker_source"),
url(r'^checker/(?P<checker_id>.+)/download/$$', CheckerDownloadView.as_view(), name="download_checker"),
url(r'^pull/$', PullBranchView.as_view(), name="pull_branch"),
url(r'^commit/$', CommitWorkingCopy.as_view(), name="commit"),
url(r'^discard/$', DiscardWorkingCopy.as_view(), name="discard"),
url(r'^conflicts/$', ConflictsListView.as_view(), name="conflicts"),
url(r'^conflict/(?P<conflict_id>\d+)/$', ResolveConflictView.as_view(), name="resolve_conflict"),
url(r'files/list/$', ProblemFilesView.as_view(), name="files"),
url(r'files/add/$', ProblemFileAddView.as_view(), name="add_file"),
url(r'^files/(?P<file_id>\d+)/edit/$', ProblemFileEditView.as_view(), name="edit_file"),
url(r'^files/(?P<file_id>\d+)/delete/$', ProblemFileDeleteView.as_view(), name="delete_file"),
url(r'^files/(?P<file_id>\d+)/source/$', ProblemFileShowSourceView.as_view(), name="file_source"),
url(r'^files/(?P<file_id>\d+)/download/$', ProblemFileDownloadView.as_view(), name="download_file"),
] + (branch_mode_urls if not settings.DISABLE_BRANCHES else []) , None, None)
# Top-level routes: problem list, the per-problem sub-URLconf (mounted under
# a problem code and a 1-40 character revision slug), and problem creation.
urlpatterns = [
    url(r'^$', ProblemsListView.as_view(), name="problems"),
    # NOTE(review): this include precedes 'problem/add/'.  It cannot swallow
    # "problem/add/" itself (a revision-slug segment is required), but a URL
    # like "problem/add/<slug>/..." resolves here with problem_code="add".
    url(r'^problem/(?P<problem_code>[^\/]+)/(?P<revision_slug>\w{1,40})/', problem_urls),
    url(r'^problem/add/$', ProblemAddView.as_view(), name="add_problem"),
]
| 75.712121 | 175 | 0.673704 | from django.conf import settings
from django.conf.urls import url
from .views import *
# ---------------------------------------------------------------------------
# NOTE(review): everything below duplicates the URLconf defined earlier in
# this file (an artifact of dataset concatenation); the assignments simply
# rebind the same module-level names.
# ---------------------------------------------------------------------------
# Routes that only exist when branch support is enabled (appended to
# problem_urls unless settings.DISABLE_BRANCHES is set).
branch_mode_urls = [
    url(r'^merge_request/create/$', CreateMergeRequest.as_view(), name="create_merge_request"),
    url(r'^merge_request/list/$', MergeRequestList.as_view(), name="merge_requests_list"),
    # Same pattern/view registered under two names; both names reverse to it.
    url(r'^merge_request/(?P<merge_request_id>\d+)/$', MergeRequestDiscussionView.as_view(), name="merge_request"),
    url(r'^merge_request/(?P<merge_request_id>\d+)/$', MergeRequestDiscussionView.as_view(), name="merge_request_discussion"),
    url(r'^merge_request/(?P<merge_request_id>\d+)/changes/$', MergeRequestChangesView.as_view(), name="merge_request_changes"),
    url(r'^merge_request/(?P<merge_request_id>\d+)/reopen/$', MergeRequestReopenView.as_view(), name="merge_request_reopen"),
    url(r'^merge_request/(?P<merge_request_id>\d+)/follow/$', FollowMergeRequestView.as_view(), name="merge_request_follow"),
    url(r'^merge_request/(?P<merge_request_id>\d+)/unfollow/$', UnfollowMergeRequestView.as_view(), name="merge_request_unfollow"),
    url(r'^branch/list/$', BranchesListView.as_view(), name="branches_list"),
    url(r'^branch/create/$', CreateBranchView.as_view(), name="create_branch"),
    url(r'^delete/$', DeleteBranchView.as_view(), name="delete_branch"),
]
# Per-problem URLconf, included from `urlpatterns` below.
problem_urls = ([
    url(r'^analysis/$', AnalysisView.as_view(), name="analysis"),
    url(r'^analysis/generate/$', AnalysisGenerateView.as_view(), name="analysis_generate"),
    url(r'^analysis/analyze/$', AnalyzeView.as_view(), name="analyze"),
    url(r'^export/$', ExportView.as_view(), name="export"),
    # NOTE(review): export/statement patterns below lack the leading '^' anchor.
    url(r'export/(?P<export_id>\d+)/download/$', ExportDownloadView.as_view(), name="export_download"),
    url(r'export/(?P<export_id>\d+)/start/$', ExportPackageStarterView.as_view(), name="export_start"),
    # NOTE(review): both statement routes share name="statement".
    url(r'statement/$', EditStatement.as_view(), name="statement"),
    url(r'statement/(?P<attachment_id>.+)$', DownloadStatementAttachment.as_view(), name="statement"),
    url(r'^history/$', HistoryView.as_view(), name="history"),
    url(r'^diff/(?P<other_slug>\w{1,40})/$', DiffView.as_view(), name="diff"),
    url(r'^$', Overview.as_view(), name="overview"),
    url(r'^discussions/$', DiscussionsListView.as_view(), name="discussions"),
    url(r'^discussion/add/$', DiscussionAddView.as_view(), name="add_discussion"),
    url(r'^discussion/(?P<discussion_id>\d+)/comments$', CommentListView.as_view(), name="comments"),
    url(r'^invocations/$', InvocationsListView.as_view(), name="invocations"),
    url(r'^invocation/add/$', InvocationAddView.as_view(), name="add_invocation"),
    url(r'^invocation/(?P<invocation_id>\d+)/run/$', InvocationRunView.as_view(), name="run_invocation"),
    url(r'^invocation/(?P<invocation_id>\d+)/clone/$', InvocationCloneView.as_view(), name="clone_invocation"),
    url(r'^invocation/(?P<invocation_id>\d+)/view/$', InvocationDetailsView.as_view(), name="view_invocation"),
    url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/$', InvocationResultView.as_view(), name="view_invocation_result"),
    url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/download/output/$', InvocationOutputDownloadView.as_view(), name="download_output"),
    url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/download/input/$', InvocationInputDownloadView.as_view(), name="download_input"),
    url(r'^invocation/(?P<invocation_id>\d+)/invocation_result/(?P<result_id>\d+)/view/download/answer/$', InvocationAnswerDownloadView.as_view(), name="download_answer"),
    url(r'^resource/add/$', ResourceAddView.as_view(), name="add_resource"),
    url(r'^resource/(?P<resource_id>\d+)/edit/$', ResourceEditView.as_view(), name="edit_resource"),
    url(r'^resource/(?P<object_id>\d+)/delete/$', ResourceDeleteView.as_view(), name="delete_resource"),
    url(r'^resource/(?P<object_id>\d+)/download/$', ResourceDownloadView.as_view(), name="download_resource"),
    url(r'^solutions/$', SolutionsListView.as_view(), name="solutions"),
    url(r'^solution/add/$', SolutionAddView.as_view(), name="add_solution"),
    url(r'^solution/(?P<solution_id>.+)/edit/$', SolutionEditView.as_view(), name="edit_solution"),
    # NOTE(review): the *DeleteView objects passed bare (no .as_view()) here
    # and below are presumably function-based views -- confirm in .views.
    url(r'^solution/(?P<solution_id>.+)/delete/$', SolutionDeleteView, name="delete_solution"),
    url(r'^solution/(?P<solution_id>.+)/source/$', SolutionShowSourceView.as_view(), name="solution_source"),
    url(r'^solution/(?P<solution_id>.+)/download/$', SolutionDownloadView.as_view(), name="download_solution"),
    url(r'^graders/$', GradersListView.as_view(), name="graders"),
    url(r'^grader/add/$', GraderAddView.as_view(), name="add_grader"),
    url(r'^grader/(?P<grader_id>.+)/edit/$', GraderEditView.as_view(), name="edit_grader"),
    url(r'^grader/(?P<grader_id>.+)/delete/$', GraderDeleteView, name="delete_grader"),
    url(r'^grader/(?P<grader_id>.+)/source/$', GraderShowSourceView.as_view(), name="grader_source"),
    url(r'^grader/(?P<grader_id>.+)/download/$', GraderDownloadView.as_view(), name="download_grader"),
    url(r'^testcases/$', TestCasesListView.as_view(), name="testcases"),
    url(r'^testcase/add/$', TestCaseAddView.as_view(), name="add_testcase"),
    url(r'^testcase/(?P<testcase_id>.+)/edit/$', TestCaseEditView.as_view(), name="edit_testcase"),
    url(r'^testcase/(?P<testcase_id>.+)/delete/$', TestCaseDeleteView, name="delete_testcase"),
    url(r'^testcase/(?P<testcase_id>.+)/input/$', TestCaseInputDownloadView.as_view(), name="testcase_input"),
    url(r'^testcase/(?P<testcase_id>.+)/output/$', TestCaseOutputDownloadView.as_view(), name="testcase_output"),
    # NOTE(review): the next two routes share name="generate_testcase".
    url(r'^testcase/(?P<testcase_id>.+)/generate/$', TestCaseGenerateView.as_view(), name="generate_testcase"),
    url(r'^testcase/generate/all/$', TestCaseGenerateView.as_view(), name="generate_testcase"),
    url(r'^testcase/(?P<testcase_id>.+)/details/$', TestCaseDetailsView.as_view(), name="testcase_details"),
    url(r'^subtasks/$', SubtasksListView.as_view(), name="subtasks"),
    url(r'^subtask/add/$', SubtaskAddView.as_view(), name="add_subtask"),
    url(r'^subtask/(?P<subtask_id>.+)/details/$', SubtaskDetailsView.as_view(), name="subtask_details"),
    url(r'^subtask/(?P<subtask_id>.+)/delete/$', SubtaskDeleteView, name="delete_subtask"),
    url(r'^subtask/(?P<subtask_id>.+)/edit/$', SubtaskEditView.as_view(), name="edit_subtask"),
    url(r'^validators/$', ValidatorsListView.as_view(), name="validators"),
    url(r'^validator/(?P<validator_id>.+)/edit/$', ValidatorEditView.as_view(), name="edit_validator"),
    url(r'^validator/(?P<validator_id>.+)/delete/$', ValidatorDeleteView, name="delete_validator"),
    url(r'^validator/(?P<validator_id>.+)/source/$', ValidatorShowSourceView.as_view(), name="validator_source"),
    url(r'^validator/add/$', ValidatorAddView.as_view(), name="add_validator"),
    url(r'^validator/(?P<validator_id>.+)/download/$', ValidatorDownloadView.as_view(), name="download_validator"),
    url(r'^generators/$', GeneratorsListView.as_view(), name="generators"),
    url(r'^generator/(?P<generator_id>.+)/edit/$', GeneratorEditView.as_view(), name="edit_generator"),
    url(r'^generator/(?P<generator_id>.+)/delete/$', GeneratorDeleteView, name="delete_generator"),
    url(r'^generator/(?P<generator_id>.+)/source/$', GeneratorShowSourceView.as_view(), name="generator_source"),
    url(r'^generator/add/$', GeneratorAddView.as_view(), name="add_generator"),
    url(r'^generator/(?P<generator_id>.+)/generate-testcases/$', GeneratorEnableView.as_view(),
        name="enable_generator"),
    url(r'^generator/(?P<generator_id>.+)/delete-testcases/$', GeneratorDisableView.as_view(),
        name="disable_generator"),
    url(r'^checkers/$', CheckerListView.as_view(), name="checkers"),
    # NOTE(review): the '$$' in the six checker patterns below doubles the end
    # anchor; harmless ('$' is zero-width) but should be a single '$'.
    url(r'^checker/add/$$', CheckerAddView.as_view(), name="add_checker"),
    url(r'^checker/(?P<checker_id>.+)/activate/$$', CheckerActivateView.as_view(), name="activate_checker"),
    url(r'^checker/(?P<checker_id>.+)/delete/$$', CheckerDeleteView, name="delete_checker"),
    url(r'^checker/(?P<checker_id>.+)/edit/$$', CheckerEditView.as_view(), name="edit_checker"),
    url(r'^checker/(?P<checker_id>.+)/source/$$', CheckerShowSourceView.as_view(), name="checker_source"),
    url(r'^checker/(?P<checker_id>.+)/download/$$', CheckerDownloadView.as_view(), name="download_checker"),
    url(r'^pull/$', PullBranchView.as_view(), name="pull_branch"),
    url(r'^commit/$', CommitWorkingCopy.as_view(), name="commit"),
    url(r'^discard/$', DiscardWorkingCopy.as_view(), name="discard"),
    url(r'^conflicts/$', ConflictsListView.as_view(), name="conflicts"),
    url(r'^conflict/(?P<conflict_id>\d+)/$', ResolveConflictView.as_view(), name="resolve_conflict"),
    url(r'files/list/$', ProblemFilesView.as_view(), name="files"),
    url(r'files/add/$', ProblemFileAddView.as_view(), name="add_file"),
    url(r'^files/(?P<file_id>\d+)/edit/$', ProblemFileEditView.as_view(), name="edit_file"),
    url(r'^files/(?P<file_id>\d+)/delete/$', ProblemFileDeleteView.as_view(), name="delete_file"),
    url(r'^files/(?P<file_id>\d+)/source/$', ProblemFileShowSourceView.as_view(), name="file_source"),
    url(r'^files/(?P<file_id>\d+)/download/$', ProblemFileDownloadView.as_view(), name="download_file"),
] + (branch_mode_urls if not settings.DISABLE_BRANCHES else []) , None, None)
# Top-level routes.
urlpatterns = [
    url(r'^$', ProblemsListView.as_view(), name="problems"),
    url(r'^problem/(?P<problem_code>[^\/]+)/(?P<revision_slug>\w{1,40})/', problem_urls),
    url(r'^problem/add/$', ProblemAddView.as_view(), name="add_problem"),
]
| 0 | 0 | 0 |
d387028c95dac2658bdf0a15bf746ca7f17f5260 | 28,050 | py | Python | stainnorm/models.py | stes/fan | 001038a8f094d12c568eb29db751618738fd190f | [
"MIT"
] | 15 | 2017-09-14T18:51:42.000Z | 2021-03-07T03:11:41.000Z | stainnorm/models.py | stes/fan | 001038a8f094d12c568eb29db751618738fd190f | [
"MIT"
] | 6 | 2017-11-30T19:44:03.000Z | 2020-11-18T05:27:24.000Z | stainnorm/models.py | stes/fan | 001038a8f094d12c568eb29db751618738fd190f | [
"MIT"
] | 2 | 2019-03-11T01:20:13.000Z | 2022-01-09T02:51:45.000Z | """ Model collection from first batch of experiments
Notes
-----
Note that all models collected here are designed for fixed 256x256 patch normalization.
Predefined shape helps theano to build a more computationally efficient graph, so highly
recommended to use this during training time.
For testing, it is of course better to have undefined spatial dimensions, however this is
(right now) not the primary goal of this code collection and hence not implemented.
Contributing
------------
For later comparison of approaches, please continue with numbering and *do not* rename
existing functions, as this will confuse loading of stored weights. Wherever datasets
and/or weight files are used, a suitable hash has to be provided.
Resources
----------
- VGG16 weights : [["https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg16.pkl"]]
License : Non-commercial use only
md5sum : 57858ec9bb7435e99c78a0520e6c5d3e
- VGG19 weights : [["https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg19_normalized.pkl"]]
License : Non-commercial use only
md5sum : cb8ee699c50a64f8fef2a82bfbb307c5
Changelog
---------
v0.3 (..-01-2017)
- Added bilinear upsampling, especially used in the feature path
v0.2 (09-12-2016)
- Added the Feature-Aware Normalization (FAN) layer, replacing the final batch norm layer
in Baseline 1
- The model is up and running, however learning the FAN parameters is rather slow and not
yet evaluated, but it seems to work :-)
v0.1 (05-12-2016)
- Added four baseline models with varying amount of model complexity
- Baselines 1 and 2 confirm that the batch norm layer on the output alone has a huge
impact on system performance
- Baseline 3 is the old approach using the first VGG block
"""
__author__ = "sschneider"
__email__ = "steffen.schneider@rwth-aachen.de"
from collections import OrderedDict
import pickle
import lasagne as nn
from lasagne.layers import InputLayer, NonlinearityLayer, BatchNormLayer
try:
from lasagne.layers.dnn import Pool2DDNNLayer as PoolLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
except:
print("Failed to use GPU implementations of Conv and Pool Layers")
from lasagne.layers import Pool2DLayer as PoolLayer
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import Upscale2DLayer
from lasagne.layers import ExpressionLayer, TransposedConv2DLayer
from lasagne.nonlinearities import rectify, linear
from lasagne.layers import get_output_shape
from . import layers, tools
from .layers import fan_module_simple, fan_module_improved
from .layers import get_features, normalize, transpose
from .featurenorm import FeatureAwareNormLayer
# Public names exported on star-import: one entry per model builder that
# this module provides.
__all__ = ['build_baseline1_small',
           'build_baseline2_feats',
           'build_baseline3_vgg',
           'build_baseline4_fan',
           'build_baseline5_fan',
           'build_baseline6_fan_fan',
           'build_resnet7_fan',
           'build_baseline8_fan_bilinear',
           'build_baseline9_fan_fan_bilinear',
           'build_finetuned1_fan',
           'build_finetuned2_fan',
           'build_big_fan',
           'build_fan_reworked']
###
# Small Baseline Model
def build_baseline1_small(input_var):
    """Build the minimal baseline model.

    Standardizes the input, applies a single linear 1x1 convolution, and
    finishes with a batch-norm layer whose shift/scale are initialized to
    128/25.

    Parameters
    ----------
    input_var : symbolic variable fed into the input layer.

    Returns
    -------
    (output_layer, layers) : the final lasagne layer and an OrderedDict of
        every named layer in construction order.
    """
    layers_by_name = OrderedDict()

    def register(name, layer):
        # Record `layer` under `name` and hand it back for chaining.
        layers_by_name[name] = layer
        return layer

    layer = register('input', InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE),
                                         input_var=input_var))
    layer = register('norm', ExpressionLayer(layer, lambda x: normalize(x)))
    # Single linear 1x1 convolution mapping 3 channels to 3 channels.
    layer = register("middle", ConvLayer(layer, 3, 1, nonlinearity=linear))
    # Final batch norm, shift/scale initialized to 128/25.
    layer = register("bn", BatchNormLayer(layer, beta=nn.init.Constant(128.),
                                          gamma=nn.init.Constant(25.)))
    return layer, layers_by_name
def build_baseline2_feats(input_var, nb_filter=96):
    """Baseline with a learned 1x1 feature transform.

    The standardized input is lifted into an `nb_filter`-dimensional feature
    space by two 1x1 conv/BN/ReLU stages, passed through a linear 1x1
    "middle" convolution, and projected back with transposed convolutions
    that reuse the encoder weights.  A final batch-norm layer (shift 128,
    scale 25) produces the output.

    Returns the final layer and an OrderedDict of all named layers.
    """
    layers_by_name = OrderedDict()

    def register(name, layer):
        layers_by_name[name] = layer
        return layer

    layer = register('input', InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE),
                                         input_var=input_var))
    layer = register('norm', ExpressionLayer(layer, lambda x: normalize(x)))

    # Encoder: two 1x1 conv -> batch-norm -> ReLU stages.
    for stage in ("1_1", "1_2"):
        layer = register("conv" + stage, ConvLayer(layer, nb_filter, 1, pad=0,
                                                   flip_filters=False,
                                                   nonlinearity=linear))
        layer = register("bn" + stage, BatchNormLayer(layer))
        layer = register("relu" + stage, NonlinearityLayer(layer, nonlinearity=rectify))

    # Linear bottleneck in feature space.
    layer = register("middle", ConvLayer(layer, nb_filter, 1, nonlinearity=linear))

    # Decoder: transposed convolutions tied to the encoder weights,
    # mirroring the encoder order.
    for stage in ("1_2", "1_1"):
        conv = layers_by_name["conv" + stage]
        layer = register("deconv" + stage,
                         TransposedConv2DLayer(layer, conv.input_shape[1],
                                               conv.filter_size, stride=conv.stride,
                                               crop=conv.pad, W=conv.W,
                                               flip_filters=not conv.flip_filters,
                                               nonlinearity=None))

    layer = register("bn", BatchNormLayer(layer, beta=nn.init.Constant(128.),
                                          gamma=nn.init.Constant(25.)))
    return layer, layers_by_name
###
# VGG + LSTM Model
###
# FULL LSTM Model
###
# Model with new Feature Norm Layer
###
# FULL LSTM Model with Bilinear upsampling
###
# FULL LSTM Model
###
# FULL LSTM Model
###
# FULL LSTM Model
###
# FULL Feature Aware Normalization Model
| 43.965517 | 139 | 0.647986 | """ Model collection from first batch of experiments
Notes
-----
Note that all models collected here are designed for fixed 256x256 patch normalization.
Predefined shape helps theano to build a more computationally efficient graph, so highly
recommended to use this during training time.
For testing, it is of course better to have undefined spatial dimensions, however this is
(right now) not the primary goal of this code collection and hence not implemented.
Contributing
------------
For later comparison of approaches, please continue with numbering and *do not* rename
existing functions, as this will confuse loading of stored weights. Wherever datasets
and/or weight files are used, a suitable hash has to be provided.
Resources
----------
- VGG16 weights : [["https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg16.pkl"]]
License : Non-commercial use only
md5sum : 57858ec9bb7435e99c78a0520e6c5d3e
- VGG19 weights : [["https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg19_normalized.pkl"]]
License : Non-commercial use only
md5sum : cb8ee699c50a64f8fef2a82bfbb307c5
Changelog
---------
v0.3 (..-01-2017)
- Added bilinear upsampling, especially used in the feature path
v0.2 (09-12-2016)
- Added the Feature-Aware Normalization (FAN) layer, replacing the final batch norm layer
in Baseline 1
- The model is up and running, however learning the FAN parameters is rather slow and not
yet evaluated, but it seems to work :-)
v0.1 (05-12-2016)
- Added four baseline models with varying amount of model complexity
- Baselines 1 and 2 confirm that the batch norm layer on the output alone has a huge
impact on system performance
- Baseline 3 is the old approach using the first VGG block
"""
__author__ = "sschneider"
__email__ = "steffen.schneider@rwth-aachen.de"
from collections import OrderedDict
import pickle
import lasagne as nn
from lasagne.layers import InputLayer, NonlinearityLayer, BatchNormLayer
try:
from lasagne.layers.dnn import Pool2DDNNLayer as PoolLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
except:
print("Failed to use GPU implementations of Conv and Pool Layers")
from lasagne.layers import Pool2DLayer as PoolLayer
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import Upscale2DLayer
from lasagne.layers import ExpressionLayer, TransposedConv2DLayer
from lasagne.nonlinearities import rectify, linear
from lasagne.layers import get_output_shape
from . import layers, tools
from .layers import fan_module_simple, fan_module_improved
from .layers import get_features, normalize, transpose
from .featurenorm import FeatureAwareNormLayer
# Public names exported on star-import: one entry per model builder that
# this module provides.
__all__ = ['build_baseline1_small',
           'build_baseline2_feats',
           'build_baseline3_vgg',
           'build_baseline4_fan',
           'build_baseline5_fan',
           'build_baseline6_fan_fan',
           'build_resnet7_fan',
           'build_baseline8_fan_bilinear',
           'build_baseline9_fan_fan_bilinear',
           'build_finetuned1_fan',
           'build_finetuned2_fan',
           'build_big_fan',
           'build_fan_reworked']
###
# Small Baseline Model
def build_baseline1_small(input_var):
    """Build the minimal baseline model.

    Standardizes the input, applies a single linear 1x1 convolution, and
    finishes with a batch-norm layer whose shift/scale are initialized to
    128/25.

    Parameters
    ----------
    input_var : symbolic variable fed into the input layer.

    Returns
    -------
    (output_layer, layers) : the final lasagne layer and an OrderedDict of
        every named layer in construction order.
    """
    layers_by_name = OrderedDict()

    def register(name, layer):
        # Record `layer` under `name` and hand it back for chaining.
        layers_by_name[name] = layer
        return layer

    layer = register('input', InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE),
                                         input_var=input_var))
    layer = register('norm', ExpressionLayer(layer, lambda x: normalize(x)))
    # Single linear 1x1 convolution mapping 3 channels to 3 channels.
    layer = register("middle", ConvLayer(layer, 3, 1, nonlinearity=linear))
    # Final batch norm, shift/scale initialized to 128/25.
    layer = register("bn", BatchNormLayer(layer, beta=nn.init.Constant(128.),
                                          gamma=nn.init.Constant(25.)))
    return layer, layers_by_name
def build_baseline2_feats(input_var, nb_filter=96):
    """Baseline with a learned 1x1 feature transform.

    The standardized input is lifted into an `nb_filter`-dimensional feature
    space by two 1x1 conv/BN/ReLU stages, passed through a linear 1x1
    "middle" convolution, and projected back with transposed convolutions
    that reuse the encoder weights.  A final batch-norm layer (shift 128,
    scale 25) produces the output.

    Returns the final layer and an OrderedDict of all named layers.
    """
    layers_by_name = OrderedDict()

    def register(name, layer):
        layers_by_name[name] = layer
        return layer

    layer = register('input', InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE),
                                         input_var=input_var))
    layer = register('norm', ExpressionLayer(layer, lambda x: normalize(x)))

    # Encoder: two 1x1 conv -> batch-norm -> ReLU stages.
    for stage in ("1_1", "1_2"):
        layer = register("conv" + stage, ConvLayer(layer, nb_filter, 1, pad=0,
                                                   flip_filters=False,
                                                   nonlinearity=linear))
        layer = register("bn" + stage, BatchNormLayer(layer))
        layer = register("relu" + stage, NonlinearityLayer(layer, nonlinearity=rectify))

    # Linear bottleneck in feature space.
    layer = register("middle", ConvLayer(layer, nb_filter, 1, nonlinearity=linear))

    # Decoder: transposed convolutions tied to the encoder weights,
    # mirroring the encoder order.
    for stage in ("1_2", "1_1"):
        conv = layers_by_name["conv" + stage]
        layer = register("deconv" + stage,
                         TransposedConv2DLayer(layer, conv.input_shape[1],
                                               conv.filter_size, stride=conv.stride,
                                               crop=conv.pad, W=conv.W,
                                               flip_filters=not conv.flip_filters,
                                               nonlinearity=None))

    layer = register("bn", BatchNormLayer(layer, beta=nn.init.Constant(128.),
                                          gamma=nn.init.Constant(25.)))
    return layer, layers_by_name
###
# VGG + LSTM Model
def build_baseline3_vgg(input_var, nb_filter=64, weights_file="vgg16.pkl"):
    """VGG-initialized baseline with multi-scale feature aggregation.

    The first VGG16 conv block (conv1_1/conv1_2, weights loaded from
    `weights_file`) encodes the standardized input, followed by average
    pooling and a linear 1x1 "middle" convolution.  FAN modules aggregate
    VGG conv4_1 and conv3_3 features at two scales; tied transposed
    convolutions decode back to RGB and a final batch norm (shift 128 /
    scale 25) produces the output.

    Parameters
    ----------
    input_var : symbolic variable fed into the input layer.
    nb_filter : encoder width; must stay 64 to match the pretrained VGG16
        conv1 weights loaded below -- TODO confirm before overriding.
    weights_file : path to the pickled VGG16 parameter dump
        (previously hard-coded to "vgg16.pkl"; default unchanged).

    Returns
    -------
    (output_layer, net) : final lasagne layer and OrderedDict of named layers.
    """
    net = OrderedDict()

    def get_weights(file):
        # (W, b) pairs of the first two conv layers from a pickled VGG16 dump.
        with open(file, "rb") as f:
            vgg16 = pickle.load(f, encoding="latin-1")
            weights = vgg16['param values']
        return weights[0], weights[1], weights[2], weights[3]

    # Input, standardization
    last = net['input'] = InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    # Build the feature encoder once and tap it twice.  (The original called
    # get_features() a second time for conv3_3, constructing a duplicate
    # extractor graph; assumes get_features is deterministic / pretrained so
    # the outputs are identical -- TODO confirm.)
    features = get_features(last)
    net['features_s8'] = features["conv4_1"]
    net['features_s4'] = features["conv3_3"]

    # Pretrained encoder: first VGG16 conv block.
    W1, b1, W2, b2 = get_weights(weights_file)
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 3, pad=1, flip_filters=False,
                                      nonlinearity=linear, W=W1, b=b1)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 3, pad=1, flip_filters=False,
                                      nonlinearity=linear, W=W2, b=b2)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["pool"] = PoolLayer(last, 2, mode="average_exc_pad")

    # Modified middle part.
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)

    # Feature aggregation at multiple scales.  (Was hard-coded nb_filter=64;
    # now follows the encoder width for consistency -- default unchanged.)
    last = fan_module_simple(last, net, "s8", net['features_s8'], nb_filter=nb_filter, scale=4)
    last = fan_module_simple(last, net, "s4", net['features_s4'], nb_filter=nb_filter, scale=2)

    # Decoder with weights tied to the encoder.
    last = net["unpool"] = Upscale2DLayer(last, 2)
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)

    last = net["bn"] = BatchNormLayer(last, beta=nn.init.Constant(128.), gamma=nn.init.Constant(25.))
    return last, net
###
# FULL LSTM Model
def build_baseline4_fan(input_var, nb_filter=96, input_size=(None,3,tools.INP_PSIZE,tools.INP_PSIZE)):
    """1x1-conv autoencoder baseline with two FAN aggregation stages.

    A two-stage 1x1 conv/BN/ReLU encoder lifts the standardized input to
    `nb_filter` channels; after a linear "middle" convolution, FAN modules
    aggregate VGG conv4_1 (scale 8) and conv3_3 (scale 4) features.  Tied
    transposed convolutions decode back and a final batch norm (shift 128 /
    scale 25) produces the output.

    Parameters
    ----------
    input_var : symbolic variable fed into the input layer.
    nb_filter : number of feature channels in the encoder/middle layers.
    input_size : input layer shape (batch, channels, height, width).

    Returns
    -------
    (output_layer, net) : final lasagne layer and OrderedDict of named layers.
    """
    net = OrderedDict()

    # Input, standardization
    last = net['input'] = InputLayer(input_size, input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    # Build the feature encoder once and tap it twice.  (The original called
    # get_features() twice, constructing two identical extractor graphs;
    # assumes get_features is deterministic / pretrained -- TODO confirm.)
    features = get_features(last)
    net['features_s8'] = features["conv4_1"]
    net['features_s4'] = features["conv3_3"]

    # Encoder: two 1x1 conv -> BN -> ReLU stages.
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)

    # Modified middle part.
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)

    # Feature aggregation at scales 8 and 4, each preceded by batch norm.
    last = net["bn1"] = BatchNormLayer(last)
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8)
    last = net["bn2"] = BatchNormLayer(last)
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4)

    # Decoder tied to the encoder convolutions; final BN produces the output.
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    last = net["bn"] = BatchNormLayer(last, beta=nn.init.Constant(128.), gamma=nn.init.Constant(25.))
    return last, net
###
# Model with new Feature Norm Layer
def build_baseline5_fan(input_var):
    """Baseline 1 with the novel FAN layer.

    VGG conv4_1 is used for feature extraction; an argmax mask over the
    8x-upscaled features drives a FeatureAwareNormLayer on the output.

    :param input_var: theano input variable holding the image batch
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    # fix: the docstring above used to sit after the imports, so it was a
    # plain expression statement and never became the function's __doc__
    # TODO remove these imports + move relevant parts to layers.py once
    # everything is up and running
    import theano.tensor as T
    import numpy as np
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features'] = Upscale2DLayer(net["features_s8"], 8)
    # one-hot mask marking the argmax feature channel at each spatial position
    net['mask'] = ExpressionLayer(net["features"], lambda x: 1. * T.eq(x, x.max(axis=1, keepdims=True)))
    last = net["middle"] = ConvLayer(last, 3, 1, nonlinearity=linear)
    last = net["fan"] = FeatureAwareNormLayer((last, net['mask']), beta=nn.init.Constant(np.float32(128.)),
                                              gamma=nn.init.Constant(np.float32(25.)))
    return last, net
def build_baseline6_fan_fan(input_var, nb_filter=96):
    """Encoder/decoder with two FAN modules and FeatureAwareNormLayer
    normalization driven by an argmax mask over the conv4_1 features.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # TODO: move these local imports to module level once stable
    import theano.tensor as T
    import numpy as np
    # Input, standardization
    last = net['input'] = InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]
    # one-hot mask marking the argmax channel of the 8x-upscaled features
    net['mask'] = ExpressionLayer(Upscale2DLayer(net["features_s8"], 8),
                                  lambda x: 1. * T.eq(x, x.max(axis=1, keepdims=True)))
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales; a mask-driven norm precedes
    # each FAN module
    last = net["fan1"] = FeatureAwareNormLayer((last, net['mask']))
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8)
    last = net["fan2"] = FeatureAwareNormLayer((last, net['mask']))
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4)
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    # final mask-aware norm shifts the output back to image range
    last = net["fan"] = FeatureAwareNormLayer((last, net['mask']), beta=nn.init.Constant(np.float32(128.)),
                                              gamma=nn.init.Constant(np.float32(25.)))
    return last, net
def build_resnet7_fan(input_var, nb_filter=96):
    """FAN model variant whose feature taps come from a pretrained ResNet-50
    (res3d/res2c branch outputs) instead of VGG.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    # Note: normalization should not be necessary for the ResNet model!
    # last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    res50 = get_resnet50(last)
    net['features_s8'] = res50['res3d_branch2c']
    net['features_s4'] = res50['res2c_branch2c']
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales
    last = net["bn1"] = BatchNormLayer(last)
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8)
    last = net["bn2"] = BatchNormLayer(last)
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4)
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    # shift output back to image range (mean ~128, std ~25)
    last = net["bn"] = BatchNormLayer(last, beta=nn.init.Constant(128.), gamma=nn.init.Constant(25.))
    return last, net
###
# FULL LSTM Model with Bilinar upsampling
def build_baseline8_fan_bilinear(input_var, nb_filter=96):
    """Same architecture as the two-scale FAN baseline, but the FAN modules
    upsample their feature maps bilinearly instead of by repetition.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales (bilinear upsampling)
    last = net["bn1"] = BatchNormLayer(last)
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8, upsampling_strategy="bilinear")
    last = net["bn2"] = BatchNormLayer(last)
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4, upsampling_strategy="bilinear")
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    # shift output back to image range (mean ~128, std ~25)
    last = net["bn"] = BatchNormLayer(last, beta=nn.init.Constant(128.), gamma=nn.init.Constant(25.))
    return last, net
def build_baseline9_fan_fan_bilinear(input_var, nb_filter=96):
    """FAN/FAN model with bilinear upsampling both for the argmax mask and
    inside the FAN modules.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # TODO: move these local imports to module level once stable
    import theano.tensor as T
    import numpy as np
    # Input, standardization
    last = net['input'] = InputLayer((None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]
    # one-hot mask over bilinearly upsampled conv4_1 features
    net['mask'] = ExpressionLayer(layers.upsample(net["features_s8"], 8, mode="bilinear"),
                                  lambda x: 1. * T.eq(x, x.max(axis=1, keepdims=True)))
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales, mask-aware norms in between
    last = net["fan1"] = FeatureAwareNormLayer((last, net['mask']))
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8, upsampling_strategy="bilinear")
    last = net["fan2"] = FeatureAwareNormLayer((last, net['mask']))
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4, upsampling_strategy="bilinear")
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    # final mask-aware norm shifts the output back to image range
    last = net["fan"] = FeatureAwareNormLayer((last, net['mask']), beta=nn.init.Constant(np.float32(128.)),
                                              gamma=nn.init.Constant(np.float32(25.)))
    return last, net
###
# FULL LSTM Model
def build_finetuned1_fan(input_var, nb_filter=96, input_size=(None,3,tools.INP_PSIZE,tools.INP_PSIZE)):
    """Fine-tuning FAN model with non-updating batch norms, pretrained
    weights from a hard-coded run directory, and debug decoder branches
    attached after each FAN module.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :param input_size: input shape (batch, channels, height, width)
    :return: (output_layer, net, debug1, debug2) where debug1/debug2 decode
        the intermediate FAN outputs using the shared decoder weights
    """
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer(input_size, input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    # TODO this is clearly a bug. only for compatibility reasons. remove once all weights are converted
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales; keep handles to the FAN
    # outputs so the debug decoders below can tap them
    last = net["bn1"] = layers.NonUpdateBatchNormLayer(last)
    last = fan1 = fan_module_simple(last, net, "s8", net['features_s8'],
                                    nb_filter=nb_filter, scale=8)
    last = net["bn2"] = layers.NonUpdateBatchNormLayer(last)
    last = fan2 = fan_module_simple(last, net, "s4", net['features_s4'],
                                    nb_filter=nb_filter, scale=4)
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    last = net["bn"] = layers.FixedBatchNormLayer(last)

    ## for debugging: decoders after each FAN module
    def debug_connection(branch):
        # decode an intermediate representation with the shared decoder
        # parameters (tied biases, fixed BN statistics)
        branch = transpose(branch, net["conv1_2"], nonlinearity=None, b=net['deconv1_2'].b)
        branch = transpose(branch, net["conv1_1"], nonlinearity=None, b=net['deconv1_1'].b)
        branch = layers.FixedBatchNormLayer(branch, beta=net['bn'].beta, gamma=net['bn'].gamma, mean=net['bn'].mean, inv_std=net['bn'].inv_std)
        return branch

    debug1 = debug_connection(fan1)
    debug2 = debug_connection(fan2)
    weights = "170123_runs/run_H.E.T._1485012575.4045253/3.npz"
    # load pretrained weights in place; the return value was unused, so the
    # dead `data =` binding has been dropped
    tools.load_weights(last, weights)
    return last, net, debug1, debug2
###
# FULL LSTM Model
def build_finetuned2_fan(input_var, nb_filter=96, input_size=(None,3,tools.INP_PSIZE,tools.INP_PSIZE)):
    """Fine-tuning FAN model with non-updating batch norms and pretrained
    weights loaded from a hard-coded run directory.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :param input_size: input shape (batch, channels, height, width)
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer(input_size, input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    # TODO this is clearly a bug. only for compatibility reasons. remove once all weights are converted
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales
    last = net["bn1"] = layers.NonUpdateBatchNormLayer(last)
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8)
    last = net["bn2"] = layers.NonUpdateBatchNormLayer(last)
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4)
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    last = net["bn"] = layers.FixedBatchNormLayer(last)
    weights = "170123_runs/run_H.E.T._1485012575.4045253/3.npz"
    # load pretrained weights in place; the return value was unused, so the
    # dead `data =` binding has been dropped
    tools.load_weights(last, weights)
    return last, net
###
# FULL LSTM Model
def build_big_fan(input_var, nb_filter=96, input_size=(None,3,tools.INP_PSIZE,tools.INP_PSIZE)):
    """Deeper FAN variant: two FAN modules at scale 8 followed by two at
    scale 4, each preceded by a non-updating batch norm.

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder/middle convolutions
    :param input_size: input shape (batch, channels, height, width)
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer(input_size, input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    f = get_features(last)
    net['features_s8'] = f["conv4_1"]
    net['features_s4'] = f["conv3_3"]
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)
    # feature aggregation at multiple scales
    last = net["bn1"] = layers.NonUpdateBatchNormLayer(last)
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8)
    # fix: this layer was also stored under "bn1", silently dropping the
    # first bn1 entry from `net` (bn3/bn4/bn5 follow, so "bn2" was intended)
    last = net["bn2"] = layers.NonUpdateBatchNormLayer(last)
    # NOTE(review): both scale-8 FAN modules register under the prefix "s8";
    # if fan_module_simple stores layers in `net` keyed by that prefix the
    # second call shadows the first -- confirm whether a distinct prefix
    # (e.g. "s8_2") is intended.
    last = fan_module_simple(last, net, "s8", net['features_s8'],
                             nb_filter=nb_filter, scale=8)
    last = net["bn3"] = layers.NonUpdateBatchNormLayer(last)
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4)
    last = net["bn4"] = layers.NonUpdateBatchNormLayer(last)
    last = fan_module_simple(last, net, "s4", net['features_s4'],
                             nb_filter=nb_filter, scale=4)
    last = net["bn5"] = layers.NonUpdateBatchNormLayer(last)
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    return last, net
###
# FULL Feature Aware Normalization Model
def build_fan_reworked(input_var, nb_filter=16, input_size=(None,3,tools.INP_PSIZE,tools.INP_PSIZE)):
    """Reworked FAN model: three improved FAN modules fed from conv4_4,
    conv4_1 and conv3_3, each preceded by a parameter-free non-updating
    batch norm (beta=None, gamma=None).

    :param input_var: theano input variable holding the image batch
    :param nb_filter: number of filters for the encoder convolutions
    :param input_size: input shape (batch, channels, height, width)
    :return: (output_layer, net) with net an OrderedDict of named layers
    """
    net = OrderedDict()
    # Input, standardization
    last = net['input'] = InputLayer(input_size, input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))
    # load feature encoder
    feats = get_features(last)
    net['features_s8_1'] = feats["conv4_4"]
    net['features_s8_2'] = feats["conv4_1"]
    net['features_s4'] = feats["conv3_3"]
    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last, nb_filter, 1, pad=0, flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = layers.NonUpdateBatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    # feature aggregation at multiple scales
    last = net["bn1"] = layers.NonUpdateBatchNormLayer(last, beta=None, gamma=None)
    last = fan_module_improved(last, net, "s8_1", net['features_s8_1'],
                               nb_filter=nb_filter, scale=8, upsampling_strategy="repeat")
    last = net["bn2"] = layers.NonUpdateBatchNormLayer(last, beta=None, gamma=None)
    last = fan_module_improved(last, net, "s8_2", net['features_s8_2'],
                               nb_filter=nb_filter, scale=8, upsampling_strategy="repeat")
    last = net["bn3"] = layers.NonUpdateBatchNormLayer(last, beta=None, gamma=None)
    last = fan_module_improved(last, net, "s4", net['features_s4'],
                               nb_filter=nb_filter, scale=4, upsampling_strategy="repeat")
    # unclear if Fixed, NonUpdate or Regular Layer will work best...
    last = net["bn4"] = BatchNormLayer(last)
    # Decoder as before
    last = net["deconv1_2"] = transpose(last, net["conv1_2"], nonlinearity=None)
    last = net["deconv1_1"] = transpose(last, net["conv1_1"], nonlinearity=None)
    return last, net
| 21,675 | 0 | 245 |
e379b58994df8d355f40fc56111e08de7face83b | 5,942 | py | Python | feed.py | jakkso/court-rss-parser | 5061afd66c25991a55f0c296eed54b34365a7a5c | [
"MIT"
] | null | null | null | feed.py | jakkso/court-rss-parser | 5061afd66c25991a55f0c296eed54b34365a7a5c | [
"MIT"
] | 1 | 2021-06-01T22:22:05.000Z | 2021-06-01T22:22:05.000Z | feed.py | jakkso/court-rss-parser | 5061afd66c25991a55f0c296eed54b34365a7a5c | [
"MIT"
] | null | null | null | """
Contains Feed, which handles parsing the rss feed and User, which handles messaging
"""
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from bs4 import BeautifulSoup
import feedparser as fp
from jinja2 import Environment, PackageLoader, select_autoescape
from requests import get
from configuration import Config
from database import Database
class Feed(Database):
    """
    Uses Database's methods in conjunction with its own to parse feed url,
    fetching and parsing page to plain text, searching each text for specific
    terms and email the correct users if the terms are found.
    """
    # Scottish Court of Session court-rolls RSS feed.
    URL = 'feed://www.scotcourts.gov.uk/feeds/court-of-session-court-rolls'

    def new_urls(self):
        """
        Return feed entry URLs that are not yet stored in the database.
        :return: list of str URLs
        """
        page = fp.parse(self.URL)['entries']
        return [item['link'] for item in page if item['link'] not in
                self.get_urls()]

    def refresh(self):
        """
        Iterates through new_urls, downloading then searching through resulting text.
        Note: text and search terms are upper case, to simplify things.
        """
        new_urls = self.new_urls()
        for url in new_urls:
            print(f'Adding {url}')
            html, text = self._downloader(url)
            self.add_url_html(url, html)
            for user in self.users():
                hits = self._text_search(text.upper(), user, url)
                if hits:
                    print(f'Sending alert to {user.name}')
                    user.send_email(hits, url)

    def users(self):
        """
        :yield: User obj containing name, email address and list of
        search_terms
        """
        users = self.get_users()
        for user in users:
            name, email_address = user
            search_terms = self.get_search_terms(email_address)
            yield User(name, email_address, search_terms)

    def _text_search(self, text, user, url):
        """
        Searches through text for any of the user's search terms; records the
        issue against the user for each term found.
        :param text: str, block of text from Court Roll Issue (upper-cased)
        :param user: User obj
        :param url: str Court Roll Issue URL
        :return: list of search terms found in the text
        """
        search_term_hits = []
        for term in user.search_terms:
            if term in text:
                search_term_hits.append(term)
                # NOTE(review): this records one row per matching term --
                # confirm multiple (user, url) rows are intended
                self.add_user_issue(user.email_address, url)
        return search_term_hits

    @staticmethod
    def _downloader(url):
        """
        Uses BeautifulSoup to extract a block of text through which to search
        :param url: str
        :return: tuple, html and plain text of Court Roll issue downloaded
        """
        soup = BeautifulSoup(get(url).content, 'html.parser')
        # the page marks the roll body with the courtRollContent css class
        selection = soup.select('.courtRollContent')[0]
        html, text = selection.prettify(), selection.get_text()
        return html, text
class User:
    """
    Lightweight record of a subscriber -- name, email address and the
    search terms they care about -- plus the machinery to notify them
    about a court-roll issue by email.
    """
    __slots__ = ['name', 'email_address', 'search_terms']

    def __init__(self, name, email_address, search_terms):
        """
        :param name: str person's name
        :param email_address: str person's email address
        :param search_terms: list, search terms associated with this person
        """
        self.name = name
        self.email_address = email_address
        self.search_terms = search_terms

    def send_email(self, search_term_hits, url):
        """
        Email this user the court-roll URL and the search terms that matched.
        :param search_term_hits: list of matched search terms
        :param url: str, url to a court roll issue
        :return: None
        """
        message = MIMEMultipart('alternative')
        message['Subject'] = 'Court Roll Notification'
        message['From'] = Config.sender
        message['To'] = self.email_address
        plain_body = self._render_text(search_term_hits, url)
        html_body = self._render_html(search_term_hits, url)
        # plain part first so mail clients prefer the HTML alternative
        message.attach(MIMEText(plain_body, 'plain'))
        message.attach(MIMEText(html_body, 'html'))
        smtp = smtplib.SMTP(host=Config.host, port=Config.port)
        smtp.starttls()
        smtp.login(user=Config.sender, password=Config.pw)
        smtp.sendmail(Config.sender, self.email_address, message.as_string())
        smtp.quit()

    def _render_text(self, search_term_hits, url):
        """
        Render the plain-text body of the notification email.
        :param search_term_hits: list of search_terms
        :param url: str
        :return: text-formatted email message
        """
        environment = Environment(
            loader=PackageLoader('message', 'templates'),
            autoescape=select_autoescape(['.txt'])
        )
        return environment.get_template('base.txt').render(
            name=self.name, search_terms=search_term_hits, url=url)

    def _render_html(self, search_term_hits, url):
        """
        Render the HTML body of the notification email.
        :param search_term_hits: list of search_terms
        :param url: str
        :return: HTML-formatted email message
        """
        environment = Environment(
            loader=PackageLoader('message', 'templates'),
            autoescape=select_autoescape(['html', 'xml'])
        )
        return environment.get_template('base.html').render(
            name=self.name, search_terms=search_term_hits, url=url)
| 35.795181 | 85 | 0.611242 | """
Contains Feed, which handles parsing the rss feed and User, which handles messaging
"""
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from bs4 import BeautifulSoup
import feedparser as fp
from jinja2 import Environment, PackageLoader, select_autoescape
from requests import get
from configuration import Config
from database import Database
class Feed(Database):
    """
    Uses Database's methods in conjunction with its own to parse feed url,
    fetching and parsing page to plain text, searching each text for specific
    terms and email the correct users if the terms are found.
    """
    # Scottish Court of Session court-rolls RSS feed.
    URL = 'feed://www.scotcourts.gov.uk/feeds/court-of-session-court-rolls'

    def new_urls(self):
        """
        Return feed entry URLs that are not yet stored in the database.
        :return: list of str URLs
        """
        page = fp.parse(self.URL)['entries']
        return [item['link'] for item in page if item['link'] not in
                self.get_urls()]

    def refresh(self):
        """
        Iterates through new_urls, downloading then searching through resulting text.
        Note: text and search terms are upper case, to simplify things.
        """
        new_urls = self.new_urls()
        for url in new_urls:
            print(f'Adding {url}')
            html, text = self._downloader(url)
            self.add_url_html(url, html)
            for user in self.users():
                hits = self._text_search(text.upper(), user, url)
                if hits:
                    print(f'Sending alert to {user.name}')
                    user.send_email(hits, url)

    def users(self):
        """
        :yield: User obj containing name, email address and list of
        search_terms
        """
        users = self.get_users()
        for user in users:
            name, email_address = user
            search_terms = self.get_search_terms(email_address)
            yield User(name, email_address, search_terms)

    def _text_search(self, text, user, url):
        """
        Searches through text for any of the user's search terms; records the
        issue against the user for each term found.
        :param text: str, block of text from Court Roll Issue (upper-cased)
        :param user: User obj
        :param url: str Court Roll Issue URL
        :return: list of search terms found in the text
        """
        search_term_hits = []
        for term in user.search_terms:
            if term in text:
                search_term_hits.append(term)
                # NOTE(review): this records one row per matching term --
                # confirm multiple (user, url) rows are intended
                self.add_user_issue(user.email_address, url)
        return search_term_hits

    @staticmethod
    def _downloader(url):
        """
        Uses BeautifulSoup to extract a block of text through which to search
        :param url: str
        :return: tuple, html and plain text of Court Roll issue downloaded
        """
        soup = BeautifulSoup(get(url).content, 'html.parser')
        # the page marks the roll body with the courtRollContent css class
        selection = soup.select('.courtRollContent')[0]
        html, text = selection.prettify(), selection.get_text()
        return html, text
class User:
    """
    Object containing a person's name, email address and list of search terms
    associated with them, as well as methods used to send an email to them.
    """
    __slots__ = ['name', 'email_address', 'search_terms']

    def __init__(self, name, email_address, search_terms):
        """
        :param name: str person's name
        :param email_address: str person's email address
        :param search_terms: list, search terms associated with this person
        """
        self.name = name
        self.email_address = email_address
        self.search_terms = search_terms

    def __repr__(self):
        # fix: the previous f-string emitted an unbalanced trailing ')' and
        # wrapped the list in quotes; use !r for faithful representations
        return f"{self.__class__.__name__}({self.name!r}, " \
               f"{self.email_address!r}, {self.search_terms!r})"

    def __str__(self):
        return f'<User: {self.name}>'

    def send_email(self, search_term_hits, url):
        """
        Sends email message to a user.email_address containing the url &
        search term hits
        :param search_term_hits: list of search terms that were present in
        the issue searched
        :param url: str, url to a court roll issue
        :return: None
        """
        msg = MIMEMultipart('alternative')
        msg['Subject'] = 'Court Roll Notification'
        msg['From'] = Config.sender
        msg['To'] = self.email_address
        # plain part first so mail clients prefer the HTML alternative
        msg.attach(MIMEText(self._render_text(search_term_hits, url), 'plain'))
        msg.attach(MIMEText(self._render_html(search_term_hits, url), 'html'))
        server = smtplib.SMTP(host=Config.host, port=Config.port)
        server.starttls()
        server.login(user=Config.sender, password=Config.pw)
        server.sendmail(Config.sender, self.email_address, msg.as_string())
        server.quit()

    def _render_text(self, search_term_hits, url):
        """
        Renders Text message for email
        :param search_term_hits: list of search_terms
        :param url: str
        :return: text-formatted email message
        """
        env = Environment(
            loader=PackageLoader('message', 'templates'),
            autoescape=select_autoescape(['.txt'])
        )
        template = env.get_template('base.txt')
        return template.render(name=self.name,
                               search_terms=search_term_hits,
                               url=url)

    def _render_html(self, search_term_hits, url):
        """
        Renders HTML message for email
        :param search_term_hits: list of search_terms
        :param url: str
        :return: HTML-formatted email message
        """
        env = Environment(
            loader=PackageLoader('message', 'templates'),
            autoescape=select_autoescape(['html', 'xml'])
        )
        template = env.get_template('base.html')
        return template.render(name=self.name,
                               search_terms=search_term_hits,
                               url=url)
| 161 | 0 | 54 |
6299ddca9d8136483556ebbe37daf63e681b7fd7 | 1,645 | py | Python | ven2/lib/python2.7/site-packages/zope/traversing/tests/test_presentation.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | [
"Apache-2.0"
] | null | null | null | ven2/lib/python2.7/site-packages/zope/traversing/tests/test_presentation.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | [
"Apache-2.0"
] | 12 | 2016-03-24T15:39:22.000Z | 2020-03-30T14:48:00.000Z | ven2/lib/python2.7/site-packages/zope/traversing/tests/test_presentation.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | [
"Apache-2.0"
] | 2 | 2015-04-03T09:56:02.000Z | 2015-04-09T10:53:45.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Presentation Traverser Tests
"""
import unittest
from zope.testing.cleanup import CleanUp
from zope.interface import Interface, implementer
from zope.publisher.browser import TestRequest
from zope.traversing.namespace import view, resource
from zope.traversing.testing import browserView, browserResource
@implementer(IContent)
| 29.375 | 78 | 0.648024 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Presentation Traverser Tests
"""
import unittest
from zope.testing.cleanup import CleanUp
from zope.interface import Interface, implementer
from zope.publisher.browser import TestRequest
from zope.traversing.namespace import view, resource
from zope.traversing.testing import browserView, browserResource
class IContent(Interface):
    """Marker interface for the content objects used by the traversal tests."""
    pass
@implementer(IContent)
class Content(object):
    """Trivial content object providing IContent."""
    pass
class Resource(object):
    """Minimal resource stub; accepts and ignores the request."""
    def __init__(self, request):
        pass
class View(object):
    """Minimal view stub that remembers the content it was looked up for."""
    def __init__(self, content, request):
        # the request argument is intentionally unused
        self.content = content
class Test(CleanUp, unittest.TestCase):
    """Tests for the ++view++ and ++resource++ traversal namespaces."""
    def testView(self):
        # register View for IContent under the name 'foo', then traverse to it
        browserView(IContent, 'foo', View)
        ob = Content()
        v = view(ob, TestRequest()).traverse('foo', ())
        self.assertEqual(v.__class__, View)
    def testResource(self):
        # register Resource under the name 'foo', then traverse to it
        browserResource('foo', Resource)
        ob = Content()
        r = resource(ob, TestRequest()).traverse('foo', ())
        self.assertEqual(r.__class__, Resource)
| 407 | 42 | 222 |
84f29ab180fc62ee31143a67588a46d3a6b89ce5 | 189 | py | Python | livescore/__init__.py | andrewda/frc-livescore | bc251fb8ca1122a4a587c072bac42a1e30e5f8fe | [
"MIT"
] | 29 | 2017-07-07T05:57:03.000Z | 2022-02-06T13:01:08.000Z | livescore/__init__.py | andrewda/frc-livescore | bc251fb8ca1122a4a587c072bac42a1e30e5f8fe | [
"MIT"
] | 21 | 2017-07-06T20:24:27.000Z | 2021-11-10T22:41:17.000Z | livescore/__init__.py | andrewda/frc-livescore | bc251fb8ca1122a4a587c072bac42a1e30e5f8fe | [
"MIT"
] | 7 | 2017-07-09T02:14:04.000Z | 2021-11-09T22:36:02.000Z | # flake8: noqa
from .LivescoreBase import NoOverlayFoundException
from .Livescore2017 import Livescore2017
from .Livescore2018 import Livescore2018
from .Livescore2019 import Livescore2019
| 31.5 | 50 | 0.867725 | # flake8: noqa
from .LivescoreBase import NoOverlayFoundException
from .Livescore2017 import Livescore2017
from .Livescore2018 import Livescore2018
from .Livescore2019 import Livescore2019
| 0 | 0 | 0 |
d9c2686f0eef6d682f25ee73a0178e8d39d1a023 | 4,456 | py | Python | replay_memory.py | menwhitehead/KerasDQN | cee5f0732b5162e47de3897c1f7131d6ac893bb8 | [
"MIT"
] | null | null | null | replay_memory.py | menwhitehead/KerasDQN | cee5f0732b5162e47de3897c1f7131d6ac893bb8 | [
"MIT"
] | null | null | null | replay_memory.py | menwhitehead/KerasDQN | cee5f0732b5162e47de3897c1f7131d6ac893bb8 | [
"MIT"
] | null | null | null |
from dqn_globals import *
import numpy as np
import random
# Fast and NOT random
| 43.262136 | 132 | 0.661131 |
from dqn_globals import *
import numpy as np
import random
class ReplayMemory:
def __init__(self, memory_size=100):
    """Pre-allocate circular transition buffers and reusable minibatch arrays.

    SEQUENCE_FRAME_COUNT / FRAME_WIDTH / FRAME_HEIGHT / MINIBATCH_SIZE come
    from dqn_globals (imported via *).

    :param memory_size: max transitions kept before the oldest are overwritten
    """
    self.max_size = memory_size
    # next write position in the circular buffers
    self.next_index = 0
    # True once the buffer has wrapped around at least once
    self.is_full = False
    self.frames_memories = np.zeros((self.max_size, SEQUENCE_FRAME_COUNT, FRAME_WIDTH, FRAME_HEIGHT))
    self.action_memories = np.zeros((self.max_size,))
    self.reward_memories = np.zeros((self.max_size,))
    self.next_frame_memories = np.zeros((self.max_size, SEQUENCE_FRAME_COUNT, FRAME_WIDTH, FRAME_HEIGHT))
    # Pre-init minibatch arrays (reused by pickMinibatch to avoid allocation)
    self.minibatch_frames_memories = np.zeros((MINIBATCH_SIZE, SEQUENCE_FRAME_COUNT, FRAME_WIDTH, FRAME_HEIGHT))
    self.minibatch_action_memories = np.zeros((MINIBATCH_SIZE,))
    self.minibatch_reward_memories = np.zeros((MINIBATCH_SIZE,))
    self.minibatch_next_frame_memories = np.zeros((MINIBATCH_SIZE, SEQUENCE_FRAME_COUNT, FRAME_WIDTH, FRAME_HEIGHT))
def addTransition(self, input_frames, action, reward, next_frame=None):
self.frames_memories[self.next_index] = input_frames
self.action_memories[self.next_index] = action
self.reward_memories[self.next_index] = reward
if next_frame != None:
self.next_frame_memories[self.next_index] = next_frame
else:
# Make this negative to signal a NULL value
self.next_frame_memories[self.next_index][0][0][0] = -1
#self.next_frame_memories[self.next_index] = None
self.next_index += 1
if self.next_index == self.max_size:
self.is_full = True
self.next_index = 0
def pickMinibatch(self):
for i in range(MINIBATCH_SIZE):
minibatch_index = random.randrange(len(self))
self.minibatch_frames_memories[i] = self.frames_memories[minibatch_index]
self.minibatch_action_memories[i] = self.action_memories[minibatch_index]
self.minibatch_reward_memories[i] = self.reward_memories[minibatch_index]
self.minibatch_next_frame_memories[i] = self.next_frame_memories[minibatch_index]
return self.minibatch_frames_memories, \
self.minibatch_action_memories, \
self.minibatch_reward_memories, \
self.minibatch_next_frame_memories
def pickMinibatch2(self):
# TODO: SPEED THIS UP SOMEHOW
minibatch_indexes = np.random.randint(len(self), size=MINIBATCH_SIZE)
minibatch_frames = self.frames_memories[minibatch_indexes]
minibatch_actions = self.action_memories[minibatch_indexes]
minibatch_rewards = self.reward_memories[minibatch_indexes]
minibatch_next_frames = self.next_frame_memories[minibatch_indexes]
return minibatch_frames, minibatch_actions, minibatch_rewards, minibatch_next_frames
def pickMinibatch3(self):
minibatch_frames = []
minibatch_actions = []
minibatch_rewards = []
minibatch_next_frames = []
for i in range(MINIBATCH_SIZE):
minibatch_index = random.randrange(len(self))
minibatch_frames.append(self.frames_memories[minibatch_index])
minibatch_actions.append(self.action_memories[minibatch_index])
minibatch_rewards.append(self.reward_memories[minibatch_index])
minibatch_next_frames.append(self.next_frame_memories[minibatch_index])
return np.stack(minibatch_frames), np.stack(minibatch_actions), np.stack(minibatch_rewards), np.stack(minibatch_next_frames)
# Fast and NOT random
def pickMinibatch4(self):
return (self.frames_memories[:MINIBATCH_SIZE],
self.action_memories[:MINIBATCH_SIZE],
self.reward_memories[:MINIBATCH_SIZE],
self.next_frame_memories[:MINIBATCH_SIZE])
def __len__(self):
if self.is_full:
return self.max_size
else:
return self.next_index
def __str__(self):
result = ''
for i in range(len(self)):
result += "MEMORY[%d]: \n%s\n " % (i, str(self.frames_memories[i]))
result += "\t action: %d\n " % (self.action_memories[i])
result += "\t reward: %d\n " % (self.reward_memories[i])
result += "\t next: %s\n " % (self.next_frame_memories[i])
return result
| 4,024 | -2 | 282 |
f76aa35386806bdb131ad49e161f4a4043a67761 | 2,899 | py | Python | tests/unit/models/ci/zuul/test_test.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 3 | 2022-02-17T18:07:07.000Z | 2022-03-19T10:22:38.000Z | tests/unit/models/ci/zuul/test_test.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 58 | 2022-02-14T14:41:22.000Z | 2022-03-31T10:54:28.000Z | tests/unit/models/ci/zuul/test_test.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 6 | 2022-02-14T19:21:26.000Z | 2022-03-29T09:31:31.000Z | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from unittest.mock import Mock
from cibyl.models.ci.zuul.test import Test, TestKind, TestStatus
class TestTest(TestCase):
    """Tests for :class:`Test`.
    """

    def test_attributes(self):
        """Checks that the model has the desired attributes.
        """
        attrs = {
            'name': 'test',
            'status': TestStatus.SUCCESS,
            'duration': 1.2,
            'url': 'url-to-test'
        }

        data = Test.Data()
        data.name = attrs['name']
        data.status = attrs['status']
        data.duration = attrs['duration']
        data.url = attrs['url']

        model = Test(TestKind.ANSIBLE, data)

        self.assertEqual(TestKind.ANSIBLE, model.kind.value)
        self.assertEqual(attrs['name'], model.name.value)
        self.assertEqual(attrs['status'].name, model.result.value)
        self.assertEqual(attrs['duration'], model.duration.value)
        self.assertEqual(attrs['url'], model.url.value)

    def test_equality_by_type(self):
        """Checks that two models are no the same if they are of different
        type.
        """
        self.assertNotEqual(Mock(), Test())

    def test_equality_by_reference(self):
        """Checks that a model is equal to itself.
        """
        instance = Test()

        self.assertEqual(instance, instance)

    def test_equality_by_contents(self):
        """Checks that two models are equal if they hold the same data.
        """
        data = Test.Data()
        data.name = 'test'
        data.status = TestStatus.SUCCESS
        data.duration = 1.2
        data.url = 'url-to-test'

        first = Test(TestKind.ANSIBLE, data)
        second = Test(TestKind.ANSIBLE, data)

        self.assertEqual(second, first)

    def test_status(self):
        """Checks that the correct status is returned for different results.
        """
        model = Test()

        # Each candidate result string must map back to its enum member.
        for expected in (TestStatus.UNKNOWN, TestStatus.SUCCESS,
                         TestStatus.FAILURE, TestStatus.SKIPPED):
            model.result.value = expected.name
            self.assertEqual(expected, model.status)
| 29.886598 | 78 | 0.638496 | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from unittest.mock import Mock
from cibyl.models.ci.zuul.test import Test, TestKind, TestStatus
class TestTest(TestCase):
    """Tests for :class:`Test`.
    """

    def _build_data(self):
        """Return a fully populated :class:`Test.Data` shared by the tests."""
        data = Test.Data()
        data.name = 'test'
        data.status = TestStatus.SUCCESS
        data.duration = 1.2
        data.url = 'url-to-test'
        return data

    def test_attributes(self):
        """Checks that the model has the desired attributes.
        """
        data = self._build_data()
        model = Test(TestKind.ANSIBLE, data)

        self.assertEqual(TestKind.ANSIBLE, model.kind.value)
        self.assertEqual(data.name, model.name.value)
        self.assertEqual(data.status.name, model.result.value)
        self.assertEqual(data.duration, model.duration.value)
        self.assertEqual(data.url, model.url.value)

    def test_equality_by_type(self):
        """Checks that two models are no the same if they are of different
        type.
        """
        self.assertNotEqual(Mock(), Test())

    def test_equality_by_reference(self):
        """Checks that a model is equal to itself.
        """
        instance = Test()

        self.assertEqual(instance, instance)

    def test_equality_by_contents(self):
        """Checks that two models are equal if they hold the same data.
        """
        data = self._build_data()

        first = Test(TestKind.ANSIBLE, data)
        second = Test(TestKind.ANSIBLE, data)

        self.assertEqual(second, first)

    def test_status(self):
        """Checks that the correct status is returned for different results.
        """
        model = Test()

        # Each candidate result string must map back to its enum member.
        for expected in (TestStatus.UNKNOWN, TestStatus.SUCCESS,
                         TestStatus.FAILURE, TestStatus.SKIPPED):
            model.result.value = expected.name
            self.assertEqual(expected, model.status)
| 0 | 0 | 0 |
c5ea632580fc2bd003c5e5e02ae9d44f6aa8e6cb | 12,314 | py | Python | src/visitpy/visit_utils/src/qplot/scene.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/visitpy/visit_utils/src/qplot/scene.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/visitpy/visit_utils/src/qplot/scene.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: qplot/scene.py
author: Cyrus Harrison <cyrush@llnl.gov>
description:
Qt based offscreen Curve Rendering lib.
"""
import sys
import time
import math
from visit_utils.property_tree import PropertyTree
from visit_utils import ult
from visit_utils.property_tree import PropertyTree
from visit_utils import ult
from visit_utils.qannote import *
from .plots import *
| 40.37377 | 78 | 0.51072 | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: qplot/scene.py
author: Cyrus Harrison <cyrush@llnl.gov>
description:
Qt based offscreen Curve Rendering lib.
"""
import sys
import time
import math
from visit_utils.property_tree import PropertyTree
from visit_utils import ult
from visit_utils.property_tree import PropertyTree
from visit_utils import ult
from visit_utils.qannote import *
from .plots import *
class PlotGrid(object):
    """Draws evenly spaced grid lines across the 0-100 scene viewport."""

    def __init__(self, params, scene):
        self.scene = scene
        defaults = {"line_width": 0,
                    "x_bins": 10,
                    "y_bins": 10}
        self.params = PropertyTree(init=defaults)
        self.params.update(params)

    def render(self, painter):
        """Paint the vertical and horizontal grid lines."""
        self.scene.set_scene_viewport(painter)
        r, g, b, a = self.scene.params.fg_color
        grid_pen = QPen(QColor(r, g, b, a),
                        self.params.line_width,
                        Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin)
        grid_pen.setCosmetic(True)
        painter.setPen(grid_pen)
        painter.setBrush(Qt.NoBrush)
        nx = self.params.x_bins
        ny = self.params.y_bins
        step_x = 100.0 / (nx - 1)
        step_y = 100.0 / (ny - 1)
        # Vertical lines, left to right.
        x = 0.0
        for _ in range(nx):
            painter.drawLine(QPointF(x, 0.0), QPointF(x, 100.0))
            x += step_x
        # Horizontal lines, bottom to top (scene coordinates).
        y = 0.0
        for _ in range(ny):
            painter.drawLine(QPointF(0.0, y), QPointF(100.0, y))
            y += step_y
class PlotAxes(object):
    # Draws the axis frame (outline box) and optional tick marks in the
    # 0-100 scene coordinate system established by CurveScene.
    def __init__(self,params,scene):
        self.scene = scene
        self.params = PropertyTree(init={"line_width":2,
                                         "full_box":True,
                                         "x_ticks":5,
                                         "y_ticks":5,
                                         "tick_length":1.0,
                                         "tick_width":2.0,
                                         "show_ticks":True})
        self.params.update(params)
    def render(self,painter):
        # Render the outline and (optionally) evenly spaced tick marks.
        self.scene.set_scene_viewport(painter)
        fg = self.scene.params.fg_color
        x_ticks = self.params.x_ticks
        y_ticks = self.params.y_ticks
        tick_width = self.params.tick_width
        tick_len = self.params.tick_length
        fgcolor = QColor(fg[0],fg[1],fg[2],fg[3])
        pen = QPen(fgcolor,
                   self.params.line_width,
                   Qt.SolidLine,Qt.SquareCap,Qt.MiterJoin)
        pen.setCosmetic(True)
        painter.setPen(pen)
        painter.setBrush(Qt.NoBrush)
        # Bottom and left edges are always drawn; top and right edges only
        # when 'full_box' is enabled.
        outline_path = QPainterPath()
        outline_path.moveTo(0,100)
        outline_path.lineTo(100,100)
        outline_path.moveTo(0,0)
        outline_path.lineTo(0,100)
        if self.params.full_box:
            outline_path.moveTo(0,0)
            outline_path.lineTo(100,0)
            outline_path.moveTo(100,0)
            outline_path.lineTo(100,100)
        painter.drawPath(outline_path)
        if self.params.show_ticks:
            dx = 100.0/(x_ticks-1)
            dy = 100.0/(y_ticks-1)
            cx,cy = 0.0,0.0
            pen = QPen(fgcolor,
                       tick_width,
                       Qt.SolidLine,Qt.SquareCap,Qt.MiterJoin)
            pen.setCosmetic(True)
            painter.setPen(pen)
            painter.setBrush(Qt.NoBrush)
            # Scale tick length by aspect ratio so ticks look the same
            # length on both axes despite the non-square viewport.
            ar = self.scene.aspect_ratio()
            xtlen = tick_len
            ytlen = tick_len
            xtstart = 100
            ytstart = 0
            if ar >1:
                ytlen = ytlen / ar
            else:
                xtlen = xtlen / ar
            # X ticks hang below the bottom edge (y=100 in scene coords).
            for i in range(x_ticks):
                painter.drawLine(QPointF(cx,xtstart+xtlen),
                                 QPointF(cx,xtstart))
                cx+=dx
            # Y ticks extend left of the left edge (x=0).
            for i in range(y_ticks):
                painter.drawLine(QPointF(ytstart,cy),
                                 QPointF(ytstart-ytlen,cy))
                cy+=dy
class PlotLabels(object):
    # Draws numeric axis labels and the axis titles.  Unlike PlotGrid and
    # PlotAxes, this renders in the regular (pixel) viewport so that font
    # sizes are in device units.
    def __init__(self,params,scene):
        self.scene = scene
        self.params = PropertyTree(init={"x_labels":5,
                                         "y_labels":5,
                                         "x_labels_offset":9.0,
                                         "y_labels_offset":1.0,
                                         "x_title":"",
                                         "y_title":"",
                                         "x_title_offset":12.0,
                                         "y_title_offset":9.0,
                                         "x_format":"%2.1f",
                                         "y_format":"%2.1f",
                                         "labels_font/name":"Times New",
                                         "labels_font/size":11,
                                         "labels_font/bold":False,
                                         "titles_font/name":"Times New",
                                         "titles_font/size":12,
                                         "titles_font/bold":False,
                                         "log_scale_y":False})
        self.params.update(params)
    def render(self,painter):
        # Draw tick labels along both axes, then the optional axis titles.
        self.scene.set_regular_viewport(painter)
        orig_font = painter.font()
        font = QFont(self.params.labels_font.name)
        font.setPointSize(self.params.labels_font.size)
        font.setBold(self.params.labels_font.bold)
        painter.setFont(font)
        # Data-space view bounds and pixel size of the output image.
        view = [float (v) for v in self.scene.params.view]
        vwidth, vheight = [float(v) for v in self.scene.params.size]
        x_labels = self.params.x_labels
        y_labels = self.params.y_labels
        log_scale_y = self.params.log_scale_y
        x_labels_offset = self.params.x_labels_offset
        y_labels_offset = self.params.y_labels_offset
        fstr_x = self.params.x_format
        fstr_y = self.params.y_format
        # tw/th: total scene extent (100 plus margins), used to convert
        # margin-relative positions into pixel coordinates.
        margins = self.scene.margins()
        tw = float(100 + int(margins[0] + margins[1]))
        th = float(100 + int(margins[2] + margins[3]))
        xmin,xmax = view[0],view[1]
        ymin,ymax = view[2],view[3]
        xdiff,ydiff = xmax - xmin, ymax - ymin
        # Data-space step between consecutive labels.
        vdx = xdiff / float(x_labels-1)
        vdy = ydiff / float(y_labels-1)
        vx ,vy = xmin, ymin
        vyl = 0.0
        if log_scale_y:
            # For log scale, step uniformly in log10 space instead.
            # NOTE(review): log10 is called unqualified here while math.pow
            # is used below; log10 is not defined by this module's visible
            # imports (only `import math`), so it presumably comes from one
            # of the star imports -- confirm.
            vdy = (log10(ymax) - log10(ymin)) / float(y_labels-1)
            vyl = log10(ymin)
        fmtx = painter.fontMetrics()
        # Pixel-space starting positions and steps for the label rows.
        cx = margins[0]/tw *vwidth
        fixed_y = (th - (margins[2] - x_labels_offset))/ th * vheight
        cy = (1.0 - margins[2]/th) * vheight
        align_x = (margins[0] - y_labels_offset)/ tw * vwidth
        dx = (100.0 / tw * vwidth)/float(x_labels-1)
        dy = (100.0 / th * vheight)/float(y_labels-1)
        lbl = ""
        fh_offset = fmtx.height() / 4.0
        # X labels: horizontally centered on each tick position.
        for i in range(x_labels):
            lbl = fstr_x % vx
            fw_offset = fmtx.width(lbl) / 2.0
            painter.drawText(QPointF(cx-fw_offset,fixed_y+fh_offset*4.0),lbl)
            cx+=dx
            vx+=vdx
        # Y labels: right-aligned against the left margin.
        for i in range(y_labels):
            if log_scale_y:
                vy = math.pow(10.0,vyl)
            lbl = fstr_y % vy
            w_offset = fmtx.width(lbl)
            corr = align_x - w_offset
            painter.drawText(QPointF(corr,cy+fh_offset),lbl)
            cy=cy-dy
            if log_scale_y:
                vyl+=vdy
            else:
                vy +=vdy
        # Switch to the title font for the axis titles.
        font = QFont(self.params.titles_font.name)
        font.setPointSize(self.params.titles_font.size)
        font.setBold(self.params.titles_font.bold)
        painter.setFont(font)
        x_title = process_encoded_text(self.params.x_title)
        x_title_offset = self.params.x_title_offset
        if x_title != "":
            w_offset = fmtx.width(x_title)/2.0
            xtloc = QPointF(.5 * vwidth - w_offset,
                            (th - (margins[2] - x_title_offset))/th * vheight)
            painter.drawText(xtloc,x_title)
        y_title = process_encoded_text(self.params.y_title)
        y_title_offset = self.params.y_title_offset
        if y_title != "":
            # Y title is drawn rotated 90 degrees counter-clockwise.
            h_offset = fmtx.width(y_title)/2.0
            painter.save()
            painter.rotate(-90)
            ytloc = QPointF(-.5 * vheight - h_offset,
                            (margins[0] -y_title_offset)/tw * vwidth)
            painter.drawText(ytloc,y_title)
            painter.restore()
        painter.setFont(orig_font)
class CurveScene(object):
    # Top-level container for an offscreen curve plot: builds the item list
    # (background, grid, axes, plots, labels, annotations) from a parameter
    # tree and renders it through qannote's Canvas.  Also manages three
    # painter coordinate systems ("scene", "curve", "regular") and tracks
    # which one is currently active.
    def __init__(self,params):
        self.params = PropertyTree(init={"size": (400,250),
                                         "view":(0.0,0.0,0.0,0.0),
                                         "left_margin":15,
                                         "right_margin":10,
                                         "top_margin":10,
                                         "bottom_margin":15,
                                         "use_antialiasing":True,
                                         "log_scale_y":False,
                                         "fg_color":(255,255,255,255),
                                         "bg_color":(0,0,0,255)})
        self.params.update(params)
        self.params.lock()
        self.items = []
        self.active_view = "scene"
        self.__setup()
    def __setup(self):
        # Build the draw list back-to-front: background first, labels and
        # annotations last.
        self.__add_bg()
        if self.params.has_property("grid"):
            self.items.append(PlotGrid(self.params.grid,self))
        if self.params.has_property("axes"):
            self.items.append(PlotAxes(self.params.axes,self))
        for p in self.params.plots:
            self.items.append(Plots.create(self,p))
        if self.params.has_property("labels"):
            self.items.append(PlotLabels(self.params.labels,self))
        for i in self.params.annotations:
            self.items.append(Annotations.create(i))
    def __add_bg(self):
        # Background rectangle covering the 100x100 plot area plus margins.
        mgns = self.margins()
        w = 100 + int(mgns[0] + mgns[1])
        h = 100 + int(mgns[2] + mgns[3])
        bg = Rect({"x":-mgns[0],"y":-mgns[3],
                   "width":w,"height":h,"color":self.params.bg_color})
        self.items.append(bg)
    def __add_plot(self,params):
        # NOTE(review): unused placeholder; plots are created in __setup().
        pass
    def __add_annotation(self,params):
        # NOTE(review): unused placeholder; annotations are created in __setup().
        pass
    def aspect_ratio(self):
        # height / width of the output image in pixels.
        return float(self.params.size[1])/float(self.params.size[0])
    def margins(self):
        # Order: [left, right, bottom, top].
        return [self.params.left_margin,self.params.right_margin,
                self.params.bottom_margin, self.params.top_margin]
    def render(self,ofname):
        # Render all items to the output file using a window that spans the
        # 100x100 plot area expanded by the margins.
        mgns =self.margins()
        w = 100 + int(mgns[0] + mgns[1])
        h = 100 + int(mgns[2] + mgns[3])
        view = (-mgns[0],-mgns[3],w,h)
        Canvas.render(self.items,self.params.size,ofname,view)
    def set_scene_viewport(self,painter):
        # Switch the painter window to scene coordinates (plot area 0-100
        # with margins outside).  Restores the painter state first if the
        # curve transform is active.
        # NOTE(review): the guard tests against "regular" rather than
        # "scene", so calling this while already in "scene" re-applies the
        # window -- confirm this is intended.
        if not self.active_view == "regular":
            if self.active_view == "curve":
                painter.restore()
            mgns =self.margins()
            w = 100 + int(mgns[0] + mgns[1])
            h = 100 + int(mgns[2] + mgns[3])
            view = (-mgns[0],-mgns[3],w,h)
            painter.setWindow(-mgns[0],-mgns[3],w,h)
            self.active_view = "scene"
    def set_curve_viewport(self,painter):
        # Map data-space coordinates (self.params.view) onto the 100x100
        # plot area via a saved scale+translate; set_scene_viewport /
        # set_regular_viewport undo it with painter.restore().
        if not self.active_view == "curve":
            mgns =self.margins()
            w = 100 + int(mgns[0] + mgns[1])
            h = 100 + int(mgns[2] + mgns[3])
            view = (-mgns[0],-mgns[3],w,h)
            painter.setWindow(-mgns[0],-mgns[3],w,h);
            xmin, xmax,ymax,ymin= self.params.view
            if self.params.log_scale_y:
                # NOTE(review): log10 is unqualified here (module imports
                # `import math` only); presumably provided by a star import
                # -- confirm.
                ymin = log10(ymin);
                ymax = log10(ymax);
            xdiff = xmax - xmin;
            ydiff = ymax - ymin;
            painter.save()
            painter.scale(100.0/xdiff,100.0/ydiff)
            painter.translate(-xmin,-ymin)
            self.active_view="curve"
    def set_regular_viewport(self,painter):
        # Switch to raw pixel coordinates (0,0 .. width,height), used by
        # text rendering so fonts keep device units.
        if not self.active_view == "regular":
            if self.active_view == "curve":
                painter.restore()
            sz = self.params.size
            painter.setWindow(0,0,sz[0],sz[1])
            self.active_view = "regular"
| 11,160 | 12 | 534 |
575240b4e84391951afd1ca3465143a0a11de977 | 31,707 | py | Python | appTools/ToolExtractDrills.py | DannyPol/flatcam | 25a8634d0658e98b7fae31a095f8bef40c1b3067 | [
"MIT"
] | 1 | 2022-02-11T06:19:34.000Z | 2022-02-11T06:19:34.000Z | appTools/ToolExtractDrills.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | appTools/ToolExtractDrills.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | # ##########################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# File Author: Marius Adrian Stanciu (c) #
# Date: 1/10/2020 #
# MIT Licence #
# ##########################################################
from PyQt5 import QtWidgets, QtCore, QtGui
from appTool import AppTool
from appGUI.GUIElements import RadioSet, FCDoubleSpinner, FCCheckBox, FCComboBox
from shapely.geometry import Point
import logging
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
if '_' not in builtins.__dict__:
_ = gettext.gettext
log = logging.getLogger('base')
| 42.674293 | 118 | 0.531302 | # ##########################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# File Author: Marius Adrian Stanciu (c) #
# Date: 1/10/2020 #
# MIT Licence #
# ##########################################################
from PyQt5 import QtWidgets, QtCore, QtGui
from appTool import AppTool
from appGUI.GUIElements import RadioSet, FCDoubleSpinner, FCCheckBox, FCComboBox
from shapely.geometry import Point
import logging
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
if '_' not in builtins.__dict__:
_ = gettext.gettext
log = logging.getLogger('base')
class ToolExtractDrills(AppTool):
    """FlatCAM tool that creates an Excellon (drill) object from the pads
    of a Gerber object.

    Hole diameters are derived per aperture by one of three modes selected
    in the UI: a fixed diameter, the pad size minus an annular ring, or a
    percentage of the pad size.
    """

    def __init__(self, app):
        AppTool.__init__(self, app)
        self.decimals = self.app.decimals
        # #############################################################################
        # ######################### Tool GUI ##########################################
        # #############################################################################
        self.ui = ExtractDrillsUI(layout=self.layout, app=self.app)
        self.toolName = self.ui.toolName
        # ## Signals
        self.ui.hole_size_radio.activated_custom.connect(self.on_hole_size_toggle)
        self.ui.e_drills_button.clicked.connect(self.on_extract_drills_click)
        self.ui.reset_button.clicked.connect(self.set_tool_ui)
        # Each shape checkbox enables/disables its matching ring-width entry.
        self.ui.circular_cb.stateChanged.connect(
            lambda state:
            self.ui.circular_ring_entry.setDisabled(False) if state else self.ui.circular_ring_entry.setDisabled(True)
        )
        self.ui.oblong_cb.stateChanged.connect(
            lambda state:
            self.ui.oblong_ring_entry.setDisabled(False) if state else self.ui.oblong_ring_entry.setDisabled(True)
        )
        self.ui.square_cb.stateChanged.connect(
            lambda state:
            self.ui.square_ring_entry.setDisabled(False) if state else self.ui.square_ring_entry.setDisabled(True)
        )
        self.ui.rectangular_cb.stateChanged.connect(
            lambda state:
            self.ui.rectangular_ring_entry.setDisabled(False) if state else
            self.ui.rectangular_ring_entry.setDisabled(True)
        )
        self.ui.other_cb.stateChanged.connect(
            lambda state:
            self.ui.other_ring_entry.setDisabled(False) if state else self.ui.other_ring_entry.setDisabled(True)
        )

    def install(self, icon=None, separator=None, **kwargs):
        """Register the tool in the app menu with its keyboard shortcut."""
        AppTool.install(self, icon, separator, shortcut='Alt+I', **kwargs)

    def run(self, toggle=True):
        """Show (or toggle) the tool tab and reset its UI to defaults."""
        self.app.defaults.report_usage("Extract Drills()")
        if toggle:
            # if the splitter is hidden, display it, else hide it but only if the current widget is the same
            if self.app.ui.splitter.sizes()[0] == 0:
                self.app.ui.splitter.setSizes([1, 1])
            else:
                try:
                    if self.app.ui.tool_scroll_area.widget().objectName() == self.toolName:
                        # if tab is populated with the tool but it does not have the focus, focus on it
                        if not self.app.ui.notebook.currentWidget() is self.app.ui.tool_tab:
                            # focus on Tool Tab
                            self.app.ui.notebook.setCurrentWidget(self.app.ui.tool_tab)
                        else:
                            self.app.ui.splitter.setSizes([0, 1])
                except AttributeError:
                    pass
        else:
            if self.app.ui.splitter.sizes()[0] == 0:
                self.app.ui.splitter.setSizes([1, 1])
        AppTool.run(self)
        self.set_tool_ui()
        self.app.ui.notebook.setTabText(2, _("Extract Drills Tool"))

    def set_tool_ui(self):
        """Populate every UI control from the application defaults."""
        self.reset_fields()
        self.ui.hole_size_radio.set_value(self.app.defaults["tools_edrills_hole_type"])
        self.ui.dia_entry.set_value(float(self.app.defaults["tools_edrills_hole_fixed_dia"]))
        self.ui.circular_ring_entry.set_value(float(self.app.defaults["tools_edrills_circular_ring"]))
        self.ui.oblong_ring_entry.set_value(float(self.app.defaults["tools_edrills_oblong_ring"]))
        self.ui.square_ring_entry.set_value(float(self.app.defaults["tools_edrills_square_ring"]))
        self.ui.rectangular_ring_entry.set_value(float(self.app.defaults["tools_edrills_rectangular_ring"]))
        self.ui.other_ring_entry.set_value(float(self.app.defaults["tools_edrills_others_ring"]))
        self.ui.circular_cb.set_value(self.app.defaults["tools_edrills_circular"])
        self.ui.oblong_cb.set_value(self.app.defaults["tools_edrills_oblong"])
        self.ui.square_cb.set_value(self.app.defaults["tools_edrills_square"])
        self.ui.rectangular_cb.set_value(self.app.defaults["tools_edrills_rectangular"])
        self.ui.other_cb.set_value(self.app.defaults["tools_edrills_others"])
        self.ui.factor_entry.set_value(float(self.app.defaults["tools_edrills_hole_prop_factor"]))

    def on_extract_drills_click(self):
        """Collect pad centers from the selected Gerber object and create a
        new Excellon object from them.

        Hole diameters are computed according to the selected mode:
        'fixed' - every hole gets the user-supplied diameter;
        'ring'  - pad size minus twice the per-shape ring width;
        otherwise ('prop') - pad size scaled by the percentage factor.
        """
        drill_dia = self.ui.dia_entry.get_value()
        circ_r_val = self.ui.circular_ring_entry.get_value()
        oblong_r_val = self.ui.oblong_ring_entry.get_value()
        square_r_val = self.ui.square_ring_entry.get_value()
        rect_r_val = self.ui.rectangular_ring_entry.get_value()
        other_r_val = self.ui.other_ring_entry.get_value()
        prop_factor = self.ui.factor_entry.get_value() / 100.0
        # NOTE(review): 'drills' is never appended to below; only 'tools'
        # carries the extracted points.  Looks like a legacy field consumed
        # by obj_init -- confirm before removing.
        drills = []
        tools = {}
        selection_index = self.ui.gerber_object_combo.currentIndex()
        model_index = self.app.collection.index(selection_index, 0, self.ui.gerber_object_combo.rootModelIndex())
        try:
            fcobj = model_index.internalPointer().obj
        except Exception:
            self.app.inform.emit('[WARNING_NOTCL] %s' % _("There is no Gerber object loaded ..."))
            return
        outname = fcobj.options['name'].rpartition('.')[0]
        mode = self.ui.hole_size_radio.get_value()
        if mode == 'fixed':
            # ---- fixed mode: a single tool holding every accepted pad ----
            tools = {
                1: {
                    "tooldia": drill_dia,
                    "drills": [],
                    "slots": []
                }
            }
            for apid, apid_value in fcobj.apertures.items():
                ap_type = apid_value['type']
                # Skip apertures whose shape class is unchecked in the UI.
                if ap_type == 'C':
                    if self.ui.circular_cb.get_value() is False:
                        continue
                elif ap_type == 'O':
                    if self.ui.oblong_cb.get_value() is False:
                        continue
                elif ap_type == 'R':
                    width = float(apid_value['width'])
                    height = float(apid_value['height'])
                    # if the height == width (float numbers so the reason for the following)
                    if round(width, self.decimals) == round(height, self.decimals):
                        if self.ui.square_cb.get_value() is False:
                            continue
                    else:
                        if self.ui.rectangular_cb.get_value() is False:
                            continue
                else:
                    if self.ui.other_cb.get_value() is False:
                        continue
                for geo_el in apid_value['geometry']:
                    if 'follow' in geo_el and isinstance(geo_el['follow'], Point):
                        tools[1]["drills"].append(geo_el['follow'])
                        # NOTE(review): the first accepted point only creates
                        # the 'solid_geometry' list and is not added to it --
                        # confirm this is intended.
                        if 'solid_geometry' not in tools[1]:
                            tools[1]['solid_geometry'] = []
                        else:
                            tools[1]['solid_geometry'].append(geo_el['follow'])
            if 'solid_geometry' not in tools[1] or not tools[1]['solid_geometry']:
                self.app.inform.emit('[WARNING_NOTCL] %s' % _("No drills extracted. Try different parameters."))
                return
        elif mode == 'ring':
            # ---- ring mode: diameter = pad size minus 2x ring width,
            # grouped into one tool per resulting diameter ----
            drills_found = set()
            for apid, apid_value in fcobj.apertures.items():
                ap_type = apid_value['type']
                dia = None
                if ap_type == 'C':
                    if self.ui.circular_cb.get_value():
                        dia = float(apid_value['size']) - (2 * circ_r_val)
                elif ap_type == 'O':
                    width = float(apid_value['width'])
                    height = float(apid_value['height'])
                    if self.ui.oblong_cb.get_value():
                        # Use the smaller pad dimension for oblong pads.
                        if width > height:
                            dia = float(apid_value['height']) - (2 * oblong_r_val)
                        else:
                            dia = float(apid_value['width']) - (2 * oblong_r_val)
                elif ap_type == 'R':
                    width = float(apid_value['width'])
                    height = float(apid_value['height'])
                    # if the height == width (float numbers so the reason for the following)
                    if abs(float('%.*f' % (self.decimals, width)) - float('%.*f' % (self.decimals, height))) < \
                            (10 ** -self.decimals):
                        if self.ui.square_cb.get_value():
                            dia = float(apid_value['height']) - (2 * square_r_val)
                    else:
                        if self.ui.rectangular_cb.get_value():
                            if width > height:
                                dia = float(apid_value['height']) - (2 * rect_r_val)
                            else:
                                dia = float(apid_value['width']) - (2 * rect_r_val)
                else:
                    if self.ui.other_cb.get_value():
                        try:
                            dia = float(apid_value['size']) - (2 * other_r_val)
                        except KeyError:
                            # Aperture macros have no 'size'; use the smaller
                            # extent of the macro polygon's bounding box.
                            if ap_type == 'AM':
                                pol = apid_value['geometry'][0]['solid']
                                x0, y0, x1, y1 = pol.bounds
                                dx = x1 - x0
                                dy = y1 - y0
                                if dx <= dy:
                                    dia = dx - (2 * other_r_val)
                                else:
                                    dia = dy - (2 * other_r_val)
                # if dia is None then none of the above applied so we skip the following
                if dia is None:
                    continue
                # Reuse an existing tool whose diameter matches within the
                # configured decimal precision.
                tool_in_drills = False
                for tool, tool_val in tools.items():
                    if abs(float('%.*f' % (
                            self.decimals,
                            tool_val["tooldia"])) - float('%.*f' % (self.decimals, dia))) < (10 ** -self.decimals):
                        tool_in_drills = tool
                if tool_in_drills is False:
                    if tools:
                        new_tool = max([int(t) for t in tools]) + 1
                        tool_in_drills = new_tool
                    else:
                        tool_in_drills = 1
                for geo_el in apid_value['geometry']:
                    if 'follow' in geo_el and isinstance(geo_el['follow'], Point):
                        if tool_in_drills not in tools:
                            tools[tool_in_drills] = {
                                "tooldia": dia,
                                "drills": [],
                                "slots": []
                            }
                        tools[tool_in_drills]['drills'].append(geo_el['follow'])
                        if 'solid_geometry' not in tools[tool_in_drills]:
                            tools[tool_in_drills]['solid_geometry'] = []
                        else:
                            tools[tool_in_drills]['solid_geometry'].append(geo_el['follow'])
                if tool_in_drills in tools:
                    if 'solid_geometry' not in tools[tool_in_drills] or not tools[tool_in_drills]['solid_geometry']:
                        drills_found.add(False)
                    else:
                        drills_found.add(True)
            if True not in drills_found:
                self.app.inform.emit('[WARNING_NOTCL] %s' % _("No drills extracted. Try different parameters."))
                return
        else:
            # ---- proportional mode: diameter = pad size * factor,
            # otherwise identical grouping logic to ring mode ----
            drills_found = set()
            for apid, apid_value in fcobj.apertures.items():
                ap_type = apid_value['type']
                dia = None
                if ap_type == 'C':
                    if self.ui.circular_cb.get_value():
                        dia = float(apid_value['size']) * prop_factor
                elif ap_type == 'O':
                    width = float(apid_value['width'])
                    height = float(apid_value['height'])
                    if self.ui.oblong_cb.get_value():
                        if width > height:
                            dia = float(apid_value['height']) * prop_factor
                        else:
                            dia = float(apid_value['width']) * prop_factor
                elif ap_type == 'R':
                    width = float(apid_value['width'])
                    height = float(apid_value['height'])
                    # if the height == width (float numbers so the reason for the following)
                    if abs(float('%.*f' % (self.decimals, width)) - float('%.*f' % (self.decimals, height))) < \
                            (10 ** -self.decimals):
                        if self.ui.square_cb.get_value():
                            dia = float(apid_value['height']) * prop_factor
                    else:
                        if self.ui.rectangular_cb.get_value():
                            if width > height:
                                dia = float(apid_value['height']) * prop_factor
                            else:
                                dia = float(apid_value['width']) * prop_factor
                else:
                    if self.ui.other_cb.get_value():
                        try:
                            dia = float(apid_value['size']) * prop_factor
                        except KeyError:
                            if ap_type == 'AM':
                                pol = apid_value['geometry'][0]['solid']
                                x0, y0, x1, y1 = pol.bounds
                                dx = x1 - x0
                                dy = y1 - y0
                                if dx <= dy:
                                    dia = dx * prop_factor
                                else:
                                    dia = dy * prop_factor
                # if dia is None then none of the above applied so we skip the following
                if dia is None:
                    continue
                tool_in_drills = False
                for tool, tool_val in tools.items():
                    if abs(float('%.*f' % (
                            self.decimals,
                            tool_val["tooldia"])) - float('%.*f' % (self.decimals, dia))) < (10 ** -self.decimals):
                        tool_in_drills = tool
                if tool_in_drills is False:
                    if tools:
                        new_tool = max([int(t) for t in tools]) + 1
                        tool_in_drills = new_tool
                    else:
                        tool_in_drills = 1
                for geo_el in apid_value['geometry']:
                    if 'follow' in geo_el and isinstance(geo_el['follow'], Point):
                        if tool_in_drills not in tools:
                            tools[tool_in_drills] = {
                                "tooldia": dia,
                                "drills": [],
                                "slots": []
                            }
                        tools[tool_in_drills]['drills'].append(geo_el['follow'])
                        if 'solid_geometry' not in tools[tool_in_drills]:
                            tools[tool_in_drills]['solid_geometry'] = []
                        else:
                            tools[tool_in_drills]['solid_geometry'].append(geo_el['follow'])
                if tool_in_drills in tools:
                    if 'solid_geometry' not in tools[tool_in_drills] or not tools[tool_in_drills]['solid_geometry']:
                        drills_found.add(False)
                    else:
                        drills_found.add(True)
            if True not in drills_found:
                self.app.inform.emit('[WARNING_NOTCL] %s' % _("No drills extracted. Try different parameters."))
                return
        def obj_init(obj_inst, app_inst):
            # Initializer passed to the app object factory: attach the
            # collected tools, build geometry and export the source file.
            obj_inst.tools = tools
            obj_inst.drills = drills
            obj_inst.create_geometry()
            obj_inst.source_file = app_inst.f_handlers.export_excellon(obj_name=outname, local_use=obj_inst,
                                                                       filename=None,
                                                                       use_thread=False)
        self.app.app_obj.new_object("excellon", outname, obj_init)

    def on_hole_size_toggle(self, val):
        """Show only the widgets relevant to the selected sizing mode."""
        if val == "fixed":
            self.ui.fixed_label.setVisible(True)
            self.ui.dia_entry.setVisible(True)
            self.ui.dia_label.setVisible(True)
            self.ui.ring_frame.setVisible(False)
            self.ui.prop_label.setVisible(False)
            self.ui.factor_label.setVisible(False)
            self.ui.factor_entry.setVisible(False)
        elif val == "ring":
            self.ui.fixed_label.setVisible(False)
            self.ui.dia_entry.setVisible(False)
            self.ui.dia_label.setVisible(False)
            self.ui.ring_frame.setVisible(True)
            self.ui.prop_label.setVisible(False)
            self.ui.factor_label.setVisible(False)
            self.ui.factor_entry.setVisible(False)
        elif val == "prop":
            self.ui.fixed_label.setVisible(False)
            self.ui.dia_entry.setVisible(False)
            self.ui.dia_label.setVisible(False)
            self.ui.ring_frame.setVisible(False)
            self.ui.prop_label.setVisible(True)
            self.ui.factor_label.setVisible(True)
            self.ui.factor_entry.setVisible(True)

    def reset_fields(self):
        """Point the Gerber combo back to the first object in the collection."""
        self.ui.gerber_object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
        self.ui.gerber_object_combo.setCurrentIndex(0)
class ExtractDrillsUI:
    """
    GUI for the Extract Drills tool.

    Builds the tool panel into the provided ``layout``: a Gerber source
    selector, checkboxes choosing which pad aperture shapes to process,
    a radio choice of hole-sizing method (fixed diameter / proportional /
    fixed annular ring) with the matching parameter widgets, and the
    action/reset buttons. Widget visibility for the sizing methods is
    toggled elsewhere (the tool's ``on_hole_size_toggle`` handler).
    """

    # Displayed tool title (translatable)
    toolName = _("Extract Drills")

    def __init__(self, layout, app):
        """
        Build all widgets into ``layout``.

        :param layout: the Qt layout the tool UI is inserted into
        :param app:    the application object; used for the object
                       collection model, decimals setting and resources
        """
        self.app = app
        self.decimals = self.app.decimals
        self.layout = layout

        # ## Title
        title_label = QtWidgets.QLabel("%s" % self.toolName)
        title_label.setStyleSheet("""
                        QLabel
                        {
                            font-size: 16px;
                            font-weight: bold;
                        }
                        """)
        self.layout.addWidget(title_label)
        self.layout.addWidget(QtWidgets.QLabel(""))

        # ## Grid Layout (source object + pad-type checkboxes)
        grid_lay = QtWidgets.QGridLayout()
        self.layout.addLayout(grid_lay)
        grid_lay.setColumnStretch(0, 1)
        grid_lay.setColumnStretch(1, 0)

        # ## Gerber Object combobox, backed by the app's object collection model
        self.gerber_object_combo = FCComboBox()
        self.gerber_object_combo.setModel(self.app.collection)
        self.gerber_object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
        self.gerber_object_combo.is_last = True
        self.gerber_object_combo.obj_type = "Gerber"

        self.grb_label = QtWidgets.QLabel("<b>%s:</b>" % _("GERBER"))
        self.grb_label.setToolTip('%s.' % _("Gerber from which to extract drill holes"))

        # grid_lay.addRow("Bottom Layer:", self.object_combo)
        grid_lay.addWidget(self.grb_label, 0, 0, 1, 2)
        grid_lay.addWidget(self.gerber_object_combo, 1, 0, 1, 2)

        self.padt_label = QtWidgets.QLabel("<b>%s</b>" % _("Processed Pads Type"))
        self.padt_label.setToolTip(
            _("The type of pads shape to be processed.\n"
              "If the PCB has many SMD pads with rectangular pads,\n"
              "disable the Rectangular aperture.")
        )
        grid_lay.addWidget(self.padt_label, 2, 0, 1, 2)

        # Circular Aperture Selection
        self.circular_cb = FCCheckBox('%s' % _("Circular"))
        self.circular_cb.setToolTip(
            _("Process Circular Pads.")
        )
        grid_lay.addWidget(self.circular_cb, 3, 0, 1, 2)

        # Oblong Aperture Selection
        self.oblong_cb = FCCheckBox('%s' % _("Oblong"))
        self.oblong_cb.setToolTip(
            _("Process Oblong Pads.")
        )
        grid_lay.addWidget(self.oblong_cb, 4, 0, 1, 2)

        # Square Aperture Selection
        self.square_cb = FCCheckBox('%s' % _("Square"))
        self.square_cb.setToolTip(
            _("Process Square Pads.")
        )
        grid_lay.addWidget(self.square_cb, 5, 0, 1, 2)

        # Rectangular Aperture Selection
        self.rectangular_cb = FCCheckBox('%s' % _("Rectangular"))
        self.rectangular_cb.setToolTip(
            _("Process Rectangular Pads.")
        )
        grid_lay.addWidget(self.rectangular_cb, 6, 0, 1, 2)

        # Others type of Apertures Selection
        self.other_cb = FCCheckBox('%s' % _("Others"))
        self.other_cb.setToolTip(
            _("Process pads not in the categories above.")
        )
        grid_lay.addWidget(self.other_cb, 7, 0, 1, 2)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid_lay.addWidget(separator_line, 8, 0, 1, 2)

        # ## Grid Layout (sizing method selection)
        grid1 = QtWidgets.QGridLayout()
        self.layout.addLayout(grid1)
        grid1.setColumnStretch(0, 0)
        grid1.setColumnStretch(1, 1)

        self.method_label = QtWidgets.QLabel('<b>%s</b>' % _("Method"))
        self.method_label.setToolTip(
            _("The method for processing pads. Can be:\n"
              "- Fixed Diameter -> all holes will have a set size\n"
              "- Fixed Annular Ring -> all holes will have a set annular ring\n"
              "- Proportional -> each hole size will be a fraction of the pad size"))
        grid1.addWidget(self.method_label, 2, 0, 1, 2)

        # ## Holes Size method selector; values match on_hole_size_toggle()
        self.hole_size_radio = RadioSet(
            [
                {'label': _("Fixed Diameter"), 'value': 'fixed'},
                {'label': _("Proportional"), 'value': 'prop'},
                {'label': _("Fixed Annular Ring"), 'value': 'ring'}
            ],
            orientation='vertical',
            stretch=False)
        grid1.addWidget(self.hole_size_radio, 3, 0, 1, 2)

        # grid_lay1.addWidget(QtWidgets.QLabel(''))

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid1.addWidget(separator_line, 5, 0, 1, 2)

        # "Fixed Diameter" section
        self.fixed_label = QtWidgets.QLabel('<b>%s</b>' % _("Fixed Diameter"))
        grid1.addWidget(self.fixed_label, 6, 0, 1, 2)

        # Diameter value
        self.dia_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.dia_entry.set_precision(self.decimals)
        self.dia_entry.set_range(0.0000, 10000.0000)

        self.dia_label = QtWidgets.QLabel('%s:' % _("Value"))
        self.dia_label.setToolTip(
            _("Fixed hole diameter.")
        )
        grid1.addWidget(self.dia_label, 8, 0)
        grid1.addWidget(self.dia_entry, 8, 1)

        # Frame wrapping the whole "Fixed Annular Ring" section so it can
        # be shown/hidden as one unit.
        self.ring_frame = QtWidgets.QFrame()
        self.ring_frame.setContentsMargins(0, 0, 0, 0)
        self.layout.addWidget(self.ring_frame)

        self.ring_box = QtWidgets.QVBoxLayout()
        self.ring_box.setContentsMargins(0, 0, 0, 0)
        self.ring_frame.setLayout(self.ring_box)

        # ## Grid Layout (annular ring values, one entry per pad shape)
        grid2 = QtWidgets.QGridLayout()
        grid2.setColumnStretch(0, 0)
        grid2.setColumnStretch(1, 1)
        self.ring_box.addLayout(grid2)

        # Annular Ring section title
        self.ring_label = QtWidgets.QLabel('<b>%s</b>' % _("Fixed Annular Ring"))
        self.ring_label.setToolTip(
            _("The size of annular ring.\n"
              "The copper sliver between the hole exterior\n"
              "and the margin of the copper pad.")
        )
        grid2.addWidget(self.ring_label, 0, 0, 1, 2)

        # Circular Annular Ring Value
        self.circular_ring_label = QtWidgets.QLabel('%s:' % _("Circular"))
        self.circular_ring_label.setToolTip(
            _("The size of annular ring for circular pads.")
        )
        self.circular_ring_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.circular_ring_entry.set_precision(self.decimals)
        self.circular_ring_entry.set_range(0.0000, 10000.0000)
        grid2.addWidget(self.circular_ring_label, 1, 0)
        grid2.addWidget(self.circular_ring_entry, 1, 1)

        # Oblong Annular Ring Value
        self.oblong_ring_label = QtWidgets.QLabel('%s:' % _("Oblong"))
        self.oblong_ring_label.setToolTip(
            _("The size of annular ring for oblong pads.")
        )
        self.oblong_ring_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.oblong_ring_entry.set_precision(self.decimals)
        self.oblong_ring_entry.set_range(0.0000, 10000.0000)
        grid2.addWidget(self.oblong_ring_label, 2, 0)
        grid2.addWidget(self.oblong_ring_entry, 2, 1)

        # Square Annular Ring Value
        self.square_ring_label = QtWidgets.QLabel('%s:' % _("Square"))
        self.square_ring_label.setToolTip(
            _("The size of annular ring for square pads.")
        )
        self.square_ring_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.square_ring_entry.set_precision(self.decimals)
        self.square_ring_entry.set_range(0.0000, 10000.0000)
        grid2.addWidget(self.square_ring_label, 3, 0)
        grid2.addWidget(self.square_ring_entry, 3, 1)

        # Rectangular Annular Ring Value
        self.rectangular_ring_label = QtWidgets.QLabel('%s:' % _("Rectangular"))
        self.rectangular_ring_label.setToolTip(
            _("The size of annular ring for rectangular pads.")
        )
        self.rectangular_ring_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.rectangular_ring_entry.set_precision(self.decimals)
        self.rectangular_ring_entry.set_range(0.0000, 10000.0000)
        grid2.addWidget(self.rectangular_ring_label, 4, 0)
        grid2.addWidget(self.rectangular_ring_entry, 4, 1)

        # Others Annular Ring Value
        self.other_ring_label = QtWidgets.QLabel('%s:' % _("Others"))
        self.other_ring_label.setToolTip(
            _("The size of annular ring for other pads.")
        )
        self.other_ring_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.other_ring_entry.set_precision(self.decimals)
        self.other_ring_entry.set_range(0.0000, 10000.0000)
        grid2.addWidget(self.other_ring_label, 5, 0)
        grid2.addWidget(self.other_ring_entry, 5, 1)

        # ## Grid Layout ("Proportional" section)
        grid3 = QtWidgets.QGridLayout()
        self.layout.addLayout(grid3)
        grid3.setColumnStretch(0, 0)
        grid3.setColumnStretch(1, 1)

        # Proportional section title
        self.prop_label = QtWidgets.QLabel('<b>%s</b>' % _("Proportional Diameter"))
        grid3.addWidget(self.prop_label, 2, 0, 1, 2)

        # Diameter factor value (percentage of the pad size)
        self.factor_entry = FCDoubleSpinner(callback=self.confirmation_message, suffix='%')
        self.factor_entry.set_precision(self.decimals)
        self.factor_entry.set_range(0.0000, 100.0000)
        self.factor_entry.setSingleStep(0.1)

        self.factor_label = QtWidgets.QLabel('%s:' % _("Value"))
        self.factor_label.setToolTip(
            _("Proportional Diameter.\n"
              "The hole diameter will be a fraction of the pad size.")
        )
        grid3.addWidget(self.factor_label, 3, 0)
        grid3.addWidget(self.factor_entry, 3, 1)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid3.addWidget(separator_line, 5, 0, 1, 2)

        # Extract drills from Gerber apertures flashes (pads)
        self.e_drills_button = QtWidgets.QPushButton(_("Extract Drills"))
        self.e_drills_button.setIcon(QtGui.QIcon(self.app.resource_location + '/drill16.png'))
        self.e_drills_button.setToolTip(
            _("Extract drills from a given Gerber file.")
        )
        self.e_drills_button.setStyleSheet("""
                        QPushButton
                        {
                            font-weight: bold;
                        }
                        """)
        self.layout.addWidget(self.e_drills_button)

        self.layout.addStretch()

        # ## Reset Tool
        self.reset_button = QtWidgets.QPushButton(_("Reset Tool"))
        self.reset_button.setIcon(QtGui.QIcon(self.app.resource_location + '/reset32.png'))
        self.reset_button.setToolTip(
            _("Will reset the tool parameters.")
        )
        self.reset_button.setStyleSheet("""
                        QPushButton
                        {
                            font-weight: bold;
                        }
                        """)
        self.layout.addWidget(self.reset_button)

        # Initial state: ring entries disabled and all method-specific
        # widgets hidden until a sizing method is chosen.
        self.circular_ring_entry.setEnabled(False)
        self.oblong_ring_entry.setEnabled(False)
        self.square_ring_entry.setEnabled(False)
        self.rectangular_ring_entry.setEnabled(False)
        self.other_ring_entry.setEnabled(False)

        self.dia_entry.setVisible(False)
        self.dia_label.setVisible(False)
        self.factor_label.setVisible(False)
        self.factor_entry.setVisible(False)
        self.ring_frame.setVisible(False)

        # #################################### FINSIHED GUI ###########################
        # #############################################################################

    def confirmation_message(self, accepted, minval, maxval):
        """
        Report the outcome of validating a float entry to the app status bar.

        :param accepted: False when the edited value was out of range
        :param minval:   lower bound of the valid range
        :param maxval:   upper bound of the valid range
        """
        if accepted is False:
            self.app.inform[str, bool].emit('[WARNING_NOTCL] %s: [%.*f, %.*f]' % (_("Edited value is out of range"),
                                                                                  self.decimals,
                                                                                  minval,
                                                                                  self.decimals,
                                                                                  maxval), False)
        else:
            self.app.inform[str, bool].emit('[success] %s' % _("Edited value is within limits."), False)

    def confirmation_message_int(self, accepted, minval, maxval):
        """
        Integer counterpart of :meth:`confirmation_message`.

        :param accepted: False when the edited value was out of range
        :param minval:   lower bound of the valid range
        :param maxval:   upper bound of the valid range
        """
        if accepted is False:
            self.app.inform[str, bool].emit('[WARNING_NOTCL] %s: [%d, %d]' %
                                            (_("Edited value is out of range"), minval, maxval), False)
        else:
            self.app.inform[str, bool].emit('[success] %s' % _("Edited value is within limits."), False)
| 30,571 | 130 | 235 |
0abf118c383a3b0fab7d91a8d0f450a30b184401 | 20,603 | py | Python | lib/imdb_util.py | syKevinPeng/M3D-RPN | ae43248f0d64a83d7deef63308dd5ade25e7b751 | [
"MIT"
] | null | null | null | lib/imdb_util.py | syKevinPeng/M3D-RPN | ae43248f0d64a83d7deef63308dd5ade25e7b751 | [
"MIT"
] | null | null | null | lib/imdb_util.py | syKevinPeng/M3D-RPN | ae43248f0d64a83d7deef63308dd5ade25e7b751 | [
"MIT"
] | null | null | null | """
This file contains all image database (imdb) functionality,
such as loading and reading information from a dataset.
Generally, this file is meant to read in a dataset from disk into a
simple custom format for the detetive framework.
"""
# -----------------------------------------
# modules
# -----------------------------------------
import torch
import torch.utils.data as data
import sys
import re
from PIL import Image
from copy import deepcopy
sys.dont_write_bytecode = True
# -----------------------------------------
# custom
# -----------------------------------------
from lib.rpn_util import *
from lib.util import *
from lib.augmentations import *
from lib.core import *
class Dataset(torch.utils.data.Dataset):
    """
    A single Dataset class is used for the whole project,
    which implements the __init__ and __get__ functions from PyTorch.
    """

    def __init__(self, conf, root, cache_folder=None):
        """
        This function reads in all datasets to be used in training and stores ANY relevant
        information which may be needed during training as a list of edict()
        (referred to commonly as 'imobj').

        The function also optionally stores the image database (imdb) file into a cache.

        :param conf: configuration edict; reads datasets_train, lbls, ilbls,
                     batch_size, fg_image_ratio and the optional keys
                     video_det / video_count / use_3d_for_2d / affine_size
        :param root: root directory that contains the dataset folders
        :param cache_folder: when given, imdb.pkl is read from / written to it
        """

        imdb = []

        # optional config flags, with defaults when the key is absent
        self.video_det = False if not ('video_det' in conf) else conf.video_det
        self.video_count = 1 if not ('video_count' in conf) else conf.video_count
        self.use_3d_for_2d = ('use_3d_for_2d' in conf) and conf.use_3d_for_2d

        # use cache?
        if (cache_folder is not None) and os.path.exists(os.path.join(cache_folder, 'imdb.pkl')):
            logging.info('Preloading imdb.')
            imdb = pickle_read(os.path.join(cache_folder, 'imdb.pkl'))

        else:
            # NOTE(review): leftover debug print — consider removing
            print("here")
            # cycle through each dataset
            for dbind, db in enumerate(conf.datasets_train):

                logging.info('Loading imdb {}'.format(db['name']))

                # single imdb
                imdb_single_db = []

                # kitti formatting
                if db['anno_fmt'].lower() == 'kitti_det':

                    train_folder = os.path.join(root, db['name'], 'training')

                    ann_folder = os.path.join(train_folder, 'label_2', '')
                    cal_folder = os.path.join(train_folder, 'calib', '')
                    im_folder = os.path.join(train_folder, 'image_2', '')

                    # get sorted filepaths
                    annlist = sorted(glob(ann_folder + '*.txt'))

                    imdb_start = time()

                    self.affine_size = None if not ('affine_size' in conf) else conf.affine_size

                    for annind, annpath in enumerate(annlist):

                        # get file parts; `id` shadows the builtin (kept as-is)
                        base = os.path.basename(annpath)
                        id, ext = os.path.splitext(base)

                        calpath = os.path.join(cal_folder, id + '.txt')
                        impath = os.path.join(im_folder, id + db['im_ext'])
                        # up to three preceding video frames (prev_2 folder)
                        impath_pre = os.path.join(train_folder, 'prev_2', id + '_01' + db['im_ext'])
                        impath_pre2 = os.path.join(train_folder, 'prev_2', id + '_02' + db['im_ext'])
                        impath_pre3 = os.path.join(train_folder, 'prev_2', id + '_03' + db['im_ext'])

                        # read gts
                        p2 = read_kitti_cal(calpath)
                        p2_inv = np.linalg.inv(p2)

                        gts = read_kitti_label(annpath, p2, self.use_3d_for_2d)

                        if not self.affine_size is None:

                            # filter relevant classes
                            gts_plane = [deepcopy(gt) for gt in gts if gt.cls in conf.lbls and not gt.ign]

                            if len(gts_plane) > 0:

                                # assumed camera height above ground (KITTI setup)
                                KITTI_H = 1.65

                                # compute ray traces for default projection
                                for gtind in range(len(gts_plane)):
                                    gt = gts_plane[gtind]

                                    #cx2d = gt.bbox_3d[0]
                                    #cy2d = gt.bbox_3d[1]
                                    # use the bottom-center of the 2D box as the ray origin
                                    cy2d = gt.bbox_full[1] + gt.bbox_full[3]
                                    cx2d = gt.bbox_full[0] + gt.bbox_full[2] / 2

                                    z2d, coord3d = projection_ray_trace(p2, p2_inv, cx2d, cy2d, KITTI_H)

                                    gts_plane[gtind].center_in = coord3d[0:3, 0]
                                    gts_plane[gtind].center_3d = np.array(gt.center_3d)

                                # stack traced vs. annotated centers to fit a transform
                                prelim_tra = np.array([gt.center_in for gtind, gt in enumerate(gts_plane)])
                                target_tra = np.array([gt.center_3d for gtind, gt in enumerate(gts_plane)])

                                if self.affine_size == 4:
                                    # homogeneous coordinates for a 4x4 affine fit
                                    prelim_tra = np.pad(prelim_tra, [(0, 0), (0, 1)], mode='constant', constant_values=1)
                                    target_tra = np.pad(target_tra, [(0, 0), (0, 1)], mode='constant', constant_values=1)

                                # NOTE(review): `err` is computed but never used
                                affine_gt, err = solve_transform(prelim_tra, target_tra, compute_error=True)

                                # NOTE(review): leftover debugger anchor — consider removing
                                a = 1

                        obj = edict()

                        # did not compute transformer
                        if (self.affine_size is None) or len(gts_plane) < 1:
                            obj.affine_gt = None
                        else:
                            obj.affine_gt = affine_gt

                        # store gts
                        obj.id = id
                        obj.gts = gts
                        obj.p2 = p2
                        obj.p2_inv = p2_inv

                        # im properties (PIL is used only to read the size, image
                        # pixels are loaded lazily in __getitem__)
                        im = Image.open(impath)
                        obj.path = impath
                        obj.path_pre = impath_pre
                        obj.path_pre2 = impath_pre2
                        obj.path_pre3 = impath_pre3
                        obj.imW, obj.imH = im.size

                        # database properties
                        obj.dbname = db.name
                        obj.scale = db.scale
                        obj.dbind = dbind

                        # store
                        imdb_single_db.append(obj)

                        # periodic progress report with ETA
                        if (annind % 1000) == 0 and annind > 0:
                            time_str, dt = compute_eta(imdb_start, annind, len(annlist))
                            logging.info('{}/{}, dt: {:0.4f}, eta: {}'.format(annind, len(annlist), dt, time_str))

                # concatenate single imdb into full imdb
                imdb += imdb_single_db

            imdb = np.array(imdb)

            # cache off the imdb?
            if cache_folder is not None:
                pickle_write(os.path.join(cache_folder, 'imdb.pkl'), imdb)

        # store more information
        self.datasets_train = conf.datasets_train
        self.len = len(imdb)
        self.imdb = imdb

        # setup data augmentation transforms
        self.transform = Augmentation(conf)

        # setup sampler and data loader for this dataset
        self.sampler = torch.utils.data.sampler.WeightedRandomSampler(balance_samples(conf, imdb), self.len)
        self.loader = torch.utils.data.DataLoader(self, conf.batch_size, sampler=self.sampler, collate_fn=self.collate)

        # check classes: warn about gt labels present in the data but not trained on
        cls_not_used = []
        for imobj in imdb:
            for gt in imobj.gts:

                cls = gt.cls
                if not(cls in conf.lbls or cls in conf.ilbls) and (cls not in cls_not_used):
                    cls_not_used.append(cls)

        if len(cls_not_used) > 0:
            logging.info('Labels not used in training.. {}'.format(cls_not_used))

    def __getitem__(self, index):
        """
        Grabs the item at the given index. Specifically,
          - read the image from disk
          - read the imobj from RAM
          - applies data augmentation to (im, imobj)
          - converts image to RGB and [B C W H]
        """

        if not self.video_det:

            # read image
            im = cv2.imread(self.imdb[index].path)

        else:

            # read images; previous frames are stacked along the channel axis
            im = cv2.imread(self.imdb[index].path)

            video_count = 1 if self.video_count is None else self.video_count

            if video_count >= 2:
                im_pre = cv2.imread(self.imdb[index].path_pre)

                # resize the previous frame to match the current one if needed
                if not im_pre.shape == im.shape:
                    im_pre = cv2.resize(im_pre, (im.shape[1], im.shape[0]))

                im = np.concatenate((im, im_pre), axis=2)

            if video_count >= 3:

                im_pre2 = cv2.imread(self.imdb[index].path_pre2)

                # fall back to the closest available frame when missing
                if im_pre2 is None:
                    im_pre2 = im_pre

                if not im_pre2.shape == im.shape:
                    im_pre2 = cv2.resize(im_pre2, (im.shape[1], im.shape[0]))

                im = np.concatenate((im, im_pre2), axis=2)

            if video_count >= 4:

                im_pre3 = cv2.imread(self.imdb[index].path_pre3)

                if im_pre3 is None:
                    im_pre3 = im_pre2

                if not im_pre3.shape == im.shape:
                    im_pre3 = cv2.resize(im_pre3, (im.shape[1], im.shape[0]))

                im = np.concatenate((im, im_pre3), axis=2)

        # transform / data augmentation (works on a copy of the imobj)
        im, imobj = self.transform(im, deepcopy(self.imdb[index]))

        # swap BGR -> RGB within every 3-channel group (cv2 loads BGR)
        for i in range(int(im.shape[2]/3)):

            # convert to RGB then permute to be [B C H W]
            im[:, :, (i*3):(i*3) + 3] = im[:, :, (i*3+2, i*3+1, i*3)]

        # HWC -> CHW (the batch dimension is added later by collate)
        im = np.transpose(im, [2, 0, 1])

        return im, imobj

    @staticmethod
    def collate(batch):
        """
        Defines the methodology for PyTorch to collate the objects
        of a batch together, for some reason PyTorch doesn't function
        this way by default.
        """

        imgs = []
        imobjs = []

        # go through each batch
        for sample in batch:

            # append images and object dictionaries
            imgs.append(sample[0])
            imobjs.append(sample[1])

        # stack images into one tensor; imobjs stay a plain Python list
        imgs = np.array(imgs)
        imgs = torch.from_numpy(imgs).cuda()

        return imgs, imobjs

    def __len__(self):
        """
        Simply return the length of the dataset.
        """
        return self.len
def read_kitti_cal(calfile):
    """
    Reads the kitti calibration projection matrix (p2) file from disc.

    The 12 row-major entries of the 3x4 P2 matrix are returned embedded in
    a 4x4 homogeneous matrix (p2[3, 3] = 1) so it can be inverted.

    Args:
        calfile (str): path to single calibration file

    Returns:
        ndarray: 4x4 projection matrix

    Raises:
        ValueError: if the file contains no parsable 'P2:' line
            (the original silently hit an UnboundLocalError instead).
    """

    # 'fpat' expands to a float-in-scientific-notation pattern; KITTI
    # calibration files store all entries in exponent form.
    p2pat = re.compile(('(P2:)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
                        '\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', '[-+]?[\d]+\.?[\d]*[Ee](?:[-+]?[\d]+)?'))

    # context manager closes the file even if parsing raises
    with open(calfile, 'r') as text_file:
        for line in text_file:

            parsed = p2pat.fullmatch(line)

            if parsed is None:
                continue

            p2 = np.zeros([4, 4], dtype=float)

            # groups 2..13 hold the 12 matrix entries in row-major order
            for entry in range(12):
                p2[entry // 4, entry % 4] = float(parsed.group(entry + 2))

            p2[3, 3] = 1

            return p2

    raise ValueError('No P2 projection matrix found in {}'.format(calfile))
def read_kitti_label(file, p2, use_3d_for_2d=False):
    """
    Reads the kitti label file from disc.

    Each annotation line holds, in order: type, truncated, occluded, alpha,
    2D bbox (left, top, right, bottom), 3D dimensions (height, width, length),
    3D location (x, y, z in camera coordinates), rotation_y, and an optional
    trailing field (assumed to be a track id when it is an integer).

    Args:
        file (str): path to single label file for an image
        p2 (ndarray): projection matrix for the given image
        use_3d_for_2d (bool): when True, re-derive the 2D box by projecting
            the 3D box corners (avoids clipped 2D boxes)

    Returns:
        list[edict]: one ground-truth object per parsed annotation line
    """

    pattern = re.compile(('([a-zA-Z\-\?\_]+)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+'
                          + '(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s*((fpat)?)\n')
                         .replace('fpat', '[-+]?\d*\.\d+|[-+]?\d+'))

    gts = []

    # context manager closes the handle even if parsing raises
    with open(file, 'r') as text_file:
        for line in text_file:

            parsed = pattern.fullmatch(line)

            # skip lines that do not match the annotation layout
            if parsed is None:
                continue

            obj = edict()

            ign = False

            cls = parsed.group(1)
            trunc = float(parsed.group(2))
            occ = float(parsed.group(3))
            alpha = float(parsed.group(4))

            x = float(parsed.group(5))
            y = float(parsed.group(6))
            x2 = float(parsed.group(7))
            y2 = float(parsed.group(8))

            width = x2 - x + 1
            height = y2 - y + 1

            h3d = float(parsed.group(9))
            w3d = float(parsed.group(10))
            l3d = float(parsed.group(11))

            cx3d = float(parsed.group(12))  # center of car in 3d
            cy3d = float(parsed.group(13))  # bottom of car in 3d
            cz3d = float(parsed.group(14))  # center of car in 3d
            rotY = float(parsed.group(15))

            # actually center the box (KITTI stores the bottom face y)
            cy3d -= (h3d / 2)

            # height above ground, assuming a 1.65m camera mount
            elevation = (1.65 - cy3d)

            if use_3d_for_2d and h3d > 0 and w3d > 0 and l3d > 0:

                # re-compute the 2D box using 3D (finally, avoids clipped boxes)
                verts3d, corners_3d = project_3d(p2, cx3d, cy3d, cz3d, w3d, h3d, l3d, rotY, return_3d=True)

                # any boxes behind camera plane?
                if np.any(corners_3d[2, :] <= 0):
                    ign = True

                else:
                    x = min(verts3d[:, 0])
                    y = min(verts3d[:, 1])
                    x2 = max(verts3d[:, 0])
                    y2 = max(verts3d[:, 1])

                    width = x2 - x + 1
                    height = y2 - y + 1

            # project cx, cy, cz into the image plane
            coord3d = p2.dot(np.array([cx3d, cy3d, cz3d, 1]))

            # store the projected instead
            cx3d_2d = coord3d[0]
            cy3d_2d = coord3d[1]
            cz3d_2d = coord3d[2]

            cx = cx3d_2d / cz3d_2d
            cy = cy3d_2d / cz3d_2d

            # encode occlusion with range estimation
            # 0 = fully visible, 1 = partly occluded
            # 2 = largely occluded, 3 = unknown
            if occ == 0:
                vis = 1
            elif occ == 1:
                vis = 0.66
            elif occ == 2:
                vis = 0.33
            else:
                vis = 0.0

            # normalize rotY into [-pi, pi]
            while rotY > math.pi: rotY -= math.pi * 2
            while rotY < (-math.pi): rotY += math.pi * 2

            # recompute alpha from the normalized rotY and the 3D center
            alpha = convertRot2Alpha(rotY, cz3d, cx3d)

            obj.elevation = elevation
            obj.cls = cls
            obj.occ = occ > 0
            obj.ign = ign
            obj.visibility = vis
            obj.trunc = trunc
            obj.alpha = alpha
            obj.rotY = rotY

            # is there an extra field? (assume to be track)
            if len(parsed.groups()) >= 16 and parsed.group(16).isdigit(): obj.track = int(parsed.group(16))

            obj.bbox_full = np.array([x, y, width, height])
            obj.bbox_3d = [cx, cy, cz3d_2d, w3d, h3d, l3d, alpha, cx3d, cy3d, cz3d, rotY]
            obj.center_3d = [cx3d, cy3d, cz3d]

            gts.append(obj)

    return gts
def balance_samples(conf, imdb):
    """
    Balances the samples in an image dataset according to the given configuration.

    Basically we check which images have relevant foreground samples and which are empty,
    then we compute the sampling weights according to a desired fg_image_ratio.

    This is primarily useful in datasets which have a lot of empty (background) images, which may
    cause instability during training if not properly balanced against.

    Args:
        conf: configuration edict; uses fg_image_ratio, test_scale, lbls,
            ilbls, min_gt_vis, min_gt_h, max_gt_h. A negative fg_image_ratio
            disables balancing (uniform weights); a ratio of exactly 2 keeps
            the raw per-image gt counts as weights.
        imdb: image database, one imobj edict per image.

    Returns:
        ndarray: per-image sampling weights, normalized to sum to 1.
    """

    sample_weights = np.ones(len(imdb))

    if conf.fg_image_ratio >= 0:

        empty_inds = []
        valid_inds = []

        for imind, imobj in enumerate(imdb):

            valid = 0

            scale = conf.test_scale / imobj.imH

            igns, rmvs = determine_ignores(imobj.gts, conf.lbls, conf.ilbls, conf.min_gt_vis,
                                           conf.min_gt_h, conf.max_gt_h, scale)

            # count gts that are neither ignored nor removed
            for gtind, gt in enumerate(imobj.gts):
                if (not igns[gtind]) and (not rmvs[gtind]):
                    valid += 1

            sample_weights[imind] = valid

            if valid > 0:
                valid_inds.append(imind)
            else:
                empty_inds.append(imind)

        if conf.fg_image_ratio != 2:

            # guard the divisions: either partition may legitimately be
            # empty (all-foreground or all-background datasets), which
            # previously raised ZeroDivisionError
            if valid_inds:
                fg_weight = len(imdb) * conf.fg_image_ratio / len(valid_inds)
                sample_weights[valid_inds] = fg_weight
            else:
                fg_weight = 0.0

            if empty_inds:
                bg_weight = len(imdb) * (1 - conf.fg_image_ratio) / len(empty_inds)
                sample_weights[empty_inds] = bg_weight
            else:
                bg_weight = 0.0

            logging.info('weighted respectively as {:.2f} and {:.2f}'.format(fg_weight, bg_weight))

        logging.info('Found {} foreground and {} empty images'.format(np.sum(sample_weights > 0), np.sum(sample_weights <= 0)))

    # force sampling weights to sum to 1
    # NOTE(review): assumes at least one non-zero weight; an all-zero
    # configuration would produce NaNs here — confirm upstream config
    sample_weights /= np.sum(sample_weights)

    return sample_weights
| 34.626891 | 127 | 0.499782 | """
This file contains all image database (imdb) functionality,
such as loading and reading information from a dataset.
Generally, this file is meant to read in a dataset from disk into a
simple custom format for the detetive framework.
"""
# -----------------------------------------
# modules
# -----------------------------------------
import torch
import torch.utils.data as data
import sys
import re
from PIL import Image
from copy import deepcopy
sys.dont_write_bytecode = True
# -----------------------------------------
# custom
# -----------------------------------------
from lib.rpn_util import *
from lib.util import *
from lib.augmentations import *
from lib.core import *
class Dataset(torch.utils.data.Dataset):
    """
    A single Dataset class is used for the whole project,
    which implements the __init__ and __get__ functions from PyTorch.
    """

    def __init__(self, conf, root, cache_folder=None):
        """
        This function reads in all datasets to be used in training and stores ANY relevant
        information which may be needed during training as a list of edict()
        (referred to commonly as 'imobj').

        The function also optionally stores the image database (imdb) file into a cache.

        :param conf: configuration edict; reads datasets_train, lbls, ilbls,
                     batch_size, fg_image_ratio and the optional keys
                     video_det / video_count / use_3d_for_2d / affine_size
        :param root: root directory that contains the dataset folders
        :param cache_folder: when given, imdb.pkl is read from / written to it
        """

        imdb = []

        # optional config flags, with defaults when the key is absent
        self.video_det = False if not ('video_det' in conf) else conf.video_det
        self.video_count = 1 if not ('video_count' in conf) else conf.video_count
        self.use_3d_for_2d = ('use_3d_for_2d' in conf) and conf.use_3d_for_2d

        # use cache?
        if (cache_folder is not None) and os.path.exists(os.path.join(cache_folder, 'imdb.pkl')):
            logging.info('Preloading imdb.')
            imdb = pickle_read(os.path.join(cache_folder, 'imdb.pkl'))

        else:
            # NOTE(review): leftover debug print — consider removing
            print("here")
            # cycle through each dataset
            for dbind, db in enumerate(conf.datasets_train):

                logging.info('Loading imdb {}'.format(db['name']))

                # single imdb
                imdb_single_db = []

                # kitti formatting
                if db['anno_fmt'].lower() == 'kitti_det':

                    train_folder = os.path.join(root, db['name'], 'training')

                    ann_folder = os.path.join(train_folder, 'label_2', '')
                    cal_folder = os.path.join(train_folder, 'calib', '')
                    im_folder = os.path.join(train_folder, 'image_2', '')

                    # get sorted filepaths
                    annlist = sorted(glob(ann_folder + '*.txt'))

                    imdb_start = time()

                    self.affine_size = None if not ('affine_size' in conf) else conf.affine_size

                    for annind, annpath in enumerate(annlist):

                        # get file parts; `id` shadows the builtin (kept as-is)
                        base = os.path.basename(annpath)
                        id, ext = os.path.splitext(base)

                        calpath = os.path.join(cal_folder, id + '.txt')
                        impath = os.path.join(im_folder, id + db['im_ext'])
                        # up to three preceding video frames (prev_2 folder)
                        impath_pre = os.path.join(train_folder, 'prev_2', id + '_01' + db['im_ext'])
                        impath_pre2 = os.path.join(train_folder, 'prev_2', id + '_02' + db['im_ext'])
                        impath_pre3 = os.path.join(train_folder, 'prev_2', id + '_03' + db['im_ext'])

                        # read gts
                        p2 = read_kitti_cal(calpath)
                        p2_inv = np.linalg.inv(p2)

                        gts = read_kitti_label(annpath, p2, self.use_3d_for_2d)

                        if not self.affine_size is None:

                            # filter relevant classes
                            gts_plane = [deepcopy(gt) for gt in gts if gt.cls in conf.lbls and not gt.ign]

                            if len(gts_plane) > 0:

                                # assumed camera height above ground (KITTI setup)
                                KITTI_H = 1.65

                                # compute ray traces for default projection
                                for gtind in range(len(gts_plane)):
                                    gt = gts_plane[gtind]

                                    #cx2d = gt.bbox_3d[0]
                                    #cy2d = gt.bbox_3d[1]
                                    # use the bottom-center of the 2D box as the ray origin
                                    cy2d = gt.bbox_full[1] + gt.bbox_full[3]
                                    cx2d = gt.bbox_full[0] + gt.bbox_full[2] / 2

                                    z2d, coord3d = projection_ray_trace(p2, p2_inv, cx2d, cy2d, KITTI_H)

                                    gts_plane[gtind].center_in = coord3d[0:3, 0]
                                    gts_plane[gtind].center_3d = np.array(gt.center_3d)

                                # stack traced vs. annotated centers to fit a transform
                                prelim_tra = np.array([gt.center_in for gtind, gt in enumerate(gts_plane)])
                                target_tra = np.array([gt.center_3d for gtind, gt in enumerate(gts_plane)])

                                if self.affine_size == 4:
                                    # homogeneous coordinates for a 4x4 affine fit
                                    prelim_tra = np.pad(prelim_tra, [(0, 0), (0, 1)], mode='constant', constant_values=1)
                                    target_tra = np.pad(target_tra, [(0, 0), (0, 1)], mode='constant', constant_values=1)

                                # NOTE(review): `err` is computed but never used
                                affine_gt, err = solve_transform(prelim_tra, target_tra, compute_error=True)

                                # NOTE(review): leftover debugger anchor — consider removing
                                a = 1

                        obj = edict()

                        # did not compute transformer
                        if (self.affine_size is None) or len(gts_plane) < 1:
                            obj.affine_gt = None
                        else:
                            obj.affine_gt = affine_gt

                        # store gts
                        obj.id = id
                        obj.gts = gts
                        obj.p2 = p2
                        obj.p2_inv = p2_inv

                        # im properties (PIL is used only to read the size, image
                        # pixels are loaded lazily in __getitem__)
                        im = Image.open(impath)
                        obj.path = impath
                        obj.path_pre = impath_pre
                        obj.path_pre2 = impath_pre2
                        obj.path_pre3 = impath_pre3
                        obj.imW, obj.imH = im.size

                        # database properties
                        obj.dbname = db.name
                        obj.scale = db.scale
                        obj.dbind = dbind

                        # store
                        imdb_single_db.append(obj)

                        # periodic progress report with ETA
                        if (annind % 1000) == 0 and annind > 0:
                            time_str, dt = compute_eta(imdb_start, annind, len(annlist))
                            logging.info('{}/{}, dt: {:0.4f}, eta: {}'.format(annind, len(annlist), dt, time_str))

                # concatenate single imdb into full imdb
                imdb += imdb_single_db

            imdb = np.array(imdb)

            # cache off the imdb?
            if cache_folder is not None:
                pickle_write(os.path.join(cache_folder, 'imdb.pkl'), imdb)

        # store more information
        self.datasets_train = conf.datasets_train
        self.len = len(imdb)
        self.imdb = imdb

        # setup data augmentation transforms
        self.transform = Augmentation(conf)

        # setup sampler and data loader for this dataset
        self.sampler = torch.utils.data.sampler.WeightedRandomSampler(balance_samples(conf, imdb), self.len)
        self.loader = torch.utils.data.DataLoader(self, conf.batch_size, sampler=self.sampler, collate_fn=self.collate)

        # check classes: warn about gt labels present in the data but not trained on
        cls_not_used = []
        for imobj in imdb:
            for gt in imobj.gts:

                cls = gt.cls
                if not(cls in conf.lbls or cls in conf.ilbls) and (cls not in cls_not_used):
                    cls_not_used.append(cls)

        if len(cls_not_used) > 0:
            logging.info('Labels not used in training.. {}'.format(cls_not_used))

    def __getitem__(self, index):
        """
        Grabs the item at the given index. Specifically,
          - read the image from disk
          - read the imobj from RAM
          - applies data augmentation to (im, imobj)
          - converts image to RGB and [B C W H]
        """

        if not self.video_det:

            # read image
            im = cv2.imread(self.imdb[index].path)

        else:

            # read images; previous frames are stacked along the channel axis
            im = cv2.imread(self.imdb[index].path)

            video_count = 1 if self.video_count is None else self.video_count

            if video_count >= 2:
                im_pre = cv2.imread(self.imdb[index].path_pre)

                # resize the previous frame to match the current one if needed
                if not im_pre.shape == im.shape:
                    im_pre = cv2.resize(im_pre, (im.shape[1], im.shape[0]))

                im = np.concatenate((im, im_pre), axis=2)

            if video_count >= 3:

                im_pre2 = cv2.imread(self.imdb[index].path_pre2)

                # fall back to the closest available frame when missing
                if im_pre2 is None:
                    im_pre2 = im_pre

                if not im_pre2.shape == im.shape:
                    im_pre2 = cv2.resize(im_pre2, (im.shape[1], im.shape[0]))

                im = np.concatenate((im, im_pre2), axis=2)

            if video_count >= 4:

                im_pre3 = cv2.imread(self.imdb[index].path_pre3)

                if im_pre3 is None:
                    im_pre3 = im_pre2

                if not im_pre3.shape == im.shape:
                    im_pre3 = cv2.resize(im_pre3, (im.shape[1], im.shape[0]))

                im = np.concatenate((im, im_pre3), axis=2)

        # transform / data augmentation (works on a copy of the imobj)
        im, imobj = self.transform(im, deepcopy(self.imdb[index]))

        # swap BGR -> RGB within every 3-channel group (cv2 loads BGR)
        for i in range(int(im.shape[2]/3)):

            # convert to RGB then permute to be [B C H W]
            im[:, :, (i*3):(i*3) + 3] = im[:, :, (i*3+2, i*3+1, i*3)]

        # HWC -> CHW (the batch dimension is added later by collate)
        im = np.transpose(im, [2, 0, 1])

        return im, imobj

    @staticmethod
    def collate(batch):
        """
        Defines the methodology for PyTorch to collate the objects
        of a batch together, for some reason PyTorch doesn't function
        this way by default.
        """

        imgs = []
        imobjs = []

        # go through each batch
        for sample in batch:

            # append images and object dictionaries
            imgs.append(sample[0])
            imobjs.append(sample[1])

        # stack images into one tensor; imobjs stay a plain Python list
        imgs = np.array(imgs)
        imgs = torch.from_numpy(imgs).cuda()

        return imgs, imobjs

    def __len__(self):
        """
        Simply return the length of the dataset.
        """
        return self.len
def read_kitti_cal(calfile):
    """
    Reads the kitti calibration projection matrix (p2) file from disc.

    The 12 row-major entries of the 3x4 P2 matrix are returned embedded in
    a 4x4 homogeneous matrix (p2[3, 3] = 1) so it can be inverted.

    Args:
        calfile (str): path to single calibration file

    Returns:
        ndarray: 4x4 projection matrix

    Raises:
        ValueError: if the file contains no parsable 'P2:' line
            (the original silently hit an UnboundLocalError instead).
    """

    # 'fpat' expands to a float-in-scientific-notation pattern; KITTI
    # calibration files store all entries in exponent form.
    p2pat = re.compile(('(P2:)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
                        '\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', '[-+]?[\d]+\.?[\d]*[Ee](?:[-+]?[\d]+)?'))

    # context manager closes the file even if parsing raises
    with open(calfile, 'r') as text_file:
        for line in text_file:

            parsed = p2pat.fullmatch(line)

            if parsed is None:
                continue

            p2 = np.zeros([4, 4], dtype=float)

            # groups 2..13 hold the 12 matrix entries in row-major order
            for entry in range(12):
                p2[entry // 4, entry % 4] = float(parsed.group(entry + 2))

            p2[3, 3] = 1

            return p2

    raise ValueError('No P2 projection matrix found in {}'.format(calfile))
def read_kitti_poses(posefile):
    """
    Reads a kitti pose file from disc.

    Each line holds 12 whitespace-separated floats forming a row-major 3x4
    rigid transform; values may be in scientific or plain decimal notation.

    Args:
        posefile (str): path to single pose file

    Returns:
        list[ndarray]: one 4x4 homogeneous pose matrix per parsed line.
    """

    def _to_matrix(parsed):
        # groups 1..12 are the row-major entries of the 3x4 transform;
        # this replaces two copy-pasted 12-assignment blocks in the original
        p = np.zeros([4, 4], dtype=float)
        for idx in range(12):
            p[idx // 4, idx % 4] = float(parsed.group(idx + 1))
        p[3, 3] = 1
        return p

    # scientific-notation and plain-decimal variants of the same 12-float layout
    ppat1 = re.compile((r'(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
                        r'\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', r'[-+]?[\d]+\.?[\d]*[Ee](?:[-+]?[\d]+)?'))
    ppat2 = re.compile((r'(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
                        r'\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', r'[-+]?[\d]+\.?[\d]*'))

    ps = []
    # 'with' guarantees the file is closed even if parsing raises
    with open(posefile, 'r') as text_file:
        for line in text_file:
            # scientific notation is tried first, matching the original order
            parsed = ppat1.fullmatch(line) or ppat2.fullmatch(line)
            if parsed is not None:
                ps.append(_to_matrix(parsed))

    return ps
def read_kitti_label(file, p2, use_3d_for_2d=False):
    """
    Reads the kitti label file from disc.

    Args:
        file (str): path to single label file for an image
        p2 (ndarray): projection matrix for the given image
        use_3d_for_2d (bool): when True, the 2D box is recomputed by
            projecting the 3D box through p2 (avoids clipped boxes)

    Returns:
        list: one edict per parsed label line, carrying the 2D box,
        3D box parameters, projected 3D center and visibility/ignore flags.
    """

    gts = []

    text_file = open(file, 'r')

    '''
     Values    Name      Description
    ----------------------------------------------------------------------------
       1    type         Describes the type of object: 'Car', 'Van', 'Truck',
                         'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
                         'Misc' or 'DontCare'
       1    truncated    Float from 0 (non-truncated) to 1 (truncated), where
                         truncated refers to the object leaving image boundaries
       1    occluded     Integer (0,1,2,3) indicating occlusion state:
                         0 = fully visible, 1 = partly occluded
                         2 = largely occluded, 3 = unknown
       1    alpha        Observation angle of object, ranging [-pi..pi]
       4    bbox         2D bounding box of object in the image (0-based index):
                         contains left, top, right, bottom pixel coordinates
       3    dimensions   3D object dimensions: height, width, length (in meters)
       3    location     3D object location x,y,z in camera coordinates (in meters)
       1    rotation_y   Rotation ry around Y-axis in camera coordinates [-pi..pi]
       1    score        Only for results: Float, indicating confidence in
                         detection, needed for p/r curves, higher is better.
    '''

    # class token followed by 15 floats, plus an optional 16th field
    # (assumed below to be a track id)
    pattern = re.compile(('([a-zA-Z\-\?\_]+)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+'
                          + '(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s*((fpat)?)\n')
                         .replace('fpat', '[-+]?\d*\.\d+|[-+]?\d+'))

    for line in text_file:

        parsed = pattern.fullmatch(line)

        # bbGt annotation in text format of:
        # cls x y w h occ x y w h ign ang
        if parsed is not None:

            obj = edict()

            ign = False

            cls = parsed.group(1)
            trunc = float(parsed.group(2))
            occ = float(parsed.group(3))
            alpha = float(parsed.group(4))

            # raw 2D box corners (left, top, right, bottom)
            x = float(parsed.group(5))
            y = float(parsed.group(6))
            x2 = float(parsed.group(7))
            y2 = float(parsed.group(8))

            width = x2 - x + 1
            height = y2 - y + 1

            # 3D dimensions (meters) and camera-frame location
            h3d = float(parsed.group(9))
            w3d = float(parsed.group(10))
            l3d = float(parsed.group(11))

            cx3d = float(parsed.group(12)) # center of car in 3d
            cy3d = float(parsed.group(13)) # bottom of car in 3d
            cz3d = float(parsed.group(14)) # center of car in 3d
            rotY = float(parsed.group(15))

            # actually center the box
            # (the label stores the bottom y; shift up by half the height)
            cy3d -= (h3d / 2)

            # height of the box center above the ground plane;
            # 1.65 m is presumably the KITTI camera mounting height -- TODO confirm
            elevation = (1.65 - cy3d)

            if use_3d_for_2d and h3d > 0 and w3d > 0 and l3d > 0:

                # re-compute the 2D box using 3D (finally, avoids clipped boxes)
                verts3d, corners_3d = project_3d(p2, cx3d, cy3d, cz3d, w3d, h3d, l3d, rotY, return_3d=True)

                # any boxes behind camera plane?
                if np.any(corners_3d[2, :] <= 0):
                    ign = True

                else:
                    # tight 2D box around the projected 3D vertices
                    x = min(verts3d[:, 0])
                    y = min(verts3d[:, 1])
                    x2 = max(verts3d[:, 0])
                    y2 = max(verts3d[:, 1])

                    width = x2 - x + 1
                    height = y2 - y + 1

            # project cx, cy, cz
            coord3d = p2.dot(np.array([cx3d, cy3d, cz3d, 1]))

            # store the projected instead
            cx3d_2d = coord3d[0]
            cy3d_2d = coord3d[1]
            cz3d_2d = coord3d[2]

            # perspective divide -> pixel coordinates of the 3D center
            cx = cx3d_2d / cz3d_2d
            cy = cy3d_2d / cz3d_2d

            # encode occlusion with range estimation
            # 0 = fully visible, 1 = partly occluded
            # 2 = largely occluded, 3 = unknown
            if occ == 0: vis = 1
            elif occ == 1: vis = 0.66
            elif occ == 2: vis = 0.33
            else: vis = 0.0

            # wrap rotY into [-pi, pi]
            while rotY > math.pi: rotY -= math.pi * 2
            while rotY < (-math.pi): rotY += math.pi * 2

            # recompute alpha
            alpha = convertRot2Alpha(rotY, cz3d, cx3d)

            obj.elevation = elevation
            obj.cls = cls
            obj.occ = occ > 0
            obj.ign = ign
            obj.visibility = vis
            obj.trunc = trunc
            obj.alpha = alpha
            obj.rotY = rotY

            # is there an extra field? (assume to be track)
            if len(parsed.groups()) >= 16 and parsed.group(16).isdigit(): obj.track = int(parsed.group(16))

            obj.bbox_full = np.array([x, y, width, height])
            obj.bbox_3d = [cx, cy, cz3d_2d, w3d, h3d, l3d, alpha, cx3d, cy3d, cz3d, rotY]
            obj.center_3d = [cx3d, cy3d, cz3d]

            gts.append(obj)

    text_file.close()

    return gts
def balance_samples(conf, imdb):
    """
    Balances the samples in an image dataset according to the given configuration.
    Basically we check which images have relevant foreground samples and which are empty,
    then we compute the sampling weights according to a desired fg_image_ratio.

    This is primarily useful in datasets which have a lot of empty (background) images, which may
    cause instability during training if not properly balanced against.

    Returns:
        ndarray: per-image sampling weights, normalized to sum to 1.
    """

    sample_weights = np.ones(len(imdb))

    # a negative fg_image_ratio disables balancing (uniform weights)
    if conf.fg_image_ratio >= 0:

        empty_inds = []
        valid_inds = []

        for imind, imobj in enumerate(imdb):

            valid = 0

            scale = conf.test_scale / imobj.imH
            igns, rmvs = determine_ignores(imobj.gts, conf.lbls, conf.ilbls, conf.min_gt_vis,
                                           conf.min_gt_h, conf.max_gt_h, scale)

            # count ground truths that are neither ignored nor removed
            for gtind, gt in enumerate(imobj.gts):

                if (not igns[gtind]) and (not rmvs[gtind]):
                    valid += 1

            sample_weights[imind] = valid

            if valid>0:
                valid_inds.append(imind)
            else:
                empty_inds.append(imind)

        # fg_image_ratio == 2 appears to be a sentinel meaning "weight by raw
        # gt count" (skip the ratio-based reweighting) -- TODO confirm usage
        if not (conf.fg_image_ratio == 2):
            # NOTE(review): raises ZeroDivisionError if every image is empty
            # or none are (len(valid_inds) or len(empty_inds) == 0)
            fg_weight = len(imdb) * conf.fg_image_ratio / len(valid_inds)
            bg_weight = len(imdb) * (1 - conf.fg_image_ratio) / len(empty_inds)
            sample_weights[valid_inds] = fg_weight
            sample_weights[empty_inds] = bg_weight

            logging.info('weighted respectively as {:.2f} and {:.2f}'.format(fg_weight, bg_weight))

        logging.info('Found {} foreground and {} empty images'.format(np.sum(sample_weights > 0), np.sum(sample_weights <= 0)))

    # force sampling weights to sum to 1
    sample_weights /= np.sum(sample_weights)

    return sample_weights
| 1,845 | 0 | 23 |
849fc88fae8a4096b80539f3f88d161280127931 | 83 | py | Python | covid19_id/data/meninggal/jenis_kelamin.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
] | null | null | null | covid19_id/data/meninggal/jenis_kelamin.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
] | null | null | null | covid19_id/data/meninggal/jenis_kelamin.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
] | null | null | null | from . import BaseMeninggal
| 13.833333 | 43 | 0.795181 | from . import BaseMeninggal
class MeninggalJenisKelamin(BaseMeninggal):
    """Deceased-case statistics broken down by gender (jenis kelamin).

    Pure marker subclass; all behaviour is inherited from BaseMeninggal.
    """
    pass
| 0 | 31 | 23 |
a120935690e12cde7645cc460ecd499ac70f9ff6 | 663 | py | Python | Problems/52.py | matejbolta/project-euler | 33a99de4c4d51d45f5168fb146e68ff46a1ad337 | [
"MIT"
] | null | null | null | Problems/52.py | matejbolta/project-euler | 33a99de4c4d51d45f5168fb146e68ff46a1ad337 | [
"MIT"
] | null | null | null | Problems/52.py | matejbolta/project-euler | 33a99de4c4d51d45f5168fb146e68ff46a1ad337 | [
"MIT"
] | null | null | null | '''
Permuted multiples
It can be seen that the number, 125874, and its double,
251748, contain exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x,
contain the same digits.
'''
def st2set(n):
    """Return the set of decimal digits of the non-negative integer n.

    Edge-case fix: st2set(0) now returns {0}; the original loop never
    executed for 0 and returned an empty set, which is not 0's digit set.
    (No caller in this script passes 0, so behaviour is otherwise unchanged.)
    """
    assert n >= 0  # digits are only well-defined here for non-negative n
    if n == 0:
        return {0}
    stevke = set()
    while n != 0:
        stevke.add(n % 10)
        n //= 10
    return stevke
import itertools
# scan positive integers until one whose multiples 2x..6x all share its digit set
for i in itertools.count(1):
    digits = st2set(i)
    if all(st2set(multiplier * i) == digits for multiplier in range(2, 7)):
        print(f'tole iščemo: {i}')
        break
| 24.555556 | 93 | 0.59276 | '''
Permuted multiples
It can be seen that the number, 125874, and its double,
251748, contain exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x,
contain the same digits.
'''
def st2set(n):
'''stevke da v mnozico'''
assert n >= 0 #zakaj pa ne
stevke = set()
while n != 0:
stevke.add(n % 10)
n //= 10
return stevke
import itertools
for i in itertools.count(1):
if st2set(i) == st2set(2*i) == st2set(3*i) == st2set(4*i) == st2set(5*i) == st2set(6*i):
print(f'tole iščemo: {i}')
break
#tole iščemo: 142857
| 0 | 0 | 0 |
53123f36cce52724b5b2d416395bfaad5a18a97d | 2,550 | py | Python | scripts/fasta2nj.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | scripts/fasta2nj.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | scripts/fasta2nj.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-08-04T22:46:38.000Z | 2019-08-04T22:46:38.000Z | '''
fasta2nj.py - convert fasta file to nj input
============================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
convert a fasta file to NJ input.
This script translates identifiers like
species|transcripts|gene|class to transcript_species GENEID=gene
Usage
-----
Example::
python fasta2nj.py --help
Type::
python fasta2nj.py --help
for command line help.
Command line options
--------------------
'''
import sys
import re
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Reads FASTA from stdin, rewrites headers of the form
    species|transcript|gene|... to 'transcript_species GENEID=gene'
    (or 'species_gene' when no transcript is present) on stdout.
    """

    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(
        version="%prog version: $Id: fasta2nj.py 2781 2009-09-10 11:33:14Z andreas $")

    parser.add_option("-m", "--map", dest="filename_map", type="string",
                      help="filename with mapping of species ids to swissprot species ids.")

    parser.set_defaults(
        separator="|",
        filename_map=None,
    )

    (options, args) = E.Start(parser)

    # BUG FIX: map_species2sp was previously only bound when --map was given,
    # so the `if map_species2sp:` below raised NameError without the option.
    map_species2sp = {}
    if options.filename_map:
        map_species2sp = IOTools.ReadMap(open(options.filename_map, "r"))

    ninput, noutput, nerrors = 0, 0, 0

    for line in sys.stdin:
        if line[0] == ">":
            ninput += 1
            id = re.match(">([^/ \t]+)", line[:-1]).groups()[0]
            data = id.split(options.separator)
            species = data[0]
            # NOTE(review): headers with a single field leave gene/transcript
            # unbound and would raise NameError below -- TODO confirm inputs
            if len(data) == 2:
                gene = data[1]
                transcript = None
            elif len(data) >= 3:
                gene = data[2]
                transcript = data[1]

            if map_species2sp:
                try:
                    species = map_species2sp[species]
                # BUG FIX: a dict lookup raises KeyError, not IndexError;
                # the original handler could never fire.
                except KeyError:
                    nerrors += 1
                    if options.loglevel >= 1:
                        options.stdlog.write(
                            "# could not map species %s\n" % species)

            if transcript:
                options.stdout.write(
                    ">%s_%s GENEID=%s\n" % (transcript, species, gene))
            else:
                options.stdout.write(">%s_%s\n" % (species, gene))
            noutput += 1
        else:
            options.stdout.write(line)

    if options.loglevel >= 1:
        options.stdlog.write(
            "# ninput=%i, noutput=%i, nerrors=%i\n" % (ninput, noutput, nerrors))

    E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 23.611111 | 92 | 0.529412 | '''
fasta2nj.py - convert fasta file to nj input
============================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
convert a fasta file to NJ input.
This script translates identifiers like
species|transcripts|gene|class to transcript_species GENEID=gene
Usage
-----
Example::
python fasta2nj.py --help
Type::
python fasta2nj.py --help
for command line help.
Command line options
--------------------
'''
import sys
import re
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: fasta2nj.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-m", "--map", dest="filename_map", type="string",
help="filename with mapping of species ids to swissprot species ids.")
parser.set_defaults(
separator="|",
filename_map=None,
)
(options, args) = E.Start(parser)
if options.filename_map:
map_species2sp = IOTools.ReadMap(open(options.filename_map, "r"))
ninput, noutput, nerrors = 0, 0, 0
for line in sys.stdin:
if line[0] == ">":
ninput += 1
id = re.match(">([^/ \t]+)", line[:-1]).groups()[0]
data = id.split(options.separator)
species = data[0]
if len(data) == 2:
gene = data[1]
transcript = None
elif len(data) >= 3:
gene = data[2]
transcript = data[1]
if map_species2sp:
try:
species = map_species2sp[species]
except IndexError:
nerrors += 1
if options.loglevel >= 1:
options.stdlog.write(
"# could not map species %s\n" % species)
if transcript:
options.stdout.write(
">%s_%s GENEID=%s\n" % (transcript, species, gene))
else:
options.stdout.write(">%s_%s\n" % (species, gene))
noutput += 1
else:
options.stdout.write(line)
if options.loglevel >= 1:
options.stdlog.write(
"# ninput=%i, noutput=%i, nerrors=%i\n" % (ninput, noutput, nerrors))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 0 | 0 | 0 |
dfa637e2c1098742df613e94ad0e259dfed67df3 | 1,000 | py | Python | runner1c/commands/create_epf.py | vakulenkoalex/runner-1c | 25b29f794085fb638808778dd0792fb98787543d | [
"BSD-2-Clause"
] | null | null | null | runner1c/commands/create_epf.py | vakulenkoalex/runner-1c | 25b29f794085fb638808778dd0792fb98787543d | [
"BSD-2-Clause"
] | 6 | 2018-01-10T11:43:27.000Z | 2018-01-12T12:26:25.000Z | runner1c/commands/create_epf.py | vakulenkoalex/runner-1c | 25b29f794085fb638808778dd0792fb98787543d | [
"BSD-2-Clause"
] | null | null | null | import runner1c
| 34.482759 | 118 | 0.657 | import runner1c
class CreateEpfParser(runner1c.parser.Parser):
    """Argument parser for the 'create_epf' sub-command.

    Builds an external data processor/report (.epf/.erf) from XML sources.
    """

    @property
    def name(self):
        # CLI sub-command name
        return 'create_epf'

    @property
    def description(self):
        # user-facing description (Russian): "create external data
        # processors or reports from sources"
        return 'создание внешних обработок или отчетов из исходников'

    def create_handler(self, **kwargs):
        """Instantiate the command handler for this sub-command."""
        return CreateEpf(**kwargs)

    def set_up(self):
        """Register the command-line arguments for create_epf."""
        self.add_argument_to_parser(connection_required=False)
        # --epf (Russian help): path to the output external data
        # processor/report file the result will be written to
        self._parser.add_argument('--epf', required=True,
                                  help='путь к файлу внешней обработки или отчета, в который будет записан результат')
        # --xml (Russian help): path to the root source (XML) file of the
        # external data processor/report
        self._parser.add_argument('--xml', required=True,
                                  help='путь к корневому файлу исходников внешней обработки или отчета')
class CreateEpf(runner1c.command.Command):
    """Runs the 1C:Enterprise designer to build an .epf/.erf from XML sources."""

    def __init__(self, **kwargs):
        # the build can only run in DESIGNER mode
        kwargs['mode'] = runner1c.command.Mode.DESIGNER
        super().__init__(**kwargs)
        # designer command line; {xml} and {epf} are filled from the parsed args
        self.add_argument('/LoadExternalDataProcessorOrReportFromFiles "{xml}" "{epf}"')
5d58065a132d6ee7ecac74b5fd0f7484050cf37a | 7,384 | py | Python | buildTruthDatabase.py | tomcm39/COVID19_expert_survey | b2d35b4eb241e04ec1e3fa2b1f81d2c11f5a6c1d | [
"MIT"
] | 13 | 2020-04-02T16:53:13.000Z | 2021-03-11T13:38:23.000Z | buildTruthDatabase.py | tomcm39/COVID19_expert_survey | b2d35b4eb241e04ec1e3fa2b1f81d2c11f5a6c1d | [
"MIT"
] | 1 | 2020-04-05T21:54:58.000Z | 2020-04-09T20:52:53.000Z | buildTruthDatabase.py | tomcm39/COVID19_expert_survey | b2d35b4eb241e04ec1e3fa2b1f81d2c11f5a6c1d | [
"MIT"
] | null | null | null | #mcandrew
import sys
import numpy as np
import pandas as pd
if __name__ == "__main__":
# SURVEY1 (2020-02-17)
survey1 = truth()
survey1.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF-1-0','QF-1-1_1','QF-2-0','QF-2-0_1','QF-2-2_1','QF-2-2_2','QF-2-2_3','QF-3-0'])
survey1.addTruths([np.nan,np.nan,np.nan,1,1,1,1,35,35,35,np.nan])
survey1DB = survey1.export2DB('2020-02-17')
# SURVEY2 (2020-02-24)
survey2 = truth()
survey2.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1','QF2_1','QF3','QF4_1','QF5_1','QF5_2','QF5_3','QF6_1','QF6_2','QF6_3','QF7'])
survey2.addTruths([np.nan,np.nan,np.nan,1,1,1,1,62,62,62,7,7,7,np.nan])
survey2DB = survey2.export2DB('2020-02-24')
# SURVEY3 (2020-03-02)
survey3 = truth()
survey3.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF6_2','QF6_3','QF7'])
survey3.addTruths([np.nan,np.nan,np.nan,423,423,423,3487,3487,3487,35,35,35,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,17,17,17,np.nan])
survey3DB = survey3.export2DB('2020-03-02')
# SURVEY4 (2020-03-09)
survey4 = truth()
survey4.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3','QF4_1','QF4_2','QF4_3','QF6_1','QF6_2','QF6_3','QF7','QF8','QF9'])
survey4.addTruths([np.nan,np.nan,np.nan,3487,3487,3487,49,49,49,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey4DB = survey4.export2DB('2020-03-09')
# SURVEY5 (2020-03-16)
survey5 = truth()
survey5.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF7','QF8'])
survey5.addTruths([np.nan,np.nan,np.nan,33404,33404,33404,139061,139061,139061,32,32,32,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey5DB = survey5.export2DB('2020-03-16')
# SURVEY6 (2020-03-23)
survey6 = truth()
survey6.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF6_4','QF6_5','QF6_6','QF6_7','QF6_8','QF7','QF8'])
survey6.addTruths([np.nan,np.nan,np.nan,139061,139061,139061,332308,332308,332308,49,49,49,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey6DB = survey6.export2DB('2020-03-23')
# SURVEY7 (2020-03-30)
survey7 = truth()
survey7.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_4','QF5_5','QF5_6','QF5_7','QF5_8','QF6_1','QF6_2','QF6_3','QF7_1','QF8','QF9'])
survey7.addTruths([np.nan,np.nan,np.nan,332308,332308,332308,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey7DB = survey7.export2DB('2020-03-30')
# SURVEY8 (2020-04-06)
survey8 = truth()
survey8.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_4','QF3_5','QF3_6','QF3_7','QF3_8','QF3_10','Q4_1','Q4_2','Q4_3','Q4_4','Q4_5','QF5_1','QF5_2','QF5_3','QF6','QF7'])
survey8.addTruths([np.nan,np.nan,np.nan,576774,576774,576774,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,2,2,2,2,2,np.nan,np.nan,np.nan,np.nan,np.nan])
survey8DB = survey8.export2DB('2020-04-06')
# SURVEY9 (2020-04-13)
# true number of cases was 751,646 and corresponds to bin the 3rd bin which is labeled "3"
survey9 = truth()
survey9.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_5','QF1_6','QF1_7','QF1_8','QF1_9','QF2_1','QF2_2','QF2_3','QF2_4','QF2_5','QF3_1','QF3_2','QF3_3','QF3_4','QF3_5','QF3_6','Q4_1','Q4_2','Q4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF7','QF8'])
survey9.addTruths([np.nan,np.nan,np.nan,3,3,3,3,3,3,3,3,5,5,5,5,5,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey9DB = survey9.export2DB('2020-04-13')
# SURVEY10 (2020-04-20)
# true number of cases was 960,343 and corresponds to the fourth bin which is labeled "5"
survey10 = truth()
survey10.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_5','QF1_6','QF1_7','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','Q4_1','Q4_2','Q4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF6_2'
,'QF6_3','QF6_4','QF6_5','QF7','QF8'])
survey10.addTruths([np.nan,np.nan,np.nan,5,5,5,5,5,5,73291,73291,73291,3349,3349,3349,2267,2267,2267,np.nan,np.nan,np.nan,1,1,1,1,1,np.nan,np.nan])
survey10DB = survey10.export2DB('2020-04-20')
# SURVEY11 (2020-04-27)
# 1,152,006 reported
survey11 = truth()
survey11.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_4','QF1_5','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF3_4','QF3_5','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3'])
survey11.addTruths([np.nan,np.nan,np.nan,4,4,4,4,4,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey11DB = survey11.export2DB('2020-04-27')
# SURVEY12 (2020-05-04)
survey12 = truth()
survey12.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_4','QF1_5','QF1_6','QF1_7','QF2_1','QF2_2','QF2_3','QF2_4','QF2_5','QF2_6','QF2_7','QF3_1','QF3_2','QF3_3','QF3_4','QF3_5','QF4_1','QF4_2','QF4_3','QF4_4','QF4_5','QF5_1','QF5_2','QF5_3'])
survey12.addTruths([np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey12DB = survey12.export2DB('2020-05-04')
#SURVEY13 - 2020-05-12
survey13 = truth()
survey13.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_4','QF1_5','QF1_6','QF1_7','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1'])
survey13.addTruths([np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey13DB = survey13.export2DB('2020-05-11')
entiredb = pd.DataFrame()
for db in [survey1DB,survey2DB,survey3DB,survey4DB,survey5DB
,survey6DB,survey7DB,survey8DB,survey9DB,survey10DB
,survey11DB,survey12DB,survey13DB]:
entiredb = entiredb.append(db)
entiredb.to_csv('./database/truthDatabase.csv',index=False)
| 63.655172 | 274 | 0.643418 | #mcandrew
import sys
import numpy as np
import pandas as pd
class truth(object):
    """Accumulates survey question labels and ground-truth values, then
    exports them as a pandas DataFrame tagged with the survey issue date."""

    def __init__(self):
        # column-oriented store mirroring the eventual DataFrame layout
        self.truth = {'questionLabel': [], 'questionroot': [], 'truth': []}

    def addQuestionLabels(self, listOfLabels):
        """Record labels; a label's root is the text before its first underscore."""
        self.truth['questionLabel'].extend(listOfLabels)
        self.truth['questionroot'].extend(
            label.split('_')[0] for label in listOfLabels)

    def addTruths(self, truths):
        """Record the ground-truth values (aligned with the labels)."""
        self.truth['truth'].extend(truths)

    def export2DB(self, surveyIssuedDate):
        """Build and return a DataFrame of the stored data, adding a
        'surveyIssued' column set to surveyIssuedDate for every row."""
        self.truthDB = pd.DataFrame(self.truth)
        self.truthDB['surveyIssued'] = surveyIssuedDate
        return self.truthDB
if __name__ == "__main__":
# SURVEY1 (2020-02-17)
survey1 = truth()
survey1.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF-1-0','QF-1-1_1','QF-2-0','QF-2-0_1','QF-2-2_1','QF-2-2_2','QF-2-2_3','QF-3-0'])
survey1.addTruths([np.nan,np.nan,np.nan,1,1,1,1,35,35,35,np.nan])
survey1DB = survey1.export2DB('2020-02-17')
# SURVEY2 (2020-02-24)
survey2 = truth()
survey2.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1','QF2_1','QF3','QF4_1','QF5_1','QF5_2','QF5_3','QF6_1','QF6_2','QF6_3','QF7'])
survey2.addTruths([np.nan,np.nan,np.nan,1,1,1,1,62,62,62,7,7,7,np.nan])
survey2DB = survey2.export2DB('2020-02-24')
# SURVEY3 (2020-03-02)
survey3 = truth()
survey3.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF6_2','QF6_3','QF7'])
survey3.addTruths([np.nan,np.nan,np.nan,423,423,423,3487,3487,3487,35,35,35,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,17,17,17,np.nan])
survey3DB = survey3.export2DB('2020-03-02')
# SURVEY4 (2020-03-09)
survey4 = truth()
survey4.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3','QF4_1','QF4_2','QF4_3','QF6_1','QF6_2','QF6_3','QF7','QF8','QF9'])
survey4.addTruths([np.nan,np.nan,np.nan,3487,3487,3487,49,49,49,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey4DB = survey4.export2DB('2020-03-09')
# SURVEY5 (2020-03-16)
survey5 = truth()
survey5.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF7','QF8'])
survey5.addTruths([np.nan,np.nan,np.nan,33404,33404,33404,139061,139061,139061,32,32,32,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey5DB = survey5.export2DB('2020-03-16')
# SURVEY6 (2020-03-23)
survey6 = truth()
survey6.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF6_4','QF6_5','QF6_6','QF6_7','QF6_8','QF7','QF8'])
survey6.addTruths([np.nan,np.nan,np.nan,139061,139061,139061,332308,332308,332308,49,49,49,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey6DB = survey6.export2DB('2020-03-23')
# SURVEY7 (2020-03-30)
survey7 = truth()
survey7.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1','QF5_4','QF5_5','QF5_6','QF5_7','QF5_8','QF6_1','QF6_2','QF6_3','QF7_1','QF8','QF9'])
survey7.addTruths([np.nan,np.nan,np.nan,332308,332308,332308,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey7DB = survey7.export2DB('2020-03-30')
# SURVEY8 (2020-04-06)
survey8 = truth()
survey8.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF2_1','QF2_2','QF2_3','QF3_1','QF3_4','QF3_5','QF3_6','QF3_7','QF3_8','QF3_10','Q4_1','Q4_2','Q4_3','Q4_4','Q4_5','QF5_1','QF5_2','QF5_3','QF6','QF7'])
survey8.addTruths([np.nan,np.nan,np.nan,576774,576774,576774,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,2,2,2,2,2,np.nan,np.nan,np.nan,np.nan,np.nan])
survey8DB = survey8.export2DB('2020-04-06')
# SURVEY9 (2020-04-13)
# true number of cases was 751,646 and corresponds to bin the 3rd bin which is labeled "3"
survey9 = truth()
survey9.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_5','QF1_6','QF1_7','QF1_8','QF1_9','QF2_1','QF2_2','QF2_3','QF2_4','QF2_5','QF3_1','QF3_2','QF3_3','QF3_4','QF3_5','QF3_6','Q4_1','Q4_2','Q4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF7','QF8'])
survey9.addTruths([np.nan,np.nan,np.nan,3,3,3,3,3,3,3,3,5,5,5,5,5,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey9DB = survey9.export2DB('2020-04-13')
# SURVEY10 (2020-04-20)
# true number of cases was 960,343 and corresponds to the fourth bin which is labeled "5"
survey10 = truth()
survey10.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_5','QF1_6','QF1_7','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','Q4_1','Q4_2','Q4_3','QF5_1','QF5_2','QF5_3','QF6_1','QF6_2'
,'QF6_3','QF6_4','QF6_5','QF7','QF8'])
survey10.addTruths([np.nan,np.nan,np.nan,5,5,5,5,5,5,73291,73291,73291,3349,3349,3349,2267,2267,2267,np.nan,np.nan,np.nan,1,1,1,1,1,np.nan,np.nan])
survey10DB = survey10.export2DB('2020-04-20')
# SURVEY11 (2020-04-27)
# 1,152,006 reported
survey11 = truth()
survey11.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_4','QF1_5','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF3_4','QF3_5','QF4_1','QF4_2','QF4_3','QF5_1','QF5_2','QF5_3'])
survey11.addTruths([np.nan,np.nan,np.nan,4,4,4,4,4,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey11DB = survey11.export2DB('2020-04-27')
# SURVEY12 (2020-05-04)
survey12 = truth()
survey12.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_4','QF1_5','QF1_6','QF1_7','QF2_1','QF2_2','QF2_3','QF2_4','QF2_5','QF2_6','QF2_7','QF3_1','QF3_2','QF3_3','QF3_4','QF3_5','QF4_1','QF4_2','QF4_3','QF4_4','QF4_5','QF5_1','QF5_2','QF5_3'])
survey12.addTruths([np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey12DB = survey12.export2DB('2020-05-04')
#SURVEY13 - 2020-05-12
survey13 = truth()
survey13.addQuestionLabels(['Q0-1','QR-1-0','QR-2-0','QF1_1','QF1_2','QF1_3','QF1_4','QF1_5','QF1_6','QF1_7','QF2_1','QF2_2','QF2_3','QF3_1','QF3_2','QF3_3','QF4_1','QF4_2','QF4_3','QF5_1'])
survey13.addTruths([np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan])
survey13DB = survey13.export2DB('2020-05-11')
entiredb = pd.DataFrame()
for db in [survey1DB,survey2DB,survey3DB,survey4DB,survey5DB
,survey6DB,survey7DB,survey8DB,survey9DB,survey10DB
,survey11DB,survey12DB,survey13DB]:
entiredb = entiredb.append(db)
entiredb.to_csv('./database/truthDatabase.csv',index=False)
| 457 | -1 | 142 |
eaee0db231fc468c07550e445bb19bd0d51c63bc | 117 | py | Python | project_euler/solutions/problem_20.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | null | null | null | project_euler/solutions/problem_20.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | 9 | 2017-02-20T23:41:40.000Z | 2017-04-16T15:36:54.000Z | project_euler/solutions/problem_20.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | null | null | null | from math import factorial
| 19.5 | 57 | 0.692308 | from math import factorial
def solve(n: int=100) -> str:
return sum(int(digit) for digit in str(factorial(n)))
| 66 | 0 | 23 |
59628e9601ecf6c11f54f3d49b5254aea9fc24d8 | 3,604 | py | Python | scripts/benchmark_util.py | Kanghee-Lee/3D_Localization | 9756a0e22b782257758cff6dc7302a55d939aa44 | [
"MIT"
] | 5 | 2021-03-20T15:32:23.000Z | 2022-03-03T09:10:03.000Z | scripts/benchmark_util.py | Ganghee-Lee/3D_Localization | 9756a0e22b782257758cff6dc7302a55d939aa44 | [
"MIT"
] | null | null | null | scripts/benchmark_util.py | Ganghee-Lee/3D_Localization | 9756a0e22b782257758cff6dc7302a55d939aa44 | [
"MIT"
] | null | null | null | import open3d as o3d
import os
import logging
import numpy as np
from util.trajectory import CameraPose
from util.pointcloud import compute_overlap_ratio, \
make_open3d_point_cloud, make_open3d_feature_from_numpy
from scipy import spatial
#######################################################################
| 32.763636 | 83 | 0.681188 | import open3d as o3d
import os
import logging
import numpy as np
from util.trajectory import CameraPose
from util.pointcloud import compute_overlap_ratio, \
make_open3d_point_cloud, make_open3d_feature_from_numpy
from scipy import spatial
def run_ransac(xyz0, xyz1, feat0, feat1, voxel_size):
    """Register xyz0 onto xyz1 with RANSAC over feature correspondences
    and return the estimated 4x4 transformation."""
    distance_threshold = voxel_size * 1.5
    estimation = o3d.registration.TransformationEstimationPointToPoint(False)
    checkers = [
        o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
        o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold),
    ]
    criteria = o3d.registration.RANSACConvergenceCriteria(4000000, 500)
    registration = o3d.registration.registration_ransac_based_on_feature_matching(
        xyz0, xyz1, feat0, feat1, distance_threshold,
        estimation, 4, checkers, criteria)
    return registration.transformation
def gather_results(results):
    """Collect successful registration results into CameraPose entries.

    Each result is [success, i, j, s, transformation]; only successful
    ones contribute to the returned trajectory list.
    """
    return [CameraPose([r[1], r[2], r[3]], r[4]) for r in results if r[0]]
def gen_matching_pair(pts_num):
    """Enumerate all index pairs (i < j) among pts_num fragments.

    Returns a list of [i, j, pts_num] triples, one per unordered pair.
    """
    return [[i, j, pts_num]
            for i in range(pts_num)
            for j in range(i + 1, pts_num)]
def read_data(feature_path, name):
    """Load one fragment's saved data from `<feature_path>/<name>.npz`.

    Returns:
        (points, xyz, feat): the raw 'points' array, an open3d point cloud
        built from the 'xyz' entry, and an open3d Feature built from the
        'feature' entry.
    """
    data = np.load(os.path.join(feature_path, name + ".npz"))
    xyz = make_open3d_point_cloud(data['xyz'])
    feat = make_open3d_feature_from_numpy(data['feature'])
    # leftover debugging prints of the raw feature matrix were removed here
    return data['points'], xyz, feat
def do_single_pair_matching(feature_path, set_name, m, voxel_size):
    """Register one fragment pair with RANSAC and decide whether it matches.

    :param feature_path: directory containing ``<set_name>_<idx>.npz`` files
    :param set_name: scene/set prefix of the fragment files
    :param m: matching-pair triple ``[i, j, s]`` from gen_matching_pair
    :param voxel_size: voxel size used for the RANSAC threshold and overlap test
    :return: ``[success, i, j, s, transformation]``; the identity matrix is
        returned as the transformation when the overlap ratio is too small
    """
    i, j, s = m
    name_i = "%s_%03d" % (set_name, i)
    name_j = "%s_%03d" % (set_name, j)
    logging.info("matching %s %s" % (name_i, name_j))
    print("feature_path : ", feature_path)
    points_i, xyz_i, feat_i = read_data(feature_path, name_i)
    points_j, xyz_j, feat_j = read_data(feature_path, name_j)
    print("points, xyz, feats : ", points_j, xyz_j, feat_j)
    # Always register the smaller cloud onto the larger one; when the argument
    # order is swapped, invert the transform to keep the i -> j convention.
    if len(xyz_i.points) < len(xyz_j.points):
        trans = run_ransac(xyz_i, xyz_j, feat_i, feat_j, voxel_size)
    else:
        trans = run_ransac(xyz_j, xyz_i, feat_j, feat_i, voxel_size)
        trans = np.linalg.inv(trans)
    ratio = compute_overlap_ratio(xyz_i, xyz_j, trans, voxel_size)
    logging.info(f"{ratio}")
    # A pair only counts as a match when enough of the two clouds overlap.
    if ratio > 0.3:
        return [True, i, j, s, np.linalg.inv(trans)]
    else:
        return [False, i, j, s, np.identity(4)]
#######################################################################
def global_desc_matching(feature_path, set_name, m, voxel_size, pooling_arg) :
    """Compare two fragments via pooled global descriptors (cosine similarity).

    :param feature_path: directory containing ``<set_name>_<idx>.npz`` files
    :param set_name: scene/set prefix of the fragment files
    :param m: matching-pair triple ``[i, j, s]``
    :param voxel_size: unused here; presumably kept for signature parity with
        do_single_pair_matching -- TODO confirm before removing
    :param pooling_arg: pooling mode forwarded to feat_pooling ('max' or 'avg')
    :return: ``[success, i, j, s, cosine_similarity]`` where success means the
        similarity exceeded the 0.5 threshold
    """
    i, j, s = m
    name_i = "%s_%03d" % (set_name, i)
    name_j = "%s_%03d" % (set_name, j)
    logging.info("matching %s %s" % (name_i, name_j))
    # print("feature_path : ", feature_path)
    feat_np_i = read_dataAsnumpy(feature_path, name_i)
    feat_np_j = read_dataAsnumpy(feature_path, name_j)
    # Pool the per-point features into one global descriptor per fragment.
    feat_np_i = feat_pooling(feat_np_i, pooling_arg)
    feat_np_j = feat_pooling(feat_np_j, pooling_arg)
    # scipy's distance.cosine returns a distance, so similarity = 1 - distance.
    cos_sim = 1 - spatial.distance.cosine(feat_np_i, feat_np_j)
    # print('feature & cos_sim')
    # print(cos_sim)
    if cos_sim > 0.5 :
        return [True, i, j, s, cos_sim]
    else :
        return [False, i, j, s, cos_sim]
def read_dataAsnumpy(feature_path, name):
    """Load only the 'feature' array from ``<feature_path>/<name>.npz``.

    :param feature_path: directory containing the .npz archive
    :param name: archive name without the .npz extension
    :return: numpy array stored under the 'feature' key
    """
    # np.load on an .npz returns a lazy NpzFile backed by an open file handle;
    # previously that handle was never closed. Using the NpzFile as a context
    # manager closes it once the array has been materialised.
    with np.load(os.path.join(feature_path, name + ".npz")) as data:
        return data['feature']
def feat_pooling(feat, pooling_arg):
    """Pool a (num_points, feature_dim) matrix into one global descriptor.

    :param feat: 2-D numpy array of per-point features
    :param pooling_arg: 'max' for column-wise maximum, 'avg' for column-wise mean
    :return: 1-D numpy array of length feature_dim
    :raises ValueError: if ``pooling_arg`` is not a recognised pooling mode
    """
    if pooling_arg == 'max':
        # Leftover debug prints removed; np.max is now also evaluated once
        # instead of twice.
        return np.max(feat, 0)
    elif pooling_arg == 'avg':
        return np.average(feat, 0)
    # Previously an unknown mode fell through and silently returned None,
    # which crashed later in the cosine-similarity computation; fail loudly.
    raise ValueError("unknown pooling_arg %r, expected 'max' or 'avg'" % (pooling_arg,))
| 3,093 | 0 | 185 |
908a6ac3242ecbde2aee8e2528f69cc9b7e536e9 | 3,337 | py | Python | scripts/kitti_eval.py | lannelin/unsupervised_track_segmentation | 7479b1fba81a00544f71384c77b6f84e4a3456d3 | [
"Apache-2.0"
] | null | null | null | scripts/kitti_eval.py | lannelin/unsupervised_track_segmentation | 7479b1fba81a00544f71384c77b6f84e4a3456d3 | [
"Apache-2.0"
] | null | null | null | scripts/kitti_eval.py | lannelin/unsupervised_track_segmentation | 7479b1fba81a00544f71384c77b6f84e4a3456d3 | [
"Apache-2.0"
] | null | null | null | import os
from typing import Dict, Tuple
import hydra
import pandas as pd
import torch
import wandb
from omegaconf import DictConfig
from pl_bolts.datamodules.kitti_datamodule import KittiDataModule
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import WandbLogger
from tqdm.auto import tqdm
# isort:imports-firstparty
from trackseg.datamodules import UnsupervisedSingleImageDataModule
from trackseg.model import UnsupervisedSemSegment
PROJECT_ROOT = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
@hydra.main(config_path="../config", config_name="config")
if __name__ == "__main__":
my_app()
| 27.352459 | 77 | 0.676356 | import os
from typing import Dict, Tuple
import hydra
import pandas as pd
import torch
import wandb
from omegaconf import DictConfig
from pl_bolts.datamodules.kitti_datamodule import KittiDataModule
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import WandbLogger
from tqdm.auto import tqdm
# isort:imports-firstparty
from trackseg.datamodules import UnsupervisedSingleImageDataModule
from trackseg.model import UnsupervisedSemSegment
PROJECT_ROOT = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
def train_validate(
    image: torch.Tensor,
    target: torch.Tensor,
    n_channels: int,
    max_steps: int,
    resize_size: Tuple[int, int],
    run_id: str,
    iteration: int,
) -> Dict[str, float]:
    """Fit a fresh unsupervised segmentation model on one image and validate it.

    A new ``UnsupervisedSemSegment`` model is trained from scratch on the
    single ``image`` and then evaluated against ``target``.

    :param image: input image tensor for the single-image data module
    :param target: ground-truth segmentation, used only during validation
    :param n_channels: number of model output channels
    :param max_steps: maximum optimisation steps for the inner training run
    :param resize_size: image size the data module resizes samples to
        -- assumed (height, width); TODO confirm against the data module
    :param run_id: wandb run id so all inner runs log into the same run
    :param iteration: outer-loop index, used as the wandb metric prefix
    :return: validation metrics of the trained model (first result only)
    """
    # don't need target for inner dm
    inner_dm = UnsupervisedSingleImageDataModule(
        image=image,
        target=target,
        im_size=resize_size,
        normalize=False,
        num_workers=0,
    )
    # new model
    model = UnsupervisedSemSegment(
        n_channels=n_channels, connectivity_weight=1.0, similarity_weight=1.0
    )
    # train
    wandb_logger = WandbLogger(id=run_id, prefix=str(iteration))
    trainer = Trainer(gpus=1, max_steps=max_steps, logger=wandb_logger)
    trainer.fit(model, datamodule=inner_dm)

    model.eval()
    # use test_dataloader here that also returns target
    results = trainer.validate(dataloaders=inner_dm.test_dataloader())
    # close logger
    trainer.logger.close()
    # only expect one result so take 0th index
    results = results[0]
    return results
def main(
    kitti_data_path: str,
    kitti_val_split: float,
    kitti_test_split: float,
    n_channels: int,
    max_steps: int,
    resize_size: Tuple[int, int],
    wandb_project_name: str,
    random_seed: int,
):
    """Evaluate the unsupervised segmentation approach on the KITTI val split.

    For every image in the KITTI validation dataloader a fresh model is
    trained and validated via ``train_validate``; the per-image metrics are
    averaged and logged once to a single wandb run with a ``FINAL_`` prefix.

    :param kitti_data_path: root directory of the KITTI dataset
    :param kitti_val_split: fraction of the data used for validation
    :param kitti_test_split: fraction of the data used for testing
    :param n_channels: number of model output channels
    :param max_steps: training steps per image
    :param resize_size: target image size for the inner data module
    :param wandb_project_name: wandb project to log into
    :param random_seed: seed passed to seed_everything and the data module
    """
    seed_everything(random_seed)

    dm = KittiDataModule(
        kitti_data_path,
        batch_size=1,
        val_split=kitti_val_split,
        test_split=kitti_test_split,
        seed=random_seed,
        num_workers=0,
    )

    wandb_run = wandb.init(project=wandb_project_name)

    all_results = list()
    # loop through val dataloader
    for i, batch in enumerate(
        tqdm(dm.val_dataloader(), desc="kitti val dataloader - outer loop")
    ):
        images, targets = batch
        # batch_size is 1, so index 0 selects the only sample of the batch
        all_results.append(
            train_validate(
                image=images[0],
                target=targets[0],
                n_channels=n_channels,
                max_steps=max_steps,
                resize_size=resize_size,
                run_id=wandb_run.id,
                iteration=i,
            )
        )

    # average the per-image metrics and log them once with a FINAL_ prefix
    df = pd.DataFrame(all_results)
    result_dict = df.mean().add_prefix("FINAL_").to_dict()
    wandb_run.log(result_dict)
    wandb_run.finish()
@hydra.main(config_path="../config", config_name="config")
def my_app(cfg: DictConfig) -> None:
    """Hydra entry point: unpack the composed config and run the evaluation.

    :param cfg: hierarchical configuration composed by hydra from ../config
    """
    main(
        kitti_data_path=os.path.join(PROJECT_ROOT, cfg.locations.data.kitti),
        kitti_val_split=cfg.kitti.val_split,
        kitti_test_split=cfg.kitti.test_split,
        n_channels=cfg.model.n_channels,
        max_steps=cfg.model.max_steps,
        resize_size=tuple(cfg.kitti.resize_size),
        wandb_project_name=cfg.wandb.project,
        random_seed=cfg.general.random_seed,
    )


if __name__ == "__main__":
    my_app()
| 2,609 | 0 | 68 |
4312ae0b3b2291e659d3e2ad05cca9e9d18a7593 | 598 | py | Python | tests/cli/workflows/test_matdyn.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 40 | 2017-09-25T20:22:43.000Z | 2022-02-21T02:53:41.000Z | tests/cli/workflows/test_matdyn.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 594 | 2017-08-08T17:28:52.000Z | 2022-03-28T13:38:10.000Z | tests/cli/workflows/test_matdyn.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 66 | 2017-08-08T16:58:56.000Z | 2022-03-17T10:18:43.000Z | # -*- coding: utf-8 -*-
"""Tests for the ``calculation launch matdyn`` command."""
from aiida_quantumespresso.cli.calculations.matdyn import launch_calculation
def test_command_base(run_cli_process_launch_command, fixture_code, generate_force_constants_data):
"""Test invoking the calculation launch command with only required inputs."""
code = fixture_code('quantumespresso.matdyn').store()
force_constants = generate_force_constants_data.store()
options = ['-X', code.full_label, '-D', force_constants.pk]
run_cli_process_launch_command(launch_calculation, options=options)
| 49.833333 | 99 | 0.779264 | # -*- coding: utf-8 -*-
"""Tests for the ``calculation launch matdyn`` command."""
from aiida_quantumespresso.cli.calculations.matdyn import launch_calculation
def test_command_base(run_cli_process_launch_command, fixture_code, generate_force_constants_data):
"""Test invoking the calculation launch command with only required inputs."""
code = fixture_code('quantumespresso.matdyn').store()
force_constants = generate_force_constants_data.store()
options = ['-X', code.full_label, '-D', force_constants.pk]
run_cli_process_launch_command(launch_calculation, options=options)
| 0 | 0 | 0 |
2c2d962f716371fe9fb3f7d8128b69aaf44ebb1e | 4,718 | py | Python | view/view_main/view_detector_settings.py | cgtuebingen/emca | 560975bddc1b6176fe25029acb13d7806c8ab35b | [
"MIT"
] | 3 | 2021-09-28T12:08:10.000Z | 2022-01-20T18:27:29.000Z | view/view_main/view_detector_settings.py | cgtuebingen/emca | 560975bddc1b6176fe25029acb13d7806c8ab35b | [
"MIT"
] | 1 | 2021-11-08T12:57:45.000Z | 2021-11-17T13:45:52.000Z | view/view_main/view_detector_settings.py | cgtuebingen/emca | 560975bddc1b6176fe25029acb13d7806c8ab35b | [
"MIT"
] | 1 | 2022-01-20T18:27:48.000Z | 2022-01-20T18:27:48.000Z | """
MIT License
Copyright (c) 2020 Christoph Kreisl
Copyright (c) 2021 Lukas Ruppert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from core.pyside2_uic import loadUi
from PySide2.QtCore import Slot
from PySide2.QtWidgets import QWidget
from PySide2.QtWidgets import QApplication
import os
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from controller.controller import Controller
else:
from typing import Any as Controller
class ViewDetectorSettings(QWidget):
"""
ViewDetectorSettings
Handles the settings of the detector.
The view will trigger the detector which will detect outliers based on the final estimate data.
"""
def set_controller(self, controller : Controller):
"""
Sets the connection to the controller
:param controller: Controller
:return:
"""
self._controller = controller
def init_values(self, detector):
"""
Initialise the values of the view from the detector
:param detector:
:return:
"""
self.dsb_m.setValue(detector.m)
self.dsb_alpha.setValue(detector.alpha)
self.dsb_k.setValue(detector.k)
self.dsb_pre_filter.setValue(detector.pre_filter)
self.cb_default.setChecked(detector.is_default_active)
@Slot(bool, name='toggle_esd')
def toggle_esd(self, clicked):
"""
Toggles the checkbox of the ESD detector, only one detector can be active
:param clicked: boolean
:return:
"""
if self.cb_default.isChecked():
self.cb_esd.setChecked(False)
if not self.cb_default.isChecked() and not self.cb_esd.isChecked():
self.cb_esd.setChecked(True)
@Slot(bool, name='toggle_default')
def toggle_default(self, clicked):
"""
Toggles the checkbox of the default detector, only one detector can be active
:param clicked: boolean
:return:
"""
if self.cb_esd.isChecked():
self.cb_default.setChecked(False)
if not self.cb_esd.isChecked() and not self.cb_default.isChecked():
self.cb_default.setChecked(True)
@Slot(bool, name='apply')
def apply(self, clicked):
"""
Informs the controller to apply the current detector settings
:param clicked: boolean
:return:
"""
self._controller.detector.update_and_run_detector(
self.dsb_m.value(),
self.dsb_alpha.value(),
self.dsb_k.value(),
self.dsb_pre_filter.value(),
self.cb_default.isChecked(),
self.cb_is_active.isChecked())
@Slot(bool, name='apply_close')
def apply_close(self, clicked):
"""
Applies the current detector by informing the controller and closes the view.
:param clicked: boolean
:return:
"""
self.apply(clicked)
self.close()
| 35.208955 | 105 | 0.671895 | """
MIT License
Copyright (c) 2020 Christoph Kreisl
Copyright (c) 2021 Lukas Ruppert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from core.pyside2_uic import loadUi
from PySide2.QtCore import Slot
from PySide2.QtWidgets import QWidget
from PySide2.QtWidgets import QApplication
import os
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from controller.controller import Controller
else:
from typing import Any as Controller
class ViewDetectorSettings(QWidget):
    """
    ViewDetectorSettings
    Handles the settings of the detector.
    The view will trigger the detector which will detect outliers based on the
    final estimate data. Exactly one of the two detector checkboxes (default /
    ESD) is kept active at any time.
    """

    def __init__(self, parent=None):
        """Load the .ui layout, center the window and connect the signals.

        :param parent: optional parent widget
        """
        # BUGFIX: forward the parent argument instead of hard-coding
        # parent=None, so Qt parent/child ownership works when a parent
        # widget is supplied by the caller.
        QWidget.__init__(self, parent=parent)
        ui_filepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'ui', 'detector.ui'))
        loadUi(ui_filepath, self)
        self._controller = None
        # center widget depending on screen size
        desktop_widget = QApplication.desktop()
        screen_rect = desktop_widget.availableGeometry(self)
        self.move(screen_rect.center() - self.rect().center())
        self.cb_default.clicked.connect(self.toggle_esd)
        self.cb_esd.clicked.connect(self.toggle_default)
        self.btn_apply.clicked.connect(self.apply)
        self.btn_apply_close.clicked.connect(self.apply_close)

    def set_controller(self, controller : Controller):
        """
        Sets the connection to the controller
        :param controller: Controller
        :return:
        """
        self._controller = controller

    def init_values(self, detector):
        """
        Initialise the values of the view from the detector
        :param detector: detector object providing m, alpha, k, pre_filter
            and is_default_active
        :return:
        """
        self.dsb_m.setValue(detector.m)
        self.dsb_alpha.setValue(detector.alpha)
        self.dsb_k.setValue(detector.k)
        self.dsb_pre_filter.setValue(detector.pre_filter)
        self.cb_default.setChecked(detector.is_default_active)

    @Slot(bool, name='toggle_esd')
    def toggle_esd(self, clicked):
        """
        Toggles the checkbox of the ESD detector, only one detector can be active
        :param clicked: boolean
        :return:
        """
        if self.cb_default.isChecked():
            self.cb_esd.setChecked(False)
        # Never allow both checkboxes to be unchecked at the same time.
        if not self.cb_default.isChecked() and not self.cb_esd.isChecked():
            self.cb_esd.setChecked(True)

    @Slot(bool, name='toggle_default')
    def toggle_default(self, clicked):
        """
        Toggles the checkbox of the default detector, only one detector can be active
        :param clicked: boolean
        :return:
        """
        if self.cb_esd.isChecked():
            self.cb_default.setChecked(False)
        # Never allow both checkboxes to be unchecked at the same time.
        if not self.cb_esd.isChecked() and not self.cb_default.isChecked():
            self.cb_default.setChecked(True)

    @Slot(bool, name='apply')
    def apply(self, clicked):
        """
        Informs the controller to apply the current detector settings
        :param clicked: boolean
        :return:
        """
        self._controller.detector.update_and_run_detector(
            self.dsb_m.value(),
            self.dsb_alpha.value(),
            self.dsb_k.value(),
            self.dsb_pre_filter.value(),
            self.cb_default.isChecked(),
            self.cb_is_active.isChecked())

    @Slot(bool, name='apply_close')
    def apply_close(self, clicked):
        """
        Applies the current detector by informing the controller and closes the view.
        :param clicked: boolean
        :return:
        """
        self.apply(clicked)
        self.close()
| 679 | 0 | 27 |
884a10363f56a97f7b02a4f162879be1da06b0e6 | 5,402 | py | Python | HLS_tuner/hlstuner/search/learningtechniques.py | stephenneuendorffer/hls_tuner | fa7de78f0e2bb4b8f9f2e0a0368ed071b379c875 | [
"MIT"
] | 1 | 2021-02-21T12:13:09.000Z | 2021-02-21T12:13:09.000Z | HLS_tuner/hlstuner/search/learningtechniques.py | stephenneuendorffer/hls_tuner | fa7de78f0e2bb4b8f9f2e0a0368ed071b379c875 | [
"MIT"
] | null | null | null | HLS_tuner/hlstuner/search/learningtechniques.py | stephenneuendorffer/hls_tuner | fa7de78f0e2bb4b8f9f2e0a0368ed071b379c875 | [
"MIT"
] | 1 | 2019-09-10T16:45:27.000Z | 2019-09-10T16:45:27.000Z | # Machine Learning Search Techniques
#
# Base classes for search techniques that use machine-learning models
#
# Author: Hans Giesen (giesen@seas.upenn.edu)
#######################################################################################################################
import abc, logging
from opentuner.search.bandittechniques import AUCBanditMetaTechnique
from opentuner.search.differentialevolution import DifferentialEvolutionAlt
from opentuner.search.evolutionarytechniques import NormalGreedyMutation, UniformGreedyMutation
from opentuner.search.simplextechniques import RandomNelderMead
from opentuner.search.technique import SearchTechnique
from .modeltuner import ModelTuner
log = logging.getLogger(__name__)
#######################################################################################################################
class LearningTechnique(SearchTechnique):
"""Abstract base class for machine-learning search techniques"""
__metaclass__ = abc.ABCMeta
def __init__(self, models, *pargs, **kwargs):
"""
Initialize the machine learning search technique object.
Parameters
----------
models : list of Model objects
"""
super(LearningTechnique, self).__init__(*pargs, **kwargs)
self._models = models
self._data_set = []
self._pending_cfgs = set()
def handle_requested_result(self, result):
"""This callback is invoked by the search driver to report new results.
Parameters
----------
result : Result
Result
"""
self._data_set.append(result)
data_set = [result for result in self._data_set if result.state == 'OK']
if data_set:
cfgs = [result.configuration.data for result in data_set]
for model in self._models:
results = [getattr(result, model.metric) for result in data_set]
model.train(cfgs, results)
self._pending_cfgs.discard(result.configuration)
def desired_configuration(self):
"""Suggest a new configuration to evaluate.
Returns
-------
Configuration
Suggested configuration
"""
for model in self._models:
cfg = model.select_configuration()
if cfg != None:
break
if cfg == None:
cfg = self.select_configuration()
if cfg is not None:
self._pending_cfgs.add(cfg)
return cfg
@abc.abstractmethod
def select_configuration(self):
"""Callback that should be implemented by a subclass to suggest a new configuration to evaluate.
Returns
-------
Configuration
Suggested configuration
"""
def set_driver(self, driver):
"""Set the search driver.
Parameters
----------
driver : SearchDriver
"""
super(LearningTechnique, self).set_driver(driver)
for model in self._models:
model.set_driver(driver)
class Model(object):
"""Abstract base class for machine-learning models"""
__metaclass__ = abc.ABCMeta
def __init__(self, metric = None):
"""Constructor
Parameters
----------
metric : str
Metric that is modeled
"""
self.metric = metric
def train(self, cfgs, results):
"""Train the model with the provided dataset.
Parameters
----------
cfgs : list of dict
Configurations
results : list of float
Measurements
"""
pass
def set_driver(self, driver):
"""Set the search driver.
Parameters
----------
driver : SearchDriver
"""
self._driver = driver
self._manipulator = driver.manipulator
@abc.abstractmethod
def predict(self, cfg):
"""Make a prediction for the given configuration.
Parameters
----------
cfg : dict
Configuration
Returns
-------
float
Mean of prediction
float
Standard deviation of prediction
"""
def select_configuration(self):
"""Suggest a new configuration to evaluate to initialize the model.
Returns
-------
Configuration
Suggested configuration. None is returned if initialization has completed.
"""
return None
class GreedyLearningTechnique(LearningTechnique):
"""Configuration selector that tries the optimal configuration according to the model unless it has already been
tried, in which case a random configuration is chosen.
"""
def select_configuration(self):
"""Suggest a new configuration to evaluate.
If the configuration that the model thinks is best has not been tried yet, we will suggest it. Otherwise, we
suggest a random configuration.
Returns
-------
Configuration
Suggested configuration
"""
if len(self._data_set) == 0:
return self._driver.get_configuration(self._manipulator.random())
technique = AUCBanditMetaTechnique([
DifferentialEvolutionAlt(),
UniformGreedyMutation(),
NormalGreedyMutation(mutation_rate = 0.3),
RandomNelderMead(),
])
tuner = ModelTuner(self._models, technique, self._objective, self._manipulator)
cfg = self._driver.get_configuration(tuner.tune())
if (cfg in [result.configuration for result in self._data_set]) or (cfg in self._pending_cfgs):
log.info("Subtechnique suggests already evaluated point. Falling back to random point.");
cfg = self._driver.get_configuration(self._manipulator.random())
return cfg
| 25.72381 | 119 | 0.64291 | # Machine Learning Search Techniques
#
# Base classes for search techniques that use machine-learning models
#
# Author: Hans Giesen (giesen@seas.upenn.edu)
#######################################################################################################################
import abc, logging
from opentuner.search.bandittechniques import AUCBanditMetaTechnique
from opentuner.search.differentialevolution import DifferentialEvolutionAlt
from opentuner.search.evolutionarytechniques import NormalGreedyMutation, UniformGreedyMutation
from opentuner.search.simplextechniques import RandomNelderMead
from opentuner.search.technique import SearchTechnique
from .modeltuner import ModelTuner
log = logging.getLogger(__name__)
#######################################################################################################################
class LearningTechnique(SearchTechnique):
    """Abstract base class for machine-learning search techniques"""

    # NOTE(review): __metaclass__ has no effect on Python 3; kept to avoid
    # changing the class statement, which could conflict with the base class's
    # metaclass. Confirm before switching to `metaclass=abc.ABCMeta`.
    __metaclass__ = abc.ABCMeta

    def __init__(self, models, *pargs, **kwargs):
        """
        Initialize the machine learning search technique object.

        Parameters
        ----------
        models : list of Model objects
        """
        super(LearningTechnique, self).__init__(*pargs, **kwargs)
        self._models = models
        # All results reported so far (any state).
        self._data_set = []
        # Configurations that were suggested but have no result yet.
        self._pending_cfgs = set()

    def handle_requested_result(self, result):
        """This callback is invoked by the search driver to report new results.

        Retrains all models on the successful measurements gathered so far.

        Parameters
        ----------
        result : Result
          Result
        """
        self._data_set.append(result)
        # Only successful measurements are used for training.
        data_set = [result for result in self._data_set if result.state == 'OK']
        if data_set:
            cfgs = [result.configuration.data for result in data_set]
            for model in self._models:
                results = [getattr(result, model.metric) for result in data_set]
                model.train(cfgs, results)
        self._pending_cfgs.discard(result.configuration)

    def desired_configuration(self):
        """Suggest a new configuration to evaluate.

        The models are consulted first (e.g. for initialization points);
        if none of them suggests one, select_configuration is used.

        Returns
        -------
        Configuration
          Suggested configuration
        """
        # BUGFIX: cfg used to be left unbound when self._models was empty,
        # which raised a NameError below; initialize it explicitly.
        cfg = None
        for model in self._models:
            cfg = model.select_configuration()
            # Idiom fix: compare against None with 'is not None'.
            if cfg is not None:
                break
        if cfg is None:
            cfg = self.select_configuration()
        if cfg is not None:
            self._pending_cfgs.add(cfg)
        return cfg

    @abc.abstractmethod
    def select_configuration(self):
        """Callback that should be implemented by a subclass to suggest a new configuration to evaluate.

        Returns
        -------
        Configuration
          Suggested configuration
        """

    def set_driver(self, driver):
        """Set the search driver.

        Propagates the driver to all models as well.

        Parameters
        ----------
        driver : SearchDriver
        """
        super(LearningTechnique, self).set_driver(driver)
        for model in self._models:
            model.set_driver(driver)
class Model(object):
    """Abstract base class for machine-learning models"""

    # NOTE(review): Python-2 style; has no effect on Python 3 -- the
    # @abc.abstractmethod below is then not enforced at instantiation time.
    __metaclass__ = abc.ABCMeta

    def __init__(self, metric = None):
        """Constructor

        Parameters
        ----------
        metric : str
          Metric that is modeled
        """
        self.metric = metric

    def train(self, cfgs, results):
        """Train the model with the provided dataset.

        The default implementation is a no-op; subclasses override this
        to fit their model to the observed measurements.

        Parameters
        ----------
        cfgs : list of dict
          Configurations
        results : list of float
          Measurements
        """
        pass

    def set_driver(self, driver):
        """Set the search driver.

        Also caches the driver's configuration manipulator for convenience.

        Parameters
        ----------
        driver : SearchDriver
        """
        self._driver = driver
        self._manipulator = driver.manipulator

    @abc.abstractmethod
    def predict(self, cfg):
        """Make a prediction for the given configuration.

        Parameters
        ----------
        cfg : dict
          Configuration

        Returns
        -------
        float
          Mean of prediction
        float
          Standard deviation of prediction
        """

    def select_configuration(self):
        """Suggest a new configuration to evaluate to initialize the model.

        Returns
        -------
        Configuration
          Suggested configuration. None is returned if initialization has completed.
        """
        return None
class GreedyLearningTechnique(LearningTechnique):
    """Configuration selector that tries the optimal configuration according to the model unless it has already been
    tried, in which case a random configuration is chosen.
    """

    def select_configuration(self):
        """Suggest a new configuration to evaluate.

        If the configuration that the model thinks is best has not been tried yet, we will suggest it. Otherwise, we
        suggest a random configuration.

        Returns
        -------
        Configuration
          Suggested configuration
        """
        # With no data yet the models are untrained, so start from a random point.
        if len(self._data_set) == 0:
            return self._driver.get_configuration(self._manipulator.random())
        # Search the model's surrogate objective with a bandit ensemble of
        # classic techniques to locate the model-predicted optimum.
        technique = AUCBanditMetaTechnique([
              DifferentialEvolutionAlt(),
              UniformGreedyMutation(),
              NormalGreedyMutation(mutation_rate = 0.3),
              RandomNelderMead(),
            ])
        tuner = ModelTuner(self._models, technique, self._objective, self._manipulator)
        cfg = self._driver.get_configuration(tuner.tune())
        # Never re-evaluate a configuration that was already measured or is in flight.
        if (cfg in [result.configuration for result in self._data_set]) or (cfg in self._pending_cfgs):
            log.info("Subtechnique suggests already evaluated point. Falling back to random point.");
            cfg = self._driver.get_configuration(self._manipulator.random())
        return cfg
| 0 | 0 | 0 |
5aa9e4a53fbd344d4e355aab923822504653912b | 9,390 | py | Python | deadtrees/network/segmodel.py | cwerner/deadtrees | 15ddfec58c4a40f22f9c1e2424fb535df4d29b03 | [
"Apache-2.0"
] | 1 | 2021-11-15T09:26:24.000Z | 2021-11-15T09:26:24.000Z | deadtrees/network/segmodel.py | cwerner/deadtrees | 15ddfec58c4a40f22f9c1e2424fb535df4d29b03 | [
"Apache-2.0"
] | 43 | 2021-04-19T14:55:05.000Z | 2022-03-29T13:34:16.000Z | deadtrees/network/segmodel.py | cwerner/deadtrees | 15ddfec58c4a40f22f9c1e2424fb535df4d29b03 | [
"Apache-2.0"
] | null | null | null | # source: https://github.com/PyTorchLightning/pytorch-lightning-bolts (Apache2)
import logging
from collections import Counter
from typing import Any, Dict, Optional, Tuple
import segmentation_models_pytorch as smp
import pandas as pd
import pytorch_lightning as pl
import torch
from deadtrees.loss.losses import (
BoundaryLoss,
class2one_hot,
FocalLoss,
GeneralizedDice,
)
from deadtrees.network.extra import EfficientUnetPlusPlus, ResUnet, ResUnetPlusPlus
from deadtrees.visualization.helper import show
from omegaconf import DictConfig
from torch import Tensor
logger = logging.getLogger(__name__)
| 34.270073 | 114 | 0.603088 | # source: https://github.com/PyTorchLightning/pytorch-lightning-bolts (Apache2)
import logging
from collections import Counter
from typing import Any, Dict, Optional, Tuple
import segmentation_models_pytorch as smp
import pandas as pd
import pytorch_lightning as pl
import torch
from deadtrees.loss.losses import (
BoundaryLoss,
class2one_hot,
FocalLoss,
GeneralizedDice,
)
from deadtrees.network.extra import EfficientUnetPlusPlus, ResUnet, ResUnetPlusPlus
from deadtrees.visualization.helper import show
from omegaconf import DictConfig
from torch import Tensor
logger = logging.getLogger(__name__)
def concat_extra(
    img: Tensor, mask: Tensor, distmap: Tensor, stats, *, extra
) -> Tuple[Tensor]:
    """Append samples from the extra datasets to the main batch.

    Tensors are concatenated along the batch dimension (dim 0); the stats
    list is extended in place with the extra datasets' stats entries.

    :param img: main batch images
    :param mask: main batch masks
    :param distmap: main batch distance maps
    :param stats: mutable list of per-sample stats dicts (extended in place)
    :param extra: iterable of (img, mask, distmap, stats) tuples
    :return: the combined (img, mask, distmap, stats)
    """
    imgs, masks, distmaps, stat_lists = zip(*extra)
    combined_img = torch.cat((img, *imgs), dim=0)
    combined_mask = torch.cat((mask, *masks), dim=0)
    combined_distmap = torch.cat((distmap, *distmaps), dim=0)
    stats.extend(sum(stat_lists, []))
    return combined_img, combined_mask, combined_distmap, stats
def create_combined_batch(
    batch: Dict[str, Any]
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Merge the 'main' dataset batch with any 'extra*' dataset batches.

    :param batch: mapping with a required "main" entry of the form
        (img, mask, distmap, stats); keys starting with "extra" are
        concatenated onto the main batch via concat_extra
    :return: the combined (img, mask, distmap, stats)
    """
    img, mask, distmap, stats = batch["main"]

    # grab extra datasets and concat tensors
    extra_batches = [data for key, data in batch.items() if key.startswith("extra")]
    if extra_batches:
        img, mask, distmap, stats = concat_extra(
            img, mask, distmap, stats, extra=extra_batches
        )
    return img, mask, distmap, stats
class SemSegment(pl.LightningModule): # type: ignore
def __init__(
self,
train_conf: DictConfig,
network_conf: DictConfig,
):
super().__init__()
architecture = network_conf.architecture.lower().strip()
if architecture == "unet":
Model = smp.Unet
elif architecture in ["unetplusplus", "unet++"]:
Model = smp.UnetPlusPlus
elif architecture == "resunet":
Model = ResUnet
elif architecture in ["resunetplusplus", "resunet++"]:
Model = ResUnetPlusPlus
elif architecture in ["efficientunetplusplus", "efficientunet++"]:
Model = EfficientUnetPlusPlus
else:
raise NotImplementedError(
"Currently only Unet, ResUnet, Unet++, ResUnet++, and EfficientUnet++ architectures are supported"
)
# Model does not accept "architecture" as an argument, but we need to store it in hparams for inference
# TODO: cleanup?
clean_network_conf = network_conf.copy()
del clean_network_conf.architecture
del clean_network_conf.losses
self.model = Model(**clean_network_conf)
# self.model.apply(initialize_weights)
self.save_hyperparameters() # type: ignore
self.classes = list(range(self.hparams["network_conf"]["classes"]))
self.classes_wout_bg = [c for c in self.classes if c != 0]
self.in_channels = self.hparams["network_conf"]["in_channels"]
# losses
self.generalized_dice_loss = None
self.focal_loss = None
self.boundary_loss = None
# parse loss config
self.initial_alpha = 0.01 # make this a hyperparameter and/ or scale with epoch
self.boundary_loss_ramped = False
for loss_component in network_conf.losses:
if loss_component == "GDICE":
# This the only required loss term
self.generalized_dice_loss = GeneralizedDice(idc=self.classes_wout_bg)
elif loss_component == "FOCAL":
self.focal_loss = FocalLoss(idc=self.classes, gamma=2)
elif loss_component == "BOUNDARY":
self.boundary_loss = BoundaryLoss(idc=self.classes_wout_bg)
elif loss_component == "BOUNDARY-RAMPED":
self.boundary_loss = BoundaryLoss(idc=self.classes_wout_bg)
self.boundary_loss_ramped = True
else:
raise NotImplementedError(
f"The loss component <{loss_component}> is not recognized"
)
logger.info(f"Losses: {network_conf.losses}")
# checks: we require GDICE!
assert self.generalized_dice_loss is not None
self.dice_metric = smp.utils.metrics.Fscore(
ignore_channels=[0],
)
self.dice_metric_with_bg = smp.utils.metrics.Fscore()
self.stats = {
"train": Counter(),
"val": Counter(),
"test": Counter(),
}
@property
def alpha(self):
"""blending parameter for boundary loss - ramps from 0.01 to 0.99 in 0.01 steps by epoch"""
return min((self.current_epoch + 1) * self.initial_alpha, 0.99)
def get_progress_bar_dict(self):
"""Hack to remove v_num from progressbar"""
tqdm_dict = super().get_progress_bar_dict()
if "v_num" in tqdm_dict:
del tqdm_dict["v_num"]
return tqdm_dict
def calculate_loss(
self, y_hat: Tensor, y: Tensor, stage: str, distmap: Optional[Tensor] = None
) -> Tensor:
"""calculate compound loss"""
loss, loss_gd, loss_bd, loss_fo = 0, None, None, None
if self.generalized_dice_loss:
loss_gd = self.generalized_dice_loss(y_hat, y)
self.log(f"{stage}/dice_loss", loss_gd)
loss += loss_gd
if self.boundary_loss:
loss_bd = self.boundary_loss(y_hat, distmap)
self.log(f"{stage}/boundary_loss", loss_bd)
loss += self.alpha * loss_bd if self.boundary_loss_ramped else loss_bd
if self.focal_loss:
loss_fo = self.focal_loss(y_hat, y)
self.log(f"{stage}/focal_loss", loss_fo)
loss += loss_fo
self.log(f"{stage}/total_loss", loss)
return loss
def log_metrics(self, y_hat: Tensor, y: Tensor, *, stage: str):
dice_score = self.dice_metric(y_hat, y)
dice_score_with_bg = self.dice_metric_with_bg(y_hat, y)
self.log(f"{stage}/dice", dice_score)
self.log(f"{stage}/dice_with_bg", dice_score_with_bg)
def training_step(self, batch, batch_idx):
    """Run one training step on a combined batch.

    Builds one-hot targets, softmaxes the network logits, computes the
    compound loss, and records which files were seen. Returns ``None``
    (skipping the optimiser step) when the loss is not finite.
    """
    img, mask, distmap, stats = create_combined_batch(batch)
    logits = self.model(img)
    y = class2one_hot(mask, K=len(self.classes))
    y_hat = logits.softmax(dim=1)
    loss = self.calculate_loss(y_hat, y, "train", distmap=distmap)
    if torch.isnan(loss) or torch.isinf(loss):
        # logger.warn is a deprecated alias; use warning() instead
        logger.warning("Train loss is NaN! What is going on?")
        return None
    self.log_metrics(y_hat, y, stage="train")
    # track training batch files
    self.stats["train"].update([x["file"] for x in stats])
    return loss
def validation_step(self, batch, batch_idx):
    """Run one validation step: compute loss and metrics, and on the first
    batch of the epoch log a grid of sample predictions to Weights & Biases
    (when a WandbLogger is attached)."""
    img, mask, distmap, stats = create_combined_batch(batch)
    logits = self.model(img)
    y = class2one_hot(mask, K=len(self.classes))
    y_hat = logits.softmax(dim=1)
    loss = self.calculate_loss(y_hat, y, stage="val", distmap=distmap)
    self.log_metrics(y_hat, y, stage="val")
    if batch_idx == 0:
        sample_chart = show(
            x=img.cpu(),
            y=mask.cpu(),
            y_hat=y_hat.cpu(),
            n_samples=min(img.shape[0], 8),
            stats=stats,
            dpi=72,
            display=False,
        )
        # renamed from ``logger`` to avoid shadowing the module-level logger
        for pl_logger in self.logger:
            if isinstance(pl_logger, pl.loggers.wandb.WandbLogger):
                import wandb

                pl_logger.experiment.log(
                    {
                        "sample": wandb.Image(
                            sample_chart,
                            caption=f"Sample-{self.trainer.global_step}",
                        )
                    },
                    commit=False,
                )
    # track validation batch files
    self.stats["val"].update([x["file"] for x in stats])
    return loss
def test_step(self, batch, batch_idx):
    """Evaluate one test batch: log Dice metrics and record which files
    were seen so teardown/reporting can count usage."""
    img, mask, _, stats = batch
    probs = self.model(img).softmax(dim=1)
    target = class2one_hot(mask, K=len(self.classes))
    self.log_metrics(probs, target, stage="test")
    # track test batch files
    self.stats["test"].update(item["file"] for item in stats)
def teardown(self, stage=None) -> None:
    """Dump per-file usage counts for the train and val splits to
    ``train_stats.csv`` / ``val_stats.csv`` in the working directory."""
    for split in ("train", "val"):
        counts = self.stats[split]
        logger.debug(f"len(stats_{split}): {len(counts)}")
        pd.DataFrame.from_records(
            list(dict(counts).items()), columns=["filename", "count"]
        ).to_csv(f"{split}_stats.csv", index=False)
def configure_optimizers(self):
    """Adam optimiser with a cosine-annealing LR schedule (period 10)."""
    optimizer = torch.optim.Adam(
        self.parameters(), lr=self.hparams.train_conf.learning_rate
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    return [optimizer], [scheduler]
def initialize_weights(m):
    """Recursively initialise a module tree: zero every bias, apply
    Kaiming-normal init to Conv2d/Linear weights, then recurse into
    child modules."""
    bias = getattr(m, "bias", None)
    if bias is not None:
        torch.nn.init.constant_(bias, 0)
    if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
        torch.nn.init.kaiming_normal_(m.weight)
    for child in m.children():
        initialize_weights(child)
| 7,150 | 1,523 | 92 |
f5041b20a2d7c1170b6b55b1c3a679bbba17f9f3 | 12,392 | py | Python | Executor/pathExecutor.py | keshenjey/Heft | bf935ab8afd63d9f1b058b3d3707abef4a71e15c | [
"MIT"
] | 3 | 2017-02-15T08:32:17.000Z | 2017-03-23T17:13:24.000Z | Executor/pathExecutor.py | Keshann/Heft | bf935ab8afd63d9f1b058b3d3707abef4a71e15c | [
"MIT"
] | 8 | 2016-06-12T04:14:38.000Z | 2016-06-14T03:39:15.000Z | Executor/pathExecutor.py | Keshann/Heft | bf935ab8afd63d9f1b058b3d3707abef4a71e15c | [
"MIT"
] | 1 | 2019-01-31T17:49:04.000Z | 2019-01-31T17:49:04.000Z | '''
The pathExecutor is to use NetworkX as algorithm support to run path finding operations.
After getting results from NetworkX, the pathExecutor will transform the results into a temp table in PostgreSQL
@author: keshan
'''
import os
import networkx as nx
#based on the path expression, choose different methods to run the operations
import QueryParser
#Create graphs in NetworkX
#for example: dealing with V1/./V2/./V3, separate it into two paths.
#get the graph from materialised graph dir or tmp graph dir
#create temp table in the relational DB to store the results (without middle node condition, e.g. V1/././V2)
#create temp table in the relational DB to store the results (with middle node conditions, e.g. V1/.V2/.V3)
def createMTable(pathCommands, Graph, columnList, pathLenList, conn, cur):
'''
for debug
for each in rowsList:
print each
for each in pathLenList:
print each
print "enter MTable"
'''
tableName = pathCommands[-1]
Col1 = "PathId"
Col2 = "Length"
Col3 = "Path"
#print "create temp table " + graphCommand[3] + " (" + graphCommand[1] + " int not null primary key, " + graphCommand[2] + " int);"
cur.execute("create temp table " + tableName + " (" + Col1 + " int not null primary key, " + Col2 + " int , " + Col3 + " integer[] );")
conn.commit()
pathId = 0
srcCols = columnList[-1]
desCols = columnList[-2]
pathLen = pathLenList[0]
for i in range(0,len(srcCols)):
for j in range(0, len(desCols)):
if (srcCols[i][0] != desCols[j][0]):
#print "enter path1"
try:
pathList = findPaths(Graph, srcCols[i][0], desCols[j][0], pathLen)
except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
#print reasons
continue
tempPathList = []
#this is the first path e.g. V1/./V2
for path in pathList:
#Here starts the next several paths
for pl in range(1, len(pathLenList)):
sCols = columnList[-pl-1]
dCols = columnList[-pl-2]
#Here is for the last path e.g. V2/./V3
#only for the last path, we start to insert values into table
if pl == len(pathLenList) - 1:
for a in range(0, len(sCols)):
for b in range(0, len(dCols)):
#make sure the first node not equals to the last node
if (srcCols[i][0] != dCols[b][0]) and (sCols[a][0] != dCols[b][0]):
try:
lastPathList = findPaths(Graph, sCols[a][0], dCols[b][0], pathLenList[pl])
except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
#print reasons
continue
#print "enter update:", pathId
if len(tempPathList) == 0:
for lastPath in lastPathList:
pathId += 1
cpPath = path[:]
cpPath.extend(lastPath[1:])
cur.execute("INSERT INTO " + tableName + " VALUES(%s, %s, array %s)" % (pathId, len(cpPath)-1, cpPath))
#cur.execute("UPDATE " + tableName + " SET %s = %s || ARRAY%s WHERE %s = %s" % (Col3, Col3, path[1:], Col1, pathId))
#cur.execute("UPDATE " + tableName + " SET %s = %s + %s WHERE %s = %s" % (Col2, Col2, pathLenList[pl], Col1, pathId))
conn.commit()
else:
for each in tempPathList:
for lastPath in lastPathList:
pathId += 1
cpPath = each[:]
cpPath.extend(lastPath[1:])
cur.execute("INSERT INTO " + tableName + " VALUES(%s, %s, array %s)" % (pathId, len(cpPath)-1, cpPath))
conn.commit()
#Here is the paths between first path and last path
#We only expand the result list and store the new results into a tempPathList
else:
for a in range(0, len(sCols)):
for b in range(0, len(dCols)):
#the source and the des must be different
if (sCols[a][0] != dCols[b][0]):
try:
conPathList = findPaths(Graph, sCols[a][0], dCols[b][0], pathLenList[pl])
except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
#print reasons
continue
if len(tempPathList) == 0:
for conPath in conPathList:
cpPath = path[:]
cpPath.extend(conPath[1:])
tempPathList.append(cpPath)
else:
cpTempPathList = tempPathList[:]
for each in tempPathList:
tempPathList.remove(each)
for each in cpTempPathList:
for conPath in conPathList:
cpPath = each[:]
cpPath.extend(conPath[1:])
tempPathList.append(cpPath)
print "complete the paths temp Mtable"
#use networkx to find paths | 46.762264 | 165 | 0.481843 | '''
The pathExecutor is to use NetworkX as algorithm support to run path finding operations.
After getting results from NetworkX, the pathExecutor will transform the results into a temp table in PostgreSQL
@author: keshan
'''
import os
import networkx as nx
#based on the path expression, choose different methods to run the operations
import QueryParser
def processCommand(pathCommands, conn ,cur):
    """Execute one parsed path query end to end (Python 2 code).

    pathCommands layout (positional, produced by the query parser):
      [0]  graph name, resolved to a file path via getGraph()
      [1]  the path expression, e.g. "V1/./V2" or "V1//V2"
      [2]  graph type: "dirgraph" or "undirgraph"
      [3]  list of sub-commands; 2-element entries are SQL node conditions
      [-1] name of the temp result table to create

    Results are written to a PostgreSQL temp table by createTable() (a single
    path leg) or createMTable() (legs with middle-node conditions).
    """
    from QueryParser import queryParser
    #createGraph
    graphPath = getGraph(pathCommands[0])
    if "dirgraph" == pathCommands[2].strip():
        Graph = createGraph(graphPath, "dirgraph")
    elif "undirgraph" == pathCommands[2].strip():
        Graph = createGraph(graphPath, "undirgraph")
    # NOTE(review): any other graph type leaves Graph unbound and the calls
    # below would raise NameError -- presumably the parser guarantees one of
    # the two values; confirm upstream.
    #differentiate V1//V2 and V1/./V2
    paths = analysePathSymbol(pathCommands[1])
    pathLenList = []
    for each in paths:
        if (str(each)).find('//') != -1:
            # "//" = unconstrained length, encoded as 0 for findPaths()
            pathLen = 0
            pathLenList.append(pathLen)
        else:
            # "/./" legs: number of '/' characters = maximum hop count
            pathLen = int((str(each)).count('/'))
            pathLenList.append(pathLen)
    #find all the members for one column
    columnList = []
    for eachCommand in pathCommands[3]: #the fourth element of pathCommands is commandArray
        if len(eachCommand) == 2: #means the node condition, otherwise it is a query for create graphs
            nodeCommand = eachCommand[1].replace(' ', ' distinct ', 1) #a column only contains unique value
            #print nodeCommand
            if ("rank" in nodeCommand) or ("cluster" in nodeCommand):
                # substitute placeholders with results of earlier graph queries
                for eachStr in QueryParser.queryParser.graphQueryWithResult.keys():
                    nodeCommand = nodeCommand.replace(eachStr,
                                                      QueryParser.queryParser.graphQueryWithResult.get(eachStr))
            cur.execute(nodeCommand)
            rows = cur.fetchall()
            #print rows
            conn.commit()
            columnList.append(rows)
    #without middle node condition
    if len(paths) == 1:
        srcCols = columnList[0]
        desCols = columnList[1]
        print "start to create table"
        createTable(pathCommands, Graph, srcCols, desCols, pathLenList[0], conn, cur)
    #with middle node conditions (S\.\.\N\.\D or S\..\N\..\D)
    else:
        print "start to create Mtable"
        createMTable(pathCommands, Graph, columnList, pathLenList, conn, cur)
#Create graphs in NetworkX
def createGraph(graphTxtName, graphType):
    """Load an edge-list text file into a NetworkX graph (Python 2 code).

    graphTxtName -- path to a whitespace-separated "src dst" edge list with
                    integer node ids, one edge per line.
    graphType    -- "dirgraph" for nx.DiGraph, "undirgraph" for nx.Graph.
    """
    if graphType == "dirgraph":
        Graph = nx.DiGraph()
    elif graphType == "undirgraph":
        Graph = nx.Graph()
    # NOTE(review): any other graphType leaves Graph unbound (NameError below).
    #Create Graph
    f = open(graphTxtName)  # NOTE(review): the file handle is never closed
    edgeList = []
    for eachLine in f:
        s = int((eachLine.split())[0])
        d = int((eachLine.split())[1])
        t = s, d #print t
        edgeList.append(t)
    Graph.add_edges_from(edgeList)
    print "number of nodes: ", Graph.number_of_nodes()
    print "number of edges: ", Graph.number_of_edges()
    return Graph
#for example: dealing with V1/./V2/./V3, separate it into two paths.
def analysePathSymbol(pathSymbol):
    """Split a multi-hop path expression into consecutive two-node legs.

    E.g. "V1/./V2/./V3" -> ["V1/./V2", "V2/./V3"], keeping the separator
    ("/./", "//", ...) between each pair of node names intact.

    Fix: the original used unanchored str.index(), which always found the
    FIRST occurrence of a node name, mis-slicing expressions with repeated
    names ("V1/./V1") or names that are prefixes of earlier ones
    ("V12/./V1"). Searching from a running offset resolves each name to the
    correct occurrence.
    """
    nodes = [e for e in pathSymbol.split("/") if e != '.' and e != '']
    paths = []
    search_from = 0
    for i in range(len(nodes) - 1):
        # anchor each search past the previous node occurrence
        sIndex = pathSymbol.index(nodes[i], search_from)
        eIndex = pathSymbol.index(nodes[i + 1], sIndex + len(nodes[i]))
        paths.append(pathSymbol[sIndex:eIndex + len(nodes[i + 1])])
        search_from = sIndex + len(nodes[i])
    return paths
#get the graph from materialised graph dir or tmp graph dir
def getGraph(graphName):
    """Resolve a graph name to its on-disk path (Python 2 code).

    The in-memory tmp dir (/dev/shm) takes precedence over the user's
    materialised-graph directory; raises RuntimeError when the graph is
    found in neither location.
    """
    matGraphDir = os.environ['HOME'] + "/IRG_Stat_Graph/"
    tmpGraphDir = "/dev/shm/IRG_Tmp_Graph/"
    if os.path.exists(tmpGraphDir + graphName):
        return tmpGraphDir + graphName
    elif os.path.exists(matGraphDir + graphName):
        return matGraphDir + graphName
    else:
        # Python 2 raise syntax (kept as-is; this module is Python 2)
        raise RuntimeError, "No such graph!!"
#create temp table in the relational DB to store the results (without middle node condition, e.g. V1/././V2)
def createTable(pathCommands, Graph, srcRows, desRows, pathLen, conn, cur):
    """Materialise all source->destination paths into a temp table.

    For every (src, dst) pair drawn from the two node-condition result sets,
    enumerate paths with findPaths() and insert one row per path as
    (PathId, Length, Paths int[]). pathLen == 0 requests shortest paths only.
    """
    tableName = pathCommands[-1]
    Col1 = "PathId"
    Col2 = "Length"
    Col3 = "Paths"
    #print "create temp table " + graphCommand[3] + " (" + graphCommand[1] + " int not null primary key, " + graphCommand[2] + " int);"
    # NOTE(review): table/column names are concatenated directly into the
    # SQL -- safe only as long as they come from the trusted query parser.
    cur.execute("create temp table " + tableName + " (" + Col1 + " int not null primary key, " + Col2 + " int , " + Col3 + " integer[] );")
    conn.commit()
    pathId = 0
    for i in range(0,len(srcRows)):
        for j in range(0, len(desRows)):
            if (srcRows[i][0] != desRows[j][0]):  # skip trivial src == dst pairs
                #print "enter path1"
                try:
                    pathList = findPaths(Graph, srcRows[i][0], desRows[j][0], pathLen)
                except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
                    #print reasons
                    # node missing from the graph or no path: move on
                    continue
                for path in pathList:
                    pathId += 1
                    #print path, len(path)-1
                    cur.execute("INSERT INTO " + tableName + " VALUES(%s, %s, array %s)" % (pathId, len(path)-1, path))
                    #cur.execute("UPDATE " + tableName + " SET %s = %s || ARRAY[1,2,3,4] WHERE %s = %s" % (Col3, Col3, Col1, pathId))
                    conn.commit()
    print "complete the paths temp table"
#create temp table in the relational DB to store the results (with middle node conditions, e.g. V1/.V2/.V3)
def createMTable(pathCommands, Graph, columnList, pathLenList, conn, cur):
'''
for debug
for each in rowsList:
print each
for each in pathLenList:
print each
print "enter MTable"
'''
tableName = pathCommands[-1]
Col1 = "PathId"
Col2 = "Length"
Col3 = "Path"
#print "create temp table " + graphCommand[3] + " (" + graphCommand[1] + " int not null primary key, " + graphCommand[2] + " int);"
cur.execute("create temp table " + tableName + " (" + Col1 + " int not null primary key, " + Col2 + " int , " + Col3 + " integer[] );")
conn.commit()
pathId = 0
srcCols = columnList[-1]
desCols = columnList[-2]
pathLen = pathLenList[0]
for i in range(0,len(srcCols)):
for j in range(0, len(desCols)):
if (srcCols[i][0] != desCols[j][0]):
#print "enter path1"
try:
pathList = findPaths(Graph, srcCols[i][0], desCols[j][0], pathLen)
except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
#print reasons
continue
tempPathList = []
#this is the first path e.g. V1/./V2
for path in pathList:
#Here starts the next several paths
for pl in range(1, len(pathLenList)):
sCols = columnList[-pl-1]
dCols = columnList[-pl-2]
#Here is for the last path e.g. V2/./V3
#only for the last path, we start to insert values into table
if pl == len(pathLenList) - 1:
for a in range(0, len(sCols)):
for b in range(0, len(dCols)):
#make sure the first node not equals to the last node
if (srcCols[i][0] != dCols[b][0]) and (sCols[a][0] != dCols[b][0]):
try:
lastPathList = findPaths(Graph, sCols[a][0], dCols[b][0], pathLenList[pl])
except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
#print reasons
continue
#print "enter update:", pathId
if len(tempPathList) == 0:
for lastPath in lastPathList:
pathId += 1
cpPath = path[:]
cpPath.extend(lastPath[1:])
cur.execute("INSERT INTO " + tableName + " VALUES(%s, %s, array %s)" % (pathId, len(cpPath)-1, cpPath))
#cur.execute("UPDATE " + tableName + " SET %s = %s || ARRAY%s WHERE %s = %s" % (Col3, Col3, path[1:], Col1, pathId))
#cur.execute("UPDATE " + tableName + " SET %s = %s + %s WHERE %s = %s" % (Col2, Col2, pathLenList[pl], Col1, pathId))
conn.commit()
else:
for each in tempPathList:
for lastPath in lastPathList:
pathId += 1
cpPath = each[:]
cpPath.extend(lastPath[1:])
cur.execute("INSERT INTO " + tableName + " VALUES(%s, %s, array %s)" % (pathId, len(cpPath)-1, cpPath))
conn.commit()
#Here is the paths between first path and last path
#We only expand the result list and store the new results into a tempPathList
else:
for a in range(0, len(sCols)):
for b in range(0, len(dCols)):
#the source and the des must be different
if (sCols[a][0] != dCols[b][0]):
try:
conPathList = findPaths(Graph, sCols[a][0], dCols[b][0], pathLenList[pl])
except (nx.exception.NetworkXError, nx.exception.NetworkXNoPath, KeyError) as reasons:
#print reasons
continue
if len(tempPathList) == 0:
for conPath in conPathList:
cpPath = path[:]
cpPath.extend(conPath[1:])
tempPathList.append(cpPath)
else:
cpTempPathList = tempPathList[:]
for each in tempPathList:
tempPathList.remove(each)
for each in cpTempPathList:
for conPath in conPathList:
cpPath = each[:]
cpPath.extend(conPath[1:])
tempPathList.append(cpPath)
print "complete the paths temp Mtable"
#use networkx to find paths
def findPaths(Graph, source, des, pathLen):
    """Enumerate paths from *source* to *des* using NetworkX.

    pathLen == 0 corresponds to an unconstrained "//" query and yields all
    shortest paths; any other value corresponds to a "/./" query and yields
    all simple paths of at most *pathLen* edges.
    """
    if pathLen == 0:
        generator = nx.all_shortest_paths(Graph, source, des)
    else:
        generator = nx.all_simple_paths(Graph, source, des, pathLen)
    return list(generator)
7abb94ea9ece5631b7b68a75389f25ba084d8cdf | 5,177 | py | Python | tests/test_beta_code.py | zfletch/beta-code-py | 788db49ccdd2fc20d5b39c4af8b1c974599da7fd | [
"MIT"
] | null | null | null | tests/test_beta_code.py | zfletch/beta-code-py | 788db49ccdd2fc20d5b39c4af8b1c974599da7fd | [
"MIT"
] | 6 | 2020-03-05T21:31:51.000Z | 2021-04-05T19:21:49.000Z | tests/test_beta_code.py | perseids-tools/beta-code-py | 788db49ccdd2fc20d5b39c4af8b1c974599da7fd | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
sys.path.append('../beta_code')
import unittest
import beta_code
if __name__ == '__main__':
unittest.main()
| 61.630952 | 885 | 0.681476 | # coding: utf-8
import sys
sys.path.append('../beta_code')
import unittest
import beta_code
class TestBetaCode(unittest.TestCase):
def test_greek_to_beta_code(self):
self.assertEqual(
u'xai=re w)= ko/sme',
beta_code.greek_to_beta_code(u'χαῖρε ὦ κόσμε'),
)
self.assertEqual(
u'mh=nin a)/eide qea\\ *phlhi+a/dew *)axilh=os',
beta_code.greek_to_beta_code(u'μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος'),
)
self.assertEqual(
u'kate/bhn xqe\\s ei)s *peiraia= meta\\ *glau/kwnos tou= *)ari/stwnos proseuco/meno/s te th=| qew=| kai\\ a(/ma th\\n e(orth\\n boulo/menos qea/sasqai ti/na tro/pon poih/sousin a(/te nu=n prw=ton a)/gontes. kalh\\ me\\n ou)=n moi kai\\ h( tw=n e)pixwri/wn pomph\\ e)/docen ei)=nai, ou) me/ntoi h(=tton e)fai/neto pre/pein h(\\n oi( *qra=|kes e)/pempon. proseuca/menoi de\\ kai\\ qewrh/santes a)ph=|men pro\\s to\\ a)/stu. katidw\\n ou)=n po/rrwqen h(ma=s oi)/kade w(rmhme/nous *pole/marxos o( *kefa/lou e)ke/leuse dramo/nta to\\n pai=da perimei=nai/ e( keleu=sai. kai/ mou o)/pisqen o( pai=s labo/menos tou= i(mati/ou, keleu/ei u(ma=s, e)/fh, *pole/marxos perimei=nai. kai\\ e)gw\\ metestra/fhn te kai\\ h)ro/mhn o(/pou au)to\\s ei)/h. ou(=tos, e)/fh, o)/pisqen prose/rxetai: a)lla\\ perime/nete. a)lla\\ perimenou=men, h)= d\' o(\\s o( *glau/kwn.',
beta_code.greek_to_beta_code(u'κατέβην χθὲς εἰς Πειραιᾶ μετὰ Γλαύκωνος τοῦ Ἀρίστωνος προσευξόμενός τε τῇ θεῷ καὶ ἅμα τὴν ἑορτὴν βουλόμενος θεάσασθαι τίνα τρόπον ποιήσουσιν ἅτε νῦν πρῶτον ἄγοντες. καλὴ μὲν οὖν μοι καὶ ἡ τῶν ἐπιχωρίων πομπὴ ἔδοξεν εἶναι, οὐ μέντοι ἧττον ἐφαίνετο πρέπειν ἣν οἱ Θρᾷκες ἔπεμπον. προσευξάμενοι δὲ καὶ θεωρήσαντες ἀπῇμεν πρὸς τὸ ἄστυ. κατιδὼν οὖν πόρρωθεν ἡμᾶς οἴκαδε ὡρμημένους Πολέμαρχος ὁ Κεφάλου ἐκέλευσε δραμόντα τὸν παῖδα περιμεῖναί ἑ κελεῦσαι. καί μου ὄπισθεν ὁ παῖς λαβόμενος τοῦ ἱματίου, κελεύει ὑμᾶς, ἔφη, Πολέμαρχος περιμεῖναι. καὶ ἐγὼ μετεστράφην τε καὶ ἠρόμην ὅπου αὐτὸς εἴη. οὗτος, ἔφη, ὄπισθεν προσέρχεται· ἀλλὰ περιμένετε. ἀλλὰ περιμενοῦμεν, ἦ δ\' ὃς ὁ Γλαύκων.'),
)
self.assertEqual(
u'dd*ds',
beta_code.greek_to_beta_code(u'δδΔς'),
)
self.assertEqual(
u'*pollh\\ me\\n e)n brotoi=si kou)k a)nw/numos qea\\ ke/klhmai *ku/pris ou)ranou= t᾿ e)/sw:',
beta_code.greek_to_beta_code(u'Πολλὴ μὲν ἐν βροτοῖσι κοὐκ ἀνώνυμος θεὰ κέκλημαι Κύπρις οὐρανοῦ τ᾿ ἔσω·'),
)
self.assertEqual(
u'w(=|*(=w|',
beta_code.greek_to_beta_code(u'ᾧᾯ'),
)
self.assertEqual(
u'mou_s',
beta_code.greek_to_beta_code(u'μοῡς', custom_map={ u'ᾱ': u'a_', u'ῑ': u'i_', u'ῡ': u'u_' }),
)
def test_beta_code_to_greek(self):
self.assertEqual(
u'χαῖρε ὦ κόσμε',
beta_code.beta_code_to_greek(u'xai=re w)= ko/sme'),
)
self.assertEqual(
u'μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος',
beta_code.beta_code_to_greek(u'mh=nin a)/eide qea\\ *phlhi+a/dew *)axilh=os'),
)
self.assertEqual(
u'κατέβην χθὲς εἰς Πειραιᾶ μετὰ Γλαύκωνος τοῦ Ἀρίστωνος προσευξόμενός τε τῇ θεῷ καὶ ἅμα τὴν ἑορτὴν βουλόμενος θεάσασθαι τίνα τρόπον ποιήσουσιν ἅτε νῦν πρῶτον ἄγοντες. καλὴ μὲν οὖν μοι καὶ ἡ τῶν ἐπιχωρίων πομπὴ ἔδοξεν εἶναι, οὐ μέντοι ἧττον ἐφαίνετο πρέπειν ἣν οἱ Θρᾷκες ἔπεμπον. προσευξάμενοι δὲ καὶ θεωρήσαντες ἀπῇμεν πρὸς τὸ ἄστυ. κατιδὼν οὖν πόρρωθεν ἡμᾶς οἴκαδε ὡρμημένους Πολέμαρχος ὁ Κεφάλου ἐκέλευσε δραμόντα τὸν παῖδα περιμεῖναί ἑ κελεῦσαι. καί μου ὄπισθεν ὁ παῖς λαβόμενος τοῦ ἱματίου, κελεύει ὑμᾶς, ἔφη, Πολέμαρχος περιμεῖναι. καὶ ἐγὼ μετεστράφην τε καὶ ἠρόμην ὅπου αὐτὸς εἴη. οὗτος, ἔφη, ὄπισθεν προσέρχεται· ἀλλὰ περιμένετε. ἀλλὰ περιμενοῦμεν, ἦ δ\' ὃς ὁ Γλαύκων.',
beta_code.beta_code_to_greek(u'kate/bhn xqe\\s ei)s *peiraia= meta\\ *glau/kwnos tou= *)ari/stwnos proseuco/meno/s te th=| qew=| kai\\ a(/ma th\\n e(orth\\n boulo/menos qea/sasqai ti/na tro/pon poih/sousin a(/te nu=n prw=ton a)/gontes. kalh\\ me\\n ou)=n moi kai\\ h( tw=n e)pixwri/wn pomph\\ e)/docen ei)=nai, ou) me/ntoi h(=tton e)fai/neto pre/pein h(\\n oi( *qra=|kes e)/pempon. proseuca/menoi de\\ kai\\ qewrh/santes a)ph=|men pro\\s to\\ a)/stu. katidw\\n ou)=n po/rrwqen h(ma=s oi)/kade w(rmhme/nous *pole/marxos o( *kefa/lou e)ke/leuse dramo/nta to\\n pai=da perimei=nai/ e( keleu=sai. kai/ mou o)/pisqen o( pai=s labo/menos tou= i(mati/ou, keleu/ei u(ma=s, e)/fh, *pole/marxos perimei=nai. kai\\ e)gw\\ metestra/fhn te kai\\ h)ro/mhn o(/pou au)to\\s ei)/h. ou(=tos, e)/fh, o)/pisqen prose/rxetai: a)lla\\ perime/nete. a)lla\\ perimenou=men, h)= d\' o(\\s o( *glau/kwn.'),
)
self.assertEqual(
u'δδΔς',
beta_code.beta_code_to_greek(u'dd*ds'),
)
self.assertEqual(
u'δδΔς',
beta_code.beta_code_to_greek(u'dd*ds2'),
)
self.assertEqual(
u'Ὅρκος Ἄζωτον Ἕλληνας Ἆπις ᾯ Ὅρκος Ἄζωτον Ἕλληνας Ἆπις ᾯ Ὅρκος Ἄζωτον Ἕλληνας Ἆπις ᾯ',
beta_code.beta_code_to_greek(u'*o(/rkos *a)/zwton *e(/llhnas *a)=pis *w(=| *(/orkos *)/azwton *(/ellhnas *)=apis *(=|w *(/orkos *)/azwton *(/ellhnas *)=apis *(=w|'),
)
self.assertEqual(
u'μοῡς',
beta_code.beta_code_to_greek(u'mou_s', custom_map={ u'a_': u'ᾱ', u'i_': u'ῑ', u'u_': u'ῡ' }),
)
if __name__ == '__main__':
unittest.main()
| 6,512 | 17 | 72 |
b32d0012297d1425b389f90ab0adc2f7f457c6f9 | 988 | py | Python | tests/application_handler_tests.py | a-h/tornado-example | 0b2d1c01d19ee8cc4a737046780a73ace09f08e1 | [
"MIT"
] | null | null | null | tests/application_handler_tests.py | a-h/tornado-example | 0b2d1c01d19ee8cc4a737046780a73ace09f08e1 | [
"MIT"
] | null | null | null | tests/application_handler_tests.py | a-h/tornado-example | 0b2d1c01d19ee8cc4a737046780a73ace09f08e1 | [
"MIT"
] | null | null | null | import json
from unittest import TestCase
from unittest.mock import MagicMock
from tornado import gen
from tornado.concurrent import Future
import app
from application_repository import ApplicationRepository
__author__ = 'adrian'
| 29.058824 | 114 | 0.724696 | import json
from unittest import TestCase
from unittest.mock import MagicMock
from tornado import gen
from tornado.concurrent import Future
import app
from application_repository import ApplicationRepository
__author__ = 'adrian'
class ApplicationHandlerTests(TestCase):
    """Unit tests for app.ApplicationHandler with mocked collaborators."""

    @gen.coroutine
    def test_that_applications_can_be_listed(self):
        """GET should write the repository's application list as JSON."""
        # NOTE(review): a plain unittest.TestCase does not drive Tornado
        # coroutines, so the yield below may never actually run; consider
        # tornado.testing.AsyncTestCase + @gen_test -- confirm with the
        # project's test runner.
        # Arrange
        expected_data = {'some': 'data'}
        mock_application_repository = ApplicationRepository()
        # resolve the repository call with a pre-completed Future
        future = Future()
        future.set_result(expected_data)
        mock_application_repository.list_applications = MagicMock(return_value=future)
        request = MagicMock()
        application = MagicMock()
        output = MagicMock()
        handler = app.ApplicationHandler(application, request, application_repository=mock_application_repository)
        handler.write = output
        # Act
        yield handler.get()
        # Assert
        output.assert_called_once_with(json.dumps(expected_data))
c284284086f42120ba6b782d5e43c95c436a29dc | 1,095 | py | Python | monitor-flask-apps/app.py | Pucster/lambda_py | 23991849de9b527f2405c479e61cb67ee5e76ac4 | [
"MIT"
] | 65 | 2017-06-13T01:02:17.000Z | 2022-01-10T09:58:29.000Z | monitor-flask-apps/app.py | Pucster/lambda_py | 23991849de9b527f2405c479e61cb67ee5e76ac4 | [
"MIT"
] | 1 | 2020-06-05T18:07:42.000Z | 2020-06-05T18:07:42.000Z | monitor-flask-apps/app.py | Pucster/lambda_py | 23991849de9b527f2405c479e61cb67ee5e76ac4 | [
"MIT"
] | 50 | 2017-07-01T02:10:19.000Z | 2022-03-24T17:23:58.000Z | import os
import re
import rollbar
import rollbar.contrib.flask
from flask import Flask, render_template, Response
from flask import got_request_exception
from werkzeug.exceptions import NotFound
app = Flask(__name__)
MIN_PAGE_NAME_LENGTH = 2
@app.before_first_request
@app.route("/<string:page>/")
if __name__ == "__main__":
app.run(debug=True)
| 28.076923 | 78 | 0.699543 | import os
import re
import rollbar
import rollbar.contrib.flask
from flask import Flask, render_template, Response
from flask import got_request_exception
from werkzeug.exceptions import NotFound
app = Flask(__name__)
MIN_PAGE_NAME_LENGTH = 2
@app.before_first_request
def add_monitoring():
    """Initialise Rollbar error reporting before the first request.

    Reads the access token from the ROLLBAR_SECRET environment variable and
    connects Flask's got_request_exception signal so unhandled exceptions
    are reported automatically.
    """
    rollbar.init(os.environ.get('ROLLBAR_SECRET'))
    ## delete the next line if you dont want this event anymore
    rollbar.report_message('Rollbar is configured correctly')
    got_request_exception.connect(rollbar.contrib.flask.report_exception, app)
@app.route("/<string:page>/")
def show_page(page):
    """Render templates/<page>.html for simple alphabetic page names.

    Page names must be purely alphabetic (case-insensitive) and at least
    MIN_PAGE_NAME_LENGTH characters long; anything else, or a missing
    template, yields a 404.

    Fixes over the original: the bare ``except:`` used to catch the
    NotFound it had just raised, report an expected 404 to Rollbar as an
    error, and return the fallback body with HTTP status 200.
    """
    valid_length = len(page) >= MIN_PAGE_NAME_LENGTH
    valid_name = re.match('^[a-z]+$', page.lower()) is not None
    if not (valid_length and valid_name):
        msg = "Sorry, couldn't find page with name {}".format(page)
        # Let Flask convert this into a real 404 response instead of
        # swallowing it in a broad except clause.
        raise NotFound(msg)
    try:
        return render_template("{}.html".format(page))
    except Exception:
        # e.g. TemplateNotFound: report the unexpected failure, then 404.
        rollbar.report_exc_info()
        return Response("404 Not Found", status=404)
if __name__ == "__main__":
app.run(debug=True)
| 693 | 0 | 44 |
39b5a2156c31011cded20d6a056bdf5c83501c5d | 4,381 | py | Python | contrib/browser/browser.py | PiRSquared17/warc-tools | f200a5ed086159a1b1833f7361b35389b1ad1ef4 | [
"Apache-2.0"
] | null | null | null | contrib/browser/browser.py | PiRSquared17/warc-tools | f200a5ed086159a1b1833f7361b35389b1ad1ef4 | [
"Apache-2.0"
] | null | null | null | contrib/browser/browser.py | PiRSquared17/warc-tools | f200a5ed086159a1b1833f7361b35389b1ad1ef4 | [
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------- #
# Copyright (c) 2007-2008 Hanzo Archives Limited. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# You may find more information about Hanzo Archives at #
# #
# http://www.hanzoarchives.com/ #
# #
# You may find more information about the WARC Tools project at #
# #
# http://code.google.com/p/warc-tools/ #
# ------------------------------------------------------------------- #
import wpath
import web
from web import cheetah
from urllib import unquote_plus
from index.keypath import keypath
from index.timetools import getDisplayTime
from warcutils import getRecord
from rewrite import makeAccessible
index = None
def getparams( s ):
    """Parse a urlencoded query body into a dict.

    Splits on '&' and on the FIRST '=' of each item; '+'-encoded and
    %-escaped characters in values are decoded with unquote_plus. Items
    without '=' are ignored, matching the original behaviour.

    Fix: the original rebuilt the value with ``''.join(bits[1:])``, which
    silently dropped every '=' character inside a value (e.g. "a=b=c"
    became "bc" instead of "b=c").
    """
    ret = {}
    for item in s.split('&'):
        if '=' in item:
            key, _, raw_value = item.partition('=')
            ret[key] = unquote_plus(raw_value)
    return ret
urls = ( '/' , 'frontpage' ,
'/archive/(.*?)/(.*)' , 'archive' )
class frontpage(object):
    """Handler for '/': renders the landing page template."""
    def GET( self ):
        cheetah.render( 'index.tmpl' )
class query(object):
    """Debug handler for form submissions.

    NOTE(review): not referenced by the *urls* routing table above, so it
    appears to be dead/experimental code (Python 2 print statements).
    """
    def GET(self):
        print 'there'
    def POST( self ):
        print 'here' ,
        print getparams(web.data())
def doSearchResults( url ):
    """Render the capture-date listing for *url* (Python 2 code).

    Index hits are grouped by year into {year: [(timestamp, display)]},
    plus a "years" key listing the years in first-seen order, and rendered
    with searchresults.tmpl; no hits renders the no-results template.
    """
    r = index.search( keypath( url ))
    if len(r):
        # the second whitespace-separated field of each hit is the timestamp
        results = [ x.split()[1] for x in r ]
        years = []
        yearresults = {}
        for res in results:
            year = res[:4]
            if year not in years:
                years.append( year )
                yearresults[ year ] = [ ( res , getDisplayTime(res )) ]
            else:
                yearresults[ year ].append(( res , getDisplayTime(res )))
        yearresults["years"] = years
        cheetah.render( 'searchresults.tmpl' , { 'url': url , 'results': yearresults } )
    else:
        cheetah.render( 'nosearchresults.tmpl' );
class archive( object ):
    """Handler for /archive/<timestamp>/<url> requests (Python 2 code)."""

    def GET( self , timestamp , url ):
        """Serve the archived capture of *url* at *timestamp*.

        A '*' in the timestamp triggers the search-results page instead;
        an exact timestamp resolves the WARC file/offset via the index and
        replays the rewritten record content.
        """
        # re-extract the url from the raw request path so characters lost
        # by the route pattern (e.g. query strings) survive
        r = web.ctx['fullpath']
        url = r[ r.find('archive') + len('archive') + 2 + len( timestamp ) :]
        if index is not None:
            if '*' in timestamp:
                doSearchResults( url )
            else:
                item = index.get( timestamp , keypath( url ))
                if item is not None:
                    # index line fields (per the names below): key fields,
                    # then mimetype, status, offset, redirect, warc file
                    bits = [x for x in item.split(' ' ) if x.strip() != '' ]
                    mimetype = bits[2]
                    status = bits[3]
                    offset = bits[4]
                    redirect = bits[5]
                    warcn = bits[6]
                    ( headers , content ) = getRecord( warcn , offset )
                    print makeAccessible( mimetype , url , timestamp , content )
def my_notfound():
    """Render the custom 404 template instead of web.py's default page."""
    cheetah.render( '404.tmpl' );
def browse( idx ):
    """Start the web.py server for browsing WARC index *idx*.

    Stores the index in the module-level *index* global (read by the
    handlers above), installs the custom 404 handler, and blocks serving
    requests.
    """
    global index
    index = idx
    fn = web.webpyfunc( urls , globals() )
    web.webapi.notfound = my_notfound
    web.runsimple( web.wsgifunc(fn ))
| 4,381 | 4,381 | 0.43506 |
# ------------------------------------------------------------------- #
# Copyright (c) 2007-2008 Hanzo Archives Limited. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# You may find more information about Hanzo Archives at #
# #
# http://www.hanzoarchives.com/ #
# #
# You may find more information about the WARC Tools project at #
# #
# http://code.google.com/p/warc-tools/ #
# ------------------------------------------------------------------- #
import wpath
import web
from web import cheetah
from urllib import unquote_plus
from index.keypath import keypath
from index.timetools import getDisplayTime
from warcutils import getRecord
from rewrite import makeAccessible
index = None
def getparams( s ):
ret = {}
items = s.split('&')
for item in items:
if '=' in item:
bits = item.split('=')
key = bits[0]
value = unquote_plus( ''.join( bits[1: ] ))
ret[key] = value
return ret
urls = ( '/' , 'frontpage' ,
'/archive/(.*?)/(.*)' , 'archive' )
class frontpage(object):
def GET( self ):
cheetah.render( 'index.tmpl' )
class query(object):
def GET(self):
print 'there'
def POST( self ):
print 'here' ,
print getparams(web.data())
def doSearchResults( url ):
r = index.search( keypath( url ))
if len(r):
results = [ x.split()[1] for x in r ]
years = []
yearresults = {}
for res in results:
year = res[:4]
if year not in years:
years.append( year )
yearresults[ year ] = [ ( res , getDisplayTime(res )) ]
else:
yearresults[ year ].append(( res , getDisplayTime(res )))
yearresults["years"] = years
cheetah.render( 'searchresults.tmpl' , { 'url': url , 'results': yearresults } )
else:
cheetah.render( 'nosearchresults.tmpl' );
class archive( object ):
def GET( self , timestamp , url ):
r = web.ctx['fullpath']
url = r[ r.find('archive') + len('archive') + 2 + len( timestamp ) :]
if index is not None:
if '*' in timestamp:
doSearchResults( url )
else:
item = index.get( timestamp , keypath( url ))
if item is not None:
bits = [x for x in item.split(' ' ) if x.strip() != '' ]
mimetype = bits[2]
status = bits[3]
offset = bits[4]
redirect = bits[5]
warcn = bits[6]
( headers , content ) = getRecord( warcn , offset )
print makeAccessible( mimetype , url , timestamp , content )
def my_notfound():
cheetah.render( '404.tmpl' );
def browse( idx ):
    # Entry point: install *idx* as the global search index, register the
    # custom 404 handler, and start the web.py server on the routing table.
    global index
    index = idx
    handler = web.webpyfunc( urls , globals() )
    web.webapi.notfound = my_notfound
    web.runsimple( web.wsgifunc( handler ) )
| 0 | 0 | 0 |
090b3c03c4611d771d9600f17c71f3cee07f2bbd | 19,220 | py | Python | notebook/calendar_html.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/calendar_html.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/calendar_html.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import calendar
hc = calendar.HTMLCalendar()
print(hc.formatmonth(2019, 1, withyear=False))
# <table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">January</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
# <tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
# <tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
# <tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
# <tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
#
# formatmonth() returns the HTML markup as a plain str.
print(type(hc.formatmonth(2019, 1)))
# <class 'str'>
# formatyear() renders the whole year; width = number of months per row.
print(hc.formatyear(2019, width=4))
# <table border="0" cellpadding="0" cellspacing="0" class="year">
# <tr><th colspan="4" class="year">2019</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">January</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
# <tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
# <tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
# <tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
# <tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">February</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
# <tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
# <tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
# <tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
# <tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">March</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
# <tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
# <tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
# <tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
# <tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">April</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
# <tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
# <tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
# <tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
# <tr><td class="mon">29</td><td class="tue">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">May</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
# <tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
# <tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
# <tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
# <tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">June</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sat">1</td><td class="sun">2</td></tr>
# <tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
# <tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
# <tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
# <tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">July</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
# <tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
# <tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
# <tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
# <tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">August</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
# <tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
# <tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
# <tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
# <tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
# </table>
# </td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">September</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
# <tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
# <tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
# <tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
# <tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
# <tr><td class="mon">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">October</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
# <tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
# <tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
# <tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
# <tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">November</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
# <tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
# <tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
# <tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
# <tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">December</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
# <tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
# <tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
# <tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
# <tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
# <tr><td class="mon">30</td><td class="tue">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td></tr></table>
# Per-weekday CSS classes applied to the <td>/<th> cells (Mon..Sun order).
print(hc.cssclasses)
# ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# Each entry may hold several space-separated classes, e.g. 'sat blue'.
hc.cssclasses = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat blue', 'sun red']
# CSS class names used for the month table, year table, and empty cells.
print(hc.cssclass_month)
# month
print(hc.cssclass_year)
# year
print(hc.cssclass_noday)
# noday
# firstweekday=6 (calendar.SUNDAY) puts Sunday in the leftmost column.
hc_sun = calendar.HTMLCalendar(firstweekday=6)
print(hc_sun.formatmonth(2019, 1))
# <table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">January 2019</th></tr>
# <tr><th class="sun">Sun</th><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td></tr>
# <tr><td class="sun">6</td><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td></tr>
# <tr><td class="sun">13</td><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td></tr>
# <tr><td class="sun">20</td><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td></tr>
# <tr><td class="sun">27</td><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
#
# LocaleHTMLCalendar renders day/month names in the given locale.
# NOTE(review): the locale string must exist on the host system; on some
# platforms the Japanese locale is named 'ja_JP.UTF-8', not 'ja_jp' —
# this call raises locale.Error if the name is unknown. Verify locally.
lhc = calendar.LocaleHTMLCalendar(firstweekday=6, locale='ja_jp')
print(lhc.formatmonth(2019, 1))
# <table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">1月 2019</th></tr>
# <tr><th class="sun">日</th><th class="mon">月</th><th class="tue">火</th><th class="wed">水</th><th class="thu">木</th><th class="fri">金</th><th class="sat">土</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td></tr>
# <tr><td class="sun">6</td><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td></tr>
# <tr><td class="sun">13</td><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td></tr>
# <tr><td class="sun">20</td><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td></tr>
# <tr><td class="sun">27</td><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
#
| 109.828571 | 208 | 0.597659 | import calendar
hc = calendar.HTMLCalendar()
print(hc.formatmonth(2019, 1, withyear=False))
# <table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">January</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
# <tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
# <tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
# <tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
# <tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
#
print(type(hc.formatmonth(2019, 1)))
# <class 'str'>
print(hc.formatyear(2019, width=4))
# <table border="0" cellpadding="0" cellspacing="0" class="year">
# <tr><th colspan="4" class="year">2019</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">January</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
# <tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
# <tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
# <tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
# <tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">February</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
# <tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
# <tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
# <tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
# <tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">March</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
# <tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
# <tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
# <tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
# <tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">April</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
# <tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
# <tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
# <tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
# <tr><td class="mon">29</td><td class="tue">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">May</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
# <tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
# <tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
# <tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
# <tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">June</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sat">1</td><td class="sun">2</td></tr>
# <tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
# <tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
# <tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
# <tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">July</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
# <tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
# <tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
# <tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
# <tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">August</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
# <tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
# <tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
# <tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
# <tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
# </table>
# </td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">September</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
# <tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
# <tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
# <tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
# <tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
# <tr><td class="mon">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">October</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
# <tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
# <tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
# <tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
# <tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">November</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
# <tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
# <tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
# <tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
# <tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="noday"> </td></tr>
# </table>
# </td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">December</th></tr>
# <tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
# <tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
# <tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
# <tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
# <tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
# <tr><td class="mon">30</td><td class="tue">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
# </td></tr></table>
print(hc.cssclasses)
# ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
hc.cssclasses = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat blue', 'sun red']
print(hc.cssclass_month)
# month
print(hc.cssclass_year)
# year
print(hc.cssclass_noday)
# noday
hc_sun = calendar.HTMLCalendar(firstweekday=6)
print(hc_sun.formatmonth(2019, 1))
# <table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">January 2019</th></tr>
# <tr><th class="sun">Sun</th><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td></tr>
# <tr><td class="sun">6</td><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td></tr>
# <tr><td class="sun">13</td><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td></tr>
# <tr><td class="sun">20</td><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td></tr>
# <tr><td class="sun">27</td><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
#
lhc = calendar.LocaleHTMLCalendar(firstweekday=6, locale='ja_jp')
print(lhc.formatmonth(2019, 1))
# <table border="0" cellpadding="0" cellspacing="0" class="month">
# <tr><th colspan="7" class="month">1月 2019</th></tr>
# <tr><th class="sun">日</th><th class="mon">月</th><th class="tue">火</th><th class="wed">水</th><th class="thu">木</th><th class="fri">金</th><th class="sat">土</th></tr>
# <tr><td class="noday"> </td><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td></tr>
# <tr><td class="sun">6</td><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td></tr>
# <tr><td class="sun">13</td><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td></tr>
# <tr><td class="sun">20</td><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td></tr>
# <tr><td class="sun">27</td><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="thu">31</td><td class="noday"> </td><td class="noday"> </td></tr>
# </table>
#
| 0 | 0 | 0 |
6d5de67179b6752d85db1ac1325c64852966a0f8 | 6,350 | py | Python | scripts/convert_catalog.py | iconclass/cit | 06dab3a467ee32212d6e4ddc6f516d42d4c339de | [
"CC0-1.0"
] | 2 | 2022-01-27T15:38:15.000Z | 2022-02-21T17:10:02.000Z | scripts/convert_catalog.py | iconclass/cit | 06dab3a467ee32212d6e4ddc6f516d42d4c339de | [
"CC0-1.0"
] | 5 | 2021-12-16T11:58:28.000Z | 2021-12-16T13:49:53.000Z | scripts/convert_catalog.py | iconclass/cit | 06dab3a467ee32212d6e4ddc6f516d42d4c339de | [
"CC0-1.0"
] | null | null | null | import sys, os
import xml.etree.ElementTree as ET
import json
import textbase
from tqdm import tqdm
CIT_data = {}
for x in textbase.parse("CIT.dmp"):
CIT_data[x["ID.INV"][0]] = x
# We only want to add image items for images that we also actually have on disk at time of import.
# Read in a list of current filenames from disk
CURRENT_JPGS = set(open("all_jpg.txt").read().split("\n"))
HIM_CODES = ("VANDA", "MET", "NPM", "CMA")
data_list = []
for HIM_CODE in HIM_CODES:
if not os.path.exists(HIM_CODE):
print(f"Directory named {HIM_CODE} not found")
continue
for filename in os.listdir(HIM_CODE):
if filename.lower().endswith(".xml"):
filepath = os.path.join(HIM_CODE, filename)
data_list.extend(parse(filepath, HIM_CODE))
dump("CATALOG.dmp", data_list)
| 39.197531 | 164 | 0.586772 | import sys, os
import xml.etree.ElementTree as ET
import json
import textbase
from tqdm import tqdm
CIT_data = {}
for x in textbase.parse("CIT.dmp"):
CIT_data[x["ID.INV"][0]] = x
def dump(filename, objs):
    """Write *objs* to *filename* in textbase dump format.

    Each record becomes a run of ``FIELD value`` lines -- repeated values are
    deduplicated and joined with ``"\\n; "`` -- followed by a ``$`` terminator
    line marking the end of the record.
    """
    with open(filename, "w", encoding="utf8") as outfile:
        for record in objs:
            for field, values in record.items():
                outfile.write("%s %s\n" % (field, "\n; ".join(set(values))))
            outfile.write("$\n")
# We only want to add image items for images that we also actually have on disk at time of import.
# Read in a list of current filenames from disk
CURRENT_JPGS = set(open("all_jpg.txt").read().split("\n"))
def parse(filename, HIM):
    """Parse one museum XML export and return a list of textbase records.

    Each ``mus_catalogue`` element becomes a dict of field-name -> list of
    values.  Relies on the module-level ``CIT_data`` (valid CIT terms) and
    ``CURRENT_JPGS`` (images actually present on disk).  *HIM* is the
    holding-institution code (e.g. "VANDA") tagged onto every record.
    """
    # Note, parsing file: CIT export_inOrder_04.03.2019.xml 20190312 gives error in XML on lines 781763 and 781765 encountering character '\x02' embedded in file.
    # Looks like all the source have this so do a search and replace to fix it.
    # Object with <sys_id>O1455666</sys_id> has no vanda_museum_number ?
    # Some objects, like cit_O1466337 doesnot the associated jpg yet?
    filecontents = (
        open(filename, encoding="utf8")
        .read()
        .replace(
            "^vandap34_object_display_hours_calc", "vandap34_object_display_hours_calc"
        )
    )
    doc = ET.fromstring(filecontents)
    objs = []
    for item in tqdm(doc.findall(".//mus_catalogue")):
        sys_id = item.find(".//sys_id")
        if sys_id.text is None:
            # NOTE(review): on the very first iteration 'obj' is not yet
            # bound, so hitting this branch there raises NameError -- confirm
            # whether this debug print/break is still wanted.
            print(obj)
            break
        obj = {
            "ID": ["cit_%s" % sys_id.text],
            "HIM": ["CIT", HIM],
            "COL": [HIM],
            "TYPE": ["image"],
            "LOCATION.INV": ["Victoria and Albert Museum"],
        }
        name = item.find(".//mus_object_name/_")
        if name is not None:
            obj["DESCRIPTION"] = [name.text]
        spec_title_field = item.find(".//spec_title_field")
        if spec_title_field is not None:
            obj["TITLE"] = [spec_title_field.text]
        # Collect image filenames, but only those that exist on disk.
        for mus_obj_images_field_data in item.findall(".//mus_obj_images_field_data"):
            image_filename = mus_obj_images_field_data.text
            if not image_filename.endswith(".jpg"):
                image_filename = "%s.jpg" % mus_obj_images_field_data.text
            if image_filename in CURRENT_JPGS:
                obj.setdefault("URL.IMAGE", []).append(image_filename)
        # Note only import items with valid CIT IDs as classifiers.
        # https://chineseiconography.org/r/view/cit_O68886/vanda
        cit_list = []
        cit_list_id = []
        # And do all the CIT terms, but retain their IDs.
        for spec_content_other in item.findall(".//spec_content_other/_"):
            spec_content_other_field_val = spec_content_other.find(
                ".//spec_content_other_field_val"
            )
            if (
                spec_content_other_field_val is not None
                and spec_content_other_field_val.text
            ):
                # Only look for the cit id if we also have a val for it, so do it inside the if
                spec_content_other_field_th_i = spec_content_other.find(
                    ".//spec_content_other_field_th_i"
                )
                if (
                    spec_content_other_field_th_i is not None
                    and spec_content_other_field_th_i.text
                ):
                    cit_id = spec_content_other_field_th_i.text
                    if cit_id in CIT_data:
                        # We want to add the NUMERIC notation (like 1.1.2)
                        cit_list.append(CIT_data[cit_id].get("N")[0])
                        cit_list_id.append(cit_id)
        if cit_list:
            obj["CIT"] = cit_list
            obj["CIT.ID"] = cit_list_id
        # Simple one-to-one XPath -> output-field copies.
        mapping = {
            ".//mus_part_obj_num_display": "ID.INV.ALT",
            ".//spec_object_production_date_note": "DATE",
            ".//spec_object_production_date_field_text": "DATE",
            ".//spec_other_number_field": "INSTIT.INV",
            ".//spec_other_number_type/spec_other_number_type_val": "ID.INV.INST",
            ".//spec_object_production_person_field_data": "PERSON.ARTIST",
            ".//spec_object_production_person_association_val": "PERSON.ROLE",
            ".//spec_reference_details": "LOCATION.INV",
            ".//mus_reference_free": "URL.WEBPAGE",
            ".//vanda_museum_number": "ID.INV.ALT",
        }
        for path, field in mapping.items():
            val = item.find(path)
            if val is not None and val.text:
                obj[field] = list(filter(None, val.text.split("\n")))
        # For the NPM and MET the REF fiekd contains the URL.IMAGE,
        # For VandA entries it should be
        # 'http://collections.vam.ac.uk/item/' + sys_id.text
        if HIM == "VANDA":
            obj["URL.WEBPAGE"] = ["http://collections.vam.ac.uk/item/" + sys_id.text]
        # The NPM JPG filenames are what is in the 'INSTIT.INV' field with a .jpg appended
        # if HIM == "NPM":
        #     INSTIT_INV = obj.get("INSTIT.INV")
        #     if INSTIT_INV:
        #         obj["URL.IMAGE"] = [f"{x.strip()}.jpg" for x in INSTIT_INV]
        # For some exported items, the image filenames are NOT in .//mus_obj_images_field_data
        # but need to be extracted from .//vanda_museum_number :-(
        if "URL.IMAGE" not in obj:
            vanda_museum_number = obj.get("ID.INV.ALT", [None])[0]
            vanda_museum_number_image = f"{vanda_museum_number}.jpg"
            if vanda_museum_number_image in CURRENT_JPGS:
                obj["URL.IMAGE"] = [vanda_museum_number_image.strip()]
        # At this time we only want to import items that DO have images.
        # As of 25 July there are 2672 objects total including without images
        #
        # if len(obj.get('URL.IMAGE', [])) < 1:
        #     continue
        objs.append(obj)
    return objs
# Each holding-institution code is both the directory name on disk holding
# that institution's XML exports and the HIM tag written into every record.
HIM_CODES = ("VANDA", "MET", "NPM", "CMA")
data_list = []
for HIM_CODE in HIM_CODES:
    if not os.path.exists(HIM_CODE):
        print(f"Directory named {HIM_CODE} not found")
        continue
    for filename in os.listdir(HIM_CODE):
        # Parse every XML export found in the institution's directory.
        if filename.lower().endswith(".xml"):
            filepath = os.path.join(HIM_CODE, filename)
            data_list.extend(parse(filepath, HIM_CODE))
# Write all parsed records out as a single textbase dump.
dump("CATALOG.dmp", data_list)
| 5,481 | 0 | 46 |
f872867346327e527db60d11a8df022a8a712789 | 4,633 | py | Python | frunner/pipeline.py | fiz9832/frunner | c2977f8b6091c5cfccd75aa368cd90a0e81e588f | [
"MIT"
] | null | null | null | frunner/pipeline.py | fiz9832/frunner | c2977f8b6091c5cfccd75aa368cd90a0e81e588f | [
"MIT"
] | null | null | null | frunner/pipeline.py | fiz9832/frunner | c2977f8b6091c5cfccd75aa368cd90a0e81e588f | [
"MIT"
] | null | null | null |
__all__ = ['pipeline']
import asyncio
from .task_tui import start_tui
import frunner as fr
from asyncio.subprocess import PIPE
from frunner import State
from collections import deque
| 33.092857 | 100 | 0.597021 |
__all__ = ['pipeline']
import asyncio
from .task_tui import start_tui
import frunner as fr
from asyncio.subprocess import PIPE
from frunner import State
from collections import deque
def init_tasks(tasks):
    """Seed each task's ``importance`` by walking the prerequisite graph.

    Every task's counter is incremented once per path that reaches it, so a
    task that many other tasks (transitively) depend on scores higher.
    """
    def bump(task):
        # Depth-first: count this visit, then recurse into prerequisites.
        task.importance += 1
        for prereq in task.prereqs:
            bump(prereq)

    for task in tasks:
        bump(task)
async def start_all(pipeline, tui):
    """Run the pipeline and the TUI concurrently until one of them finishes.

    Returns the result of whichever ``start_async()`` completed first.  The
    other task is left pending; the caller's event-loop shutdown cancels it.
    """
    # BUG FIX: asyncio.wait() no longer accepts bare coroutines (deprecated
    # since 3.8, TypeError on 3.11+) -- wrap them in Tasks explicitly.
    futures = [
        asyncio.ensure_future(pipeline.start_async()),
        asyncio.ensure_future(tui.start_async()),
    ]
    done, __ = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
    return done.pop().result()
class TaskResult:
    """Outcome of one task run: captured output streams and the exit code."""

    def __init__(self, task, stdout=None, stderr=None, retcode=None):
        # Keep a reference to the task so __str__ can name it.
        self.task = task
        self.stdout = stdout
        self.stderr = stderr
        self.retcode = retcode

    def __str__(self):
        return "[{}] retcode={}".format(self.task.name, self.retcode)
async def run_task(task, pipeline):
    """Execute *task*'s command as a subprocess and record its outcome.

    Sets ``task.state`` to RUNNING while the process executes, then to
    COMPLETED (exit code 0) or FAILED, stores a TaskResult on
    ``task.result``, and returns the task.  Progress is reported through
    ``pipeline.info``.
    """
    pipeline.info(f'[{task.name}] Executing...')
    task.state = State.RUNNING
    # Capture both streams via pipes; run in the task's working directory.
    proc = await asyncio.create_subprocess_exec(*task.args,
            stdout=PIPE, stderr=PIPE, cwd=task.cwd)
    # Drain stdout and stderr concurrently to avoid pipe-buffer deadlock.
    stdout, stderr = await asyncio.gather(
        read_stream(proc.stdout, print),
        read_stream(proc.stderr, print))
    retcode = await proc.wait()
    task.state = State.COMPLETED if retcode == 0 else State.FAILED
    task.result = TaskResult(task, stdout=stdout, stderr=stderr, retcode=retcode)
    pipeline.info(f'[{task.name}] Returned: {task.state}. stdout #={len(stdout)} retcode={retcode}')
    return task
async def read_stream(stream, display):
    """Collect every line from *stream* until EOF and return them as a list.

    The ``display`` callback is accepted but never called here; lines are
    only buffered.  (NOTE(review): looks like a display hook that was never
    wired up -- confirm before removing.)
    """
    collected = []
    line = await stream.readline()
    while line:
        collected.append(line)
        line = await stream.readline()
    return collected
def get_ready_queue(tasks, info):
    """Promote runnable tasks to READY and return them as a deque.

    A PENDING task becomes READY when it has no prerequisites or when every
    prerequisite is COMPLETED; it becomes FAILED as soon as any prerequisite
    has FAILED.  Already-finished tasks are skipped.  The result is ordered
    by each task's ``get_distance()`` (ascending).  *info* is accepted for
    interface compatibility but is not used here.
    """
    runnable = []
    for task in tasks:
        if task.state in (State.COMPLETED, State.FAILED):
            continue  # already finished -- nothing to schedule
        if task.state == State.PENDING:
            prereqs = task.prereqs
            if not prereqs:
                task.state = State.READY
            elif all(p.state == State.COMPLETED for p in prereqs):
                task.state = State.READY
            elif any(p.state == State.FAILED for p in prereqs):
                task.state = State.FAILED
        if task.state == State.READY:
            runnable.append(task)
    # Order by distance, then hand back a deque so the scheduler can
    # popleft() efficiently.
    runnable.sort(key=lambda task: task.get_distance())
    return deque(runnable)
class Pipeline:
    """Orchestrate a DAG of TaskUnits, running a bounded number at a time.

    Tasks are registered with :meth:`add_task`; :meth:`run` drives the
    pipeline together with the terminal UI until everything completes (or
    the user quits).
    """

    def __init__(self):
        self.tasks = set()    # all registered TaskUnits
        self.log = deque()    # rolling message log shown by the TUI
        self.success = None   # set by run(): True iff every task COMPLETED

    def add_task(self, name, *args):
        """Register a new task; *args* is the command line to execute.

        Raises:
            ValueError: if a task with the same name already exists.
        """
        task = fr.TaskUnit(name, *args)
        if task in self.tasks:
            raise ValueError(f'"{name}" already exists.')
        self.tasks.add(task)
        return task

    def info(self, msg):
        """Append *msg* to the log, keeping only the most recent 50 entries."""
        self.log.append(msg)
        if len(self.log) > 50:
            self.log.popleft()

    async def start_async(self, queue_len=3):
        """Run every task, keeping at most *queue_len* in flight at once."""
        init_tasks(self.tasks)
        ready_queue = get_ready_queue(self.tasks, self.info)
        nn = min(queue_len, len(ready_queue))
        # BUG FIX: wrap coroutines in Tasks -- asyncio.wait() rejects bare
        # coroutines on Python 3.11+.  Using a set also makes the .add()
        # below valid on every iteration.
        futures = {
            asyncio.ensure_future(run_task(ready_queue.popleft(), self))
            for _ in range(nn)
        }
        while True:
            __, futures = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
            # At least one task finished: top the in-flight set back up with
            # whatever has now become ready.
            ready_queue = get_ready_queue(self.tasks, self.info)
            while ready_queue and len(futures) < queue_len:
                futures.add(asyncio.ensure_future(run_task(ready_queue.popleft(), self)))
            if not ready_queue and not futures:
                break

    async def _start_async(self):
        # Debug/demo stub: ticks ten times, half a second apart, then
        # returns self.  Not used by run().
        self.n = 0
        while True:
            self.n += 1
            if self.n > 10:
                break
            await asyncio.sleep(0.5)
        return self

    def run(self):
        """Blocking entry point: run the pipeline alongside the TUI."""
        with start_tui(self) as tui:
            # complete both event loops
            result = asyncio.run(start_all(self, tui))
            z = [t for t in self.tasks if t.state != State.COMPLETED]
            self.success = not z
            if not self.success:
                # BUG FIX: message was missing its f-prefix, so the literal
                # text "{len(z)}" was logged instead of the count.
                self.info(f'Not all tasks completed: {len(z)}')
                for t in z:
                    self.info(f"{t.name}: {t.state}")
                self.info("Press q to exit...")
            # Keep the TUI alive so the user can inspect the final state.
            result = asyncio.run(tui.start_async())
        # user canceled or pipeline finished?
        print(result)
def pipeline():
    """Factory helper: return a fresh, empty :class:`Pipeline`."""
    return Pipeline()
| 4,051 | -10 | 406 |
68bad4b26958f189c6cdc79fb3f642b3041f1e99 | 910 | py | Python | twitoff/app.py | jcs-lambda/ds11u3s3-twitoff | 99647041727154e8e4810fcca6ac489d8e0c1903 | [
"MIT"
] | null | null | null | twitoff/app.py | jcs-lambda/ds11u3s3-twitoff | 99647041727154e8e4810fcca6ac489d8e0c1903 | [
"MIT"
] | 2 | 2021-03-22T12:29:02.000Z | 2021-09-08T01:48:34.000Z | twitoff/app.py | jcs-lambda/ds11u3s3-twitoff | 99647041727154e8e4810fcca6ac489d8e0c1903 | [
"MIT"
] | 1 | 2021-03-08T19:52:05.000Z | 2021-03-08T19:52:05.000Z | """Flask application core logic."""
import os
from dotenv import load_dotenv
from flask import Flask, url_for
from twitoff.models import db, migrate
from twitoff.routes.home_routes import home_routes
from twitoff.routes.twitter_routes import twitter_routes
assert load_dotenv(), 'falied to initialize environment'
SECRET_KEY = os.getenv('SECRET_KEY')
DATABASE_URL = os.getenv('DATABASE_URL')
def create_app():
    """Create and configure a Flask application instance.

    Returns:
        Flask application instance.
    """
    app = Flask(__name__)
    # Session/signing key, loaded from the environment at module import.
    app.config['SECRET_KEY'] = SECRET_KEY
    # configure database
    app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    migrate.init_app(app, db)
    # configure routes
    app.register_blueprint(home_routes)
    app.register_blueprint(twitter_routes)
    return app
| 24.594595 | 57 | 0.749451 | """Flask application core logic."""
import os
from dotenv import load_dotenv
from flask import Flask, url_for
from twitoff.models import db, migrate
from twitoff.routes.home_routes import home_routes
from twitoff.routes.twitter_routes import twitter_routes
assert load_dotenv(), 'falied to initialize environment'
SECRET_KEY = os.getenv('SECRET_KEY')
DATABASE_URL = os.getenv('DATABASE_URL')
def create_app():
    """Application factory: build and wire up a Flask application instance.

    Returns:
        A configured Flask application instance.
    """
    app = Flask(__name__)

    # Core settings, pulled from the environment at module load time.
    app.config.update(
        SECRET_KEY=SECRET_KEY,
        SQLALCHEMY_DATABASE_URI=DATABASE_URL,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )

    # Database layer.
    db.init_app(app)
    migrate.init_app(app, db)

    # HTTP routes.
    for blueprint in (home_routes, twitter_routes):
        app.register_blueprint(blueprint)

    return app
| 0 | 0 | 0 |
98d7482db60433a8772bd29058ddaa96f4d8d0af | 3,225 | py | Python | tempest/lib/services/network/__init__.py | sapcc/tempest | 93a902072fd9986f2bb660166552f37d9eb5bdbb | [
"Apache-2.0"
] | 254 | 2015-01-05T19:22:52.000Z | 2022-03-29T08:14:54.000Z | tempest/lib/services/network/__init__.py | openstack/tempest | c2f5a47cfba430d2086d1e67f4234ca0a9f855ff | [
"Apache-2.0"
] | 13 | 2015-03-02T15:53:04.000Z | 2022-02-16T02:28:14.000Z | tempest/lib/services/network/__init__.py | openstack/tempest | c2f5a47cfba430d2086d1e67f4234ca0a9f855ff | [
"Apache-2.0"
] | 367 | 2015-01-07T15:05:39.000Z | 2022-03-04T09:50:35.000Z | # Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from tempest.lib.services.network.agents_client import AgentsClient
from tempest.lib.services.network.extensions_client import ExtensionsClient
from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
from tempest.lib.services.network.floating_ips_port_forwarding_client import \
FloatingIpsPortForwardingClient
from tempest.lib.services.network.log_resource_client import LogResourceClient
from tempest.lib.services.network.loggable_resource_client import \
LoggableResourceClient
from tempest.lib.services.network.metering_label_rules_client import \
MeteringLabelRulesClient
from tempest.lib.services.network.metering_labels_client import \
MeteringLabelsClient
from tempest.lib.services.network.networks_client import NetworksClient
from tempest.lib.services.network.ports_client import PortsClient
from tempest.lib.services.network.qos_client import QosClient
from tempest.lib.services.network.qos_limit_bandwidth_rules_client import \
QosLimitBandwidthRulesClient
from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
QosMinimumBandwidthRulesClient
from tempest.lib.services.network.quotas_client import QuotasClient
from tempest.lib.services.network.routers_client import RoutersClient
from tempest.lib.services.network.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.lib.services.network.security_groups_client import \
SecurityGroupsClient
from tempest.lib.services.network.segments_client import SegmentsClient
from tempest.lib.services.network.service_providers_client import \
ServiceProvidersClient
from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
from tempest.lib.services.network.subnets_client import SubnetsClient
from tempest.lib.services.network.tags_client import TagsClient
from tempest.lib.services.network.trunks_client import TrunksClient
from tempest.lib.services.network.versions_client import NetworkVersionsClient
__all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
'FloatingIpsPortForwardingClient', 'MeteringLabelRulesClient',
'MeteringLabelsClient', 'NetworksClient', 'NetworkVersionsClient',
'PortsClient', 'QosClient', 'QosMinimumBandwidthRulesClient',
'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
'SecurityGroupRulesClient', 'SecurityGroupsClient',
'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
'LoggableResourceClient']
| 55.603448 | 79 | 0.819845 | # Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from tempest.lib.services.network.agents_client import AgentsClient
from tempest.lib.services.network.extensions_client import ExtensionsClient
from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
from tempest.lib.services.network.floating_ips_port_forwarding_client import \
FloatingIpsPortForwardingClient
from tempest.lib.services.network.log_resource_client import LogResourceClient
from tempest.lib.services.network.loggable_resource_client import \
LoggableResourceClient
from tempest.lib.services.network.metering_label_rules_client import \
MeteringLabelRulesClient
from tempest.lib.services.network.metering_labels_client import \
MeteringLabelsClient
from tempest.lib.services.network.networks_client import NetworksClient
from tempest.lib.services.network.ports_client import PortsClient
from tempest.lib.services.network.qos_client import QosClient
from tempest.lib.services.network.qos_limit_bandwidth_rules_client import \
QosLimitBandwidthRulesClient
from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
QosMinimumBandwidthRulesClient
from tempest.lib.services.network.quotas_client import QuotasClient
from tempest.lib.services.network.routers_client import RoutersClient
from tempest.lib.services.network.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.lib.services.network.security_groups_client import \
SecurityGroupsClient
from tempest.lib.services.network.segments_client import SegmentsClient
from tempest.lib.services.network.service_providers_client import \
ServiceProvidersClient
from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
from tempest.lib.services.network.subnets_client import SubnetsClient
from tempest.lib.services.network.tags_client import TagsClient
from tempest.lib.services.network.trunks_client import TrunksClient
from tempest.lib.services.network.versions_client import NetworkVersionsClient
__all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
'FloatingIpsPortForwardingClient', 'MeteringLabelRulesClient',
'MeteringLabelsClient', 'NetworksClient', 'NetworkVersionsClient',
'PortsClient', 'QosClient', 'QosMinimumBandwidthRulesClient',
'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
'SecurityGroupRulesClient', 'SecurityGroupsClient',
'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
'LoggableResourceClient']
| 0 | 0 | 0 |
b2721185bfd5562ec0ddb07b52751d9191a5b12d | 791 | py | Python | src/powerbi_vcs/version.py | lucasfcnunes/powerbi-vcs | 342d7b53eaa329884492b601ec1cefefe48fbc23 | [
"MIT"
] | null | null | null | src/powerbi_vcs/version.py | lucasfcnunes/powerbi-vcs | 342d7b53eaa329884492b601ec1cefefe48fbc23 | [
"MIT"
] | null | null | null | src/powerbi_vcs/version.py | lucasfcnunes/powerbi-vcs | 342d7b53eaa329884492b601ec1cefefe48fbc23 | [
"MIT"
] | null | null | null | try:
# importlib.metadata is present in Python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# use the shim package importlib-metadata pre-3.8
import importlib_metadata as importlib_metadata # type: ignore
import pathlib
for distribution_name in [__package__, __name__, pathlib.Path(__file__).parent.name]:
try:
_DISTRIBUTION_METADATA = importlib_metadata.metadata(
distribution_name=distribution_name,
)
break
except importlib_metadata.PackageNotFoundError:
continue
else:
pass
author = _DISTRIBUTION_METADATA["Author"]
project = _DISTRIBUTION_METADATA["Name"]
version = _DISTRIBUTION_METADATA["Version"]
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
| 31.64 | 85 | 0.734513 | try:
# importlib.metadata is present in Python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# use the shim package importlib-metadata pre-3.8
import importlib_metadata as importlib_metadata # type: ignore
import pathlib
for distribution_name in [__package__, __name__, pathlib.Path(__file__).parent.name]:
try:
_DISTRIBUTION_METADATA = importlib_metadata.metadata(
distribution_name=distribution_name,
)
break
except importlib_metadata.PackageNotFoundError:
continue
else:
pass
author = _DISTRIBUTION_METADATA["Author"]
project = _DISTRIBUTION_METADATA["Name"]
version = _DISTRIBUTION_METADATA["Version"]
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
| 0 | 0 | 0 |
bd5cca32b28e2ab35362a263c79e355624302b50 | 7,961 | py | Python | sdk/python/pulumi_alicloud/quotas/get_quota_alarms.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/quotas/get_quota_alarms.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/quotas/get_quota_alarms.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetQuotaAlarmsResult',
'AwaitableGetQuotaAlarmsResult',
'get_quota_alarms',
]
@pulumi.output_type
class GetQuotaAlarmsResult:
"""
A collection of values returned by getQuotaAlarms.
"""
@property
@pulumi.getter
@property
@pulumi.getter(name="enableDetails")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
@property
@pulumi.getter(name="nameRegex")
@property
@pulumi.getter
@property
@pulumi.getter(name="outputFile")
@property
@pulumi.getter(name="productCode")
@property
@pulumi.getter(name="quotaActionCode")
@property
@pulumi.getter(name="quotaAlarmName")
@property
@pulumi.getter(name="quotaDimensions")
# pylint: disable=using-constant-test
def get_quota_alarms(enable_details: Optional[bool] = None,
ids: Optional[Sequence[str]] = None,
name_regex: Optional[str] = None,
output_file: Optional[str] = None,
product_code: Optional[str] = None,
quota_action_code: Optional[str] = None,
quota_alarm_name: Optional[str] = None,
quota_dimensions: Optional[Sequence[pulumi.InputType['GetQuotaAlarmsQuotaDimensionArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetQuotaAlarmsResult:
"""
This data source provides the Quotas Quota Alarms of the current Alibaba Cloud user.
> **NOTE:** Available in v1.116.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.quotas.get_quota_alarms(ids=["5VR90-421F886-81E9-xxx"],
name_regex="tf-testAcc")
pulumi.export("firstQuotasQuotaAlarmId", example.alarms[0].id)
```
:param bool enable_details: Default to `false`. Set it to `true` can output more details about resource attributes.
:param Sequence[str] ids: A list of Quota Alarm IDs.
:param str name_regex: A regex string to filter results by Quota Alarm name.
:param str product_code: The Product Code.
:param str quota_action_code: The Quota Action Code.
:param str quota_alarm_name: The name of Quota Alarm.
:param Sequence[pulumi.InputType['GetQuotaAlarmsQuotaDimensionArgs']] quota_dimensions: The Quota Dimensions.
"""
__args__ = dict()
__args__['enableDetails'] = enable_details
__args__['ids'] = ids
__args__['nameRegex'] = name_regex
__args__['outputFile'] = output_file
__args__['productCode'] = product_code
__args__['quotaActionCode'] = quota_action_code
__args__['quotaAlarmName'] = quota_alarm_name
__args__['quotaDimensions'] = quota_dimensions
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:quotas/getQuotaAlarms:getQuotaAlarms', __args__, opts=opts, typ=GetQuotaAlarmsResult).value
return AwaitableGetQuotaAlarmsResult(
alarms=__ret__.alarms,
enable_details=__ret__.enable_details,
id=__ret__.id,
ids=__ret__.ids,
name_regex=__ret__.name_regex,
names=__ret__.names,
output_file=__ret__.output_file,
product_code=__ret__.product_code,
quota_action_code=__ret__.quota_action_code,
quota_alarm_name=__ret__.quota_alarm_name,
quota_dimensions=__ret__.quota_dimensions)
| 39.805 | 216 | 0.674538 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetQuotaAlarmsResult',
'AwaitableGetQuotaAlarmsResult',
'get_quota_alarms',
]
@pulumi.output_type
class GetQuotaAlarmsResult:
"""
A collection of values returned by getQuotaAlarms.
"""
def __init__(__self__, alarms=None, enable_details=None, id=None, ids=None, name_regex=None, names=None, output_file=None, product_code=None, quota_action_code=None, quota_alarm_name=None, quota_dimensions=None):
if alarms and not isinstance(alarms, list):
raise TypeError("Expected argument 'alarms' to be a list")
pulumi.set(__self__, "alarms", alarms)
if enable_details and not isinstance(enable_details, bool):
raise TypeError("Expected argument 'enable_details' to be a bool")
pulumi.set(__self__, "enable_details", enable_details)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if name_regex and not isinstance(name_regex, str):
raise TypeError("Expected argument 'name_regex' to be a str")
pulumi.set(__self__, "name_regex", name_regex)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
if output_file and not isinstance(output_file, str):
raise TypeError("Expected argument 'output_file' to be a str")
pulumi.set(__self__, "output_file", output_file)
if product_code and not isinstance(product_code, str):
raise TypeError("Expected argument 'product_code' to be a str")
pulumi.set(__self__, "product_code", product_code)
if quota_action_code and not isinstance(quota_action_code, str):
raise TypeError("Expected argument 'quota_action_code' to be a str")
pulumi.set(__self__, "quota_action_code", quota_action_code)
if quota_alarm_name and not isinstance(quota_alarm_name, str):
raise TypeError("Expected argument 'quota_alarm_name' to be a str")
pulumi.set(__self__, "quota_alarm_name", quota_alarm_name)
if quota_dimensions and not isinstance(quota_dimensions, list):
raise TypeError("Expected argument 'quota_dimensions' to be a list")
pulumi.set(__self__, "quota_dimensions", quota_dimensions)
@property
@pulumi.getter
def alarms(self) -> Sequence['outputs.GetQuotaAlarmsAlarmResult']:
return pulumi.get(self, "alarms")
@property
@pulumi.getter(name="enableDetails")
def enable_details(self) -> Optional[bool]:
return pulumi.get(self, "enable_details")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="nameRegex")
def name_regex(self) -> Optional[str]:
return pulumi.get(self, "name_regex")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
return pulumi.get(self, "names")
@property
@pulumi.getter(name="outputFile")
def output_file(self) -> Optional[str]:
return pulumi.get(self, "output_file")
@property
@pulumi.getter(name="productCode")
def product_code(self) -> Optional[str]:
return pulumi.get(self, "product_code")
@property
@pulumi.getter(name="quotaActionCode")
def quota_action_code(self) -> Optional[str]:
return pulumi.get(self, "quota_action_code")
@property
@pulumi.getter(name="quotaAlarmName")
def quota_alarm_name(self) -> Optional[str]:
return pulumi.get(self, "quota_alarm_name")
@property
@pulumi.getter(name="quotaDimensions")
def quota_dimensions(self) -> Optional[Sequence['outputs.GetQuotaAlarmsQuotaDimensionResult']]:
return pulumi.get(self, "quota_dimensions")
class AwaitableGetQuotaAlarmsResult(GetQuotaAlarmsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetQuotaAlarmsResult(
alarms=self.alarms,
enable_details=self.enable_details,
id=self.id,
ids=self.ids,
name_regex=self.name_regex,
names=self.names,
output_file=self.output_file,
product_code=self.product_code,
quota_action_code=self.quota_action_code,
quota_alarm_name=self.quota_alarm_name,
quota_dimensions=self.quota_dimensions)
def get_quota_alarms(enable_details: Optional[bool] = None,
ids: Optional[Sequence[str]] = None,
name_regex: Optional[str] = None,
output_file: Optional[str] = None,
product_code: Optional[str] = None,
quota_action_code: Optional[str] = None,
quota_alarm_name: Optional[str] = None,
quota_dimensions: Optional[Sequence[pulumi.InputType['GetQuotaAlarmsQuotaDimensionArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetQuotaAlarmsResult:
"""
This data source provides the Quotas Quota Alarms of the current Alibaba Cloud user.
> **NOTE:** Available in v1.116.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.quotas.get_quota_alarms(ids=["5VR90-421F886-81E9-xxx"],
name_regex="tf-testAcc")
pulumi.export("firstQuotasQuotaAlarmId", example.alarms[0].id)
```
:param bool enable_details: Default to `false`. Set it to `true` can output more details about resource attributes.
:param Sequence[str] ids: A list of Quota Alarm IDs.
:param str name_regex: A regex string to filter results by Quota Alarm name.
:param str product_code: The Product Code.
:param str quota_action_code: The Quota Action Code.
:param str quota_alarm_name: The name of Quota Alarm.
:param Sequence[pulumi.InputType['GetQuotaAlarmsQuotaDimensionArgs']] quota_dimensions: The Quota Dimensions.
"""
__args__ = dict()
__args__['enableDetails'] = enable_details
__args__['ids'] = ids
__args__['nameRegex'] = name_regex
__args__['outputFile'] = output_file
__args__['productCode'] = product_code
__args__['quotaActionCode'] = quota_action_code
__args__['quotaAlarmName'] = quota_alarm_name
__args__['quotaDimensions'] = quota_dimensions
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:quotas/getQuotaAlarms:getQuotaAlarms', __args__, opts=opts, typ=GetQuotaAlarmsResult).value
return AwaitableGetQuotaAlarmsResult(
alarms=__ret__.alarms,
enable_details=__ret__.enable_details,
id=__ret__.id,
ids=__ret__.ids,
name_regex=__ret__.name_regex,
names=__ret__.names,
output_file=__ret__.output_file,
product_code=__ret__.product_code,
quota_action_code=__ret__.quota_action_code,
quota_alarm_name=__ret__.quota_alarm_name,
quota_dimensions=__ret__.quota_dimensions)
| 3,534 | 37 | 335 |
5b8ddf1bdd8e0eb8741da31c8fe01dda74474319 | 1,824 | py | Python | tests/pwdbs/factoryboy.py | albireox/sdssdb | 02d165d3a4347e8241aacdbdca0cec86058c8d29 | [
"BSD-3-Clause"
] | 6 | 2019-04-10T21:28:44.000Z | 2021-03-01T18:39:55.000Z | tests/pwdbs/factoryboy.py | albireox/sdssdb | 02d165d3a4347e8241aacdbdca0cec86058c8d29 | [
"BSD-3-Clause"
] | 44 | 2018-10-31T17:48:20.000Z | 2022-01-27T20:52:26.000Z | tests/pwdbs/factoryboy.py | albireox/sdssdb | 02d165d3a4347e8241aacdbdca0cec86058c8d29 | [
"BSD-3-Clause"
] | 2 | 2021-07-13T17:09:43.000Z | 2021-07-13T19:33:18.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: peewee_factory.py
# Project: pwdbs
# Author: Brian Cherinka
# Created: Monday, 23rd March 2020 4:32:17 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Monday, 23rd March 2020 5:34:56 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
import peewee
from factory import base
#
# This code, copied from https://github.com/cam-stitt/factory_boy-peewee,
# implements a factory_boy Model Factory for Peewee ORM models since
# factory_boy does not support the peewee ORM
#
class PeeweeModelFactory(base.Factory):
"""Factory for peewee models. """
_options_class = PeeweeOptions
@classmethod
def _setup_next_sequence(cls, *args, **kwargs):
"""Compute the next available PK, based on the 'pk' database field."""
db = cls._meta.database
model = cls._meta.model
pk = getattr(model, model._meta.primary_key.name)
max_pk = (model.select(peewee.fn.Max(pk).alias('maxpk'))
.limit(1).order_by().execute())
max_pk = [mp.maxpk for mp in max_pk][0]
if isinstance(max_pk, int):
return max_pk + 1 if max_pk else 1
else:
return 1
@classmethod
def _create(cls, target_class, *args, **kwargs):
"""Create an instance of the model, and save it to the database."""
db = cls._meta.database
obj = target_class.create(**kwargs)
return obj
| 29.901639 | 78 | 0.659539 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: peewee_factory.py
# Project: pwdbs
# Author: Brian Cherinka
# Created: Monday, 23rd March 2020 4:32:17 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Monday, 23rd March 2020 5:34:56 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
import peewee
from factory import base
#
# This code, copied from https://github.com/cam-stitt/factory_boy-peewee,
# implements a factory_boy Model Factory for Peewee ORM models since
# factory_boy does not support the peewee ORM
#
class PeeweeOptions(base.FactoryOptions):
def _build_default_options(self):
return super(PeeweeOptions, self)._build_default_options() + [
base.OptionDefault('database', None, inherit=True),
]
class PeeweeModelFactory(base.Factory):
"""Factory for peewee models. """
_options_class = PeeweeOptions
class Meta:
abstract = True
@classmethod
def _setup_next_sequence(cls, *args, **kwargs):
"""Compute the next available PK, based on the 'pk' database field."""
db = cls._meta.database
model = cls._meta.model
pk = getattr(model, model._meta.primary_key.name)
max_pk = (model.select(peewee.fn.Max(pk).alias('maxpk'))
.limit(1).order_by().execute())
max_pk = [mp.maxpk for mp in max_pk][0]
if isinstance(max_pk, int):
return max_pk + 1 if max_pk else 1
else:
return 1
@classmethod
def _create(cls, target_class, *args, **kwargs):
"""Create an instance of the model, and save it to the database."""
db = cls._meta.database
obj = target_class.create(**kwargs)
return obj
| 157 | 34 | 76 |
c9f36b2cffd48de7f8d3a0bb81bcaca3abdf534d | 34,656 | py | Python | classes/score_with_diagnostic.py | MichaelAllen1966/1912_stroke_unit_location_with_outcomes | 2f5f5b86718afaac1c3ce7e9cde0c21feae90355 | [
"MIT"
] | null | null | null | classes/score_with_diagnostic.py | MichaelAllen1966/1912_stroke_unit_location_with_outcomes | 2f5f5b86718afaac1c3ce7e9cde0c21feae90355 | [
"MIT"
] | null | null | null | classes/score_with_diagnostic.py | MichaelAllen1966/1912_stroke_unit_location_with_outcomes | 2f5f5b86718afaac1c3ce7e9cde0c21feae90355 | [
"MIT"
] | null | null | null | ##TO DO Consider adding in 'not suitable for test'
"""
Score population according to:
0: Number of hospitals
1: Mean time to thrombolysis
2: Max time to thrombolysis
3: Mean time to thrombectomy
4: Maximum time to thrombectomy
5: Minimum thrombolysis admissions to any one hospital
6: Maximum thrombolysis admissions to any one hospital
7: Minimum thrombectomy admissions to any one hospital
8: Maximum thrombectomy admissions to any one hospital
9: Proportion patients within target thrombolysis time
10: Proportion patients attending unit with target first admissions
11: Proportion patients meeting both thrombolysis targets
12: Proportion patients within target thrombectomy time
13: Proportion patients attending unit with target thrombectomy
14: Proportion patients meeting targets both thrombectomy targets
15: Proportion patients meeting all thrombolysis + thrombectomy targets
16: 95th percentile time for thrombolysis
17: 95th percentile time for thrombectomy
18: Total transfers
19: Total transfer time
20: Clinical outcome (good outcomes) with no treatment
21: Clinical outcome (good outcomes) with treatment
22: Additional good outcomes per 1000 admissions
23: Median time to thrombolysis
24: Median time to thrombectomy
25: Minimum clinical outcome
26: 5th percentile clinical outcome
27: 95th percentile clinical outcome
28: Maximum clinical outcome
"""
import numpy as np
import pandas as pd
from classes.clinical_outcome import Clinical_outcome
| 39.926267 | 83 | 0.554998 | ##TO DO Consider adding in 'not suitable for test'
"""
Score population according to:
0: Number of hospitals
1: Mean time to thrombolysis
2: Max time to thrombolysis
3: Mean time to thrombectomy
4: Maximum time to thrombectomy
5: Minimum thrombolysis admissions to any one hospital
6: Maximum thrombolysis admissions to any one hospital
7: Minimum thrombectomy admissions to any one hospital
8: Maximum thrombectomy admissions to any one hospital
9: Proportion patients within target thrombolysis time
10: Proportion patients attending unit with target first admissions
11: Proportion patients meeting both thrombolysis targets
12: Proportion patients within target thrombectomy time
13: Proportion patients attending unit with target thrombectomy
14: Proportion patients meeting targets both thrombectomy targets
15: Proportion patients meeting all thrombolysis + thrombectomy targets
16: 95th percentile time for thrombolysis
17: 95th percentile time for thrombectomy
18: Total transfers
19: Total transfer time
20: Clinical outcome (good outcomes) with no treatment
21: Clinical outcome (good outcomes) with treatment
22: Additional good outcomes per 1000 admissions
23: Median time to thrombolysis
24: Median time to thrombectomy
25: Minimum clinical outcome
26: 5th percentile clinical outcome
27: 95th percentile clinical outcome
28: Maximum clinical outcome
"""
import numpy as np
import pandas as pd
from classes.clinical_outcome import Clinical_outcome
class Score_population_with_diagnostic():
def __init__(self, data, population):
number_of_scenarios = population.shape[0]
number_of_hospitals = population.shape[1]
number_of_areas = len(data.np_admissions)
total_admissions = sum(data.admissions)
# Set up results tables
self.results = np.zeros((number_of_scenarios, 29))
self.hospital_first_admissions = np.zeros((population.shape))
self.hospital_thrombectomy_admissions = np.zeros((population.shape))
node_results = np.zeros((number_of_areas, 47))
# Set up clinical outcome object
self.outcome = Clinical_outcome()
"""
Node results are results for each area (e.g. LSAO)
# General measures
0: Time to closest hospital
1: Orginal (full hosital list) index # of closest hospital
2: Time to closest CSC (direct)
3: Orginal (full hosital list) index # of closest CSC (direct)
4: Transfer time to closest CSC (drip and ship)
5: Orginal (full hosital list) index # of closest CSC (drip and ship)
6: Total drip and ship time: orginal transfer + net delay + transfer
# Negative diagnostic test (assume go to closest)
7: Negative test admissions
8: Chosen thrombolysis centre
9: Time to chosen thrombolysis centre
10: Chosen thrombectomy centre
11: Time to chosen thrombectomy centre
12: Number of transfers to CSC
13: Distance of transfers to CSC
14: Clinical benefit - no treatement
15: Additional clinical benefit
# Positive diagnostic test
16: Positive test admissions
17: Clinical benefit - no treatment
18: Additional clinical direct to CSC
19: Additional clinical drip and ship
20: Choose CSC
21: Chosen thrombolysis centre
22: Time to chosemn thrombolysis centre
23: Chosen thrombectomy centre
24: Time to chosen thrombectomy centre
25: Number of transfers to CSC
26: Distance of transfers to CSC
27: Clinical benefit from chosen location
# Adjusted admissions (takes into account people where no action woiuld
# be taken even with positive LVO diagnostic test)
28: Adjusted IVT admissions
29: Adjusted ET admissions
# Admission numbers
30: -ve test thrombolysis unit admissions
31: -ve test thrombectomy unit procedures
32: +ve test thrombolysis unit admissions
33: +ve test thrombectomy unit procedures
# Targets met
34: -ve test thrombolysis unit target admissions
35: -ve test thrombolysis target time
36: -ve test thrombolysis both targets
37: -ve test thrombectomy unit target admissions
38: -ve test thrombectomy target time
39: -ve test thrombectomy both targets
40: +ve test thrombolysis unit target admissions
41: +ve test thrombolysis target time
42: +ve test thrombolysis both targets
43: +ve test thrombectomy unit target admissions
44: +ve test thrombectomy target time
45: +ve test thrombectomy both targets
# Net clinical benefit
46: Net clinical benefit
"""
for i in range(number_of_scenarios):
# Create and apply mask to remove unused hospitals in scenario
if data.vary_et_centres:
# Have all hospitals open for IVT except forced closed ones
mask = data.hospitals['Fixed'] != -1
# Recalculate travel times to ET units
data.identify_closest_neighbouring_thrombectomy_unit(
population[i,:])
data.identify_closest_thrombectomy_unit_to_each_patient_area(
population[i,:])
data.convert_pandas_to_numpy()
else:
mask = population[i, :] == 1
_ = data.hospitals['hospital'].values
used_hospital_postcodes = _[mask]
_ = data.hospitals['index_#'].values
used_hospital_index_nos = _[mask]
used_travel_matrix = data.np_travel_matrix[:, mask]
# Node result 0: Identify time closest hospital
node_results[:, 0] = np.min(used_travel_matrix, axis=1)
# Node result 1: Identify orginal (full hosital list) index # of
# closest hospital
local_id = np.argmin(used_travel_matrix, axis=1)
node_results[:, 1] = used_hospital_index_nos[local_id]
# Node result 2: Time to closest CSC (direct)
node_results[:, 2] = \
data.np_closest_thrombectomy_to_each_area_time
# Node result 3: orginal (full hosital list) index # of closest
# CSC (direct)
node_results[:, 3] = \
data.np_closest_thrombectomy_to_each_area_index
# Node result 4 & 5: Transfer time and index (original) to
# closest CSC (drip'n'ship)
fancy_index = np.int_(node_results[:, 1])
node_results[:, 4] = \
data.np_closest_neighbouring_thrombectomy_unit_time[
fancy_index]
node_results[:, 5] = \
data.np_closest_neighbouring_thrombectomy_unit_index[
fancy_index]
# Node 6 Total drip and ship time (original travel + net delay +
# transfer)
node_results[:, 6] = (
node_results[:, 0] +
node_results[:, 4])
# Transfer delay if thrombectomy and thrombolysis centres are different
mask = node_results[:, 1] != node_results[:, 3]
node_results[mask, 6] += data.transfer_net_delay
## NEGATIVE DIAGNOSTIC TEST RESULTS
# Admissions with negative diagnostic test
node_results[:, 7] = data.admissions * \
(data.diagnostic_prop_negative)
# Create mask for direct to CSC
mask = node_results[:, 2] <= node_results[:, 0] + \
data.allowable_delay
# Chosen IVT unit
node_results[:, 8] = node_results[:, 1]
node_results[mask, 8] = node_results[mask, 3]
# IVT time
node_results[:, 9] = node_results[:, 0]
node_results[mask, 9] = node_results[mask, 2]
# Chosen ET unit
node_results[:, 10] = node_results[:, 5]
node_results[mask, 10] = node_results[mask, 3]
# ET time
node_results[:, 11] = node_results[:, 6]
node_results[mask, 11] = node_results[mask, 2]
# Number of transfers for drip and ship
node_results[:, 12] = \
(node_results[:, 7] *
data.diagnostic_neg_lvo *
data.prop_lvo_eligible_ivt *
data.prop_thrombolysed_lvo_receiving_thrombectomy)
node_results[mask, 12] = 0
# Distance of transfers for drip and ship
node_results[:, 13] = node_results[:, 12] * node_results[:, 4]
# Clinical benefit for negative diagnostic test
admissions = data.admissions.values * data.diagnostic_prop_negative
areas = len(admissions)
mimic = np.ones(areas) * data.diagnostic_neg_mimic
ich = np.ones(areas) * data.diagnostic_neg_ich
nlvo = np.ones(areas) * data.diagnostic_neg_nlvo
lvo = np.ones(areas) * data.diagnostic_neg_lvo
prop_nlvo_eligible_treatment = np.zeros(len(admissions))
prop_nlvo_eligible_treatment.fill(data.prop_nlvo_eligible_treatment)
prop_lvo_eligible_treatment = np.zeros(len(admissions))
prop_lvo_eligible_treatment.fill(data.prop_lvo_eligible_ivt)
door_to_needle = data.door_to_needle
onset_to_needle = (data.onset_to_travel +
door_to_needle +
node_results[:, 0])
onset_to_puncture = (data.onset_to_travel +
data.door_to_puncture +
node_results[:, 11])
# Get outcome with no treatment
no_treatment_outcome = (
data.diagnostic_neg_lvo * 0.1328 +
data.diagnostic_neg_nlvo * 0.4622 +
data.diagnostic_neg_ich * 0.24 +
data.diagnostic_neg_mimic * 1)
node_results[:, 14] = np.ones(areas) * no_treatment_outcome
# Get outcome with treatment
outcome = self.outcome.calculate_outcome_for_all(
mimic,
ich,
nlvo,
lvo,
onset_to_needle,
onset_to_puncture,
prop_nlvo_eligible_treatment,
prop_lvo_eligible_treatment,
data.prop_thrombolysed_lvo_receiving_thrombectomy)
# Calculate additional clinical benefit from treatment
node_results[:, 15] = outcome - node_results[:, 14]
## POSITIVE DIAGNISTIC TEST RESULTS
# To choose between direct to CSC or drip and ship for each area,
# compare clinical outcomes, and choose the best clinical outcome
# Record admissions for positive test
admissions = data.admissions.values * data.diagnostic_prop_positive
node_results[:, 16] = admissions
# Clinical benefit direct to CSC
door_to_needle = data.door_to_needle
onset_to_needle = (data.onset_to_travel +
door_to_needle +
node_results[:, 2])
onset_to_puncture = \
data.onset_to_travel + data.door_to_puncture + \
node_results[:, 2]
# Get outcome with no treatment
no_treatment_outcome = (
data.diagnostic_pos_lvo * 0.1328 +
data.diagnostic_pos_nlvo * 0.4622 +
data.diagnostic_pos_ich * 0.24 +
data.diagnostic_pos_mimic * 1)
node_results[:, 17] = no_treatment_outcome
# Get clinical benefit with treatment
mimic = np.ones(areas) * data.diagnostic_pos_mimic
ich = np.ones(areas) * data.diagnostic_pos_ich
nlvo = np.ones(areas) * data.diagnostic_pos_nlvo
lvo = np.ones(areas) * data.diagnostic_pos_lvo
outcome = self.outcome.calculate_outcome_for_all(
mimic,
ich,
nlvo,
lvo,
onset_to_needle,
onset_to_puncture,
prop_nlvo_eligible_treatment,
prop_lvo_eligible_treatment,
data.prop_thrombolysed_lvo_receiving_thrombectomy)
# Calculate added benefit with direct to thrombectomy centre
node_results[:, 18] = outcome - node_results[:, 17]
# Clinical benefit drip and ship
door_to_needle = data.door_to_needle
onset_to_needle = (data.onset_to_travel +
door_to_needle +
node_results[:, 9])
onset_to_puncture = (data.onset_to_travel +
data.door_to_puncture +
node_results[:, 6])
outcome = self.outcome.calculate_outcome_for_all(
mimic,
ich,
nlvo,
lvo,
onset_to_needle,
onset_to_puncture,
prop_nlvo_eligible_treatment,
prop_lvo_eligible_treatment,
data.prop_thrombolysed_lvo_receiving_thrombectomy)
node_results[:, 19] = outcome - node_results[:, 17]
# Create mask for direct to CSC
# debug make drip and ship for everyone
mask = node_results[:, 18] >= node_results[:, 19] + \
(data.diagnostic_outcome_signifciant/1000)
# Record direct to CSC (convert Boolean to 0/1)
node_results[:, 20] = mask * 1
# Chosen IVT unit
node_results[:, 21] = node_results[:, 1]
node_results[mask, 21] = node_results[mask, 3]
# IVT time
node_results[:, 22] = node_results[:, 0]
node_results[mask, 22] = node_results[mask, 2]
# Chosen ET unit
node_results[:, 23] = node_results[:, 5]
node_results[mask, 23] = node_results[mask, 3]
# ET time
node_results[:, 24] = node_results[:, 6]
node_results[mask, 24] = node_results[mask, 2]
# Number of transfers for drip and ship
node_results[:, 25] = \
(node_results[:, 16] *
data.diagnostic_pos_lvo *
data.prop_lvo_eligible_ivt *
data.prop_thrombolysed_lvo_receiving_thrombectomy)
node_results[mask, 25] = 0
# Distance of transfers for drip and ship
node_results[:, 26] = node_results[:, 25] * node_results[:, 4]
# Clinical benefit of chosen hospital
node_results[:, 27] = node_results[:, 19]
node_results[mask, 27] = node_results[mask, 18]
# Adjusted admissions
# IVT admitting unit includes positive diagnostic test fraction
# where no diversion would take place (e.g. certain outside
# window. ET admitting unit reduced by the same number
# ADMISSION NUMBERS
# Adjusted IVT admissions (includes 'no action on test' patients)
node_results[:, 28] = \
node_results[:, 7] + \
node_results[:, 16] * (1 - data.proportion_tested)
# Adjust ET admissions (reduced by 'no action on test' patients)
node_results[:, 29] = node_results[:, 16] * data.proportion_tested
# Non-adjusted admissions are used to calculate total thrombectomies
non_adjusted_admissions_concatenated = np.concatenate(
(node_results[:, 7], node_results[:, 16]))
# Adjusted admissions are used to calculate first admitting hospital
adjusted_admissions_concatenated = np.concatenate(
(node_results[:, 28], node_results[:, 29]))
admitting_ivt_hospital = np.concatenate((node_results[:, 8],
node_results[:, 21]))
admitting_et_hospital = np.concatenate((node_results[:, 10],
node_results[:, 23]))
thrombolysis_admissions_by_hospital = np.bincount(
np.int_(admitting_ivt_hospital),
weights=adjusted_admissions_concatenated)
thrombectomy_admissions_by_hospital = np.bincount(
np.int_(admitting_et_hospital),
weights=non_adjusted_admissions_concatenated)
overall_proportion_of_lvo_eligible_for_treatment = (
(data.diagnostic_prop_positive *
data.diagnostic_pos_lvo *
data.prop_lvo_eligible_ivt) +
((data.diagnostic_prop_negative) *
data.diagnostic_neg_lvo *
data.prop_lvo_eligible_ivt))
thrombectomy_admissions_by_hospital *= \
(overall_proportion_of_lvo_eligible_for_treatment *
data.prop_thrombolysed_lvo_receiving_thrombectomy)
# Fill in missing hospital counts at end of array
if len(thrombolysis_admissions_by_hospital) < number_of_hospitals:
zeros_to_add = number_of_hospitals - \
len(thrombolysis_admissions_by_hospital)
thrombolysis_admissions_by_hospital = \
np.hstack((thrombolysis_admissions_by_hospital,
np.zeros(zeros_to_add)))
if len(thrombectomy_admissions_by_hospital) < number_of_hospitals:
zeros_to_add = number_of_hospitals - \
len(thrombectomy_admissions_by_hospital)
thrombectomy_admissions_by_hospital = \
np.hstack((thrombectomy_admissions_by_hospital,
np.zeros(zeros_to_add)))
# Record admission results
self.hospital_first_admissions[i, :] = \
thrombolysis_admissions_by_hospital
self.hospital_thrombectomy_admissions[i, :] = \
thrombectomy_admissions_by_hospital
# Add in unit admission numbers to node results
# -ve test thrombolysis unit admissions
node_results[:, 30] = \
thrombolysis_admissions_by_hospital[np.int_ \
(node_results[:, 8])]
# -ve test thrombectomy unit procedures
node_results[:, 31] = \
(thrombectomy_admissions_by_hospital \
[np.int_(node_results[:, 10])])
# +ve test thrombolysis unit admissions
node_results[:, 32] = \
thrombolysis_admissions_by_hospital[np.int_ \
(node_results[:, 21])]
# +ve test thrombectomy unit procedures
node_results[:, 33] = \
(thrombectomy_admissions_by_hospital \
[np.int_(node_results[:, 23])])
# RECORD TARGETS MET
# -ve test thrombolysis unit target admissions
node_results[:, 34] = \
node_results[:, 30] >= data.target_thrombolysis_admissions
# -ve test thrombolysis target time
node_results[:, 35] = \
node_results[:, 9] <= data.target_travel_thrombolysis
# -ve test thrombolysis both targets
node_results[:, 36] = \
np.logical_and(node_results[:, 34], node_results[:, 35])
# -ve test thrombectomy unit target admissions
node_results[:, 37] = \
node_results[:, 31] >= data.target_thrombectomy_admissions
# -ve test thrombectomy target time
node_results[:, 38] = \
node_results[:, 11] <= data.target_travel_thrombectomy
# -ve test thrombectomy both targets
node_results[:, 39] = \
np.logical_and(node_results[:, 37], node_results[:, 38])
# +ve test thrombolysis unit target admissions
node_results[:, 40] = \
node_results[:, 32] >= data.target_thrombolysis_admissions
# +ve test thrombolysis target time
node_results[:, 41] = \
node_results[:, 22] <= data.target_travel_thrombolysis
# +ve test thrombolysis both targets
node_results[:, 42] = \
np.logical_and(node_results[:, 40], node_results[:, 41])
# +ve test thrombectomy unit target admissions
node_results[:, 43] = \
node_results[:, 33] >= data.target_thrombectomy_admissions
# +ve test thrombectomy target time
node_results[:, 44] = \
node_results[:, 24] <= data.target_travel_thrombectomy
# +ve test thrombectomy both targets
node_results[:, 45] = \
np.logical_and(node_results[:, 43], node_results[:, 44])
# Net clinical benefit (weighted benefit by diagnostic test
# proportion)
node_results[:, 46] = (
node_results[:, 15] * data.diagnostic_prop_negative +
node_results[:, 27] * data.diagnostic_prop_positive)
# Save full node results (not usually used)
if data.save_node_results:
filename = './' + data.output_location_node_results + \
str(i) + '.csv'
node_df = pd.DataFrame()
node_df['area'] = data.admissions_index.values
node_df['admissions'] = data.admissions.values
# Add negative test reults
node_df['neg_test_admissions_inc_no_test'] = node_results[:, 28]
node_df['neg_test_IVT_unit_#'] = node_results[:, 8]
node_df['neg_test_time_to_IVT_unit'] = node_results[:, 9]
node_df['neg_test_ET_unit_#'] = node_results[:, 10]
node_df['neg_test_time_to_ET_unit'] = node_results[:, 11]
node_df['neg_add_clinc_benefit'] = node_results[:, 15] * 1000
# Add IVT hospital names
node_df = pd.merge(node_df,
data.hospitals[['index_#', 'Hospital_name']],
left_on='neg_test_IVT_unit_#',
right_on='index_#',
how='left')
# Delete unecessary columns
node_df.drop(
['index_#', 'neg_test_IVT_unit_#'], axis=1, inplace=True)
# Rename hospital name column
node_df = node_df.rename \
(columns={'Hospital_name': 'neg_test_IVT unit'})
# Add ET hospital names
node_df = pd.merge(node_df,
data.hospitals[['index_#', 'Hospital_name']],
left_on='neg_test_ET_unit_#',
right_on='index_#',
how='left')
# Delete unecessary columns
node_df.drop(
['index_#', 'neg_test_ET_unit_#'], axis=1, inplace=True)
# Rename hospital name column
node_df = node_df.rename \
(columns={'Hospital_name': 'neg_test_ET unit'})
# Add positive test reults
node_df['pos_test_admissions_inc_no_test'] = node_results[:, 29]
node_df['pos_test_IVT_unit_#'] = node_results[:, 21]
node_df['pos_test_time_to_IVT_unit'] = node_results[:, 22]
node_df['pos_test_ET_unit_#'] = node_results[:, 23]
node_df['pos_test_time_to_ET_unit'] = node_results[:, 24]
node_df['pos_add_clinc_benefit'] = node_results[:, 27] * 1000
node_df['pos_add_clinc_benefit_direct_to_CSC'] = \
(node_results[:, 18] - node_results[:, 19]) * 1000
# Add IVT hospital names
node_df = pd.merge(node_df,
data.hospitals[['index_#', 'Hospital_name']],
left_on='pos_test_IVT_unit_#',
right_on='index_#',
how='left')
# Delete unecessary columns
node_df.drop(
['index_#', 'pos_test_IVT_unit_#'], axis=1, inplace=True)
# Rename hospital name column
node_df = node_df.rename \
(columns={'Hospital_name': 'pos_test_IVT unit'})
# Add ET hospital names
node_df = pd.merge(node_df,
data.hospitals[['index_#', 'Hospital_name']],
left_on='pos_test_ET_unit_#',
right_on='index_#',
how='left')
# Delete unecessary columns
node_df.drop(
['index_#', 'pos_test_ET_unit_#'], axis=1, inplace=True)
# Rename hospital name column
node_df = node_df.rename \
(columns={'Hospital_name': 'pos_test_ET unit'})
# Add net clinical benefit
node_df['net_clinical_benefit_per_1000'] = \
node_results[:, 46] * 1000
# save results
node_df.index.name = 'scenario'
node_df.to_csv(filename)
# COLLATE SUMMARY RESULTS
# Result 0: Number of hospitals
self.results[i, 0] = len(used_hospital_postcodes)
# Time to thrombolysis results (odd sequence due to original order
# aded to results)
# Result 1: mean time to thrombolysis
travel_time = np.concatenate((node_results[:, 9],
node_results[:, 22]))
patients = np.concatenate((node_results[:, 28],
node_results[:, 29]))
self.results[i, 1] = \
np.sum(travel_time * patients) / np.sum(patients)
# Result 2: Maximum time to thrombolysis
# Use mask where admissions >0
mask = data.admissions > 0
mask2 = np.concatenate((mask,mask))
self.results[i, 2] = np.max(travel_time[mask2])
# Result 23: Median time to thrombolysis
self.results[i, 23] = self.calculate_weighted_percentiles(
travel_time, patients, [0.5])[0]
# Result 16: 95th pecentile time to thrombolysis
self.results[i, 16] = self.calculate_weighted_percentiles(
travel_time, patients, [0.95])[0]
# Time to thrombectomy results
travel_time = np.concatenate((node_results[:, 11],
node_results[:, 24]))
# Result 3: Mean time to thrombectomy
self.results[i, 3] = \
np.sum(travel_time * patients) / np.sum(patients)
# Result 4: Maximum time to thrombectomy
self.results[i, 4] = np.max(travel_time[mask2])
            # Result 24: Median time to thrombectomy
self.results[i, 24] = self.calculate_weighted_percentiles(
travel_time, patients, [0.5])[0]
            # Result 17: 95th percentile time to thrombectomy
self.results[i, 17] = self.calculate_weighted_percentiles(
travel_time, patients, [0.95])[0]
# Result 5 & 6: Minimum and maximum thrombolysis admissions to any
# one hospital
if data.vary_et_centres:
# Have all hospitals open for IVT except forced closed ones
mask = data.hospitals['Fixed'] != -1
else:
mask = population[i, :] == 1
admissions_to_used_units = thrombolysis_admissions_by_hospital[mask]
self.results[i, 5] = np.min(admissions_to_used_units)
self.results[i, 6] = np.max(admissions_to_used_units)
# Result 7 & 8: Minimum and maximum thrombectomy admissions to any
# one hospital.
mask = data.thrombectomy_boolean
admissions_to_used_units = thrombectomy_admissions_by_hospital[mask]
self.results[i, 7] = np.min(admissions_to_used_units)
self.results[i, 8] = np.max(admissions_to_used_units)
# Result 9: Proportion patients within target thrombolysis time
target_met = np.concatenate(
(node_results[:, 35], node_results[:, 41]))
self.results[i, 9] = (np.sum(patients * target_met) /
np.sum(patients))
# Result 10: Proportion patients attending unit with target first
# admissions
target_met = np.concatenate(
(node_results[:, 34], node_results[:, 40]))
self.results[i, 10] = (np.sum(patients * target_met) /
np.sum(patients))
# Result 11: Proportion patients meeting both thrombolysis targets
target_met = np.concatenate(
(node_results[:, 36], node_results[:, 42]))
self.results[i, 11] = (np.sum(patients * target_met) /
np.sum(patients))
# Result 12: Proportion patients within target thrombectomy time
target_met = np.concatenate(
(node_results[:, 38], node_results[:, 44]))
self.results[i, 12] = (np.sum(patients * target_met) /
np.sum(patients))
# Result 13: Proportion patients attending unit with target
# thrombectomy admissions
target_met = np.concatenate(
(node_results[:, 37], node_results[:, 43]))
self.results[i, 13] = (np.sum(patients * target_met) /
np.sum(patients))
# Result 14: Proportion patients meeting both thrombectomy targets
target_met = np.concatenate(
(node_results[:, 39], node_results[:, 45]))
self.results[i, 14] = (np.sum(patients * target_met) /
np.sum(patients))
# Result 15: Proportion patients meeting all targets
areas_meeting_all_targets = \
np.min(node_results[:, [36, 39, 42, 45]], axis=1)
self.results[i, 15] = \
np.sum(areas_meeting_all_targets * admissions) / \
np.sum(admissions)
# Result 18: Total transfers
self.results[i, 18] = np.sum(node_results[:, 12])
# Result 19: Total transfer time
self.results[i, 19] = np.sum(node_results[:, 13])
# Result 20-22: Net clinical outcome
# Note clinical outcome is based on patients in -ve and +ve
# diagnostic test groups before removal on non-tested patients
# Result 20 No treatment
patients = np.concatenate((node_results[:, 7], node_results[:, 16]))
outcome = np.concatenate((node_results[:, 14], node_results[:, 17]))
self.results[i, 20] = np.sum(outcome * patients) / np.sum(patients)
self.results[i, 20] *= 1000
# Result 22 average added good outcomes per 1,000 patients
outcome = np.concatenate((node_results[:, 15], node_results[:, 27]))
self.results[i, 22] = np.sum(outcome * patients) / np.sum(patients)
# Express outcome as good outcomes per 1000 patients
self.results[i, 22] *= 1000
# Total good oucomes with treatment
self.results[i, 21] = self.results[i, 20] + self.results[i, 22]
# Results 25-28 added clinical outcome ranges
self.results[i, 25] = np.min(node_results[:, 46]) * 1000
self.results[i, 26] = self.calculate_weighted_percentiles(
node_results[:, 46], data.admissions.values, [0.05])[0] * 1000
self.results[i, 27] = self.calculate_weighted_percentiles(
node_results[:, 46], data.admissions.values, [0.95])[0] * 1000
self.results[i, 28] = np.max(node_results[:, 46]) * 1000
return
@staticmethod
def calculate_weighted_percentiles(data, wt, percentiles):
"""Calculate weighted percentiles. Multiple percentiles may be passed as
a list"""
assert np.greater_equal(percentiles,
0.0).all(), "Percentiles less than zero"
assert np.less_equal(percentiles,
1.0).all(), "Percentiles greater than one"
data = np.asarray(data)
assert len(data.shape) == 1
if wt is None:
wt = np.ones(data.shape, np.float)
else:
wt = np.asarray(wt, np.float)
assert wt.shape == data.shape
assert np.greater_equal(wt, 0.0).all(), "Not all weights are " \
"non-negative."
assert len(wt.shape) == 1
n = data.shape[0]
assert n > 0
i = np.argsort(data)
sd = np.take(data, i, axis=0)
sw = np.take(wt, i, axis=0)
aw = np.add.accumulate(sw)
if not aw[-1] > 0:
raise ValueError('Nonpositive weight sum')
w = (aw - 0.5 * sw) / aw[-1]
spots = np.searchsorted(w, percentiles)
o = []
for (s, p) in zip(spots, percentiles):
if s == 0:
o.append(sd[0])
elif s == n:
o.append(sd[n - 1])
else:
f1 = (w[s] - p) / (w[s] - w[s - 1])
f2 = (p - w[s - 1]) / (w[s] - w[s - 1])
assert f1 >= 0 and f2 >= 0 and f1 <= 1 and f2 <= 1
assert abs(f1 + f2 - 1.0) < 1e-6
o.append(sd[s - 1] * f1 + sd[s] * f2)
return o
| 31,317 | 1,723 | 23 |
4351eb02058d506278cbef25731c9dde5adbe1dc | 3,127 | py | Python | python/setup.py | coderforlife/glia | eebd9987a2ccecc0c483023c692a434e02664f59 | [
"MIT"
] | null | null | null | python/setup.py | coderforlife/glia | eebd9987a2ccecc0c483023c692a434e02664f59 | [
"MIT"
] | null | null | null | python/setup.py | coderforlife/glia | eebd9987a2ccecc0c483023c692a434e02664f59 | [
"MIT"
] | 1 | 2018-07-10T17:49:45.000Z | 2018-07-10T17:49:45.000Z | #!/usr/bin/env python
from __future__ import division
#from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os, sys
from os.path import join
# We require Python v2.7 or newer
if sys.version_info[:2] < (2,7): raise RuntimeError("This requires Python v2.7 or newer")
# Prepare for compiling the source code
from distutils.ccompiler import get_default_compiler
import numpy
compiler_name = get_default_compiler() # TODO: this isn't the compiler that will necessarily be used, but is a good guess...
compiler_opt = {
'msvc' : ['/D_SCL_SECURE_NO_WARNINGS','/EHsc','/O2','/DNPY_NO_DEPRECATED_API=7','/bigobj','/openmp'],
# TODO: older versions of gcc need -std=c++0x instead of -std=c++11
'unix' : ['-std=c++11','-O3','-DNPY_NO_DEPRECATED_API=7','-fopenmp'], # gcc/clang (whatever is system default)
'mingw32' : ['-std=c++11','-O3','-DNPY_NO_DEPRECATED_API=7','-fopenmp'],
'cygwin' : ['-std=c++11','-O3','-DNPY_NO_DEPRECATED_API=7','-fopenmp'],
}
linker_opt = {
'msvc' : [],
'unix' : ['-fopenmp'], # gcc/clang (whatever is system default)
'mingw32' : ['-fopenmp'],
'cygwin' : ['-fopenmp'],
}
np_inc = numpy.get_include()
import pysegtools
cy_inc = join(os.path.dirname(pysegtools.__file__), 'general', 'cython') # TODO: better way to get this
src_ext = '.cpp'
# Find and use Cython if available
try:
from distutils.version import StrictVersion
import Cython.Build
if StrictVersion(Cython.__version__) >= StrictVersion('0.22'):
src_ext = '.pyx'
except ImportError:
# Finally we get to run setup
try: from setuptools import setup
except ImportError: from distutils.core import setup
setup(name='glia',
version='0.1',
author='Jeffrey Bush',
author_email='jeff@coderforlife.com',
packages=['glia'],
setup_requires=['numpy>=1.7'],
install_requires=['numpy>=1.7','scipy>=0.16','pysegtools>=0.1'],
use_2to3=True, # the code *should* support Python 3 once run through 2to3 but this isn't tested
zip_safe=False,
package_data = { '': ['*.pyx', '*.pyxdep', '*.pxi', '*.pxd', '*.h', '*.txt'], }, # Make sure all Cython files are wrapped up with the code
ext_modules = cythonize([
create_ext('glia.__contours_around_labels'),
create_ext('glia.__count_pairs'),
])
)
| 39.0875 | 144 | 0.658778 | #!/usr/bin/env python
from __future__ import division
#from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os, sys
from os.path import join
# We require Python v2.7 or newer
if sys.version_info[:2] < (2,7): raise RuntimeError("This requires Python v2.7 or newer")
# Prepare for compiling the source code
from distutils.ccompiler import get_default_compiler
import numpy
compiler_name = get_default_compiler() # TODO: this isn't the compiler that will necessarily be used, but is a good guess...
compiler_opt = {
'msvc' : ['/D_SCL_SECURE_NO_WARNINGS','/EHsc','/O2','/DNPY_NO_DEPRECATED_API=7','/bigobj','/openmp'],
# TODO: older versions of gcc need -std=c++0x instead of -std=c++11
'unix' : ['-std=c++11','-O3','-DNPY_NO_DEPRECATED_API=7','-fopenmp'], # gcc/clang (whatever is system default)
'mingw32' : ['-std=c++11','-O3','-DNPY_NO_DEPRECATED_API=7','-fopenmp'],
'cygwin' : ['-std=c++11','-O3','-DNPY_NO_DEPRECATED_API=7','-fopenmp'],
}
linker_opt = {
'msvc' : [],
'unix' : ['-fopenmp'], # gcc/clang (whatever is system default)
'mingw32' : ['-fopenmp'],
'cygwin' : ['-fopenmp'],
}
np_inc = numpy.get_include()
import pysegtools
cy_inc = join(os.path.dirname(pysegtools.__file__), 'general', 'cython') # TODO: better way to get this
src_ext = '.cpp'
def create_ext(name, dep=None, src=None, inc=None, lib=None, objs=None):
    """Build a distutils ``Extension`` for the dotted module *name*.

    The list parameters default to ``None`` rather than shared mutable
    lists (the mutable-default-argument pitfall); callers that pass
    explicit lists are unaffected.

    Parameters
    ----------
    name : str
        Dotted module name; its path also determines the primary source
        file (with the module-level ``src_ext`` suffix).
    dep : list, optional
        Extra files the extension depends on.
    src : list, optional
        Additional source files beyond the generated one.
    inc : list, optional
        Extra include directories (numpy's and pysegtools' cython include
        dirs are always prepended).
    lib : list, optional
        Library search directories.
    objs : list, optional
        Extra object files to link.
    """
    from distutils.extension import Extension
    dep = [] if dep is None else dep
    src = [] if src is None else src
    inc = [] if inc is None else inc
    lib = [] if lib is None else lib
    objs = [] if objs is None else objs
    return Extension(
        name=name,
        depends=dep,
        sources=[join(*name.split('.'))+src_ext]+src,
        define_macros=[('NPY_NO_DEPRECATED_API','7'),],
        include_dirs=[np_inc,cy_inc]+inc,
        library_dirs=lib,
        extra_objects=objs,
        extra_compile_args=compiler_opt.get(compiler_name, []),
        extra_link_args=linker_opt.get(compiler_name, []),
        language='c++',
    )
# Find and use Cython if available. With Cython >= 0.22 we build from the
# .pyx sources and wrap cythonize() so the pysegtools cython include dir is
# always on the include path; otherwise extensions are built from the
# pre-generated .cpp files and cythonize() becomes a pass-through.
try:
    from distutils.version import StrictVersion
    import Cython.Build
    if StrictVersion(Cython.__version__) >= StrictVersion('0.22'):
        src_ext = '.pyx'
        def cythonize(*args, **kwargs):
            # inject the shared pysegtools include dir into every call
            kwargs.setdefault('include_path', []).append(cy_inc)
            return Cython.Build.cythonize(*args, **kwargs)
except ImportError:
    # no Cython available: hand the extension list back unchanged
    def cythonize(exts, *args, **kwargs): return exts
# Finally we get to run setup
try: from setuptools import setup
except ImportError: from distutils.core import setup
setup(name='glia',
version='0.1',
author='Jeffrey Bush',
author_email='jeff@coderforlife.com',
packages=['glia'],
setup_requires=['numpy>=1.7'],
install_requires=['numpy>=1.7','scipy>=0.16','pysegtools>=0.1'],
use_2to3=True, # the code *should* support Python 3 once run through 2to3 but this isn't tested
zip_safe=False,
package_data = { '': ['*.pyx', '*.pyxdep', '*.pxi', '*.pxd', '*.h', '*.txt'], }, # Make sure all Cython files are wrapped up with the code
ext_modules = cythonize([
create_ext('glia.__contours_around_labels'),
create_ext('glia.__count_pairs'),
])
)
| 670 | 0 | 78 |
fd44d9b13879865e635a12ecc37b9296cb358b88 | 1,796 | py | Python | test_boolop.py | carltraveler/FormalVerfication | 52b7d15e066a477b732fb80cade4fbd64f60b879 | [
"MIT"
] | null | null | null | test_boolop.py | carltraveler/FormalVerfication | 52b7d15e066a477b732fb80cade4fbd64f60b879 | [
"MIT"
] | null | null | null | test_boolop.py | carltraveler/FormalVerfication | 52b7d15e066a477b732fb80cade4fbd64f60b879 | [
"MIT"
] | null | null | null | OntCversion = '2.0.0'
#!/usr/bin/env python3
from ontology.builtins import print
| 17.96 | 82 | 0.389198 | OntCversion = '2.0.0'
#!/usr/bin/env python3
from ontology.builtins import print
def VaasAssert(expr):
    """Raise a generic Exception("AssertError") when *expr* is falsy."""
    if expr:
        return
    raise Exception("AssertError")
def main():
    """Exercise short-circuit and/or evaluation in the Ontology smart
    contract VM: each case sets a, b, evaluates a fixed boolean
    expression, records which branch ran in t, and checks t with
    VaasAssert.
    """
    # case 1: first clause (a == 2) true -> expression true
    a = 2
    b = 4
    t = 0
    if a == 2 or (a == 5 and b == 4) or a == 3 and a == 7:
        t = 1
        print("or in")
    else:
        t = 2
        print("or not in")
    VaasAssert(t == 1)
    # case 2: parenthesized (a == 5 and b == 4) true -> expression true
    a = 5
    b = 4
    if a == 2 or (a == 5 and b == 4) or a == 3 and a == 7:
        t = 3
        print("or in")
    else:
        t = 4
        print("or not in")
    VaasAssert(t == 3)
    # case 3: a == 3 but a != 7, so the trailing "and" fails -> false
    a = 3
    b = 1000
    if a == 2 or (a == 5 and b == 4) or a == 3 and a == 7:
        t = 5
        print("or in")
    else:
        t = 6
        print("or not in")
    VaasAssert(t == 6)
    # case 4: extra "or a == 9" clause rescues an otherwise-false group
    a = 9
    if (a == 2 or (a == 5 and b == 4) or a == 3 and a == 7 )or a == 9:
        t = 7
        print("or in")
    else:
        t = 8
        print("or not in")
    VaasAssert(t == 7)
    # case 5: a == 7 alone satisfies no clause (needs a == 3 too) -> false
    a = 7
    if (a == 2 or (a == 5 and b == 4) or a == 3 and a == 7 )or a == 9:
        t = 9
        print("or in")
    else:
        t = 10
        print("or not in")
    VaasAssert(t == 10)
    # case 6: five-way or-chain, last clause (a == 10) true
    a = 10
    if a == 2 or (a == 5 and b == 4) or (a == 3 and a == 7) or a == 9 or a == 10:
        t = 11
        print("or in")
    else:
        t = 12
        print("or not in")
    VaasAssert(t == 11)
    # case 7: a == 8 matches nothing -> false
    a = 8
    b = 4
    if a == 2 or (a == 5 and b == 4) or a == 3 and a == 7:
        t = 13
        print("or in")
    else:
        t = 14
        print("or not in")
    VaasAssert(t == 14)
    # case 8: same inputs as case 2, so t becomes 15 here
    a = 5
    b = 4
    if a == 2 or (a == 5 and b == 4) or a == 3 and a == 7:
        t = 15
        print("or in")
    else:
        t = 16
        print("or not in")
    # NOTE(review): t is 15 at this point, so this assert raises and the
    # final print is never reached — presumably exercising the failure
    # path of the verifier; confirm intent before changing.
    VaasAssert(t == 15 + 1)
    print("bool test ok")
| 1,670 | 0 | 45 |
201cd4c29338ced795461cb4c0ab37bf9eaa3359 | 91 | py | Python | setup.py | i2mint/umpyre | d6cca86854a465c5e4cee7299981dfdcf09d033a | [
"Apache-2.0"
] | 10 | 2019-03-04T15:02:15.000Z | 2020-09-16T11:57:30.000Z | setup.py | i2mint/umpyre | d6cca86854a465c5e4cee7299981dfdcf09d033a | [
"Apache-2.0"
] | 60 | 2019-08-08T14:38:56.000Z | 2021-04-08T15:19:11.000Z | setup.py | i2mint/umpyre | d6cca86854a465c5e4cee7299981dfdcf09d033a | [
"Apache-2.0"
] | 10 | 2019-08-08T03:15:35.000Z | 2020-06-18T16:19:18.000Z | from setuptools import setup
setup() # Note: Everything should be in the local setup.cfg
| 22.75 | 60 | 0.769231 | from setuptools import setup
setup() # Note: Everything should be in the local setup.cfg
| 0 | 0 | 0 |
57ec56fdff7ddd3556b3180e3b58fe73f4cdeaaf | 1,720 | py | Python | cdk/status_table.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | 18 | 2018-02-05T20:03:30.000Z | 2022-01-19T00:58:00.000Z | cdk/status_table.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | 31 | 2018-01-19T14:21:03.000Z | 2021-09-07T02:04:40.000Z | cdk/status_table.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | 5 | 2019-12-04T17:44:10.000Z | 2021-02-23T03:14:24.000Z | from aws_cdk import core, aws_dynamodb as dynamodb
from . import names
| 34.4 | 73 | 0.584884 | from aws_cdk import core, aws_dynamodb as dynamodb
from . import names
class ZipStatus(core.Construct):
    def __init__(self, scope: core.Construct, id: str):
        """
        On demand requests status table
        """
        super().__init__(scope, id)
        stack = core.Stack.of(self).stack_name

        # Main table: one item per zip request, keyed by zip_id, with a
        # TTL attribute ("expiration") so old entries age out.
        self.table = dynamodb.Table(
            self,
            "table",
            table_name=f"{stack}-{names.PIPELINE_STATUS_TABLE}",
            partition_key=dynamodb.Attribute(
                name="zip_id",
                type=dynamodb.AttributeType.STRING,
            ),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            time_to_live_attribute="expiration",
        )

        # Secondary index for lookups by meeting id.
        meeting_key = dynamodb.Attribute(
            name="meeting_id",
            type=dynamodb.AttributeType.NUMBER,
        )
        self.table.add_global_secondary_index(
            index_name="mid_index",
            partition_key=meeting_key,
        )

        # Secondary index for "latest updates" searches. DynamoDB only
        # scans/filters 1MB per page, so an exact-match "update_date"
        # partition key narrows results below 1MB (1MB spans several days
        # of entries) before filtering; "update_time" orders within a day.
        date_key = dynamodb.Attribute(
            name="update_date",
            type=dynamodb.AttributeType.STRING,
        )
        time_key = dynamodb.Attribute(
            name="update_time",
            type=dynamodb.AttributeType.NUMBER,
        )
        self.table.add_global_secondary_index(
            index_name="time_index",
            partition_key=date_key,
            sort_key=time_key,
        )
| 0 | 1,625 | 23 |
3f1eecf7f483af9de340aba795108691561cc25c | 2,656 | py | Python | advent_of_code_2016/day 24/solution.py | jvanelteren/advent_of_code | 3c547645250adb2d95ebac43d5d2111cdf9b09e9 | [
"MIT"
] | 1 | 2021-12-23T11:24:11.000Z | 2021-12-23T11:24:11.000Z | advent_of_code_2016/day 24/solution.py | jvanelteren/advent_of_code | 3c547645250adb2d95ebac43d5d2111cdf9b09e9 | [
"MIT"
] | null | null | null | advent_of_code_2016/day 24/solution.py | jvanelteren/advent_of_code | 3c547645250adb2d95ebac43d5d2111cdf9b09e9 | [
"MIT"
] | null | null | null | #%%
# always check the input carefully after opening and splitting
# split r and c indices for simplicity (instead of combining them into a tuple)
# when using re, be sure to thing if greedy or nongreedy parsing is necessary
# don't use re when split() suffices, whitespaces can be casted to int
# make sure to experiment with one item from a list instead of and immediate for loop
# !when there is a clear test case available, test it!
# scan over the input a bit more carefully, it may be different than the test examples
# when plotting, use plt.imshow (not plt.show)
# i should look into itertools more
# when refractoring a test case into the real code, check the indices are not hardcoded!
# use better descriptive names. also unpack if it makes code more readable for example in list of tuples
# with regex you can numbers pretty easily in a line. and -? to add possible negatives
# sometimes its easier to brute force bfs than to figure out an insane algo yourself
# at beginning look at example first and highlighted texts
# use the easiest way to calculate solution (is set A is asked, dont calc set total - set B)
# when defining multiple functions, but an assert testcase below it that closely resembles input
# use a class with __hash__ to store mutable objects
# with assembly analysis make sure to print registers at important jump points
# don't implement an algo if you are pretty sure it isnt going to work
# %%
f = open('input.txt','r')
lines = [list(line.rstrip()) for line in f]
print(f'len lines {len(lines)} first item {lines[0]}')
import numpy as np
grid = np.array(lines)
grid
import networkx as nx
G = nx.Graph()
for cell,value in np.ndenumerate(grid):
G.add_node(cell)
for cell,value in np.ndenumerate(grid):
if value != '#':
for delta in [[0,1],[1,0],[0,-1],[-1,0]]:
r2 = cell[0]+delta[0]
c2 = cell[1]+delta[1]
if 0<= r2 < grid.shape[0] and 0<= c2 < grid.shape[1] and grid[r2,c2]!='#':
G.add_edge(cell,(r2,c2))
# %%
poi = {}
for cell,value in np.ndenumerate(grid):
if value.isdigit(): poi[cell] = value
start = [k for k,v in poi.items() if v == '0'][0]
total_dis = 0
for fromcell in poi:
distances = {}
for tocell in poi:
if fromcell != tocell:
distances[tocell]= nx.shortest_path_length(G, source=fromcell, target=tocell)
poi[fromcell]=distances
poi
best = 9999
from itertools import permutations
for p in permutations(poi,len(poi)):
if p[0]== start:
dis = 0
p = list(p)
p.append(start)
for i,j in zip(p,p[1:]):
dis+=poi[i][j]
if dis < best: best = dis
best
| 37.408451 | 104 | 0.682605 | #%%
# always check the input carefully after opening and splitting
# split r and c indices for simplicity (instead of combining them into a tuple)
# when using re, be sure to thing if greedy or nongreedy parsing is necessary
# don't use re when split() suffices, whitespaces can be casted to int
# make sure to experiment with one item from a list instead of and immediate for loop
# !when there is a clear test case available, test it!
# scan over the input a bit more carefully, it may be different than the test examples
# when plotting, use plt.imshow (not plt.show)
# i should look into itertools more
# when refractoring a test case into the real code, check the indices are not hardcoded!
# use better descriptive names. also unpack if it makes code more readable for example in list of tuples
# with regex you can numbers pretty easily in a line. and -? to add possible negatives
# sometimes its easier to brute force bfs than to figure out an insane algo yourself
# at beginning look at example first and highlighted texts
# use the easiest way to calculate solution (is set A is asked, dont calc set total - set B)
# when defining multiple functions, but an assert testcase below it that closely resembles input
# use a class with __hash__ to store mutable objects
# with assembly analysis make sure to print registers at important jump points
# don't implement an algo if you are pretty sure it isnt going to work
# %%
f = open('input.txt','r')
lines = [list(line.rstrip()) for line in f]
print(f'len lines {len(lines)} first item {lines[0]}')
import numpy as np
grid = np.array(lines)
grid
import networkx as nx
G = nx.Graph()
for cell,value in np.ndenumerate(grid):
G.add_node(cell)
for cell,value in np.ndenumerate(grid):
if value != '#':
for delta in [[0,1],[1,0],[0,-1],[-1,0]]:
r2 = cell[0]+delta[0]
c2 = cell[1]+delta[1]
if 0<= r2 < grid.shape[0] and 0<= c2 < grid.shape[1] and grid[r2,c2]!='#':
G.add_edge(cell,(r2,c2))
# %%
poi = {}
for cell,value in np.ndenumerate(grid):
if value.isdigit(): poi[cell] = value
start = [k for k,v in poi.items() if v == '0'][0]
total_dis = 0
for fromcell in poi:
distances = {}
for tocell in poi:
if fromcell != tocell:
distances[tocell]= nx.shortest_path_length(G, source=fromcell, target=tocell)
poi[fromcell]=distances
poi
best = 9999
from itertools import permutations
for p in permutations(poi,len(poi)):
if p[0]== start:
dis = 0
p = list(p)
p.append(start)
for i,j in zip(p,p[1:]):
dis+=poi[i][j]
if dis < best: best = dis
best
| 0 | 0 | 0 |
e6ce4c4a4551fe235d8b96db2a5fd1524b91945a | 9,536 | py | Python | pythonforandroid/recipes/mobileinsight/__init__.py | mobile-insight/python-for-android | 2a16cd1ecb800e5263e6dbd278377556ff56b2e5 | [
"MIT"
] | 3 | 2018-03-18T07:38:18.000Z | 2019-07-24T20:53:08.000Z | pythonforandroid/recipes/mobileinsight/__init__.py | mobile-insight/python-for-android | 2a16cd1ecb800e5263e6dbd278377556ff56b2e5 | [
"MIT"
] | 2 | 2017-10-28T07:15:32.000Z | 2020-11-22T02:39:42.000Z | pythonforandroid/recipes/mobileinsight/__init__.py | mobile-insight/python-for-android | 2a16cd1ecb800e5263e6dbd278377556ff56b2e5 | [
"MIT"
] | 8 | 2017-07-20T05:34:04.000Z | 2021-08-03T08:21:32.000Z | # MobileInsight Recipe for python-for-android
# Authors: Zengwen Yuan, Jiayao Li,
# Update for py3: Yunqi Guo, 2020.04
import glob
from os.path import exists, join, isdir, split
import sh
from pythonforandroid.logger import info, shprint, warning
from pythonforandroid.toolchain import Recipe, current_directory
LOCAL_DEBUG = False
recipe = MobileInsightRecipe()
| 40.236287 | 119 | 0.567534 | # MobileInsight Recipe for python-for-android
# Authors: Zengwen Yuan, Jiayao Li,
# Update for py3: Yunqi Guo, 2020.04
import glob
from os.path import exists, join, isdir, split
import sh
from pythonforandroid.logger import info, shprint, warning
from pythonforandroid.toolchain import Recipe, current_directory
LOCAL_DEBUG = False
class MobileInsightRecipe(Recipe):
    """Builds the MobileInsight-core package (the mobile_insight Python
    package plus the dm_collector_c native extension) for Android.

    Sources come either from the upstream git repository or, when the
    module-level LOCAL_DEBUG flag is set, from a local checkout.
    """

    mi_git = 'https://github.com/mobile-insight/mobileinsight-core.git'
    mi_branch = 'dev-py3'
    # local checkout used instead of git when LOCAL_DEBUG is True
    local_src = '/home/vagrant/mi-dev/mobileinsight-core'
    version = '5.0'
    toolchain_version = 4.9  # default GCC toolchain version we try to use
    depends = ['python3']  # any other recipe names that must be built before this one

    def get_newest_toolchain(self, arch):
        """Find the newest GCC toolchain shipped with the NDK for *arch*
        and remember its version in self.toolchain_version.

        Falls back to the class default when no GCC toolchain is found
        (the previous code tested the wrong list and could raise
        IndexError/NameError in that case).
        """
        toolchain_versions = []
        toolchain_prefix = arch.toolchain_prefix
        toolchain_path = join(self.ctx.ndk_dir, 'toolchains')
        if isdir(toolchain_path):
            toolchain_contents = glob.glob('{}/{}-*'.format(toolchain_path,
                                                            toolchain_prefix))
            toolchain_versions = [split(path)[-1][len(toolchain_prefix) + 1:]
                                  for path in toolchain_contents]
        else:
            warning('Could not find toolchain subdirectory!')
        toolchain_versions.sort()
        # GCC toolchain directories carry a numeric version suffix;
        # clang toolchains begin with a letter and are skipped here
        toolchain_versions_gcc = [v for v in toolchain_versions
                                  if v[0].isdigit()]
        if toolchain_versions_gcc:
            # use the latest GCC toolchain found
            self.toolchain_version = toolchain_versions_gcc[-1]
        else:
            warning('Could not find any toolchain for {}!'.format(
                toolchain_prefix))

    def get_recipe_env(self, arch):
        """Extend the base recipe env with LLVM libc++ include/link flags
        and the target python's headers and library."""
        env = super(MobileInsightRecipe, self).get_recipe_env(arch)
        env['CFLAGS'] += ' -fPIC'
        env['CFLAGS'] += ' -I{ndk_dir}/sources/cxx-stl/llvm-libc++/include'.format(
            ndk_dir=self.ctx.ndk_dir,
            toolchain_version=self.toolchain_version)
        env['CFLAGS'] += ' -I{ndk_dir}/sources/cxx-stl/llvm-libc++/libs/{arch}/include'.format(
            ndk_dir=self.ctx.ndk_dir,
            toolchain_version=self.toolchain_version,
            arch=arch)
        env['CFLAGS'] += ' -I{}'.format(
            self.ctx.python_recipe.include_root(arch.arch)
        )
        env['LDFLAGS'] += ' -L{ndk_dir}/sources/cxx-stl/llvm-libc++/libs/{arch}'.format(
            ndk_dir=self.ctx.ndk_dir,
            toolchain_version=self.toolchain_version,
            arch=arch)
        env['LDFLAGS'] += ' -L{} -lpython{}'.format(
            self.ctx.python_recipe.link_root(arch.arch),
            self.ctx.python_recipe.major_minor_version_string,
        )
        env['LDFLAGS'] += ' -shared'
        env['LDFLAGS'] += ' -lc++_shared -llog'
        # keep only the strip binary, dropping any appended flags
        env['STRIP'] = env['STRIP'].split()[0]
        return env

    def prebuild_arch(self, arch):
        """Fetch fresh MobileInsight sources into the build dir and detect
        the toolchain to use."""
        super(MobileInsightRecipe, self).prebuild_arch(arch)
        build_dir = self.get_build_dir(arch.arch)
        tmp_dir = join(build_dir, 'mi_tmp')
        info("Cleaning old MobileInsight-core sources at {}".format(build_dir))
        try:
            shprint(sh.rm, '-r',
                    build_dir,
                    _tail=20,
                    _critical=True)
        except Exception:
            # the build dir may simply not exist yet
            pass
        if not LOCAL_DEBUG:
            info("Cloning MobileInsight-core sources from {}".format(self.mi_git))
            shprint(sh.git,
                    'clone', '-b',
                    self.mi_branch,
                    '--depth=1',
                    self.mi_git,
                    tmp_dir,
                    _tail=20,
                    _critical=True)
        else:
            warning("Debugging using local sources of MobileInsight at {}".format(self.local_src))
            shprint(sh.mkdir,
                    build_dir,
                    _tail=20,
                    _critical=True)
            shprint(sh.mkdir,
                    tmp_dir,
                    _tail=20,
                    _critical=True)
            shprint(sh.cp,
                    '-fr',
                    self.local_src,
                    tmp_dir,
                    _tail=20,
                    _critical=True)
            tmp_dir = join(tmp_dir, 'mobileinsight-core')
        # keep only the python package and the native extension sources
        shprint(sh.mv,
                join(tmp_dir, 'mobile_insight'),
                build_dir,
                _tail=20,
                _critical=True)
        shprint(sh.mv,
                join(tmp_dir, 'dm_collector_c'),
                build_dir,
                _tail=20,
                _critical=True)
        # remove the rest of the checkout
        shprint(sh.rm, '-r', tmp_dir,
                _tail=20,
                _critical=True)
        self.get_newest_toolchain(arch)

    def build_arch(self, arch):
        """Compile and install mobile_insight with the target hostpython,
        strip the built shared objects, and stage libc++_shared.so."""
        super(MobileInsightRecipe, self).build_arch(arch)
        env = self.get_recipe_env(arch)
        # alternate, unused build path: self.build_cython_components(arch)
        with current_directory(self.get_build_dir(arch.arch)):
            hostpython = sh.Command(self.ctx.hostpython)
            # provide the Application.mk / setup.py shipped with the recipe
            app_mk = join(self.get_build_dir(arch.arch), 'Application.mk')
            app_setup = join(self.get_build_dir(arch.arch), 'setup.py')
            if not exists(app_mk):
                shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)
            if not exists(app_setup):
                shprint(sh.cp, join(self.get_recipe_dir(), 'setup.py'), app_setup)
            shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env, _tail=10, _critical=True)
            shprint(hostpython, 'setup.py', 'install', '-O2',
                    '--root={}'.format(self.ctx.get_python_install_dir()),
                    '--install-lib=.',
                    _env=env, _tail=10, _critical=True)
            build_lib = glob.glob('./build/lib*')
            assert len(build_lib) == 1
            # stripping debug symbols shrinks the shared objects a lot
            warning('MobileInsight -- stripping mobileinsight')
            shprint(sh.find, build_lib[0], '-name', '*.so', '-exec', env['STRIP'], '{}', ';', _tail=20, _critical=True)
        try:
            warning('Copying LLVM libc++ STL shared lib to {libs_dir}/{arch}'.format(
                libs_dir=self.ctx.libs_dir,
                arch=arch))
            shprint(sh.cp,
                    '{ndk_dir}/sources/cxx-stl/llvm-libc++/libs/{arch}/libc++_shared.so'.format(
                        ndk_dir=self.ctx.ndk_dir,
                        toolchain_version=self.toolchain_version,
                        arch=arch),
                    '{libs_dir}/{arch}'.format(
                        libs_dir=self.ctx.libs_dir,
                        arch=arch))
        except Exception:
            warning('Failed to copy LLVM libc++ STL shared lib!')

    def build_cython_components(self, arch):
        """Alternate build path that runs cython by hand before building.

        NOTE(review): only referenced from a comment in build_arch, and it
        hardcodes the 'armeabi' arch — confirm before reviving it.
        """
        env = self.get_recipe_env(arch)
        with current_directory(self.get_build_dir(arch.arch)):
            info('hostpython is ' + self.ctx.hostpython)
            hostpython = sh.Command(self.ctx.hostpython)
            app_mk = join(self.get_build_dir(arch.arch), 'Application.mk')
            if not exists(app_mk):
                shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)
            app_setup = join(self.get_build_dir(arch.arch), 'setup.py')
            if not exists(app_setup):
                shprint(sh.cp, join(self.get_recipe_dir(), 'setup.py'), app_setup)
            # This first attempt *will* fail, because cython isn't
            # installed in the hostpython
            try:
                shprint(hostpython, 'setup.py', 'build_ext', _env=env)
            except sh.ErrorReturnCode_1:
                pass
            # ...so we manually run cython from the user's system
            shprint(sh.find, self.get_build_dir('armeabi'), '-iname', '*.pyx', '-exec',
                    self.ctx.cython, '{}', ';', _env=env)
            # now cython has already been run so the build works
            shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env)
            # stripping debug symbols lowers the file size a lot
            build_lib = glob.glob('./build/lib*')
            shprint(sh.find, build_lib[0], '-name', '*.o', '-exec',
                    env['STRIP'], '{}', ';', _env=env)

    def postbuild_arch(self, arch):
        """Post-build hook; cleanup of the bundled build tools is a TODO."""
        super(MobileInsightRecipe, self).postbuild_arch(arch)
        # TODO: remove mobileinsight build tools, e.g.
        # $BUILD_PATH/python-install/lib/python*/site-packages/mobile_insight/tools


recipe = MobileInsightRecipe()
| 8,631 | 513 | 23 |
57b86fa3f074a81d2b7d5d4df9192bf1947b5fc3 | 2,155 | py | Python | src/hdmf/common/io/table.py | satra/hdmf | fab5660b1e009151980939e266e63a6c408064aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/hdmf/common/io/table.py | satra/hdmf | fab5660b1e009151980939e266e63a6c408064aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/hdmf/common/io/table.py | satra/hdmf | fab5660b1e009151980939e266e63a6c408064aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | from ...utils import docval, getargs
from ...build import ObjectMapper, BuildManager
from ...spec import Spec
from ..table import DynamicTable, VectorIndex
from .. import register_map
@register_map(DynamicTable)
| 46.847826 | 112 | 0.62877 | from ...utils import docval, getargs
from ...build import ObjectMapper, BuildManager
from ...spec import Spec
from ..table import DynamicTable, VectorIndex
from .. import register_map
@register_map(DynamicTable)
class DynamicTableMap(ObjectMapper):
    """ObjectMapper for DynamicTable: routes the container's columns to
    the 'VectorData' spec and resolves column-typed attribute values."""

    def __init__(self, spec):
        super().__init__(spec)
        # map the container's 'columns' field onto the VectorData data type
        vector_data_spec = spec.get_data_type('VectorData')
        self.map_spec('columns', vector_data_spec)

    @ObjectMapper.object_attr('colnames')
    def attr_columns(self, container, manager):
        # a table whose columns are all empty is written with no colnames
        if all(not col for col in container.columns):
            return tuple()
        return container.colnames

    @docval({"name": "spec", "type": Spec, "doc": "the spec to get the attribute value for"},
            {"name": "container", "type": DynamicTable, "doc": "the container to get the attribute value from"},
            {"name": "manager", "type": BuildManager, "doc": "the BuildManager used for managing this build"},
            returns='the value of the attribute')
    def get_attr_value(self, **kwargs):
        ''' Get the value of the attribute corresponding to this spec from the given container '''
        spec, container, manager = getargs('spec', 'container', 'manager', kwargs)
        attr_value = super().get_attr_value(spec, container, manager)
        # fall back to a by-name column lookup when the generic mapper
        # found nothing but the table contains a matching column
        if attr_value is None and spec.name in container:
            if spec.data_type_inc == 'VectorData':
                attr_value = container[spec.name]
                # an indexed column resolves to its target VectorData
                if isinstance(attr_value, VectorIndex):
                    attr_value = attr_value.target
            elif spec.data_type_inc == 'DynamicTableRegion':
                attr_value = container[spec.name]
                if isinstance(attr_value, VectorIndex):
                    attr_value = attr_value.target
                # a region whose table was never resolved cannot be built
                if attr_value.table is None:
                    msg = "empty or missing table for DynamicTableRegion '%s' in DynamicTable '%s'" %\
                          (attr_value.name, container.name)
                    raise ValueError(msg)
            elif spec.data_type_inc == 'VectorIndex':
                attr_value = container[spec.name]
        return attr_value
| 283 | 1,636 | 22 |
edc3328d3f4cdcbd6bab260b172230cb31df92e3 | 48,628 | py | Python | pyutils/simple_model.py | eltrompetero/innovation | b59617f5f1486d7f4caf620192d5e8d95cf30f7f | [
"MIT"
] | null | null | null | pyutils/simple_model.py | eltrompetero/innovation | b59617f5f1486d7f4caf620192d5e8d95cf30f7f | [
"MIT"
] | null | null | null | pyutils/simple_model.py | eltrompetero/innovation | b59617f5f1486d7f4caf620192d5e8d95cf30f7f | [
"MIT"
] | null | null | null | # ====================================================================================== #
# Minimal innovation model implementations and solutions.
#
# Author : Eddie Lee, edlee@csh.ac.at
# ====================================================================================== #
from numba import njit, jit
from numba.typed import List
from scipy.optimize import minimize, root
from cmath import sqrt
from workspace.utils import save_pickle
from .utils import *
def L_1ode(G, ro, re, rd, I, alpha=1., Q=2, return_denom=False):
    """Calculate stationary lattice width accounting the first order correction
    (from Firms II pg. 140, 238).

    This is equivalent to
        (np.exp((1-rd_bar)/(1-ro_bar)) - 1) / np.exp((1-rd_bar)/(1-ro_bar)) * -G_bar * I
        / ro_bar / (1-rd_bar)
    and matches the numerical solution of the first-order equation with an
    x=-1 boundary condition.

    Parameters
    ----------
    G : float
    ro : float
    re : float
    rd : float
    I : float
    alpha : float, 1.
    Q : float 2.
    return_denom : bool, False

    Returns
    -------
    float
        Estimated lattice width.
    float (optional)
        Denominator for L calculation.
    """
    # rescale all rates by the expansion rate
    g_bar = G / re
    o_bar = ro / re
    d_bar = rd / re
    assert not hasattr(g_bar, '__len__')

    z = -(1 - d_bar*(Q-1)) / (1 - o_bar*(Q-1))
    scalar_input = not hasattr(z, '__len__')
    if scalar_input:
        z = np.array([z])

    # correction factor C, computed piecewise for numerical safety
    C = np.zeros_like(z)
    at_zero = z == 0          # analytic limit: C = 0 (left as initialized)
    diverged = z < -200       # exp(-z) would overflow; C -> inf
    C[diverged] = np.inf
    rest = (~at_zero) & (~diverged)
    C[rest] = (np.exp(-z[rest]) - 1 + z[rest]) / z[rest]

    denom = o_bar/I * ((1 + 1/(1-C))/(Q-1) - d_bar - o_bar/(1-C))
    width = -g_bar / denom

    if return_denom:
        return (width, denom) if not scalar_input else (width[0], denom)
    return width if not scalar_input else width[0]
def match_length(y1, y2, side1='l', side2='r'):
    """Zero-pad the shorter of two vectors so their lengths agree.

    Padding is appended on the right of each vector by default; passing 'l'
    for a side pads that vector on the left instead.

    Parameters
    ----------
    y1 : ndarray
    y2 : ndarray
    side1 : str, 'l'
    side2 : str, 'r'

    Returns
    -------
    ndarray
    ndarray
    """
    flip1 = side1 == 'l'
    flip2 = side2 == 'l'
    # reverse so that padding can always be appended at the end
    if flip1:
        y1 = y1[::-1]
    if flip2:
        y2 = y2[::-1]

    gap = y1.size - y2.size
    if gap > 0:
        y2 = np.concatenate((y2, np.zeros(gap)))
    elif gap < 0:
        y1 = np.concatenate((y1, np.zeros(-gap)))

    # undo the reversal
    if flip1:
        y1 = y1[::-1]
    if flip2:
        y2 = y2[::-1]
    return y1, y2
def _solve_G(G0, L, ro, re, rd, I):
"""Solve for G that return appropriate L."""
z = (re - rd) / (re * (ro/re-1))
C = (np.exp(-z)-1+z) / z
soln = minimize(cost, np.log(G0))
return np.exp(soln['x']), soln
def fit_dmft(data, L, dt, initial_params, **params_kw):
    """Best fit of dmft model at stationarity.

    Parameters
    ----------
    data : ndarray
        Target stationary density profile.
    L : float
        Target lattice width.
    dt : float
        Integration time step for FlowMFT.
    initial_params : list
        Initial (ro, re, rd, I). G shouldn't be specified; it is solved for.
    **params_kw
        Passed through to FlowMFT.

    Returns
    -------
    ndarray
        Best-fit (ro, re, rd, I).
    dict
        Output of scipy.optimize.minimize.
    """
    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(params):
        # parameters are optimized in log space to keep them positive
        ro, re, rd, I = np.exp(params)
        G = _solve_G(30, L, ro, re, rd, I)
        G = G[0]
        model = FlowMFT(G, ro, re, rd, I, dt, **params_kw)
        # NOTE(review): debug output; consider switching to logging
        print(f'{model.L=}')
        # reject parameter sets that fail to reproduce the target width
        if not np.isclose(model.L, L, atol=1e-2): return 1e30
        flag, mxerr = model.solve_stationary()
        if np.any(model.n<0): return 1e30
        try:
            c = np.linalg.norm(data-model.n)
        except ValueError:
            # shape mismatch between data and model profile
            return 1e30
        return c

    soln = minimize(cost, np.log(initial_params))
    return np.exp(soln['x']), soln
def _fit_dmft(data, dt, initial_params, **params_kw):
    """Best fit of dmft model at stationarity.

    Parameters
    ----------
    data : ndarray
        Target stationary density profile.
    dt : float
        Integration time step for FlowMFT.
    initial_params : list
        Initial (G, ro, re, rd, I); optimized in log space.
    **params_kw
        Passed through to FlowMFT.

    Returns
    -------
    ndarray
        Best-fit parameters.
    dict
        Output of scipy.optimize.minimize.
    """
    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(params):
        model = FlowMFT(*np.exp(params), dt, **params_kw)
        # reject unphysical or divergent lattice widths
        # (~ works here because np.isfinite returns np.bool_)
        if model.L<1 or ~np.isfinite(model.L): return 1e30
        flag, mxerr = model.solve_stationary()
        if np.any(model.n<0): return 1e30
        # align the two profiles at the left edge before comparing
        y1, y2 = match_length(data, model.n, 'l', 'l')
        c = np.linalg.norm(y1-y2)
        return c

    soln = minimize(cost, np.log(initial_params))
    return np.exp(soln['x']), soln
def fit_flow(x, data, initial_params, full_output=False, reverse=False, **params_kw):
    """Best fit of the flow model at stationarity.

    Parameters
    ----------
    x : ndarray
    data : ndarray
    initial_params : list
        Initial (A, B, G, ro, rd, I), where A and B rescale the x-axis and
        density, and re is fixed to 1.
    full_output : bool, False
        If True, also return soln dict.
    reverse : bool, False
        If True, align the model profile with the right edge of the data.
    **params_kw
        Passed through to FlowMFT.

    Returns
    -------
    ndarray
        Estimates for (A, B, G, ro, rd, I).
    dict
        If full_output is True. From scipy.optimize.minimize.
    """
    from scipy.interpolate import interp1d
    # interpolate data so the model profile can be sampled at rescaled x
    data_fun = interp1d(x, data, bounds_error=False, fill_value=0)

    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(params):
        A, B, G, ro, rd, I = params
        try:
            model = FlowMFT(G, ro, 1, rd, I, dt=.1, L_method=2, **params_kw)
            model.solve_stationary()
        except (AssertionError, ValueError):  # e.g. problem with stationarity and L
            return 1e30
        if reverse:
            modx = np.linspace(x.max()-model.n.size, x.max(), model.n.size)
            c = np.linalg.norm(data_fun(modx/A) - model.n*B)
        else:
            modx = np.arange(model.n.size)
            c = np.linalg.norm(data_fun(modx/A) - model.n*B)
        # handle overflow
        if np.isnan(c): c = 1e30
        return c

    # NOTE(review): the constraint 'fun' uses args[3] and args[4] (ro, rd) while
    # 'jac' marks indices 3 and 5 (ro, I) — one of them looks wrong; confirm.
    soln = minimize(cost, initial_params, bounds=[(0,np.inf),(0,np.inf),(1e-3, np.inf)]+[(1e-3,30)]*3,
                    method='SLSQP',
                    constraints=({'type':'ineq', 'fun':lambda args: args[3] - 2 + args[4] - 1e-3,
                                  'jac':lambda args: np.array([0,0,0,1,0,1])},))
    if full_output:
        return soln['x'], soln
    return soln['x']
def fit_ode(x, data, initial_params, full_output=False, **params_kw):
    """Best fit of ODE model at stationarity.

    Parameters
    ----------
    x : ndarray
    data : ndarray
    initial_params : list
        Initial (G, ro, rd, I); re is fixed to 1.
    full_output : bool, False
        If True, also return soln dict.
    **params_kw
        Passed through to ODE2.

    Returns
    -------
    ndarray
        Estimates for (G, ro, rd, I).
    dict
        If full_output is True. From scipy.optimize.minimize.
    """
    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(params):
        G, ro, rd, I = params
        try:
            model = ODE2(G, ro, 1, rd, I, **params_kw)
        except AssertionError:  # e.g. problem with stationarity and L
            return 1e30
        c = np.linalg.norm(data - model.n(x))
        return c

    # NOTE(review): the constraint 'fun' uses args[1] and args[2] (ro, rd) while
    # 'jac' marks indices 1 and 3 (ro, I) — one of them looks wrong; confirm.
    soln = minimize(cost, initial_params, bounds=[(1e-3, np.inf)]*4, method='SLSQP',
                    constraints=({'type':'ineq', 'fun':lambda args: args[1] - 2 + args[2] - 1e-3,
                                  'jac':lambda args: np.array([0,1,0,1])},))
    if full_output:
        return soln['x'], soln
    return soln['x']
def fit_piecewise_ode(x, y, initial_guess,
                      full_output=False):
    """Heuristic algorithm for fitting the ODE2 solution to a density function,
    matching the tail of the profile.

    Parameters
    ----------
    x : ndarray
    y : ndarray
        Density profile to fit.
    initial_guess : list
        First entry is the x offset (linear scale); the remaining entries are
        optimized in log space to keep them positive.
    full_output : bool, False

    Returns
    -------
    ndarray
        Elementwise exponential of the optimizer output.
    dict (optional)
        From scipy.optimize.minimize.
    """
    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(args):
        try:
            # first three args are x offset, width, and height units
            xoff = args[0]
            args = np.exp(args[1:])
            xunit, yunit = args[:2]
            # re is fixed to 1; splice it into the ODE2 argument list
            args = np.insert(args[2:], 2, 1)
            odemodel = ODE2(*args)
        except AssertionError:
            return 1e30
        peakx = odemodel.peak()
        if peakx<=0: return 1e30
        mody = odemodel.n(x / xunit - xoff) / yunit
        # only the tail of the profile is matched
        return np.linalg.norm(mody[-10:] - y[-10:])

    # convert parameters to log space to handle cutoff at 0
    soln = minimize(cost, [initial_guess[0]]+np.log(initial_guess[1:]).tolist(), method='powell')
    if full_output:
        return np.exp(soln['x']), soln
    return np.exp(soln['x'])
def _fit_piecewise_ode(peakx, peaky, n0, s0, initial_guess,
                       full_output=False):
    """Heuristic algorithm for fitting ODE2 to summary features of a density
    function: peak location/height, innovation-front density, and slope there.

    Parameters
    ----------
    peakx : float
        Peak location of the target profile.
    peaky : float
        Peak height of the target profile.
    n0 : float
        Density at the innovation front.
    s0 : float
        Slope at the innovation front.
    initial_guess : list
        First entry is the x offset (linear scale); the remaining entries are
        optimized in log space to keep them positive.
    full_output : bool, False

    Returns
    -------
    ndarray
        Elementwise exponential of the optimizer output.
    dict (optional)
        From scipy.optimize.minimize.
    """
    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(args):
        try:
            # first three args are x offset, width, and height units
            xoff = args[0]
            args = np.exp(args[1:])
            xunit, yunit = args[:2]
            # re is fixed to 1; splice it into the ODE2 argument list
            args = np.insert(args[2:], 2, 1)
            odemodel = ODE2(*args)
        except AssertionError:
            return 1e30
        x = odemodel.peak() / xunit
        if x<=0: return 1e30
        y = odemodel.n(x) / yunit
        if y<=0: return 1e30
        # weight location and height of peak, innov density, slope at innov
        return ((y-peaky)**2 + (x+xoff-peakx)**2 +
                (odemodel.n0/yunit - n0)**2 +
                (odemodel.d_complex(0).real*xunit/yunit - s0)**2)

    # convert parameters to log space to handle cutoff at 0
    soln = minimize(cost, [initial_guess[0]]+np.log(initial_guess[1:]).tolist(), method='powell')
    if full_output:
        return np.exp(soln['x']), soln
    return np.exp(soln['x'])
def solve_min_rd(G, ro, re, I, Q=2, a=1.,
                 tol=1e-10,
                 initial_guess=None,
                 full_output=False,
                 return_neg=False):
    """Solve for minimum rd that leads to divergent lattice, i.e. when denominator
    for L goes to 0 for a fixed re.

    Parameters
    ----------
    G : float
    ro : float
    re : float
    I : float
    Q : float, 2
    a : float, 1.
    tol : float, 1e-10
        Max residual of the root equation to count as converged.
    initial_guess : float, None
    full_output : bool, False
    return_neg : bool, False
        If True, do not round negative solutions up to 0.

    Returns
    -------
    float
        Minimum rd, 0. on the analytic continuation, or nan if the solver failed.
    dict (optional)
    """
    # use linear guess as default starting point
    initial_guess = initial_guess or (2*re+ro)
    # analytic continuation from the collapsed curve
    if re==0:
        if full_output:
            return 0., {}
        return 0.

    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(rd):
        n0 = (ro / re / I)**(1/a)
        z = (re/(Q-1)-rd) / re / (I*n0**a - 1/(Q-1))
        C = z**-1 * (np.exp(-z) - 1 + z)
        # squared residual of the divergence condition
        return (rd + ro - re / (Q-1) * (1 + 1/(1-C)))**2

    soln = minimize(cost, initial_guess)
    # if it didn't converge, return nan
    if soln['fun'] > tol:
        if full_output:
            return np.nan, soln
        return np.nan
    # neg values should be rounded up to 0
    elif not return_neg and soln['x'][0] < 0:
        if full_output:
            return 0., soln
        return 0.
    if full_output:
        return soln['x'][0], soln
    return soln['x'][0]
def solve_max_rd(G, ro, re, I, Q=2, a=1.,
                 tol=1e-10,
                 initial_guess=None,
                 full_output=False,
                 return_neg=False):
    """Solve for max rd that precedes collapsed lattice, i.e. when we estimate L~1.

    Parameters
    ----------
    G : float
    ro : float
    re : float
    I : float
    Q : float, 2
    a : float, 1.
    tol : float, 1e-10
        Max residual of the root equation to count as converged.
    initial_guess : float, None
    full_output : bool, False
    return_neg : bool, False
        If True, do not round negative solutions up to 0.

    Returns
    -------
    float
        Max rd, 0. on the analytic continuation, or nan if the solver failed.
    dict (optional)
    """
    # analytic continuation from the collapsed curve; this must come BEFORE the
    # default guess, which divides by re and would raise ZeroDivisionError at re=0
    if re==0:
        if full_output:
            return 0., {}
        return 0.
    # use linear guess as default starting point
    initial_guess = initial_guess or (G * (ro/re/I)**(-1/a) - ro + 2*re)

    # fix: the objective closure was missing (NameError on `cost`); restored here
    def cost(rd):
        n0 = (ro / re / I)**(1/a)
        z = (re/(Q-1)-rd) / re / (I*n0**a - 1/(Q-1))
        C = z**-1 * (np.exp(-z) - 1 + z)
        # squared residual of the collapse condition L ~ 1
        return (rd + ro - re / (Q-1) * (1 + 1/(1-C)) - G*(re*I)**(1./a))**2

    soln = minimize(cost, initial_guess)
    # if it didn't converge, return nan
    if soln['fun'] > tol:
        if full_output:
            return np.nan, soln
        return np.nan
    # neg values should be rounded up to 0
    elif not return_neg and soln['x'][0] < 0:
        if full_output:
            return 0., soln
        return 0.
    if full_output:
        return soln['x'][0], soln
    return soln['x'][0]
def flatten_phase_boundary(G, ro, re, I, Q, a,
                           re_data, rd_data,
                           poly_order=5):
    """Fit polynomial to min rd growth curve and use inverse transform to map
    relative to 1:1 line.

    Parameters
    ----------
    G: float
    ro : float
    re : ndarray
        Grid of expansion rates used to trace the min-rd phase boundary.
    I : float
    Q : float
    a : float
    re_data : ndarray
    rd_data : ndarray
    poly_order : int, 5
        Order of the polynomial fitted to the boundary curve.

    Returns
    -------
    ndarray
        Rescaled re_data.
    ndarray
        rd_data mapped through the inverse of the fitted polynomial; nan where
        no real inverse exists.
    """
    if not hasattr(re_data, '__len__'):
        re_data = np.array([re_data])
    if not hasattr(rd_data, '__len__'):
        rd_data = np.array([rd_data])
    # trace the phase boundary rd_min(re) over the given re grid
    y = np.array([solve_min_rd(G, ro, re_, I, Q=Q, return_neg=True) for re_ in re])
    # drop the first grid point, then any points where the solver failed (nan)
    y = y[1:]
    x = re[1:][~np.isnan(y)]
    y = y[~np.isnan(y)]
    # rescale x to interval [0,1]
    newx = re_data / x[-1]
    x /= x[-1]
    p = np.poly1d(np.polyfit(x, y, poly_order))
    newy = np.zeros_like(rd_data)
    for i, y_ in enumerate(rd_data):
        # invert the polynomial: take the root closest to the data value
        roots = (p-y_).roots
        ny = roots[np.abs(roots-y_).argmin()]
        if not ny.imag==0:
            # complex root means no real inverse at this rd
            newy[i] = np.nan
        else:
            newy[i] = ny
    return newx, newy.real
def L_denominator(ro, rd, Q=2):
    """Denominator for 2nd order calculation of L at stationarity. Parameters are
    rescaled rates.

    When denom goes negative, we have no physical solution for L, i.e. it is
    either non-stationary or it diverges; there is a weird boundary at ro=1.

    Parameters
    ----------
    ro : float or ndarray
        ro/re
    rd : float or ndarray
        rd/re
    Q : float, 2

    Returns
    -------
    ndarray
    """
    if not hasattr(ro, '__len__'):
        ro = np.array([ro])
    assert (ro>=0).all()
    if not hasattr(rd, '__len__'):
        rd = np.array([rd])
    assert (rd>=0).all()
    assert Q>=2

    z = -(1/(Q-1) - rd) / (1/(Q-1) - ro)
    C = np.zeros_like(z)
    # handle numerical precision problems with z
    # z == 0: analytic limit C = 0 (left as initialized)
    zeroix = z==0
    # large negative z: exp(-z) overflows, so C -> inf
    largeix = z < -200
    C[largeix] = np.inf
    # bug fix: original masked on an undefined `smallix`, raising NameError
    remainix = (~zeroix) & (~largeix)
    C[remainix] = (np.exp(-z[remainix]) - 1 + z[remainix]) / z[remainix]

    return ro/(1-C) + rd - (1+1/(1-C)) / (Q-1)
def collapse_condition(ro, rd, G, I, Q=2, allow_negs=False):
    """When this goes to 0, we are at a collapsed boundary. Coming from first order
    ODE approximation.

    Parameters
    ----------
    ro : float or ndarray
        ro/re
    rd : float or ndarray
        rd/re
    G : float
        G/re
    I : float
    Q : float, 2
    allow_negs : bool, False
        If True, skip the non-negativity checks on ro and rd.

    Returns
    -------
    ndarray
    """
    if not hasattr(ro, '__len__'):
        ro = np.array([ro])
    if not hasattr(rd, '__len__'):
        rd = np.array([rd])
    if not allow_negs:
        assert (ro>=0).all()
        assert (rd>=0).all()
    assert Q>=2

    z = -(1/(Q-1) - rd) / (1/(Q-1) - ro)
    C = np.zeros_like(z)
    # handle numerical precision problems with z
    # z == 0: analytic limit C = 0 (left as initialized)
    zeroix = z==0
    # large negative z: exp(-z) overflows, so C -> inf
    largeix = z < -200
    C[largeix] = np.inf
    # bug fix: original masked on an undefined `smallix`, raising NameError
    remainix = (~zeroix) & (~largeix)
    C[remainix] = (np.exp(-z[remainix]) - 1 + z[remainix]) / z[remainix]

    return rd + ro / (1-C) - (1+1/(1-C)) / (Q-1) - G*I/ro
# ======= #
# Classes #
# ======= #
#end IterativeMFT
#end FlowMFT
#end ODE2
#end UnitSimulator
@njit
def jit_unit_sim_loop(T, dt, G, ro, re, rd, I, a):
    """Simulate unit dynamics on a growing 1D lattice, starting from a single
    empty site. Each rate r is treated as a per-step probability r*dt, so dt
    must be small for the approximation to hold.

    Parameters
    ----------
    T : int
        Total simulation time.
    dt : float
        Time step.
    G : float
        Entry rate, spread uniformly over the lattice.
    ro : float
        Obsolescence rate (left edge retreat).
    re : float
        Expansion rate.
    rd : float
        Death rate.
    I : float
        Innovation scale factor.
    a : float
        Cooperativity exponent on the front occupancy.

    Returns
    -------
    list of int
        Final occupancy per lattice site.
    """
    counter = 0
    occupancy = [0]
    while (counter * dt) < T:
        # innov: front site can extend the lattice by one site
        innov = False
        if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
            occupancy.append(1)
            innov = True
        # obsolescence
        if len(occupancy) > 1 and np.random.rand() < (ro * dt):
            occupancy.pop(0)
        # from right to left b/c of expansion
        for x in range(len(occupancy)-1, -1, -1):
            # expansion (fast approximation); skip the site added by innovation this step
            if x < (len(occupancy)-1-innov):
                if occupancy[x] and np.random.rand() < (occupancy[x] * re * dt):
                    occupancy[x+1] += 1
            # death (fast approximation)
            if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                occupancy[x] -= 1
            # start up (remember that L is length of lattice on x-axis, s.t. L=0 means lattice has one site)
            if np.random.rand() < (G / len(occupancy) * dt):
                occupancy[x] += 1
        counter += 1
    return occupancy
@njit
def jit_unit_sim_loop_with_occupancy(occupancy, T, dt, G, ro, re, rd, I, a):
    """Simulate unit dynamics continuing from a given occupancy configuration.
    Each rate r is treated as a per-step probability r*dt, so dt must be small.

    Note the event ordering differs from jit_unit_sim_loop: death precedes
    expansion within a site, and obsolescence is applied after the site loop.

    Parameters
    ----------
    occupancy : numba.typed.ListType[int64]
        Starting occupancy per lattice site; modified in place.
    T : int
        Total simulation time.
    dt : float
        Time step.
    G : float
        Entry rate, spread uniformly over the lattice.
    ro : float
        Obsolescence rate.
    re : float
        Expansion rate.
    rd : float
        Death rate.
    I : float
        Innovation scale factor.
    a : float
        Cooperativity exponent on the front occupancy.

    Returns
    -------
    numba.typed.ListType[int64]
        The same list, after the dynamics have run.
    """
    counter = 0
    while (counter * dt) < T:
        # innov: front site can extend the lattice by one site
        innov = False
        if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
            occupancy.append(1)
            innov = True
        # from right to left b/c of expansion
        for x in range(len(occupancy)-1, -1, -1):
            # death (fast approximation)
            if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                occupancy[x] -= 1
            # expansion (fast approximation); skip the site added by innovation this step
            if x < (len(occupancy)-1-innov):
                if occupancy[x] and np.random.rand() < (occupancy[x] * re * dt):
                    occupancy[x+1] += 1
            # start up
            if np.random.rand() < (G / len(occupancy) * dt):
                occupancy[x] += 1
        # obsolescence
        if len(occupancy) > 1 and np.random.rand() < (ro * dt):
            occupancy.pop(0)
        counter += 1
    return occupancy
@njit
def jit_unit_sim_loop_no_expand(T, dt, G, ro, re, rd, I, a):
    """Special case for testing. Without expansion.

    Each rate r is treated as a per-step probability r*dt, so dt must be small.

    Parameters
    ----------
    T : int
        Total simulation time.
    dt : float
        Time step.
    G : float
        Entry rate.
    ro : float
        Obsolescence rate.
    re : float
        Expansion rate (only used for innovation here).
    rd : float
        Death rate.
    I : float
        Innovation scale factor.
    a : float
        Cooperativity exponent on the front occupancy.

    Returns
    -------
    list of int
        Final occupancy per lattice site.
    """
    counter = 0
    occupancy = [0]
    while (counter * dt) < T:
        # innov: front site can extend the lattice by one site
        innov = False
        if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
            occupancy.append(1)
            innov = True
        # obsolescence
        if len(occupancy) > 1 and np.random.rand() < (ro * dt):
            occupancy.pop(0)
        # from right to left b/c of expansion
        for x in range(len(occupancy)-1, -1, -1):
            # death (fast approximation)
            if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                occupancy[x] -= 1
            # start up (remember that L is length of lattice on x-axis, s.t. L=0 means lattice has one site)
            # NOTE(review): with a single site a unit enters deterministically every
            # step, while G is otherwise divided by len-1 — confirm this is intended
            if len(occupancy)==1:
                occupancy[x] += 1
            elif np.random.rand() < (G / (len(occupancy)-1) * dt):
                occupancy[x] += 1
        counter += 1
    return occupancy
| 28.337995 | 112 | 0.476557 | # ====================================================================================== #
# Minimal innovation model implementations and solutions.
#
# Author : Eddie Lee, edlee@csh.ac.at
# ====================================================================================== #
from numba import njit, jit
from numba.typed import List
from scipy.optimize import minimize, root
from cmath import sqrt
from workspace.utils import save_pickle
from .utils import *
def L_linear(G, ro, re, rd, I, alpha=1., Q=2):
    """Zeroth-order (linear) estimate of the stationary lattice width.

    All rates are rescaled by the expansion rate re. Returns np.inf when the
    denominator vanishes.
    """
    assert alpha==1
    # rescale rates by the expansion rate
    g_bar, o_bar, d_bar = G/re, ro/re, rd/re
    denom = o_bar * (d_bar + o_bar - 2/(Q-1))
    if denom == 0:
        return np.inf
    return g_bar * I / denom
def L_1ode(G, ro, re, rd, I, alpha=1., Q=2, return_denom=False):
    """Calculate stationary lattice width accounting the first order correction
    (from Firms II pg. 140, 238).

    This is equivalent to
        (np.exp((1-rd_bar)/(1-ro_bar)) - 1) / np.exp((1-rd_bar)/(1-ro_bar)) * -G_bar * I
        / ro_bar / (1-rd_bar)
    and matches the numerical solution of the first-order equation with an
    x=-1 boundary condition.

    Parameters
    ----------
    G : float
    ro : float
    re : float
    rd : float
    I : float
    alpha : float, 1.
    Q : float 2.
    return_denom : bool, False

    Returns
    -------
    float
        Estimated lattice width.
    float (optional)
        Denominator for L calculation.
    """
    # rescale all rates by the expansion rate
    g_bar = G / re
    o_bar = ro / re
    d_bar = rd / re
    assert not hasattr(g_bar, '__len__')

    z = -(1 - d_bar*(Q-1)) / (1 - o_bar*(Q-1))
    scalar_input = not hasattr(z, '__len__')
    if scalar_input:
        z = np.array([z])

    # correction factor C, computed piecewise for numerical safety
    C = np.zeros_like(z)
    at_zero = z == 0          # analytic limit: C = 0 (left as initialized)
    diverged = z < -200       # exp(-z) would overflow; C -> inf
    C[diverged] = np.inf
    rest = (~at_zero) & (~diverged)
    C[rest] = (np.exp(-z[rest]) - 1 + z[rest]) / z[rest]

    denom = o_bar/I * ((1 + 1/(1-C))/(Q-1) - d_bar - o_bar/(1-C))
    width = -g_bar / denom

    if return_denom:
        return (width, denom) if not scalar_input else (width[0], denom)
    return width if not scalar_input else width[0]
def match_length(y1, y2, side1='l', side2='r'):
    """Zero-pad the shorter of two vectors so their lengths agree.

    Padding is appended on the right of each vector by default; passing 'l'
    for a side pads that vector on the left instead.

    Parameters
    ----------
    y1 : ndarray
    y2 : ndarray
    side1 : str, 'l'
    side2 : str, 'r'

    Returns
    -------
    ndarray
    ndarray
    """
    flip1 = side1 == 'l'
    flip2 = side2 == 'l'
    # reverse so that padding can always be appended at the end
    if flip1:
        y1 = y1[::-1]
    if flip2:
        y2 = y2[::-1]

    gap = y1.size - y2.size
    if gap > 0:
        y2 = np.concatenate((y2, np.zeros(gap)))
    elif gap < 0:
        y1 = np.concatenate((y1, np.zeros(-gap)))

    # undo the reversal
    if flip1:
        y1 = y1[::-1]
    if flip2:
        y2 = y2[::-1]
    return y1, y2
def _solve_G(G0, L, ro, re, rd, I):
"""Solve for G that return appropriate L."""
z = (re - rd) / (re * (ro/re-1))
C = (np.exp(-z)-1+z) / z
def cost(logG):
G = np.exp(logG)
return (L - G * I * re / (ro * (rd + ro - re*(1+1/(1-C)))))**2
soln = minimize(cost, np.log(G0))
return np.exp(soln['x']), soln
def fit_dmft(data, L, dt, initial_params, **params_kw):
    """Best fit of dmft model at stationarity.

    Parameters
    ----------
    data : ndarray
        Target stationary density profile.
    L : float
        Target lattice width.
    dt : float
        Integration time step for FlowMFT.
    initial_params : list
        Initial (ro, re, rd, I). G shouldn't be specified; it is solved for.
    **params_kw
        Passed through to FlowMFT.

    Returns
    -------
    ndarray
        Best-fit (ro, re, rd, I).
    dict
        Output of scipy.optimize.minimize.
    """
    def cost(params):
        # parameters are optimized in log space to keep them positive
        ro, re, rd, I = np.exp(params)
        G = _solve_G(30, L, ro, re, rd, I)
        G = G[0]
        #print(f'{G=}')
        model = FlowMFT(G, ro, re, rd, I, dt, **params_kw)
        # NOTE(review): debug output left in; consider switching to logging
        print(f'{model.L=}')
        # reject parameter sets that fail to reproduce the target width
        if not np.isclose(model.L, L, atol=1e-2): return 1e30
        flag, mxerr = model.solve_stationary()
        if np.any(model.n<0): return 1e30
        try:
            c = np.linalg.norm(data-model.n)
        except ValueError:
            # shape mismatch between data and model profile
            return 1e30
        return c
    soln = minimize(cost, np.log(initial_params))
    return np.exp(soln['x']), soln
def _fit_dmft(data, dt, initial_params, **params_kw):
    """Best fit of dmft model at stationarity.

    Parameters
    ----------
    data : ndarray
        Target stationary density profile.
    dt : float
        Integration time step for FlowMFT.
    initial_params : list
        Initial (G, ro, re, rd, I); optimized in log space.
    **params_kw
        Passed through to FlowMFT.

    Returns
    -------
    ndarray
        Best-fit parameters.
    dict
        Output of scipy.optimize.minimize.
    """
    def cost(params):
        # parameters live in log space so they stay positive
        model = FlowMFT(*np.exp(params), dt, **params_kw)
        # reject unphysical or divergent lattice widths
        # (~ works here because np.isfinite returns np.bool_)
        if model.L<1 or ~np.isfinite(model.L): return 1e30
        flag, mxerr = model.solve_stationary()
        if np.any(model.n<0): return 1e30
        # align the two profiles at the left edge before comparing
        y1, y2 = match_length(data, model.n, 'l', 'l')
        c = np.linalg.norm(y1-y2)
        return c
    soln = minimize(cost, np.log(initial_params))
    return np.exp(soln['x']), soln
def fit_flow(x, data, initial_params, full_output=False, reverse=False, **params_kw):
    """Best fit of the flow model at stationarity.

    Parameters
    ----------
    x : ndarray
    data : ndarray
    initial_params : list
        Initial (A, B, G, ro, rd, I), where A and B rescale the x-axis and
        density, and re is fixed to 1.
    full_output : bool, False
        If True, also return soln dict.
    reverse : bool, False
        If True, align the model profile with the right edge of the data.
    **params_kw
        Passed through to FlowMFT.

    Returns
    -------
    ndarray
        Estimates for (A, B, G, ro, rd, I).
    dict
        If full_output is True. From scipy.optimize.minimize.
    """
    from scipy.interpolate import interp1d
    # interpolate data so the model profile can be sampled at rescaled x
    data_fun = interp1d(x, data, bounds_error=False, fill_value=0)
    def cost(params):
        A, B, G, ro, rd, I = params
        try:
            model = FlowMFT(G, ro, 1, rd, I, dt=.1, L_method=2, **params_kw)
            model.solve_stationary()
        except (AssertionError, ValueError): # e.g. problem with stationarity and L
            return 1e30
        if reverse:
            modx = np.linspace(x.max()-model.n.size, x.max(), model.n.size)
            c = np.linalg.norm(data_fun(modx/A) - model.n*B)
        else:
            modx = np.arange(model.n.size)
            c = np.linalg.norm(data_fun(modx/A) - model.n*B)
        # handle overflow
        if np.isnan(c): c = 1e30
        return c
    # NOTE(review): the constraint 'fun' uses args[3] and args[4] (ro, rd) while
    # 'jac' marks indices 3 and 5 (ro, I) — one of them looks wrong; confirm.
    soln = minimize(cost, initial_params, bounds=[(0,np.inf),(0,np.inf),(1e-3, np.inf)]+[(1e-3,30)]*3,
                    method='SLSQP',
                    constraints=({'type':'ineq', 'fun':lambda args: args[3] - 2 + args[4] - 1e-3,
                                  'jac':lambda args: np.array([0,0,0,1,0,1])},))
    if full_output:
        return soln['x'], soln
    return soln['x']
def fit_ode(x, data, initial_params, full_output=False, **params_kw):
    """Best fit of ODE model at stationarity.

    Parameters
    ----------
    x : ndarray
    data : ndarray
    initial_params : list
        Initial (G, ro, rd, I); re is fixed to 1.
    full_output : bool, False
        If True, also return soln dict.
    **params_kw
        Passed through to ODE2.

    Returns
    -------
    ndarray
        Estimates for (G, ro, rd, I).
    dict
        If full_output is True. From scipy.optimize.minimize.
    """
    def cost(params):
        G, ro, rd, I = params
        try:
            model = ODE2(G, ro, 1, rd, I, **params_kw)
        except AssertionError: # e.g. problem with stationarity and L
            return 1e30
        c = np.linalg.norm(data - model.n(x))
        return c
    # NOTE(review): the constraint 'fun' uses args[1] and args[2] (ro, rd) while
    # 'jac' marks indices 1 and 3 (ro, I) — one of them looks wrong; confirm.
    soln = minimize(cost, initial_params, bounds=[(1e-3, np.inf)]*4, method='SLSQP',
                    constraints=({'type':'ineq', 'fun':lambda args: args[1] - 2 + args[2] - 1e-3,
                                  'jac':lambda args: np.array([0,1,0,1])},))
    if full_output:
        return soln['x'], soln
    return soln['x']
def fit_piecewise_ode(x, y, initial_guess,
                      full_output=False):
    """Heuristic algorithm for fitting the ODE2 solution to a density function,
    matching the tail of the profile.

    Parameters
    ----------
    x : ndarray
    y : ndarray
        Density profile to fit.
    initial_guess : list
        First entry is the x offset (linear scale); the remaining entries are
        optimized in log space to keep them positive.
    full_output : bool, False

    Returns
    -------
    ndarray
        Elementwise exponential of the optimizer output.
        NOTE(review): this also exponentiates the x-offset parameter, which was
        NOT passed in log space — confirm intended.
    dict (optional)
        From scipy.optimize.minimize.
    """
    def cost(args):
        try:
            # first three args are x offset, width, and height units
            xoff = args[0]
            args = np.exp(args[1:])
            xunit, yunit = args[:2]
            # re is fixed to 1; splice it into the ODE2 argument list
            args = np.insert(args[2:], 2, 1)
            odemodel = ODE2(*args)
        except AssertionError:
            return 1e30
        peakx = odemodel.peak()
        if peakx<=0: return 1e30
        mody = odemodel.n(x / xunit - xoff) / yunit
        # only the tail of the profile is matched
        return np.linalg.norm(mody[-10:] - y[-10:])
    # convert parameters to log space to handle cutoff at 0
    soln = minimize(cost, [initial_guess[0]]+np.log(initial_guess[1:]).tolist(), method='powell')
    if full_output:
        return np.exp(soln['x']), soln
    return np.exp(soln['x'])
def _fit_piecewise_ode(peakx, peaky, n0, s0, initial_guess,
                       full_output=False):
    """Heuristic algorithm for fitting ODE2 to summary features of a density
    function: peak location/height, innovation-front density, and slope there.

    Parameters
    ----------
    peakx : float
        Peak location of the target profile.
    peaky : float
        Peak height of the target profile.
    n0 : float
        Density at the innovation front.
    s0 : float
        Slope at the innovation front.
    initial_guess : list
        First entry is the x offset (linear scale); the remaining entries are
        optimized in log space to keep them positive.
    full_output : bool, False

    Returns
    -------
    ndarray
        Elementwise exponential of the optimizer output.
        NOTE(review): this also exponentiates the x-offset parameter, which was
        NOT passed in log space — confirm intended.
    dict (optional)
        From scipy.optimize.minimize.
    """
    def cost(args):
        try:
            # first three args are x offset, width, and height units
            xoff = args[0]
            args = np.exp(args[1:])
            xunit, yunit = args[:2]
            # re is fixed to 1; splice it into the ODE2 argument list
            args = np.insert(args[2:], 2, 1)
            odemodel = ODE2(*args)
        except AssertionError:
            return 1e30
        x = odemodel.peak() / xunit
        if x<=0: return 1e30
        y = odemodel.n(x) / yunit
        if y<=0: return 1e30
        # weight location and height of peak, innov density, slope at innov
        return ((y-peaky)**2 + (x+xoff-peakx)**2 +
                (odemodel.n0/yunit - n0)**2 +
                (odemodel.d_complex(0).real*xunit/yunit - s0)**2)
    # convert parameters to log space to handle cutoff at 0
    soln = minimize(cost, [initial_guess[0]]+np.log(initial_guess[1:]).tolist(), method='powell')
    if full_output:
        return np.exp(soln['x']), soln
    return np.exp(soln['x'])
def solve_min_rd(G, ro, re, I, Q=2, a=1.,
                 tol=1e-10,
                 initial_guess=None,
                 full_output=False,
                 return_neg=False):
    """Solve for minimum rd that leads to divergent lattice, i.e. when denominator
    for L goes to 0 for a fixed re.

    Parameters
    ----------
    G : float
    ro : float
    re : float
    I : float
    Q : float, 2
    a : float, 1.
    tol : float, 1e-10
        Max residual of the root equation to count as converged.
    initial_guess : float, None
    full_output : bool, False
    return_neg : bool, False
        If True, do not round negative solutions up to 0.

    Returns
    -------
    float
        Minimum rd, 0. on the analytic continuation, or nan if the solver failed.
    dict (optional)
    """
    # use linear guess as default starting point
    # NOTE(review): a falsy initial_guess of 0 falls through to the default;
    # use `is None` if 0 is a legitimate starting guess
    initial_guess = initial_guess or (2*re+ro)
    # analytic continuation from the collapsed curve
    if re==0:
        if full_output:
            return 0., {}
        return 0.
    def cost(rd):
        n0 = (ro / re / I)**(1/a)
        z = (re/(Q-1)-rd) / re / (I*n0**a - 1/(Q-1))
        C = z**-1 * (np.exp(-z) - 1 + z)
        # squared residual of the divergence condition
        return (rd + ro - re / (Q-1) * (1 + 1/(1-C)))**2
    soln = minimize(cost, initial_guess)
    # if it didn't converge, return nan
    if soln['fun'] > tol:
        if full_output:
            return np.nan, soln
        return np.nan
    # neg values should be rounded up to 0
    elif not return_neg and soln['x'][0] < 0:
        if full_output:
            return 0., soln
        return 0.
    if full_output:
        return soln['x'][0], soln
    return soln['x'][0]
def solve_max_rd(G, ro, re, I, Q=2, a=1.,
                 tol=1e-10,
                 initial_guess=None,
                 full_output=False,
                 return_neg=False):
    """Solve for max rd that precedes collapsed lattice, i.e. when we estimate L~1.

    Parameters
    ----------
    G : float
    ro : float
    re : float
    I : float
    Q : float, 2
    a : float, 1.
    tol : float, 1e-10
        Max residual of the root equation to count as converged.
    initial_guess : float, None
    full_output : bool, False
    return_neg : bool, False
        If True, do not round negative solutions up to 0.

    Returns
    -------
    float
        Max rd, 0. on the analytic continuation, or nan if the solver failed.
    dict (optional)
    """
    # analytic continuation from the collapsed curve; this must come BEFORE the
    # default guess below, which divides by re and raised ZeroDivisionError at re=0
    if re==0:
        if full_output:
            return 0., {}
        return 0.
    # use linear guess as default starting point
    initial_guess = initial_guess or (G * (ro/re/I)**(-1/a) - ro + 2*re)

    def cost(rd):
        n0 = (ro / re / I)**(1/a)
        z = (re/(Q-1)-rd) / re / (I*n0**a - 1/(Q-1))
        C = z**-1 * (np.exp(-z) - 1 + z)
        # squared residual of the collapse condition L ~ 1
        return (rd + ro - re / (Q-1) * (1 + 1/(1-C)) - G*(re*I)**(1./a))**2

    soln = minimize(cost, initial_guess)
    # if it didn't converge, return nan
    if soln['fun'] > tol:
        if full_output:
            return np.nan, soln
        return np.nan
    # neg values should be rounded up to 0
    elif not return_neg and soln['x'][0] < 0:
        if full_output:
            return 0., soln
        return 0.
    if full_output:
        return soln['x'][0], soln
    return soln['x'][0]
def flatten_phase_boundary(G, ro, re, I, Q, a,
                           re_data, rd_data,
                           poly_order=5):
    """Fit polynomial to min rd growth curve and use inverse transform to map
    relative to 1:1 line.

    Parameters
    ----------
    G: float
    ro : float
    re : ndarray
        Grid of expansion rates used to trace the min-rd phase boundary.
    I : float
    Q : float
    a : float
    re_data : ndarray
    rd_data : ndarray
    poly_order : int, 5
        Order of the polynomial fitted to the boundary curve.

    Returns
    -------
    ndarray
        Rescaled re_data.
    ndarray
        rd_data mapped through the inverse of the fitted polynomial; nan where
        no real inverse exists.
    """
    if not hasattr(re_data, '__len__'):
        re_data = np.array([re_data])
    if not hasattr(rd_data, '__len__'):
        rd_data = np.array([rd_data])
    # trace the phase boundary rd_min(re) over the given re grid
    y = np.array([solve_min_rd(G, ro, re_, I, Q=Q, return_neg=True) for re_ in re])
    # drop the first grid point, then any points where the solver failed (nan)
    y = y[1:]
    x = re[1:][~np.isnan(y)]
    y = y[~np.isnan(y)]
    # rescale x to interval [0,1]
    newx = re_data / x[-1]
    x /= x[-1]
    p = np.poly1d(np.polyfit(x, y, poly_order))
    newy = np.zeros_like(rd_data)
    for i, y_ in enumerate(rd_data):
        # invert the polynomial: take the root closest to the data value
        roots = (p-y_).roots
        ny = roots[np.abs(roots-y_).argmin()]
        if not ny.imag==0:
            # complex root means no real inverse at this rd
            newy[i] = np.nan
        else:
            newy[i] = ny
    return newx, newy.real
def L_denominator(ro, rd, Q=2):
    """Denominator for 2nd order calculation of L at stationarity. Parameters are
    rescaled rates.

    When denom goes negative, we have no physical solution for L, i.e. it is
    either non-stationary or it diverges; there is a weird boundary at ro=1.

    Parameters
    ----------
    ro : float or ndarray
        ro/re
    rd : float or ndarray
        rd/re
    Q : float, 2

    Returns
    -------
    ndarray
    """
    if not hasattr(ro, '__len__'):
        ro = np.array([ro])
    assert (ro>=0).all()
    if not hasattr(rd, '__len__'):
        rd = np.array([rd])
    assert (rd>=0).all()
    assert Q>=2

    z = -(1/(Q-1) - rd) / (1/(Q-1) - ro)
    C = np.zeros_like(z)
    # handle numerical precision problems with z
    # z == 0: analytic limit C = 0 (left as initialized)
    zeroix = z==0
    # large negative z: exp(-z) overflows, so C -> inf
    largeix = z < -200
    C[largeix] = np.inf
    # bug fix: original masked on an undefined `smallix`, raising NameError
    remainix = (~zeroix) & (~largeix)
    C[remainix] = (np.exp(-z[remainix]) - 1 + z[remainix]) / z[remainix]

    return ro/(1-C) + rd - (1+1/(1-C)) / (Q-1)
def collapse_condition(ro, rd, G, I, Q=2, allow_negs=False):
    """When this goes to 0, we are at a collapsed boundary. Coming from first order
    ODE approximation.

    Parameters
    ----------
    ro : float or ndarray
        ro/re
    rd : float or ndarray
        rd/re
    G : float
        G/re
    I : float
    Q : float, 2
    allow_negs : bool, False
        If True, skip the non-negativity checks on ro and rd.

    Returns
    -------
    ndarray
    """
    if not hasattr(ro, '__len__'):
        ro = np.array([ro])
    if not hasattr(rd, '__len__'):
        rd = np.array([rd])
    if not allow_negs:
        assert (ro>=0).all()
        assert (rd>=0).all()
    assert Q>=2

    z = -(1/(Q-1) - rd) / (1/(Q-1) - ro)
    C = np.zeros_like(z)
    # handle numerical precision problems with z
    # z == 0: analytic limit C = 0 (left as initialized)
    zeroix = z==0
    # large negative z: exp(-z) overflows, so C -> inf
    largeix = z < -200
    C[largeix] = np.inf
    # bug fix: original masked on an undefined `smallix`, raising NameError
    remainix = (~zeroix) & (~largeix)
    C[remainix] = (np.exp(-z[remainix]) - 1 + z[remainix]) / z[remainix]

    return rd + ro / (1-C) - (1+1/(1-C)) / (Q-1) - G*I/ro
# ======= #
# Classes #
# ======= #
class IterativeMFT():
    """Discrete mean-field solution obtained by iterating the stationary
    occupancy recursion and ratcheting the lattice width L until the tail of
    the profile stays non-negative."""
    def __init__(self, G, ro, re, rd, I, alpha=1., Q=2):
        """Class for calculating discrete MFT quantities.

        Parameters
        ----------
        ro : float
        G : float
        re : float
        rd : float
        I : float
        alpha : float, 1.
            Cooperativity.
        Q : int, 2
            Bethe lattice branching ratio.
        """
        assert alpha==1
        assert Q>=2
        self.ro = ro
        self.G = G
        self.re = re
        self.rd = rd
        self.I = I
        self.alpha = alpha
        self.Q = Q
        self.n0 = ro/re/I  # stationary density
        # where is this criterion from?
        #assert (2/(Q-1) * re - rd) * self.n0 - re*I*self.n0**2 <= 0, "Stationary criterion unmet."
        # MFT guess for L, which we will refine using tail convergence criteria
        try:
            self.L0 = G * re * I / (ro * (rd + ro - 2*re/(Q-1)))
        except ZeroDivisionError:
            self.L0 = np.inf
        # for handling infinite L
        # (~ works here because np.isfinite returns np.bool_)
        if ~np.isfinite(self.L0) or self.L0<0:
            # cap L at a large finite value and skip refinement
            self.L0 = 100_000
            self.L = self.L0
            self.n = self.iterate_n()
        else:
            self.min_L(self.L0)
    def min_L(self, L0, mx_decimal=10):
        """Lower L slower and slower while keeping tail positive in order to
        find value of L that solves iterative solution.

        As a heuristic, we keep the numerically calculated value of the tail
        positive instead of the self-consistent iterative value, which seems to
        behave worse (probably because it depends on the estimate of n[-2],
        which itself can be erroneous).

        Parameters
        ----------
        L0 : float
            L value to start with as a guess.
        mx_decimal : int, 10
            No. of decimal places to fit to.

        Returns
        -------
        float
            Refined estimate of lattice width L.
        ndarray
            Occupancy profile at the refined L.
        """
        assert mx_decimal < 14, "Exceeding floating point precision."
        assert L0 > 2, L0
        ro = self.ro
        G = self.G
        re = self.re
        rd = self.rd
        I = self.I
        Q = self.Q
        n0 = self.n0
        L = np.ceil(L0)
        decimal = 1
        nfun = self.iterate_n(L)
        # check that tail is positive; in case it is not, increase starting guess for L a few times
        counter = 0
        while nfun[-1] < 0: #(I * n0 * nfun[-2] + G/re/L / (I * n0 + rd/re)) < 0:
            L += 1
            nfun = self.iterate_n(L)
            counter += 1
            assert counter < 1e3
        # successively refine L one decimal place at a time
        while decimal <= mx_decimal:
            # ratchet down til the tail goes the wrong way
            while nfun[-1] > 0 and L>0: #(I * n0 * nfun[-2] + G/re/L / (I * n0 + rd/re)) > 0:
                L -= 10**-decimal
                nfun = self.iterate_n(L)
            L += 10**-decimal # oops, go back up
            nfun = self.iterate_n(L)
            decimal += 1
        self.L, self.n = L, nfun
        return L, nfun
    def iterate_n_high_prec(self, L=None, iprint=False):
        """Iterative solution to occupancy number with high precision. See NB II pg. 118."""
        import mpmath as mp
        # 30 significant digits to tame the exponential error growth of the recursion
        mp.dps = 30
        ro = self.ro
        G = self.G
        re = self.re
        rd = self.rd
        I = self.I
        n0 = self.n0
        L = L or self.L
        Q = self.Q
        n = mp.matrix(int(L)+1, 1)
        n[0] = n0
        if len(n) > 1:
            # assumption about n[-1]=0 gives n[1]
            n[1] = (Q-1) * (I * n0**2 + (rd * n0 - G / L) / re)
        for i in range(2, len(n)):
            n[i] = (Q-1) * (re * I * n0 * (n[i-1] - n[i-2]) + (rd * n[i-1] - G / L)) / re
        return np.array([float(i) for i in n])
    def iterate_n(self, L=None, iprint=False):
        """Iterative solution to occupancy number. See NB II pg. 118."""
        ro = self.ro
        G = self.G
        re = self.re
        rd = self.rd
        I = self.I
        n0 = self.n0
        L = L or self.L
        Q = self.Q
        n = np.zeros(int(L)+1)
        n[0] = n0
        if n.size > 1:
            # assumption about n[-1]=0 gives n[1]
            n[1] = (Q-1) * (I * n0**2 + (rd * n0 - G / L) / re)
        # handle overflow separately
        overflow = False
        for i in range(2, n.size):
            n[i] = (Q-1) * (re * I * n0 * (n[i-1] - n[i-2]) + (rd * n[i-1] - G / L)) / re
            if abs(n[i]) > 1e200:
                overflow = True
                break
        if overflow:
            # flag the diverged portion of the profile
            n[i:] = np.nan
        return n
    def estimate_L(self, x=2):
        """Invert stationary equation to solve for L.

        Parameters
        ----------
        x : int, 2
            Lattice point to use for estimating L. Too close to the right side
            boundary leads to large numerical errors.
        """
        ro = self.ro
        G = self.G
        re = self.re
        rd = self.rd
        I = self.I
        Q = self.Q
        n0 = self.n0
        n = self.n
        return G/re / (I * n[0] * (n[x-1]-n[x-2]) + rd/re * n[x-1] - n[x]/(Q-1))
#end IterativeMFT
class FlowMFT():
    """Discrete mean-field solution obtained by integrating the flow equations
    in time until a stationary occupancy profile is reached."""
    def __init__(self,
                 G=None,
                 ro=None,
                 re=None,
                 rd=None,
                 I=None,
                 dt=None,
                 L_method=2,
                 alpha=1.,
                 Q=2):
        """Class for calculating discrete MFT quantities by running dynamics.

        Parameters
        ----------
        G : float
        ro : float
        re : float
        rd : float
        I : float
        dt : float
            Integration time step.
        L_method : int
            0, naive
            1, corrected
            2, ODE2
        alpha : float, 1.
            Cooperativity parameter.
        Q : int, 2
            Bethe lattice branching ratio.
        """
        assert alpha>0
        assert Q>=2
        self.G = G
        self.ro = ro
        self.re = re
        self.rd = rd
        self.I = I
        self.dt = dt
        self.alpha = float(alpha)
        self.Q = Q
        # stationary density at the innovation front
        self.n0 = (ro/re/I)**(1/alpha)
        try:
            if L_method==0:
                self.L = G / (self.n0 * (rd - (1+1/(Q-1))*re + re*I*self.n0**self.alpha))
            elif L_method==1:
                self.L = L_1ode(G, ro, re, rd, I, alpha=alpha, Q=Q)
            elif L_method==2:
                self.L = ODE2(G, ro, re, rd, I, alpha=alpha, Q=Q).L
            else: raise NotImplementedError
        except ZeroDivisionError:
            self.L = np.inf
        # a non-positive estimate means the lattice diverges
        if self.L <= 0:
            self.L = np.inf
        else:
            self.L = min(self.L, 10_000)
    def update_n(self, L=None):
        """Update occupancy number for a small time step self.dt.

        Parameters
        ----------
        L : float, None
        """
        L = L or self.L
        # entrance
        dn = self.G / L
        # innovation shift
        dn -= self.re * self.I * self.n[0]**self.alpha * self.n
        dn[1:] += self.re * self.I * self.n[0]**self.alpha * self.n[:-1]
        # expansion
        dn[:-1] += self.re / (self.Q-1) * self.n[1:]
        # death
        dn -= self.rd * self.n
        self.n += dn * self.dt
    def solve_stationary(self,
                         tol=1e-5,
                         T=5e4,
                         L=None,
                         n0=None,
                         iprint=False):
        """Run til stationary state is met using convergence criterion.

        Parameters
        ----------
        tol : float, 1e-5
            Max absolute change permitted per lattice site per unit time.
        T : float, 5e4
        L : int, None
        n0 : ndarray, None
        iprint : bool, False

        Returns
        -------
        int
            Flag indicating if problem converged
            (0) to stationary solution with correct innov. density
            (1) to stationary soln with wrong innov. density
            (2) or did not converge
        float
            Maximum absolute difference between last two steps of simulation.
        """
        L = L or self.L
        # simply impose a large upper limit for infinite L
        if not np.isfinite(L):
            L = 10_000
        # reuse a previous solution as the starting point when its size matches
        if n0 is None and 'n' in self.__dict__ and self.n.size==(L+1):
            n0 = self.n[:-1]
        elif n0 is None:
            n0 = np.ones(int(L))/2
        self.n = n0.copy()
        prev_n = np.zeros_like(n0)
        counter = 0
        while (self.dt*counter) < T and np.abs(prev_n-self.n).max()>(self.dt*tol):
            prev_n = self.n.copy()
            self.update_n(L)
            counter += 1
        if (self.dt*counter) >= T:
            flag = 2
        elif np.abs(self.n[0]-self.n0)/self.n.max() < 1e-5:  # relative error
            flag = 0
        else:
            flag = 1
        mx_err = np.abs(prev_n-self.n).max()
        # append the zero boundary site
        self.n = np.append(self.n, 0)
        return flag, mx_err
    def run(self, T, save_every, L=None, iprint=False):
        """Integrate the dynamics and record snapshots of the profile.

        Parameters
        ----------
        T : int
        save_every : int
        L : int, None
        iprint : bool, False

        Returns
        -------
        list of ndarray
            Snapshots of the occupancy profile.
        list of float
            Times at which snapshots were taken.
        """
        L = L or self.L
        # simply impose a large upper limit for infinite L
        if not np.isfinite(L):
            L = 10_000
        t = []
        n0 = np.ones(int(L))/2
        self.n = n0.copy()
        snapshot_n = []
        counter = 0
        while (self.dt*counter) < T:
            self.update_n(L)
            if np.isclose((self.dt*counter)%save_every, 0, atol=self.dt/10, rtol=0):
                # bug fix: original referenced bare `dt` (NameError when iprint=True)
                if iprint: print(f"Recording {self.dt*counter}...")
                t.append(counter * self.dt)
                snapshot_n.append(self.n.copy())
            counter += 1
        return snapshot_n, t
    def solve_n0(self, L):
        """Quadratic equation solution for n0."""
        assert self.alpha==1, "This does not apply for alpha!=1."
        assert self.Q==2
        G = self.G
        re = self.re
        rd = self.rd
        I = self.I
        return ((2-rd/re) + np.sqrt((2-rd/re)**2 + 4*I*G/re/L)) / 2 / I
    def corrected_n0(self):
        """Stil figuring out the logic of this.
        """
        assert self.alpha==1 and self.Q==2
        G = self.G
        I = self.I
        re = self.re
        rd = self.rd
        ro = self.ro
        n0 = self.n0
        n02 = -(re-rd) * n0 / ((re-rd)/2 - re*I*n0 + re)
        # this is our estimate of the correction to the slope, which gives corrections to the intercept
        z = - (re - rd) / ((re-rd)/2 + re*(1 - I*n0))
        return n0 + (n02 * z**-2. * (-1 + z + np.exp(-z)) if z!=0 else 0.)
#end FlowMFT
class ODE2():
    """Second-order analytic (continuum ODE) solution of the mean-field theory."""
    def __init__(self, G, ro, re, rd, I, L=None, alpha=1., Q=2):
        """Class for second-order analytic solution to MFT.
        Parameters
        ----------
        ro : float
        G : float
        re : float
        rd : float
        I : float
        L : float, None
        alpha : float, 1.
            Cooperativity.
        Q : int, 2
            Bethe lattice branching ratio.
        """
        self.ro = float(ro)
        self.G = float(G)
        self.re = float(re)
        self.rd = float(rd)
        self.I = float(I)
        self.alpha = alpha
        self.Q = Q
        self.n0 = (ro/re/I)**(1/alpha) # stationary density
        self.L = self.solve_L(L)
    def n(self, x, L=None, return_im=False, method=2):
        """Interpolated occupancy function.
        Parameters
        ----------
        x : ndarray
        L : float, None
            Lattice length; defaults to self.L.
        return_im : bool, False
            If True (method 2 only), also return the imaginary part.
        method : int, 2
            1: expression produced by Mathematica; 2: hand-written solution.
        Returns
        -------
        ndarray
        """
        L = L if not L is None else self.L
        if method==1:
            # cleaned up output from mathematica
            assert self.alpha==1
            ro = self.ro
            G = self.G
            re = self.re
            rd = self.rd
            I = self.I
            a = -re**2 - 4*re*ro + ro**2 + 2*rd*(re+ro)
            sol = (G / ((np.exp(2*sqrt(a)/(re+ro))-1) * L * (re-rd)) *
                   (1 - np.exp(2*sqrt(a)/(re+ro)) + np.exp(-(sqrt(a)*(x-1)+re*x-ro*(1+x)+re)/(re+ro)) -
                    np.exp((-re*x+ro*(1+x)+sqrt(a)*(1+x)-re)/(re+ro)) +
                    (L*(re-rd)*ro/(G*re*I)+1) * (np.exp((-re*x+ro*x+sqrt(a)*(2+x))/(re+ro)) -
                                                 np.exp((-re*x+(ro-sqrt(a))*x)/(re+ro)))))
            # sqrt(a) may be complex; only the real part is returned
            return sol.real
        elif method==2:
            # hand-written soln
            # rescale params in units of re
            ro = self.ro / self.re
            G = self.G / self.re
            rd = self.rd / self.re
            I = self.I
            Q = self.Q
            # compute eigenvalues of characteristic soln
            a = (1/(Q-1)-ro)**2 - 2 * (1/(Q-1)-rd) * (1/(Q-1)+ro)
            lp = (ro-1/(Q-1) + sqrt(a)) / (1/(Q-1)+ro)
            lm = (ro-1/(Q-1) - sqrt(a)) / (1/(Q-1)+ro)
            # constants for homogenous terms
            A = ((G * np.exp((-sqrt(a)+ro-1/(Q-1)) / (1/(Q-1)+ro)) / (L * (1/(Q-1)-rd)) -
                  (2*G*(1/(Q-1)+ro) / (L*((1/(Q-1)-ro)**2 - a)) + (ro/I)**(1./self.alpha))) /
                 (np.exp(-2*sqrt(a) / (1/(Q-1)+ro)) - 1))
            B = ((G * np.exp(( sqrt(a)+ro-1/(Q-1)) / (1/(Q-1)+ro)) / (L * (1/(Q-1)-rd)) -
                  (2*G*(1/(Q-1)+ro) / (L*((1/(Q-1)-ro)**2 - a)) + (ro/I)**(1./self.alpha))) /
                 (np.exp( 2*sqrt(a) / (1/(Q-1)+ro)) - 1))
            # particular soln
            sol = A * np.exp(lp * x) + B * np.exp(lm * x) - G/L/(1/(Q-1)-rd)
            if return_im:
                return sol.real, sol.imag
            return sol.real
        else: raise NotImplementedError
    def solve_L(self, L0=None, full_output=False, method=3):
        """Solve for stationary value of L that matches self-consistency condition,
        i.e. analytic solution for L should be equal to the posited value of L.
        Parameters
        ----------
        L0 : float, None
            Initial guess.
        full_output : bool, False
        method : int, 3
            1: 'mathematica'
            2: 'hand'
            3: 'hand' but w/ boundary condition at x=L
            Use formulation returned by mathematica or hand written solution. Hand
            written solution is more numerically stable.
        Returns
        -------
        float
        dict (optional)
        """
        G = self.G
        ro = self.ro
        re = self.re
        rd = self.rd
        I = self.I
        Q = self.Q
        # if not provided, use the iterative method to initialize the search
        L0 = L0 or L_linear(G, ro, re, rd, I, alpha=self.alpha, Q=Q)
        # this is infinite limit, don't expect a good solution
        if L0 < 0: L0 = 2e5
        if np.isinf(L0): L0 = 2e5
        if method==1:
            assert self.alpha==1
            assert self.Q==2
            # analytic eq for L solved from continuum formulation in Mathematica
            # this formulation has much bigger numerical errors (sometimes)
            a = -re**2 - 4*re*ro + ro**2 + 2*rd*(re+ro)
            num = lambda x:(np.exp(-(re-ro)/(re+ro)) * (np.exp(-sqrt(a)*(x-1)/(re+ro)) -
                            np.exp(sqrt(a)*(x+1)/(re+ro))) +
                            np.exp((re-ro)*x/(re+ro)) * (1-np.exp(2*sqrt(a)/(re+ro))) -
                            np.exp(-sqrt(a)*x/(re+ro)) +
                            np.exp(sqrt(a)*(2+x)/(re+ro)))
            den = lambda x:(np.exp(-sqrt(a)*x/(re+ro)) -
                            np.exp(sqrt(a)*(2+x)/(re+ro))) * (re-rd)*ro / (G*re*I)
            statL = lambda x:num(x) / den(x)
            # minimize the squared self-consistency residual (analytic L - assumed L)
            soln = minimize(lambda x:(statL(x).real - x)**2, L0, tol=1e-10, bounds=[(0,np.inf)])
            if full_output:
                return soln['x'][0], soln
            return soln['x'][0]
        elif method==2:
            def cost(args):
                L = args[0]
                # boundary condition: occupancy vanishes at x = -1
                return self.n(-1, L, method=2)**2
            soln = minimize(cost, L0, tol=1e-10, bounds=[(0,np.inf)])
            if full_output:
                return soln['x'][0], soln
            return soln['x'][0]
        elif method==3:
            def cost(args):
                L = args[0]
                # boundary condition: occupancy vanishes at x = L
                return self.n(L, L, method=2)**2
            soln = minimize(cost, L0, tol=1e-10, bounds=[(0,np.inf)])
            if full_output:
                return soln['x'][0], soln
            return soln['x'][0]
        else: raise NotImplementedError
    def check_stat(self, x=1):
        """Violation of stationarity condition by checking accuracy of iterative
        solution.
        Returns
        -------
        float
            n(x) - [n(x) calculated with n(x-1)]
        """
        assert x>=1
        G = self.G
        ro = self.ro
        re = self.re
        rd = self.rd
        I = self.I
        L = self.L
        n0 = self.n(0)
        if x==1:
            return self.n(x) - I * n0**2 - rd*n0/re + G/re/L
        return self.n(x) - (I * n0 * (self.n(x-1) - self.n(x-2)) + rd*self.n(x-1)/re - G/re/L)
    def slope(self):
        """Slope at x=0."""
        ro = self.ro
        G = self.G
        re = self.re
        rd = self.rd
        I = self.I
        L = self.L
        return ro**2 / re**2 / I + (rd/re-1) * ro / re / I - G / re / L
    def d_complex(self, x):
        """Complex derivative.
        Parameters
        ----------
        x : ndarray
        Returns
        -------
        ndarray
        """
        # transform into normal parameters
        re = self.re
        rd = self.rd/re
        ro = self.ro/re
        G = self.G/re
        I = self.I
        L = self.L
        a = -1 + (ro - 4) * ro + 2 * rd * (1 + ro)
        sqrta = sqrt(a)
        return ((G/L/(rd-1) * ((-1 + ro + sqrta)/(1+ro) *
                np.exp((-1 + sqrta + x*(sqrta-1) + ro*(1+x)) / (1+ro)) -
                (-1 + ro - sqrta)/(1+ro) *
                np.exp((-1 + sqrta - x*(1+sqrta) + ro*(1+x)) / (1+ro))) +
                np.exp((-1 + ro - sqrta) * x / (1+ro)) * (G/L/(rd-1) - ro/I) * (-1+ro-sqrta)/(1+ro)+
                np.exp((2 * sqrta + (ro-1+sqrta)*x) / (1+ro)) * (-G/L/(rd-1) + ro/I) * (-1+ro+sqrta) /
                (1+ro)) /
                (-1 + np.exp(2 * sqrta/(1+ro))))
    def peak(self, initial_guess=None, full_output=False):
        """Solve for peak by finding root in derivative.
        Parameters
        ----------
        full_output : bool, False
        Returns
        -------
        float
        dict (optional)
        """
        if initial_guess is None:
            initial_guess = 2/3 * self.L
        # transform into normal parameters
        re = self.re
        rd = self.rd/re
        ro = self.ro/re
        G = self.G/re
        I = self.I
        L = self.L
        a = -1 + (ro - 4) * ro + 2 * rd * (1 + ro)
        sqrta = sqrt(a)
        # need a clever transformation of coordinates to allow the minimizer to find the soln
        soln = minimize(lambda y:np.abs(self.d_complex(L-np.exp(y)).real), np.log(L-initial_guess),
                        bounds=[(-np.inf,np.log(L/2))],
                        method='powell')
        if full_output:
            return L - np.exp(soln['x'][0]), soln
        return L - np.exp(soln['x'][0])
#end ODE2
class UnitSimulator(FlowMFT):
    def __init__(self,
                 G=None,
                 ro=None,
                 re=None,
                 rd=None,
                 I=None,
                 dt=None,
                 L_method=2,
                 alpha=1.,
                 Q=2,
                 rng=np.random):
        """Independent unit simulation of firms, which is the same thing as a
        density evolution equation. This is the simplest implementation possible
        that only keeps track of the occupancy number and processes to order dt.
        Parameters
        ----------
        G : float
        ro : float
        re : float
        rd : float
        I : float
        dt : float
        L_method : int
            0, naive
            1, corrected
            2, ODE2
        alpha : float, 1.
            Cooperativity parameter.
        Q : int, 2
            Bethe lattice branching ratio.
        rng : RandomState, np.random
        """
        assert alpha>0
        assert Q>=2
        self.G = G
        self.ro = ro
        self.re = re
        self.rd = rd
        self.I = I
        self.dt = dt
        self.alpha = float(alpha)
        self.Q = Q
        self.rng = rng
        # stationary innovation-front density
        self.n0 = (ro/re/I)**(1/alpha)
        # estimate the lattice length L using the requested method
        try:
            if L_method==0:
                self.L = G / (self.n0 * (rd - (1+1/(Q-1))*re + re*I*self.n0**self.alpha))
            elif L_method==1:
                self.L = L_1ode(G, ro, re, rd, I, alpha=alpha, Q=Q)
            elif L_method==2:
                self.L = ODE2(G, ro, re, rd, I, alpha=alpha, Q=Q).L
            else: raise NotImplementedError
        except ZeroDivisionError:
            self.L = np.inf
        # non-positive L is unphysical -> treat as infinite; otherwise cap it
        if self.L <= 0:
            self.L = np.inf
        else:
            self.L = min(self.L, 10_000)
    def simulate(self, T,
                 reset_rng=False,
                 jit=True,
                 occupancy=None,
                 no_expansion=False):
        """
        NOTE: dt must be small enough to ignore coincident events.
        Parameters
        ----------
        T : int
            Simulation time to run.
        reset_rng : bool, False
        jit : bool, True
        occupancy : list, None
            Feed in a starting occupancy on which to run dynamics.
        no_expansion : bool, False
            When True, expansion term is removed from simulation. This only
            works without occupancy.
        Returns
        -------
        list
            occupancy at each site
        """
        G = float(self.G)
        ro = float(self.ro)
        rd = float(self.rd)
        re = float(self.re)
        I = float(self.I)
        a = float(self.alpha)
        dt = float(self.dt)
        # each per-step event probability must be a valid probability (< 1)
        assert (G * dt)<1
        assert (rd * dt)<1
        assert (re * dt)<1
        assert (ro * dt)<1
        if jit and occupancy is None:
            if reset_rng: np.random.seed()
            if no_expansion:
                return jit_unit_sim_loop_no_expand(T, dt, G, ro, re, rd, I, a)
            return jit_unit_sim_loop(T, dt, G, ro, re, rd, I, a)
        elif jit and not occupancy is None:
            if reset_rng: np.random.seed()
            occupancy = List(occupancy)
            return list(jit_unit_sim_loop_with_occupancy(occupancy, T, dt, G, ro, re, rd, I, a))
        # pure-Python fallback path below
        if reset_rng: self.rng.seed()
        counter = 0
        occupancy = [0]
        while (counter * dt) < T:
            # innov
            innov = False
            if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
                occupancy.append(1)
                innov = True
            # obsolescence
            if len(occupancy) > 1 and np.random.rand() < (ro * dt):
                occupancy.pop(0)
            # from right to left b/c of expansion
            for x in range(len(occupancy)-1, -1, -1):
                # expansion (fast approximation)
                if x < (len(occupancy)-1-innov):
                    if occupancy[x] and np.random.rand() < (occupancy[x] * re * dt):
                        occupancy[x+1] += 1
                # death (fast approximation)
                if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                    occupancy[x] -= 1
                # start up (remember that L is length of lattice on x-axis, s.t. L=0 means lattice has one site)
                # NOTE(review): this path adds a firm unconditionally whenever the
                # lattice has a single site, whereas jit_unit_sim_loop has no such
                # special case -- confirm which behavior is intended.
                if len(occupancy)==1:
                    occupancy[x] += 1
                elif np.random.rand() < (G / len(occupancy) * dt):
                    occupancy[x] += 1
            counter += 1
        return occupancy
    def parallel_simulate(self, n_samples, T, **kwargs):
        """
        Parameters
        ----------
        n_samples : int
        T : int
        **kwargs
        Returns
        -------
        list of lists
            Each inner list is an occupancy list.
        """
        # NOTE(review): the stdlib multiprocessing.Pool cannot pickle lambdas;
        # this presumably relies on a Pool implementation that can (e.g.
        # multiprocess/pathos) -- confirm which Pool is imported at file top.
        with Pool() as pool:
            self.occupancy = list(pool.map(lambda args: self.simulate(T, True, **kwargs), range(n_samples)))
        return self.occupancy
    def mean_occupancy(self,
                       occupancy=None,
                       width=np.inf,
                       norm_err=True,
                       rescale=False):
        """
        Parameters
        ----------
        occupancy : list of list, None
        width : tuple, np.inf
            Range of permitted widths to average. When this is a float or int, then
            only sim results with exactly that width are returned.
        norm_err : bool, True
        rescale : bool, False
            If True, rescale density by cooperativity before taking mean.
        Returns
        -------
        ndarray
            Average occupancy.
        ndarray
            Standard deviation as error bars.
        """
        if occupancy is None:
            occupancy = self.occupancy
        # scalar width means "exactly this width"; normalize to a (lo, hi) pair
        if not hasattr(width, '__len__'):
            width = width, width
        assert width[0]<=width[1]
        # infinite lower width: average every trajectory regardless of length,
        # aligning trajectories at the frontier (reversed indexing)
        if width[0]==np.inf:
            maxL = max([len(i) for i in occupancy])
            y = np.zeros(maxL)
            yerr = np.zeros(maxL)
            # first calculate the means
            counts = np.zeros(maxL, dtype=int)
            if rescale:
                for i in occupancy:
                    y[:len(i)] += np.array(i[::-1])**self.alpha
                    counts[:len(i)] += 1
            else:
                for i in occupancy:
                    y[:len(i)] += i[::-1]
                    counts[:len(i)] += 1
            y = y / counts
            # then calculate the std
            if rescale:
                for i in occupancy:
                    yerr[:len(i)] += (np.array(i[::-1])**self.alpha - y[:len(i)])**2
            else:
                for i in occupancy:
                    yerr[:len(i)] += (i[::-1] - y[:len(i)])**2
            yerr /= counts
            yerr = np.sqrt(yerr)
            if norm_err:
                return y, yerr / np.sqrt(counts)
            return y, yerr
        # case where there is some finite range of L to average over
        y = np.vstack([i[-width[0]:] for i in occupancy if width[0]<=len(i)<=width[1]])[:,::-1]
        if rescale:
            if norm_err:
                return (y**self.alpha).mean(0), (y**self.alpha).std(0) / np.sqrt(y.shape[0])
            return (y**self.alpha).mean(0), (y**self.alpha).std(0)
        if norm_err:
            return y.mean(0), y.std(0) / np.sqrt(y.shape[0])
        return y.mean(0), y.std(0)
    def rescale_factor(self, T, sample_size=1_000):
        """Rescaling factor needed to correct for bias in mean L. The returned
        factor c can be used to modify the automaton model with the set of
        transformations
        x -> x * c
        G -> G * c
        n -> n / c
        I -> I / c
        Equivalently, we can transform the MFT with the inverse set of
        transformations
        x -> x / c
        G -> G / c
        n -> n * c
        I -> I * c
        Parameters
        ----------
        T : float
            Run time before sampling.
        sample_size : int
            No. of indpt. trajectories to use to estimate ratio.
        Returns
        -------
        float
            Rescaling factor c (MFT L divided by the mean sampled L).
        list of lists
            The sampled occupancies used for the estimate.
        """
        G = float(self.G)
        ro = float(self.ro)
        re = float(self.re)
        rd = float(self.rd)
        I = float(self.I)
        a = float(self.alpha)
        dt = float(self.dt)
        occupancy = self.parallel_simulate(sample_size, T)
        # sampled lattice length: lattice of len(i) sites spans x in [0, len(i)-1]
        L = np.array([(len(i)-1) for i in occupancy])
        odemodel = ODE2(G, ro, re, rd, I)
        return odemodel.L / L.mean(), occupancy
#end UnitSimulator
@njit
def jit_unit_sim_loop(T, dt, G, ro, re, rd, I, a):
    """Numba-compiled inner loop for UnitSimulator.simulate starting from a
    fresh single-site lattice.
    Parameters
    ----------
    T : int
    dt : float
    G : float
    ro : float
    re : float
    rd : float
    I : float
    a : float
        Cooperativity exponent (alpha).
    Returns
    -------
    list
        Final occupancy at each lattice site.
    """
    counter = 0
    occupancy = [0]
    while (counter * dt) < T:
        # innov
        innov = False
        if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
            occupancy.append(1)
            innov = True
        # obsolescence
        if len(occupancy) > 1 and np.random.rand() < (ro * dt):
            occupancy.pop(0)
        # from right to left b/c of expansion
        for x in range(len(occupancy)-1, -1, -1):
            # expansion (fast approximation)
            if x < (len(occupancy)-1-innov):
                if occupancy[x] and np.random.rand() < (occupancy[x] * re * dt):
                    occupancy[x+1] += 1
            # death (fast approximation)
            if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                occupancy[x] -= 1
            # start up (remember that L is length of lattice on x-axis, s.t. L=0 means lattice has one site)
            # NOTE(review): unlike UnitSimulator.simulate's pure-Python path, there
            # is no unconditional entry when the lattice has a single site -- confirm.
            if np.random.rand() < (G / len(occupancy) * dt):
                occupancy[x] += 1
        counter += 1
    return occupancy
@njit
def jit_unit_sim_loop_with_occupancy(occupancy, T, dt, G, ro, re, rd, I, a):
    """Numba-compiled inner loop continuing dynamics from a given occupancy.
    Parameters
    ----------
    occupancy : numba.typed.ListType[int64]
        Starting occupancy; mutated in place.
    T : int
    dt : float
    G : float
    ro : float
    re : float
    rd : float
    I : float
    a : float
        Cooperativity exponent (alpha).
    Returns
    -------
    numba.typed.ListType[int64]
        The (mutated) occupancy list.
    """
    counter = 0
    while (counter * dt) < T:
        # innov
        innov = False
        if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
            occupancy.append(1)
            innov = True
        # from right to left b/c of expansion
        # NOTE(review): event ordering differs from jit_unit_sim_loop here
        # (death before expansion; obsolescence applied after the sweep) -- confirm
        # whether the difference is intentional.
        for x in range(len(occupancy)-1, -1, -1):
            # death (fast approximation)
            if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                occupancy[x] -= 1
            # expansion (fast approximation)
            if x < (len(occupancy)-1-innov):
                if occupancy[x] and np.random.rand() < (occupancy[x] * re * dt):
                    occupancy[x+1] += 1
            # start up
            if np.random.rand() < (G / len(occupancy) * dt):
                occupancy[x] += 1
        # obsolescence
        if len(occupancy) > 1 and np.random.rand() < (ro * dt):
            occupancy.pop(0)
        counter += 1
    return occupancy
@njit
def jit_unit_sim_loop_no_expand(T, dt, G, ro, re, rd, I, a):
    """Special case for testing. Without expansion.
    Parameters
    ----------
    T : int
    dt : float
    G : float
    ro : float
    re : float
    rd : float
    I : float
    a : float
        Cooperativity exponent (alpha).
    Returns
    -------
    list
        Final occupancy at each lattice site.
    """
    counter = 0
    occupancy = [0]
    while (counter * dt) < T:
        # innov
        innov = False
        if occupancy[-1] and np.random.rand() < (re * I * occupancy[-1]**a * dt):
            occupancy.append(1)
            innov = True
        # obsolescence
        if len(occupancy) > 1 and np.random.rand() < (ro * dt):
            occupancy.pop(0)
        # from right to left b/c of expansion
        for x in range(len(occupancy)-1, -1, -1):
            # death (fast approximation)
            if occupancy[x] and np.random.rand() < (occupancy[x] * rd * dt):
                occupancy[x] -= 1
            # start up (remember that L is length of lattice on x-axis, s.t. L=0 means lattice has one site)
            # NOTE(review): the entry rate divides by len(occupancy)-1 here but by
            # len(occupancy) in the other loops -- confirm which normalization is intended.
            if len(occupancy)==1:
                occupancy[x] += 1
            elif np.random.rand() < (G / (len(occupancy)-1) * dt):
                occupancy[x] += 1
        counter += 1
    return occupancy
| 3,755 | 28,240 | 361 |
2b5583d8b3f148600537d50261325fbfe0630646 | 4,277 | py | Python | pyqt2waybinding/tests/test_BindingEndpoint.py | jkokorian/pyqt2waybinding | fb1fb84f55608cfbf99c6486650100ba81743117 | [
"MIT"
] | 5 | 2015-05-26T17:21:39.000Z | 2020-12-07T22:13:45.000Z | pyqt2waybinding/tests/test_BindingEndpoint.py | jkokorian/pyqt2waybinding | fb1fb84f55608cfbf99c6486650100ba81743117 | [
"MIT"
] | null | null | null | pyqt2waybinding/tests/test_BindingEndpoint.py | jkokorian/pyqt2waybinding | fb1fb84f55608cfbf99c6486650100ba81743117 | [
"MIT"
] | 2 | 2018-05-30T12:26:23.000Z | 2020-12-07T23:51:57.000Z | import unittest
from pyqt2waybinding import BindingEndpoint
from PyQt4.QtCore import QObject, pyqtSignal
class RealPropertyModel(QObject):
"""
A simple model class for testing
"""
valueChanged = pyqtSignal(int)
@property
@value.setter
if __name__ == '__main__':
unittest.main()
| 30.119718 | 75 | 0.640402 | import unittest
from pyqt2waybinding import BindingEndpoint
from PyQt4.QtCore import QObject, pyqtSignal
class RealPropertyModel(QObject):
    """Test model exposing its value through a true Python property."""
    valueChanged = pyqtSignal(int)
    def __init__(self):
        QObject.__init__(self)
        self.__value = 0
    @property
    def value(self):
        return self.__value
    @value.setter
    def value(self, value):
        # emit only when the stored value actually changes
        if self.__value == value:
            return
        self.__value = value
        self.valueChanged.emit(value)
class GetterSetterPairModel(QObject):
    """Test model exposing its value through a value()/setValue() pair."""
    valueChanged = pyqtSignal(int)
    def __init__(self):
        QObject.__init__(self)
        self.__value = 0
    def value(self):
        return self.__value
    def setValue(self, value):
        # emit only when the stored value actually changes
        if self.__value == value:
            return
        self.__value = value
        self.valueChanged.emit(value)
class VirtualPropertyModel(QObject):
    """Test model exposing its value through a getValue()/setValue() pair."""
    valueChanged = pyqtSignal(int)
    def __init__(self):
        QObject.__init__(self)
        self.__value = 0
    def getValue(self):
        return self.__value
    def setValue(self, value):
        # emit only when the stored value actually changes
        if self.__value == value:
            return
        self.__value = value
        self.valueChanged.emit(value)
class Test_Observer(unittest.TestCase):
    """Exercise BindingEndpoint.forProperty against the three model flavors."""
    def _check_endpoint(self, model, prop, read, getter_name=None, setter_name=None):
        """Build an endpoint for `prop` on `model` and verify the get/set round trip."""
        endpoint = BindingEndpoint.forProperty(model, prop, useGetter=True)
        assert isinstance(endpoint, BindingEndpoint)
        self.assertTrue(endpoint.getter is not None)
        self.assertTrue(endpoint.setter is not None)
        if getter_name is not None:
            self.assertTrue(endpoint.getter.__name__ == getter_name)
        if setter_name is not None:
            self.assertTrue(endpoint.setter.__name__ == setter_name)
        endpoint.setter(10)
        self.assertTrue(read(model) == 10)
        self.assertTrue(endpoint.getter() == 10)
    def test_forProperty_realProperty(self):
        self._check_endpoint(RealPropertyModel(), "value", lambda m: m.value)
    def test_forProperty_getterSetterPairGet(self):
        self._check_endpoint(VirtualPropertyModel(), "getValue", lambda m: m.getValue(),
                             getter_name="getValue", setter_name="setValue")
    def test_forProperty_getterSetterPairSet(self):
        self._check_endpoint(VirtualPropertyModel(), "setValue", lambda m: m.getValue(),
                             getter_name="getValue", setter_name="setValue")
    def test_forProperty_getterSetterPairWithoutExplicitGet(self):
        self._check_endpoint(GetterSetterPairModel(), "value", lambda m: m.value(),
                             getter_name="value", setter_name="setValue")
    def test_forProperty_virtualProperty(self):
        self._check_endpoint(VirtualPropertyModel(), "value", lambda m: m.getValue(),
                             getter_name="getValue", setter_name="setValue")
if __name__ == '__main__':
unittest.main()
| 3,358 | 297 | 295 |
0f371accbeac4ce12556a113f97e31cb37cde7d3 | 839 | py | Python | blog/forms.py | netocraft/web-netocraft | 87e35ea1ea23b8a75eeabb33fc10aac57cccb2ac | [
"Unlicense"
] | null | null | null | blog/forms.py | netocraft/web-netocraft | 87e35ea1ea23b8a75eeabb33fc10aac57cccb2ac | [
"Unlicense"
] | null | null | null | blog/forms.py | netocraft/web-netocraft | 87e35ea1ea23b8a75eeabb33fc10aac57cccb2ac | [
"Unlicense"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from .models import Post
| 33.56 | 89 | 0.649583 | from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from .models import Post
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a blog Post (text body plus image)."""
    class Meta:
        model = Post
        fields = ('texto', 'imagen')
        # custom widget: textarea with a placeholder prompt
        widgets = {
            'texto': forms.Textarea(
                attrs={'placeholder': 'Ingrese el contenido de su post aquí'}),
        }
class UserRegisterForm(UserCreationForm):
    """User sign-up form: username, email and double password entry."""
    email = forms.EmailField()
    # password fields with localized (Spanish) labels
    password1 = forms.CharField(label='Contraseña', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Confirmar Contraseña', widget=forms.PasswordInput)
    class Meta:
        model = User
        fields = ['username','email','password1','password2']
        # suppress Django's default help text on every declared field
        help_texts = { k:"" for k in fields }
c84a835a747fe252d6862ce4c7060542df5b47b7 | 2,190 | py | Python | python/assets/list_assets.py | KennaSecurity/blog_samples | be98eb2ef0a02d41c4cf2f730092b89c99ddd49e | [
"MIT"
] | 1 | 2022-01-25T19:29:07.000Z | 2022-01-25T19:29:07.000Z | python/assets/list_assets.py | KennaSecurity/blog_samples | be98eb2ef0a02d41c4cf2f730092b89c99ddd49e | [
"MIT"
] | 2 | 2021-02-17T20:45:12.000Z | 2021-05-06T20:13:36.000Z | python/assets/list_assets.py | KennaSecurity/blog_samples | be98eb2ef0a02d41c4cf2f730092b89c99ddd49e | [
"MIT"
] | 1 | 2021-12-22T22:18:43.000Z | 2021-12-22T22:18:43.000Z | # Lists the assets' ID, hostname and note for active and inactive assets.
# Displays the first 500 assets, because there is no pagination code.
import os
import sys
import requests
status_opt = "both"
if len(sys.argv) > 1:
opt = sys.argv[1]
if (opt == "active") or (opt == "inactive"):
status_opt = opt
else:
print(sys.argv[0] + " [active | inactive]")
print("If option not specified that both active and inactive stati are displayed.")
sys.exit(1)
print("List Assets")
# KENNA_API_KEY is an environment variable.
api_key = os.getenv('KENNA_API_KEY')
if api_key is None:
print("Environment variable KENNA_API_KEY is non-existent")
sys.exit(1)
# HTTP header.
headers = {'Accept': 'application/json',
'X-Risk-Token': api_key}
# List assests depending on the URL. Context is displayed.
# List active assets.
list_active_assets_url = "https://api.kennasecurity.com/assets"
if (status_opt == "both") or (status_opt == "active"):
list_assets(list_active_assets_url, "Active Assets")
print("")
# List inactive assets.
if (status_opt == "both") or (status_opt == "inactive"):
list_inactive_assets_url = list_active_assets_url + "?filter=inactive"
list_assets(list_inactive_assets_url, "Inactive Assets")
| 30 | 96 | 0.654795 | # Lists the assets' ID, hostname and note for active and inactive assets.
# Displays the first 500 assets, because there is no pagination code.
import os
import sys
import requests
# Optional CLI argument selects which asset status to list; default is both.
status_opt = "both"
if len(sys.argv) > 1:
    opt = sys.argv[1]
    if (opt == "active") or (opt == "inactive"):
        status_opt = opt
    else:
        # unrecognized argument: print usage and exit
        print(sys.argv[0] + " [active | inactive]")
        print("If option not specified that both active and inactive stati are displayed.")
        sys.exit(1)
print("List Assets")
# KENNA_API_KEY is an environment variable.
api_key = os.getenv('KENNA_API_KEY')
if api_key is None:
    print("Environment variable KENNA_API_KEY is non-existent")
    sys.exit(1)
# HTTP header; the API key is passed via the X-Risk-Token header.
headers = {'Accept': 'application/json',
           'X-Risk-Token': api_key}
def sortFunc(entry):
    """Sort key for asset dicts: order by the asset's numeric ID."""
    return entry["id"]
# List assests depending on the URL. Context is displayed.
def list_assets(url, context):
    """Fetch assets from `url` and print one line per asset, sorted by ID.

    `context` is a human-readable label (e.g. "Active Assets") used in the
    output headers. Exits the process on any non-200 HTTP response.
    """
    resp = requests.get(url, headers=headers)
    if resp.status_code != 200:
        print("List Asset Error: " + str(resp.status_code))
        sys.exit(1)
    assets = resp.json()['assets']
    print(context)
    # One output line per asset: ID, status, hostname, and an optional note.
    assets.sort(key=sortFunc)
    for asset in assets:
        if asset['id'] is None:
            continue
        hostname = asset['hostname'] if asset['hostname'] is not None else "no hostname"
        if asset['notes'] is None or asset['notes'] == "":
            notes = ""
        else:
            notes = " : " + asset['notes']
        print(str(asset['id']) + " : " + asset['status'] + " ; " + hostname + notes)
    print("Number of " + context + ": " + str(len(assets)))
# List active assets. Note: per the file header, only the first page of
# results is shown -- there is no pagination here.
list_active_assets_url = "https://api.kennasecurity.com/assets"
if (status_opt == "both") or (status_opt == "active"):
    list_assets(list_active_assets_url, "Active Assets")
    print("")
# List inactive assets (same endpoint with an inactive filter).
if (status_opt == "both") or (status_opt == "inactive"):
    list_inactive_assets_url = list_active_assets_url + "?filter=inactive"
    list_assets(list_inactive_assets_url, "Inactive Assets")
8cf4386e7aeda8ff7a0084904fafa63f362b749e | 750 | py | Python | yfinance/index_pain.py | piyushkp/yfinance | e5e150c5bfdb15f3ce135878b6735e0753d6d410 | [
"Apache-2.0"
] | null | null | null | yfinance/index_pain.py | piyushkp/yfinance | e5e150c5bfdb15f3ce135878b6735e0753d6d410 | [
"Apache-2.0"
] | null | null | null | yfinance/index_pain.py | piyushkp/yfinance | e5e150c5bfdb15f3ce135878b6735e0753d6d410 | [
"Apache-2.0"
] | null | null | null | import yfinance as yf
from maxpain import max_pain, get_current_price
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
if __name__ == "__main__":
get_index_maxpain() | 39.473684 | 169 | 0.618667 | import yfinance as yf
from maxpain import max_pain, get_current_price
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_index_maxpain():
for symbol in ["QQQ", "SPY", "^ndx", "^spx", "^RUT", "IWM", "DIA"]:
tk = yf.Ticker(symbol)
exps = tk.options
cp = get_current_price(symbol)
for index, tuple in enumerate(exps):
mp, mp_COI, mp_POI, credit, debit = max_pain(date=exps[index], symbol=symbol)
print("{0}={1} Date={2} MaxPain={3} call_OI={4} put_OI={5} credit={6} debit={7}".format(symbol, cp, exps[index], mp, mp_COI, mp_POI, credit, debit))
if index == 12:
break
if __name__ == "__main__":
get_index_maxpain() | 526 | 0 | 23 |
44ee9ea39b7620ad720f2762a3052fceb5432abd | 318 | py | Python | skills/models.py | Vicky-Rathod/django-blog | 1c15210376c9e365052dd5c106dbd903a9717bba | [
"MIT"
] | null | null | null | skills/models.py | Vicky-Rathod/django-blog | 1c15210376c9e365052dd5c106dbd903a9717bba | [
"MIT"
] | null | null | null | skills/models.py | Vicky-Rathod/django-blog | 1c15210376c9e365052dd5c106dbd903a9717bba | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
User = settings.AUTH_USER_MODEL
| 35.333333 | 84 | 0.779874 | from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
User = settings.AUTH_USER_MODEL
class Skill(models.Model):
user = models.ForeignKey(User, verbose_name=_("User"), on_delete=models.CASCADE)
name = models.CharField(_("name"), max_length=150)
| 0 | 145 | 22 |
e5f38a919bae7a1c95e5e5cc45e674fec4d9d741 | 185 | py | Python | funcion-con-arg.py | josaphatsv/EjercicioPython | 269bf5552bc926917ba3e54477e735af4f9c1830 | [
"MIT"
] | null | null | null | funcion-con-arg.py | josaphatsv/EjercicioPython | 269bf5552bc926917ba3e54477e735af4f9c1830 | [
"MIT"
] | null | null | null | funcion-con-arg.py | josaphatsv/EjercicioPython | 269bf5552bc926917ba3e54477e735af4f9c1830 | [
"MIT"
] | null | null | null | #funcion con parametros
funcion_arg("Josaphat","Lopez") | 26.428571 | 45 | 0.713514 | #funcion con parametros
def funcion_arg(nombre,apellido):
print("El nombre recibido es:", nombre)
print("El nombre recibido es:", apellido)
funcion_arg("Josaphat","Lopez") | 102 | 0 | 22 |
683051e7b71efe76d37dfdde771c88df3061b8f8 | 1,284 | py | Python | releng_tool/util/platform.py | releng-tool/releng-tool | cd8728f35a7bdaf6ef90fd019e8c33bc5da8b265 | [
"BSD-2-Clause"
] | 7 | 2019-04-06T21:21:22.000Z | 2021-12-10T04:07:20.000Z | releng_tool/util/platform.py | releng-tool/releng-tool | cd8728f35a7bdaf6ef90fd019e8c33bc5da8b265 | [
"BSD-2-Clause"
] | 1 | 2019-10-01T20:03:10.000Z | 2019-10-02T20:28:00.000Z | releng_tool/util/platform.py | releng-tool/releng-tool | cd8728f35a7bdaf6ef90fd019e8c33bc5da8b265 | [
"BSD-2-Clause"
] | 1 | 2021-07-23T17:00:57.000Z | 2021-07-23T17:00:57.000Z | # -*- coding: utf-8 -*-
# Copyright 2018-2021 releng-tool
from releng_tool.util.log import err
import sys
def platform_exit(msg=None, code=None):
"""
exit out of the releng-tool process
Provides a convenience method to help invoke a system exit call without
needing to explicitly use ``sys``. A caller can provide a message to
indicate the reason for the exit. The provide message will output to
standard error. The exit code, if not explicit set, will vary on other
arguments. If a message is provided to this call, the default exit code will
be ``1``. If no message is provided, the default exit code will be ``0``.
In any case, if the caller explicitly sets a code value, the provided code
value will be used.
An example when using in the context of script helpers is as follows:
.. code-block:: python
releng_exit('there was an error performing this task')
Args:
msg (optional): error message to print
code (optional): exit code; defaults to 0 if no message or defaults to 1
if a message is set
Raises:
SystemExit: always raised
"""
if msg:
err(msg)
if code is None:
code = 1
elif code is None:
code = 0
sys.exit(code)
| 30.571429 | 80 | 0.661215 | # -*- coding: utf-8 -*-
# Copyright 2018-2021 releng-tool
from releng_tool.util.log import err
import sys
def platform_exit(msg=None, code=None):
    """
    exit out of the releng-tool process

    Terminates the process through ``sys.exit``. When a message is given it
    is printed to standard error via the ``err`` logger and the default exit
    code becomes ``1``; without a message the default is ``0``. An explicitly
    provided ``code`` always takes precedence.

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        releng_exit('there was an error performing this task')

    Args:
        msg (optional): error message to print
        code (optional): exit code; defaults to 0 if no message or defaults to 1
            if a message is set

    Raises:
        SystemExit: always raised
    """
    if msg:
        err(msg)
    if code is None:
        # implicit code depends on whether an error message was reported
        code = 1 if msg else 0
    sys.exit(code)
| 0 | 0 | 0 |
052e5a56f7224b1ab70fb450a72410c307d25822 | 926 | py | Python | tests/test_hyperspherical_uniform.py | macio232/pvae | 391a4c634d565d6a7dc60915fabf02ab77d3cc68 | [
"MIT"
] | 95 | 2019-04-30T12:36:00.000Z | 2022-03-14T13:59:52.000Z | tests/test_hyperspherical_uniform.py | thanosvlo/Causal-Future-Prediction-in-a-Minkowski-Space-Time | 0e0539a122484ce9869aca9acd436a24c2597908 | [
"MIT"
] | 8 | 2020-06-18T12:15:44.000Z | 2022-03-27T00:04:02.000Z | tests/test_hyperspherical_uniform.py | thanosvlo/Causal-Future-Prediction-in-a-Minkowski-Space-Time | 0e0539a122484ce9869aca9acd436a24c2597908 | [
"MIT"
] | 29 | 2019-05-02T09:12:35.000Z | 2022-01-24T11:31:45.000Z | import torch
import math
import unittest
from pvae.distributions import HypersphericalUniform
if __name__ == '__main__':
unittest.main() | 28.060606 | 96 | 0.632829 | import torch
import math
import unittest
from pvae.distributions import HypersphericalUniform
class TestHypersphericalUniform(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
torch.manual_seed(1234)
self.dim = 8
self.d = HypersphericalUniform(self.dim)
def test_sample(self):
x = self.d.rsample(torch.Size([5]))
torch.testing.assert_allclose(x.pow(2).sum(-1), torch.ones(torch.Size([*x.shape[:-1]])))
def test_log_prob(self):
d = HypersphericalUniform(2)
x = d.sample(torch.Size([5]))
logp = d.log_prob(x)
torch.testing.assert_allclose(logp, - (math.log(4) + math.log(math.pi)))
def test_rsample(self):
x = self.d.rsample(torch.Size([5]))
y = torch.tensor(2., requires_grad=True)
loss = (x - y).pow(2).sum()
loss.backward()
if __name__ == '__main__':
unittest.main() | 583 | 174 | 23 |
131e2c2283a543f9e3095db3e3f2b10e86568129 | 3,002 | py | Python | alisu/plugins/start.py | aksr-aashish/alisurobot | 0b0c05ea74ba6126ca9b328de16c808c72be365d | [
"MIT"
] | 9 | 2021-08-17T18:30:13.000Z | 2021-10-02T09:22:34.000Z | alisu/plugins/start.py | aksr-aashish/alisurobot | 0b0c05ea74ba6126ca9b328de16c808c72be365d | [
"MIT"
] | 1 | 2021-12-20T19:48:44.000Z | 2021-12-20T19:48:44.000Z | alisu/plugins/start.py | aksr-aashish/alisurobot | 0b0c05ea74ba6126ca9b328de16c808c72be365d | [
"MIT"
] | 8 | 2021-08-17T21:14:13.000Z | 2022-01-29T23:34:14.000Z | from typing import Union
from pyrogram import Client, filters
from pyrogram.types import (
CallbackQuery,
InlineKeyboardButton,
InlineKeyboardMarkup,
Message,
)
import alisu
from alisu.config import prefix
from alisu.utils import commands
from alisu.utils.localization import use_chat_lang
bot_repo_link: str = "https://github.com/iiiiii1wepfj/alisurobot"
bot_chat: str = "@AlisuChat"
# Using a low priority group so deeplinks will run before this and stop the propagation.
@Client.on_message(
filters.command("start", prefix) & ~filters.regex("^/start rules_"), group=2
)
@Client.on_callback_query(filters.regex("^start_back$"))
@use_chat_lang()
@Client.on_callback_query(filters.regex("^infos$"))
@use_chat_lang()
commands.add_command("start", "general")
| 29.145631 | 88 | 0.552632 | from typing import Union
from pyrogram import Client, filters
from pyrogram.types import (
CallbackQuery,
InlineKeyboardButton,
InlineKeyboardMarkup,
Message,
)
import alisu
from alisu.config import prefix
from alisu.utils import commands
from alisu.utils.localization import use_chat_lang
bot_repo_link: str = "https://github.com/iiiiii1wepfj/alisurobot"
bot_chat: str = "@AlisuChat"
# Using a low priority group so deeplinks will run before this and stop the propagation.
@Client.on_message(
filters.command("start", prefix) & ~filters.regex("^/start rules_"), group=2
)
@Client.on_callback_query(filters.regex("^start_back$"))
@use_chat_lang()
async def start(c: Client, m: Union[Message, CallbackQuery], strings):
if isinstance(m, CallbackQuery):
msg = m.message
method = msg.edit_text
else:
msg = m
method = msg.reply_text
if msg.chat.type == "private":
keyboard = InlineKeyboardMarkup(
inline_keyboard=[
[
InlineKeyboardButton(
strings("commands_btn"), callback_data="commands"
),
InlineKeyboardButton(strings("infos_btn"), callback_data="infos"),
],
[
InlineKeyboardButton(
strings("language_btn"), callback_data="chlang"
),
InlineKeyboardButton(
strings("add_chat_btn"),
url=f"https://t.me/{c.me.username}?startgroup=new",
),
],
]
)
await method(
strings("private").format(myname=c.me.first_name),
reply_markup=keyboard,
)
else:
keyboard = InlineKeyboardMarkup(
inline_keyboard=[
[
InlineKeyboardButton(
strings("start_chat"),
url=f"https://t.me/{c.me.username}?start=start",
)
]
]
)
await method(
strings("group").format(myname=c.me.first_name),
reply_markup=keyboard,
)
@Client.on_callback_query(filters.regex("^infos$"))
@use_chat_lang()
async def infos(c: Client, m: CallbackQuery, strings):
res = strings("info_page").format(
myname=c.me.first_name,
version=alisu.__version__,
version_code=c.version_code,
codelink=bot_repo_link,
bot_chat=bot_chat,
)
keyboard = InlineKeyboardMarkup(
inline_keyboard=[
[
InlineKeyboardButton(
strings("back_btn", context="general"),
callback_data="start_back",
)
]
]
)
await m.message.edit_text(
res,
reply_markup=keyboard,
disable_web_page_preview=True,
)
commands.add_command("start", "general")
| 2,172 | 0 | 44 |
35057241a4c1315f9063be95186b4e4aed92a7c0 | 327 | py | Python | list_all_paths.py | tw7613781/leetcode | 954a7288c21f315e664927b707b800d4d1d2bd6f | [
"Apache-2.0"
] | null | null | null | list_all_paths.py | tw7613781/leetcode | 954a7288c21f315e664927b707b800d4d1d2bd6f | [
"Apache-2.0"
] | 1 | 2021-05-11T19:27:47.000Z | 2021-05-11T19:27:47.000Z | list_all_paths.py | tw7613781/leetcode | 954a7288c21f315e664927b707b800d4d1d2bd6f | [
"Apache-2.0"
] | null | null | null | import os
import sys
if __name__ == '__main__':
params = sys.argv[1]
print(params)
all_files(params) | 21.8 | 45 | 0.605505 | import os
import sys
def all_files(path):
for subdir in os.listdir(path):
sub_path = os.path.join(path, subdir)
if os.path.isdir(sub_path):
all_files(sub_path)
else:
print(sub_path)
if __name__ == '__main__':
params = sys.argv[1]
print(params)
all_files(params) | 191 | 0 | 23 |
db7523af822a7076117fd0f451d020be6f7c841b | 500 | py | Python | models/make_skip_gram_for_image.py | centurio1987/NewbiesDeepTagging | 4dadbd1d580f5a1b3e89b299ddcdb590e97c95fe | [
"MIT"
] | null | null | null | models/make_skip_gram_for_image.py | centurio1987/NewbiesDeepTagging | 4dadbd1d580f5a1b3e89b299ddcdb590e97c95fe | [
"MIT"
] | 5 | 2017-03-24T06:36:57.000Z | 2017-03-24T06:38:32.000Z | models/make_skip_gram_for_image.py | centurio1987/NewbiesDeepTagging | 4dadbd1d580f5a1b3e89b299ddcdb590e97c95fe | [
"MIT"
] | null | null | null | import tensorflow as tf
def make_skip_gram_for_image(images_captions_pairs):
'''
images_captions_pairs: [[image, captions], ...]
image: [width, height, [channel]]
captions: [[word1, word2, word3], [word1, word2, word3, word4], ...]
return: [(image, word1), (image, word2), ....]
'''
image_word_pairs = []
for image, captions in images_captions_pairs:
for word in captions:
images_captions_pairs.append((image, word))
return image_word_pairs
| 29.411765 | 72 | 0.648 | import tensorflow as tf
def make_skip_gram_for_image(images_captions_pairs):
'''
images_captions_pairs: [[image, captions], ...]
image: [width, height, [channel]]
captions: [[word1, word2, word3], [word1, word2, word3, word4], ...]
return: [(image, word1), (image, word2), ....]
'''
image_word_pairs = []
for image, captions in images_captions_pairs:
for word in captions:
images_captions_pairs.append((image, word))
return image_word_pairs
| 0 | 0 | 0 |
f6679ac68b5663fc7e4d59d821de4c9ad9b79ba1 | 1,003 | py | Python | zerver/migrations/0264_migrate_is_announcement_only.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | [
"Apache-2.0"
] | 1 | 2020-03-17T14:58:50.000Z | 2020-03-17T14:58:50.000Z | zerver/migrations/0264_migrate_is_announcement_only.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | [
"Apache-2.0"
] | null | null | null | zerver/migrations/0264_migrate_is_announcement_only.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | [
"Apache-2.0"
] | 1 | 2020-07-16T06:00:10.000Z | 2020-07-16T06:00:10.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-01-25 23:47
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
| 33.433333 | 93 | 0.742772 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-01-25 23:47
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def upgrade_stream_post_policy(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
STREAM_POST_POLICY_EVERYONE = 1
STREAM_POST_POLICY_ADMINS = 2
Stream = apps.get_model('zerver', 'Stream')
Stream.objects.filter(is_announcement_only=False) \
.update(stream_post_policy=STREAM_POST_POLICY_EVERYONE)
Stream.objects.filter(is_announcement_only=True) \
.update(stream_post_policy=STREAM_POST_POLICY_ADMINS)
class Migration(migrations.Migration):
dependencies = [
('zerver', '0263_stream_stream_post_policy'),
]
operations = [
migrations.RunPython(upgrade_stream_post_policy,
reverse_code=migrations.RunPython.noop),
]
| 428 | 252 | 46 |
7e1928c5b40d05dc8c993376f8e62052a703ce33 | 72 | py | Python | scrubadub/filth/gb_nino.py | Jomcgi/scrubadub | 99ec3bf7027cd6c2a7c6b8f0dd8c7bd9fc12ee7d | [
"Apache-2.0"
] | null | null | null | scrubadub/filth/gb_nino.py | Jomcgi/scrubadub | 99ec3bf7027cd6c2a7c6b8f0dd8c7bd9fc12ee7d | [
"Apache-2.0"
] | null | null | null | scrubadub/filth/gb_nino.py | Jomcgi/scrubadub | 99ec3bf7027cd6c2a7c6b8f0dd8c7bd9fc12ee7d | [
"Apache-2.0"
] | null | null | null | from .base import Filth
| 12 | 25 | 0.694444 | from .base import Filth
class GBNinoFilth(Filth):
type = 'gbnino'
| 0 | 24 | 23 |
17c92304dacdef48995365b58ad42144870912c6 | 7,013 | py | Python | app.py | frank870622/NCKU_NLP_2018_industry3 | d9233861eca91c3c311e226a646700d1a746c2be | [
"MIT"
] | 1 | 2018-06-11T07:35:57.000Z | 2018-06-11T07:35:57.000Z | app.py | frank870622/NCKU_NLP_2018_industry3 | d9233861eca91c3c311e226a646700d1a746c2be | [
"MIT"
] | null | null | null | app.py | frank870622/NCKU_NLP_2018_industry3 | d9233861eca91c3c311e226a646700d1a746c2be | [
"MIT"
] | null | null | null | import graphing
import random
from flask import Flask, abort, request
from imgurpython import ImgurClient
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
import tempfile, os
from config import client_id, client_secret, album_id, access_token, refresh_token, line_channel_access_token, line_channel_secret
###above for import package
app = Flask(__name__)
line_bot_api = LineBotApi(line_channel_access_token)
handler = WebhookHandler(line_channel_secret)
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
@app.route("/callback", methods=['POST'])
@handler.add(MessageEvent, message=(ImageMessage, TextMessage))
if __name__ == '__main__':
app.run() | 38.322404 | 131 | 0.522173 | import graphing
import random
from flask import Flask, abort, request
from imgurpython import ImgurClient
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
import tempfile, os
from config import client_id, client_secret, album_id, access_token, refresh_token, line_channel_access_token, line_channel_secret
###above for import package
app = Flask(__name__)
line_bot_api = LineBotApi(line_channel_access_token)
handler = WebhookHandler(line_channel_secret)
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
# print("body:",body)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'ok'
@handler.add(MessageEvent, message=(ImageMessage, TextMessage))
def handle_message(event):
if isinstance(event.message, TextMessage):
if '教學' in event.message.text:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='支援的輸入:\n師生數量\n註冊率\n就業比例\n學測分數\n指考分數'))
return 0
elif '師生數量' in event.message.text:
img = graphing.drawing('師生數量')
ext = 'png'
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
img.save(tf, "PNG")
img.close()
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
try:
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
config = {
'album': album_id,
'name': 'Catastrophe!',
'title': 'Catastrophe!',
'description': 'Cute kitten being cute on '
}
path = os.path.join('static', 'tmp', dist_name)
image = client.upload_from_path(path, config=config, anon=False)
os.remove(path)
print(path)
image_message = ImageSendMessage(
original_content_url=image['link'],
preview_image_url=image['link']
)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text='以下是您所查詢的資料'),
image_message])
except:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='操作失敗,請重新輸入'))
return 0
elif '註冊率' in event.message.text:
text = graphing.drawing('註冊率')
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="以下是您所查詢的資料\n" + text))
return 0
elif '就業比例' in event.message.text:
img = graphing.drawing('就業比例')
ext = 'png'
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
img.save(tf, "PNG")
img.close()
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
try:
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
config = {
'album': album_id,
'name': 'Catastrophe!',
'title': 'Catastrophe!',
'description': 'Cute kitten being cute on '
}
path = os.path.join('static', 'tmp', dist_name)
image = client.upload_from_path(path, config=config, anon=False)
os.remove(path)
print(path)
image_message = ImageSendMessage(
original_content_url=image['link'],
preview_image_url=image['link']
)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text='以下是您所查詢的資料'),
image_message])
except:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='操作失敗,請重新輸入'))
return 0
elif '學測分數' in event.message.text:
img = graphing.drawing('學測分數')
ext = 'png'
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
img.save(tf, "PNG")
img.close()
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
try:
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
config = {
'album': album_id,
'name': 'Catastrophe!',
'title': 'Catastrophe!',
'description': 'Cute kitten being cute on '
}
path = os.path.join('static', 'tmp', dist_name)
image = client.upload_from_path(path, config=config, anon=False)
os.remove(path)
print(path)
image_message = ImageSendMessage(
original_content_url=image['link'],
preview_image_url=image['link']
)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text='以下是您所查詢的資料'),
image_message])
except:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='操作失敗,請重新輸入'))
return 0
elif '指考分數' in event.message.text:
text = graphing.drawing('指考分數')
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="以下是您所查詢的資料\n" + text))
return 0
else:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="不好意思,您的輸入沒有支援的內容可以呈現\n"))
return 0
if __name__ == '__main__':
app.run() | 6,475 | 0 | 46 |
4ce77535cfa10319f7a82eaef29decd2ad989b5f | 292 | py | Python | dashboards/superuser/views.py | Apubra/django-boilerplate | 1936a866a3b8a4cf93fbc631bd15635beef9d62d | [
"MIT"
] | null | null | null | dashboards/superuser/views.py | Apubra/django-boilerplate | 1936a866a3b8a4cf93fbc631bd15635beef9d62d | [
"MIT"
] | null | null | null | dashboards/superuser/views.py | Apubra/django-boilerplate | 1936a866a3b8a4cf93fbc631bd15635beef9d62d | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.views import View
from django.http import HttpResponse
from django.contrib import messages, auth | 36.5 | 81 | 0.787671 | from django.shortcuts import render, redirect
from django.views import View
from django.http import HttpResponse
from django.contrib import messages, auth
class SuperUserView(View):
def get(self, request):
return render(request, 'dashboards/super user/super user dashboard.html') | 84 | 5 | 49 |
f89e6df1b5fbdb5a489c604e484b07e20272fd89 | 1,548 | py | Python | test.py | amnonpaz/dummy_application | dce83d35328150c912c1c73217cee9749b78e7e7 | [
"MIT"
] | null | null | null | test.py | amnonpaz/dummy_application | dce83d35328150c912c1c73217cee9749b78e7e7 | [
"MIT"
] | null | null | null | test.py | amnonpaz/dummy_application | dce83d35328150c912c1c73217cee9749b78e7e7 | [
"MIT"
] | null | null | null | import sys
import os
import json
testCasesFileName = parseInputArgs(sys.argv)
test = Test("dummy_app")
test.loadFile(testCasesFileName)
test.run()
| 27.157895 | 89 | 0.553618 | import sys
import os
import json
class Test:
def __init__(self, appName = "dummy_app"):
self.appName = appName
def loadFile(self, fileName):
try:
self.testCases = json.load(open(fileName))
except IOError, e:
print("Error: %s" % str(e))
sys.exit(0)
def executeTest(self, test):
return os.system("./" + self.appName + " " + test["parameters"] + " > /dev/null")
def checkTestResult(self, test):
expected = test["result"].lower()
if (expected != "pass") and (expected != "fail"):
print("Error: Test results can only be 'fail' or 'pass'")
return True
return (self.executeTest(test) == 0) ^ (expected == "pass")
def run(self):
try:
for test in self.testCases:
testStr = "+ Test \"" + test["name"] + "\": "
if self.checkTestResult(test):
print(testStr + "FAILED")
return
print(testStr + "PASSED")
except AttributeError, attrErr:
print("Error: %s" % str(attrErr));
except KeyError, keyErr:
print("Error: No Attribute %s in test Json" % str(keyErr));
def parseInputArgs(argv):
if len(argv) > 1:
testCasesFileName = argv[1]
else:
print("Error: Not enough input arguments")
sys.exit(0)
return testCasesFileName
testCasesFileName = parseInputArgs(sys.argv)
test = Test("dummy_app")
test.loadFile(testCasesFileName)
test.run()
| 1,227 | -10 | 181 |
baff52e4c9736023b639c9e087c0ac23b82bea9b | 2,089 | py | Python | nuro_matching.py | bhaprayan/keras-yolo3 | 9ad5ff761f3baf9042a17327c0149591ce1307c8 | [
"MIT"
] | null | null | null | nuro_matching.py | bhaprayan/keras-yolo3 | 9ad5ff761f3baf9042a17327c0149591ce1307c8 | [
"MIT"
] | null | null | null | nuro_matching.py | bhaprayan/keras-yolo3 | 9ad5ff761f3baf9042a17327c0149591ce1307c8 | [
"MIT"
] | null | null | null | import json
import shlex, subprocess
import re
train_file = open('updated_train_nuro.txt').readlines()
uuid_file = open('updated_uuid_nuro.txt').readlines()
qa_reports = open('qa_reports.txt').read().split()
subtasks = []
for report in qa_reports:
# adding try except since some reports differ in structure i.e.
# TODO: why the difference?
# NOTE: reading out subtasks from here since the qa_reports file doesn't have the subtask ID
# https://s3-us-west-1.amazonaws.com/6876.qa.report.linking/20190115_160816_00023_2646.0_2676.0_linking/qa_report.json
# https://scale.ai/corp/compare_attempts?subtask=5cfe774faaf51a0e1c8570a7&customerTaskResponse=https://s3-us-west-1.amazonaws.com/6876.qa.report.linking/20190115_160816_00023_2646.0_2676.0_linking/qa_report.json
try:
subtask_idx = report.split('?')[1].split('&')[0].split('=')[1] # extract subtask id's from qa_reports list
subtasks.append(subtask_idx)
except:
continue
for subtask_idx in subtasks[:1]:
SUBTASK_ID = subtask_idx
data = grepper(train_file, SUBTASK_ID)
fn_train = 'filtered_subtask/subtask_'+SUBTASK_ID+'_train.txt'
write_filtered(data, fn_train)
fn_uuid = 'filtered_subtask/subtask_'+SUBTASK_ID+'_uuid.txt'
data = grepper(uuid_file, SUBTASK_ID)
write_filtered(data, fn_uuid)
# extract object id from qa reports file
# grep uuid file for subtask id containing the object
# extract train + uuid lines from this subtask i:
# run model on the filtered subtask
# extract frames corresponding to matching object id
# grep SUBTASK_ID updated_train_nuro.txt > filtered_subtask/subtask_SUBTASK_ID_train.txt
# grep SUBTASK_ID updated_uuid_nuro.txt > filtered_subtask/subtask_SUBTASK_ID_uuid.txt
# f['nuro_labels'][0]['frames'][0]['annotations'][0]['scale_id']
| 37.981818 | 215 | 0.723313 | import json
import shlex, subprocess
import re
train_file = open('updated_train_nuro.txt').readlines()
uuid_file = open('updated_uuid_nuro.txt').readlines()
def grepper(fn, subtask_idx):
lines = []
for l in fn:
if re.search(subtask_idx,l):
lines.append(l)
return lines
def write_filtered(data, fn):
with open(fn, 'w') as f:
for line in data:
f.write("%s" % line)
f.close()
qa_reports = open('qa_reports.txt').read().split()
subtasks = []
for report in qa_reports:
# adding try except since some reports differ in structure i.e.
# TODO: why the difference?
# NOTE: reading out subtasks from here since the qa_reports file doesn't have the subtask ID
# https://s3-us-west-1.amazonaws.com/6876.qa.report.linking/20190115_160816_00023_2646.0_2676.0_linking/qa_report.json
# https://scale.ai/corp/compare_attempts?subtask=5cfe774faaf51a0e1c8570a7&customerTaskResponse=https://s3-us-west-1.amazonaws.com/6876.qa.report.linking/20190115_160816_00023_2646.0_2676.0_linking/qa_report.json
try:
subtask_idx = report.split('?')[1].split('&')[0].split('=')[1] # extract subtask id's from qa_reports list
subtasks.append(subtask_idx)
except:
continue
for subtask_idx in subtasks[:1]:
SUBTASK_ID = subtask_idx
data = grepper(train_file, SUBTASK_ID)
fn_train = 'filtered_subtask/subtask_'+SUBTASK_ID+'_train.txt'
write_filtered(data, fn_train)
fn_uuid = 'filtered_subtask/subtask_'+SUBTASK_ID+'_uuid.txt'
data = grepper(uuid_file, SUBTASK_ID)
write_filtered(data, fn_uuid)
# extract object id from qa reports file
# grep uuid file for subtask id containing the object
# extract train + uuid lines from this subtask i:
# run model on the filtered subtask
# extract frames corresponding to matching object id
# grep SUBTASK_ID updated_train_nuro.txt > filtered_subtask/subtask_SUBTASK_ID_train.txt
# grep SUBTASK_ID updated_uuid_nuro.txt > filtered_subtask/subtask_SUBTASK_ID_uuid.txt
# f['nuro_labels'][0]['frames'][0]['annotations'][0]['scale_id']
| 237 | 0 | 46 |
5f24a025ef2dc3278c334e1f6b2c55ca69cdd7d2 | 293 | py | Python | count/models.py | paradoxpj/Word-Counter | 139ebd249ac5ead3da8e4e0cec657bfff7af9fc8 | [
"MIT"
] | null | null | null | count/models.py | paradoxpj/Word-Counter | 139ebd249ac5ead3da8e4e0cec657bfff7af9fc8 | [
"MIT"
] | 6 | 2020-06-06T01:15:40.000Z | 2022-02-10T10:06:54.000Z | count/models.py | paradoxpj/Word-Counter | 139ebd249ac5ead3da8e4e0cec657bfff7af9fc8 | [
"MIT"
] | null | null | null | from django.db import models
| 24.416667 | 76 | 0.74744 | from django.db import models
class UrlText(models.Model):
urltext = models.CharField(max_length=200)
class FinalData(models.Model):
query = models.ForeignKey(UrlText, null=False, on_delete=models.CASCADE)
key = models.CharField(max_length=20)
value = models.IntegerField()
| 0 | 216 | 46 |
17470b1905f03fbaf82b0b46e589916e26795a74 | 337 | py | Python | Exercicios/ex031 - Passagem.py | anderdot/curso-em-video-python | ea295cf0afa914ff9ab9acb87c458d77e3fb62ad | [
"MIT"
] | null | null | null | Exercicios/ex031 - Passagem.py | anderdot/curso-em-video-python | ea295cf0afa914ff9ab9acb87c458d77e3fb62ad | [
"MIT"
] | null | null | null | Exercicios/ex031 - Passagem.py | anderdot/curso-em-video-python | ea295cf0afa914ff9ab9acb87c458d77e3fb62ad | [
"MIT"
] | null | null | null | # Desafio 031: Dado uma distancia da viagem, calcule o valor, 0.50$ pra cada
# quilometro até 200km e 0.45 para viagens acima de 200km.
from cores import cor
d = float(input('Digite a distância da viagem: '))
print('O custo será de R$ {}{:.2f}{} reais.'.format(
cor.verde,
d * 0.50 if d <= 200 else d * 0.45,
cor.reset))
| 33.7 | 77 | 0.652819 | # Desafio 031: Dado uma distancia da viagem, calcule o valor, 0.50$ pra cada
# quilometro até 200km e 0.45 para viagens acima de 200km.
from cores import cor
d = float(input('Digite a distância da viagem: '))
print('O custo será de R$ {}{:.2f}{} reais.'.format(
cor.verde,
d * 0.50 if d <= 200 else d * 0.45,
cor.reset))
| 0 | 0 | 0 |
b5a3b5bf19b4bc7889c5170aa1b370a645890f41 | 32,955 | py | Python | bhabana/utils/data_utils.py | dashayushman/bhabana | 7438505e20be53a4c524324abf9cf8985d0fc684 | [
"Apache-2.0"
] | null | null | null | bhabana/utils/data_utils.py | dashayushman/bhabana | 7438505e20be53a4c524324abf9cf8985d0fc684 | [
"Apache-2.0"
] | null | null | null | bhabana/utils/data_utils.py | dashayushman/bhabana | 7438505e20be53a4c524324abf9cf8985d0fc684 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import math
import spacy
import codecs
import tarfile
import logging
import requests
import collections
import progressbar
import torch as th
import numpy as np
from sklearn.cluster.k_means_ import k_means
import bhabana.utils as utils
import bhabana.utils.generic_utils as gu
from bhabana.utils import wget
from bhabana.utils import constants
from torch.autograd import Variable
logger = logging.getLogger(__name__)
spacy_nlp_collection = {}
def get_spacy(lang='en', model=None):
"""
Returns the spaCy pipeline for the specified language.
Keyword arguments:
lang -- the language whose pipeline will be returned.
"""
if model is not None:
if lang not in model:
raise ValueError("There is no correspondence between the Languge "
"({})and the Model ({}) provided.".format(lang, model))
global spacy_nlp_collection
spacy_model_name = model if model is not None else lang
model_key = "{}_{}".format(lang, spacy_model_name)
if model_key not in spacy_nlp_collection:
spacy_nlp_collection[model_key] = spacy.load(spacy_model_name)
return spacy_nlp_collection[model_key]
def pad_sentences(data_batch, pad=0, raw=False):
"""
Given a sentence, returns the sentence padded with the 'PAD' string. If
`pad` is smaller than the size of the sentence, the sentence is trimmed
to `pad` elements. If `pad` is 0, the function just returns the original
`data`. If raw is False, then the sentence is padded with 0 instead of
the 'PAD' string.
Keyword arguments:
pad -- The number of elements to which the sentence should be padded.
raw -- If True, the padding character will be a string 'PAD'; else, 0.
"""
padded_batch = []
for data in data_batch:
if pad == 0:
return data
if pad <= len(data):
return data[:pad]
pad_vec = [0 if not raw else 'PAD' for _ in range(len(data[-1]))]
for i in range(pad - len(data)):
padded_batch.append(pad_vec)
return padded_batch
def pad_int_sequences(sequences, maxlen=None, dtype='int32',
padding='post',
truncating='post', value=0.):
""" pad_sequences.
Pad each sequence to the same length: the length of the longest sequence.
If maxlen is provided, any sequence longer than maxlen is truncated to
maxlen. Truncation happens off either the beginning or the end (default)
of the sequence. Supports pre-padding and post-padding (default).
Arguments:
sequences: list of lists where each element is a sequence.
maxlen: int, maximum length.
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
Returns:
x: `numpy array` with dimensions (number_of_sequences, maxlen)
Credits: From Keras `pad_sequences` function.
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError("Truncating type '%s' not understood" % padding)
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError("Padding type '%s' not understood" % padding)
return x
def pad_1dconv_input(input, kernel_size, mode="same"):
"""
This method pads the input for "same" and "full"
convolutions. Currently just Same and full padding modes have been
implemented
:param input: Input Tensor with shape BATCH_SIZE X TIME_STEPS X FEATURES
:param mode:
:return: Padded Input Tensor with shape BATCH_SIZE X TIME_STEPS X FEATURES
"""
input_size = list(input.size())
if len(input_size) != 3:
raise ValueError("The Shape of the input is invalid."
" The Shape of the current input is {}, but ideally a 3D "
"vector is expected with shape in the following format:"
" BATCH_SIZE X TIME_STEPS X FEATURES".format(input_size))
n_time_steps = input_size[1]
if mode == "same":
n_padding = n_time_steps - (n_time_steps - kernel_size + 1)
elif mode == "full":
n_padding = 2 * (kernel_size -1)
else:
raise NotImplementedError("Other modes for padding have not been "
"implemented. Valid and Full are coming "
"soon")
if n_padding == 0:
padded_input = input
elif (n_padding % 2) == 0:
pad_len = int(n_padding / 2)
if input.data.is_cuda:
pad_tensor = Variable(th.zeros(input_size[0],
pad_len, input_size[-1]).cuda()).cuda()
else:
pad_tensor = Variable(th.zeros(input_size[0],
pad_len, input_size[-1]))
padded_input = th.cat([pad_tensor, input, pad_tensor], dim=1)
else:
pad_len = n_padding / 2
l_pad = int(math.ceil(pad_len))
r_pad = int(math.floor(pad_len))
if not input.data.is_cuda:
l_pad_tensor = Variable(th.zeros(input_size[0], l_pad,
input_size[-1]))
r_pad_tensor = Variable(th.zeros(input_size[0], r_pad,
input_size[-1]))
else:
l_pad_tensor = Variable(th.zeros(input_size[0], l_pad,
input_size[-1]).cuda()).cuda()
r_pad_tensor = Variable(th.zeros(input_size[0], r_pad,
input_size[-1]).cuda()).cuda()
padded_input = th.cat([l_pad_tensor, input, r_pad_tensor], dim=1)
return padded_input
def id2seq(data, i2w):
"""
`data` is a list of sequences. Each sequence is a list of numbers. For
example, the following could be an example of data:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
Each number represents the ID of a word in the vocabulary `i2w`. This
function transforms each list of numbers into the corresponding list of
words. For example, the list above could be transformed into:
[['the', 'dog', 'chased', 'the', 'cat' ],
['the', 'boy', 'kicked', 'the', 'girl'],
['the', 'girl', 'chased', 'the', 'boy' ]]
For a function that transforms the abovementioned list of words back into
IDs, see `seq2id`.
"""
buff = []
for seq in data:
w_seq = []
for term in seq:
if term in i2w:
w_seq.append(i2w[term])
sent = ' '.join(w_seq)
buff.append(sent)
return buff
def id2charseq(data, i2w):
"""
`data` is a list of sequences. Each sequence is a list of numbers. For
example, the following could be an example of data:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
Each number represents the ID of a word in the vocabulary `i2w`. This
function transforms each list of numbers into the corresponding list of
words. For example, the list above could be transformed into:
[['the', 'dog', 'chased', 'the', 'cat' ],
['the', 'boy', 'kicked', 'the', 'girl'],
['the', 'girl', 'chased', 'the', 'boy' ]]
For a function that transforms the abovementioned list of words back into
IDs, see `seq2id`.
"""
buff = []
for seq in data:
w_seq = []
for term in seq:
if term in i2w:
w_seq.append(i2w[term])
sent = ''.join(w_seq)
buff.append(sent)
return buff
def id2semhashseq(data, i2w):
    """
    Decode semantic-hash (k-hot trigram) sequences back into text.

    `data` is a list of sequences; each element of a sequence is a k-hot
    vector over the trigram vocabulary, where a positive entry at position
    ``i`` means trigram ``i2w[i]`` is part of the token. For every token
    vector the active trigram indexes are looked up in `i2w`, the '#'
    word-boundary markers are stripped, and the trigram strings are
    concatenated to approximate the original token. Tokens are then joined
    with spaces into one string per sequence.

    NOTE(review): this is a lossy reconstruction — overlapping trigram
    characters are not de-duplicated.
    """
    buff = []
    for seq in data:
        w_seq = []
        for term in seq:
            term_seq = []
            # Bug fix: np.where returns a *tuple* of index arrays; the
            # original iterated that tuple (yielding the whole array, which
            # is unhashable and cannot be tested against a dict) and then
            # looked up i2w[term] instead of i2w[index]. Take the first
            # axis' indexes and look each one up individually.
            trigram_indexes = np.where(np.asarray(term) > 0)[0]
            for index in trigram_indexes:
                index = int(index)  # numpy ints hash like ints, but be explicit
                if index in i2w:
                    term_seq.append(i2w[index].replace("#", ""))
            w_seq.append("".join(term_seq))
        sent = ' '.join(w_seq)
        buff.append(sent)
    return buff
def seq2id(data, w2i, seq_begin=False, seq_end=False):
    """
    Map a list of token sequences to the corresponding lists of word IDs.

    `data` is a list of sequences, each a list of word strings. Every word
    is looked up in the vocabulary `w2i` (word -> ID); words not in the
    vocabulary fall back to the UNK token's ID. This is the inverse of
    `id2seq`.

    Keyword arguments:
    seq_begin -- when True, prepend the BOS token ID to each sequence
    seq_end   -- when True, append the EOS token ID to each sequence
    """
    encoded = []
    for tokens in data:
        ids = [w2i[constants.BOS_WORD]] if seq_begin else []
        for token in tokens:
            try:
                if token in w2i:
                    ids.append(w2i[token])
                else:
                    ids.append(w2i[constants.UNK_WORD])
            except Exception as err:
                # Best-effort behaviour preserved from the original:
                # report the failure and skip the token.
                print(str(err))
        if seq_end:
            ids.append(w2i[constants.EOS_WORD])
        encoded.append(ids)
    return encoded
def append_seq_markers(data, seq_begin=True, seq_end=True):
    """
    Surround each sequence in `data` with 'SEQ_BEGIN' / 'SEQ_END' markers.

    `data` is a list of sequences (lists). Returns a new outer list; when a
    marker is added the inner list is a fresh copy, otherwise the original
    inner list object is reused.

    Keyword arguments:
    seq_begin -- when True, prepend the literal string 'SEQ_BEGIN'
    seq_end   -- when True, append the literal string 'SEQ_END'
    """
    prefix = ['SEQ_BEGIN'] if seq_begin else []
    suffix = ['SEQ_END'] if seq_end else []
    if not prefix and not suffix:
        # No markers requested: keep the original inner list objects.
        return [seq for seq in data]
    return [prefix + seq + suffix for seq in data]
def mark_entities(data, lang='en'):
    """
    Wrap named-entity tokens with BOE/EOE markers and their entity type.

    `data` is a list of token lists. Each token is run through the spaCy
    pipeline for `lang`; when spaCy assigns the token an entity type, the
    token is emitted as the four tokens ['BOE', token, <ENT_TYPE>, 'EOE'],
    otherwise it passes through unchanged. Returns a new list of marked
    token lists.

    Keyword arguments:
    lang -- language code used to select the spaCy pipeline.
    """
    nlp = get_spacy(lang=lang)
    marked_data = []
    for tokens in data:
        marked = []
        for token in tokens:
            analyzed = nlp(token)[0]
            ent_type = analyzed.ent_type_
            if ent_type == '':
                marked.append(token)
            else:
                marked.extend(['BOE', token, ent_type, 'EOE'])
        marked_data.append(marked)
    return marked_data
def sentence_tokenize(line, lang='en'):
    """
    Split `line` into sentences and tokenize each one.

    Runs the spaCy pipeline for `lang` over the string, then produces one
    token list per detected sentence. Tokens that are not part of a named
    entity are lowercased; entity tokens keep their original casing.

    For 'I ate chocolate. She ate cake.' this yields one token list per
    sentence, e.g. [['i', 'ate', 'chocolate', ...], ['she', 'ate', ...]].

    Keyword arguments:
    lang -- language code used to select the spaCy pipeline.
    """
    doc = get_spacy(lang=lang)(line)
    sentences = []
    for sent in doc.sents:
        tokens = [
            tok.text if tok.ent_type_ != '' else tok.text.lower()
            for tok in sent
        ]
        sentences.append(tokens)
    return sentences
def default_tokenize(sentence):
    """
    Split `sentence` on punctuation/whitespace and return the token list.

    The split pattern uses a capture group, so punctuation characters are
    kept as their own tokens; empty strings, single spaces and newlines
    are dropped from the result.
    """
    pieces = re.split(r"([-.\"',:? !\$#@~()*&\^%;\[\]/\\\+<>\n=])", sentence)
    return [tok for tok in pieces if tok not in ('', ' ', '\n')]
def tokenize(line, tokenizer='spacy', lang='en', spacy_model=None):
    """
    Tokenize `line` and return the list of token strings.

    Keyword arguments:
    tokenizer   -- 'spacy' (spaCy tokenizer; non-entity tokens are
                   lowercased except for German), 'split' (plain split on
                   single spaces), or anything else to fall back to the
                   regex-based `default_tokenize`.
    lang        -- language code, e.g. 'en' or 'de'.
    spacy_model -- optional explicit spaCy model name passed to `get_spacy`.
    """
    if tokenizer == 'split':
        return line.split(' ')
    if tokenizer != 'spacy':
        return default_tokenize(line)
    tokens = []
    for token in get_spacy(lang=lang, model=spacy_model).tokenizer(line):
        if token.ent_type_ != '':
            tokens.append(token.text)
        elif lang == 'de':
            # German is case-sensitive (nouns are capitalized): keep casing.
            tokens.append(token.text)
        else:
            tokens.append(token.text.lower())
    return tokens
def load_classes(classes_path):
    """
    Read class labels from `classes_path` (one label per line).

    Returns a pair ``(c2i, i2c)``: label -> index and index -> label
    mappings, with indexes assigned in file order starting from 0.
    """
    c2i = {}
    i2c = {}
    with codecs.open(classes_path, 'r', 'utf-8') as cf:
        for c_id, line in enumerate(cf):
            label = line.strip()
            c2i[label] = c_id
            i2c[c_id] = label
    return c2i, i2c
def load_vocabulary(vocab_path):
    """
    Read a vocabulary file and build word <-> ID mappings.

    The file at `vocab_path` contains one term per line (tab-separated
    columns; only the first column is used). IDs 0-4 are reserved for the
    special PAD/UNK/BOS/EOS/SPACE tokens; file terms are numbered from 5 in
    order of first appearance. Blank terms and duplicates are skipped.

    Returns a pair ``(w2i, i2w)``.
    """
    specials = [
        (constants.PAD, constants.PAD_WORD),
        (constants.UNK, constants.UNK_WORD),
        (constants.BOS, constants.BOS_WORD),
        (constants.EOS, constants.EOS_WORD),
        (constants.SPACE, constants.SPACE_WORD),
    ]
    w2i = {word: wid for wid, word in specials}
    i2w = {wid: word for wid, word in specials}
    next_id = 5
    with codecs.open(vocab_path, 'r', 'utf-8') as vf:
        for line in vf:
            term = line.strip().split('\t')[0]
            if len(term) == 0 or term == " ":
                continue
            if term not in w2i:
                w2i[term] = next_id
                i2w[next_id] = term
                next_id += 1
    return w2i, i2w
def preload_w2v(w2i, lang='en', model=None):
    '''
    Build a word-vector matrix for a vocabulary from spaCy's vectors.

    `w2i` maps words to row indexes. For each word spaCy has a vector for,
    the corresponding row of the returned matrix is filled with that
    vector; all other rows remain zero.

    Keyword arguments:
    lang  -- language code used to select the spaCy pipeline.
    model -- optional explicit spaCy model name.

    Returns a numpy array of shape (len(w2i), vector_size).
    '''
    logger.info('Preloading a w2v matrix')
    spacy_nlp = get_spacy(lang, model)
    vec_size = get_spacy_vector_size(lang, model)
    w2v = np.zeros((len(w2i), vec_size))
    bar = progressbar.ProgressBar(max_value=len(w2i),
                                  redirect_stdout=True)
    for i_t, term in enumerate(w2i):
        # Run the pipeline once per term; the original invoked it twice
        # (once for has_vector, once for .vector), doubling the work.
        doc = spacy_nlp(term)
        if doc.has_vector:
            w2v[w2i[term]] = doc.vector
        bar.update(i_t)
    bar.finish()
    return w2v
def rescale(values, new_range, original_range):
    """
    Linearly map each number in `values` from `original_range` to `new_range`.

    Both ranges are (low, high) pairs. If `new_range` is None or equal to
    `original_range`, `values` is returned unchanged (same object). If
    `original_range` has zero width, the mapping is degenerate and every
    output value is `new_range[0]`.

    Returns a new list of rescaled numbers.
    """
    if new_range is None or new_range == original_range:
        return values
    # Range widths are loop-invariant; the original recomputed them for
    # every element.
    original_size = original_range[1] - original_range[0]
    if original_size == 0:
        return [new_range[0] for _ in values]
    new_size = new_range[1] - new_range[0]
    return [
        ((value - original_range[0]) * new_size) / original_size + new_range[0]
        for value in values
    ]
# Manual smoke test for `pad_sequences`: pads a 3D batch (sequences of
# feature vectors) and a 2D batch (plain ID sequences) to length 2 and
# prints the results.
if __name__ == '__main__':
    # 3D case: two sequences of variable-length feature vectors.
    data_1 = [[[1, 2, 3], [2, 2, 3], [2, 3], [2, 3, 4, 5]],
              [[1, 2, 3], [2, 2, 3], [2, 3]]]
    # 2D case: two plain integer ID sequences of different lengths.
    data_2 = [[1, 2, 3, 4, 5 ,6], [1, 2, 3, 4]]
    padded_data2 = pad_sequences(data_2, 2)
    padded_data1 = pad_sequences(data_1, 2)
    print(np.array(padded_data1))
    print(padded_data2)
| 32.661051 | 101 | 0.567865 | import os
import re
import sys
import math
import spacy
import codecs
import tarfile
import logging
import requests
import collections
import progressbar
import torch as th
import numpy as np
from sklearn.cluster.k_means_ import k_means
import bhabana.utils as utils
import bhabana.utils.generic_utils as gu
from bhabana.utils import wget
from bhabana.utils import constants
from torch.autograd import Variable
logger = logging.getLogger(__name__)
spacy_nlp_collection = {}
def url_exists(url):
try:
request = requests.get(url, timeout=20)
except Exception as e:
raise Exception(str(e))
if request.status_code == 200:
logger.info('URL: {} exists'.format(url))
return True
else:
logger.warning('URL: {} does not exists or is not '
'responding'.format(url))
return False
def user_wants_to_download(name, type='model', force=False):
if force:
return True
sys.stdout.write("Could not find {} {}. Do you want to download it "
"([Y]/n)?".format(type, name))
sys.stdout.flush()
user_response = sys.stdin.readline()
if user_response is None:
user_response = True
elif user_response is '':
user_response = True
elif user_response is 'n' or user_response is 'N':
user_response = False
else:
user_response = True
return user_response
def download_from_url(url, output_dir):
if not url_exists(url):
raise FileNotFoundError('{} was not found in our data '
'repository'.format(url))
if not os.path.exists(output_dir): os.makedirs(output_dir)
filename = wget.download(url, out=output_dir)
return filename
def extract_tar_gz(file_path, output_dir="."):
logger.info('Untaring {}'.format(file_path))
if not tarfile.is_tarfile(file_path):
raise ValueError("{} is not a valid tar file".format(file_path))
tar = tarfile.open(file_path, "r:gz")
tar.extractall(path=output_dir)
tar.close()
def delete_file(file_path):
os.remove(file_path)
def download_and_extract_tar(file_url, output_dir):
tar_file_path = download_from_url(file_url, output_dir)
#extract_tar_gz(tar_file_path, output_dir)
delete_file(tar_file_path)
def maybe_download(name, type='model', force=False):
if type == 'dataset':
subdir = 'datasets'
output_dir = utils.DATASET_DIR
elif type == 'model':
subdir = 'models'
output_dir = utils.MODELS_DIR
else:
raise ValueError('downloadable data of type {} is not '
'supported.'.format(type))
output_path = os.path.join(output_dir, name)
tar_file_path = output_path + '.tar.gz'
file_url = utils.BASE_URL + subdir + '/' + name + '.tar.gz'
if not os.path.exists(output_path):
if not os.path.exists(tar_file_path):
if user_wants_to_download(name, type, force):
logger.info('Trying to download files from {}'.format(file_url))
try:
download_and_extract_tar(file_url, output_dir)
except:
raise FileNotFoundError('Could not find {} {}. Please download '
'the files to successfully run the '
'script'.format(type, name))
else:
return None
else:
try:
extract_tar_gz(tar_file_path, output_dir)
delete_file(tar_file_path)
except:
download_and_extract_tar(file_url, output_dir)
else:
logger.info('{} {} already exists'.format(name, type))
return output_path
def get_spacy(lang='en', model=None):
"""
Returns the spaCy pipeline for the specified language.
Keyword arguments:
lang -- the language whose pipeline will be returned.
"""
if model is not None:
if lang not in model:
raise ValueError("There is no correspondence between the Languge "
"({})and the Model ({}) provided.".format(lang, model))
global spacy_nlp_collection
spacy_model_name = model if model is not None else lang
model_key = "{}_{}".format(lang, spacy_model_name)
if model_key not in spacy_nlp_collection:
spacy_nlp_collection[model_key] = spacy.load(spacy_model_name)
return spacy_nlp_collection[model_key]
def pad_sentences(data_batch, pad=0, raw=False):
"""
Given a sentence, returns the sentence padded with the 'PAD' string. If
`pad` is smaller than the size of the sentence, the sentence is trimmed
to `pad` elements. If `pad` is 0, the function just returns the original
`data`. If raw is False, then the sentence is padded with 0 instead of
the 'PAD' string.
Keyword arguments:
pad -- The number of elements to which the sentence should be padded.
raw -- If True, the padding character will be a string 'PAD'; else, 0.
"""
padded_batch = []
for data in data_batch:
if pad == 0:
return data
if pad <= len(data):
return data[:pad]
pad_vec = [0 if not raw else 'PAD' for _ in range(len(data[-1]))]
for i in range(pad - len(data)):
padded_batch.append(pad_vec)
return padded_batch
def pad_int_sequences(sequences, maxlen=None, dtype='int32',
padding='post',
truncating='post', value=0.):
""" pad_sequences.
Pad each sequence to the same length: the length of the longest sequence.
If maxlen is provided, any sequence longer than maxlen is truncated to
maxlen. Truncation happens off either the beginning or the end (default)
of the sequence. Supports pre-padding and post-padding (default).
Arguments:
sequences: list of lists where each element is a sequence.
maxlen: int, maximum length.
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
Returns:
x: `numpy array` with dimensions (number_of_sequences, maxlen)
Credits: From Keras `pad_sequences` function.
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError("Truncating type '%s' not understood" % padding)
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError("Padding type '%s' not understood" % padding)
return x
def pad_vector_sequences(data, maxlen, value):
last_dim_max = 0
for d in data:
for f in d:
if len(f) > last_dim_max: last_dim_max = len(f)
last_dim_padded_batch = []
for d in data:
padded_features = []
for f in d:
f = np.array(f)
diff = last_dim_max - f.shape[0]
padded_features.append(np.pad(f, (0, diff),
'constant', constant_values=value).tolist())
last_dim_padded_batch.append(padded_features)
last_dim_padded_batch = np.array(last_dim_padded_batch)
padded_batch = []
for d in last_dim_padded_batch:
d = np.array(d)
if d.shape[0] > maxlen:
padded_batch.append(d[:maxlen, :].tolist())
else:
diff = maxlen - d.shape[0]
padded_batch.append(np.pad(d, [(0, diff), (0, 0)],
'constant', constant_values=value).tolist())
return padded_batch
def get_batch_depth(batch):
n_dims = 1
for d in batch:
if type(d) == list:
for d_i in d:
if type(d_i) == list:
for d_i_j in d_i:
if type(d_i_j) == list:
raise Exception("Currently padding for 3 "
"dimensions is supported")
else:
n_dims = 3
else:
n_dims = 2
break
else:
n_dims = 1
break
return n_dims
def pad_sequences(data, padlen=0, padvalue=0, raw=False):
padded_data = []
if padlen == 0:
return []
elif raw:
for d in data:
diff = padlen - len(d)
if diff > 0:
pads = ['PAD'] * diff
d = d + pads
padded_data.append(d[:padlen])
else:
#vec_data = np.array(data)
len_n_dims = get_batch_depth(data)
#len_n_dims = len(n_dims)
if len_n_dims == 2:
padded_data = pad_int_sequences(data, maxlen=padlen, dtype="int32",
padding='post', truncating='post', value=padvalue).tolist()
elif len_n_dims == 3:
padded_data = pad_vector_sequences(data, maxlen=padlen,
value=padvalue)
else:
raise NotImplementedError("Padding for more than 3 dimensional vectors has "
"not been implemented")
return padded_data
def pad_1dconv_input(input, kernel_size, mode="same"):
"""
This method pads the input for "same" and "full"
convolutions. Currently just Same and full padding modes have been
implemented
:param input: Input Tensor with shape BATCH_SIZE X TIME_STEPS X FEATURES
:param mode:
:return: Padded Input Tensor with shape BATCH_SIZE X TIME_STEPS X FEATURES
"""
input_size = list(input.size())
if len(input_size) != 3:
raise ValueError("The Shape of the input is invalid."
" The Shape of the current input is {}, but ideally a 3D "
"vector is expected with shape in the following format:"
" BATCH_SIZE X TIME_STEPS X FEATURES".format(input_size))
n_time_steps = input_size[1]
if mode == "same":
n_padding = n_time_steps - (n_time_steps - kernel_size + 1)
elif mode == "full":
n_padding = 2 * (kernel_size -1)
else:
raise NotImplementedError("Other modes for padding have not been "
"implemented. Valid and Full are coming "
"soon")
if n_padding == 0:
padded_input = input
elif (n_padding % 2) == 0:
pad_len = int(n_padding / 2)
if input.data.is_cuda:
pad_tensor = Variable(th.zeros(input_size[0],
pad_len, input_size[-1]).cuda()).cuda()
else:
pad_tensor = Variable(th.zeros(input_size[0],
pad_len, input_size[-1]))
padded_input = th.cat([pad_tensor, input, pad_tensor], dim=1)
else:
pad_len = n_padding / 2
l_pad = int(math.ceil(pad_len))
r_pad = int(math.floor(pad_len))
if not input.data.is_cuda:
l_pad_tensor = Variable(th.zeros(input_size[0], l_pad,
input_size[-1]))
r_pad_tensor = Variable(th.zeros(input_size[0], r_pad,
input_size[-1]))
else:
l_pad_tensor = Variable(th.zeros(input_size[0], l_pad,
input_size[-1]).cuda()).cuda()
r_pad_tensor = Variable(th.zeros(input_size[0], r_pad,
input_size[-1]).cuda()).cuda()
padded_input = th.cat([l_pad_tensor, input, r_pad_tensor], dim=1)
return padded_input
def id2seq(data, i2w):
"""
`data` is a list of sequences. Each sequence is a list of numbers. For
example, the following could be an example of data:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
Each number represents the ID of a word in the vocabulary `i2w`. This
function transforms each list of numbers into the corresponding list of
words. For example, the list above could be transformed into:
[['the', 'dog', 'chased', 'the', 'cat' ],
['the', 'boy', 'kicked', 'the', 'girl'],
['the', 'girl', 'chased', 'the', 'boy' ]]
For a function that transforms the abovementioned list of words back into
IDs, see `seq2id`.
"""
buff = []
for seq in data:
w_seq = []
for term in seq:
if term in i2w:
w_seq.append(i2w[term])
sent = ' '.join(w_seq)
buff.append(sent)
return buff
def id2charseq(data, i2w):
"""
`data` is a list of sequences. Each sequence is a list of numbers. For
example, the following could be an example of data:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
Each number represents the ID of a word in the vocabulary `i2w`. This
function transforms each list of numbers into the corresponding list of
words. For example, the list above could be transformed into:
[['the', 'dog', 'chased', 'the', 'cat' ],
['the', 'boy', 'kicked', 'the', 'girl'],
['the', 'girl', 'chased', 'the', 'boy' ]]
For a function that transforms the abovementioned list of words back into
IDs, see `seq2id`.
"""
buff = []
for seq in data:
w_seq = []
for term in seq:
if term in i2w:
w_seq.append(i2w[term])
sent = ''.join(w_seq)
buff.append(sent)
return buff
def id2semhashseq(data, i2w):
"""
`data` is a list of sequences. Each sequence is a list of numbers. For
example, the following could be an example of data:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
Each number represents the ID of a word in the vocabulary `i2w`. This
function transforms each list of numbers into the corresponding list of
words. For example, the list above could be transformed into:
[['the', 'dog', 'chased', 'the', 'cat' ],
['the', 'boy', 'kicked', 'the', 'girl'],
['the', 'girl', 'chased', 'the', 'boy' ]]
For a function that transforms the abovementioned list of words back into
IDs, see `seq2id`.
"""
buff = []
for seq in data:
w_seq = []
for term in seq:
term_seq = []
trigram_indexes = np.where(term > 0)
for index in trigram_indexes:
if index in i2w:
term_seq.append(i2w[term].replace("#", ""))
w_seq.append("".join(term_seq))
sent = ' '.join(w_seq)
buff.append(sent)
return buff
def seq2id(data, w2i, seq_begin=False, seq_end=False):
"""
`data` is a list of sequences. Each sequence is a list of words. For
example, the following could be an example of data:
[['the', 'dog', 'chased', 'the', 'cat' ],
['the', 'boy', 'kicked', 'the', 'girl'],
['the', 'girl', 'chased', 'the', 'boy' ]]
Each number represents the ID of a word in the vocabulary `i2w`. This
function transforms each list of numbers into the corresponding list of
words. For example, the list above could be transformed into:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
For a function that transforms the abovementioned list of IDs back into
words, see `id2seq`.
Keyword arguments:
seq_begin -- If True, insert the ID corresponding to 'SEQ_BEGIN' in the
beginning of each sequence
seq_end -- If True, insert the ID corresponding to 'SEQ_END' in the end
of each sequence
"""
buff = []
for seq in data:
id_seq = []
if seq_begin:
id_seq.append(w2i[constants.BOS_WORD])
for term in seq:
try:
id_seq.append(w2i[term] if term in w2i else w2i[constants.UNK_WORD])
except Exception as e:
print(str(e))
if seq_end:
id_seq.append(w2i[constants.EOS_WORD])
buff.append(id_seq)
return buff
def semhashseq2id(data, w2i):
buff = []
for seq in data:
id_seq = []
for term_hash_seq in seq:
k_hot = []
for hash in term_hash_seq:
k_hot.append(gu.to_categorical(w2i[hash] if hash in w2i \
else w2i[constants.UNK_WORD], len(w2i)))
k_hot = np.sum(k_hot, axis=0).tolist()
id_seq.append(k_hot)
buff.append(id_seq)
return buff
def sentence2id(data, w2i):
buff = []
for sentences in data:
id_seq = []
for sentence in sentences:
id_sentence = seq2id(sentence, w2i)
id_seq.append(id_sentence)
buff.append(id_seq)
return buff
def onehot2seq(data, i2w):
buff = []
for seq in data:
w_seq = []
for term in seq:
arg = np.argmax(term)
if arg in i2w:
if arg == 0 or arg == 1 or arg == 2:
continue
w_seq.append(i2w[arg])
sent = ' '.join(w_seq)
buff.append(sent)
return buff
def append_seq_markers(data, seq_begin=True, seq_end=True):
"""
`data` is a list of sequences. Each sequence is a list of numbers. For
example, the following could be an example of data:
[[1, 10, 4, 1, 6],
[1, 2, 5, 1, 3],
[1, 8, 4, 1, 2]]
Assume that 0 and 11 are IDs corresponding to 'SEQ_BEGIN' and 'SEQ_END',
respectively. This function adds 'SEQ_BEGIN' and 'SEQ_END' to all lists,
depending on the values of `seq_begin` and `seq_end`. For example, if
both are true, then, for the input above, this function will return:
[[0, 1, 10, 4, 1, 6, 11],
[0, 1, 2, 5, 1, 3, 11],
[0, 1, 8, 4, 1, 2, 11]]
Keyword arguments:
seq_begin -- If True, add the ID corresponding to 'SEQ_BEGIN' to each sequence
seq_end -- If True, add the ID corresponding to 'SEQ_END' to each sequence
"""
data_ = []
for d in data:
if seq_begin:
d = ['SEQ_BEGIN'] + d
if seq_end:
d = d + ['SEQ_END']
data_.append(d)
return data_
def mark_entities(data, lang='en'):
"""
`data` is a list of text lines. Each text line is a string composed of one
or more words. For example:
[['the dog chased the cat' ],
['the boy kicked the girl'],
['John kissed Mary']]
The function uses the spaCy pipeline in each line of text, finds Named
Entities, and tags them with their type. For example, for the example above,
the output will be:
[['the dog chased the cat' ],
['the boy kicked the girl'],
['BOE John PERSON EOE kissed BOE Mary PERSON EOE']]
where:
BOE indicates the beginning of an Entity
PERSON indicates the type of the Entity
EOE indicates the beginning of an Entity
Keyword arguments:
lang -- The language in which the sentences are (used to choose which spaCy
pipeline to call).
"""
marked_data = []
spacy_nlp = get_spacy(lang=lang)
for line in data:
marked_line = []
for token in line:
tok = spacy_nlp(token)[0]
if tok.ent_type_ != '':
marked_line.append('BOE')
marked_line.append(token)
marked_line.append(tok.ent_type_)
marked_line.append('EOE')
else:
marked_line.append(token)
marked_data.append(marked_line)
return marked_data
def sentence_tokenize(line, lang='en'):
"""
`line` is a string containing potentially multiple sentences. For each
sentence, this function produces a list of tokens. The output of this
function is a list containing the lists of tokens produced. For example,
say line is:
'I ate chocolate. She ate cake.'
This function produces:
[['I', 'ate', 'chocolate'],
['She', 'ate', 'cake']]
"""
sentences = []
doc = get_spacy(lang=lang)(line)
for sent in doc.sents:
sentence_tokens = []
for token in sent:
if token.ent_type_ == '':
sentence_tokens.append(token.text.lower())
else:
sentence_tokens.append(token.text)
sentences.append(sentence_tokens)
return sentences
def default_tokenize(sentence):
"""
Returns a list of strings containing each token in `sentence`
"""
return [i for i in re.split(r"([-.\"',:? !\$#@~()*&\^%;\[\]/\\\+<>\n=])",
sentence) if i != '' and i != ' ' and i != '\n']
def tokenize(line, tokenizer='spacy', lang='en', spacy_model=None):
"""
Returns a list of strings containing each token in `line`.
Keyword arguments:
tokenizer -- Possible values are 'spacy', 'split' and 'other'.
lang -- Possible values are 'en' and 'de'
"""
tokens = []
if tokenizer == 'spacy':
doc = get_spacy(lang=lang, model=spacy_model).tokenizer(line)
for token in doc:
if token.ent_type_ == '':
if lang == 'de':
text = token.text
else:
text = token.text.lower()
tokens.append(text)
else:
tokens.append(token.text)
elif tokenizer == 'split':
tokens = line.split(' ')
else:
tokens = default_tokenize(line)
return tokens
def pos_tokenize(line, lang='en'):
tokens = []
doc = get_spacy(lang=lang)(line)
for token in doc:
tokens.append(token.tag_)
return tokens
def dep_tokenize(line, lang='en'):
tokens = []
doc = get_spacy(lang=lang)(line)
for token in doc:
tokens.append(token.dep_)
return tokens
def ent_tokenize(line, lang='en'):
tokens = []
doc = get_spacy(lang=lang)(line)
for token in doc:
tokens.append(token.ent_type_ if token.ent_type_ != "" else
constants.PAD_WORD)
return tokens
def semhash_tokenize(text, tokenizer="spacy", lang="en"):
tokens = tokenize(text, tokenizer=tokenizer, lang=lang)
hashed_tokens = ["#{}#".format(token) for token in tokens]
sem_hash_tokens = [["".join(gram)
for gram in find_ngrams(list(hash_token), 3)]
for hash_token in hashed_tokens]
return sem_hash_tokens
def char_tokenize(text):
chars = list(text)
for i_c, char in enumerate(chars):
if char == " ":
chars[i_c] = constants.SPACE_WORD
return chars
def find_ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def vocabulary_builder(data_paths, min_frequency=5, tokenizer='spacy',
downcase=True, max_vocab_size=None, line_processor=None,
lang='en'):
print('Building a new vocabulary')
cnt = collections.Counter()
for data_path in data_paths:
bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength,
redirect_stdout=True)
n_line = 0
for line in codecs.open(data_path, 'r', 'utf-8'):
line = line_processor(line)
if downcase:
line = line.lower()
tokens = tokenize(line, tokenizer, lang)
tokens = [_ for _ in tokens if len(_) > 0]
cnt.update(tokens)
n_line += 1
bar.update(n_line)
bar.finish()
print("Found %d unique tokens in the vocabulary.", len(cnt))
# Filter tokens below the frequency threshold
if min_frequency > 0:
filtered_tokens = [(w, c) for w, c in cnt.most_common()
if c > min_frequency]
cnt = collections.Counter(dict(filtered_tokens))
print("Found %d unique tokens with frequency > %d.",
len(cnt), min_frequency)
# Sort tokens by 1. frequency 2. lexically to break ties
vocab = cnt.most_common()
vocab = sorted(
vocab, key=lambda x: (x[1], x[0]), reverse=True)
# Take only max-vocab
if max_vocab_size is not None:
vocab = vocab[:max_vocab_size]
return vocab
def new_vocabulary(files, dataset_path, min_frequency, tokenizer,
downcase, max_vocab_size, name,
line_processor=lambda line: " ".join(line.split('\t')[:2]),
lang='en'):
vocab_path = os.path.join(dataset_path,
'{}_{}_{}_{}_{}_vocab.txt'.format(
name.replace(' ', '_'), min_frequency,
tokenizer, downcase, max_vocab_size))
metadata_path = os.path.join(dataset_path,
'{}_{}_{}_{}_{}_metadata.txt'.format(
name.replace(' ', '_'), min_frequency,
tokenizer, downcase, max_vocab_size))
w2v_path = os.path.join(dataset_path,
'{}_{}_{}_{}_{}_w2v.npy'.format(
name.replace(' ', '_'),
min_frequency, tokenizer, downcase,
max_vocab_size))
if os.path.exists(vocab_path) and os.path.exists(w2v_path) and \
os.path.exists(metadata_path):
print("Files exist already")
return vocab_path, w2v_path, metadata_path
word_with_counts = vocabulary_builder(files,
min_frequency=min_frequency,
tokenizer=tokenizer,
downcase=downcase,
max_vocab_size=max_vocab_size,
line_processor=line_processor,
lang=lang)
entities = ['PERSON', 'NORP', 'FACILITY', 'ORG', 'GPE', 'LOC' +
'PRODUCT', 'EVENT', 'WORK_OF_ART', 'LANGUAGE',
'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY',
'ORDINAL', 'CARDINAL', 'BOE', 'EOE']
with codecs.open(vocab_path, 'w', 'utf-8') as vf, codecs.open(metadata_path, 'w', 'utf-8') as mf:
mf.write('word\tfreq\n')
mf.write('PAD\t1\n')
mf.write('SEQ_BEGIN\t1\n')
mf.write('SEQ_END\t1\n')
mf.write('UNK\t1\n')
vf.write('PAD\t1\n')
vf.write('SEQ_BEGIN\t1\n')
vf.write('SEQ_END\t1\n')
vf.write('UNK\t1\n')
for ent in entities:
vf.write("{}\t{}\n".format(ent, 1))
mf.write("{}\t{}\n".format(ent, 1))
for word, count in word_with_counts:
vf.write("{}\t{}\n".format(word, count))
mf.write("{}\t{}\n".format(word, count))
return vocab_path, w2v_path, metadata_path
def write_spacy_vocab(path, lang="en", model_name=None):
if not os.path.exists(path):
spacy_nlp = get_spacy(lang=lang, model=model_name)
vocab_size = 0
with codecs.open(path, 'w', 'utf-8') as f:
for tok in spacy_nlp.vocab:
vocab_size += 1
f.write("{}\n".format(tok.text))
def load_classes(classes_path):
"""
Loads the classes from file `classes_path`.
"""
c2i = {}
i2c = {}
c_id = 0
with codecs.open(classes_path, 'r', 'utf-8') as cf:
for line in cf:
label = line.strip()
c2i[label] = c_id
i2c[c_id] = label
c_id += 1
return c2i, i2c
def load_vocabulary(vocab_path):
"""
Loads the vocabulary from file `vocab_path`.
"""
w2i = {constants.PAD_WORD: constants.PAD, constants.UNK_WORD: constants.UNK,
constants.BOS_WORD: constants.BOS, constants.EOS_WORD: constants.EOS,
constants.SPACE_WORD: constants.SPACE}
i2w = {constants.PAD: constants.PAD_WORD, constants.UNK: constants.UNK_WORD,
constants.BOS: constants.BOS_WORD, constants.EOS: constants.EOS_WORD,
constants.SPACE: constants.SPACE_WORD}
with codecs.open(vocab_path, 'r', 'utf-8') as vf:
wid = 5
dup_id = 0
for line in vf:
term = line.strip().split('\t')[0]
if term == " " or len(term) == 0:
continue
if term not in w2i:
w2i[term] = wid
i2w[wid] = term
wid += 1
#else:
# w2i["{}{}".format(term, dup_id)] = wid
# i2w[wid] = "{}{}".format(term, dup_id)
# wid += 1
# dup_id += 1
return w2i, i2w
def preload_w2v(w2i, lang='en', model=None):
'''
Loads the vocabulary based on spaCy's vectors.
Keyword arguments:
initialize -- Either 'random' or 'zeros'. Indicate the value of the new
vectors to be created (if a word is not found in spaCy's
vocabulary
lang -- Either 'en' or 'de'.
'''
logger.info('Preloading a w2v matrix')
spacy_nlp = get_spacy(lang, model)
vec_size = get_spacy_vector_size(lang, model)
w2v = np.zeros((len(w2i), vec_size))
bar = progressbar.ProgressBar(max_value=len(w2i),
redirect_stdout=True)
for i_t, term in enumerate(w2i):
if spacy_nlp(term).has_vector:
w2v[w2i[term]] = spacy_nlp(term).vector
bar.update(i_t)
bar.finish()
return w2v
def get_spacy_vector_size(lang="en", model=None):
spacy_nlp = get_spacy(lang, model)
for lex in spacy_nlp.vocab:
tok = spacy_nlp(lex.text)
if tok.has_vector:
return tok.vector.shape[0]
def get_spacy_pos_tags(lang="en"):
get_spacy(lang)
mod = sys.modules["spacy.lang.{}.tag_map".format(lang)]
tag_list = []
for k in mod.TAG_MAP:
tag_list.append(k)
#del mod
return list(set(tag_list))
def get_spacy_dep_tags(lang="en"):
if lang == "en":
return constants.EN_DEP_TAGS
elif lang == "en":
return constants.DE_DEP_TAGS
else:
return constants.UNIVERSAL_DEP_TAGS
def get_spacy_ner_tags(lang="en"):
if lang == "en":
return constants.ONE_NOTE_NER_TAGS
else:
return constants.WIKI_NER_TAGS
def write_spacy_aux_vocab(path, lang, type="pos"):
    """Write an auxiliary tag vocabulary (one tag per line, UTF-8) to *path*.

    Does nothing when *path* already exists, so previously written vocab
    files act as a cache.  *type* selects the tag set: "pos" (part of
    speech), "ent" (named entities) or "dep" (dependency labels).
    """
    if os.path.exists(path):
        return
    loaders = {
        "pos": get_spacy_pos_tags,
        "ent": get_spacy_ner_tags,
        "dep": get_spacy_dep_tags,
    }
    if type not in loaders:
        raise Exception("Type {} is not supported or is an invalid type of "
                        "vocab.".format(type))
    vocab = loaders[type](lang)
    with codecs.open(path, 'w', 'utf-8') as f:
        for tok in vocab:
            f.write("{}\n".format(tok))
def load_w2v(path):
    """Load an embedding matrix previously written by save_w2v (.npy file)."""
    return np.load(path)
def save_w2v(path, w2v):
    """Persist the embedding matrix *w2v* to *path* in NumPy .npy format."""
    return np.save(path, w2v)
def validate_rescale(range):
    """Raise ValueError unless *range* is a valid (low, high) pair."""
    low, high = range[0], range[1]
    if low > high:
        raise ValueError('Incompatible rescale values. rescale[0] should '
                         'be less than rescale[1]. An example of a valid '
                         'rescale is (4, 8).')
def rescale(values, new_range, original_range):
    """Linearly map each number in *values* from *original_range* to *new_range*.

    Returns *values* itself (unchanged) when *new_range* is None or equal
    to *original_range*.  When *original_range* has zero width, every
    value maps to new_range[0].
    """
    if new_range is None:
        return values
    if new_range == original_range:
        return values
    # Hoist the loop-invariant span computations: the original recomputed
    # both range sizes on every iteration.
    original_span = original_range[1] - original_range[0]
    new_span = new_range[1] - new_range[0]
    if original_span == 0:
        # Degenerate source range: everything collapses to the new minimum.
        return [new_range[0] for _ in values]
    return [
        ((value - original_range[0]) * new_span) / original_span + new_range[0]
        for value in values
    ]
def is_supported_data(name):
    """Return True when *name* is a registered dataset (utils.DATA_REGISTER)."""
    # A membership test already yields a bool; no if/else ladder needed.
    return name in utils.DATA_REGISTER
if __name__ == '__main__':
    # Ad-hoc smoke test for pad_sequences (defined elsewhere in this module):
    # data_1 is a doubly nested batch, data_2 a flat batch of sequences.
    data_1 = [[[1, 2, 3], [2, 2, 3], [2, 3], [2, 3, 4, 5]],
              [[1, 2, 3], [2, 2, 3], [2, 3]]]
    data_2 = [[1, 2, 3, 4, 5 ,6], [1, 2, 3, 4]]
    # NOTE(review): the meaning of the second argument (2) is not visible
    # here -- presumably the padding symbol or depth; confirm against
    # pad_sequences' signature.
    padded_data2 = pad_sequences(data_2, 2)
    padded_data1 = pad_sequences(data_1, 2)
    print(np.array(padded_data1))
    print(padded_data2)
| 13,726 | 0 | 713 |
07fb7705e64f2158ddf99966f292b40cd9cd11ab | 2,119 | py | Python | data/re2/combine.py | CatherineWong/l3 | 53ed9dc99d9b247cb209333ae9b528974e5e7e96 | [
"Apache-2.0"
] | 46 | 2017-11-03T16:54:36.000Z | 2021-12-07T23:07:58.000Z | data/re2/combine.py | CatherineWong/l3 | 53ed9dc99d9b247cb209333ae9b528974e5e7e96 | [
"Apache-2.0"
] | 7 | 2018-08-03T18:27:53.000Z | 2020-12-17T17:08:52.000Z | data/re2/combine.py | CatherineWong/l3 | 53ed9dc99d9b247cb209333ae9b528974e5e7e96 | [
"Apache-2.0"
] | 6 | 2018-02-24T19:00:00.000Z | 2021-03-28T19:50:53.000Z | #!/usr/bin/env python2
import json
import re
import numpy as np
# Sentinel tokens wrapped around every sequence fed to the model.
START = "<"
STOP = ">"
SEP = "@"
# Seeded RNG kept for reproducibility (not actually used below).
random = np.random.RandomState(0)
# Number of support example pairs kept per val/test item (plus one held out).
N_EX = 5
with open("data.json") as data_f:
    data = json.load(data_f)
with open("templates.json") as template_f:
    templates = json.load(template_f)
with open("hints.json") as hint_f:
    hints = json.load(hint_f)
    # JSON object keys are strings; re-key hints by integer example index.
    hints = {int(k): v for k, v in hints.items()}
annotations = []
for i, example in enumerate(data):
    # Normalize the "before" regex: spell vowel/consonant char classes as V/C.
    t_before = example["before"]
    t_before = t_before.replace("[aeiou]", "V").replace("[^aeiou]", "C")
    re_before = t_before
    # Concrete letters mentioned by the pattern (text after the ")(" group
    # boundary, minus wildcards/classes), spaced one character per token.
    letters_before = t_before.split(")(")[1].replace(".", "").replace("V", "").replace("C", "")
    letters_before = " ".join(letters_before)
    # Abstract the pattern: collapse every literal letter run to "l".
    t_before = re.sub("[a-z]+", "l", t_before)
    t_after = example["after"][2:-2]
    re_after = t_after
    # Strip the backreference, keep the literal replacement letters.
    letters_after = t_after.replace("\\2", "")
    letters_after = " ".join(letters_after)
    t_after = re.sub("[a-z]+", "l", t_after)
    # Examples whose abstracted pattern has no hint templates are dropped.
    template_key = t_before + SEP + t_after
    if template_key not in templates:
        continue
    # Instantiate each template with the concrete letters to produce the
    # augmented hints, tokenized and wrapped in START/STOP.
    aug_hints = []
    for template in templates[template_key]:
        aug_hint = template.replace("BEFORE", letters_before).replace("AFTER", letters_after)
        aug_hint = [START] + aug_hint.split() + [STOP]
        aug_hints.append(aug_hint)
    if i in hints:
        hint = hints[i]
    else:
        hint = ""
    hint = [START] + hint.split() + [STOP]
    re_hint = START + re_before + SEP + re_after + STOP
    ex = []
    for inp, out in example["examples"]:
        inp = START + inp + STOP
        out = START + out + STOP
        ex.append((inp, out))
    annotations.append({
        "examples": ex,
        "re": re_hint,
        "hint": hint,
        "hints_aug": aug_hints
    })
# Fixed positional 3000/500/500 train/val/test split.
train = annotations[:3000]
val = annotations[3000:3500]
test = annotations[3500:4000]
# Truncate eval items to N_EX support examples plus one query.
for datum in val:
    del datum["examples"][N_EX+1:]
for datum in test:
    del datum["examples"][N_EX+1:]
corpus = {
    "train": train,
    "val": val,
    "test": test
}
with open("corpus.json", "w") as corpus_f:
    json.dump(corpus, corpus_f)
| 23.808989 | 95 | 0.599339 | #!/usr/bin/env python2
# Builds corpus.json from data.json + templates.json + hints.json:
# each regex-rewrite example is abstracted, paired with natural-language
# hints, tokenized with START/STOP sentinels, and split train/val/test.
import json
import re
import numpy as np
START = "<"
STOP = ">"
SEP = "@"
# Seeded RNG for reproducibility (unused in this script body).
random = np.random.RandomState(0)
N_EX = 5
with open("data.json") as data_f:
    data = json.load(data_f)
with open("templates.json") as template_f:
    templates = json.load(template_f)
with open("hints.json") as hint_f:
    hints = json.load(hint_f)
    # Re-key by integer index (JSON keys are strings).
    hints = {int(k): v for k, v in hints.items()}
annotations = []
for i, example in enumerate(data):
    t_before = example["before"]
    # Vowel/consonant classes become the single letters V / C.
    t_before = t_before.replace("[aeiou]", "V").replace("[^aeiou]", "C")
    re_before = t_before
    letters_before = t_before.split(")(")[1].replace(".", "").replace("V", "").replace("C", "")
    letters_before = " ".join(letters_before)
    # Literal letter runs are abstracted to the placeholder "l".
    t_before = re.sub("[a-z]+", "l", t_before)
    t_after = example["after"][2:-2]
    re_after = t_after
    letters_after = t_after.replace("\\2", "")
    letters_after = " ".join(letters_after)
    t_after = re.sub("[a-z]+", "l", t_after)
    template_key = t_before + SEP + t_after
    # Only keep examples that have hint templates for their pattern shape.
    if template_key not in templates:
        continue
    aug_hints = []
    for template in templates[template_key]:
        aug_hint = template.replace("BEFORE", letters_before).replace("AFTER", letters_after)
        aug_hint = [START] + aug_hint.split() + [STOP]
        aug_hints.append(aug_hint)
    if i in hints:
        hint = hints[i]
    else:
        hint = ""
    hint = [START] + hint.split() + [STOP]
    re_hint = START + re_before + SEP + re_after + STOP
    ex = []
    for inp, out in example["examples"]:
        inp = START + inp + STOP
        out = START + out + STOP
        ex.append((inp, out))
    annotations.append({
        "examples": ex,
        "re": re_hint,
        "hint": hint,
        "hints_aug": aug_hints
    })
# Positional 3000/500/500 split; eval items keep N_EX+1 examples.
train = annotations[:3000]
val = annotations[3000:3500]
test = annotations[3500:4000]
for datum in val:
    del datum["examples"][N_EX+1:]
for datum in test:
    del datum["examples"][N_EX+1:]
corpus = {
    "train": train,
    "val": val,
    "test": test
}
with open("corpus.json", "w") as corpus_f:
    json.dump(corpus, corpus_f)
| 0 | 0 | 0 |
bd1d24f1ab560dfa55744e479be6d57b7bba9da4 | 225 | py | Python | Recursive Algorithms/Ackermann/Python/Ackermann.py | thomprycejones/Algorithms | 9ae771e34db598bc084c004ec791f6385f7e7793 | [
"MIT"
] | null | null | null | Recursive Algorithms/Ackermann/Python/Ackermann.py | thomprycejones/Algorithms | 9ae771e34db598bc084c004ec791f6385f7e7793 | [
"MIT"
] | null | null | null | Recursive Algorithms/Ackermann/Python/Ackermann.py | thomprycejones/Algorithms | 9ae771e34db598bc084c004ec791f6385f7e7793 | [
"MIT"
] | null | null | null |
def ackermann(m, n):
    """Compute the two-argument Ackermann function A(m, n).

    Every invocation is traced to stdout, so even small inputs emit a
    long call log.
    """
    print('A({},{})'.format(m, n))
    if m == 0:
        result = n + 1
    elif n == 0:
        result = ackermann(m - 1, 1)
    else:
        result = ackermann(m - 1, ackermann(m, n - 1))
    return result


print(ackermann(2,2))
136707da18afbc8878cc00c32db2c5142b2f06db | 5,646 | py | Python | tensorflow/contrib/cloud/python/ops/bigquery_reader_ops.py | AlexChrisF/udacity | b7f85a74058fc63ccb7601c418450ab934ef5953 | [
"Apache-2.0"
] | 28 | 2017-04-08T09:47:57.000Z | 2020-07-12T03:10:46.000Z | tensorflow/contrib/cloud/python/ops/bigquery_reader_ops.py | AlexChrisF/udacity | b7f85a74058fc63ccb7601c418450ab934ef5953 | [
"Apache-2.0"
] | 10 | 2017-07-13T00:24:03.000Z | 2017-07-17T07:39:03.000Z | tensorflow/contrib/cloud/python/ops/bigquery_reader_ops.py | AlexChrisF/udacity | b7f85a74058fc63ccb7601c418450ab934ef5953 | [
"Apache-2.0"
] | 38 | 2017-04-28T04:15:48.000Z | 2019-09-28T05:11:46.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BigQuery reading support for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cloud.python.ops import gen_bigquery_reader_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
class BigQueryReader(io_ops.ReaderBase):
  """A Reader emitting (row key, serialized tf.Example) pairs from BigQuery.

  Typical usage:
  ```python
  # For a BigQuery table with schema: name STRING, age INT, state STRING.
  features = dict(
      name=tf.FixedLenFeature([1], tf.string),
      age=tf.FixedLenFeature([1], tf.int32),
      state=tf.FixedLenFeature([1], dtype=tf.string, default_value="UNK"))
  reader = bigquery_reader_ops.BigQueryReader(project_id=PROJECT,
                                              dataset_id=DATASET,
                                              table_id=TABLE,
                                              timestamp_millis=TIME,
                                              num_partitions=NUM_PARTITIONS,
                                              features=features)
  # Feed the table partitions into a queue, then read and parse examples.
  queue = tf.training.string_input_producer(reader.partitions())
  row_id, examples_serialized = reader.read(queue)
  examples = tf.parse_example(examples_serialized, features=features)
  ```

  A snapshot timestamp is required so that every partition observes one
  consistent view of the table; see 'Table Decorators' in the BigQuery
  docs.  See ReaderBase for the methods a Reader supports.
  """
  def __init__(self,
               project_id,
               dataset_id,
               table_id,
               timestamp_millis,
               num_partitions,
               features=None,
               columns=None,
               test_end_point=None,
               name=None):
    """Creates a BigQueryReader.

    Args:
      project_id: GCP project ID.
      dataset_id: BigQuery dataset ID.
      table_id: BigQuery table ID.
      timestamp_millis: snapshot timestamp in milliseconds since the epoch;
        relative (negative or zero) snapshot times are not allowed.
      num_partitions: number of non-overlapping partitions to read from.
      features: parse_example-compatible dict of `VarLenFeature` /
        `FixedLenFeature` objects; its keys name the columns to read.
      columns: explicit list of columns to read; may be set iff features
        is None.
      test_end_point: used only for testing purposes (optional).
      name: a name for the operation (optional).

    Raises:
      TypeError: if neither or both of features/columns are given, if
        features is not a dict, or if columns is not a list.
    """
    # Exactly one of the two column specifications must be provided.
    if features is not None:
      if columns is not None:
        raise TypeError("exactly one of features and columns must be set.")
      if not isinstance(features, dict):
        raise TypeError("features must be a dict.")
      self._columns = list(features.keys())
    else:
      if columns is None:
        raise TypeError("exactly one of features and columns must be set.")
      if not isinstance(columns, list):
        raise TypeError("columns must be a list.")
      self._columns = columns
    self._project_id = project_id
    self._dataset_id = dataset_id
    self._table_id = table_id
    self._timestamp_millis = timestamp_millis
    self._num_partitions = num_partitions
    self._test_end_point = test_end_point
    reader = gen_bigquery_reader_ops.big_query_reader(
        name=name,
        project_id=self._project_id,
        dataset_id=self._dataset_id,
        table_id=self._table_id,
        timestamp_millis=self._timestamp_millis,
        columns=self._columns,
        test_end_point=self._test_end_point)
    super(BigQueryReader, self).__init__(reader)
  def partitions(self, name=None):
    """Returns serialized BigQueryTablePartition messages.

    The messages describe a non-overlapping division of the table for a
    bulk read.

    Args:
      name: a name for the operation (optional).

    Returns:
      `1-D` string `Tensor` of serialized `BigQueryTablePartition` messages.
    """
    return gen_bigquery_reader_ops.generate_big_query_reader_partitions(
        name=name,
        project_id=self._project_id,
        dataset_id=self._dataset_id,
        table_id=self._table_id,
        timestamp_millis=self._timestamp_millis,
        num_partitions=self._num_partitions,
        test_end_point=self._test_end_point,
        columns=self._columns)
ops.NotDifferentiable("BigQueryReader")
| 37.390728 | 80 | 0.660113 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BigQuery reading support for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cloud.python.ops import gen_bigquery_reader_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
class BigQueryReader(io_ops.ReaderBase):
  """A Reader that outputs keys and tf.Example values from a BigQuery table.
  Example use:
  ```python
  # Assume a BigQuery has the following schema,
  #     name STRING,
  #     age INT,
  #     state STRING
  # Create the parse_examples list of features.
  features = dict(
      name=tf.FixedLenFeature([1], tf.string),
      age=tf.FixedLenFeature([1], tf.int32),
      state=tf.FixedLenFeature([1], dtype=tf.string, default_value="UNK"))
  # Create a Reader.
  reader = bigquery_reader_ops.BigQueryReader(project_id=PROJECT,
                                              dataset_id=DATASET,
                                              table_id=TABLE,
                                              timestamp_millis=TIME,
                                              num_partitions=NUM_PARTITIONS,
                                              features=features)
  # Populate a queue with the BigQuery Table partitions.
  queue = tf.training.string_input_producer(reader.partitions())
  # Read and parse examples.
  row_id, examples_serialized = reader.read(queue)
  examples = tf.parse_example(examples_serialized, features=features)
  # Process the Tensors examples["name"], examples["age"], etc...
  ```
  Note that to create a reader a snapshot timestamp is necessary. This
  will enable the reader to look at a consistent snapshot of the table.
  For more information, see 'Table Decorators' in BigQuery docs.
  See ReaderBase for supported methods.
  """
  def __init__(self,
               project_id,
               dataset_id,
               table_id,
               timestamp_millis,
               num_partitions,
               features=None,
               columns=None,
               test_end_point=None,
               name=None):
    """Creates a BigQueryReader.
    Args:
      project_id: GCP project ID.
      dataset_id: BigQuery dataset ID.
      table_id: BigQuery table ID.
      timestamp_millis: timestamp to snapshot the table in milliseconds since
        the epoch. Relative (negative or zero) snapshot times are not allowed.
        For more details, see 'Table Decorators' in BigQuery docs.
      num_partitions: Number of non-overlapping partitions to read from.
      features: parse_example compatible dict from keys to `VarLenFeature` and
        `FixedLenFeature` objects.  Keys are read as columns from the db.
      columns: list of columns to read, can be set iff features is None.
      test_end_point: Used only for testing purposes (optional).
      name: a name for the operation (optional).
    Raises:
      TypeError: - If features is neither None nor a dict or
                 - If columns is is neither None nor a list or
                 - If both features and columns are None or set.
    """
    # Exactly one of features/columns may be set: (a is None) == (b is None)
    # is true both when neither and when both are provided.
    if (features is None) == (columns is None):
      raise TypeError("exactly one of features and columns must be set.")
    if features is not None:
      if not isinstance(features, dict):
        raise TypeError("features must be a dict.")
      # The feature dict's keys double as the list of columns to fetch.
      self._columns = list(features.keys())
    elif columns is not None:
      if not isinstance(columns, list):
        raise TypeError("columns must be a list.")
      self._columns = columns
    self._project_id = project_id
    self._dataset_id = dataset_id
    self._table_id = table_id
    self._timestamp_millis = timestamp_millis
    self._num_partitions = num_partitions
    self._test_end_point = test_end_point
    # Build the underlying reader op and hand it to ReaderBase.
    reader = gen_bigquery_reader_ops.big_query_reader(
        name=name,
        project_id=self._project_id,
        dataset_id=self._dataset_id,
        table_id=self._table_id,
        timestamp_millis=self._timestamp_millis,
        columns=self._columns,
        test_end_point=self._test_end_point)
    super(BigQueryReader, self).__init__(reader)
  def partitions(self, name=None):
    """Returns serialized BigQueryTablePartition messages.
    These messages represent a non-overlapping division of a table for a
    bulk read.
    Args:
      name: a name for the operation (optional).
    Returns:
      `1-D` string `Tensor` of serialized `BigQueryTablePartition` messages.
    """
    return gen_bigquery_reader_ops.generate_big_query_reader_partitions(
        name=name,
        project_id=self._project_id,
        dataset_id=self._dataset_id,
        table_id=self._table_id,
        timestamp_millis=self._timestamp_millis,
        num_partitions=self._num_partitions,
        test_end_point=self._test_end_point,
        columns=self._columns)
# Reading rows has no meaningful gradient.
ops.NotDifferentiable("BigQueryReader")
| 0 | 0 | 0 |
e973c8634eb07788d94bfa45a8b4f95c94200b1b | 11,348 | py | Python | djamizdat/samizdat/models.py | sarutobi/djamizdat | 937a0ff4aa3a8798191db3aa9cc908d5b70ba043 | [
"MIT"
] | null | null | null | djamizdat/samizdat/models.py | sarutobi/djamizdat | 937a0ff4aa3a8798191db3aa9cc908d5b70ba043 | [
"MIT"
] | null | null | null | djamizdat/samizdat/models.py | sarutobi/djamizdat | 937a0ff4aa3a8798191db3aa9cc908d5b70ba043 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
# from documents.models import Languages
class WikiTexts(models.Model):
    """A text prepared for publication on the wiki (original: 'Текст для вики')."""
    # Title of the text.
    nazv = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    # Editorial headline.
    red_zag = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="редакционный заголовок"
    )
    author = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Автор"
    )
    translator = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Переводчик"
    )
    editor = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Редактор"
    )
    # NOTE(review): the verbose_name "Название" below is repeated for
    # data_n/place_n/data_i/place_i/samizdat/title -- looks copy-pasted;
    # confirm the intended labels before exposing them in the admin.
    data_n = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    place_n = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    data_i = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    place_i = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    # Genre.
    zhanr = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Жанр"
    )
    picture = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Изображение"
    )
    samizdat = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    categories = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Категории"
    )
    title = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    link = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Ссылка"
    )
    user = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Пользователь"
    )
    ruwiki = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="RU wiki"
    )
    enwiki = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="EN wiki"
    )
    # Creation timestamp, set automatically on insert.
    timestamp = models.DateTimeField(
        auto_now_add=True, null=True,
        verbose_name="Дата")
    oborotka = models.TextField(
        blank=True, default='',
        verbose_name="Оборотка"
    )
class TXTC(models.Model):
    """TXTC record; only carries a free-form type label."""
    t_type = models.CharField(
        max_length=50,
        blank=True, default='',
        verbose_name="Тип"
    )
class XTC(models.Model):
    """A mention card linking a Catalog document to pages of the Chronicle (ХТС)."""
    # Issue number of the Chronicle (ХТС).
    number = models.CharField(
        max_length=10,
        blank=True, default='',
        verbose_name="Номер ХТС"
    )
    # Free-form page specification; the parsed bounds live in pages_from/to.
    pages = models.CharField(
        max_length=50,
        verbose_name="Номера страниц",
        help_text="Номера страниц хроники, на которых упомянут документ"
    )
    pages_from = models.IntegerField(
        blank=True, null=True,
        verbose_name="Начальная страница диапазона")
    pages_to = models.IntegerField(
        blank=True, null=True,
        verbose_name="Последняя страница диапазона")
    # Kind of mention; defaults to the Russian abbreviation for "mentioned".
    profile = models.CharField(
        max_length=20,
        blank=True,
        verbose_name="Профиль упоминания",
        default="упом."
    )
    notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания"
    )
    # NOTE(review): no on_delete argument -- this model targets Django < 2.0.
    catalog = models.ForeignKey("Catalog", verbose_name="Документ")
    operator = models.CharField(
        max_length=8,
        blank=True, default='',
        verbose_name="Оператор"
    )
    # Entry date, set automatically on insert.
    date = models.DateField(
        auto_now_add=True, null=True, verbose_name="Дата ввода")
class Catalog(models.Model):
    """Main catalogue record for a samizdat document (original: 'Основной каталог ??')."""
    # --- identification -------------------------------------------------
    ACNumber = models.CharField(
        max_length=28,
        blank=True, default='',
        verbose_name="Номер АС"
    )
    language = models.CharField(
        max_length=2,
        blank=True, default='',
        verbose_name="Язык"
    )
    # language = models.ForeignKey(
    #     "documents.Languages",
    #     db_column="language",
    #     null=True)
    translated = models.CharField(
        max_length=2,
        blank=True, default='',
        verbose_name="Переведено"
    )
    # --- attribution ----------------------------------------------------
    author = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Автор"
    )
    auth_notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания к автору"
    )
    auth_group = models.CharField(
        max_length=100,
        blank=True, default='',
        verbose_name="Группа авторов"
    )
    # auth_group_notes = models.CharField(
    #     max_length=255,
    #     blank=True, default='',
    #     verbose_name="Примечания к группе авторов"
    # )
    group_members = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Состав группы"
    )
    # members_notes = models.CharField(
    #     max_length=255,
    #     blank=True, default='',
    #     verbose_name="Примечания к составу группы"
    # )
    signers = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Подписанты"
    )
    signers_notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания о подписантах"
    )
    # NOTE(review): "complie_editors" looks like a typo for
    # "compile_editors"; renaming would require a DB migration.
    complie_editors = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Редакторы_составители"
    )
    ce_notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания о редакторах-составителях"
    )
    # --- naming ---------------------------------------------------------
    selfname = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Самоназвание"
    )
    name1 = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="name1"
    )
    #XXX Possible foreign key to TypeDoc
    typedoc = models.CharField(
        max_length=25,
        blank=True, default='',
        verbose_name="Тип документа"
    )
    name = models.TextField(blank=True, verbose_name="Название")
    name2 = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Name2"
    )
    # --- place and date -------------------------------------------------
    place = models.CharField(
        max_length=100,
        blank=True, null=True, default='',
        verbose_name="Место"
    )
    m_ind = models.CharField(
        max_length=100,
        blank=True, default='',
        verbose_name="m-ind"
    )
    place_prim = models.CharField(
        max_length=255,
        blank=True, null=True, default='',
        verbose_name="PlacePrim"
    )
    # Free-form date string; machine-readable bounds live in date1/date2.
    date = models.CharField(
        max_length=125,
        blank=True, default='',
        verbose_name="Дата"
    )
    date_prim = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="DatePrim"
    )
    date1 = models.DateTimeField(blank=True, null=True, verbose_name="date1")
    date2 = models.DateTimeField(blank=True, null=True, verbose_name="date2")
    # --- physical description -------------------------------------------
    reproducing = models.CharField(
        max_length=15,
        blank=True, default='',
        verbose_name="Способ воспроизведения"
    )
    authencity = models.CharField(
        max_length=10,
        blank=True, default='',
        verbose_name="Подлинность"
    )
    num_copies = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Число экземпляров"
    )
    correction = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="Правка"
    )
    medium = models.CharField(
        max_length=35,
        blank=True, null=True,
        verbose_name="Носитель"
    )
    pages = models.CharField(
        max_length=50,
        blank=True, null=True,
        verbose_name="Страниц"
    )
    archive_notes = models.CharField(
        max_length=50,
        blank=True, null=True,
        verbose_name="Архивные примечания"
    )
    notes = models.TextField(
        blank=True, null=True,
        verbose_name="Примечания"
    )
    # --- publication and archive location --------------------------------
    published = models.TextField(
        blank=True, null=True,
        verbose_name="Опубликовано"
    )
    tome = models.CharField(
        max_length=15,
        blank=True, null=True,
        verbose_name="Том"
    )
    number_mc = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Номер МС"
    )
    year = models.CharField(
        max_length=20,
        blank=True, null=True,
        verbose_name="Год"
    )
    fund = models.CharField(
        max_length=70,
        blank=True, null=True,
        verbose_name="Фонд"
    )
    register = models.CharField(
        max_length=70,
        blank=True, null=True,
        verbose_name="Опись"
    )
    folder = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Дело"
    )
    sheets = models.CharField(
        max_length=50,
        blank=True, null=True,
        verbose_name="Листы"
    )
    annotation = models.TextField(
        blank=True, null=True,
        verbose_name="Аннотация"
    )
    web_address = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="Адрес документа"
    )
    nas = models.IntegerField(
        blank=True, null=True,
        verbose_name="NAS"
    )
    nas_ind = models.CharField(
        max_length=28,
        blank=True, null=True,
        verbose_name="NAS-ind"
    )
    # --- workflow flags ---------------------------------------------------
    # NOTE(review): NullBooleanField is deprecated in modern Django; use
    # BooleanField(null=True) when upgrading.
    troubles = models.NullBooleanField(verbose_name="Troubles")
    hr = models.NullBooleanField(
        verbose_name="В хронике",
        help_text="Отметка о том, что документ упоминается в Хронике"
    )
    hr_search = models.NullBooleanField(
        verbose_name="hr_poisk",
        help_text="Отметка для фильтра по номеру ХТС"
    )
    operator = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Оператор",
        help_text="Имя оператора, вводящего запись"
    )
    registration_date = models.DateTimeField(
        blank=True, null=True,
        verbose_name="Дата ввода",
        help_text="Дата ввода оператором(проставляется автоматически)",
        auto_now_add=True
    )
    ready = models.NullBooleanField(
        verbose_name="Ready",
        help_text="Отметка для записей, обработанных на авторство по Именнику"
    )
    belles_lettres = models.NullBooleanField(
        verbose_name="Художка",
    )
    link = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="Ссылка",
    )
    aka_name = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="AKA_name",
    )
| 26.390698 | 78 | 0.590501 | # -*- coding: utf-8 -*-
from django.db import models
# from documents.models import Languages
class WikiTexts(models.Model):
    """A text prepared for publication on the wiki (original: 'Текст для вики')."""
    class Meta:
        verbose_name = "Wiki заметка"
        verbose_name_plural = "Wiki заметки"
    # Title of the text.
    nazv = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    # Editorial headline.
    red_zag = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="редакционный заголовок"
    )
    author = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Автор"
    )
    translator = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Переводчик"
    )
    editor = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Редактор"
    )
    # NOTE(review): verbose_name "Название" repeats for several distinct
    # fields below -- looks copy-pasted; confirm the intended labels.
    data_n = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    place_n = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    data_i = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    place_i = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    zhanr = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Жанр"
    )
    picture = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Изображение"
    )
    samizdat = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    categories = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Категории"
    )
    title = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Название"
    )
    link = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Ссылка"
    )
    user = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Пользователь"
    )
    ruwiki = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="RU wiki"
    )
    enwiki = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="EN wiki"
    )
    # Creation timestamp, set automatically on insert.
    timestamp = models.DateTimeField(
        auto_now_add=True, null=True,
        verbose_name="Дата")
    oborotka = models.TextField(
        blank=True, default='',
        verbose_name="Оборотка"
    )
    def __unicode__(self):
        # Python 2 string representation: show the title.
        return self.nazv
class TXTC(models.Model):
    """TXTC record; only carries a free-form type label."""
    t_type = models.CharField(
        max_length=50,
        blank=True, default='',
        verbose_name="Тип"
    )
class XTC(models.Model):
    """A mention card linking a Catalog document to pages of the Chronicle (ХТС)."""
    class Meta:
        verbose_name = "Карточка"
        verbose_name_plural = "Карточки"
    # Issue number of the Chronicle (ХТС).
    number = models.CharField(
        max_length=10,
        blank=True, default='',
        verbose_name="Номер ХТС"
    )
    # Free-form page specification; parsed bounds live in pages_from/to.
    pages = models.CharField(
        max_length=50,
        verbose_name="Номера страниц",
        help_text="Номера страниц хроники, на которых упомянут документ"
    )
    pages_from = models.IntegerField(
        blank=True, null=True,
        verbose_name="Начальная страница диапазона")
    pages_to = models.IntegerField(
        blank=True, null=True,
        verbose_name="Последняя страница диапазона")
    # Kind of mention; defaults to the Russian abbreviation for "mentioned".
    profile = models.CharField(
        max_length=20,
        blank=True,
        verbose_name="Профиль упоминания",
        default="упом."
    )
    notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания"
    )
    # NOTE(review): no on_delete argument -- this model targets Django < 2.0.
    catalog = models.ForeignKey("Catalog", verbose_name="Документ")
    operator = models.CharField(
        max_length=8,
        blank=True, default='',
        verbose_name="Оператор"
    )
    # Entry date, set automatically on insert.
    date = models.DateField(
        auto_now_add=True, null=True, verbose_name="Дата ввода")
    def __unicode__(self):
        # Python 2 string representation: show the Chronicle issue number.
        return self.number
class Catalog(models.Model):
    """Main catalogue ?? — one record per archived document.

    A bibliographic/archival record: authorship, titles, provenance,
    physical description and archive location.  Most attributes are free-text
    ``CharField``s mirroring the original card catalogue.

    NOTE(review): field declaration order matters to Django (admin ordering,
    migrations) — do not reorder.
    """
    # --- identification & language ---------------------------------------
    ACNumber = models.CharField(
        max_length=28,
        blank=True, default='',
        verbose_name="Номер АС"
    )
    language = models.CharField(
        max_length=2,
        blank=True, default='',
        verbose_name="Язык"
    )
    # language = models.ForeignKey(
    #     "documents.Languages",
    #     db_column="language",
    #     null=True)
    translated = models.CharField(
        max_length=2,
        blank=True, default='',
        verbose_name="Переведено"
    )
    # --- authorship -------------------------------------------------------
    author = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Автор"
    )
    auth_notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания к автору"
    )
    auth_group = models.CharField(
        max_length=100,
        blank=True, default='',
        verbose_name="Группа авторов"
    )
    # auth_group_notes = models.CharField(
    #     max_length=255,
    #     blank=True, default='',
    #     verbose_name="Примечания к группе авторов"
    # )
    group_members = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Состав группы"
    )
    # members_notes = models.CharField(
    #     max_length=255,
    #     blank=True, default='',
    #     verbose_name="Примечания к составу группы"
    # )
    signers = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Подписанты"
    )
    signers_notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания о подписантах"
    )
    complie_editors = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Редакторы_составители"
    )
    ce_notes = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Примечания о редакторах-составителях"
    )
    # --- titles & document type -------------------------------------------
    selfname = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Самоназвание"
    )
    name1 = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="name1"
    )
    #XXX Possible foreign key to TypeDoc
    typedoc = models.CharField(
        max_length=25,
        blank=True, default='',
        verbose_name="Тип документа"
    )
    name = models.TextField(blank=True, verbose_name="Название")
    name2 = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="Name2"
    )
    # --- place & date (free-text; date1/date2 hold parsed datetimes) ------
    place = models.CharField(
        max_length=100,
        blank=True, null=True, default='',
        verbose_name="Место"
    )
    m_ind = models.CharField(
        max_length=100,
        blank=True, default='',
        verbose_name="m-ind"
    )
    place_prim = models.CharField(
        max_length=255,
        blank=True, null=True, default='',
        verbose_name="PlacePrim"
    )
    date = models.CharField(
        max_length=125,
        blank=True, default='',
        verbose_name="Дата"
    )
    date_prim = models.CharField(
        max_length=255,
        blank=True, default='',
        verbose_name="DatePrim"
    )
    date1 = models.DateTimeField(blank=True, null=True, verbose_name="date1")
    date2 = models.DateTimeField(blank=True, null=True, verbose_name="date2")
    # --- physical description ---------------------------------------------
    reproducing = models.CharField(
        max_length=15,
        blank=True, default='',
        verbose_name="Способ воспроизведения"
    )
    authencity = models.CharField(
        max_length=10,
        blank=True, default='',
        verbose_name="Подлинность"
    )
    num_copies = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Число экземпляров"
    )
    correction = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="Правка"
    )
    medium = models.CharField(
        max_length=35,
        blank=True, null=True,
        verbose_name="Носитель"
    )
    pages = models.CharField(
        max_length=50,
        blank=True, null=True,
        verbose_name="Страниц"
    )
    archive_notes = models.CharField(
        max_length=50,
        blank=True, null=True,
        verbose_name="Архивные примечания"
    )
    notes = models.TextField(
        blank=True, null=True,
        verbose_name="Примечания"
    )
    published = models.TextField(
        blank=True, null=True,
        verbose_name="Опубликовано"
    )
    # --- archive location --------------------------------------------------
    tome = models.CharField(
        max_length=15,
        blank=True, null=True,
        verbose_name="Том"
    )
    number_mc = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Номер МС"
    )
    year = models.CharField(
        max_length=20,
        blank=True, null=True,
        verbose_name="Год"
    )
    fund = models.CharField(
        max_length=70,
        blank=True, null=True,
        verbose_name="Фонд"
    )
    register = models.CharField(
        max_length=70,
        blank=True, null=True,
        verbose_name="Опись"
    )
    folder = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Дело"
    )
    sheets = models.CharField(
        max_length=50,
        blank=True, null=True,
        verbose_name="Листы"
    )
    annotation = models.TextField(
        blank=True, null=True,
        verbose_name="Аннотация"
    )
    web_address = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="Адрес документа"
    )
    # --- workflow / bookkeeping flags --------------------------------------
    nas = models.IntegerField(
        blank=True, null=True,
        verbose_name="NAS"
    )
    nas_ind = models.CharField(
        max_length=28,
        blank=True, null=True,
        verbose_name="NAS-ind"
    )
    troubles = models.NullBooleanField(verbose_name="Troubles")
    hr = models.NullBooleanField(
        verbose_name="В хронике",
        help_text="Отметка о том, что документ упоминается в Хронике"
    )
    hr_search = models.NullBooleanField(
        verbose_name="hr_poisk",
        help_text="Отметка для фильтра по номеру ХТС"
    )
    operator = models.CharField(
        max_length=10,
        blank=True, null=True,
        verbose_name="Оператор",
        help_text="Имя оператора, вводящего запись"
    )
    registration_date = models.DateTimeField(
        blank=True, null=True,
        verbose_name="Дата ввода",
        help_text="Дата ввода оператором(проставляется автоматически)",
        auto_now_add=True
    )
    ready = models.NullBooleanField(
        verbose_name="Ready",
        help_text="Отметка для записей, обработанных на авторство по Именнику"
    )
    belles_lettres = models.NullBooleanField(
        verbose_name="Художка",
    )
    link = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="Ссылка",
    )
    aka_name = models.CharField(
        max_length=255,
        blank=True, null=True,
        verbose_name="AKA_name",
    )

    def __unicode__(self):
        # Display label: the first 60 characters of the document title.
        return self.name[0:60]
| 86 | 168 | 133 |
99d232bec300897af4749fe343bc127619f5f7db | 3,087 | py | Python | website/apps/core/tests/test_view_source_edit.py | shh-dlce/pulotu | 82acbb8a3b7f3ec3acc76baffd4047265a77f7d3 | [
"Apache-2.0"
] | 2 | 2016-01-18T09:12:14.000Z | 2017-04-28T12:05:03.000Z | website/apps/core/tests/test_view_source_edit.py | shh-dlce/pulotu | 82acbb8a3b7f3ec3acc76baffd4047265a77f7d3 | [
"Apache-2.0"
] | 4 | 2015-12-02T11:12:25.000Z | 2021-11-16T11:51:32.000Z | website/apps/core/tests/test_view_source_edit.py | shh-dlce/pulotu | 82acbb8a3b7f3ec3acc76baffd4047265a77f7d3 | [
"Apache-2.0"
] | null | null | null | from django.core.urlresolvers import reverse
from website.apps.core.models import Source
from website.testutils import WithEditor
class Test_View_SourceEdit_NotLoggedIn(WithEditor):
"""Tests the source_edit view"""
class Test_View_SourceEdit_LoggedIn(WithEditor):
"""Tests the source_edit view"""
| 32.494737 | 77 | 0.576288 | from django.core.urlresolvers import reverse
from website.apps.core.models import Source
from website.testutils import WithEditor
class Test_View_SourceEdit_NotLoggedIn(WithEditor):
    """Tests the source_edit view"""

    def setUp(self):
        WithEditor.setUp(self)
        # One source on record so the edit URL resolves to something real.
        self.source1 = Source.objects.create(
            year=1991,
            author='Greenhill',
            slug='greenhill1991',
            reference='S2',
            comment='c1',
            editor=self.editor,
        )
        self.url = reverse("source-edit", kwargs={'slug': self.source1.slug})

    def test_error_when_not_logged_in(self):
        """An anonymous GET must redirect to the login page."""
        expected = "/accounts/login/?next=%s" % self.url
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, expected,
            status_code=302,
            target_status_code=200)
class Test_View_SourceEdit_LoggedIn(WithEditor):
    """Tests the source_edit view"""

    def setUp(self):
        WithEditor.setUp(self)
        self.source1 = Source.objects.create(
            year=1991,
            author='Greenhill',
            slug='greenhill1991',
            reference='S2',
            comment='c1',
            editor=self.editor)
        self.client.login(username="admin", password="test")

    def _post_new_source(self):
        """POST a minimal valid new-source form; return the response."""
        form_data = {
            'year': 2013,
            'author': 'Johnson',
            'reference': '...',
            'submit': 'true',
        }
        return self.client.post(reverse("source-add"), form_data)

    def test_404_on_missing_culture(self):
        response = self.client.get(
            reverse("source-edit", kwargs={'slug': 'fudge'})
        )
        self.assertEqual(response.status_code, 404)

    def test_get_existing(self):
        response = self.client.get(
            reverse("source-edit", kwargs={'slug': self.source1.slug})
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Greenhill')

    def test_get_new(self):
        response = self.client.get(reverse("source-add"))
        self.assertEqual(response.status_code, 200)

    def test_redirect_on_success(self):
        response = self._post_new_source()
        self.assertRedirects(
            response,
            reverse('admin:core_source_changelist'),
            status_code=302,
            target_status_code=200)

    def test_slug_is_added(self):
        # BUG FIX: this test was a byte-for-byte duplicate of
        # test_redirect_on_success and never looked at the slug.
        # Verify the created Source actually received a non-empty slug.
        self._post_new_source()
        source = Source.objects.get(year=2013, author='Johnson')
        self.assertTrue(source.slug)

    def test_error_on_duplicate_culture(self):
        # 'reference' deliberately omitted; duplicates source1's year/author.
        form_data = {
            'year': 1991,
            'author': 'Greenhill',
            'submit': 'true',
        }
        response = self.client.post(reverse("source-add"), form_data)
        assert not response.context['form'].is_valid()
| 2,535 | 0 | 243 |
175c4b33406d03406b24e50b8f76bce4df70ca5d | 4,288 | py | Python | tasks/finger_tapping_task/server_finger_tapping_task.py | eduongAZ/tomcat-baseline-tasks | a913962d1bb19531e734bfe780b2a8b400224741 | [
"MIT"
] | 1 | 2022-01-23T19:29:57.000Z | 2022-01-23T19:29:57.000Z | tasks/finger_tapping_task/server_finger_tapping_task.py | eduongAZ/tomcat-baseline-tasks | a913962d1bb19531e734bfe780b2a8b400224741 | [
"MIT"
] | 34 | 2022-01-18T18:26:15.000Z | 2022-03-31T19:21:28.000Z | tasks/finger_tapping_task/server_finger_tapping_task.py | eduongAZ/tomcat-baseline-tasks | a913962d1bb19531e734bfe780b2a8b400224741 | [
"MIT"
] | 2 | 2022-01-27T05:31:33.000Z | 2022-03-29T20:57:23.000Z | import csv
import json
import threading
import time
import psutil
import pygame
from common import record_metadata, request_clients_end
from config import UPDATE_RATE
from network import receive, send
from .config_finger_tapping_task import (COUNT_DOWN_MESSAGE,
SECONDS_COUNT_DOWN,
SECONDS_PER_SESSION, SESSION,
SQUARE_WIDTH)
from .utils import TAPPED, UNTAPPED
| 33.76378 | 106 | 0.605177 | import csv
import json
import threading
import time
import psutil
import pygame
from common import record_metadata, request_clients_end
from config import UPDATE_RATE
from network import receive, send
from .config_finger_tapping_task import (COUNT_DOWN_MESSAGE,
SECONDS_COUNT_DOWN,
SECONDS_PER_SESSION, SESSION,
SQUARE_WIDTH)
from .utils import TAPPED, UNTAPPED
class ServerFingerTappingTask:
    """Server side of the finger-tapping task.

    Broadcasts the shared tap state to every client at ``UPDATE_RATE``,
    consumes tap/untap commands from clients, and logs each broadcast to a
    timestamped CSV file (plus a JSON metadata file) under
    ``<data_save_path>/finger_tapping``.
    """

    def __init__(self,
                 to_client_connections: list,
                 from_client_connections: dict,
                 data_save_path: str = '') -> None:
        """Open the CSV log, write the session metadata file, and
        initialise every client's state to UNTAPPED."""
        self._to_client_connections = to_client_connections
        self._from_client_connections = from_client_connections

        # One entry per client, keyed by client name; everyone starts untapped.
        self._state = {}
        for client_name in from_client_connections.values():
            self._state[client_name] = UNTAPPED

        # CSV log named after the task start time (seconds since the epoch).
        data_path = data_save_path + "/finger_tapping"
        csv_file_name = data_path + '/' + str(int(time.time()))
        header = ['time', 'monotonic_time', 'boot_time', 'state']
        self._csv_file = open(csv_file_name + ".csv", 'w', newline='')
        self._csv_writer = csv.DictWriter(self._csv_file, delimiter=';',
                                          fieldnames=header)
        self._csv_writer.writeheader()

        # Record the session configuration next to the data file.
        metadata = {}
        metadata["session"] = SESSION
        metadata["seconds_per_session"] = SECONDS_PER_SESSION
        metadata["seconds_count_down"] = SECONDS_COUNT_DOWN
        metadata["square_width"] = SQUARE_WIDTH
        metadata["count_down_message"] = COUNT_DOWN_MESSAGE
        json_file_name = csv_file_name + "_metadata"
        record_metadata(json_file_name, metadata)

        self._running = False

    def run(self):
        """Run the broadcast and command threads until the sessions end."""
        self._running = True

        to_client_update_state_thread = threading.Thread(target=self._to_client_update_state, daemon=True)
        to_client_update_state_thread.start()

        from_client_commands_thread = threading.Thread(target=self._from_client_commands, daemon=True)
        from_client_commands_thread.start()

        print("[STATUS] Running finger tapping task")

        # Wait for threads to finish
        to_client_update_state_thread.join()
        from_client_commands_thread.join()

        # BUG FIX: the file was previously closed twice (before and after
        # request_clients_end); close it exactly once after clients are told
        # to stop.
        self._csv_file.close()
        request_clients_end(self._to_client_connections)

        print("[STATUS] Finger tapping task ended")

    def _to_client_update_state(self):
        """Broadcast the game state at UPDATE_RATE, advancing through the
        count-down and then each configured session; log each broadcast."""
        session_index = -1           # -1 while the initial count-down runs
        counter_target = SECONDS_COUNT_DOWN
        start_ticks = pygame.time.get_ticks()
        seconds = 0.0
        clock = pygame.time.Clock()
        while self._running:
            # Current phase elapsed: move on to the next session (or stop).
            if seconds >= counter_target:
                session_index += 1
                if session_index >= len(SESSION):
                    self._running = False
                    break
                counter_target = SECONDS_PER_SESSION[session_index]
                start_ticks = pygame.time.get_ticks()

            data = {}
            data["type"] = "state"
            data["state"] = self._state
            # During the count-down (index < 0) always reveal the state.
            data["reveal"] = 1 if session_index < 0 else SESSION[session_index]
            data["session_index"] = session_index

            # Remaining whole seconds in this phase, clamped to at least 1.
            seconds_to_send = int(counter_target) - int(seconds)
            data["seconds"] = 1 if seconds_to_send <= 0 else seconds_to_send

            # Record state of the game (only once the sessions have started)
            if session_index >= 0:
                self._csv_writer.writerow({"time" : time.time(), "monotonic_time" : time.monotonic(),
                                           "boot_time" : psutil.boot_time(), "state" : json.dumps(data)})

            send(self._to_client_connections, data)

            seconds = (pygame.time.get_ticks() - start_ticks) / 1000.0
            clock.tick(UPDATE_RATE)

    def _from_client_commands(self):
        """Consume tap/untap commands from clients and update the state."""
        while self._running:
            # Short timeout keeps the loop responsive to self._running.
            all_data = receive(self._from_client_connections.keys(), 0.1)
            for data in all_data:
                if data["type"] == "command":
                    if data["command"] == "tap":
                        self._state[data["sender"]] = TAPPED
                    else:
                        self._state[data["sender"]] = UNTAPPED
| 3,661 | 9 | 130 |
926f8ad9b8813e900f3dbb435ed9a0d97565d223 | 4,085 | py | Python | aux_files/old ML categorisation model/cat.py | RaghibMrz/webapp-testing | 3d7ecc68d3ee09af7e820c534b5cced1ad96aaab | [
"MIT"
] | 2 | 2020-07-01T02:46:29.000Z | 2020-07-01T02:47:23.000Z | aux_files/old ML categorisation model/cat.py | RaghibMrz/webapp-testing | 3d7ecc68d3ee09af7e820c534b5cced1ad96aaab | [
"MIT"
] | 9 | 2020-03-11T15:42:35.000Z | 2022-01-13T02:07:14.000Z | aux_files/old ML categorisation model/cat.py | RaghibMrz/webapp-testing | 3d7ecc68d3ee09af7e820c534b5cced1ad96aaab | [
"MIT"
] | null | null | null | import urllib.request
import requests
from requests import auth
import json
# def getRows(userID):
# me = auth.HTTPDigestAuth("admin", "admin")
# row = []
# transactionAttributes = ["BookingDateTime", "TransactionInformation", "Amount", "Currency"]
# id = str(userID)
# res = requests.get("http://51.11.48.127:8060/v1/documents?uri=/documents/"+id+".json", auth = me)
# if (res.status_code == 404):
# return False
# a = json.loads(res.text)
# for transaction in a['Data']['Transaction']:
# collecting = {
# 'BookingDateTime': '',
# 'TransactionInformation': '',
# 'Amount': '',
# 'Currency': ''
# }
# for attribute in transactionAttributes:
# if ((attribute == "Amount") or (attribute == "Currency")) :
# collecting[attribute] = transaction['Amount'][str(attribute)]
# else:
# collecting[attribute] = transaction[str(attribute)]
# row.append(collecting)
# return row
main() | 34.327731 | 179 | 0.503794 | import urllib.request
import requests
from requests import auth
import json
def getValues(r_list):
    """Build Azure ML input rows from a list of transaction descriptions.

    Each row is ``[index-as-string, description, ""]`` — the empty third
    column is the placeholder the scoring service expects.
    """
    return [[str(index), description, ""]
            for index, description in enumerate(r_list)]
def main():
    """Score a fixed batch of transaction descriptions against the Azure ML
    categorisation endpoint and print the predicted category names."""
    payload = {"Inputs": {
        "input1": {
            "ColumnNames": ["ID", "Description", "Column 2"],
            "Values": [
                ["1", "Amazon shopping", ""],
                ["2", "cash from John", ""],
                ["3", "Mikel Coffee", ""],
                ["4", "Pizza hut", ""],
                ["5", "council rent", ""],
                ["6", "paypal", ""]
            ]
        },
    },
        "GlobalParameters": {}
    }
    body = str.encode(json.dumps(payload))

    url = 'https://ussouthcentral.services.azureml.net/workspaces/39de74e263724481af1ff429fb093ea4/services/818e86b59ad64ea79133a75f13071aa1/execute?api-version=2.0&details=true'
    # NOTE(review): API key embedded in source — should live in an
    # environment variable or config outside version control.
    api_key = 'K8FsrW164co+p2caaZGOQC/uWQt3oEtDlktGkIa4CMz1H/ZdiLVYlHss+EQsDDYJK4grVsSkB9p6u9iT4jvW7Q=='
    headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}

    req = urllib.request.Request(url, body, headers)
    print(getCategories(getCategoryIDs(req)))
def getCategoryIDs(req):
    """Execute the prepared Azure ML request and return predicted category IDs.

    Returns one integer ID per scored row: column 13 of the service output,
    or 0 when the service returned no label for that row.  On HTTP failure
    the error is printed and an empty list is returned.
    """
    try:
        response = urllib.request.urlopen(req)
        result = response.read()
        dictionary = json.loads(result.decode('utf-8'))
        lists = dictionary["Results"]["output1"]["value"]["Values"]
        categoryList = []
        for refList in lists:
            # Column 13 carries the predicted category ID; 0 = uncategorised.
            if refList[13] is not None:
                categoryList.append(refList[13])
            else:
                categoryList.append(0)
        return categoryList
    except urllib.request.HTTPError as error:
        print("The request failed with status code: " + str(error.code))
        print(error.info())
        print(json.loads(error.read()))
        # BUG FIX: previously returned -1 (an int), which made the caller
        # getCategories() crash with TypeError when iterating the result.
        # An empty list keeps the failure path consistent with success.
        return []
def getCategories(catIDs):
    """Translate numeric category IDs into human-readable category names.

    ID 0 (and any row the service left unlabelled) maps to "Uncategorised".
    """
    catDict = {
        "1": "Bills & Payments",
        "2": "Transport",
        "3": "Groceries",
        "4": "Electronics",
        "5": "Fashion & Cosmetics",
        "6": "Finances",
        "7": "Food",
        "8": "Games & Sports",
        "9": "General",
        "10": "Charity",
        "11": "Entertainment",
        "12": "Leisure",
        "0": "Uncategorised"
    }
    return [catDict[str(cat_id)] for cat_id in catIDs]
# def getRows(userID):
# me = auth.HTTPDigestAuth("admin", "admin")
# row = []
# transactionAttributes = ["BookingDateTime", "TransactionInformation", "Amount", "Currency"]
# id = str(userID)
# res = requests.get("http://51.11.48.127:8060/v1/documents?uri=/documents/"+id+".json", auth = me)
# if (res.status_code == 404):
# return False
# a = json.loads(res.text)
# for transaction in a['Data']['Transaction']:
# collecting = {
# 'BookingDateTime': '',
# 'TransactionInformation': '',
# 'Amount': '',
# 'Currency': ''
# }
# for attribute in transactionAttributes:
# if ((attribute == "Amount") or (attribute == "Currency")) :
# collecting[attribute] = transaction['Amount'][str(attribute)]
# else:
# collecting[attribute] = transaction[str(attribute)]
# row.append(collecting)
# return row
main() | 2,899 | 0 | 100 |
c910e2484fb24d2134d5aba6e5f0f3ee5e95bd1f | 375 | py | Python | test/__init__.py | buddly27/nomenclator-nuke | 783a9ff9bba0a974cf4532ca5c14bb3fc0312af9 | [
"MIT"
] | 11 | 2021-09-06T15:41:28.000Z | 2022-03-21T23:52:59.000Z | test/__init__.py | buddly27/nomenclator-nuke | 783a9ff9bba0a974cf4532ca5c14bb3fc0312af9 | [
"MIT"
] | 2 | 2021-09-14T02:56:55.000Z | 2021-09-14T03:00:03.000Z | test/__init__.py | buddly27/nomenclator-nuke | 783a9ff9bba0a974cf4532ca5c14bb3fc0312af9 | [
"MIT"
] | 2 | 2021-09-07T06:53:06.000Z | 2021-09-13T19:20:22.000Z | """
Converting test folders into modules allows to use similar file names within
structure::
test/
__init__.py
integration/
__init__.py
test_something.py
unit/
__init__.py
test_something.py
.. seealso::
https://docs.pytest.org/en/latest/goodpractices.html#tests-outside-application-code
"""
| 19.736842 | 87 | 0.618667 | """
Converting test folders into modules allows to use similar file names within
structure::
test/
__init__.py
integration/
__init__.py
test_something.py
unit/
__init__.py
test_something.py
.. seealso::
https://docs.pytest.org/en/latest/goodpractices.html#tests-outside-application-code
"""
| 0 | 0 | 0 |
6325e933883e4dd40593f480bb0ddfa36ae441fc | 2,342 | py | Python | venv/lib/python3.8/site-packages/continuous_delivery/models/provisioning_configuration_target.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/continuous_delivery/models/provisioning_configuration_target.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/continuous_delivery/models/provisioning_configuration_target.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
| 54.465116 | 161 | 0.614005 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProvisioningConfigurationTarget(Model):
    """Deployment target of a continuous-delivery provisioning configuration.

    NOTE(review): AutoRest-generated code (see the file header) — edits will
    be lost on regeneration, so only documentation is added here.
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'target_type': {'key': 'type', 'type': 'str'},
        'environment_type': {'key': 'environmentType', 'type': 'str'},
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'subscription_name': {'key': 'subscriptionName', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'resource_identifier': {'key': 'resourceIdentifier', 'type': 'str'},
        'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'authorization_info': {'key': 'authorizationInfo', 'type': 'AuthorizationInfo'},
        'slot_swap_configuration': {'key': 'slotSwapConfiguration', 'type': 'SlotSwapConfiguration'},
        'create_options': {'key': 'createOptions', 'type': 'CreateOptions'}
    }

    def __init__(self, provider=None, target_type=None, environment_type=None, friendly_name=None, subscription_id=None, subscription_name=None, tenant_id=None,
                 resource_identifier=None, resource_group_name=None, location=None, authorization_info=None,
                 slot_swap_configuration=None, create_options=None):
        # Every attribute defaults to None; msrest fills them during
        # deserialization using _attribute_map above.
        self.provider = provider
        self.target_type = target_type
        self.environment_type = environment_type
        self.friendly_name = friendly_name
        self.subscription_id = subscription_id
        self.subscription_name = subscription_name
        self.tenant_id = tenant_id
        self.resource_identifier = resource_identifier
        self.resource_group_name = resource_group_name
        self.location = location
        self.authorization_info = authorization_info
        self.slot_swap_configuration = slot_swap_configuration
        self.create_options = create_options
246bf38a3c8533bc691743186cfece50c216e7a3 | 6,124 | py | Python | djangocms_maps/migrations/0001_initial.py | WINAK-UA/djangocms-maps | 07863d4c2f1f07329c978a68a111fed57d6120bf | [
"BSD-3-Clause"
] | 5 | 2016-09-01T13:34:14.000Z | 2021-12-21T17:22:22.000Z | djangocms_maps/migrations/0001_initial.py | WINAK-UA/djangocms-maps | 07863d4c2f1f07329c978a68a111fed57d6120bf | [
"BSD-3-Clause"
] | 15 | 2016-08-27T22:14:02.000Z | 2021-03-23T23:05:36.000Z | djangocms_maps/migrations/0001_initial.py | WINAK-UA/djangocms-maps | 07863d4c2f1f07329c978a68a111fed57d6120bf | [
"BSD-3-Clause"
] | 15 | 2016-09-02T05:06:28.000Z | 2021-06-22T11:27:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 40.556291 | 96 | 0.402188 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for djangocms_maps: creates the ``Maps`` plugin model.

    NOTE(review): applied migrations are historical records — never edit the
    field definitions here; add a new migration instead.
    """

    dependencies = [
        ('cms', '0016_auto_20160608_1535'),
    ]

    operations = [
        migrations.CreateModel(
            name='Maps',
            fields=[
                # Multi-table-inheritance link to the base CMS plugin.
                ('cmsplugin_ptr', models.OneToOneField(
                    related_name='djangocms_maps_maps',
                    primary_key=True,
                    to='cms.CMSPlugin',
                    serialize=False,
                    parent_link=True,
                    auto_created=True,
                    on_delete=models.CASCADE,
                )),
                ('map_provider', models.CharField(
                    verbose_name='map provider',
                    max_length=16,
                    choices=[
                        ('mapbox', 'Mapbox OSM (API key required)'),
                        ('bingmaps', 'Bing Maps (API key required)'),
                        ('googlemaps', 'Google Maps (API key required)'),
                        ('here', 'HERE WeGo (API key required)'),
                        ('viamichelin', 'ViaMichelin (API key required)')],
                    default='mapbox')),
                ('title', models.CharField(
                    verbose_name='map title',
                    max_length=100,
                    null=True,
                    blank=True)),
                ('address', models.CharField(
                    verbose_name='address',
                    max_length=150)),
                ('zipcode', models.CharField(
                    verbose_name='zip code',
                    max_length=30)),
                ('city', models.CharField(
                    verbose_name='city',
                    max_length=100)),
                ('content', models.CharField(
                    help_text='Displayed under address in the bubble.',
                    verbose_name='additional content',
                    max_length=255,
                    blank=True)),
                ('style', models.TextField(
                    help_text='Provide a valid JSON configuration (escaped). '
                              'See developers.google.com/maps/documentation/javascript/styling',
                    verbose_name='custom map style',
                    blank=True)),
                ('zoom', models.PositiveSmallIntegerField(
                    verbose_name='zoom level',
                    default=13,
                    choices=[
                        (0, '0'),
                        (1, '1'),
                        (2, '2'),
                        (3, '3'),
                        (4, '4'),
                        (5, '5'),
                        (6, '6'),
                        (7, '7'),
                        (8, '8'),
                        (9, '9'),
                        (10, '10'),
                        (11, '11'),
                        (12, '12'),
                        (13, '13'),
                        (14, '14'),
                        (15, '15'),
                        (16, '16'),
                        (17, '17'),
                        (18, '18'),
                        (19, '19'),
                        (20, '20'),
                        (21, '21')])),
                ('lat', models.DecimalField(
                    help_text='Use latitude & longitude to fine tune the map position.',
                    blank=True,
                    verbose_name='latitude',
                    null=True,
                    max_digits=10,
                    decimal_places=6)),
                ('lng', models.DecimalField(
                    verbose_name='longitude',
                    max_digits=10,
                    null=True,
                    blank=True,
                    decimal_places=6)),
                ('route_planer_title', models.CharField(
                    verbose_name='route planner title',
                    max_length=150,
                    null=True,
                    blank=True,
                    default='Calculate your fastest way to here')),
                ('route_planer', models.BooleanField(
                    verbose_name='route planner',
                    default=False)),
                ('width', models.CharField(
                    help_text='Plugin width (in px, em, %).',
                    verbose_name='width',
                    max_length=6,
                    default='100%')),
                ('height', models.CharField(
                    help_text='Plugin height (in px, em).',
                    verbose_name='height',
                    max_length=6,
                    default='400px')),
                ('info_window', models.BooleanField(
                    help_text='Show textbox over marker',
                    verbose_name='info window',
                    default=True)),
                ('scrollwheel', models.BooleanField(
                    help_text='Enable scrollwheel zooming on the map',
                    verbose_name='scrollwheel',
                    default=True)),
                ('double_click_zoom', models.BooleanField(
                    verbose_name='double click zoom',
                    default=True)),
                ('draggable', models.BooleanField(
                    verbose_name='draggable',
                    default=True)),
                ('keyboard_shortcuts', models.BooleanField(
                    verbose_name='keyboard shortcuts',
                    default=True)),
                ('pan_control', models.BooleanField(
                    verbose_name='Pan control',
                    default=True)),
                ('zoom_control', models.BooleanField(
                    verbose_name='zoom control',
                    default=True)),
                ('street_view_control', models.BooleanField(
                    verbose_name='Street View control',
                    default=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]